Dataset schema (four string columns; the numbers are the minimum and maximum string lengths reported by the column statistics):
  hip_filename    string, 5 to 84 characters
  hip_content     string, 79 to 9.69M characters
  cuda_filename   string, 4 to 83 characters
  cuda_content    string, 19 to 9.69M characters
c6b16f2c7d857b2b47204453f0fb33eb9ac99559.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.py // //user function __device__ void res_calc_gpu( const double *x1, const double *x2, const double *q1, const double *q2, const double *adt1, const double *adt2, double *res1, double *res2) { double dx,dy,mu, ri, p1,vol1, p2,vol2, f; dx = x1[0] - x2[0]; dy = x1[1] - x2[1]; ri = 1.0f/q1[0]; p1 = gm1*(q1[3]-0.5f*ri*(q1[1]*q1[1]+q1[2]*q1[2])); vol1 = ri*(q1[1]*dy - q1[2]*dx); ri = 1.0f/q2[0]; p2 = gm1*(q2[3]-0.5f*ri*(q2[1]*q2[1]+q2[2]*q2[2])); vol2 = ri*(q2[1]*dy - q2[2]*dx); mu = 0.5f*((*adt1)+(*adt2))*eps; f = 0.5f*(vol1* q1[0] + vol2* q2[0] ) + mu*(q1[0]-q2[0]); res1[0] += f; res2[0] -= f; f = 0.5f*(vol1* q1[1] + p1*dy + vol2* q2[1] + p2*dy) + mu*(q1[1]-q2[1]); res1[1] += f; res2[1] -= f; f = 0.5f*(vol1* q1[2] - p1*dx + vol2* q2[2] - p2*dx) + mu*(q1[2]-q2[2]); res1[2] += f; res2[2] -= f; f = 0.5f*(vol1*(q1[3]+p1) + vol2*(q2[3]+p2) ) + mu*(q1[3]-q2[3]); res1[3] += f; res2[3] -= f; } // CUDA kernel function __global__ void op_cuda_res_calc( const double *__restrict ind_arg0, const double *__restrict ind_arg1, const double *__restrict ind_arg2, double *__restrict ind_arg3, const int *__restrict opDat0Map, const int *__restrict opDat2Map, int *ind_map, short *arg_map, int *ind_arg_sizes, int *ind_arg_offs, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { double arg6_l[4]; double arg7_l[4]; __shared__ int *ind_arg3_map, ind_arg3_size; __shared__ double *ind_arg3_s; __shared__ int nelems2, ncolor; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) { return; } if (threadIdx.x==0) { //get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x); ncolor = ncolors[blockId]; ind_arg3_size = ind_arg_sizes[0+blockId*1]; ind_arg3_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*1]; //set shared memory pointers int nbytes = 0; ind_arg3_s = (double *) &shared[nbytes]; } __syncthreads(); // make sure all of above completed for ( int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x ){ ind_arg3_s[n] = ZERO_double; } __syncthreads(); for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){ int col2 = -1; int map0idx; int map1idx; int map2idx; int map3idx; if (n<nelem) { //initialise local variables for ( int d=0; d<4; d++ ){ arg6_l[d] = ZERO_double; } for ( int d=0; d<4; d++ ){ arg7_l[d] = ZERO_double; } map0idx = opDat0Map[n + offset_b + set_size * 0]; map1idx = opDat0Map[n + offset_b + set_size * 1]; map2idx = opDat2Map[n + offset_b + set_size * 0]; map3idx = opDat2Map[n + offset_b + set_size * 1]; //user-supplied kernel call res_calc_gpu(ind_arg0+map0idx*2, ind_arg0+map1idx*2, ind_arg1+map2idx*4, ind_arg1+map3idx*4, ind_arg2+map2idx*1, ind_arg2+map3idx*1, arg6_l, arg7_l); col2 = colors[n+offset_b]; } //store local variables int arg6_map; int arg7_map; if (col2>=0) { arg6_map = arg_map[0*set_size+n+offset_b]; arg7_map = arg_map[1*set_size+n+offset_b]; } for ( int col=0; col<ncolor; col++ ){ if (col2==col) { arg6_l[0] += ind_arg3_s[0+arg6_map*4]; arg6_l[1] += ind_arg3_s[1+arg6_map*4]; arg6_l[2] += ind_arg3_s[2+arg6_map*4]; arg6_l[3] += ind_arg3_s[3+arg6_map*4]; arg7_l[0] += ind_arg3_s[0+arg7_map*4]; arg7_l[1] += ind_arg3_s[1+arg7_map*4]; arg7_l[2] += ind_arg3_s[2+arg7_map*4]; arg7_l[3] += 
ind_arg3_s[3+arg7_map*4]; ind_arg3_s[0+arg6_map*4] = arg6_l[0]; ind_arg3_s[1+arg6_map*4] = arg6_l[1]; ind_arg3_s[2+arg6_map*4] = arg6_l[2]; ind_arg3_s[3+arg6_map*4] = arg6_l[3]; ind_arg3_s[0+arg7_map*4] = arg7_l[0]; ind_arg3_s[1+arg7_map*4] = arg7_l[1]; ind_arg3_s[2+arg7_map*4] = arg7_l[2]; ind_arg3_s[3+arg7_map*4] = arg7_l[3]; } __syncthreads(); } } for ( int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x ){ ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n]; } } //host stub function void op_par_loop_res_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5, op_arg arg6, op_arg arg7){ int nargs = 8; op_arg args[8]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; args[6] = arg6; args[7] = arg7; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(2); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[2].name = name; OP_kernels[2].count += 1; int ninds = 4; int inds[8] = {0,0,1,1,2,2,3,3}; if (OP_diags>2) { printf(" kernel routine with indirection: res_calc\n"); } //get plan #ifdef OP_PART_SIZE_2 int part_size = OP_PART_SIZE_2; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args); if (set->size > 0) { op_plan *Plan = op_plan_get_stage(name,set,part_size,nargs,args,ninds,inds,OP_STAGE_INC); //execute plan int block_offset = 0; for ( int col=0; col<Plan->ncolors; col++ ){ if (col==Plan->ncolors_core) { op_mpi_wait_all_cuda(nargs, args); } #ifdef OP_BLOCK_SIZE_2 int nthread = OP_BLOCK_SIZE_2; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { int nshared = Plan->nsharedCol[col]; hipLaunchKernelGGL(( op_cuda_res_calc), dim3(nblocks),dim3(nthread),nshared, 0, (double *)arg0.data_d, (double *)arg2.data_d, (double *)arg4.data_d, (double *)arg6.data_d, arg0.map_data_d, arg2.map_data_d, Plan->ind_map, Plan->loc_map, Plan->ind_sizes, Plan->ind_offs, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set->size+set->exec_size); } block_offset += Plan->ncolblk[col]; } OP_kernels[2].transfer += Plan->transfer; OP_kernels[2].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(hipDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[2].time += wall_t2 - wall_t1; }
c6b16f2c7d857b2b47204453f0fb33eb9ac99559.cu
// // auto-generated by op2.py // //user function __device__ void res_calc_gpu( const double *x1, const double *x2, const double *q1, const double *q2, const double *adt1, const double *adt2, double *res1, double *res2) { double dx,dy,mu, ri, p1,vol1, p2,vol2, f; dx = x1[0] - x2[0]; dy = x1[1] - x2[1]; ri = 1.0f/q1[0]; p1 = gm1*(q1[3]-0.5f*ri*(q1[1]*q1[1]+q1[2]*q1[2])); vol1 = ri*(q1[1]*dy - q1[2]*dx); ri = 1.0f/q2[0]; p2 = gm1*(q2[3]-0.5f*ri*(q2[1]*q2[1]+q2[2]*q2[2])); vol2 = ri*(q2[1]*dy - q2[2]*dx); mu = 0.5f*((*adt1)+(*adt2))*eps; f = 0.5f*(vol1* q1[0] + vol2* q2[0] ) + mu*(q1[0]-q2[0]); res1[0] += f; res2[0] -= f; f = 0.5f*(vol1* q1[1] + p1*dy + vol2* q2[1] + p2*dy) + mu*(q1[1]-q2[1]); res1[1] += f; res2[1] -= f; f = 0.5f*(vol1* q1[2] - p1*dx + vol2* q2[2] - p2*dx) + mu*(q1[2]-q2[2]); res1[2] += f; res2[2] -= f; f = 0.5f*(vol1*(q1[3]+p1) + vol2*(q2[3]+p2) ) + mu*(q1[3]-q2[3]); res1[3] += f; res2[3] -= f; } // CUDA kernel function __global__ void op_cuda_res_calc( const double *__restrict ind_arg0, const double *__restrict ind_arg1, const double *__restrict ind_arg2, double *__restrict ind_arg3, const int *__restrict opDat0Map, const int *__restrict opDat2Map, int *ind_map, short *arg_map, int *ind_arg_sizes, int *ind_arg_offs, int block_offset, int *blkmap, int *offset, int *nelems, int *ncolors, int *colors, int nblocks, int set_size) { double arg6_l[4]; double arg7_l[4]; __shared__ int *ind_arg3_map, ind_arg3_size; __shared__ double *ind_arg3_s; __shared__ int nelems2, ncolor; __shared__ int nelem, offset_b; extern __shared__ char shared[]; if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) { return; } if (threadIdx.x==0) { //get sizes and shift pointers and direct-mapped data int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset]; nelem = nelems[blockId]; offset_b = offset[blockId]; nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x); ncolor = ncolors[blockId]; ind_arg3_size = ind_arg_sizes[0+blockId*1]; ind_arg3_map = &ind_map[0*set_size] + ind_arg_offs[0+blockId*1]; //set shared memory pointers int nbytes = 0; ind_arg3_s = (double *) &shared[nbytes]; } __syncthreads(); // make sure all of above completed for ( int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x ){ ind_arg3_s[n] = ZERO_double; } __syncthreads(); for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){ int col2 = -1; int map0idx; int map1idx; int map2idx; int map3idx; if (n<nelem) { //initialise local variables for ( int d=0; d<4; d++ ){ arg6_l[d] = ZERO_double; } for ( int d=0; d<4; d++ ){ arg7_l[d] = ZERO_double; } map0idx = opDat0Map[n + offset_b + set_size * 0]; map1idx = opDat0Map[n + offset_b + set_size * 1]; map2idx = opDat2Map[n + offset_b + set_size * 0]; map3idx = opDat2Map[n + offset_b + set_size * 1]; //user-supplied kernel call res_calc_gpu(ind_arg0+map0idx*2, ind_arg0+map1idx*2, ind_arg1+map2idx*4, ind_arg1+map3idx*4, ind_arg2+map2idx*1, ind_arg2+map3idx*1, arg6_l, arg7_l); col2 = colors[n+offset_b]; } //store local variables int arg6_map; int arg7_map; if (col2>=0) { arg6_map = arg_map[0*set_size+n+offset_b]; arg7_map = arg_map[1*set_size+n+offset_b]; } for ( int col=0; col<ncolor; col++ ){ if (col2==col) { arg6_l[0] += ind_arg3_s[0+arg6_map*4]; arg6_l[1] += ind_arg3_s[1+arg6_map*4]; arg6_l[2] += ind_arg3_s[2+arg6_map*4]; arg6_l[3] += ind_arg3_s[3+arg6_map*4]; arg7_l[0] += ind_arg3_s[0+arg7_map*4]; arg7_l[1] += ind_arg3_s[1+arg7_map*4]; arg7_l[2] += ind_arg3_s[2+arg7_map*4]; arg7_l[3] += ind_arg3_s[3+arg7_map*4]; ind_arg3_s[0+arg6_map*4] = arg6_l[0]; ind_arg3_s[1+arg6_map*4] = arg6_l[1]; 
ind_arg3_s[2+arg6_map*4] = arg6_l[2]; ind_arg3_s[3+arg6_map*4] = arg6_l[3]; ind_arg3_s[0+arg7_map*4] = arg7_l[0]; ind_arg3_s[1+arg7_map*4] = arg7_l[1]; ind_arg3_s[2+arg7_map*4] = arg7_l[2]; ind_arg3_s[3+arg7_map*4] = arg7_l[3]; } __syncthreads(); } } for ( int n=threadIdx.x; n<ind_arg3_size*4; n+=blockDim.x ){ ind_arg3[n%4+ind_arg3_map[n/4]*4] += ind_arg3_s[n]; } } //host stub function void op_par_loop_res_calc(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2, op_arg arg3, op_arg arg4, op_arg arg5, op_arg arg6, op_arg arg7){ int nargs = 8; op_arg args[8]; args[0] = arg0; args[1] = arg1; args[2] = arg2; args[3] = arg3; args[4] = arg4; args[5] = arg5; args[6] = arg6; args[7] = arg7; // initialise timers double cpu_t1, cpu_t2, wall_t1, wall_t2; op_timing_realloc(2); op_timers_core(&cpu_t1, &wall_t1); OP_kernels[2].name = name; OP_kernels[2].count += 1; int ninds = 4; int inds[8] = {0,0,1,1,2,2,3,3}; if (OP_diags>2) { printf(" kernel routine with indirection: res_calc\n"); } //get plan #ifdef OP_PART_SIZE_2 int part_size = OP_PART_SIZE_2; #else int part_size = OP_part_size; #endif int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args); if (set->size > 0) { op_plan *Plan = op_plan_get_stage(name,set,part_size,nargs,args,ninds,inds,OP_STAGE_INC); //execute plan int block_offset = 0; for ( int col=0; col<Plan->ncolors; col++ ){ if (col==Plan->ncolors_core) { op_mpi_wait_all_cuda(nargs, args); } #ifdef OP_BLOCK_SIZE_2 int nthread = OP_BLOCK_SIZE_2; #else int nthread = OP_block_size; #endif dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col], Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1); if (Plan->ncolblk[col] > 0) { int nshared = Plan->nsharedCol[col]; op_cuda_res_calc<<<nblocks,nthread,nshared>>>( (double *)arg0.data_d, (double *)arg2.data_d, (double *)arg4.data_d, (double *)arg6.data_d, arg0.map_data_d, arg2.map_data_d, Plan->ind_map, Plan->loc_map, Plan->ind_sizes, Plan->ind_offs, block_offset, Plan->blkmap, Plan->offset, Plan->nelems, Plan->nthrcol, Plan->thrcol, Plan->ncolblk[col], set->size+set->exec_size); } block_offset += Plan->ncolblk[col]; } OP_kernels[2].transfer += Plan->transfer; OP_kernels[2].transfer2 += Plan->transfer2; } op_mpi_set_dirtybit_cuda(nargs, args); cutilSafeCall(cudaDeviceSynchronize()); //update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[2].time += wall_t2 - wall_t1; }
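The pair above shows the basic rewrites hipify applies throughout this dataset: it prepends #include "hip/hip_runtime.h", turns the triple-chevron launch op_cuda_res_calc<<<nblocks,nthread,nshared>>>(...) into hipLaunchKernelGGL(op_cuda_res_calc, dim3(nblocks), dim3(nthread), nshared, 0, ...), and renames runtime calls such as cudaDeviceSynchronize to hipDeviceSynchronize. Below is a minimal, self-contained sketch of that mapping; the scale kernel and buffer sizes are illustrative stand-ins, not taken from the row, and the CUDA originals appear only in comments.

#include "hip/hip_runtime.h"

// Toy kernel standing in for op_cuda_res_calc; everything here is illustrative.
__global__ void scale(double *v, double f, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) v[i] *= f;
}

int main() {
  const int n = 1024;
  double *d = nullptr;
  hipMalloc((void **)&d, n * sizeof(double));   // CUDA: cudaMalloc
  dim3 blocks((n + 255) / 256), threads(256);
  // CUDA form:  scale<<<blocks, threads, 0 /*shared bytes*/>>>(d, 2.0, n);
  // HIP form emitted by hipify: the dynamic-shared-memory size and the stream
  // become ordinary arguments (0 = no dynamic shared memory, 0 = default stream).
  hipLaunchKernelGGL(scale, blocks, threads, 0, 0, d, 2.0, n);
  hipDeviceSynchronize();                       // CUDA: cudaDeviceSynchronize
  hipFree(d);                                   // CUDA: cudaFree
  return 0;
}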
b2ced903b8f2e6e67b64934f3734de1c1774e534.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/cross_kernel.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_launch_config.h" #include "paddle/phi/common/amp_type_traits.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/index_calculator.h" namespace phi { template <typename T> __global__ void Cross(const T* x, const T* y, T* out, const int stride, const int N, phi::funcs::IndexCalculator index_calculator) { CUDA_KERNEL_LOOP(i, N) { int offset = index_calculator(i); auto pos0 = offset + 0 * stride; auto pos1 = offset + 1 * stride; auto pos2 = offset + 2 * stride; using MPType = typename phi::dtype::MPTypeTrait<T>::Type; MPType x_pos0_mp = static_cast<MPType>(x[pos0]); MPType x_pos1_mp = static_cast<MPType>(x[pos1]); MPType x_pos2_mp = static_cast<MPType>(x[pos2]); MPType y_pos0_mp = static_cast<MPType>(y[pos0]); MPType y_pos1_mp = static_cast<MPType>(y[pos1]); MPType y_pos2_mp = static_cast<MPType>(y[pos2]); out[pos0] = static_cast<T>(x_pos1_mp * y_pos2_mp - x_pos2_mp * y_pos1_mp); out[pos1] = static_cast<T>(x_pos2_mp * y_pos0_mp - x_pos0_mp * y_pos2_mp); out[pos2] = static_cast<T>(x_pos0_mp * y_pos1_mp - x_pos1_mp * y_pos0_mp); } } template <typename T, typename Context> void CrossKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& y, int axis, DenseTensor* out) { auto& input_x = x; auto& input_y = y; auto* output = out; int dim = axis; auto input_x_dims = input_x.dims(); if (dim != DDim::kMaxRank) { PADDLE_ENFORCE_EQ( dim < input_x_dims.size() && dim >= (0 - input_x_dims.size()), true, phi::errors::OutOfRange( "Attr(dim) is out of range, It's expected " "to be in range of [-%d, %d]. But received Attr(dim) = %d.", input_x_dims.size(), input_x_dims.size() - 1, dim)); if (dim < 0) { dim += input_x_dims.size(); } PADDLE_ENFORCE_EQ( input_x_dims[dim] == 3, true, phi::errors::InvalidArgument( "Input(X/Y).dims[dim] must be equal to 3. But received: " "Input(X/Y).dims[dim] = [%d].", input_x_dims[dim])); } else { for (auto i = 0; i < input_x_dims.size(); i++) { if (input_x_dims[i] == 3) { dim = i; break; } } PADDLE_ENFORCE_EQ(dim == DDim::kMaxRank, false, phi::errors::InvalidArgument( "There must be at least one dimension 'd' so that " "Input(X/Y).dims()[d] is equal to 3. 
" "But received: Input(X/Y).dims() == [%s].", input_x_dims)); } std::vector<int> cal_dims; std::vector<int> left_strides; std::vector<int> full_strides; std::vector<int> merged_dims; for (int i = 0; i < dim; i++) { if (i == 0) { merged_dims.push_back(input_x_dims[i]); } else { merged_dims[0] *= input_x_dims[i]; } } int merge_axis = merged_dims.size(); merged_dims.push_back(input_x_dims[dim]); for (int i = dim + 1; i < input_x_dims.size(); i++) { if (i == dim + 1) { merged_dims.push_back(input_x_dims[i]); } else { merged_dims[merge_axis + 1] *= input_x_dims[i]; } } int full_dim = 1; for (int i = 0; i < merged_dims.size(); i++) { full_strides.insert(full_strides.begin(), full_dim); full_dim *= merged_dims[merged_dims.size() - i - 1]; if (i == merge_axis) { continue; } cal_dims.push_back(i); } int left_dim = 1; for (int i = merged_dims.size() - 1; i >= 0; i--) { if (i == merge_axis) { continue; } left_strides.insert(left_strides.begin(), left_dim); left_dim *= merged_dims[i]; } const auto* input_x_data = input_x.data<T>(); const auto* input_y_data = input_y.data<T>(); auto* out_data = dev_ctx.template Alloc<T>(out); auto index_calculator = phi::funcs::IndexCalculator( merged_dims.size() - 1, cal_dims, left_strides, full_strides); int64_t numel = x.numel(); backends::gpu::GpuLaunchConfig config = backends::gpu::GetGpuLaunchConfig1D(dev_ctx, numel / 3); hipLaunchKernelGGL(( Cross), dim3(config.block_per_grid), dim3(config.thread_per_block), 0, dev_ctx.stream(), input_x_data, input_y_data, out_data, full_strides[merge_axis], numel / 3, index_calculator); } } // namespace phi PD_REGISTER_KERNEL(cross, GPU, ALL_LAYOUT, phi::CrossKernel, phi::dtype::float16, float, double, int, int64_t) {}
b2ced903b8f2e6e67b64934f3734de1c1774e534.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/cross_kernel.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_launch_config.h" #include "paddle/phi/common/amp_type_traits.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/index_calculator.h" namespace phi { template <typename T> __global__ void Cross(const T* x, const T* y, T* out, const int stride, const int N, phi::funcs::IndexCalculator index_calculator) { CUDA_KERNEL_LOOP(i, N) { int offset = index_calculator(i); auto pos0 = offset + 0 * stride; auto pos1 = offset + 1 * stride; auto pos2 = offset + 2 * stride; using MPType = typename phi::dtype::MPTypeTrait<T>::Type; MPType x_pos0_mp = static_cast<MPType>(x[pos0]); MPType x_pos1_mp = static_cast<MPType>(x[pos1]); MPType x_pos2_mp = static_cast<MPType>(x[pos2]); MPType y_pos0_mp = static_cast<MPType>(y[pos0]); MPType y_pos1_mp = static_cast<MPType>(y[pos1]); MPType y_pos2_mp = static_cast<MPType>(y[pos2]); out[pos0] = static_cast<T>(x_pos1_mp * y_pos2_mp - x_pos2_mp * y_pos1_mp); out[pos1] = static_cast<T>(x_pos2_mp * y_pos0_mp - x_pos0_mp * y_pos2_mp); out[pos2] = static_cast<T>(x_pos0_mp * y_pos1_mp - x_pos1_mp * y_pos0_mp); } } template <typename T, typename Context> void CrossKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& y, int axis, DenseTensor* out) { auto& input_x = x; auto& input_y = y; auto* output = out; int dim = axis; auto input_x_dims = input_x.dims(); if (dim != DDim::kMaxRank) { PADDLE_ENFORCE_EQ( dim < input_x_dims.size() && dim >= (0 - input_x_dims.size()), true, phi::errors::OutOfRange( "Attr(dim) is out of range, It's expected " "to be in range of [-%d, %d]. But received Attr(dim) = %d.", input_x_dims.size(), input_x_dims.size() - 1, dim)); if (dim < 0) { dim += input_x_dims.size(); } PADDLE_ENFORCE_EQ( input_x_dims[dim] == 3, true, phi::errors::InvalidArgument( "Input(X/Y).dims[dim] must be equal to 3. But received: " "Input(X/Y).dims[dim] = [%d].", input_x_dims[dim])); } else { for (auto i = 0; i < input_x_dims.size(); i++) { if (input_x_dims[i] == 3) { dim = i; break; } } PADDLE_ENFORCE_EQ(dim == DDim::kMaxRank, false, phi::errors::InvalidArgument( "There must be at least one dimension 'd' so that " "Input(X/Y).dims()[d] is equal to 3. 
" "But received: Input(X/Y).dims() == [%s].", input_x_dims)); } std::vector<int> cal_dims; std::vector<int> left_strides; std::vector<int> full_strides; std::vector<int> merged_dims; for (int i = 0; i < dim; i++) { if (i == 0) { merged_dims.push_back(input_x_dims[i]); } else { merged_dims[0] *= input_x_dims[i]; } } int merge_axis = merged_dims.size(); merged_dims.push_back(input_x_dims[dim]); for (int i = dim + 1; i < input_x_dims.size(); i++) { if (i == dim + 1) { merged_dims.push_back(input_x_dims[i]); } else { merged_dims[merge_axis + 1] *= input_x_dims[i]; } } int full_dim = 1; for (int i = 0; i < merged_dims.size(); i++) { full_strides.insert(full_strides.begin(), full_dim); full_dim *= merged_dims[merged_dims.size() - i - 1]; if (i == merge_axis) { continue; } cal_dims.push_back(i); } int left_dim = 1; for (int i = merged_dims.size() - 1; i >= 0; i--) { if (i == merge_axis) { continue; } left_strides.insert(left_strides.begin(), left_dim); left_dim *= merged_dims[i]; } const auto* input_x_data = input_x.data<T>(); const auto* input_y_data = input_y.data<T>(); auto* out_data = dev_ctx.template Alloc<T>(out); auto index_calculator = phi::funcs::IndexCalculator( merged_dims.size() - 1, cal_dims, left_strides, full_strides); int64_t numel = x.numel(); backends::gpu::GpuLaunchConfig config = backends::gpu::GetGpuLaunchConfig1D(dev_ctx, numel / 3); Cross<<<config.block_per_grid, config.thread_per_block, 0, dev_ctx.stream()>>>(input_x_data, input_y_data, out_data, full_strides[merge_axis], numel / 3, index_calculator); } } // namespace phi PD_REGISTER_KERNEL(cross, GPU, ALL_LAYOUT, phi::CrossKernel, phi::dtype::float16, float, double, int, int64_t) {}
18bb0a8fee72e6325a8a28dba139a58760cb384b.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <pointercast.h> #include <types/types.h> #include <types/float16.h> #include <op_boilerplate.h> #include <loops/summarystatsreduce.h> #include <helpers/shape.h> #include <helpers/TAD.h> #include <dll.h> #include <Environment.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <helpers/DebugHelper.h> #include <specials_cuda.h> using namespace simdOps; namespace functions { namespace summarystats { template <typename X, typename Z> void _CUDA_G summaryStatsReduceT(int op, void *dx, Nd4jLong *xShapeInfo, int xRank, void *extraParams, void *z, Nd4jLong *zShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot,bool biasCorrected,int *allocationBuffer, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { functions::summarystats::SummaryStatsReduce<X,Z>::transform(op,dx,xShapeInfo,extraParams,z,zShapeInfo,dimension,dimensionLength,biasCorrected,allocationBuffer,reductionBuffer,tadOnlyShapeInfo,tadOffsets); } /** * * @param sPartialsRef * @param tid * @param extraParams */ template<typename X, typename Z> template<typename OpType> _CUDA_D void SummaryStatsReduce<X,Z>::aggregatePartials(SummaryStatsData<X> **sPartialsRef, Nd4jLong tid, Nd4jLong numElements, void *vextraParams) { // start the shared memory loop on the next power of 2 less // than the block size. If block size is not a power of 2, // accumulate the intermediate sums in the remainder range. 
auto extraParams = static_cast<Z*>(vextraParams); SummaryStatsData<X> *sPartials = *sPartialsRef; Nd4jLong floorPow2 = blockDim.x; if (floorPow2 & (floorPow2 - 1)) { while (floorPow2 & (floorPow2 - 1)) { floorPow2 &= floorPow2 - 1; } if (tid >= floorPow2) { SummaryStatsData<X> prev = sPartials[tid - floorPow2]; SummaryStatsData<X> curr = sPartials[tid]; sPartials[tid - floorPow2] = update(prev, curr, extraParams); } __syncthreads(); } for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) { if (tid < activeThreads && tid + activeThreads < numElements) { SummaryStatsData<X> curr = sPartials[tid]; SummaryStatsData<X> next = sPartials[tid + activeThreads]; sPartials[tid] = update(curr, next, extraParams); } __syncthreads(); } }; /** * @param n n is the number of * elements to loop through * @param dx the data to operate on * @param xVectorInfo the meta data for the vector: * 0 is the offset * 1 is the increment/stride * 2 is the real length of the buffer (n and dx.length won't always be the same) * 3 is the element wise stride for the buffer * 4 is the number of elements it takes to get to the next row/column/tensor * @param gpuInformation * 0 is the block size * 1 is the grid size * 2 is the shared memory size * @param problemDefinition * 0 is the number of elements per vector * 1 is the number of vectors */ template<typename X, typename Z> template<typename OpType> _CUDA_D void SummaryStatsReduce<X,Z>::transform(void *vx, Nd4jLong *xShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, void *vreductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { auto dx = static_cast<X*>(vx); auto z = static_cast<Z*>(vz); auto extraParams = static_cast<Z*>(vextraParams); auto reductionBuffer = static_cast<Z*>(vreductionBuffer); int tid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ volatile int resultScalar; __shared__ int xElementWiseStride; int numElements = blockDim.x; //shared memory space for storing intermediate results __shared__ SummaryStatsData<X> *sPartials; if(threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = reinterpret_cast<SummaryStatsData<X>*>(shmem); } __syncthreads(); Z startingVal = startingValue(dx); SummaryStatsData<X> val; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; //length for the tad __shared__ volatile int xLength; __shared__ volatile int resultLength; SummaryStatsData<X> reduction; reduction.initWithValue(0.0); reduction.n = 0; if (threadIdx.x == 0) { if (zShapeInfo != nullptr) resultLength = shape::length(zShapeInfo); else resultLength = 1; if (dimensionLength == 1) { if (resultLength == 1 && (dimension == nullptr || dimension[0] == MAX_DIMENSION)) resultScalar = 1; else resultScalar = 0; } else resultScalar = 0; if (resultLength == 1) resultScalar = 1; auto xStride = shape::stride(xShapeInfo); auto xOrder = shape::order(xShapeInfo); if (dimension != nullptr && (dimension[0] != MAX_DIMENSION && dimensionLength == 1)) { xElementWiseStride = xStride[dimension[0]]; } else { xElementWiseStride = shape::elementWiseStride(xShapeInfo); } xLength = shape::length(xShapeInfo); } __syncthreads(); if (!resultScalar) { __shared__ int tadLength; __shared__ int tadEWS; __shared__ int numTads; if (threadIdx.x == 0) { tadLength = shape::length(tadOnlyShapeInfo);//shape::tadLength(xShapeInfo, dimension, dimensionLength); tadEWS = shape::elementWiseStride(tadOnlyShapeInfo); numTads = shape::length(xShapeInfo) / 
tadLength; } __syncthreads(); if (tadEWS == 0) { for (int r = blockIdx.x; r < numTads; r += gridDim.x) { auto tadOffsetForBlock = tadOffsets[r]; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo); SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[xOffset]); sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams); } __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) { z[r] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]); } __syncthreads(); } } else { for (int i = blockIdx.x; i < numTads; i += gridDim.x) { auto tadOffsetForBlock = tadOffsets[i]; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; for (int x = threadIdx.x; x < tadLength; x += blockDim.x) { auto indexX = tadOffsetForBlock + x * tadEWS; SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[indexX]); sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams); } __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) { z[i] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]); //postProcess(sPartials[0],tadLength ,extraParams); } } } } else if (resultScalar) { __shared__ int n; if (threadIdx.x == 0) { xElementWiseStride = shape::elementWiseStride(xShapeInfo); n = shape::length(xShapeInfo); } __syncthreads(); if (xElementWiseStride >= 1) { for (Nd4jLong i = tid; i < n; i += (blockDim.x * gridDim.x)) { SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[i * xElementWiseStride]); reduction = update(reduction, indexVal2, extraParams); } } else { for (Nd4jLong i = tid; i < n; i += blockDim.x * gridDim.x) { auto offset = shape::getIndexOffset(i, xShapeInfo); SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[offset]); reduction = update(reduction, indexVal2, extraParams); } } sPartials[threadIdx.x] = reduction; __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, blockDim.x, extraParams); __syncthreads(); if (gridDim.x > 1) { __shared__ bool amLast; unsigned int *tc = (unsigned int *)reductionBuffer; tid = threadIdx.x; if (threadIdx.x == 0) { SummaryStatsData<X> *pBuffer = (SummaryStatsData<X>*) reductionBuffer; pBuffer[blockIdx.x] = sPartials[0]; } __threadfence(); __syncthreads(); if (tid == 0) { unsigned int ticket = atomicInc(&tc[16384], gridDim.x); amLast = (ticket == gridDim.x - 1); } __syncthreads(); if (amLast) { tc[16384] = 0; SummaryStatsData<X>* pBuffer = (SummaryStatsData<X>*) reductionBuffer; Z startingVal = startingValue(dx); SummaryStatsData<X> val; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sPartials[threadIdx.x] = update(sPartials[threadIdx.x], pBuffer[i], extraParams); } __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, gridDim.x, extraParams); __syncthreads(); if (tid == 0) { z[0] = OpType::getValue(postProcessOrNot, sPartials[0]); } } } else { if (tid == 0) { unsigned int *tc = (unsigned *)reductionBuffer; tc[16384] = 0; z[0] = z[0] = OpType::getValue(postProcessOrNot, sPartials[0]); } } } }; template <typename X, typename Y> _CUDA_D void 
SummaryStatsReduce<X,Y>::transform(const int opNum, void *dx, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(transform, PARAMS(dx, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets), SUMMARY_STATS_OPS); }; template <typename X, typename Z> _CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduceScalar(dim3& launchDims, hipStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) { auto x = static_cast<X*>(vx); auto extraParams = static_cast<Z*>(vextraParams); auto z = reinterpret_cast<Z*>(vz); auto reductionPointerA = reinterpret_cast<Z*>(reductionBuffer); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D16 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( summaryStatsReduceT<X,Z>), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), nullptr, 1, 1,biasCorrected, nullptr, reductionPointerA, tadShapeInfo, tadOffsets); // this is blocking method since method should return scalar nd4j::DebugHelper::checkErrorCode(stream, "execSSReduceScalar(...) failed"); } template <typename X, typename Z> _CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduce(dim3& launchDims, hipStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) { auto x = static_cast<X*>(vx); auto z = static_cast<Z*>(vz); auto extraParams = static_cast<Z*>(vextraParams); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F17 opNum:[%i]\n", opNum); auto reductionPointerA = reinterpret_cast<Z*>(reductionBuffer); hipLaunchKernelGGL(( summaryStatsReduceT<X,Z>), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), nullptr, 1, 1,biasCorrected, nullptr, reductionPointerA, tadShapeInfo, tadOffsets); DEBUG_KERNEL(stream, opNum); } template<typename X, typename Z> _CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduce(dim3& launchDims, hipStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) { auto x = static_cast<X*>(vx); auto z = static_cast<Z*>(vz); auto extraParams = static_cast<Z*>(vextraParams); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D18 opNum:[%i]\n", opNum); hipLaunchKernelGGL(( summaryStatsReduceT<X, Z>), dim3(launchDims.x),dim3(launchDims.y),launchDims.z, *stream, opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), dimension, dimensionLength, 1, biasCorrected, nullptr, reinterpret_cast<Z*>(reductionBuffer), tadShapeInfo, tadOffsets); DEBUG_KERNEL(stream, opNum); } template <typename X, typename Y> Y 
SummaryStatsReduce<X,Y>::execScalar(int opNum, bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams) { return 0; } template <typename X, typename Y> void SummaryStatsReduce<X,Y>::execScalar(int opNum, bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *vz, Nd4jLong *resultShapeInfoBuffer) { } template <typename X, typename Y> void SummaryStatsReduce<X,Y>::exec(int opNum, bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *vz, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength) { } template <typename X, typename Y> template<typename OpType> Y SummaryStatsReduce<X,Y>::execScalar(bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams) { return 0; } template <typename X, typename Y> template<typename OpType> void SummaryStatsReduce<X,Y>::execScalar(bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *vz, Nd4jLong *resultShapeInfoBuffer) { // } template <typename X, typename Y> template<typename OpType> void SummaryStatsReduce<X,Y>::exec(bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *vz, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength) { } BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT SummaryStatsReduce, , LIBND4J_TYPES, FLOAT_TYPES); } }
18bb0a8fee72e6325a8a28dba139a58760cb384b.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author [email protected] // #include <pointercast.h> #include <types/types.h> #include <types/float16.h> #include <op_boilerplate.h> #include <loops/summarystatsreduce.h> #include <helpers/shape.h> #include <helpers/TAD.h> #include <dll.h> #include <Environment.h> #include <cuda.h> #include <cuda_runtime.h> #include <helpers/DebugHelper.h> #include <specials_cuda.h> using namespace simdOps; namespace functions { namespace summarystats { template <typename X, typename Z> void _CUDA_G summaryStatsReduceT(int op, void *dx, Nd4jLong *xShapeInfo, int xRank, void *extraParams, void *z, Nd4jLong *zShapeInfo, int zRank, int *dimension, int dimensionLength, int postProcessOrNot,bool biasCorrected,int *allocationBuffer, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { functions::summarystats::SummaryStatsReduce<X,Z>::transform(op,dx,xShapeInfo,extraParams,z,zShapeInfo,dimension,dimensionLength,biasCorrected,allocationBuffer,reductionBuffer,tadOnlyShapeInfo,tadOffsets); } /** * * @param sPartialsRef * @param tid * @param extraParams */ template<typename X, typename Z> template<typename OpType> _CUDA_D void SummaryStatsReduce<X,Z>::aggregatePartials(SummaryStatsData<X> **sPartialsRef, Nd4jLong tid, Nd4jLong numElements, void *vextraParams) { // start the shared memory loop on the next power of 2 less // than the block size. If block size is not a power of 2, // accumulate the intermediate sums in the remainder range. 
auto extraParams = static_cast<Z*>(vextraParams); SummaryStatsData<X> *sPartials = *sPartialsRef; Nd4jLong floorPow2 = blockDim.x; if (floorPow2 & (floorPow2 - 1)) { while (floorPow2 & (floorPow2 - 1)) { floorPow2 &= floorPow2 - 1; } if (tid >= floorPow2) { SummaryStatsData<X> prev = sPartials[tid - floorPow2]; SummaryStatsData<X> curr = sPartials[tid]; sPartials[tid - floorPow2] = update(prev, curr, extraParams); } __syncthreads(); } for (Nd4jLong activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) { if (tid < activeThreads && tid + activeThreads < numElements) { SummaryStatsData<X> curr = sPartials[tid]; SummaryStatsData<X> next = sPartials[tid + activeThreads]; sPartials[tid] = update(curr, next, extraParams); } __syncthreads(); } }; /** * @param n n is the number of * elements to loop through * @param dx the data to operate on * @param xVectorInfo the meta data for the vector: * 0 is the offset * 1 is the increment/stride * 2 is the real length of the buffer (n and dx.length won't always be the same) * 3 is the element wise stride for the buffer * 4 is the number of elements it takes to get to the next row/column/tensor * @param gpuInformation * 0 is the block size * 1 is the grid size * 2 is the shared memory size * @param problemDefinition * 0 is the number of elements per vector * 1 is the number of vectors */ template<typename X, typename Z> template<typename OpType> _CUDA_D void SummaryStatsReduce<X,Z>::transform(void *vx, Nd4jLong *xShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, void *vreductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { auto dx = static_cast<X*>(vx); auto z = static_cast<Z*>(vz); auto extraParams = static_cast<Z*>(vextraParams); auto reductionBuffer = static_cast<Z*>(vreductionBuffer); int tid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ volatile int resultScalar; __shared__ int xElementWiseStride; int numElements = blockDim.x; //shared memory space for storing intermediate results __shared__ SummaryStatsData<X> *sPartials; if(threadIdx.x == 0) { extern __shared__ unsigned char shmem[]; sPartials = reinterpret_cast<SummaryStatsData<X>*>(shmem); } __syncthreads(); Z startingVal = startingValue(dx); SummaryStatsData<X> val; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; //length for the tad __shared__ volatile int xLength; __shared__ volatile int resultLength; SummaryStatsData<X> reduction; reduction.initWithValue(0.0); reduction.n = 0; if (threadIdx.x == 0) { if (zShapeInfo != nullptr) resultLength = shape::length(zShapeInfo); else resultLength = 1; if (dimensionLength == 1) { if (resultLength == 1 && (dimension == nullptr || dimension[0] == MAX_DIMENSION)) resultScalar = 1; else resultScalar = 0; } else resultScalar = 0; if (resultLength == 1) resultScalar = 1; auto xStride = shape::stride(xShapeInfo); auto xOrder = shape::order(xShapeInfo); if (dimension != nullptr && (dimension[0] != MAX_DIMENSION && dimensionLength == 1)) { xElementWiseStride = xStride[dimension[0]]; } else { xElementWiseStride = shape::elementWiseStride(xShapeInfo); } xLength = shape::length(xShapeInfo); } __syncthreads(); if (!resultScalar) { __shared__ int tadLength; __shared__ int tadEWS; __shared__ int numTads; if (threadIdx.x == 0) { tadLength = shape::length(tadOnlyShapeInfo);//shape::tadLength(xShapeInfo, dimension, dimensionLength); tadEWS = shape::elementWiseStride(tadOnlyShapeInfo); numTads = shape::length(xShapeInfo) / 
tadLength; } __syncthreads(); if (tadEWS == 0) { for (int r = blockIdx.x; r < numTads; r += gridDim.x) { auto tadOffsetForBlock = tadOffsets[r]; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; for (int i = threadIdx.x; i < tadLength; i += blockDim.x) { auto xOffset = tadOffsetForBlock + shape::getIndexOffset(i, tadOnlyShapeInfo); SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[xOffset]); sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams); } __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) { z[r] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]); } __syncthreads(); } } else { for (int i = blockIdx.x; i < numTads; i += gridDim.x) { auto tadOffsetForBlock = tadOffsets[i]; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; for (int x = threadIdx.x; x < tadLength; x += blockDim.x) { auto indexX = tadOffsetForBlock + x * tadEWS; SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[indexX]); sPartials[threadIdx.x] = update(sPartials[threadIdx.x], OpType::op(indexVal2, extraParams), extraParams); } __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), extraParams); __syncthreads(); if (threadIdx.x == 0) { z[i] = OpType::getValue(postProcessOrNot, sPartials[threadIdx.x]); //postProcess(sPartials[0],tadLength ,extraParams); } } } } else if (resultScalar) { __shared__ int n; if (threadIdx.x == 0) { xElementWiseStride = shape::elementWiseStride(xShapeInfo); n = shape::length(xShapeInfo); } __syncthreads(); if (xElementWiseStride >= 1) { for (Nd4jLong i = tid; i < n; i += (blockDim.x * gridDim.x)) { SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[i * xElementWiseStride]); reduction = update(reduction, indexVal2, extraParams); } } else { for (Nd4jLong i = tid; i < n; i += blockDim.x * gridDim.x) { auto offset = shape::getIndexOffset(i, xShapeInfo); SummaryStatsData<X> indexVal2; indexVal2.initWithValue(dx[offset]); reduction = update(reduction, indexVal2, extraParams); } } sPartials[threadIdx.x] = reduction; __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, blockDim.x, extraParams); __syncthreads(); if (gridDim.x > 1) { __shared__ bool amLast; unsigned int *tc = (unsigned int *)reductionBuffer; tid = threadIdx.x; if (threadIdx.x == 0) { SummaryStatsData<X> *pBuffer = (SummaryStatsData<X>*) reductionBuffer; pBuffer[blockIdx.x] = sPartials[0]; } __threadfence(); __syncthreads(); if (tid == 0) { unsigned int ticket = atomicInc(&tc[16384], gridDim.x); amLast = (ticket == gridDim.x - 1); } __syncthreads(); if (amLast) { tc[16384] = 0; SummaryStatsData<X>* pBuffer = (SummaryStatsData<X>*) reductionBuffer; Z startingVal = startingValue(dx); SummaryStatsData<X> val; val.initWithValue(startingVal); val.n = 0; sPartials[threadIdx.x] = val; for (int i = threadIdx.x; i < gridDim.x; i += blockDim.x) { sPartials[threadIdx.x] = update(sPartials[threadIdx.x], pBuffer[i], extraParams); } __syncthreads(); aggregatePartials<OpType>(&sPartials, threadIdx.x, gridDim.x, extraParams); __syncthreads(); if (tid == 0) { z[0] = OpType::getValue(postProcessOrNot, sPartials[0]); } } } else { if (tid == 0) { unsigned int *tc = (unsigned *)reductionBuffer; tc[16384] = 0; z[0] = z[0] = OpType::getValue(postProcessOrNot, sPartials[0]); } } } }; template <typename X, typename Y> _CUDA_D void 
SummaryStatsReduce<X,Y>::transform(const int opNum, void *dx, Nd4jLong *xShapeInfo, void *extraParams, void *z, Nd4jLong *zShapeInfo, int *dimension, int dimensionLength, int postProcessOrNot, int *allocationBuffer, void *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) { DISPATCH_BY_OPNUM_TT(transform, PARAMS(dx, xShapeInfo, extraParams, z, zShapeInfo, dimension, dimensionLength, postProcessOrNot, allocationBuffer, reductionBuffer, tadOnlyShapeInfo, tadOffsets), SUMMARY_STATS_OPS); }; template <typename X, typename Z> _CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduceScalar(dim3& launchDims, cudaStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) { auto x = static_cast<X*>(vx); auto extraParams = static_cast<Z*>(vextraParams); auto z = reinterpret_cast<Z*>(vz); auto reductionPointerA = reinterpret_cast<Z*>(reductionBuffer); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D16 opNum:[%i]\n", opNum); summaryStatsReduceT<X,Z><<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), nullptr, 1, 1,biasCorrected, nullptr, reductionPointerA, tadShapeInfo, tadOffsets); // this is blocking method since method should return scalar nd4j::DebugHelper::checkErrorCode(stream, "execSSReduceScalar(...) failed"); } template <typename X, typename Z> _CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduce(dim3& launchDims, cudaStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) { auto x = static_cast<X*>(vx); auto z = static_cast<Z*>(vz); auto extraParams = static_cast<Z*>(vextraParams); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("F17 opNum:[%i]\n", opNum); auto reductionPointerA = reinterpret_cast<Z*>(reductionBuffer); summaryStatsReduceT<X,Z><<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), nullptr, 1, 1,biasCorrected, nullptr, reductionPointerA, tadShapeInfo, tadOffsets); DEBUG_KERNEL(stream, opNum); } template<typename X, typename Z> _CUDA_H void SummaryStatsReduce<X,Z>::execSummaryStatsReduce(dim3& launchDims, cudaStream_t *stream, int opNum, void *vx, Nd4jLong *xShapeInfo, Nd4jLong *hxShapeInfo, void *vextraParams, void *vz, Nd4jLong *zShapeInfo, Nd4jLong *hzShapeInfo, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets, bool biasCorrected, void *reductionBuffer) { auto x = static_cast<X*>(vx); auto z = static_cast<Z*>(vz); auto extraParams = static_cast<Z*>(vextraParams); if (nd4j::Environment::getInstance()->isDebugAndVerbose()) printf("D18 opNum:[%i]\n", opNum); summaryStatsReduceT<X, Z><<<launchDims.x,launchDims.y,launchDims.z, *stream>>>( opNum, x, xShapeInfo, shape::rank(hxShapeInfo), extraParams, z, zShapeInfo, shape::rank(hzShapeInfo), dimension, dimensionLength, 1, biasCorrected, nullptr, reinterpret_cast<Z*>(reductionBuffer), tadShapeInfo, tadOffsets); DEBUG_KERNEL(stream, opNum); } template <typename X, typename Y> Y SummaryStatsReduce<X,Y>::execScalar(int opNum, bool biasCorrected, void *x, Nd4jLong 
*xShapeInfo, void *extraParams) { return 0; } template <typename X, typename Y> void SummaryStatsReduce<X,Y>::execScalar(int opNum, bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *vz, Nd4jLong *resultShapeInfoBuffer) { } template <typename X, typename Y> void SummaryStatsReduce<X,Y>::exec(int opNum, bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *vz, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength) { } template <typename X, typename Y> template<typename OpType> Y SummaryStatsReduce<X,Y>::execScalar(bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams) { return 0; } template <typename X, typename Y> template<typename OpType> void SummaryStatsReduce<X,Y>::execScalar(bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *vz, Nd4jLong *resultShapeInfoBuffer) { // } template <typename X, typename Y> template<typename OpType> void SummaryStatsReduce<X,Y>::exec(bool biasCorrected, void *x, Nd4jLong *xShapeInfo, void *extraParams, void *vz, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength) { } BUILD_DOUBLE_TEMPLATE(template class ND4J_EXPORT SummaryStatsReduce, , LIBND4J_TYPES, FLOAT_TYPES); } }
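Two details in this pair are worth noting: both #include <cuda.h> and #include <cuda_runtime.h> collapse to #include <hip/hip_runtime.h> (hence the duplicated include in the .hip row), and the explicitly instantiated kernel summaryStatsReduceT<X,Z> is wrapped in an extra pair of parentheses inside hipLaunchKernelGGL so that the comma in the template argument list is not read as a macro-argument separator. The following compilable sketch shows the template case with an illustrative cast_copy kernel rather than the library's own types.

#include "hip/hip_runtime.h"

// Toy templated kernel standing in for summaryStatsReduceT<X,Z>.
template <typename X, typename Z>
__global__ void cast_copy(const X *in, Z *out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = static_cast<Z>(in[i]);
}

template <typename X, typename Z>
void launch_cast_copy(const X *in, Z *out, int n, hipStream_t stream) {
  dim3 grid((n + 255) / 256), block(256);
  // CUDA form:  cast_copy<X, Z><<<grid, block, 0, stream>>>(in, out, n);
  // HIP form: the kernel name gets an extra pair of parentheses so the comma
  // inside the template argument list stays within one macro argument.
  hipLaunchKernelGGL((cast_copy<X, Z>), grid, block, 0, stream, in, out, n);
}

int main() {
  const int n = 256;
  float *in = nullptr;
  double *out = nullptr;
  hipMalloc((void **)&in, n * sizeof(float));
  hipMalloc((void **)&out, n * sizeof(double));
  launch_cast_copy(in, out, n, nullptr);        // null stream = default stream
  hipDeviceSynchronize();
  hipFree(in);
  hipFree(out);
  return 0;
}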
05c6c99631d1f8d9be60c473375bb8f907dbb70d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/filler.hpp" #include "caffe/layers/local_conv_layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/util/math_functions.hpp" namespace caffe{ template <typename Dtype> __global__ void crop_loc_patch_kernel(int count, const Dtype *src, int src_w, int src_h, int src_c, int crop_width, int crop_height, int w_off, int h_off, Dtype *local_patch_data) { // int index = blockIdx.x * blockDim.x + threadIdx.x; CUDA_KERNEL_LOOP(index, count){ int spatial_dim = crop_width * crop_height; int channel = index / spatial_dim; int offset = index % spatial_dim; int height_out = offset / crop_width; int width_out = offset % crop_width; local_patch_data[(channel * crop_height + height_out) * crop_width + width_out] = src[(channel * src_h + (height_out + h_off)) * src_w + width_out + w_off]; } } template <typename Dtype> void LocalConvolutionLayer<Dtype>::crop_loc_patch_gpu(const Dtype *src , int src_w, int src_h, int src_c , int crop_width, int crop_height , int w_off, int h_off , Dtype *local_patch_data) { //We are going to launch channels * crop_width * crop_height kernels, each kernel responsible for //croping one element int num_kernels = src_c * crop_width * crop_height; hipLaunchKernelGGL(( crop_loc_patch_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels , src , src_w, src_h, src_c , crop_width, crop_height , w_off, h_off , local_patch_data); CUDA_POST_KERNEL_CHECK; } /* template<typename Dtype> __global__ void realign_loc_conv_result_kernel(int count, const Dtype *local_conv_data , int loc_num_h, int loc_num_w , int loc_out_h, int loc_out_w , int num_out , int dst_h, int dst_w , Dtype *dst_data) { int loc_out_spatial_dim = loc_out_h * loc_out_w; int loc_num = loc_num_h * loc_num_w; int loc_out_step = loc_out_spatial_dim * num_out; CUDA_KERNEL_LOOP(index, count){ int o_c = index / loc_out_spatial_dim; int offset = index % loc_out_spatial_dim; int loc_h = offset / loc_out_w; int loc_w = offset % loc_out_w; int loc_offset = o_c * loc_out_spatial_dim + loc_h * loc_out_w + loc_w; int dst_c_offset = o_c * dst_h * dst_w; for (int i = 0; i < loc_num; ++i){ int loc_idx_h = i / loc_num_w; int loc_idx_w = i % loc_num_w; int dst_idx = dst_c_offset + (loc_idx_h * loc_out_h + loc_h) * dst_w + loc_idx_w * loc_out_w + loc_w; int src_idx = i * loc_out_step + loc_offset; dst_data[dst_idx] = local_conv_data[src_idx]; } } } */ template<typename Dtype> __global__ void realign_loc_conv_result_kernel2(int count, const Dtype *local_conv_data , int loc_num_h, int loc_num_w , int loc_out_h, int loc_out_w , int num_out , int dst_h, int dst_w , Dtype *dst_data) { int loc_spatial_dim = loc_out_h * loc_out_w; int dst_spatial_dim = dst_h * dst_w; int loc_num = loc_num_h * loc_num_w; int loc_out_step = loc_spatial_dim * num_out; CUDA_KERNEL_LOOP(index, count){ int loc_count = index / loc_out_step; int loc_out_offset = index % loc_out_step; int loc_idx_h = loc_count / loc_num_w; int loc_idx_w = loc_count % loc_num_w; int c = loc_out_offset / loc_spatial_dim; int loc_offset = loc_out_offset % loc_spatial_dim; int loc_h = loc_offset / loc_out_w; int loc_w = loc_offset % loc_out_w; int dst_idx = c * dst_spatial_dim + (loc_idx_h * loc_out_h + loc_h) * dst_w + loc_idx_w * loc_out_w + loc_w; dst_data[dst_idx] = local_conv_data[index]; } } template <typename Dtype> void LocalConvolutionLayer<Dtype>::realign_loc_conv_result_gpu(const Dtype *local_conv_data, Dtype 
*dst_data) { //We are going to launch num_output * height_out * width_out kernels, each kernel responsible for //copying one local conv result per local region //int num_kernels = this->num_output_ * this->output_shape_[0] * this->output_shape_[1]; //for realign_loc_conv_result_kernel() int num_kernels = this->num_output_ * this->output_shape_[0] * this->output_shape_[1] * this->L_; //To get bigger size of Block hipLaunchKernelGGL(( realign_loc_conv_result_kernel2<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0, num_kernels, local_conv_data , this->local_region_num_h_ , this->local_region_num_w_ , this->output_shape_[0], this->output_shape_[1] , this->num_output_ , this->top_height_, this->top_width_ , dst_data); CUDA_POST_KERNEL_CHECK; } template<typename Dtype> __global__ void realign_bottoom_diff_kernel(int count, const Dtype *loc_bottom_diff , int loc_region_h, int loc_region_w , int loc_num_h, int loc_num_w , int channels , int dst_height, int dst_width , const int *loc_idx_to_off_data , Dtype *dst_data) { int loc_spatial_dim = loc_region_h * loc_region_w; int loc_num = loc_num_h * loc_num_w; int loc_step = channels * loc_spatial_dim; CUDA_KERNEL_LOOP(index, count){ int b_c = index / loc_spatial_dim; int offset = index % loc_spatial_dim; int loc_h = offset / loc_region_w; int loc_w = offset % loc_region_w; int loc_offset = b_c * loc_spatial_dim + loc_h * loc_region_w + loc_w; int dst_c_offset = b_c * dst_height * dst_width; for (int i = 0; i < loc_num; ++i){ int loc_idx_h = i / loc_num_w; int loc_idx_w = i % loc_num_w; int src_idx = loc_offset + i * loc_step; int loc_idx_to_off_index = (loc_idx_h * loc_num_w + loc_idx_w) * 2; int dst_idx = dst_c_offset + (loc_idx_to_off_data[loc_idx_to_off_index] + loc_h) * dst_width + loc_idx_to_off_data[loc_idx_to_off_index + 1] + loc_w; dst_data[dst_idx] += loc_bottom_diff[src_idx]; } } } template <typename Dtype> void LocalConvolutionLayer<Dtype>::realign_bottom_diff_gpu(const Dtype *loc_bottom_diff_buffer, Dtype *bottom_diff) { //We are going to launch channels * loc_region_h * loc_region_w kernels, each kernel responsible for //aligning one local bottom diff per local region int conv_input_h = this->conv_input_shape_.cpu_data()[1]; int conv_input_w = this->conv_input_shape_.cpu_data()[2]; int conv_input_c = this->conv_input_shape_.cpu_data()[0]; int num_kernels = conv_input_c * conv_input_h * conv_input_w; hipLaunchKernelGGL(( realign_bottoom_diff_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0, num_kernels , loc_bottom_diff_buffer , conv_input_h, conv_input_w , this->local_region_num_h_, this->local_region_num_w_ , conv_input_c , this->bottom_height_, this->bottom_width_ , this->loc_idx_to_offset_.gpu_data() , bottom_diff); CUDA_POST_KERNEL_CHECK; } template<typename Dtype> void LocalConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Dtype *loc_bottom_data = loc_bottom_buffer_.mutable_gpu_data(); Dtype* loc_top_data = loc_top_buffer_.mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); const Blob<int> *idx_to_off = &this->loc_idx_to_offset_; const int *idx_to_off_data = idx_to_off->cpu_data(); int loc_h = this->conv_input_shape_.cpu_data()[1]; int loc_w = this->conv_input_shape_.cpu_data()[2]; for (int i = 0; i < bottom.size(); i++) { const Dtype* bottom_data = bottom[i]->gpu_data(); int bottom_w = bottom[i]->width(); int bottom_h = bottom[i]->height(); int bottom_c = bottom[i]->channels(); Dtype* 
top_data = top[i]->mutable_gpu_data(); for (int n = 0; n < this->num_; n++) { const Dtype *single_bottom_data = bottom_data + bottom[i]->offset(n); for (int lh = 0; lh < local_region_num_h_; lh++){ for (int lw = 0; lw < local_region_num_w_; lw++){ int loc_num = lh * local_region_num_w_ + lw; const Dtype* loc_weight = weight + this->blobs_[0]->offset(loc_num); Dtype *loc_bottom = loc_bottom_data + loc_bottom_buffer_.offset(loc_num); Dtype *loc_top = loc_top_data + loc_top_buffer_.offset(loc_num); crop_loc_patch_gpu(single_bottom_data , bottom_w , bottom_h , bottom_c , loc_w , loc_h , idx_to_off_data[idx_to_off->offset(lh, lw, 1, 0)] , idx_to_off_data[idx_to_off->offset(lh, lw, 0, 0)] , loc_bottom); this->forward_gpu_gemm(loc_bottom, loc_weight, loc_top); if (this->bias_term_) { const Dtype* bias = this->blobs_[1]->gpu_data() + this->blobs_[1]->offset(loc_num); this->forward_gpu_bias(loc_top, bias); } } } realign_loc_conv_result_gpu(loc_top_data, top_data + top[i]->offset(n)); } } } template<typename Dtype> void LocalConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); Dtype *bias_diff = (this->bias_term_ ? this->blobs_[1]->mutable_gpu_diff() : NULL); if (this->param_propagate_down_[0]) { caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); } if (this->bias_term_ && this->param_propagate_down_[1]) { caffe_gpu_set(this->blobs_[1]->count(), Dtype(0) , this->blobs_[1]->mutable_gpu_diff()); } const Blob<int> *idx_to_off = &this->loc_idx_to_offset_; const int *idx_to_off_data = idx_to_off->cpu_data(); const int *idx_to_off_gpu_data = idx_to_off->gpu_data(); Dtype *loc_bottom_data_buffer = loc_bottom_buffer_.mutable_gpu_data(); Dtype *loc_bottom_diff_buffer = loc_bottom_buffer_.mutable_gpu_diff(); Dtype* loc_top_data_buffer = loc_top_buffer_.mutable_gpu_data(); Dtype* loc_top_diff_buffer = loc_top_buffer_.mutable_gpu_diff(); for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); if (propagate_down[i]){ caffe_gpu_set(bottom[i]->count(), Dtype(0), bottom_diff); } int top_w = top[i]->width(); int top_h = top[i]->height(); int top_c = top[i]->channels(); int bottom_w = bottom[i]->width(); int bottom_h = bottom[i]->height(); int bottom_c = bottom[i]->channels(); int conv_out_h = this->output_shape_[0]; int conv_out_w = this->output_shape_[1]; int conv_in_h = this->conv_input_shape_.cpu_data()[1]; int conv_in_w = this->conv_input_shape_.cpu_data()[2]; for (int n = 0; n < top[i]->num(); n++) { const Dtype *single_top_diff = top_diff + top[i]->offset(n); const Dtype *single_bottom_data = bottom_data + bottom[i]->offset(n); Dtype *single_bottom_diff = bottom_diff + bottom[i]->offset(n); for (int lh = 0; lh < local_region_num_h_; lh++) { for (int lw = 0; lw < local_region_num_w_; lw++) { int loc_num = lh * local_region_num_w_ + lw; Dtype *loc_top_diff = loc_top_diff_buffer + loc_top_buffer_.offset(loc_num); Dtype *loc_bias_diff = bias_diff + this->blobs_[1]->offset(loc_num); Dtype *loc_bottom_data = loc_bottom_data_buffer + loc_bottom_buffer_.offset(loc_num); Dtype *loc_bottom_diff = loc_bottom_diff_buffer + loc_bottom_buffer_.offset(loc_num); Dtype *loc_weight_diff = weight_diff + this->blobs_[0]->offset(loc_num); const Dtype *loc_weight = weight + 
this->blobs_[0]->offset(loc_num); crop_loc_patch_gpu(single_top_diff, top_w, top_h, top_c , conv_out_w , conv_out_h , lw * conv_out_w , lh * conv_out_h , loc_top_diff); // Bias gradient, if necessary. if (this->bias_term_ && this->param_propagate_down_[1]) { this->backward_gpu_bias(loc_bias_diff, loc_top_diff); } if (this->param_propagate_down_[0] || propagate_down[i]) { // gradient w.r.t. weight. Note that we will accumulate diffs for n = [0, num - 1] if (this->param_propagate_down_[0]) { crop_loc_patch_gpu(single_bottom_data , bottom_w , bottom_h , bottom_c , conv_in_w , conv_in_h , idx_to_off_data[idx_to_off->offset(lh, lw, 1, 0)] , idx_to_off_data[idx_to_off->offset(lh, lw, 0, 0)] , loc_bottom_data); this->weight_gpu_gemm(loc_bottom_data, loc_top_diff, loc_weight_diff); } //gradient w.r.t. bottom data, if necessary. if (propagate_down[i]) { this->backward_gpu_gemm(loc_top_diff, loc_weight, loc_bottom_diff); } } }//for (int lw = 0; lw < local_region_num_w_; lw++) }//for (int lh = 0; lh< local_region_num_h_; lh++) //realign different local regions' gradients to proper bottom location realign_bottom_diff_gpu(loc_bottom_diff_buffer, single_bottom_diff); }//for (int n = 0; n < top[i]->num(); n++) }//for (int i = 0; i < top.size(); ++i) } INSTANTIATE_LAYER_GPU_FUNCS(LocalConvolutionLayer); }//namespace caffe
05c6c99631d1f8d9be60c473375bb8f907dbb70d.cu
#include <vector> #include "caffe/filler.hpp" #include "caffe/layers/local_conv_layer.hpp" #include "caffe/util/im2col.hpp" #include "caffe/util/math_functions.hpp" namespace caffe{ template <typename Dtype> __global__ void crop_loc_patch_kernel(int count, const Dtype *src, int src_w, int src_h, int src_c, int crop_width, int crop_height, int w_off, int h_off, Dtype *local_patch_data) { // int index = blockIdx.x * blockDim.x + threadIdx.x; CUDA_KERNEL_LOOP(index, count){ int spatial_dim = crop_width * crop_height; int channel = index / spatial_dim; int offset = index % spatial_dim; int height_out = offset / crop_width; int width_out = offset % crop_width; local_patch_data[(channel * crop_height + height_out) * crop_width + width_out] = src[(channel * src_h + (height_out + h_off)) * src_w + width_out + w_off]; } } template <typename Dtype> void LocalConvolutionLayer<Dtype>::crop_loc_patch_gpu(const Dtype *src , int src_w, int src_h, int src_c , int crop_width, int crop_height , int w_off, int h_off , Dtype *local_patch_data) { //We are going to launch channels * crop_width * crop_height kernels, each kernel responsible for //croping one element int num_kernels = src_c * crop_width * crop_height; crop_loc_patch_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels , src , src_w, src_h, src_c , crop_width, crop_height , w_off, h_off , local_patch_data); CUDA_POST_KERNEL_CHECK; } /* template<typename Dtype> __global__ void realign_loc_conv_result_kernel(int count, const Dtype *local_conv_data , int loc_num_h, int loc_num_w , int loc_out_h, int loc_out_w , int num_out , int dst_h, int dst_w , Dtype *dst_data) { int loc_out_spatial_dim = loc_out_h * loc_out_w; int loc_num = loc_num_h * loc_num_w; int loc_out_step = loc_out_spatial_dim * num_out; CUDA_KERNEL_LOOP(index, count){ int o_c = index / loc_out_spatial_dim; int offset = index % loc_out_spatial_dim; int loc_h = offset / loc_out_w; int loc_w = offset % loc_out_w; int loc_offset = o_c * loc_out_spatial_dim + loc_h * loc_out_w + loc_w; int dst_c_offset = o_c * dst_h * dst_w; for (int i = 0; i < loc_num; ++i){ int loc_idx_h = i / loc_num_w; int loc_idx_w = i % loc_num_w; int dst_idx = dst_c_offset + (loc_idx_h * loc_out_h + loc_h) * dst_w + loc_idx_w * loc_out_w + loc_w; int src_idx = i * loc_out_step + loc_offset; dst_data[dst_idx] = local_conv_data[src_idx]; } } } */ template<typename Dtype> __global__ void realign_loc_conv_result_kernel2(int count, const Dtype *local_conv_data , int loc_num_h, int loc_num_w , int loc_out_h, int loc_out_w , int num_out , int dst_h, int dst_w , Dtype *dst_data) { int loc_spatial_dim = loc_out_h * loc_out_w; int dst_spatial_dim = dst_h * dst_w; int loc_num = loc_num_h * loc_num_w; int loc_out_step = loc_spatial_dim * num_out; CUDA_KERNEL_LOOP(index, count){ int loc_count = index / loc_out_step; int loc_out_offset = index % loc_out_step; int loc_idx_h = loc_count / loc_num_w; int loc_idx_w = loc_count % loc_num_w; int c = loc_out_offset / loc_spatial_dim; int loc_offset = loc_out_offset % loc_spatial_dim; int loc_h = loc_offset / loc_out_w; int loc_w = loc_offset % loc_out_w; int dst_idx = c * dst_spatial_dim + (loc_idx_h * loc_out_h + loc_h) * dst_w + loc_idx_w * loc_out_w + loc_w; dst_data[dst_idx] = local_conv_data[index]; } } template <typename Dtype> void LocalConvolutionLayer<Dtype>::realign_loc_conv_result_gpu(const Dtype *local_conv_data, Dtype *dst_data) { //We are going to launch num_output * height_out * width_out kernels, each kernel responsible for //copying one 
local conv result per local region //int num_kernels = this->num_output_ * this->output_shape_[0] * this->output_shape_[1]; //for realign_loc_conv_result_kernel() int num_kernels = this->num_output_ * this->output_shape_[0] * this->output_shape_[1] * this->L_; //To get bigger size of Block realign_loc_conv_result_kernel2<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS >>>( num_kernels, local_conv_data , this->local_region_num_h_ , this->local_region_num_w_ , this->output_shape_[0], this->output_shape_[1] , this->num_output_ , this->top_height_, this->top_width_ , dst_data); CUDA_POST_KERNEL_CHECK; } template<typename Dtype> __global__ void realign_bottoom_diff_kernel(int count, const Dtype *loc_bottom_diff , int loc_region_h, int loc_region_w , int loc_num_h, int loc_num_w , int channels , int dst_height, int dst_width , const int *loc_idx_to_off_data , Dtype *dst_data) { int loc_spatial_dim = loc_region_h * loc_region_w; int loc_num = loc_num_h * loc_num_w; int loc_step = channels * loc_spatial_dim; CUDA_KERNEL_LOOP(index, count){ int b_c = index / loc_spatial_dim; int offset = index % loc_spatial_dim; int loc_h = offset / loc_region_w; int loc_w = offset % loc_region_w; int loc_offset = b_c * loc_spatial_dim + loc_h * loc_region_w + loc_w; int dst_c_offset = b_c * dst_height * dst_width; for (int i = 0; i < loc_num; ++i){ int loc_idx_h = i / loc_num_w; int loc_idx_w = i % loc_num_w; int src_idx = loc_offset + i * loc_step; int loc_idx_to_off_index = (loc_idx_h * loc_num_w + loc_idx_w) * 2; int dst_idx = dst_c_offset + (loc_idx_to_off_data[loc_idx_to_off_index] + loc_h) * dst_width + loc_idx_to_off_data[loc_idx_to_off_index + 1] + loc_w; dst_data[dst_idx] += loc_bottom_diff[src_idx]; } } } template <typename Dtype> void LocalConvolutionLayer<Dtype>::realign_bottom_diff_gpu(const Dtype *loc_bottom_diff_buffer, Dtype *bottom_diff) { //We are going to launch channels * loc_region_h * loc_region_w kernels, each kernel responsible for //aligning one local bottom diff per local region int conv_input_h = this->conv_input_shape_.cpu_data()[1]; int conv_input_w = this->conv_input_shape_.cpu_data()[2]; int conv_input_c = this->conv_input_shape_.cpu_data()[0]; int num_kernels = conv_input_c * conv_input_h * conv_input_w; realign_bottoom_diff_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS >>>( num_kernels , loc_bottom_diff_buffer , conv_input_h, conv_input_w , this->local_region_num_h_, this->local_region_num_w_ , conv_input_c , this->bottom_height_, this->bottom_width_ , this->loc_idx_to_offset_.gpu_data() , bottom_diff); CUDA_POST_KERNEL_CHECK; } template<typename Dtype> void LocalConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { Dtype *loc_bottom_data = loc_bottom_buffer_.mutable_gpu_data(); Dtype* loc_top_data = loc_top_buffer_.mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); const Blob<int> *idx_to_off = &this->loc_idx_to_offset_; const int *idx_to_off_data = idx_to_off->cpu_data(); int loc_h = this->conv_input_shape_.cpu_data()[1]; int loc_w = this->conv_input_shape_.cpu_data()[2]; for (int i = 0; i < bottom.size(); i++) { const Dtype* bottom_data = bottom[i]->gpu_data(); int bottom_w = bottom[i]->width(); int bottom_h = bottom[i]->height(); int bottom_c = bottom[i]->channels(); Dtype* top_data = top[i]->mutable_gpu_data(); for (int n = 0; n < this->num_; n++) { const Dtype *single_bottom_data = bottom_data + bottom[i]->offset(n); for (int lh = 0; lh < local_region_num_h_; 
lh++){ for (int lw = 0; lw < local_region_num_w_; lw++){ int loc_num = lh * local_region_num_w_ + lw; const Dtype* loc_weight = weight + this->blobs_[0]->offset(loc_num); Dtype *loc_bottom = loc_bottom_data + loc_bottom_buffer_.offset(loc_num); Dtype *loc_top = loc_top_data + loc_top_buffer_.offset(loc_num); crop_loc_patch_gpu(single_bottom_data , bottom_w , bottom_h , bottom_c , loc_w , loc_h , idx_to_off_data[idx_to_off->offset(lh, lw, 1, 0)] , idx_to_off_data[idx_to_off->offset(lh, lw, 0, 0)] , loc_bottom); this->forward_gpu_gemm(loc_bottom, loc_weight, loc_top); if (this->bias_term_) { const Dtype* bias = this->blobs_[1]->gpu_data() + this->blobs_[1]->offset(loc_num); this->forward_gpu_bias(loc_top, bias); } } } realign_loc_conv_result_gpu(loc_top_data, top_data + top[i]->offset(n)); } } } template<typename Dtype> void LocalConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); Dtype *bias_diff = (this->bias_term_ ? this->blobs_[1]->mutable_gpu_diff() : NULL); if (this->param_propagate_down_[0]) { caffe_gpu_set(this->blobs_[0]->count(), Dtype(0), weight_diff); } if (this->bias_term_ && this->param_propagate_down_[1]) { caffe_gpu_set(this->blobs_[1]->count(), Dtype(0) , this->blobs_[1]->mutable_gpu_diff()); } const Blob<int> *idx_to_off = &this->loc_idx_to_offset_; const int *idx_to_off_data = idx_to_off->cpu_data(); const int *idx_to_off_gpu_data = idx_to_off->gpu_data(); Dtype *loc_bottom_data_buffer = loc_bottom_buffer_.mutable_gpu_data(); Dtype *loc_bottom_diff_buffer = loc_bottom_buffer_.mutable_gpu_diff(); Dtype* loc_top_data_buffer = loc_top_buffer_.mutable_gpu_data(); Dtype* loc_top_diff_buffer = loc_top_buffer_.mutable_gpu_diff(); for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); if (propagate_down[i]){ caffe_gpu_set(bottom[i]->count(), Dtype(0), bottom_diff); } int top_w = top[i]->width(); int top_h = top[i]->height(); int top_c = top[i]->channels(); int bottom_w = bottom[i]->width(); int bottom_h = bottom[i]->height(); int bottom_c = bottom[i]->channels(); int conv_out_h = this->output_shape_[0]; int conv_out_w = this->output_shape_[1]; int conv_in_h = this->conv_input_shape_.cpu_data()[1]; int conv_in_w = this->conv_input_shape_.cpu_data()[2]; for (int n = 0; n < top[i]->num(); n++) { const Dtype *single_top_diff = top_diff + top[i]->offset(n); const Dtype *single_bottom_data = bottom_data + bottom[i]->offset(n); Dtype *single_bottom_diff = bottom_diff + bottom[i]->offset(n); for (int lh = 0; lh < local_region_num_h_; lh++) { for (int lw = 0; lw < local_region_num_w_; lw++) { int loc_num = lh * local_region_num_w_ + lw; Dtype *loc_top_diff = loc_top_diff_buffer + loc_top_buffer_.offset(loc_num); Dtype *loc_bias_diff = bias_diff + this->blobs_[1]->offset(loc_num); Dtype *loc_bottom_data = loc_bottom_data_buffer + loc_bottom_buffer_.offset(loc_num); Dtype *loc_bottom_diff = loc_bottom_diff_buffer + loc_bottom_buffer_.offset(loc_num); Dtype *loc_weight_diff = weight_diff + this->blobs_[0]->offset(loc_num); const Dtype *loc_weight = weight + this->blobs_[0]->offset(loc_num); crop_loc_patch_gpu(single_top_diff, top_w, top_h, top_c , conv_out_w , conv_out_h , lw * conv_out_w , lh * conv_out_h , loc_top_diff); // Bias gradient, if necessary. 
if (this->bias_term_ && this->param_propagate_down_[1]) { this->backward_gpu_bias(loc_bias_diff, loc_top_diff); } if (this->param_propagate_down_[0] || propagate_down[i]) { // gradient w.r.t. weight. Note that we will accumulate diffs for n = [0, num - 1] if (this->param_propagate_down_[0]) { crop_loc_patch_gpu(single_bottom_data , bottom_w , bottom_h , bottom_c , conv_in_w , conv_in_h , idx_to_off_data[idx_to_off->offset(lh, lw, 1, 0)] , idx_to_off_data[idx_to_off->offset(lh, lw, 0, 0)] , loc_bottom_data); this->weight_gpu_gemm(loc_bottom_data, loc_top_diff, loc_weight_diff); } //gradient w.r.t. bottom data, if necessary. if (propagate_down[i]) { this->backward_gpu_gemm(loc_top_diff, loc_weight, loc_bottom_diff); } } }//for (int lw = 0; lw < local_region_num_w_; lw++) }//for (int lh = 0; lh< local_region_num_h_; lh++) //realign different local regions' gradients to proper bottom location realign_bottom_diff_gpu(loc_bottom_diff_buffer, single_bottom_diff); }//for (int n = 0; n < top[i]->num(); n++) }//for (int i = 0; i < top.size(); ++i) } INSTANTIATE_LAYER_GPU_FUNCS(LocalConvolutionLayer); }//namespace caffe
a9598b68475ec7c8e35d89db9359252a496306fe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Noel Lopes is an Assistant Professor at the Polytechnic Institute of Guarda, Portugal Copyright (C) 2009, 2010, 2011, 2012 Noel de Jesus Mendonça Lopes This file is part of GPUMLib. GPUMLib is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "MBPkernels.h" #define OUTPUT_NEURON threadIdx.x #define OUTPUT_INCLUDING_BIAS (threadIdx.x + 1) #define NUM_OUTPUTS blockDim.x #define NEURON threadIdx.y #define NUM_NEURONS blockDim.y #define NUM_INPUTS_OUTPUT_NEURON (NUM_NEURONS + 1) #define PATTERN blockIdx.x namespace GPUMLib { KERNEL CalculateLocalGradient(cudafloat * rmsF, cudafloat * bestRMS, cudafloat maxErrorGrowth, cudafloat * outputs, cudafloat * weights, cudafloat * m, int mOffset, int totalNeuronsWithSelectiveActivation, cudafloat * localGradientNextLayer, cudafloat * localGradient, cudafloat * localGradientSpaceNet) { extern __shared__ cudafloat lg[]; if (bestRMS != nullptr) { __shared__ cudafloat rms; __shared__ cudafloat bRMS; rms = *rmsF; bRMS = *bestRMS; if (rms >= bRMS * maxErrorGrowth) return; } cudafloat * lgNextLayer = (lg + (NUM_OUTPUTS * NUM_NEURONS)); if (NEURON == 0) lgNextLayer[OUTPUT_NEURON] = localGradientNextLayer[PATTERN * NUM_OUTPUTS + OUTPUT_NEURON]; int connection = OUTPUT_NEURON * NUM_INPUTS_OUTPUT_NEURON + NEURON + 1; int threadId = (NEURON * NUM_OUTPUTS + OUTPUT_NEURON); __syncthreads(); lg[threadId] = weights[connection] * lgNextLayer[OUTPUT_NEURON]; __syncthreads(); int numberElemSum = NUM_OUTPUTS; for(int sumUpTo = (numberElemSum >> 1); numberElemSum > 1; sumUpTo = (numberElemSum >> 1)) { int nextNumberElemSum = sumUpTo; if (numberElemSum & 1) nextNumberElemSum++; if (OUTPUT_NEURON < sumUpTo) lg[threadId] += lg[threadId + nextNumberElemSum]; numberElemSum = nextNumberElemSum; __syncthreads(); } if (OUTPUT_NEURON == 0) { int n = PATTERN * NUM_NEURONS + NEURON; cudafloat Fh = outputs[n]; cudafloat lgn = lg[threadId]; if (m != nullptr) { int nSelAct = PATTERN * totalNeuronsWithSelectiveActivation + NEURON + mOffset; cudafloat M = m[nSelAct]; if (M == CUDA_VALUE(0.0)) { localGradientSpaceNet[nSelAct] = CUDA_VALUE(0.0); } else { Fh = Fh / M; localGradientSpaceNet[nSelAct] = lgn * Fh * CUDA_SIGMOID_DERIVATE(M); } lgn *= M; } localGradient[n] = lgn * CUDA_SIGMOID_DERIVATE(Fh); } } }
a9598b68475ec7c8e35d89db9359252a496306fe.cu
/* Noel Lopes is an Assistant Professor at the Polytechnic Institute of Guarda, Portugal Copyright (C) 2009, 2010, 2011, 2012 Noel de Jesus Mendonça Lopes This file is part of GPUMLib. GPUMLib is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "MBPkernels.h" #define OUTPUT_NEURON threadIdx.x #define OUTPUT_INCLUDING_BIAS (threadIdx.x + 1) #define NUM_OUTPUTS blockDim.x #define NEURON threadIdx.y #define NUM_NEURONS blockDim.y #define NUM_INPUTS_OUTPUT_NEURON (NUM_NEURONS + 1) #define PATTERN blockIdx.x namespace GPUMLib { KERNEL CalculateLocalGradient(cudafloat * rmsF, cudafloat * bestRMS, cudafloat maxErrorGrowth, cudafloat * outputs, cudafloat * weights, cudafloat * m, int mOffset, int totalNeuronsWithSelectiveActivation, cudafloat * localGradientNextLayer, cudafloat * localGradient, cudafloat * localGradientSpaceNet) { extern __shared__ cudafloat lg[]; if (bestRMS != nullptr) { __shared__ cudafloat rms; __shared__ cudafloat bRMS; rms = *rmsF; bRMS = *bestRMS; if (rms >= bRMS * maxErrorGrowth) return; } cudafloat * lgNextLayer = (lg + (NUM_OUTPUTS * NUM_NEURONS)); if (NEURON == 0) lgNextLayer[OUTPUT_NEURON] = localGradientNextLayer[PATTERN * NUM_OUTPUTS + OUTPUT_NEURON]; int connection = OUTPUT_NEURON * NUM_INPUTS_OUTPUT_NEURON + NEURON + 1; int threadId = (NEURON * NUM_OUTPUTS + OUTPUT_NEURON); __syncthreads(); lg[threadId] = weights[connection] * lgNextLayer[OUTPUT_NEURON]; __syncthreads(); int numberElemSum = NUM_OUTPUTS; for(int sumUpTo = (numberElemSum >> 1); numberElemSum > 1; sumUpTo = (numberElemSum >> 1)) { int nextNumberElemSum = sumUpTo; if (numberElemSum & 1) nextNumberElemSum++; if (OUTPUT_NEURON < sumUpTo) lg[threadId] += lg[threadId + nextNumberElemSum]; numberElemSum = nextNumberElemSum; __syncthreads(); } if (OUTPUT_NEURON == 0) { int n = PATTERN * NUM_NEURONS + NEURON; cudafloat Fh = outputs[n]; cudafloat lgn = lg[threadId]; if (m != nullptr) { int nSelAct = PATTERN * totalNeuronsWithSelectiveActivation + NEURON + mOffset; cudafloat M = m[nSelAct]; if (M == CUDA_VALUE(0.0)) { localGradientSpaceNet[nSelAct] = CUDA_VALUE(0.0); } else { Fh = Fh / M; localGradientSpaceNet[nSelAct] = lgn * Fh * CUDA_SIGMOID_DERIVATE(M); } lgn *= M; } localGradient[n] = lgn * CUDA_SIGMOID_DERIVATE(Fh); } } }
fce4564615455d40c5004bc1bff25a4782668b21.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void FloatDivByConstant(float *A, float constant) { unsigned int i = blockIdx.x * gridDim.y * gridDim.z * blockDim.x + blockIdx.y * gridDim.z * blockDim.x + blockIdx.z * blockDim.x + threadIdx.x; A[i]=A[i]/constant; }
fce4564615455d40c5004bc1bff25a4782668b21.cu
#include "includes.h" __global__ void FloatDivByConstant(float *A, float constant) { unsigned int i = blockIdx.x * gridDim.y * gridDim.z * blockDim.x + blockIdx.y * gridDim.z * blockDim.x + blockIdx.z * blockDim.x + threadIdx.x; A[i]=A[i]/constant; }
0d15c495ca3f83516ccd7bbb765353224dc935cc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without *modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, *this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright *notice, this list of conditions and the following disclaimer in the *documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its *contributors may be used to endorse or promote products derived from this *software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT, *INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY *OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING *NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, *EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Statically sized array of elements that accommodates all CUTLASS-supported numeric types and is safe to use in a union. 
*/ #include "../common/cutlass_unit_test.h" #include "cutlass/array.h" #include "cutlass/core_io.h" #include "cutlass/numeric_types.h" #include "cutlass/numeric_conversion.h" #include "cutlass/layout/matrix.h" #include "cutlass/util/device_memory.h" #include "cutlass/util/host_tensor.h" ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void convert_bf16_f32(cutlass::bfloat16_t* output, float const* input, int N) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < N) { output[tid] = static_cast<cutlass::bfloat16_t>(input[tid]); } } __global__ void convert_and_pack_bf16(cutlass::bfloat16_t* output, float const* input, int N) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid * 2 < N) { cutlass::NumericArrayConverter<cutlass::bfloat16_t, float, 2> convert; cutlass::Array<cutlass::bfloat16_t, 2>* dst_ptr = reinterpret_cast<cutlass::Array<cutlass::bfloat16_t, 2>*>( output + tid * 2); cutlass::Array<float, 2> const* src_ptr = reinterpret_cast<cutlass::Array<float, 2> const*>(input + tid * 2); *dst_ptr = convert(*src_ptr); } } TEST(bfloat16_t, device_conversion) { using T = cutlass::bfloat16_t; using S = float; int const N = 256; cutlass::HostTensor<T, cutlass::layout::RowMajor> destination({N, 1}); cutlass::HostTensor<S, cutlass::layout::RowMajor> source({N, 1}); for (int i = 0; i < N; ++i) { source.at({i, 0}) = float(i - 128); destination.at({i, 0}) = T(0); } source.sync_device(); destination.sync_device(); hipLaunchKernelGGL(( convert_bf16_f32), dim3(dim3(1, 1)), dim3(dim3(N, 1)), 0, 0, destination.device_data(), source.device_data(), N); ASSERT_EQ(hipGetLastError(), hipSuccess) << "Kernel launch error."; destination.sync_host(); int errors = 0; for (int i = 0; i < N; ++i) { T got = destination.at({i, 0}); S expected = source.at({i, 0}); if (S(got) != expected) { ++errors; if (errors < 10) { std::cerr << "Basic conversion error - [" << i << "] - got " << got << ", expected " << expected << "\n"; } } destination.at({i, 0}) = T(0); } destination.sync_device(); hipLaunchKernelGGL(( convert_and_pack_bf16), dim3(dim3(1, 1)), dim3(dim3(N, 1)), 0, 0, destination.device_data(), source.device_data(), N); ASSERT_EQ(hipGetLastError(), hipSuccess) << "Kernel launch error."; destination.sync_host(); for (int i = 0; i < N; ++i) { T got = destination.at({i, 0}); S expected = source.at({i, 0}); if (S(got) != expected) { ++errors; if (errors < 10) { std::cerr << "Convert and pack error - [" << i << "] - got " << got << ", expected " << expected << "\n"; } } } EXPECT_EQ(errors, 0); } ///////////////////////////////////////////////////////////////////////////////////////////////// // // Host // ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(bfloat16_t, host_conversion) { for (int i = -128; i < 128; ++i) { float f = static_cast<float>(i); cutlass::bfloat16_t x = static_cast<cutlass::bfloat16_t>(i); cutlass::bfloat16_t y = static_cast<cutlass::bfloat16_t>(f); EXPECT_TRUE(static_cast<int>(x) == i); EXPECT_TRUE(static_cast<float>(y) == f); } // Try out default-ctor (zero initialization of primitive proxy type) EXPECT_TRUE(cutlass::bfloat16_t() == 0.0_bf16); // Try out user-defined literals EXPECT_TRUE(cutlass::bfloat16_t(7) == 7_bf16); EXPECT_TRUE(7 == static_cast<int>(7_bf16)); } TEST(bfloat16_t, host_arithmetic) { for (int i = -100; i < 100; ++i) { for (int j = -100; j < 100; ++j) { cutlass::bfloat16_t x = static_cast<cutlass::bfloat16_t>(i); cutlass::bfloat16_t y = 
static_cast<cutlass::bfloat16_t>(j); EXPECT_TRUE(static_cast<int>(x + y) == (i + j)); } } } TEST(bfloat16_t, host_round) { struct { uint32_t f32_bits; uint16_t expected; } tests[] = {{0x40040000, 0x4004}, // M=0, R=0, S=0 => rtz {0x40048000, 0x4004}, // M=0, R=1, S=0 => rtz {0x40040001, 0x4004}, // M=0, R=1, S=1 => +inf {0x4004c000, 0x4005}, // M=0, R=1, S=1 => +inf {0x4004a000, 0x4005}, // M=0, R=1, S=1 => +inf {0x40050000, 0x4005}, // M=1, R=0, S=0 => rtz {0x40054000, 0x4005}, // M=1, R=0, S=1 => rtz {0x40058000, 0x4006}, // M=1, R=1, S=0 => +inf {0x40058001, 0x4006}, // M=1, R=1, S=1 => +inf {0x7f800000, 0x7f80}, // +inf {0xff800000, 0xff80}, // -inf {0x7fffffff, 0x7fff}, // canonical NaN {0x7ff00001, 0x7fff}, // NaN -> canonical NaN {0xfff00010, 0x7fff}, // Nan -> canonical NaN {0, 0}}; bool running = true; for (int i = 0; running; ++i) { float f32 = reinterpret_cast<float const&>(tests[i].f32_bits); cutlass::bfloat16_t bf16 = cutlass::bfloat16_t(f32); bool passed = (tests[i].expected == bf16.raw()); EXPECT_TRUE(passed) << "Error - convert(f32: 0x" << std::hex << tests[i].f32_bits << ") -> 0x" << std::hex << tests[i].expected << "\ngot: 0x" << std::hex << bf16.raw(); if (!tests[i].f32_bits) { running = false; } } } ///////////////////////////////////////////////////////////////////////////////////////////////// // // Device // /////////////////////////////////////////////////////////////////////////////////////////////////
0d15c495ca3f83516ccd7bbb765353224dc935cc.cu
/*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without *modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, *this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright *notice, this list of conditions and the following disclaimer in the *documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its *contributors may be used to endorse or promote products derived from this *software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT, *INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY *OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING *NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, *EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Statically sized array of elements that accommodates all CUTLASS-supported numeric types and is safe to use in a union. 
*/ #include "../common/cutlass_unit_test.h" #include "cutlass/array.h" #include "cutlass/core_io.h" #include "cutlass/numeric_types.h" #include "cutlass/numeric_conversion.h" #include "cutlass/layout/matrix.h" #include "cutlass/util/device_memory.h" #include "cutlass/util/host_tensor.h" ///////////////////////////////////////////////////////////////////////////////////////////////// __global__ void convert_bf16_f32(cutlass::bfloat16_t* output, float const* input, int N) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid < N) { output[tid] = static_cast<cutlass::bfloat16_t>(input[tid]); } } __global__ void convert_and_pack_bf16(cutlass::bfloat16_t* output, float const* input, int N) { int tid = threadIdx.x + blockIdx.x * blockDim.x; if (tid * 2 < N) { cutlass::NumericArrayConverter<cutlass::bfloat16_t, float, 2> convert; cutlass::Array<cutlass::bfloat16_t, 2>* dst_ptr = reinterpret_cast<cutlass::Array<cutlass::bfloat16_t, 2>*>( output + tid * 2); cutlass::Array<float, 2> const* src_ptr = reinterpret_cast<cutlass::Array<float, 2> const*>(input + tid * 2); *dst_ptr = convert(*src_ptr); } } TEST(bfloat16_t, device_conversion) { using T = cutlass::bfloat16_t; using S = float; int const N = 256; cutlass::HostTensor<T, cutlass::layout::RowMajor> destination({N, 1}); cutlass::HostTensor<S, cutlass::layout::RowMajor> source({N, 1}); for (int i = 0; i < N; ++i) { source.at({i, 0}) = float(i - 128); destination.at({i, 0}) = T(0); } source.sync_device(); destination.sync_device(); convert_bf16_f32<<<dim3(1, 1), dim3(N, 1)>>>(destination.device_data(), source.device_data(), N); ASSERT_EQ(cudaGetLastError(), cudaSuccess) << "Kernel launch error."; destination.sync_host(); int errors = 0; for (int i = 0; i < N; ++i) { T got = destination.at({i, 0}); S expected = source.at({i, 0}); if (S(got) != expected) { ++errors; if (errors < 10) { std::cerr << "Basic conversion error - [" << i << "] - got " << got << ", expected " << expected << "\n"; } } destination.at({i, 0}) = T(0); } destination.sync_device(); convert_and_pack_bf16<<<dim3(1, 1), dim3(N, 1)>>>(destination.device_data(), source.device_data(), N); ASSERT_EQ(cudaGetLastError(), cudaSuccess) << "Kernel launch error."; destination.sync_host(); for (int i = 0; i < N; ++i) { T got = destination.at({i, 0}); S expected = source.at({i, 0}); if (S(got) != expected) { ++errors; if (errors < 10) { std::cerr << "Convert and pack error - [" << i << "] - got " << got << ", expected " << expected << "\n"; } } } EXPECT_EQ(errors, 0); } ///////////////////////////////////////////////////////////////////////////////////////////////// // // Host // ///////////////////////////////////////////////////////////////////////////////////////////////// TEST(bfloat16_t, host_conversion) { for (int i = -128; i < 128; ++i) { float f = static_cast<float>(i); cutlass::bfloat16_t x = static_cast<cutlass::bfloat16_t>(i); cutlass::bfloat16_t y = static_cast<cutlass::bfloat16_t>(f); EXPECT_TRUE(static_cast<int>(x) == i); EXPECT_TRUE(static_cast<float>(y) == f); } // Try out default-ctor (zero initialization of primitive proxy type) EXPECT_TRUE(cutlass::bfloat16_t() == 0.0_bf16); // Try out user-defined literals EXPECT_TRUE(cutlass::bfloat16_t(7) == 7_bf16); EXPECT_TRUE(7 == static_cast<int>(7_bf16)); } TEST(bfloat16_t, host_arithmetic) { for (int i = -100; i < 100; ++i) { for (int j = -100; j < 100; ++j) { cutlass::bfloat16_t x = static_cast<cutlass::bfloat16_t>(i); cutlass::bfloat16_t y = static_cast<cutlass::bfloat16_t>(j); EXPECT_TRUE(static_cast<int>(x + y) == (i + j)); } 
} } TEST(bfloat16_t, host_round) { struct { uint32_t f32_bits; uint16_t expected; } tests[] = {{0x40040000, 0x4004}, // M=0, R=0, S=0 => rtz {0x40048000, 0x4004}, // M=0, R=1, S=0 => rtz {0x40040001, 0x4004}, // M=0, R=1, S=1 => +inf {0x4004c000, 0x4005}, // M=0, R=1, S=1 => +inf {0x4004a000, 0x4005}, // M=0, R=1, S=1 => +inf {0x40050000, 0x4005}, // M=1, R=0, S=0 => rtz {0x40054000, 0x4005}, // M=1, R=0, S=1 => rtz {0x40058000, 0x4006}, // M=1, R=1, S=0 => +inf {0x40058001, 0x4006}, // M=1, R=1, S=1 => +inf {0x7f800000, 0x7f80}, // +inf {0xff800000, 0xff80}, // -inf {0x7fffffff, 0x7fff}, // canonical NaN {0x7ff00001, 0x7fff}, // NaN -> canonical NaN {0xfff00010, 0x7fff}, // Nan -> canonical NaN {0, 0}}; bool running = true; for (int i = 0; running; ++i) { float f32 = reinterpret_cast<float const&>(tests[i].f32_bits); cutlass::bfloat16_t bf16 = cutlass::bfloat16_t(f32); bool passed = (tests[i].expected == bf16.raw()); EXPECT_TRUE(passed) << "Error - convert(f32: 0x" << std::hex << tests[i].f32_bits << ") -> 0x" << std::hex << tests[i].expected << "\ngot: 0x" << std::hex << bf16.raw(); if (!tests[i].f32_bits) { running = false; } } } ///////////////////////////////////////////////////////////////////////////////////////////////// // // Device // /////////////////////////////////////////////////////////////////////////////////////////////////
f021410fdc0658e71cbed5518c2df1053d681c3b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void multiplyBy2o(int *size, const long *in, long *out) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; if (ix < *size) { out[ix] = in[ix] * 2; } }
f021410fdc0658e71cbed5518c2df1053d681c3b.cu
#include "includes.h" __global__ void multiplyBy2o(int *size, const long *in, long *out) { const int ix = threadIdx.x + blockIdx.x * blockDim.x; if (ix < *size) { out[ix] = in[ix] * 2; } }
024b075b0bda4ac34cc487e386490e76c1c033c7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #define N 30 #define BLOCK_SIZE 8 #define RADIUS 3 __global__ void stencil_1d(int *in, int *out) { __shared__ int temp[BLOCK_SIZE + 2 * RADIUS]; int gindex = threadIdx.x + blockIdx.x * blockDim.x; int lindex = threadIdx.x + RADIUS; // Read input elements into shared memory temp[lindex] = in[gindex]; if (threadIdx.x < RADIUS) { temp[lindex - RADIUS] = in[gindex - RADIUS]; temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE]; } // Thread synchronization (barrier) - check what happens without it __syncthreads(); // Apply the stencil int result = 0; for (int offset = -RADIUS ; offset <= RADIUS ; offset++) result += temp[lindex + offset]; // Store the result out[gindex] = result; } void random (int *tab, int wym ) { int i; for(i=0;i<wym;i++) tab[i]=rand()%101; } int main(void) { int *in, *out; // host copies of in, out; int *d_in, *d_out; // device copies of in and out int size = N * sizeof(int); int i; srand(time(NULL)); // Allocate space for device copies of in and out hipMalloc((void **)&d_in, size); hipMalloc((void **)&d_out, size); // Alloc space for host arrays in and out and setup input values in = (int *)malloc(size); random(in, N); out = (int *)malloc(size); // Copy inputs to device hipMemcpy(d_in, in, size, hipMemcpyHostToDevice); // Launch stencil_1d() kernel on GPU hipLaunchKernelGGL(( stencil_1d), dim3((N+BLOCK_SIZE-1)/BLOCK_SIZE),dim3(BLOCK_SIZE), 0, 0, d_in, d_out); // Copy result back to host hipMemcpy(out, d_out, size, hipMemcpyDeviceToHost); for(i=0;i<N;i++) { printf("in[%d](%d) = out[%d](%d)\n",i,in[i],i,out[i]); } // Cleanup //printf("%d+%d=%d\n",a,b,c); free(in); free(out); hipFree(d_in); hipFree(d_out); return 0; }
024b075b0bda4ac34cc487e386490e76c1c033c7.cu
#include<stdio.h> #define N 30 #define BLOCK_SIZE 8 #define RADIUS 3 __global__ void stencil_1d(int *in, int *out) { __shared__ int temp[BLOCK_SIZE + 2 * RADIUS]; int gindex = threadIdx.x + blockIdx.x * blockDim.x; int lindex = threadIdx.x + RADIUS; // Read input elements into shared memory temp[lindex] = in[gindex]; if (threadIdx.x < RADIUS) { temp[lindex - RADIUS] = in[gindex - RADIUS]; temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE]; } // Thread synchronization (barrier) - check what happens without it __syncthreads(); // Apply the stencil int result = 0; for (int offset = -RADIUS ; offset <= RADIUS ; offset++) result += temp[lindex + offset]; // Store the result out[gindex] = result; } void random (int *tab, int wym ) { int i; for(i=0;i<wym;i++) tab[i]=rand()%101; } int main(void) { int *in, *out; // host copies of in, out; int *d_in, *d_out; // device copies of in and out int size = N * sizeof(int); int i; srand(time(NULL)); // Allocate space for device copies of in and out cudaMalloc((void **)&d_in, size); cudaMalloc((void **)&d_out, size); // Alloc space for host arrays in and out and setup input values in = (int *)malloc(size); random(in, N); out = (int *)malloc(size); // Copy inputs to device cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice); // Launch stencil_1d() kernel on GPU stencil_1d<<<(N+BLOCK_SIZE-1)/BLOCK_SIZE,BLOCK_SIZE>>>(d_in, d_out); // Copy result back to host cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost); for(i=0;i<N;i++) { printf("in[%d](%d) = out[%d](%d)\n",i,in[i],i,out[i]); } // Cleanup //printf("%d+%d=%d\n",a,b,c); free(in); free(out); cudaFree(d_in); cudaFree(d_out); return 0; }
1dcbe6ac818df6f57c7c775cc5ca4dceb20e884f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ // This file is auto-generated. See "generate_kernels.py" #include <ATen/native/transformers/hip/mem_eff_attention/kernel_forward.h> using namespace PyTorchMemEffAttention; __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 64, 64, 64, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 64, 64, 64, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_64x64_rf_sm50(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 64, 64, 64, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 500 #if __CUDA_ARCH__ < 700 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 64, 64, 64, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_64x64_rf_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 64, 64, 64, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 64, 64, 64, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_64x64_rf_sm70(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 64, 64, 64, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 700 #if __CUDA_ARCH__ < 750 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 64, 64, 64, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_64x64_rf_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 64, 64, 64, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 64, 64, 64, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_64x64_rf_sm75(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 64, 64, 64, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 750 #if __CUDA_ARCH__ < 800 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 64, 64, 64, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_64x64_rf_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_64x64_rf_sm80(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_64x64_rf_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } 
__global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 32, 128, 128, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 32, 128, 128, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_32x128_rf_sm50(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 32, 128, 128, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 500 #if __CUDA_ARCH__ < 700 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 32, 128, 128, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_32x128_rf_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 32, 128, 128, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 32, 128, 128, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_32x128_rf_sm70(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 32, 128, 128, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 700 #if __CUDA_ARCH__ < 750 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 32, 128, 128, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_32x128_rf_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 32, 128, 128, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 32, 128, 128, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_32x128_rf_sm75(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 32, 128, 128, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 750 #if __CUDA_ARCH__ < 800 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 32, 128, 128, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_32x128_rf_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_64x128_rf_sm80(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_64x128_rf_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 32, 128, 65536, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 32, 128, 65536, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_32x128_gmem_sm50(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 32, 128, 65536, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 500 
#if __CUDA_ARCH__ < 700 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 32, 128, 65536, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_32x128_gmem_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 32, 128, 65536, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 32, 128, 65536, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_32x128_gmem_sm70(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 32, 128, 65536, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 700 #if __CUDA_ARCH__ < 750 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 32, 128, 65536, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_32x128_gmem_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 32, 128, 65536, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 32, 128, 65536, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_32x128_gmem_sm75(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 32, 128, 65536, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 750 #if __CUDA_ARCH__ < 800 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 32, 128, 65536, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_32x128_gmem_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_32x128_gmem_sm80(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_32x128_gmem_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif }
1dcbe6ac818df6f57c7c775cc5ca4dceb20e884f.cu
/* * Copyright (c) Meta Platforms, Inc. and affiliates. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ // This file is auto-generated. See "generate_kernels.py" #include <ATen/native/transformers/cuda/mem_eff_attention/kernel_forward.h> using namespace PyTorchMemEffAttention; __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 64, 64, 64, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 64, 64, 64, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_64x64_rf_sm50(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 64, 64, 64, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 500 #if __CUDA_ARCH__ < 700 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 64, 64, 64, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_64x64_rf_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 64, 64, 64, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 64, 64, 64, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_64x64_rf_sm70(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 64, 64, 64, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 700 #if __CUDA_ARCH__ < 750 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 64, 64, 64, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_64x64_rf_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 64, 64, 64, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 64, 64, 64, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_64x64_rf_sm75(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 64, 64, 64, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 750 #if __CUDA_ARCH__ < 800 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 64, 64, 64, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_64x64_rf_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_64x64_rf_sm80(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 64, 64, 64, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_64x64_rf_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 
32, 128, 128, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 32, 128, 128, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_32x128_rf_sm50(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 32, 128, 128, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 500 #if __CUDA_ARCH__ < 700 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 32, 128, 128, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_32x128_rf_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 32, 128, 128, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 32, 128, 128, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_32x128_rf_sm70(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 32, 128, 128, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 700 #if __CUDA_ARCH__ < 750 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 32, 128, 128, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_32x128_rf_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 32, 128, 128, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 32, 128, 128, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_32x128_rf_sm75(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 32, 128, 128, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 750 #if __CUDA_ARCH__ < 800 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 32, 128, 128, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_32x128_rf_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_64x128_rf_sm80(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 64, 128, 128, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_64x128_rf_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 32, 128, 65536, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 32, 128, 65536, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_32x128_gmem_sm50(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm50, true, 32, 128, 65536, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 500 #if __CUDA_ARCH__ < 700 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, 
cutlass::arch::Sm50, true, 32, 128, 65536, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_32x128_gmem_sm50` is for sm50-sm70, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 32, 128, 65536, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 32, 128, 65536, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_32x128_gmem_sm70(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 32, 128, 65536, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 700 #if __CUDA_ARCH__ < 750 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm70, true, 32, 128, 65536, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_32x128_gmem_sm70` is for sm70-sm75, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 32, 128, 65536, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 32, 128, 65536, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_32x128_gmem_sm75(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 32, 128, 65536, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 750 #if __CUDA_ARCH__ < 800 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm75, true, 32, 128, 65536, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_32x128_gmem_sm75` is for sm75-sm80, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif } __global__ void __launch_bounds__( AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::kNumThreads, AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::kMinBlocksPerSm) fmha_cutlassF_f16_aligned_32x128_gmem_sm80(typename AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::Params p) { #ifdef __CUDA_ARCH__ #if __CUDA_ARCH__ >= 800 #if __CUDA_ARCH__ < 1000 if (!p.advance_to_block()) { return; } AttentionKernel<cutlass::half_t, cutlass::arch::Sm80, true, 32, 128, 65536, true, true>::attention_kernel(p); return; #endif #endif printf( "FATAL: kernel `fmha_cutlassF_f16_aligned_32x128_gmem_sm80` is for sm80-sm100, but was built for sm%d\n", int(__CUDA_ARCH__ + 0) / 10); #endif }
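The kernel variants above are guarded by __CUDA_ARCH__ ranges and fall through to the FATAL printf when launched on a mismatched architecture, so the host has to select the sm50/sm70/sm75/sm80 variant that matches the current device. A minimal host-side sketch of that selection follows; pick_fmha_variant and the toy main are illustrative and not part of the generated file.

#include <cstdio>
#include <cuda_runtime.h>

// Map the current device's compute capability onto the suffix used by the
// generated kernel variants (sm50 covers sm50-sm70, sm70 covers sm70-sm75, ...).
static const char* pick_fmha_variant() {
  int dev = 0;
  cudaDeviceProp prop{};
  cudaGetDevice(&dev);
  cudaGetDeviceProperties(&prop, dev);
  const int sm = prop.major * 10 + prop.minor;
  if (sm >= 80) return "sm80";
  if (sm >= 75) return "sm75";
  if (sm >= 70) return "sm70";
  if (sm >= 50) return "sm50";
  return "unsupported";
}

int main() {
  std::printf("would dispatch to fmha_cutlassF_f16_aligned_*_%s\n", pick_fmha_variant());
  return 0;
}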
9e494c2ece1f050c29531117a18ff0677c1484f3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <math.h> // Kernel function to add the elements of two arrays __global__ void add(int n, float *x, float *y) { // threadIdx.x contains the index of the current thread within its block, // blockDim.x contains the number of threads in the block int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } int main(void) { int N = 1 << 20; float *x, *y; // Allocate Unified Memory accessible from CPU or GPU hipMallocManaged(&x, N * sizeof(float)); hipMallocManaged(&y, N * sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on 1M elements on the GPU // Execution configuration // CUDA GPUs run kernels using blocks of threads that are a multiple of 32 in size int blockSize = 256; int numBlocks = (N + blockSize - 1) / blockSize; add << < numBlocks, blockSize >> > (N, x, y); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i] - 3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory hipFree(x); hipFree(y); return 0; }
9e494c2ece1f050c29531117a18ff0677c1484f3.cu
#include <iostream> #include <math.h> // Kernel function to add the elements of two arrays __global__ void add(int n, float *x, float *y) { // threadIdx.x contains the index of the current thread within its block, // blockDim.x contains the number of threads in the block int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } int main(void) { int N = 1 << 20; float *x, *y; // Allocate Unified Memory – accessible from CPU or GPU cudaMallocManaged(&x, N * sizeof(float)); cudaMallocManaged(&y, N * sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on 1M elements on the GPU // Execution configuration // CUDA GPUs run kernels using blocks of threads that are a multiple of 32 in size int blockSize = 256; int numBlocks = (N + blockSize - 1) / blockSize; add << < numBlocks, blockSize >> > (N, x, y); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i] - 3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory cudaFree(x); cudaFree(y); return 0; }
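The managed-memory example above relies on demand paging to migrate x and y to the GPU and never checks whether the launch succeeded. The sketch below adds both pieces; checkCuda and launch_add_checked are illustrative helpers assumed here, not part of the original program, and the sketch reuses the add kernel defined above.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Abort with a readable message if any CUDA runtime call fails.
#define checkCuda(call)                                                        \
  do {                                                                         \
    cudaError_t err_ = (call);                                                 \
    if (err_ != cudaSuccess) {                                                 \
      std::fprintf(stderr, "CUDA error %s at %s:%d\n",                         \
                   cudaGetErrorString(err_), __FILE__, __LINE__);              \
      std::exit(1);                                                            \
    }                                                                          \
  } while (0)

// Prefetch the managed arrays to the GPU so the kernel does not pay for page
// faults, then launch and surface any configuration error immediately.
void launch_add_checked(int n, float* x, float* y, int numBlocks, int blockSize) {
  int device = 0;
  checkCuda(cudaGetDevice(&device));
  checkCuda(cudaMemPrefetchAsync(x, n * sizeof(float), device, 0));
  checkCuda(cudaMemPrefetchAsync(y, n * sizeof(float), device, 0));
  add<<<numBlocks, blockSize>>>(n, x, y);  // kernel defined in the file above
  checkCuda(cudaGetLastError());
  checkCuda(cudaDeviceSynchronize());
}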
560620c2a7f64a86cbe2bbca24024b80bf7c4ea2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Program to solve Laplace equation on a regular 3D grid // #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> //#include <cutil.h> // define kernel block size #define BLOCK_X 32 #define BLOCK_Y 4 // include kernel function #include "laplace3d_kernel.cu" // declaration, forward extern "C" void Gold_laplace3d(int NX, int NY, int NZ, float* h_u1, float* h_u2); void printHelp(void); #define CUDA_SAFE_CALL_NO_SYNC(call) { \ hipError_t err = call; \ if( hipSuccess != err) { \ fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \ __FILE__, __LINE__, hipGetErrorString( err) ); \ exit(EXIT_FAILURE); \ } } #define CUDA_SAFE_CALL(call) CUDA_SAFE_CALL_NO_SYNC(call); #define CUT_CHECK_ERROR(errorMessage) { \ hipError_t err = hipGetLastError(); \ if( hipSuccess != err) { \ fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", \ errorMessage, __FILE__, __LINE__, hipGetErrorString( err) );\ exit(EXIT_FAILURE); \ } \ } // Main program int main(int argc, char **argv){ // 'h_' prefix - CPU (host) memory space int NX, NY, NZ, REPEAT, bx, by, i, j, k, ind, pitch; size_t pitch_bytes; float *h_u1, *h_u2, *h_u3, *h_foo, err; // unsigned int hTimer; // 'd_' prefix - GPU (device) memory space float *d_u1, *d_u2, *d_foo; // check command line inputs /* if(cutCheckCmdLineFlag( argc, (const char**)argv, "help")) { printHelp(); return 1; } if( cutGetCmdLineArgumenti( argc, (const char**)argv, "nx", &NX) ) { if( NX <= 99 ) { printf("Illegal argument - nx must be greater than 99\n"); return -1; } } else NX = 100; if( cutGetCmdLineArgumenti( argc, (const char**)argv, "ny", &NY) ) { if( NY <= 99 ) { printf("Illegal argument - ny must be greater than 99\n"); return -1; } } else NY = 100; if( cutGetCmdLineArgumenti( argc, (const char**)argv, "nz", &NZ) ) { if( NZ <= 99 ) { printf("Illegal argument - nz must be greater than 99\n"); return -1; } } else NZ = 100; if( cutGetCmdLineArgumenti( argc, (const char**)argv, "repeat", &REPEAT) ) { if( REPEAT <= 0 ) { printf("Illegal argument - repeat must be greater than zero\n"); return -1; } } else REPEAT = 1; */ NX = 100; NY = 100; NZ = 100; REPEAT = 1; printf("[BENCH] Grid dimensions: %d x %d x %d\n", NX, NY, NZ); // initialise card and timer /* int deviceCount; CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceCount(&deviceCount)); if (deviceCount == 0) { fprintf(stderr, "There is no device.\n"); exit(EXIT_FAILURE); } int dev; for (dev = 0; dev < deviceCount; ++dev) { hipDeviceProp_t deviceProp; CUDA_SAFE_CALL_NO_SYNC(hipGetDeviceProperties(&deviceProp, dev)); if (deviceProp.major >= 1) break; } if (dev == deviceCount) { fprintf(stderr, "There is no device supporting CUDA.\n"); exit(EXIT_FAILURE); } else CUDA_SAFE_CALL(hipSetDevice(dev)); */ // CUT_SAFE_CALL( cutCreateTimer(&hTimer) ); // allocate memory for arrays h_u1 = (float *)malloc(sizeof(float)*NX*NY*NZ); h_u2 = (float *)malloc(sizeof(float)*NX*NY*NZ); h_u3 = (float *)malloc(sizeof(float)*NX*NY*NZ); CUDA_SAFE_CALL( hipMallocPitch((void **)&d_u1, &pitch_bytes, sizeof(float)*NX, NY*NZ) ); CUDA_SAFE_CALL( hipMallocPitch((void **)&d_u2, &pitch_bytes, sizeof(float)*NX, NY*NZ) ); pitch = pitch_bytes/sizeof(float); // initialise u1 for (k=0; k<NZ; k++) { for (j=0; j<NY; j++) { for (i=0; i<NX; i++) { ind = i + j*NX + k*NX*NY; if (i==0 || i==NX-1 || j==0 || j==NY-1|| k==0 || k==NZ-1) h_u1[ind] = 1.0f; // Dirichlet b.c.'s else h_u1[ind] = 0.0f; } } } // copy u1 to device // CUT_SAFE_CALL(cutStartTimer(hTimer)); 
CUDA_SAFE_CALL( hipMemcpy2D(d_u1, pitch_bytes, h_u1, sizeof(float)*NX, sizeof(float)*NX, NY*NZ, hipMemcpyHostToDevice) ); CUDA_SAFE_CALL( hipDeviceSynchronize() ); // CUT_SAFE_CALL(cutStopTimer(hTimer)); // printf("\nCopy u1 to device: %f (ms) \n", cutGetTimerValue(hTimer)); // CUT_SAFE_CALL( cutResetTimer(hTimer) ); // Set up the execution configuration bx = 1 + (NX-1)/BLOCK_X; by = 1 + (NY-1)/BLOCK_Y; dim3 dimGrid(bx,by); dim3 dimBlock(BLOCK_X,BLOCK_Y); printf("[BENCH] dimGrid = %d %d %d \n",dimGrid.x,dimGrid.y,dimGrid.z); printf("[BENCH] dimBlock = %d %d %d \n",dimBlock.x,dimBlock.y,dimBlock.z); // Execute GPU kernel CUDA_SAFE_CALL( hipDeviceSynchronize() ); // CUT_SAFE_CALL( cutResetTimer(hTimer) ); // CUT_SAFE_CALL( cutStartTimer(hTimer) ); for (i = 1; i <= REPEAT; ++i) { hipLaunchKernelGGL(( GPU_laplace3d), dim3(dimGrid), dim3(dimBlock), 0, 0, NX, NY, NZ, pitch, d_u1, d_u2); d_foo = d_u1; d_u1 = d_u2; d_u2 = d_foo; // swap d_u1 and d_u3 CUDA_SAFE_CALL( hipDeviceSynchronize() ); CUT_CHECK_ERROR("GPU_laplace3d execution failed\n"); } // CUT_SAFE_CALL( cutStopTimer(hTimer) ); // printf("\n%dx GPU_laplace3d: %f (ms) \n", REPEAT, cutGetTimerValue(hTimer)); // CUT_SAFE_CALL( cutResetTimer(hTimer) ); // Read back GPU results // CUT_SAFE_CALL( cutStartTimer(hTimer) ); CUDA_SAFE_CALL( hipMemcpy2D(h_u2, sizeof(float)*NX, d_u1, pitch_bytes, sizeof(float)*NX, NY*NZ, hipMemcpyDeviceToHost) ); // CUT_SAFE_CALL( cutStopTimer(hTimer) ); // printf("\nCopy u2 to host: %f (ms) \n", cutGetTimerValue(hTimer)); // CUT_SAFE_CALL( cutResetTimer(hTimer) ); // print out corner of array /* for (k=0; k<3; k++) { for (j=0; j<8; j++) { for (i=0; i<8; i++) { ind = i + j*NX + k*NX*NY; printf(" %5.2f ", h_u2[ind]); } printf("\n"); } printf("\n"); } */ // Gold treatment // CUT_SAFE_CALL( cutResetTimer(hTimer) ); // CUT_SAFE_CALL( cutStartTimer(hTimer) ); for (int i = 1; i <= REPEAT; ++i) { Gold_laplace3d(NX, NY, NZ, h_u1, h_u3); h_foo = h_u1; h_u1 = h_u3; h_u3 = h_foo; // swap h_u1 and h_u3 } // CUT_SAFE_CALL( cutStopTimer(hTimer) ); // printf("\n%dx Gold_laplace3d: %f (ms) \n \n", REPEAT, cutGetTimerValue(hTimer)); // print out corner of array /* for (k=0; k<3; k++) { for (j=0; j<8; j++) { for (i=0; i<8; i++) { ind = i + j*NX + k*NX*NY; printf(" %5.2f ", h_u1[ind]); } printf("\n"); } printf("\n"); } */ // error check err = 0.0; for (k=0; k<NZ; k++) { for (j=0; j<NY; j++) { for (i=0; i<NX; i++) { ind = i + j*NX + k*NX*NY; err += (h_u1[ind]-h_u2[ind])*(h_u1[ind]-h_u2[ind]); } } } printf("[BENCH] rms error = %f \n",sqrt(err/ (float)(NX*NY*NZ))); // Release GPU and CPU memory // printf("CUDA_SAFE_CALL( hipFree(d_u1) );\n"); // fflush(stdout); CUDA_SAFE_CALL( hipFree(d_u1) ); // printf("CUDA_SAFE_CALL( hipFree(d_u2) );\n"); // fflush(stdout); CUDA_SAFE_CALL( hipFree(d_u2) ); // printf("free(h_u1);\n"); // fflush(stdout); free(h_u1); // printf("free(h_u2);\n"); // fflush(stdout); free(h_u2); // printf("free(h_u3);\n"); // fflush(stdout); free(h_u3); // CUT_SAFE_CALL( cutDeleteTimer(hTimer) ); // CUT_EXIT(argc, argv); } /////////////////////////////////////////////////////////////////////////// //Print help screen /////////////////////////////////////////////////////////////////////////// void printHelp(void) { printf("Usage: laplace3d [OPTION]...\n"); printf("6-point stencil 3D Laplace test \n"); printf("\n"); printf("Example: run 100 iterations on a 256x128x128 grid\n"); printf("./laplace3d --nx=256 --ny=128 --nz=128 --repeat=100\n"); printf("\n"); printf("Options:\n"); printf("--help\t\t\tDisplay this help menu\n"); 
printf("--nx=[SIZE]\t\tGrid width\n"); printf("--ny=[SIZE]\t\tGrid height\n"); printf("--nz=[SIZE]\t\tGrid depth\n"); printf("--repeat=[COUNT]\tNumber of repetitions\n"); }
560620c2a7f64a86cbe2bbca24024b80bf7c4ea2.cu
// // Program to solve Laplace equation on a regular 3D grid // #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> //#include <cutil.h> // define kernel block size #define BLOCK_X 32 #define BLOCK_Y 4 // include kernel function #include "laplace3d_kernel.cu" // declaration, forward extern "C" void Gold_laplace3d(int NX, int NY, int NZ, float* h_u1, float* h_u2); void printHelp(void); #define CUDA_SAFE_CALL_NO_SYNC(call) { \ cudaError err = call; \ if( cudaSuccess != err) { \ fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \ __FILE__, __LINE__, cudaGetErrorString( err) ); \ exit(EXIT_FAILURE); \ } } #define CUDA_SAFE_CALL(call) CUDA_SAFE_CALL_NO_SYNC(call); #define CUT_CHECK_ERROR(errorMessage) { \ cudaError_t err = cudaGetLastError(); \ if( cudaSuccess != err) { \ fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", \ errorMessage, __FILE__, __LINE__, cudaGetErrorString( err) );\ exit(EXIT_FAILURE); \ } \ } // Main program int main(int argc, char **argv){ // 'h_' prefix - CPU (host) memory space int NX, NY, NZ, REPEAT, bx, by, i, j, k, ind, pitch; size_t pitch_bytes; float *h_u1, *h_u2, *h_u3, *h_foo, err; // unsigned int hTimer; // 'd_' prefix - GPU (device) memory space float *d_u1, *d_u2, *d_foo; // check command line inputs /* if(cutCheckCmdLineFlag( argc, (const char**)argv, "help")) { printHelp(); return 1; } if( cutGetCmdLineArgumenti( argc, (const char**)argv, "nx", &NX) ) { if( NX <= 99 ) { printf("Illegal argument - nx must be greater than 99\n"); return -1; } } else NX = 100; if( cutGetCmdLineArgumenti( argc, (const char**)argv, "ny", &NY) ) { if( NY <= 99 ) { printf("Illegal argument - ny must be greater than 99\n"); return -1; } } else NY = 100; if( cutGetCmdLineArgumenti( argc, (const char**)argv, "nz", &NZ) ) { if( NZ <= 99 ) { printf("Illegal argument - nz must be greater than 99\n"); return -1; } } else NZ = 100; if( cutGetCmdLineArgumenti( argc, (const char**)argv, "repeat", &REPEAT) ) { if( REPEAT <= 0 ) { printf("Illegal argument - repeat must be greater than zero\n"); return -1; } } else REPEAT = 1; */ NX = 100; NY = 100; NZ = 100; REPEAT = 1; printf("[BENCH] Grid dimensions: %d x %d x %d\n", NX, NY, NZ); // initialise card and timer /* int deviceCount; CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceCount(&deviceCount)); if (deviceCount == 0) { fprintf(stderr, "There is no device.\n"); exit(EXIT_FAILURE); } int dev; for (dev = 0; dev < deviceCount; ++dev) { cudaDeviceProp deviceProp; CUDA_SAFE_CALL_NO_SYNC(cudaGetDeviceProperties(&deviceProp, dev)); if (deviceProp.major >= 1) break; } if (dev == deviceCount) { fprintf(stderr, "There is no device supporting CUDA.\n"); exit(EXIT_FAILURE); } else CUDA_SAFE_CALL(cudaSetDevice(dev)); */ // CUT_SAFE_CALL( cutCreateTimer(&hTimer) ); // allocate memory for arrays h_u1 = (float *)malloc(sizeof(float)*NX*NY*NZ); h_u2 = (float *)malloc(sizeof(float)*NX*NY*NZ); h_u3 = (float *)malloc(sizeof(float)*NX*NY*NZ); CUDA_SAFE_CALL( cudaMallocPitch((void **)&d_u1, &pitch_bytes, sizeof(float)*NX, NY*NZ) ); CUDA_SAFE_CALL( cudaMallocPitch((void **)&d_u2, &pitch_bytes, sizeof(float)*NX, NY*NZ) ); pitch = pitch_bytes/sizeof(float); // initialise u1 for (k=0; k<NZ; k++) { for (j=0; j<NY; j++) { for (i=0; i<NX; i++) { ind = i + j*NX + k*NX*NY; if (i==0 || i==NX-1 || j==0 || j==NY-1|| k==0 || k==NZ-1) h_u1[ind] = 1.0f; // Dirichlet b.c.'s else h_u1[ind] = 0.0f; } } } // copy u1 to device // CUT_SAFE_CALL(cutStartTimer(hTimer)); CUDA_SAFE_CALL( cudaMemcpy2D(d_u1, pitch_bytes, h_u1, sizeof(float)*NX, 
sizeof(float)*NX, NY*NZ, cudaMemcpyHostToDevice) ); CUDA_SAFE_CALL( cudaThreadSynchronize() ); // CUT_SAFE_CALL(cutStopTimer(hTimer)); // printf("\nCopy u1 to device: %f (ms) \n", cutGetTimerValue(hTimer)); // CUT_SAFE_CALL( cutResetTimer(hTimer) ); // Set up the execution configuration bx = 1 + (NX-1)/BLOCK_X; by = 1 + (NY-1)/BLOCK_Y; dim3 dimGrid(bx,by); dim3 dimBlock(BLOCK_X,BLOCK_Y); printf("[BENCH] dimGrid = %d %d %d \n",dimGrid.x,dimGrid.y,dimGrid.z); printf("[BENCH] dimBlock = %d %d %d \n",dimBlock.x,dimBlock.y,dimBlock.z); // Execute GPU kernel CUDA_SAFE_CALL( cudaThreadSynchronize() ); // CUT_SAFE_CALL( cutResetTimer(hTimer) ); // CUT_SAFE_CALL( cutStartTimer(hTimer) ); for (i = 1; i <= REPEAT; ++i) { GPU_laplace3d<<<dimGrid, dimBlock>>>(NX, NY, NZ, pitch, d_u1, d_u2); d_foo = d_u1; d_u1 = d_u2; d_u2 = d_foo; // swap d_u1 and d_u3 CUDA_SAFE_CALL( cudaThreadSynchronize() ); CUT_CHECK_ERROR("GPU_laplace3d execution failed\n"); } // CUT_SAFE_CALL( cutStopTimer(hTimer) ); // printf("\n%dx GPU_laplace3d: %f (ms) \n", REPEAT, cutGetTimerValue(hTimer)); // CUT_SAFE_CALL( cutResetTimer(hTimer) ); // Read back GPU results // CUT_SAFE_CALL( cutStartTimer(hTimer) ); CUDA_SAFE_CALL( cudaMemcpy2D(h_u2, sizeof(float)*NX, d_u1, pitch_bytes, sizeof(float)*NX, NY*NZ, cudaMemcpyDeviceToHost) ); // CUT_SAFE_CALL( cutStopTimer(hTimer) ); // printf("\nCopy u2 to host: %f (ms) \n", cutGetTimerValue(hTimer)); // CUT_SAFE_CALL( cutResetTimer(hTimer) ); // print out corner of array /* for (k=0; k<3; k++) { for (j=0; j<8; j++) { for (i=0; i<8; i++) { ind = i + j*NX + k*NX*NY; printf(" %5.2f ", h_u2[ind]); } printf("\n"); } printf("\n"); } */ // Gold treatment // CUT_SAFE_CALL( cutResetTimer(hTimer) ); // CUT_SAFE_CALL( cutStartTimer(hTimer) ); for (int i = 1; i <= REPEAT; ++i) { Gold_laplace3d(NX, NY, NZ, h_u1, h_u3); h_foo = h_u1; h_u1 = h_u3; h_u3 = h_foo; // swap h_u1 and h_u3 } // CUT_SAFE_CALL( cutStopTimer(hTimer) ); // printf("\n%dx Gold_laplace3d: %f (ms) \n \n", REPEAT, cutGetTimerValue(hTimer)); // print out corner of array /* for (k=0; k<3; k++) { for (j=0; j<8; j++) { for (i=0; i<8; i++) { ind = i + j*NX + k*NX*NY; printf(" %5.2f ", h_u1[ind]); } printf("\n"); } printf("\n"); } */ // error check err = 0.0; for (k=0; k<NZ; k++) { for (j=0; j<NY; j++) { for (i=0; i<NX; i++) { ind = i + j*NX + k*NX*NY; err += (h_u1[ind]-h_u2[ind])*(h_u1[ind]-h_u2[ind]); } } } printf("[BENCH] rms error = %f \n",sqrt(err/ (float)(NX*NY*NZ))); // Release GPU and CPU memory // printf("CUDA_SAFE_CALL( cudaFree(d_u1) );\n"); // fflush(stdout); CUDA_SAFE_CALL( cudaFree(d_u1) ); // printf("CUDA_SAFE_CALL( cudaFree(d_u2) );\n"); // fflush(stdout); CUDA_SAFE_CALL( cudaFree(d_u2) ); // printf("free(h_u1);\n"); // fflush(stdout); free(h_u1); // printf("free(h_u2);\n"); // fflush(stdout); free(h_u2); // printf("free(h_u3);\n"); // fflush(stdout); free(h_u3); // CUT_SAFE_CALL( cutDeleteTimer(hTimer) ); // CUT_EXIT(argc, argv); } /////////////////////////////////////////////////////////////////////////// //Print help screen /////////////////////////////////////////////////////////////////////////// void printHelp(void) { printf("Usage: laplace3d [OPTION]...\n"); printf("6-point stencil 3D Laplace test \n"); printf("\n"); printf("Example: run 100 iterations on a 256x128x128 grid\n"); printf("./laplace3d --nx=256 --ny=128 --nz=128 --repeat=100\n"); printf("\n"); printf("Options:\n"); printf("--help\t\t\tDisplay this help menu\n"); printf("--nx=[SIZE]\t\tGrid width\n"); printf("--ny=[SIZE]\t\tGrid height\n"); 
printf("--nz=[SIZE]\t\tGrid depth\n"); printf("--repeat=[COUNT]\tNumber of repetitions\n"); }
c3cc3796447f00b3e315273171485c97f1a55cfb.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" __global__ void GetThreadId( unsigned int *block, unsigned int *thread, unsigned int *warp, unsigned int *calc_thread, unsigned int *clocks) { int thread_id = blockDim.x * blockIdx.x + threadIdx.x; block[thread_id] = blockIdx.x; thread[thread_id] = threadIdx.x; warp[thread_id] = threadIdx.x / warpSize; calc_thread[thread_id] = thread_id; clocks[thread_id] = clock(); } #define ArraySize Arraysize * sizeof(unsigned int) int warp_main(int argc, char* argv[]) { unsigned int block_nums, thread_nums; printf("Input blocknums and threadnums\n"); scanf("%u%u", &block_nums, &thread_nums); const unsigned int Arraysize = block_nums * thread_nums; unsigned int *gpu_block, *gpu_thread, *gpu_warp, *gpu_calc, *gpu_clock; hipMalloc((void**)&gpu_block, ArraySize); hipMalloc((void**)&gpu_thread, ArraySize); hipMalloc((void**)&gpu_warp, ArraySize); hipMalloc((void**)&gpu_calc, ArraySize); hipMalloc((void**)&gpu_clock, ArraySize); GetThreadId << <block_nums, thread_nums >> >(gpu_block, gpu_thread, gpu_warp, gpu_calc, gpu_clock); unsigned int* cpu_block = new unsigned int[Arraysize]; unsigned int* cpu_thread = new unsigned int[Arraysize]; unsigned int* cpu_warp = new unsigned int[Arraysize]; unsigned int* cpu_calc = new unsigned int[Arraysize]; unsigned int* cpu_clock = new unsigned int[Arraysize]; hipMemcpy(cpu_block, gpu_block, ArraySize, hipMemcpyDeviceToHost); hipMemcpy(cpu_thread, gpu_thread, ArraySize, hipMemcpyDeviceToHost); hipMemcpy(cpu_warp, gpu_warp, ArraySize, hipMemcpyDeviceToHost); hipMemcpy(cpu_calc, gpu_calc, ArraySize, hipMemcpyDeviceToHost); hipMemcpy(cpu_clock, gpu_clock, ArraySize, hipMemcpyDeviceToHost); hipFree(gpu_block); hipFree(gpu_thread); hipFree(gpu_warp); hipFree(gpu_calc); hipFree(gpu_clock); for (unsigned int i = 0; i < Arraysize; i++) { // print the results; subtracting the first clock value recorded in each block gives every thread's start-time offset within its block, which is easier to read than raw cycle counts. printf("Calculated Thread: %3u- Block: %3u- Warp: %3u- Thread: %3u- Time: %3u\n", cpu_calc[i], cpu_block[i], cpu_warp[i], cpu_thread[i], cpu_clock[i] - cpu_clock[(i / thread_nums) * thread_nums]); } delete[] cpu_block; delete[] cpu_thread; delete[] cpu_warp; delete[] cpu_calc; delete[] cpu_clock; system("pause"); return 0; }
c3cc3796447f00b3e315273171485c97f1a55cfb.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" __global__ void GetThreadId( unsigned int *block, unsigned int *thread, unsigned int *warp, unsigned int *calc_thread, unsigned int *clocks) { int thread_id = blockDim.x * blockIdx.x + threadIdx.x; block[thread_id] = blockIdx.x; thread[thread_id] = threadIdx.x; warp[thread_id] = threadIdx.x / warpSize; calc_thread[thread_id] = thread_id; clocks[thread_id] = clock(); } #define ArraySize Arraysize * sizeof(unsigned int) int warp_main(int argc, char* argv[]) { unsigned int block_nums, thread_nums; printf("Input blocknums and threadnums\n"); scanf("%u%u", &block_nums, &thread_nums); const unsigned int Arraysize = block_nums * thread_nums; unsigned int *gpu_block, *gpu_thread, *gpu_warp, *gpu_calc, *gpu_clock; cudaMalloc((void**)&gpu_block, ArraySize); cudaMalloc((void**)&gpu_thread, ArraySize); cudaMalloc((void**)&gpu_warp, ArraySize); cudaMalloc((void**)&gpu_calc, ArraySize); cudaMalloc((void**)&gpu_clock, ArraySize); GetThreadId << <block_nums, thread_nums >> >(gpu_block, gpu_thread, gpu_warp, gpu_calc, gpu_clock); unsigned int* cpu_block = new unsigned int[Arraysize]; unsigned int* cpu_thread = new unsigned int[Arraysize]; unsigned int* cpu_warp = new unsigned int[Arraysize]; unsigned int* cpu_calc = new unsigned int[Arraysize]; unsigned int* cpu_clock = new unsigned int[Arraysize]; cudaMemcpy(cpu_block, gpu_block, ArraySize, cudaMemcpyDeviceToHost); cudaMemcpy(cpu_thread, gpu_thread, ArraySize, cudaMemcpyDeviceToHost); cudaMemcpy(cpu_warp, gpu_warp, ArraySize, cudaMemcpyDeviceToHost); cudaMemcpy(cpu_calc, gpu_calc, ArraySize, cudaMemcpyDeviceToHost); cudaMemcpy(cpu_clock, gpu_clock, ArraySize, cudaMemcpyDeviceToHost); cudaFree(gpu_block); cudaFree(gpu_thread); cudaFree(gpu_warp); cudaFree(gpu_calc); cudaFree(gpu_clock); for (unsigned int i = 0; i < Arraysize; i++) { // print the results; subtracting the first clock value recorded in each block gives every thread's start-time offset within its block, which is easier to read than raw cycle counts. printf("Calculated Thread: %3u- Block: %3u- Warp: %3u- Thread: %3u- Time: %3u\n", cpu_calc[i], cpu_block[i], cpu_warp[i], cpu_thread[i], cpu_clock[i] - cpu_clock[(i / thread_nums) * thread_nums]); } delete[] cpu_block; delete[] cpu_thread; delete[] cpu_warp; delete[] cpu_calc; delete[] cpu_clock; system("pause"); return 0; }
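clock() inside the kernel above returns per-SM cycle counters, which is handy for ordering threads within a block but awkward for wall-clock timing. A sketch using CUDA events is shown below; time_kernel_ms is an illustrative helper that reuses the GetThreadId kernel and the device buffers allocated in the program above.

#include <cuda_runtime.h>

// Time one launch of GetThreadId in milliseconds using CUDA events.
float time_kernel_ms(unsigned int blocks, unsigned int threads,
                     unsigned int* gpu_block, unsigned int* gpu_thread,
                     unsigned int* gpu_warp, unsigned int* gpu_calc,
                     unsigned int* gpu_clock)
{
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start);
  GetThreadId<<<blocks, threads>>>(gpu_block, gpu_thread, gpu_warp, gpu_calc, gpu_clock);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return ms;
}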
2b503c798bf207c3f642a09bf04a93a738b6c63a.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2019 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "xflow/ops.h" #include "xflow/cuda_helper.h" using namespace XFlow; void Matmul::map(void) { // create descriptors checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor)); helperSetTensorDescriptor(outputs[0], outputTensor); if (activation != AC_MODE_NONE) { cudnnActivationMode_t mode; switch (activation) { case AC_MODE_SIGMOID: mode = CUDNN_ACTIVATION_SIGMOID; break; case AC_MODE_RELU: mode = CUDNN_ACTIVATION_RELU; break; case AC_MODE_TANH: mode = CUDNN_ACTIVATION_TANH; break; default: assert(false); } checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc)); checkCUDNN(cudnnSetActivationDescriptor(actiDesc, mode, CUDNN_NOT_PROPAGATE_NAN, 0.0)); } // allocate tensors size_t outputSize = sizeof(DATATYPE) * outputs[0].volume(); checkCUDA(hipMalloc(&outputs[0].data_ptr, outputSize)); } void Matmul::unmap(void) { checkCUDNN(cudnnDestroyTensorDescriptor(outputTensor)); if (activation != AC_MODE_NONE) { checkCUDNN(cudnnDestroyActivationDescriptor(actiDesc)); } checkCUDA(hipFree(outputs[0].data_ptr)); } void Matmul::forward(bool block) { const float alpha = 1.0f; const float beta = 0.0f; int numDim = outputs[0].numDim; int m = inputs[0].dim[numDim-2]; int n = inputs[1].dim[numDim-1]; int k = inputs[0].dim[numDim-1]; hipblasOperation_t transA, transB; int lda, ldb, ldc; if (inputs[0].stride[numDim-2] == 1) { transA = HIPBLAS_OP_N; lda = inputs[0].stride[numDim-1]; } else { assert(inputs[0].stride[numDim-1] == 1); transA = HIPBLAS_OP_T; lda = inputs[0].stride[numDim-2]; } if (inputs[1].stride[numDim-2] == 1) { transB = HIPBLAS_OP_N; ldb = inputs[1].stride[numDim-1]; } else { assert(inputs[1].stride[numDim-1] == 1); transB = HIPBLAS_OP_T; ldb = inputs[1].stride[numDim-2]; } ldc = outputs[0].stride[numDim-1]; if (numDim == 2) { // Normal 2D Matmul checkCUDA(hipblasSgemm(model->blas, transA, transB, m, n, k, &alpha, (float*)inputs[0].data_ptr, lda, (float*)inputs[1].data_ptr, ldb, &beta, (float*)outputs[0].data_ptr, ldc)); } else { // Batched Matmul int strideA = inputs[0].stride[numDim-3]; int strideB = inputs[1].stride[numDim-3]; int strideC = outputs[0].stride[numDim-3]; int batch = 1; for (int i = 0; i < numDim-2; i++) batch *= outputs[0].dim[i]; checkCUDA(hipblasSgemmStridedBatched(model->blas, transA, transB, m, n, k, &alpha, (float*)inputs[0].data_ptr, lda, strideA, (float*)inputs[1].data_ptr, ldb, strideB, &beta, (float*)outputs[0].data_ptr, ldc, strideC, batch)); } if (activation != AC_MODE_NONE) checkCUDNN(cudnnActivationForward(model->dnn, actiDesc, &alpha, outputTensor, outputs[0].data_ptr, &beta, outputTensor, outputs[0].data_ptr)); if (block) checkCUDA(hipDeviceSynchronize()); } void Model::measure_matmul_cost(Matmul* mm) { const float alpha = 1.0f; const float beta = 0.0f; int numDim = mm->outputs[0].numDim; int m = mm->inputs[0].dim[numDim-2]; int n = mm->inputs[1].dim[numDim-1]; int k = mm->inputs[0].dim[numDim-1]; hipblasOperation_t transA, transB; int lda, 
ldb, ldc; if (mm->inputs[0].stride[numDim-2] == 1) { transA = HIPBLAS_OP_N; lda = mm->inputs[0].stride[numDim-1]; } else { assert(mm->inputs[0].stride[numDim-1] == 1); transA = HIPBLAS_OP_T; lda = mm->inputs[0].stride[numDim-2]; } if (mm->inputs[1].stride[numDim-2] == 1) { transB = HIPBLAS_OP_N; ldb = mm->inputs[1].stride[numDim-1]; } else { assert(mm->inputs[1].stride[numDim-1] == 1); transB = HIPBLAS_OP_T; ldb = mm->inputs[1].stride[numDim-2]; } ldc = mm->outputs[0].stride[numDim-1]; if (mm->activation != AC_MODE_NONE) { cudnnActivationMode_t mode; switch (mm->activation) { case AC_MODE_SIGMOID: mode = CUDNN_ACTIVATION_SIGMOID; break; case AC_MODE_RELU: mode = CUDNN_ACTIVATION_RELU; break; case AC_MODE_TANH: mode = CUDNN_ACTIVATION_TANH; break; default: assert(false); } checkCUDNN(cudnnSetActivationDescriptor(actiDesc, mode, CUDNN_NOT_PROPAGATE_NAN, 0.0)); } helperSetTensorDescriptor(mm->outputs[0], outputTensor); checkCUDA(hipDeviceSynchronize()); for (int i = 0; i < WARMUP_TIMES + REPEAT_TIMES; i++) { if (i == WARMUP_TIMES) checkCUDA(hipEventRecord(startEvent)); if (numDim == 2) { // Normal 2D Matmul checkCUDA(hipblasSgemm(blas, transA, transB, m, n, k, &alpha, inputPtr, lda, filterPtr, ldb, &beta, outputPtr, ldc)); } else { // Batched Matmul int strideA = mm->inputs[0].stride[numDim-3]; int strideB = mm->inputs[1].stride[numDim-3]; int strideC = mm->outputs[0].stride[numDim-3]; int batch = 1; for (int i = 0; i < numDim-2; i++) batch *= mm->outputs[0].dim[i]; checkCUDA(hipblasSgemmStridedBatched(blas, transA, transB, m, n, k, &alpha, inputPtr, lda, strideA, filterPtr, ldb, strideB, &beta, outputPtr, ldc, strideC, batch)); } if (mm->activation != AC_MODE_NONE) checkCUDNN(cudnnActivationForward(dnn, actiDesc, &alpha, outputTensor, outputPtr, &beta, outputTensor, outputPtr)); } checkCUDA(hipEventRecord(endEvent)); checkCUDA(hipEventSynchronize(endEvent)); float milliseconds; hipEventElapsedTime(&milliseconds, startEvent, endEvent); mm->runtime = milliseconds / REPEAT_TIMES; if (print_cost) printf(" measure[Matmul]: %s %s acti(%d) cost(%.4lf)\n", mm->inputs[0].to_string("input").c_str(), mm->inputs[1].to_string("weight").c_str(), mm->activation, mm->runtime); }
2b503c798bf207c3f642a09bf04a93a738b6c63a.cu
/* Copyright 2019 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "xflow/ops.h" #include "xflow/cuda_helper.h" using namespace XFlow; void Matmul::map(void) { // create descriptors checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor)); helperSetTensorDescriptor(outputs[0], outputTensor); if (activation != AC_MODE_NONE) { cudnnActivationMode_t mode; switch (activation) { case AC_MODE_SIGMOID: mode = CUDNN_ACTIVATION_SIGMOID; break; case AC_MODE_RELU: mode = CUDNN_ACTIVATION_RELU; break; case AC_MODE_TANH: mode = CUDNN_ACTIVATION_TANH; break; default: assert(false); } checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc)); checkCUDNN(cudnnSetActivationDescriptor(actiDesc, mode, CUDNN_NOT_PROPAGATE_NAN, 0.0)); } // allocate tensors size_t outputSize = sizeof(DATATYPE) * outputs[0].volume(); checkCUDA(cudaMalloc(&outputs[0].data_ptr, outputSize)); } void Matmul::unmap(void) { checkCUDNN(cudnnDestroyTensorDescriptor(outputTensor)); if (activation != AC_MODE_NONE) { checkCUDNN(cudnnDestroyActivationDescriptor(actiDesc)); } checkCUDA(cudaFree(outputs[0].data_ptr)); } void Matmul::forward(bool block) { const float alpha = 1.0f; const float beta = 0.0f; int numDim = outputs[0].numDim; int m = inputs[0].dim[numDim-2]; int n = inputs[1].dim[numDim-1]; int k = inputs[0].dim[numDim-1]; cublasOperation_t transA, transB; int lda, ldb, ldc; if (inputs[0].stride[numDim-2] == 1) { transA = CUBLAS_OP_N; lda = inputs[0].stride[numDim-1]; } else { assert(inputs[0].stride[numDim-1] == 1); transA = CUBLAS_OP_T; lda = inputs[0].stride[numDim-2]; } if (inputs[1].stride[numDim-2] == 1) { transB = CUBLAS_OP_N; ldb = inputs[1].stride[numDim-1]; } else { assert(inputs[1].stride[numDim-1] == 1); transB = CUBLAS_OP_T; ldb = inputs[1].stride[numDim-2]; } ldc = outputs[0].stride[numDim-1]; if (numDim == 2) { // Normal 2D Matmul checkCUDA(cublasSgemm(model->blas, transA, transB, m, n, k, &alpha, (float*)inputs[0].data_ptr, lda, (float*)inputs[1].data_ptr, ldb, &beta, (float*)outputs[0].data_ptr, ldc)); } else { // Batched Matmul int strideA = inputs[0].stride[numDim-3]; int strideB = inputs[1].stride[numDim-3]; int strideC = outputs[0].stride[numDim-3]; int batch = 1; for (int i = 0; i < numDim-2; i++) batch *= outputs[0].dim[i]; checkCUDA(cublasSgemmStridedBatched(model->blas, transA, transB, m, n, k, &alpha, (float*)inputs[0].data_ptr, lda, strideA, (float*)inputs[1].data_ptr, ldb, strideB, &beta, (float*)outputs[0].data_ptr, ldc, strideC, batch)); } if (activation != AC_MODE_NONE) checkCUDNN(cudnnActivationForward(model->dnn, actiDesc, &alpha, outputTensor, outputs[0].data_ptr, &beta, outputTensor, outputs[0].data_ptr)); if (block) checkCUDA(cudaDeviceSynchronize()); } void Model::measure_matmul_cost(Matmul* mm) { const float alpha = 1.0f; const float beta = 0.0f; int numDim = mm->outputs[0].numDim; int m = mm->inputs[0].dim[numDim-2]; int n = mm->inputs[1].dim[numDim-1]; int k = mm->inputs[0].dim[numDim-1]; cublasOperation_t transA, transB; int lda, ldb, ldc; if (mm->inputs[0].stride[numDim-2] == 1) { transA = 
CUBLAS_OP_N; lda = mm->inputs[0].stride[numDim-1]; } else { assert(mm->inputs[0].stride[numDim-1] == 1); transA = CUBLAS_OP_T; lda = mm->inputs[0].stride[numDim-2]; } if (mm->inputs[1].stride[numDim-2] == 1) { transB = CUBLAS_OP_N; ldb = mm->inputs[1].stride[numDim-1]; } else { assert(mm->inputs[1].stride[numDim-1] == 1); transB = CUBLAS_OP_T; ldb = mm->inputs[1].stride[numDim-2]; } ldc = mm->outputs[0].stride[numDim-1]; if (mm->activation != AC_MODE_NONE) { cudnnActivationMode_t mode; switch (mm->activation) { case AC_MODE_SIGMOID: mode = CUDNN_ACTIVATION_SIGMOID; break; case AC_MODE_RELU: mode = CUDNN_ACTIVATION_RELU; break; case AC_MODE_TANH: mode = CUDNN_ACTIVATION_TANH; break; default: assert(false); } checkCUDNN(cudnnSetActivationDescriptor(actiDesc, mode, CUDNN_NOT_PROPAGATE_NAN, 0.0)); } helperSetTensorDescriptor(mm->outputs[0], outputTensor); checkCUDA(cudaDeviceSynchronize()); for (int i = 0; i < WARMUP_TIMES + REPEAT_TIMES; i++) { if (i == WARMUP_TIMES) checkCUDA(cudaEventRecord(startEvent)); if (numDim == 2) { // Normal 2D Matmul checkCUDA(cublasSgemm(blas, transA, transB, m, n, k, &alpha, inputPtr, lda, filterPtr, ldb, &beta, outputPtr, ldc)); } else { // Batched Matmul int strideA = mm->inputs[0].stride[numDim-3]; int strideB = mm->inputs[1].stride[numDim-3]; int strideC = mm->outputs[0].stride[numDim-3]; int batch = 1; for (int i = 0; i < numDim-2; i++) batch *= mm->outputs[0].dim[i]; checkCUDA(cublasSgemmStridedBatched(blas, transA, transB, m, n, k, &alpha, inputPtr, lda, strideA, filterPtr, ldb, strideB, &beta, outputPtr, ldc, strideC, batch)); } if (mm->activation != AC_MODE_NONE) checkCUDNN(cudnnActivationForward(dnn, actiDesc, &alpha, outputTensor, outputPtr, &beta, outputTensor, outputPtr)); } checkCUDA(cudaEventRecord(endEvent)); checkCUDA(cudaEventSynchronize(endEvent)); float milliseconds; cudaEventElapsedTime(&milliseconds, startEvent, endEvent); mm->runtime = milliseconds / REPEAT_TIMES; if (print_cost) printf(" measure[Matmul]: %s %s acti(%d) cost(%.4lf)\n", mm->inputs[0].to_string("input").c_str(), mm->inputs[1].to_string("weight").c_str(), mm->activation, mm->runtime); }
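The transA/lda bookkeeping above exists because cuBLAS is column-major while the tensors carry row-major strides, so an operand whose last dimension has unit stride is handed to cuBLAS as the transpose of a column-major matrix. For reference, a plain column-major SGEMM call is sketched below; it is a standalone illustration with assumed device pointers, not the exact call issued by Matmul::forward.

#include <cublas_v2.h>

// C(m x n) = A(m x k) * B(k x n), all three stored column-major on the device.
void sgemm_colmajor(cublasHandle_t handle, int m, int n, int k,
                    const float* dA, const float* dB, float* dC)
{
  const float alpha = 1.0f, beta = 0.0f;
  cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
              m, n, k,
              &alpha,
              dA, m,   // lda: rows of A as stored
              dB, k,   // ldb: rows of B as stored
              &beta,
              dC, m);  // ldc: rows of C as stored
}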
9c6cf1a18d60c2ab430f9a98f1ce7833927b4847.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Software License Agreement(BSD License) * * Point Cloud Library(PCL) - www.pointclouds.org * Copyright(c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <icp_odometry/cuda_utils/internal.h> #include <icp_odometry/cuda_utils/containers/safe_call.hpp> __global__ void pyrDownGaussKernel(const PtrStepSz<unsigned short> src, PtrStepSz<unsigned short> dst, float sigma_color) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if(x >= dst.cols || y >= dst.rows) return; const int D = 5; int center = src.ptr(2 * y)[2 * x]; int x_mi = max(0, 2*x - D/2) - 2*x; int y_mi = max(0, 2*y - D/2) - 2*y; int x_ma = min(src.cols, 2*x -D/2+D) - 2*x; int y_ma = min(src.rows, 2*y -D/2+D) - 2*y; float sum = 0; float wall = 0; float weights[] = {0.375f, 0.25f, 0.0625f} ; for(int yi = y_mi; yi < y_ma; ++yi) for(int xi = x_mi; xi < x_ma; ++xi) { int val = src.ptr(2*y + yi)[2*x + xi]; if(abs(val - center) < 3 * sigma_color) { sum += val * weights[abs(xi)] * weights[abs(yi)]; wall += weights[abs(xi)] * weights[abs(yi)]; } } dst.ptr(y)[x] = static_cast<int>(sum /wall); } void pyrDown(const DeviceArray2D<unsigned short> & src, DeviceArray2D<unsigned short> & dst) { dst.create(src.rows() / 2, src.cols() / 2); dim3 block(32, 8); dim3 grid(divUp(dst.cols(), block.x), divUp(dst.rows(), block.y)); const float sigma_color = 30; hipLaunchKernelGGL(( pyrDownGaussKernel), dim3(grid), dim3(block), 0, 0, src, dst, sigma_color); cudaSafeCall( hipGetLastError() ); }; __global__ void computeVmapKernel(const PtrStepSz<unsigned short> depth, PtrStep<float> vmap, float fx_inv, float fy_inv, float cx, float cy, float depthCutoff) { int u = threadIdx.x + blockIdx.x * blockDim.x; int v = threadIdx.y + blockIdx.y * blockDim.y; if(u < depth.cols && v < depth.rows) { float z = depth.ptr(v)[u] / 1000.f; // load and convert: mm -> meters if(z != 0 && z < depthCutoff) { float vx = z *(u - cx) * fx_inv; float vy = z *(v - cy) * fy_inv; float vz = z; 
vmap.ptr(v )[u] = vx; vmap.ptr(v + depth.rows)[u] = vy; vmap.ptr(v + depth.rows * 2)[u] = vz; } else { vmap.ptr(v)[u] = __int_as_float(0x7fffffff); /*CUDART_NAN_F*/ } } } void createVMap(const Intr& intr, const DeviceArray2D<unsigned short> & depth, DeviceArray2D<float> & vmap, const float depthCutoff) { vmap.create(depth.rows() * 3, depth.cols()); dim3 block(32, 8); dim3 grid(1, 1, 1); grid.x = divUp(depth.cols(), block.x); grid.y = divUp(depth.rows(), block.y); float fx = intr.fx, cx = intr.cx; float fy = intr.fy, cy = intr.cy; hipLaunchKernelGGL(( computeVmapKernel), dim3(grid), dim3(block), 0, 0, depth, vmap, 1.f / fx, 1.f / fy, cx, cy, depthCutoff); cudaSafeCall(hipGetLastError()); } __global__ void computeNmapKernel(int rows, int cols, const PtrStep<float> vmap, PtrStep<float> nmap) { int u = threadIdx.x + blockIdx.x * blockDim.x; int v = threadIdx.y + blockIdx.y * blockDim.y; if(u >= cols || v >= rows) return; if(u == cols - 1 || v == rows - 1) { nmap.ptr(v)[u] = __int_as_float(0x7fffffff); /*CUDART_NAN_F*/ return; } Eigen::Matrix<float,3,1,Eigen::DontAlign> v00, v01, v10; v00(0) = vmap.ptr(v)[u]; v01(0) = vmap.ptr(v)[u + 1]; v10(0) = vmap.ptr(v + 1)[u]; if(!isnan(v00(0)) && !isnan(v01(0)) && !isnan(v10(0))) { v00(1) = vmap.ptr(v + rows)[u]; v01(1) = vmap.ptr(v + rows)[u + 1]; v10(1) = vmap.ptr(v + 1 + rows)[u]; v00(2) = vmap.ptr(v + 2 * rows)[u]; v01(2) = vmap.ptr(v + 2 * rows)[u + 1]; v10(2) = vmap.ptr(v + 1 + 2 * rows)[u]; Eigen::Matrix<float,3,1,Eigen::DontAlign> r = (v01 - v00).cross(v10 - v00).normalized(); nmap.ptr(v)[u] = r(0); nmap.ptr(v + rows)[u] = r(1); nmap.ptr(v + 2 * rows)[u] = r(2); } else nmap.ptr(v)[u] = __int_as_float(0x7fffffff); /*CUDART_NAN_F*/ } void createNMap(const DeviceArray2D<float>& vmap, DeviceArray2D<float>& nmap) { nmap.create(vmap.rows(), vmap.cols()); int rows = vmap.rows() / 3; int cols = vmap.cols(); dim3 block(32, 8); dim3 grid(1, 1, 1); grid.x = divUp(cols, block.x); grid.y = divUp(rows, block.y); hipLaunchKernelGGL(( computeNmapKernel), dim3(grid), dim3(block), 0, 0, rows, cols, vmap, nmap); cudaSafeCall(hipGetLastError()); }
9c6cf1a18d60c2ab430f9a98f1ce7833927b4847.cu
/* * Software License Agreement(BSD License) * * Point Cloud Library(PCL) - www.pointclouds.org * Copyright(c) 2011, Willow Garage, Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Willow Garage, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <icp_odometry/cuda_utils/internal.h> #include <icp_odometry/cuda_utils/containers/safe_call.hpp> __global__ void pyrDownGaussKernel(const PtrStepSz<unsigned short> src, PtrStepSz<unsigned short> dst, float sigma_color) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if(x >= dst.cols || y >= dst.rows) return; const int D = 5; int center = src.ptr(2 * y)[2 * x]; int x_mi = max(0, 2*x - D/2) - 2*x; int y_mi = max(0, 2*y - D/2) - 2*y; int x_ma = min(src.cols, 2*x -D/2+D) - 2*x; int y_ma = min(src.rows, 2*y -D/2+D) - 2*y; float sum = 0; float wall = 0; float weights[] = {0.375f, 0.25f, 0.0625f} ; for(int yi = y_mi; yi < y_ma; ++yi) for(int xi = x_mi; xi < x_ma; ++xi) { int val = src.ptr(2*y + yi)[2*x + xi]; if(abs(val - center) < 3 * sigma_color) { sum += val * weights[abs(xi)] * weights[abs(yi)]; wall += weights[abs(xi)] * weights[abs(yi)]; } } dst.ptr(y)[x] = static_cast<int>(sum /wall); } void pyrDown(const DeviceArray2D<unsigned short> & src, DeviceArray2D<unsigned short> & dst) { dst.create(src.rows() / 2, src.cols() / 2); dim3 block(32, 8); dim3 grid(divUp(dst.cols(), block.x), divUp(dst.rows(), block.y)); const float sigma_color = 30; pyrDownGaussKernel<<<grid, block>>>(src, dst, sigma_color); cudaSafeCall( cudaGetLastError() ); }; __global__ void computeVmapKernel(const PtrStepSz<unsigned short> depth, PtrStep<float> vmap, float fx_inv, float fy_inv, float cx, float cy, float depthCutoff) { int u = threadIdx.x + blockIdx.x * blockDim.x; int v = threadIdx.y + blockIdx.y * blockDim.y; if(u < depth.cols && v < depth.rows) { float z = depth.ptr(v)[u] / 1000.f; // load and convert: mm -> meters if(z != 0 && z < depthCutoff) { float vx = z *(u - cx) * fx_inv; float vy = z *(v - cy) * fy_inv; float vz = z; vmap.ptr(v )[u] = vx; vmap.ptr(v + depth.rows)[u] = vy; vmap.ptr(v + depth.rows * 2)[u] = vz; } else { vmap.ptr(v)[u] = 
__int_as_float(0x7fffffff); /*CUDART_NAN_F*/ } } } void createVMap(const Intr& intr, const DeviceArray2D<unsigned short> & depth, DeviceArray2D<float> & vmap, const float depthCutoff) { vmap.create(depth.rows() * 3, depth.cols()); dim3 block(32, 8); dim3 grid(1, 1, 1); grid.x = divUp(depth.cols(), block.x); grid.y = divUp(depth.rows(), block.y); float fx = intr.fx, cx = intr.cx; float fy = intr.fy, cy = intr.cy; computeVmapKernel<<<grid, block>>>(depth, vmap, 1.f / fx, 1.f / fy, cx, cy, depthCutoff); cudaSafeCall(cudaGetLastError()); } __global__ void computeNmapKernel(int rows, int cols, const PtrStep<float> vmap, PtrStep<float> nmap) { int u = threadIdx.x + blockIdx.x * blockDim.x; int v = threadIdx.y + blockIdx.y * blockDim.y; if(u >= cols || v >= rows) return; if(u == cols - 1 || v == rows - 1) { nmap.ptr(v)[u] = __int_as_float(0x7fffffff); /*CUDART_NAN_F*/ return; } Eigen::Matrix<float,3,1,Eigen::DontAlign> v00, v01, v10; v00(0) = vmap.ptr(v)[u]; v01(0) = vmap.ptr(v)[u + 1]; v10(0) = vmap.ptr(v + 1)[u]; if(!isnan(v00(0)) && !isnan(v01(0)) && !isnan(v10(0))) { v00(1) = vmap.ptr(v + rows)[u]; v01(1) = vmap.ptr(v + rows)[u + 1]; v10(1) = vmap.ptr(v + 1 + rows)[u]; v00(2) = vmap.ptr(v + 2 * rows)[u]; v01(2) = vmap.ptr(v + 2 * rows)[u + 1]; v10(2) = vmap.ptr(v + 1 + 2 * rows)[u]; Eigen::Matrix<float,3,1,Eigen::DontAlign> r = (v01 - v00).cross(v10 - v00).normalized(); nmap.ptr(v)[u] = r(0); nmap.ptr(v + rows)[u] = r(1); nmap.ptr(v + 2 * rows)[u] = r(2); } else nmap.ptr(v)[u] = __int_as_float(0x7fffffff); /*CUDART_NAN_F*/ } void createNMap(const DeviceArray2D<float>& vmap, DeviceArray2D<float>& nmap) { nmap.create(vmap.rows(), vmap.cols()); int rows = vmap.rows() / 3; int cols = vmap.cols(); dim3 block(32, 8); dim3 grid(1, 1, 1); grid.x = divUp(cols, block.x); grid.y = divUp(rows, block.y); computeNmapKernel<<<grid, block>>>(rows, cols, vmap, nmap); cudaSafeCall(cudaGetLastError()); }
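The grid dimensions above are computed with a divUp helper declared in the icp_odometry cuda_utils headers, which are not included in this dump; presumably it is the usual round-up integer division, sketched here as an assumption.

// Assumed definition of divUp: smallest number of blocks of size 'grain'
// that covers 'total' elements.
static inline int divUp(int total, int grain)
{
  return (total + grain - 1) / grain;
}

// Example: a 640x480 depth image with a 32x8 block gives a 20x60 grid, and the
// in-kernel bounds checks (x >= dst.cols || y >= dst.rows) mask the partial
// blocks at the image edges.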
0571b1f517fd932f6ca0894c6586767bd21b6d60.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "..\cumath\cumath.cuh" #define LOOP_UNROLL // Bilinear Interpolation __device__ float bilinear(float q11, float q12, float q21, float q22, float scale) { return (1.0f - scale)*(1.0f - scale)*q11 + (1.0f - scale)*scale*q12 + scale*(1.0f - scale)*q21 + scale*scale*q22; } //__global__ void __launch_bounds__(MAX_BLOCK_SIZE, MIN_BLOCKS_PER_SM) resize(uchar* d_input, size_t in_pitch, int height, int width, uchar* d_output, size_t out_pitch, float scale) __global__ void resize(uchar* d_input, size_t in_pitch, int height, int width, uchar* d_output, size_t out_pitch, float scale) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; for (uint i = row; i < height; i += blockDim.y*gridDim.y) //#pragma unroll for (uint j = col; j < width; j += blockDim.x*gridDim.x) { #ifdef LOOP_UNROLL if (threadIdx.y + 1 < blockDim.y) { int r = i*scale, c = j*scale; uchar *q11 = (uchar*)((char*)d_input + r*in_pitch) + c; uchar *q12 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c; uchar *q21 = (uchar*)((char*)d_input + r*in_pitch) + c + 1; uchar *q22 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c + 1; // Bilinear Interpolation float p = bilinear(*q11, *q12, *q21, *q22, scale); uchar *outputPixel = (uchar*)((char*)d_output + i*out_pitch) + j; *outputPixel = (uchar)p; r = r + 1; q11 = (uchar*)((char*)d_input + r*in_pitch) + c; q12 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c; q21 = (uchar*)((char*)d_input + r*in_pitch) + c + 1; q22 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c + 1; p = bilinear(*q11, *q12, *q21, *q22, scale); outputPixel = (uchar*)((char*)d_output + (i+1)*out_pitch) + j; *outputPixel = (uchar)p; } #else #pragma unroll for (uint k = 0; k < 2; k++) { if (threadIdx.y + 1 < blockDim.y) { int r = i*scale + k, c = j*scale; uchar *q11 = (uchar*)((char*)d_input + r*in_pitch) + c; uchar *q12 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c; uchar *q21 = (uchar*)((char*)d_input + r*in_pitch) + c + 1; uchar *q22 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c + 1; uchar *outputPixel = (uchar*)((char*)d_output + (i + k)*out_pitch) + j; float p = bilinear(*q11, *q12, *q21, *q22, scale); *outputPixel = (uchar)p; } } #endif // } } extern "C" void cudaResize(const cv::Mat & input, cv::Mat & output, float scale) { int newRow = int(input.rows * scale); int newCol = int(input.cols * scale); output = cv::Mat(cv::Size(newCol, newRow), CV_8U, cv::Scalar(0)); scale = 1.0f / scale; // define block size and thread size dim3 threadSize(MAX_THREADS, 4); dim3 blockSize(output.cols / (4 * threadSize.x), output.rows / (4 * threadSize.y)); // I divide the image into 16 grid to increase ILP level. hipStream_t stream; hipStreamCreate(&stream); size_t in_pitch, out_pitch; uchar *d_input, *d_output; hipMallocPitch(&d_input, &in_pitch, sizeof(uchar)*input.cols, input.rows); hipMemcpy2DAsync(d_input, in_pitch, input.data, sizeof(uchar)*input.cols, sizeof(uchar)*input.cols, input.rows, hipMemcpyHostToDevice, stream); hipMallocPitch(&d_output, &out_pitch, sizeof(uchar)*output.cols, output.rows); hipLaunchKernelGGL(( resize), dim3(blockSize), dim3(threadSize), 0, stream, d_input, in_pitch, output.rows, output.cols, d_output, out_pitch, scale); hipDeviceSynchronize(); hipMemcpy2D(output.data, sizeof(uchar)*output.cols, d_output, out_pitch, sizeof(uchar)*output.cols, output.rows, hipMemcpyDeviceToHost); // resource releasing hipStreamDestroy(stream); hipFree(d_input); hipFree(d_output); }
0571b1f517fd932f6ca0894c6586767bd21b6d60.cu
#include "..\cumath\cumath.cuh" #define LOOP_UNROLL // Bilinear Interpolation __device__ float bilinear(float q11, float q12, float q21, float q22, float scale) { return (1.0f - scale)*(1.0f - scale)*q11 + (1.0f - scale)*scale*q12 + scale*(1.0f - scale)*q21 + scale*scale*q22; } //__global__ void __launch_bounds__(MAX_BLOCK_SIZE, MIN_BLOCKS_PER_SM) resize(uchar* d_input, size_t in_pitch, int height, int width, uchar* d_output, size_t out_pitch, float scale) __global__ void resize(uchar* d_input, size_t in_pitch, int height, int width, uchar* d_output, size_t out_pitch, float scale) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; for (uint i = row; i < height; i += blockDim.y*gridDim.y) //#pragma unroll for (uint j = col; j < width; j += blockDim.x*gridDim.x) { #ifdef LOOP_UNROLL if (threadIdx.y + 1 < blockDim.y) { int r = i*scale, c = j*scale; uchar *q11 = (uchar*)((char*)d_input + r*in_pitch) + c; uchar *q12 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c; uchar *q21 = (uchar*)((char*)d_input + r*in_pitch) + c + 1; uchar *q22 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c + 1; // Bilinear Interpolation float p = bilinear(*q11, *q12, *q21, *q22, scale); uchar *outputPixel = (uchar*)((char*)d_output + i*out_pitch) + j; *outputPixel = (uchar)p; r = r + 1; q11 = (uchar*)((char*)d_input + r*in_pitch) + c; q12 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c; q21 = (uchar*)((char*)d_input + r*in_pitch) + c + 1; q22 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c + 1; p = bilinear(*q11, *q12, *q21, *q22, scale); outputPixel = (uchar*)((char*)d_output + (i+1)*out_pitch) + j; *outputPixel = (uchar)p; } #else #pragma unroll for (uint k = 0; k < 2; k++) { if (threadIdx.y + 1 < blockDim.y) { int r = i*scale + k, c = j*scale; uchar *q11 = (uchar*)((char*)d_input + r*in_pitch) + c; uchar *q12 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c; uchar *q21 = (uchar*)((char*)d_input + r*in_pitch) + c + 1; uchar *q22 = (uchar*)((char*)d_input + (r + 1)*in_pitch) + c + 1; uchar *outputPixel = (uchar*)((char*)d_output + (i + k)*out_pitch) + j; float p = bilinear(*q11, *q12, *q21, *q22, scale); *outputPixel = (uchar)p; } } #endif // } } extern "C" void cudaResize(const cv::Mat & input, cv::Mat & output, float scale) { int newRow = int(input.rows * scale); int newCol = int(input.cols * scale); output = cv::Mat(cv::Size(newCol, newRow), CV_8U, cv::Scalar(0)); scale = 1.0f / scale; // define block size and thread size dim3 threadSize(MAX_THREADS, 4); dim3 blockSize(output.cols / (4 * threadSize.x), output.rows / (4 * threadSize.y)); // I divide the image into 16 grid to increase ILP level. cudaStream_t stream; cudaStreamCreate(&stream); size_t in_pitch, out_pitch; uchar *d_input, *d_output; cudaMallocPitch(&d_input, &in_pitch, sizeof(uchar)*input.cols, input.rows); cudaMemcpy2DAsync(d_input, in_pitch, input.data, sizeof(uchar)*input.cols, sizeof(uchar)*input.cols, input.rows, cudaMemcpyHostToDevice, stream); cudaMallocPitch(&d_output, &out_pitch, sizeof(uchar)*output.cols, output.rows); resize<<<blockSize, threadSize, 0, stream>>>(d_input, in_pitch, output.rows, output.cols, d_output, out_pitch, scale); cudaDeviceSynchronize(); cudaMemcpy2D(output.data, sizeof(uchar)*output.cols, d_output, out_pitch, sizeof(uchar)*output.cols, output.rows, cudaMemcpyDeviceToHost); // resource releasing cudaStreamDestroy(stream); cudaFree(d_input); cudaFree(d_output); }
703da02dd01e4dd6b483929031757b8770c559c1.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdlib> #include <ctime> #include <iostream> #include <iterator> #include <gtest/gtest.h> #include <common/host_buffer.hpp> namespace MLCommon { TEST(HostBufferTest, ctor) { std::shared_ptr<hostAllocator> allocator(new defaultHostAllocator); hipStream_t stream = 0; const int size = 4; host_buffer<int> buffer(allocator, stream, size); ASSERT_EQ(size, buffer.size()); } TEST(HostBufferTest, clear) { std::shared_ptr<hostAllocator> allocator(new defaultHostAllocator); hipStream_t stream = 0; const int size = 8; host_buffer<int> buffer(allocator, stream, size); ASSERT_EQ(size, buffer.size()); buffer.clear(); ASSERT_EQ(0, buffer.size()); } TEST(HostBufferTest, itiface) { std::shared_ptr<hostAllocator> allocator(new defaultHostAllocator); hipStream_t stream = 0; const int size = 8; host_buffer<int> buffer(allocator, stream, size); ASSERT_EQ(std::distance(buffer.begin(), buffer.end()), buffer.size()); } TEST(HostBufferTest, reserve) { std::shared_ptr<hostAllocator> allocator(new defaultHostAllocator); hipStream_t stream = 0; constexpr int size = 8; constexpr int capacity = 16; static_assert(capacity > size, "capacity must be larger than size for test to work"); host_buffer<int> buffer(allocator, stream, 0); buffer.reserve(capacity, stream); ASSERT_NE(nullptr, buffer.data()); const int* const data_ptr = buffer.data(); buffer.resize(size, stream); ASSERT_EQ(data_ptr, buffer.data()); } TEST(HostBufferTest, resize) { std::shared_ptr<hostAllocator> allocator(new defaultHostAllocator); hipStream_t stream = 0; std::srand(std::time(nullptr)); const int random_variable = std::rand(); const int size = 1; host_buffer<int> buffer(allocator, stream, size); buffer[0] = random_variable; const int* const data_ptr = buffer.data(); buffer.resize(4, stream); ASSERT_EQ(random_variable, buffer[0]); ASSERT_NE(data_ptr, buffer.data()); } TEST(HostBufferTest, release) { std::shared_ptr<hostAllocator> allocator(new defaultHostAllocator); hipStream_t stream = 0; const int size = 8; host_buffer<int> buffer(allocator, stream, size); ASSERT_EQ(size, buffer.size()); ASSERT_NE(nullptr, buffer.data()); buffer.release(stream); ASSERT_EQ(0, buffer.size()); ASSERT_EQ(nullptr, buffer.data()); } } // end namespace MLCommon
703da02dd01e4dd6b483929031757b8770c559c1.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdlib> #include <ctime> #include <iostream> #include <iterator> #include <gtest/gtest.h> #include <common/host_buffer.hpp> namespace MLCommon { TEST(HostBufferTest, ctor) { std::shared_ptr<hostAllocator> allocator(new defaultHostAllocator); cudaStream_t stream = 0; const int size = 4; host_buffer<int> buffer(allocator, stream, size); ASSERT_EQ(size, buffer.size()); } TEST(HostBufferTest, clear) { std::shared_ptr<hostAllocator> allocator(new defaultHostAllocator); cudaStream_t stream = 0; const int size = 8; host_buffer<int> buffer(allocator, stream, size); ASSERT_EQ(size, buffer.size()); buffer.clear(); ASSERT_EQ(0, buffer.size()); } TEST(HostBufferTest, itiface) { std::shared_ptr<hostAllocator> allocator(new defaultHostAllocator); cudaStream_t stream = 0; const int size = 8; host_buffer<int> buffer(allocator, stream, size); ASSERT_EQ(std::distance(buffer.begin(), buffer.end()), buffer.size()); } TEST(HostBufferTest, reserve) { std::shared_ptr<hostAllocator> allocator(new defaultHostAllocator); cudaStream_t stream = 0; constexpr int size = 8; constexpr int capacity = 16; static_assert(capacity > size, "capacity must be larger than size for test to work"); host_buffer<int> buffer(allocator, stream, 0); buffer.reserve(capacity, stream); ASSERT_NE(nullptr, buffer.data()); const int* const data_ptr = buffer.data(); buffer.resize(size, stream); ASSERT_EQ(data_ptr, buffer.data()); } TEST(HostBufferTest, resize) { std::shared_ptr<hostAllocator> allocator(new defaultHostAllocator); cudaStream_t stream = 0; std::srand(std::time(nullptr)); const int random_variable = std::rand(); const int size = 1; host_buffer<int> buffer(allocator, stream, size); buffer[0] = random_variable; const int* const data_ptr = buffer.data(); buffer.resize(4, stream); ASSERT_EQ(random_variable, buffer[0]); ASSERT_NE(data_ptr, buffer.data()); } TEST(HostBufferTest, release) { std::shared_ptr<hostAllocator> allocator(new defaultHostAllocator); cudaStream_t stream = 0; const int size = 8; host_buffer<int> buffer(allocator, stream, size); ASSERT_EQ(size, buffer.size()); ASSERT_NE(nullptr, buffer.data()); buffer.release(stream); ASSERT_EQ(0, buffer.size()); ASSERT_EQ(nullptr, buffer.data()); } } // end namespace MLCommon
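The reserve/resize test above encodes the usual capacity contract: after reserve(capacity, stream), a later resize to any size not exceeding that capacity must not reallocate, so data() stays stable. A minimal standalone sketch of the same invariant, using std::vector purely as an analogy (host_buffer itself is the type actually under test above):

#include <cassert>
#include <vector>

int main()
{
    std::vector<int> buffer;
    buffer.reserve(16);                  // allocate capacity up front
    buffer.resize(1);
    const int* const data_ptr = buffer.data();

    buffer.resize(8);                    // still within the reserved capacity
    assert(data_ptr == buffer.data());   // no reallocation, pointer unchanged

    buffer.resize(32);                   // exceeds capacity: may reallocate
    return 0;
}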
c1532849fb15b411c9e602ba6500e737780152cf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #include "optixCutouts.h" #include <cuda/random.h> #include <sutil/vec_math.h> #include <cuda/helpers.h> extern "C" { __constant__ Params params; } //------------------------------------------------------------------------------ // // // //------------------------------------------------------------------------------ struct RadiancePRD { // TODO: move some state directly into payload registers? 
float3 emitted; float3 radiance; float3 attenuation; float3 origin; float3 direction; unsigned int seed; int countEmitted; int done; int pad; }; struct Onb { __forceinline__ __device__ Onb( const float3& normal ) { m_normal = normal; if( fabs( m_normal.x ) > fabs( m_normal.z ) ) { m_binormal.x = -m_normal.y; m_binormal.y = m_normal.x; m_binormal.z = 0; } else { m_binormal.x = 0; m_binormal.y = -m_normal.z; m_binormal.z = m_normal.y; } m_binormal = normalize( m_binormal ); m_tangent = cross( m_binormal, m_normal ); } __forceinline__ __device__ void inverse_transform( float3& p ) const { p = p.x * m_tangent + p.y * m_binormal + p.z * m_normal; } float3 m_tangent; float3 m_binormal; float3 m_normal; }; //------------------------------------------------------------------------------ // // // //------------------------------------------------------------------------------ static __forceinline__ __device__ void* unpackPointer( unsigned int i0, unsigned int i1 ) { const unsigned long long uptr = static_cast<unsigned long long>( i0 ) << 32 | i1; void* ptr = reinterpret_cast<void*>( uptr ); return ptr; } static __forceinline__ __device__ void packPointer( void* ptr, unsigned int& i0, unsigned int& i1 ) { const unsigned long long uptr = reinterpret_cast<unsigned long long>( ptr ); i0 = uptr >> 32; i1 = uptr & 0x00000000ffffffff; } static __forceinline__ __device__ RadiancePRD* getPRD() { const unsigned int u0 = optixGetPayload_0(); const unsigned int u1 = optixGetPayload_1(); return reinterpret_cast<RadiancePRD*>( unpackPointer( u0, u1 ) ); } static __forceinline__ __device__ void setPayloadOcclusion( bool occluded ) { optixSetPayload_0( static_cast<unsigned int>( occluded ) ); } static __forceinline__ __device__ void cosine_sample_hemisphere( const float u1, const float u2, float3& p ) { // Uniformly sample disk. const float r = sqrtf( u1 ); const float phi = 2.0f * M_PIf * u2; p.x = r * cosf( phi ); p.y = r * sinf( phi ); // Project up to hemisphere. 
p.z = sqrtf( fmaxf( 0.0f, 1.0f - p.x * p.x - p.y * p.y ) ); } static __forceinline__ __device__ void traceRadiance( OptixTraversableHandle handle, float3 ray_origin, float3 ray_direction, float tmin, float tmax, RadiancePRD* prd ) { // TODO: deduce stride from num ray-types passed in params unsigned int u0, u1; packPointer( prd, u0, u1 ); optixTrace( handle, ray_origin, ray_direction, tmin, tmax, 0.0f, // rayTime OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_NONE, RAY_TYPE_RADIANCE, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_RADIANCE, // missSBTIndex u0, u1 ); } static __forceinline__ __device__ bool traceOcclusion( OptixTraversableHandle handle, float3 ray_origin, float3 ray_direction, float tmin, float tmax ) { unsigned int occluded = 0u; optixTrace( handle, ray_origin, ray_direction, tmin, tmax, 0.0f, // rayTime OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, RAY_TYPE_OCCLUSION, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_OCCLUSION, // missSBTIndex occluded ); return occluded; } //------------------------------------------------------------------------------ // // // //------------------------------------------------------------------------------ extern "C" __global__ void __raygen__rg() { const int w = params.width; const int h = params.height; const float3 eye = params.eye; const float3 U = params.U; const float3 V = params.V; const float3 W = params.W; const uint3 idx = optixGetLaunchIndex(); const int subframe_index = params.subframe_index; unsigned int seed = tea<4>( idx.y*w + idx.x, subframe_index ); float3 result = make_float3( 0.0f ); int i = params.samples_per_launch; do { // The center of each pixel is at fraction (0.5,0.5) const float2 subpixel_jitter = make_float2( rnd( seed ), rnd( seed ) ); const float2 d = 2.0f * make_float2( ( static_cast<float>( idx.x ) + subpixel_jitter.x ) / static_cast<float>( w ), ( static_cast<float>( idx.y ) + subpixel_jitter.y ) / static_cast<float>( h ) ) - 1.0f; float3 ray_direction = normalize(d.x*U + d.y*V + W); float3 ray_origin = eye; RadiancePRD prd; prd.emitted = make_float3(0.f); prd.radiance = make_float3(0.f); prd.attenuation = make_float3(1.f); prd.countEmitted = true; prd.done = false; prd.seed = seed; int depth = 0; for( ;; ) { traceRadiance( params.handle, ray_origin, ray_direction, 0.01f, // tmin // TODO: smarter offset 1e16f, // tmax &prd ); result += prd.emitted; result += prd.radiance * prd.attenuation; if( prd.done || depth >= 3 ) // TODO RR, variable for depth break; ray_origin = prd.origin; ray_direction = prd.direction; ++depth; } } while( --i ); const uint3 launch_index = optixGetLaunchIndex(); const unsigned int image_index = launch_index.y * params.width + launch_index.x; float3 accum_color = result / static_cast<float>( params.samples_per_launch ); if( subframe_index > 0 ) { const float a = 1.0f / static_cast<float>( subframe_index+1 ); const float3 accum_color_prev = make_float3( params.accum_buffer[ image_index ]); accum_color = lerp( accum_color_prev, accum_color, a ); } params.accum_buffer[ image_index ] = make_float4( accum_color, 1.0f); params.frame_buffer[ image_index ] = make_color ( accum_color ); } extern "C" __global__ void __miss__radiance() { MissData* rt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() ); RadiancePRD* prd = getPRD(); prd->radiance = make_float3( rt_data->r, rt_data->g, rt_data->b ); prd->done = true; } extern "C" __global__ void __anyhit__ah() { const unsigned int hit_kind = optixGetHitKind(); HitGroupData* rt_data = 
(HitGroupData*)optixGetSbtDataPointer(); const int prim_idx = optixGetPrimitiveIndex(); // The texture coordinates are defined per-vertex for built-in triangles, // and are derived from the surface normal for our custom sphere geometry. float3 texcoord; if( optixIsTriangleHit() ) { const int vert_idx_offset = prim_idx*3; const float2 barycentrics = optixGetTriangleBarycentrics(); const float2 t0 = rt_data->tex_coords[ vert_idx_offset+0 ]; const float2 t1 = rt_data->tex_coords[ vert_idx_offset+1 ]; const float2 t2 = rt_data->tex_coords[ vert_idx_offset+2 ]; texcoord = make_float3( t0 * (1.0f - barycentrics.x - barycentrics.y) + t1 * barycentrics.x + t2 * barycentrics.y ); } else { const float3 normal = make_float3( int_as_float( optixGetAttribute_0() ), int_as_float( optixGetAttribute_1() ), int_as_float( optixGetAttribute_2() ) ); // TODO: Pass UV scale in SBT? const float uv_scale = 16.0f; const float u = uv_scale * ( 0.5f + atan2f( normal.z, normal.x ) * 0.5f * M_1_PIf ); const float v = uv_scale * ( 0.5f - asinf( normal.y ) * M_1_PIf ); texcoord = make_float3( u, v, 0.0f ); } int which_check = (static_cast<int>(texcoord.x) + static_cast<int>(texcoord.y)) & 1; if( which_check == 0 ) { optixIgnoreIntersection(); } } extern "C" __global__ void __closesthit__occlusion() { setPayloadOcclusion( true ); } extern "C" __global__ void __closesthit__radiance() { HitGroupData* rt_data = (HitGroupData*)optixGetSbtDataPointer(); RadiancePRD* prd = getPRD(); const int prim_idx = optixGetPrimitiveIndex(); const float3 ray_dir = optixGetWorldRayDirection(); const int vert_idx_offset = prim_idx*3; const unsigned int hit_kind = optixGetHitKind(); float3 N; if( optixIsTriangleHit() ) { const float3 v0 = make_float3( rt_data->vertices[vert_idx_offset + 0] ); const float3 v1 = make_float3( rt_data->vertices[vert_idx_offset + 1] ); const float3 v2 = make_float3( rt_data->vertices[vert_idx_offset + 2] ); const float3 N_0 = normalize( cross( v1 - v0, v2 - v0 ) ); N = faceforward( N_0, -ray_dir, N_0 ); } else { N = make_float3(int_as_float( optixGetAttribute_0() ), int_as_float( optixGetAttribute_1() ), int_as_float( optixGetAttribute_2() )); } prd->emitted = ( prd->countEmitted ) ? rt_data->emission_color : make_float3( 0.0f ); const float3 P = optixGetWorldRayOrigin() + optixGetRayTmax() * ray_dir; unsigned int seed = prd->seed; { const float z1 = rnd(seed); const float z2 = rnd(seed); float3 w_in; cosine_sample_hemisphere( z1, z2, w_in ); Onb onb( N ); onb.inverse_transform( w_in ); prd->direction = w_in; prd->origin = P; prd->attenuation *= rt_data->diffuse_color; prd->countEmitted = false; } const float z1 = rnd(seed); const float z2 = rnd(seed); prd->seed = seed; ParallelogramLight light = params.light; const float3 light_pos = light.corner + light.v1 * z1 + light.v2 * z2; // Calculate properties of light sample (for area based pdf) const float Ldist = length(light_pos - P ); const float3 L = normalize(light_pos - P ); const float nDl = dot( N, L ); const float LnDl = -dot( light.normal, L ); float weight = 0.0f; if( nDl > 0.0f && LnDl > 0.0f ) { const bool occluded = traceOcclusion( params.handle, P, L, 0.01f, // tmin Ldist - 0.01f // tmax ); if( !occluded ) { const float A = length(cross(light.v1, light.v2)); weight = nDl * LnDl * A / (M_PIf * Ldist * Ldist); } } prd->radiance += light.emission * weight; }
c1532849fb15b411c9e602ba6500e737780152cf.cu
// // Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #include "optixCutouts.h" #include <cuda/random.h> #include <sutil/vec_math.h> #include <cuda/helpers.h> extern "C" { __constant__ Params params; } //------------------------------------------------------------------------------ // // // //------------------------------------------------------------------------------ struct RadiancePRD { // TODO: move some state directly into payload registers? 
float3 emitted; float3 radiance; float3 attenuation; float3 origin; float3 direction; unsigned int seed; int countEmitted; int done; int pad; }; struct Onb { __forceinline__ __device__ Onb( const float3& normal ) { m_normal = normal; if( fabs( m_normal.x ) > fabs( m_normal.z ) ) { m_binormal.x = -m_normal.y; m_binormal.y = m_normal.x; m_binormal.z = 0; } else { m_binormal.x = 0; m_binormal.y = -m_normal.z; m_binormal.z = m_normal.y; } m_binormal = normalize( m_binormal ); m_tangent = cross( m_binormal, m_normal ); } __forceinline__ __device__ void inverse_transform( float3& p ) const { p = p.x * m_tangent + p.y * m_binormal + p.z * m_normal; } float3 m_tangent; float3 m_binormal; float3 m_normal; }; //------------------------------------------------------------------------------ // // // //------------------------------------------------------------------------------ static __forceinline__ __device__ void* unpackPointer( unsigned int i0, unsigned int i1 ) { const unsigned long long uptr = static_cast<unsigned long long>( i0 ) << 32 | i1; void* ptr = reinterpret_cast<void*>( uptr ); return ptr; } static __forceinline__ __device__ void packPointer( void* ptr, unsigned int& i0, unsigned int& i1 ) { const unsigned long long uptr = reinterpret_cast<unsigned long long>( ptr ); i0 = uptr >> 32; i1 = uptr & 0x00000000ffffffff; } static __forceinline__ __device__ RadiancePRD* getPRD() { const unsigned int u0 = optixGetPayload_0(); const unsigned int u1 = optixGetPayload_1(); return reinterpret_cast<RadiancePRD*>( unpackPointer( u0, u1 ) ); } static __forceinline__ __device__ void setPayloadOcclusion( bool occluded ) { optixSetPayload_0( static_cast<unsigned int>( occluded ) ); } static __forceinline__ __device__ void cosine_sample_hemisphere( const float u1, const float u2, float3& p ) { // Uniformly sample disk. const float r = sqrtf( u1 ); const float phi = 2.0f * M_PIf * u2; p.x = r * cosf( phi ); p.y = r * sinf( phi ); // Project up to hemisphere. 
p.z = sqrtf( fmaxf( 0.0f, 1.0f - p.x * p.x - p.y * p.y ) ); } static __forceinline__ __device__ void traceRadiance( OptixTraversableHandle handle, float3 ray_origin, float3 ray_direction, float tmin, float tmax, RadiancePRD* prd ) { // TODO: deduce stride from num ray-types passed in params unsigned int u0, u1; packPointer( prd, u0, u1 ); optixTrace( handle, ray_origin, ray_direction, tmin, tmax, 0.0f, // rayTime OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_NONE, RAY_TYPE_RADIANCE, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_RADIANCE, // missSBTIndex u0, u1 ); } static __forceinline__ __device__ bool traceOcclusion( OptixTraversableHandle handle, float3 ray_origin, float3 ray_direction, float tmin, float tmax ) { unsigned int occluded = 0u; optixTrace( handle, ray_origin, ray_direction, tmin, tmax, 0.0f, // rayTime OptixVisibilityMask( 1 ), OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, RAY_TYPE_OCCLUSION, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_OCCLUSION, // missSBTIndex occluded ); return occluded; } //------------------------------------------------------------------------------ // // // //------------------------------------------------------------------------------ extern "C" __global__ void __raygen__rg() { const int w = params.width; const int h = params.height; const float3 eye = params.eye; const float3 U = params.U; const float3 V = params.V; const float3 W = params.W; const uint3 idx = optixGetLaunchIndex(); const int subframe_index = params.subframe_index; unsigned int seed = tea<4>( idx.y*w + idx.x, subframe_index ); float3 result = make_float3( 0.0f ); int i = params.samples_per_launch; do { // The center of each pixel is at fraction (0.5,0.5) const float2 subpixel_jitter = make_float2( rnd( seed ), rnd( seed ) ); const float2 d = 2.0f * make_float2( ( static_cast<float>( idx.x ) + subpixel_jitter.x ) / static_cast<float>( w ), ( static_cast<float>( idx.y ) + subpixel_jitter.y ) / static_cast<float>( h ) ) - 1.0f; float3 ray_direction = normalize(d.x*U + d.y*V + W); float3 ray_origin = eye; RadiancePRD prd; prd.emitted = make_float3(0.f); prd.radiance = make_float3(0.f); prd.attenuation = make_float3(1.f); prd.countEmitted = true; prd.done = false; prd.seed = seed; int depth = 0; for( ;; ) { traceRadiance( params.handle, ray_origin, ray_direction, 0.01f, // tmin // TODO: smarter offset 1e16f, // tmax &prd ); result += prd.emitted; result += prd.radiance * prd.attenuation; if( prd.done || depth >= 3 ) // TODO RR, variable for depth break; ray_origin = prd.origin; ray_direction = prd.direction; ++depth; } } while( --i ); const uint3 launch_index = optixGetLaunchIndex(); const unsigned int image_index = launch_index.y * params.width + launch_index.x; float3 accum_color = result / static_cast<float>( params.samples_per_launch ); if( subframe_index > 0 ) { const float a = 1.0f / static_cast<float>( subframe_index+1 ); const float3 accum_color_prev = make_float3( params.accum_buffer[ image_index ]); accum_color = lerp( accum_color_prev, accum_color, a ); } params.accum_buffer[ image_index ] = make_float4( accum_color, 1.0f); params.frame_buffer[ image_index ] = make_color ( accum_color ); } extern "C" __global__ void __miss__radiance() { MissData* rt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() ); RadiancePRD* prd = getPRD(); prd->radiance = make_float3( rt_data->r, rt_data->g, rt_data->b ); prd->done = true; } extern "C" __global__ void __anyhit__ah() { const unsigned int hit_kind = optixGetHitKind(); HitGroupData* rt_data = 
(HitGroupData*)optixGetSbtDataPointer(); const int prim_idx = optixGetPrimitiveIndex(); // The texture coordinates are defined per-vertex for built-in triangles, // and are derived from the surface normal for our custom sphere geometry. float3 texcoord; if( optixIsTriangleHit() ) { const int vert_idx_offset = prim_idx*3; const float2 barycentrics = optixGetTriangleBarycentrics(); const float2 t0 = rt_data->tex_coords[ vert_idx_offset+0 ]; const float2 t1 = rt_data->tex_coords[ vert_idx_offset+1 ]; const float2 t2 = rt_data->tex_coords[ vert_idx_offset+2 ]; texcoord = make_float3( t0 * (1.0f - barycentrics.x - barycentrics.y) + t1 * barycentrics.x + t2 * barycentrics.y ); } else { const float3 normal = make_float3( int_as_float( optixGetAttribute_0() ), int_as_float( optixGetAttribute_1() ), int_as_float( optixGetAttribute_2() ) ); // TODO: Pass UV scale in SBT? const float uv_scale = 16.0f; const float u = uv_scale * ( 0.5f + atan2f( normal.z, normal.x ) * 0.5f * M_1_PIf ); const float v = uv_scale * ( 0.5f - asinf( normal.y ) * M_1_PIf ); texcoord = make_float3( u, v, 0.0f ); } int which_check = (static_cast<int>(texcoord.x) + static_cast<int>(texcoord.y)) & 1; if( which_check == 0 ) { optixIgnoreIntersection(); } } extern "C" __global__ void __closesthit__occlusion() { setPayloadOcclusion( true ); } extern "C" __global__ void __closesthit__radiance() { HitGroupData* rt_data = (HitGroupData*)optixGetSbtDataPointer(); RadiancePRD* prd = getPRD(); const int prim_idx = optixGetPrimitiveIndex(); const float3 ray_dir = optixGetWorldRayDirection(); const int vert_idx_offset = prim_idx*3; const unsigned int hit_kind = optixGetHitKind(); float3 N; if( optixIsTriangleHit() ) { const float3 v0 = make_float3( rt_data->vertices[vert_idx_offset + 0] ); const float3 v1 = make_float3( rt_data->vertices[vert_idx_offset + 1] ); const float3 v2 = make_float3( rt_data->vertices[vert_idx_offset + 2] ); const float3 N_0 = normalize( cross( v1 - v0, v2 - v0 ) ); N = faceforward( N_0, -ray_dir, N_0 ); } else { N = make_float3(int_as_float( optixGetAttribute_0() ), int_as_float( optixGetAttribute_1() ), int_as_float( optixGetAttribute_2() )); } prd->emitted = ( prd->countEmitted ) ? rt_data->emission_color : make_float3( 0.0f ); const float3 P = optixGetWorldRayOrigin() + optixGetRayTmax() * ray_dir; unsigned int seed = prd->seed; { const float z1 = rnd(seed); const float z2 = rnd(seed); float3 w_in; cosine_sample_hemisphere( z1, z2, w_in ); Onb onb( N ); onb.inverse_transform( w_in ); prd->direction = w_in; prd->origin = P; prd->attenuation *= rt_data->diffuse_color; prd->countEmitted = false; } const float z1 = rnd(seed); const float z2 = rnd(seed); prd->seed = seed; ParallelogramLight light = params.light; const float3 light_pos = light.corner + light.v1 * z1 + light.v2 * z2; // Calculate properties of light sample (for area based pdf) const float Ldist = length(light_pos - P ); const float3 L = normalize(light_pos - P ); const float nDl = dot( N, L ); const float LnDl = -dot( light.normal, L ); float weight = 0.0f; if( nDl > 0.0f && LnDl > 0.0f ) { const bool occluded = traceOcclusion( params.handle, P, L, 0.01f, // tmin Ldist - 0.01f // tmax ); if( !occluded ) { const float A = length(cross(light.v1, light.v2)); weight = nDl * LnDl * A / (M_PIf * Ldist * Ldist); } } prd->radiance += light.emission * weight; }
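In __closesthit__radiance above, the direct-lighting weight nDl * LnDl * A / (M_PIf * Ldist * Ldist) is the standard change of variables from uniform area sampling of the parallelogram light to solid angle; the Lambertian 1/pi stays in the weight while the albedo rides along in prd->attenuation. A sketch of that derivation (textbook rendering-equation algebra, not taken from this file):

% One uniform sample x on a light of area A: p_A(x) = 1/A, and
% d\omega = \cos\theta_l \, dA / d^2  \;\Rightarrow\;  p_\omega = d^2 / (A \cos\theta_l).
L_o \approx \frac{f_r \, L_e \, \cos\theta_s}{p_\omega}
    = \frac{\rho}{\pi} \, L_e \, \cos\theta_s \, \frac{A \, \cos\theta_l}{d^2}

so weight = \cos\theta_s \cos\theta_l \, A / (\pi d^2), with \cos\theta_s = nDl, \cos\theta_l = LnDl, d = Ldist, L_e = light.emission, and \rho applied later through the accumulated attenuation.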
a29771e7b592c11614be0b43ffd721eeb34c98c4.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "device_launch_parameters.h" #include "efficient.h" namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void kernelUpSweepStep(int n, int d, int* cdata) { int k = (blockIdx.x * blockDim.x) + threadIdx.x; if (k > n) return; int prev_step_size = 1 << d; int cur_step_size = 2 * prev_step_size; if (k % cur_step_size == 0) cdata[k + cur_step_size - 1] += cdata[k + prev_step_size - 1]; } __global__ void kernelUpSweepStepEfficient(int n, int d, int* cdata) { int k = (blockIdx.x * blockDim.x) + threadIdx.x; if (k >= n) return; int prev_step_size = 1 << d; int cur_step_size = 2 * prev_step_size; int new_offset = k * cur_step_size; cdata[new_offset + cur_step_size - 1] += cdata[new_offset + prev_step_size - 1]; } __global__ void kernelDownSweepStep(int n, int d, int* cdata) { int k = (blockIdx.x * blockDim.x) + threadIdx.x; if (k > n) return; int left_step = 1 << d; int cur_step = 2 * left_step; if (k % cur_step == 0) { int temp = cdata[k + left_step - 1]; cdata[k + left_step - 1] = cdata[k + cur_step - 1]; cdata[k + cur_step - 1] += temp; } } __global__ void kernelDownSweepStepEfficient(int n, int d, int* cdata) { int k = (blockIdx.x * blockDim.x) + threadIdx.x; if (k >= n) return; int prev_step_size = 1 << d; int cur_step_size = 2 * prev_step_size; int new_offset = k * cur_step_size; int temp = cdata[new_offset + prev_step_size - 1]; cdata[new_offset + prev_step_size - 1] = cdata[new_offset + cur_step_size - 1]; cdata[new_offset + cur_step_size - 1] += temp; } void printArray(int n, int *a, bool abridged = false) { printf(" [ "); for (int i = 0; i < n; i++) { if (abridged && i + 2 == 15 && n > 16) { i = n - 2; printf("... "); } printf("%3d ", a[i]); } printf("]\n"); } void printCudaArray(int size, int* data) { int *d_data = new int[size]; hipMemcpy(d_data, data, size * sizeof(int), hipMemcpyDeviceToHost); printArray(size, d_data, true); } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. 
*/ void scanEfficient(int n, int *odata, const int *idata, int blockSize) { // Memory Allocation and Copying int power_size = pow(2, ilog2ceil(n)); int *cdata; hipMalloc((void**)&cdata, power_size * sizeof(int)); checkCUDAErrorFn("hipMalloc adata failed!"); hipMemset(cdata, 0, power_size * sizeof(int)); hipMemcpy(cdata, idata, n * sizeof(int), hipMemcpyHostToDevice); bool started_timer = true; try { timer().startGpuTimer(); } catch (const std::exception& e) { started_timer = false; } int numThreads; //Up Sweep for (int d = 0; d <= ilog2ceil(power_size) - 1 ; d++) { numThreads = pow(2, (ilog2ceil(power_size) - 1 - d)); dim3 fullBlocks((numThreads + blockSize - 1) / blockSize); hipLaunchKernelGGL(( kernelUpSweepStepEfficient) , dim3(fullBlocks), dim3(blockSize), 0, 0, numThreads, d, cdata); } //Down Sweep hipMemset(cdata + power_size - 1, 0, sizeof(int)); for (int d = ilog2(power_size) - 1; d >= 0; d--) { numThreads = pow(2, (ilog2ceil(power_size) - 1 - d)); dim3 fullBlocks((numThreads + blockSize - 1) / blockSize); hipLaunchKernelGGL(( kernelDownSweepStepEfficient) , dim3(fullBlocks), dim3(blockSize), 0, 0, numThreads, d, cdata); } if (started_timer) timer().endGpuTimer(); // Copy Back and Free Memory hipMemcpy(odata, cdata, sizeof(int) * n, hipMemcpyDeviceToHost); hipFree(cdata); } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scanEfficientCUDA(int n, int *odata, const int *idata, int blockSize) { // Memory Allocation and Copying int power_size = pow(2, ilog2ceil(n)); int *cdata; hipMalloc((void**)&cdata, power_size * sizeof(int)); checkCUDAErrorFn("hipMalloc adata failed!"); hipMemset(cdata, 0, power_size * sizeof(int)); hipMemcpy(cdata, idata, n * sizeof(int), hipMemcpyDeviceToDevice); bool started_timer = true; try { timer().startGpuTimer(); } catch (const std::exception& e) { started_timer = false; } int numThreads; //Up Sweep for (int d = 0; d <= ilog2ceil(power_size) - 1; d++) { numThreads = pow(2, (ilog2ceil(power_size) - 1 - d)); dim3 fullBlocks((numThreads + blockSize - 1) / blockSize); kernelUpSweepStepEfficient << <fullBlocks, blockSize >> > (numThreads, d, cdata); } //Down Sweep hipMemset(cdata + power_size - 1, 0, sizeof(int)); for (int d = ilog2(power_size) - 1; d >= 0; d--) { numThreads = pow(2, (ilog2ceil(power_size) - 1 - d)); dim3 fullBlocks((numThreads + blockSize - 1) / blockSize); kernelDownSweepStepEfficient << <fullBlocks, blockSize >> > (numThreads, d, cdata); } if (started_timer) timer().endGpuTimer(); // Copy Back and Free Memory hipMemcpy(odata, cdata, sizeof(int) * n, hipMemcpyDeviceToDevice); hipFree(cdata); } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. 
*/ void scan(int n, int *odata, const int *idata, int blockSize) { // Memory Allocation and Copying int power_size = pow(2, ilog2ceil(n)); int *cdata; hipMalloc((void**)&cdata, power_size * sizeof(int)); checkCUDAErrorFn("hipMalloc adata failed!"); hipMemset(cdata, 0, power_size * sizeof(int)); hipMemcpy(cdata, idata, n * sizeof(int), hipMemcpyHostToDevice); bool started_timer = true; try { timer().startGpuTimer(); } catch (const std::exception& e) { started_timer = false; } dim3 fullBlocksPerGrid((power_size + blockSize - 1) / blockSize); //Up Sweep for (int d = 0; d < ilog2ceil(power_size); d++) { kernelUpSweepStep << <fullBlocksPerGrid, blockSize >> > (power_size, d, cdata); } //Down Sweep hipMemset(cdata + power_size - 1, 0, sizeof(int)); for (int d = ilog2(power_size) - 1; d >= 0; d--) { kernelDownSweepStep << <fullBlocksPerGrid, blockSize >> > (power_size, d, cdata); } if (started_timer) timer().endGpuTimer(); // Copy Back and Free Memory hipMemcpy(odata, cdata, sizeof(int) * n, hipMemcpyDeviceToHost); hipFree(cdata); } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. */ int compact(int n, int *odata, const int *idata, bool efficient, int blockSize) { // Memory Allocation and Copying int *bools = new int[n]; int *indices = new int[n]; int *dev_bools; int *dev_indices; int *dev_idata; int *dev_odata; hipMalloc((void**)&dev_bools, n * sizeof(int)); checkCUDAErrorFn("hipMalloc dev_bools failed!"); hipMalloc((void**)&dev_indices, n * sizeof(int)); checkCUDAErrorFn("hipMalloc dev_indices failed!"); hipMalloc((void**)&dev_idata, n * sizeof(int)); checkCUDAErrorFn("hipMalloc dev_idata failed!"); hipMalloc((void**)&dev_odata, n * sizeof(int)); checkCUDAErrorFn("hipMalloc dev_odata failed!"); hipMemcpy(dev_idata, idata, sizeof(int) * n, hipMemcpyHostToDevice); timer().startGpuTimer(); dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); StreamCompaction::Common::kernMapToBoolean << <fullBlocksPerGrid, blockSize >> > (n, dev_bools, dev_idata); hipMemcpy(bools, dev_bools, sizeof(int) * n, hipMemcpyDeviceToHost); if(efficient) scanEfficient(n, indices, bools, blockSize); else scan(n, indices, bools, blockSize); hipMemcpy(dev_indices, indices, sizeof(int) * n, hipMemcpyHostToDevice); StreamCompaction::Common::kernScatter << <fullBlocksPerGrid, blockSize >> > (n, dev_odata, dev_idata, dev_bools, dev_indices); timer().endGpuTimer(); // Copy Back and Free Memory hipMemcpy(odata, dev_odata, sizeof(int) * n, hipMemcpyDeviceToHost); hipFree(dev_bools); hipFree(dev_indices); hipFree(dev_idata); hipFree(dev_odata); return indices[n - 1] + bools[n - 1];; } } }
a29771e7b592c11614be0b43ffd721eeb34c98c4.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "device_launch_parameters.h" #include "efficient.h" namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } __global__ void kernelUpSweepStep(int n, int d, int* cdata) { int k = (blockIdx.x * blockDim.x) + threadIdx.x; if (k > n) return; int prev_step_size = 1 << d; int cur_step_size = 2 * prev_step_size; if (k % cur_step_size == 0) cdata[k + cur_step_size - 1] += cdata[k + prev_step_size - 1]; } __global__ void kernelUpSweepStepEfficient(int n, int d, int* cdata) { int k = (blockIdx.x * blockDim.x) + threadIdx.x; if (k >= n) return; int prev_step_size = 1 << d; int cur_step_size = 2 * prev_step_size; int new_offset = k * cur_step_size; cdata[new_offset + cur_step_size - 1] += cdata[new_offset + prev_step_size - 1]; } __global__ void kernelDownSweepStep(int n, int d, int* cdata) { int k = (blockIdx.x * blockDim.x) + threadIdx.x; if (k > n) return; int left_step = 1 << d; int cur_step = 2 * left_step; if (k % cur_step == 0) { int temp = cdata[k + left_step - 1]; cdata[k + left_step - 1] = cdata[k + cur_step - 1]; cdata[k + cur_step - 1] += temp; } } __global__ void kernelDownSweepStepEfficient(int n, int d, int* cdata) { int k = (blockIdx.x * blockDim.x) + threadIdx.x; if (k >= n) return; int prev_step_size = 1 << d; int cur_step_size = 2 * prev_step_size; int new_offset = k * cur_step_size; int temp = cdata[new_offset + prev_step_size - 1]; cdata[new_offset + prev_step_size - 1] = cdata[new_offset + cur_step_size - 1]; cdata[new_offset + cur_step_size - 1] += temp; } void printArray(int n, int *a, bool abridged = false) { printf(" [ "); for (int i = 0; i < n; i++) { if (abridged && i + 2 == 15 && n > 16) { i = n - 2; printf("... "); } printf("%3d ", a[i]); } printf("]\n"); } void printCudaArray(int size, int* data) { int *d_data = new int[size]; cudaMemcpy(d_data, data, size * sizeof(int), cudaMemcpyDeviceToHost); printArray(size, d_data, true); } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scanEfficient(int n, int *odata, const int *idata, int blockSize) { // Memory Allocation and Copying int power_size = pow(2, ilog2ceil(n)); int *cdata; cudaMalloc((void**)&cdata, power_size * sizeof(int)); checkCUDAErrorFn("cudaMalloc adata failed!"); cudaMemset(cdata, 0, power_size * sizeof(int)); cudaMemcpy(cdata, idata, n * sizeof(int), cudaMemcpyHostToDevice); bool started_timer = true; try { timer().startGpuTimer(); } catch (const std::exception& e) { started_timer = false; } int numThreads; //Up Sweep for (int d = 0; d <= ilog2ceil(power_size) - 1 ; d++) { numThreads = pow(2, (ilog2ceil(power_size) - 1 - d)); dim3 fullBlocks((numThreads + blockSize - 1) / blockSize); kernelUpSweepStepEfficient <<<fullBlocks, blockSize>>> (numThreads, d, cdata); } //Down Sweep cudaMemset(cdata + power_size - 1, 0, sizeof(int)); for (int d = ilog2(power_size) - 1; d >= 0; d--) { numThreads = pow(2, (ilog2ceil(power_size) - 1 - d)); dim3 fullBlocks((numThreads + blockSize - 1) / blockSize); kernelDownSweepStepEfficient <<<fullBlocks, blockSize>>> (numThreads, d, cdata); } if (started_timer) timer().endGpuTimer(); // Copy Back and Free Memory cudaMemcpy(odata, cdata, sizeof(int) * n, cudaMemcpyDeviceToHost); cudaFree(cdata); } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. 
*/ void scanEfficientCUDA(int n, int *odata, const int *idata, int blockSize) { // Memory Allocation and Copying int power_size = pow(2, ilog2ceil(n)); int *cdata; cudaMalloc((void**)&cdata, power_size * sizeof(int)); checkCUDAErrorFn("cudaMalloc adata failed!"); cudaMemset(cdata, 0, power_size * sizeof(int)); cudaMemcpy(cdata, idata, n * sizeof(int), cudaMemcpyDeviceToDevice); bool started_timer = true; try { timer().startGpuTimer(); } catch (const std::exception& e) { started_timer = false; } int numThreads; //Up Sweep for (int d = 0; d <= ilog2ceil(power_size) - 1; d++) { numThreads = pow(2, (ilog2ceil(power_size) - 1 - d)); dim3 fullBlocks((numThreads + blockSize - 1) / blockSize); kernelUpSweepStepEfficient << <fullBlocks, blockSize >> > (numThreads, d, cdata); } //Down Sweep cudaMemset(cdata + power_size - 1, 0, sizeof(int)); for (int d = ilog2(power_size) - 1; d >= 0; d--) { numThreads = pow(2, (ilog2ceil(power_size) - 1 - d)); dim3 fullBlocks((numThreads + blockSize - 1) / blockSize); kernelDownSweepStepEfficient << <fullBlocks, blockSize >> > (numThreads, d, cdata); } if (started_timer) timer().endGpuTimer(); // Copy Back and Free Memory cudaMemcpy(odata, cdata, sizeof(int) * n, cudaMemcpyDeviceToDevice); cudaFree(cdata); } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata, int blockSize) { // Memory Allocation and Copying int power_size = pow(2, ilog2ceil(n)); int *cdata; cudaMalloc((void**)&cdata, power_size * sizeof(int)); checkCUDAErrorFn("cudaMalloc adata failed!"); cudaMemset(cdata, 0, power_size * sizeof(int)); cudaMemcpy(cdata, idata, n * sizeof(int), cudaMemcpyHostToDevice); bool started_timer = true; try { timer().startGpuTimer(); } catch (const std::exception& e) { started_timer = false; } dim3 fullBlocksPerGrid((power_size + blockSize - 1) / blockSize); //Up Sweep for (int d = 0; d < ilog2ceil(power_size); d++) { kernelUpSweepStep << <fullBlocksPerGrid, blockSize >> > (power_size, d, cdata); } //Down Sweep cudaMemset(cdata + power_size - 1, 0, sizeof(int)); for (int d = ilog2(power_size) - 1; d >= 0; d--) { kernelDownSweepStep << <fullBlocksPerGrid, blockSize >> > (power_size, d, cdata); } if (started_timer) timer().endGpuTimer(); // Copy Back and Free Memory cudaMemcpy(odata, cdata, sizeof(int) * n, cudaMemcpyDeviceToHost); cudaFree(cdata); } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. 
*/ int compact(int n, int *odata, const int *idata, bool efficient, int blockSize) { // Memory Allocation and Copying int *bools = new int[n]; int *indices = new int[n]; int *dev_bools; int *dev_indices; int *dev_idata; int *dev_odata; cudaMalloc((void**)&dev_bools, n * sizeof(int)); checkCUDAErrorFn("cudaMalloc dev_bools failed!"); cudaMalloc((void**)&dev_indices, n * sizeof(int)); checkCUDAErrorFn("cudaMalloc dev_indices failed!"); cudaMalloc((void**)&dev_idata, n * sizeof(int)); checkCUDAErrorFn("cudaMalloc dev_idata failed!"); cudaMalloc((void**)&dev_odata, n * sizeof(int)); checkCUDAErrorFn("cudaMalloc dev_odata failed!"); cudaMemcpy(dev_idata, idata, sizeof(int) * n, cudaMemcpyHostToDevice); timer().startGpuTimer(); dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize); StreamCompaction::Common::kernMapToBoolean << <fullBlocksPerGrid, blockSize >> > (n, dev_bools, dev_idata); cudaMemcpy(bools, dev_bools, sizeof(int) * n, cudaMemcpyDeviceToHost); if(efficient) scanEfficient(n, indices, bools, blockSize); else scan(n, indices, bools, blockSize); cudaMemcpy(dev_indices, indices, sizeof(int) * n, cudaMemcpyHostToDevice); StreamCompaction::Common::kernScatter << <fullBlocksPerGrid, blockSize >> > (n, dev_odata, dev_idata, dev_bools, dev_indices); timer().endGpuTimer(); // Copy Back and Free Memory cudaMemcpy(odata, dev_odata, sizeof(int) * n, cudaMemcpyDeviceToHost); cudaFree(dev_bools); cudaFree(dev_indices); cudaFree(dev_idata); cudaFree(dev_odata); return indices[n - 1] + bools[n - 1];; } } }
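Both scan variants above implement the Blelloch work-efficient exclusive scan (the up-sweep builds a sum tree in place, the down-sweep zeroes the root element and distributes partial sums), and compact scatters elements through the scanned boolean map. A small CPU reference like the following is handy for validating the device output; the function names are illustrative only and not part of the StreamCompaction namespaces above:

// Sequential reference for checking the GPU scan/compact results.
#include <vector>

// Exclusive prefix sum: odata[i] = sum of idata[0 .. i-1].
void cpuExclusiveScan(int n, int* odata, const int* idata) {
    int running = 0;
    for (int i = 0; i < n; ++i) {
        odata[i] = running;
        running += idata[i];
    }
}

// Mirrors compact(): map to booleans, exclusive-scan, scatter.
// Returns the number of non-zero elements kept, as the GPU version does.
int cpuCompact(int n, int* odata, const int* idata) {
    std::vector<int> bools(n), indices(n);
    for (int i = 0; i < n; ++i) bools[i] = (idata[i] != 0) ? 1 : 0;
    cpuExclusiveScan(n, indices.data(), bools.data());
    for (int i = 0; i < n; ++i)
        if (bools[i]) odata[indices[i]] = idata[i];
    return n > 0 ? indices[n - 1] + bools[n - 1] : 0;
}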
1ffd191656e677477b0ed5ecd0bc448be538209e.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////////////// // // Copyright 1993-2015 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. // //////////////////////////////////////////////////////////////////////////// /* This example demonstrates how to use the Cuda OpenGL bindings to dynamically modify a vertex buffer using a Cuda kernel. The steps are: 1. Create an empty vertex buffer object (VBO) 2. Register the VBO with Cuda 3. Map the VBO for writing from Cuda 4. Run Cuda kernel to modify the vertex positions 5. Unmap the VBO 6. Render the results using OpenGL Host code */ #define GLEW_STATIC #define FREEGLUT_STATIC // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cstdlib> #include <ctime> #include <random> #include <chrono> using namespace std::chrono; #ifdef _WIN32 # define WINDOWS_LEAN_AND_MEAN # define NOMINMAX # include <windows.h> #endif // OpenGL Graphics includes #include <helper_gl.h> #if defined (__APPLE__) || defined(MACOSX) #pragma clang diagnostic ignored "-Wdeprecated-declarations" #include <GLUT/glut.h> #ifndef glutCloseFunc #define glutCloseFunc glutWMCloseFunc #endif #else #include <GL/freeglut.h> #endif // includes, cuda #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> // Utilities and timing functions #include <helper_functions.h> // includes cuda.h and hip/hip_runtime_api.h #include <timer.h> // timing functions // CUDA helper functions #include <helper_cuda.h> // helper functions for CUDA error check #include <hip/hip_vector_types.h> #define MAX_EPSILON_ERROR 10.0f #define THRESHOLD 0.30f #define REFRESH_DELAY 10 //ms //////////////////////////////////////////////////////////////////////////////// // constants const unsigned int window_width = 512; const unsigned int window_height = 512; const unsigned int mesh_width = 256; const unsigned int mesh_height = 256; // vbo variables GLuint vbo; struct cudaGraphicsResource *cuda_vbo_resource; void *d_vbo_buffer = NULL; /* //20/10/19 GLuint vbo2; struct cudaGraphicsResource *cuda_vbo_resource_2; void *d_vbo_buffer_2 = NULL; */ //22/10/19 test float4 *h_offsets; float4 *d_offsets; //void *offsetsAutoTestH = NULL; void *offsetsAutoTestD = NULL; float g_fAnim = 0.0; float UnitOfChangeOnY = 0.0f;// 19/10/19 test -UI float UnitOfChangeOnX = 0.0f; //int jitterAmmountInt = 0;//20/10/19 float jitterAmmountFloat1 = 0.0f;//20/10/19 float jitterAmmountFloat2 = 0.0f; float jitterAmmountFloat3 = 0.0f; bool jitter = false; bool origionalJitter = false; float jitterAmmountFloatOrigional = 0.0f; //bool exitTest = false; //21/10/19 game //float fallingDistence = 0.0f; bool falling = false; float horizontalChange = 0.0f; float randomHeightTop = 0.0f; float randomHeightBottom = 0.0f; // mouse controls int mouse_old_x, mouse_old_y; int mouse_buttons = 0; float rotate_x = 0.0, rotate_y = 0.0; float translate_z = -3.0; StopWatchInterface *timer = NULL; // Auto-Verification Code int fpsCount = 0; // FPS count for averaging int fpsLimit = 1; // FPS limit for sampling int g_Index = 0; float avgFPS = 0.0f; unsigned int frameCount = 0; unsigned int g_TotalErrors = 0; bool g_bQAReadback = false; int 
*pArgc = NULL; char **pArgv = NULL; #define MAX(a,b) ((a > b) ? a : b) //////////////////////////////////////////////////////////////////////////////// // declaration, forward bool runTest(int argc, char **argv, char *ref_file); void cleanup(); // GL functionality bool initGL(int *argc, char **argv); void createVBO(GLuint *vbo, struct cudaGraphicsResource **vbo_res, unsigned int vbo_res_flags); //20/10/19 void createVBO2(GLuint *vbo, struct cudaGraphicsResource **vbo_res, unsigned int vbo_res_flags); void deleteVBO(GLuint *vbo, struct cudaGraphicsResource *vbo_res); // rendering callbacks void display(); void keyboard(unsigned char key, int x, int y); void mouse(int button, int state, int x, int y); void motion(int x, int y); void timerEvent(int value); // Cuda functionality void runCuda(struct cudaGraphicsResource **vbo_resource, float4 **Hoffsets, float4 **Doffsets);//, float4 * h_offsets, float4 * d_offsets);//, struct cudaGraphicsResource **vbo_resource_2);//20/10/19 test void runAutoTest(int devID, char **argv, char *ref_file); void checkResultCuda(int argc, char **argv, const GLuint &vbo); const char *sSDKsample = "simpleGL (VBO)"; /////////////////////////////////////////////////////////////////////////////// //! Simple kernel to modify vertex positions in sine wave pattern //! @param data data in global memory /////////////////////////////////////////////////////////////////////////////// __global__ void simple_vbo_kernel(float4 *pos, unsigned int width, unsigned int height, float time, float UnitOfChangeOnX, float UnitOfChangeOnY, float4 *offsets, bool falling, float horizontalChange, float randomHeightTop, float randomHeightBottom)//, float FallingDistence)//, float jitterAmmountFloat) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; //printf("%d \n", x); // calculate uv coordinates float u = x / (float) width; float v = y / (float) height; u = u * 2.0f - 1.0f;//17/10/19 test - making easier to see dots to try make circle (old code - *2.0f - 1.0f;) v = v * 2.0f - 1.0f;//*2.0f - 1.0f; //16/10/19 test - Q a. 
start if(u > -0.11f + UnitOfChangeOnX & u < 0.11f + UnitOfChangeOnX){// u > -0.11f & u < 0.11f (new reduced x values) if(v > -0.125f + UnitOfChangeOnY & v < 0.125f + UnitOfChangeOnY){ pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); } else { float freq = 4.0f; float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f; if (falling) { if(u > 0.75f & u < 1.0f){//bottom block if(v > 0.90f - randomHeightBottom & v < 1.0f){ //printf("in if"); //float gameTime = time; pos[y*width + x] = make_float4(u-horizontalChange, 0.0f, v, 1.0f); } else { pos[y*width + x] = make_float4(u, -0.5f, v, 1.0f); } } else { pos[y*width + x] = make_float4(u, -0.5f, v, 1.0f); } if (u > 0.75f & u < 1.0f) //{//top block if (v > -1.1f & v < -0.90f + randomHeightTop) //{ //printf("in if"); //float gameTime = time; pos[y*width + x] = make_float4(u - horizontalChange, 0.0f, v, 1.0f); //} //else { // pos[y*width + x] = make_float4(u, -0.5f, v, 1.0f); //} //} //else { // pos[y*width + x] = make_float4(u, -0.5f, v, 1.0f); //} } else { // write output vertex pos[y*width + x] = make_float4(u + offsets[y*width + x].x, w + offsets[y*width + x].y, v + offsets[y*width + x].z, 1.0f); } } } else { //printf("*** IN ELSE *** \n"); // calculate simple sine wave pattern float freq = 4.0f; float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f; if(falling){ if(u > 0.75f & u < 1.0f){//bottom block if(v > 0.90f - randomHeightBottom & v < 1.0f){ //printf("in if"); //float gameTime = time; pos[y*width + x] = make_float4(u-horizontalChange, 0.0f, v, 1.0f); } else { pos[y*width + x] = make_float4(u, -0.5f, v, 1.0f); } } else { pos[y*width + x] = make_float4(u, -0.5f, v, 1.0f); } if (u > 0.75f & u < 1.0f) //{//top block if (v > -1.1f & v < -0.90f + randomHeightTop) //{ //printf("in if"); //float gameTime = time; pos[y*width + x] = make_float4(u - horizontalChange, 0.0f, v, 1.0f); //} //else { // pos[y*width + x] = make_float4(u, -0.5f, v, 1.0f); //} //} //else { // pos[y*width + x] = make_float4(u, -0.5f, v, 1.0f); //} } else { // write output vertex pos[y*width + x] = make_float4(u + offsets[y*width + x].x, w + offsets[y*width + x].y, v + offsets[y*width + x].z, 1.0f); } } //16/10/19 test - Q a. end __syncthreads(); //17/10/19 test - Q a. 
start //__syncthreads();//extra top part //if (u > -0.109f & u < 0.109f) // if (v > -0.126f & v < 0.126f) // pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); if (u > -0.111f + UnitOfChangeOnX & u < 0.111f + UnitOfChangeOnX)//1 - u > -0.111f & u < 0.111f (new reduced x values) if (v > -0.109f + UnitOfChangeOnY & v < 0.109f + UnitOfChangeOnY) pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); //__syncthreads(); //if (u > -0.126f & u < 0.126f)//test // if (v > -0.125f & v < 0.125f) // pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); __syncthreads();//2 if (u > -0.127f + UnitOfChangeOnX & u < 0.127f + UnitOfChangeOnX)// u > -0.127f & u < 0.127f (new reduced x values) if (v > -0.093f + UnitOfChangeOnY & v < 0.093f + UnitOfChangeOnY) pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); __syncthreads();//3 if (u > -0.143f + UnitOfChangeOnX & u < 0.143f + UnitOfChangeOnX)// u > -0.143f & u < 0.143f (new reduced x values) if (v > -0.077f + UnitOfChangeOnY & v < 0.077f + UnitOfChangeOnY) pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); __syncthreads();//4 if (u > -0.148f + UnitOfChangeOnX & u < 0.148f + UnitOfChangeOnX)// u > -0.148f & u < 0.148f (new reduced x values) if (v > -0.061f + UnitOfChangeOnY & v < 0.061f + UnitOfChangeOnY) pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); __syncthreads();//5 if (u > -0.164f + UnitOfChangeOnX & u < 0.164f + UnitOfChangeOnX)// u > -0.164f & u < 0.164f (new reduced x values) if (v > -0.045f + UnitOfChangeOnY & v < 0.045f + UnitOfChangeOnY) pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); __syncthreads();// "top part" 1 if (u > -0.094f + UnitOfChangeOnX & u < 0.094f + UnitOfChangeOnX) if (v > -0.141f + UnitOfChangeOnY & v < 0.141f + UnitOfChangeOnY) pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); __syncthreads();// "top part" 2 if (u > -0.078f + UnitOfChangeOnX & u < 0.078f + UnitOfChangeOnX) if (v > -0.157f + UnitOfChangeOnY & v < 0.157f + UnitOfChangeOnY) pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); __syncthreads();// "top part" 3 if (u > -0.062f + UnitOfChangeOnX & u < 0.062f + UnitOfChangeOnX) if (v > -0.173f + UnitOfChangeOnY& v < 0.173f + UnitOfChangeOnY) pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); //17/10/19 test - Q a. 
end } __global__ void new_vbo_x_kernel(float4 *pos, unsigned int width, unsigned int height, float time, float jitterAmmountFloatOrigional)//, float4 *randNum) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; //printf("JitterAmmount: %f\n", jitterAmmountFloat); //printf("") /* float u = x / (float)width; float v = y / (float)height; u = u * 2.0f - 1.0f; v = v * 2.0f - 1.0f; //pos[y*width + x] = make_float4(u + jitterAmmountFloat, 0.0f, v + jitterAmmountFloat, 1.0f); */ //pos[y*width + x].x = u + jitterAmmountFloat;//make_float4(u + jitterAmmountFloat, 0.0f + jitterAmmountFloat, v + jitterAmmountFloat, 1.0f); pos[y*width + x].x += pos[y*width + x].x * jitterAmmountFloatOrigional; //pos[y*width + x].y += pos[y*width + x].y * jitterAmmountFloat; //pos[y*width + x].z += pos[y*width + x].z * jitterAmmountFloat; } __global__ void new_vbo_y_kernel(float4 *pos, unsigned int width, unsigned int height, float time, float jitterAmmountFloatOrigional)//, float4 *randNum) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; /* float u = x / (float)width; float v = y / (float)height; u = u * 2.0f - 1.0f; v = v * 2.0f - 1.0f; //pos[y*width + x].y = u + jitterAmmountFloat; */ /* float freq = 4.0f; float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f; pos[y*width + x].y = w + jitterAmmountFloat;//make_float4(u, w + jitterAmmountFloat, v, 1.0f); */ pos[y*width + x].y += pos[y*width + x].y * jitterAmmountFloatOrigional; } __global__ void new_vbo_z_kernel(float4 *pos, unsigned int width, unsigned int height, float time, float jitterAmmountFloatOrigional)//, float4 *randNum) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; /* float u = x / (float)width; float v = y / (float)height; u = u * 2.0f - 1.0f; v = v * 2.0f - 1.0f; pos[y*width + x].z = v + jitterAmmountFloat; */ pos[y*width + x].z += pos[y*width + x].z * jitterAmmountFloatOrigional; } __global__ void game_kernel(float4 *pos, unsigned int width, unsigned int height, float time, bool falling, float UnitOfChangeOnX, float UnitOfChangeOnY)//, float jitterAmmountFloatOrigional)//, float seedTest)//, float4 *randNum) { /* if (falling) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; float u = x / (float)width; float v = y / (float)height; u = u * 2.0f - 1.0f; v = v * 2.0f - 1.0f; if (u > 0.0f & u < 1.0f) if (v > 0.0f & v < -1.0f) pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); } */ } void launch_kernel(float4 *pos, unsigned int mesh_width, unsigned int mesh_height, float time, float4 *offsets)//, float4 *randNum) { // execute the kernel dim3 block(8, 8, 1); dim3 grid(mesh_width / block.x, mesh_height / block.y, 1); auto seedTest = std::chrono::high_resolution_clock::now().time_since_epoch().count(); std::mt19937 generatorTest; generatorTest.seed(seedTest); std::uniform_real_distribution<double> distributionTest(0.0, 0.75); auto seedTest2 = std::chrono::high_resolution_clock::now().time_since_epoch().count(); std::mt19937 generatorTest2; generatorTest2.seed(seedTest2); if (falling) UnitOfChangeOnY += 0.016; if (falling & UnitOfChangeOnY > 1.0f) falling = false;//UnitOfChangeOnY = 0.0f; if (falling) horizontalChange += 0.016f; if (falling & horizontalChange > 2.0f) horizontalChange = 0.0f; if (horizontalChange == 0.0f) randomHeightTop = distributionTest(generatorTest); if (horizontalChange == 0.0f) randomHeightBottom 
= distributionTest(generatorTest2); //auto seedTest = std::chrono::high_resolution_clock::now().time_since_epoch().count(); game_kernel << < grid, block >> > (pos, mesh_width, mesh_height, time, falling, UnitOfChangeOnX, UnitOfChangeOnY); simple_vbo_kernel << < grid, block >> > (pos, mesh_width, mesh_height, time, UnitOfChangeOnX, UnitOfChangeOnY, offsets,falling, horizontalChange, randomHeightTop, randomHeightBottom);//, fallingDistence);//, jitterAmmountFloat); //20/10/19 test - jitter //if (exitTest != true) { if (origionalJitter) { //srand(static_cast<unsigned int>(clock())); //jitterAmmountFloat = float(rand()) / (float(RAND_MAX) * 2.0f - 1.0f);//+ 1.0); //std::default_random_engine generator; //milliseconds ms = duration_cast<milliseconds>(system_clock::now().time_since_epoch()); //float testFloat = (float)ms; auto seed = std::chrono::high_resolution_clock::now().time_since_epoch().count(); std::mt19937 generator; generator.seed(seed);//std::time(0));// * 1000); std::uniform_real_distribution<double> distribution(-0.0625 , 0.0652);//-0.03125f, 0.03125f); //doubles from jitterAmmountFloatOrigional = distribution(generator); //printf("JitterAmmount x: %f\n", jitterAmmountFloatOrigional); new_vbo_x_kernel << <grid, block >> > (pos, mesh_width, mesh_height, time, jitterAmmountFloatOrigional);//, randNum); auto seed2 = std::chrono::high_resolution_clock::now().time_since_epoch().count(); generator.seed(seed2);//std::time(0));// * 1000); jitterAmmountFloatOrigional = distribution(generator); //printf("JitterAmmount y: %f\n", jitterAmmountFloatOrigional); new_vbo_y_kernel << <grid, block >> > (pos, mesh_width, mesh_height, time, jitterAmmountFloatOrigional); auto seed3 = std::chrono::high_resolution_clock::now().time_since_epoch().count(); generator.seed(seed3);//std::time(0));// * 1000); jitterAmmountFloatOrigional = distribution(generator); //printf("JitterAmmount z: %f\n", jitterAmmountFloatOrigional); new_vbo_z_kernel << <grid, block >> > (pos, mesh_width, mesh_height, time, jitterAmmountFloatOrigional); } //} } bool checkHW(char *name, const char *gpuType, int dev) { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); strcpy(name, deviceProp.name); if (!STRNCASECMP(deviceProp.name, gpuType, strlen(gpuType))) { return true; } else { return false; } } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { char *ref_file = NULL; pArgc = &argc; pArgv = argv; #if defined(__linux__) setenv ("DISPLAY", ":0", 0); #endif printf("%s starting...\n", sSDKsample); if (argc > 1) { if (checkCmdLineFlag(argc, (const char **)argv, "file")) { // In this mode, we are running non-OpenGL and doing a compare of the VBO was generated correctly getCmdLineArgumentString(argc, (const char **)argv, "file", (char **)&ref_file); } } printf("\n"); runTest(argc, argv, ref_file); printf("%s completed, returned %s\n", sSDKsample, (g_TotalErrors == 0) ? "OK" : "ERROR!"); exit(g_TotalErrors == 0 ? EXIT_SUCCESS : EXIT_FAILURE); } void computeFPS() { frameCount++; fpsCount++; if (fpsCount == fpsLimit) { avgFPS = 1.f / (sdkGetAverageTimerValue(&timer) / 1000.f); fpsCount = 0; fpsLimit = (int)MAX(avgFPS, 1.f); sdkResetTimer(&timer); } char fps[256]; sprintf(fps, "Cuda GL Interop (VBO): %3.1f fps (Max 100Hz)", avgFPS); glutSetWindowTitle(fps); } //////////////////////////////////////////////////////////////////////////////// //! 
Initialize GL //////////////////////////////////////////////////////////////////////////////// bool initGL(int *argc, char **argv) { glutInit(argc, argv); glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE); glutInitWindowSize(window_width, window_height); glutCreateWindow("Cuda GL Interop (VBO)"); glutDisplayFunc(display); glutKeyboardFunc(keyboard); glutMotionFunc(motion); glutTimerFunc(REFRESH_DELAY, timerEvent,0); // initialize necessary OpenGL extensions if (! isGLVersionSupported(2,0)) { fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing."); fflush(stderr); return false; } // default initialization glClearColor(0.0, 0.0, 0.0, 1.0); glDisable(GL_DEPTH_TEST); // viewport glViewport(0, 0, window_width, window_height); // projection glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluPerspective(60.0, (GLfloat)window_width / (GLfloat) window_height, 0.1, 10.0); SDK_CHECK_ERROR_GL(); return true; } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// bool runTest(int argc, char **argv, char *ref_file) { // Create the CUTIL timer sdkCreateTimer(&timer); // use command-line specified CUDA device, otherwise use device with highest Gflops/s int devID = findCudaDevice(argc, (const char **)argv); // command line mode only if (ref_file != NULL) { // create VBO checkCudaErrors(hipMalloc((void **)&d_vbo_buffer, mesh_width*mesh_height*4*sizeof(float))); // run the cuda part runAutoTest(devID, argv, ref_file); // check result of Cuda step checkResultCuda(argc, argv, vbo); hipFree(d_vbo_buffer); d_vbo_buffer = NULL; } else { // First initialize OpenGL context, so we can properly set the GL for CUDA. // This is necessary in order to achieve optimal performance with OpenGL/CUDA interop. if (false == initGL(&argc, argv)) { return false; } // register callbacks glutDisplayFunc(display); glutKeyboardFunc(keyboard); glutMouseFunc(mouse); glutMotionFunc(motion); #if defined (__APPLE__) || defined(MACOSX) atexit(cleanup); #else glutCloseFunc(cleanup); #endif // create VBO createVBO(&vbo, &cuda_vbo_resource, hipGraphicsMapFlagsWriteDiscard); //22/10/19 - start unsigned int size = mesh_width * mesh_height * 4 * sizeof(float);//1. //float4 *h_offsets;//2. MIGHT HAVE TO MAKE THESE GLOBALS //float4 *d_offsets;// MIGHT HAVE TO MAKE THESE GLOBALS //float4 **testh = &h_offsets; //float4 **testd = &d_offsets; h_offsets = (float4*)malloc(size);//test - size);//3 //hipMalloc(&d_offsets, size);//test - size); hipMalloc((void **)&d_offsets, size);//exactly lab3 way //for (int i = 0; i < size; i++) { h_offsets[i] = { 0.1f, 0.1f, 0.1f, 0.1f }; }//4 - fill h_offset with random xyz THESE MIGHT HAVE TO HAPPEN IN RUNCUDA //hipMemcpy(d_offsets, h_offsets, size, hipMemcpyHostToDevice);//5 THESE MIGHT HAVE TO HAPPEN IN RUNCUDA /* if (jitter) { for (int i = 0; i < size; i++) { h_offsets[i] = make_float4(0.5f, 0.5f, 0.5f, 1.0f); }//4 } else { for (int i = 0; i < 20000; i++) { h_offsets[i] = make_float4(0.0f, 0.0f, 0.0f, 1.0f); }//4 } hipMemcpy(d_offsets, h_offsets, size, hipMemcpyHostToDevice);//5 */ //22/10/19 - end //20/10/19 //createVBO2(&vbo2, &cuda_vbo_resource_2, hipGraphicsMapFlagsWriteDiscard); // run the cuda part runCuda(&cuda_vbo_resource, &h_offsets, &d_offsets);//, &cuda_vbo_resource_2);//, 0);// 18/10/19 test - UI // start rendering mainloop glutMainLoop(); } return true; } //////////////////////////////////////////////////////////////////////////////// //! 
Run the Cuda part of the computation
////////////////////////////////////////////////////////////////////////////////
void runCuda(struct cudaGraphicsResource **vbo_resource, float4 **Hoffsets, float4 **Doffsets)//, struct cudaGraphicsResource **vbo_resource_2)// 18/10/19 test - UI
{
    // map OpenGL buffer object for writing from CUDA
    float4 *dptr;
    checkCudaErrors(hipGraphicsMapResources(1, vbo_resource, 0));
    size_t num_bytes;
    checkCudaErrors(hipGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, *vbo_resource));
    //printf("CUDA mapped VBO: May access %ld bytes\n", num_bytes);

    // execute the kernel
    //    dim3 block(8, 8, 1);
    //    dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
    //    kernel<<< grid, block>>>(dptr, mesh_width, mesh_height, g_fAnim);

    /*
    //20/10/19
    float4 *dptr2;
    checkCudaErrors(hipGraphicsMapResources(1, vbo_resource_2, 0));
    size_t num_bytes_2;
    checkCudaErrors(hipGraphicsResourceGetMappedPointer((void **)&dptr2, &num_bytes_2, *vbo_resource_2));
    */

    //22/10/19 test - start
    float4 *dptr2;//23/10/19 host-side offset buffer
    float4 *dptr3;//23/10/19 device-side offset buffer
    dptr2 = *Hoffsets;
    dptr3 = *Doffsets;
    unsigned int size = mesh_width * mesh_height * 4 * sizeof(float); // full offset-buffer size in bytes (allocated in runTest)

    if (jitter)
    {
        // re-seed the generator for each component so x, y and z jitter independently
        auto seed = std::chrono::high_resolution_clock::now().time_since_epoch().count();
        std::mt19937 generator;
        generator.seed(seed);
        std::uniform_real_distribution<double> distribution(-0.0625, 0.0652);
        jitterAmmountFloat1 = distribution(generator);
        //printf("JitterAmmountFloat1: %f\n", jitterAmmountFloat1);

        auto seed2 = std::chrono::high_resolution_clock::now().time_since_epoch().count();
        generator.seed(seed2);
        jitterAmmountFloat2 = distribution(generator);
        //printf("JitterAmmountFloat2: %f\n", jitterAmmountFloat2);

        auto seed3 = std::chrono::high_resolution_clock::now().time_since_epoch().count();
        generator.seed(seed3);
        jitterAmmountFloat3 = distribution(generator);
        //printf("JitterAmmountFloat3: %f\n", jitterAmmountFloat3);

        for (int i = 0; i < 60000; i++) { dptr2[i] = make_float4(jitterAmmountFloat1, jitterAmmountFloat2, jitterAmmountFloat3, 1.0f); }//4
    }
    else
    {
        for (int i = 0; i < 60000; i++) { dptr2[i] = make_float4(0.0f, 0.0f, 0.0f, 1.0f); }//4
    }

    // copy the 60000 filled offsets to the device; the byte count must include
    // sizeof(float4), otherwise only a fraction of the filled elements is transferred
    hipMemcpy(dptr3, dptr2, 60000 * sizeof(float4), hipMemcpyHostToDevice);//5
    //22/10/19 test - end

    launch_kernel(dptr, mesh_width, mesh_height, g_fAnim, dptr3);//, d_offsets);//, dptr2);

    // unmap buffer object
    checkCudaErrors(hipGraphicsUnmapResources(1, vbo_resource, 0));
    //checkCudaErrors(hipGraphicsUnmapResources(1, vbo_resource_2, 0));
}

#ifdef _WIN32
#ifndef FOPEN
#define FOPEN(fHandle,filename,mode) fopen_s(&fHandle, filename, mode)
#endif
#else
#ifndef FOPEN
#define FOPEN(fHandle,filename,mode) (fHandle = fopen(filename, mode))
#endif
#endif

void sdkDumpBin2(void *data, unsigned int bytes, const char *filename)
{
    printf("sdkDumpBin: <%s>\n", filename);
    FILE *fp;
    FOPEN(fp, filename, "wb");
    fwrite(data, bytes, 1, fp);
    fflush(fp);
    fclose(fp);
}

////////////////////////////////////////////////////////////////////////////////
//!
Run the Cuda part of the computation //////////////////////////////////////////////////////////////////////////////// void runAutoTest(int devID, char **argv, char *ref_file) { char *reference_file = NULL; void *imageData = malloc(mesh_width*mesh_height*sizeof(float)); //char ui = ' '; 18/10/19 test - UI // execute the kernel launch_kernel((float4 *)d_vbo_buffer, mesh_width, mesh_height, g_fAnim, (float4 *)offsetsAutoTestD);//, d_offsets);//, (float4 *)d_vbo_buffer_2);//, 0); hipDeviceSynchronize(); getLastCudaError("launch_kernel failed"); checkCudaErrors(hipMemcpy(imageData, d_vbo_buffer, mesh_width*mesh_height*sizeof(float), hipMemcpyDeviceToHost)); sdkDumpBin2(imageData, mesh_width*mesh_height*sizeof(float), "simpleGL.bin"); reference_file = sdkFindFilePath(ref_file, argv[0]); if (reference_file && !sdkCompareBin2BinFloat("simpleGL.bin", reference_file, mesh_width*mesh_height*sizeof(float), MAX_EPSILON_ERROR, THRESHOLD, pArgv[0])) { g_TotalErrors++; } } //////////////////////////////////////////////////////////////////////////////// //! Create VBO //////////////////////////////////////////////////////////////////////////////// void createVBO(GLuint *vbo, struct cudaGraphicsResource **vbo_res, unsigned int vbo_res_flags) { assert(vbo); // create buffer object glGenBuffers(1, vbo); glBindBuffer(GL_ARRAY_BUFFER, *vbo); // initialize buffer object unsigned int size = mesh_width * mesh_height * 4 * sizeof(float); glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); // register this buffer object with CUDA checkCudaErrors(hipGraphicsGLRegisterBuffer(vbo_res, *vbo, vbo_res_flags)); SDK_CHECK_ERROR_GL(); } /* void createVBO2(GLuint *vbo2, struct cudaGraphicsResource **vbo_res_2, unsigned int vbo_res_flags_2) { assert(vbo2); // create buffer object glGenBuffers(1, vbo2); glBindBuffer(GL_ARRAY_BUFFER, *vbo2); // initialize buffer object unsigned int size = ((float(rand()) / float(RAND_MAX)) * (1.0f - -1.0f)) + -1.0f; glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 1); // register this buffer object with CUDA checkCudaErrors(hipGraphicsGLRegisterBuffer(vbo_res_2, *vbo2, vbo_res_flags_2)); SDK_CHECK_ERROR_GL(); } */ //////////////////////////////////////////////////////////////////////////////// //! Delete VBO //////////////////////////////////////////////////////////////////////////////// void deleteVBO(GLuint *vbo, struct cudaGraphicsResource *vbo_res) { // unregister this buffer object with CUDA checkCudaErrors(hipGraphicsUnregisterResource(vbo_res)); glBindBuffer(1, *vbo); glDeleteBuffers(1, vbo); *vbo = 0; } //////////////////////////////////////////////////////////////////////////////// //! 
Display callback //////////////////////////////////////////////////////////////////////////////// void display() { sdkStartTimer(&timer); // run CUDA kernel to generate vertex positions runCuda(&cuda_vbo_resource, &h_offsets, &d_offsets);//, h_offsets, d_offsets);//, &cuda_vbo_resource_2); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // set view matrix glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glTranslatef(0.0, 0.0, translate_z); glRotatef(rotate_x, 1.0, 0.0, 0.0); glRotatef(rotate_y, 0.0, 1.0, 0.0); // render from the vbo glBindBuffer(GL_ARRAY_BUFFER, vbo); glVertexPointer(4, GL_FLOAT, 0, 0); glEnableClientState(GL_VERTEX_ARRAY); glColor3f(1.0, 0.0, 0.0); glDrawArrays(GL_POINTS, 0, mesh_width * mesh_height); glDisableClientState(GL_VERTEX_ARRAY); glutSwapBuffers(); g_fAnim += 0.01f; sdkStopTimer(&timer); computeFPS(); } void timerEvent(int value) { if (glutGetWindow()) { glutPostRedisplay(); glutTimerFunc(REFRESH_DELAY, timerEvent,0); } } void cleanup() { sdkDeleteTimer(&timer); if (vbo) { deleteVBO(&vbo, cuda_vbo_resource); //deleteVBO(&vbo2, cuda_vbo_resource_2);//20/10/19 } } //////////////////////////////////////////////////////////////////////////////// //! Keyboard events handler //////////////////////////////////////////////////////////////////////////////// void keyboard(unsigned char key, int /*x*/, int /*y*/) { //float4 * dptr;//18/10/19 test - UI switch (key) { case (27) : #if defined(__APPLE__) || defined(MACOSX) exit(EXIT_SUCCESS); #else glutDestroyWindow(glutGetWindow()); return; #endif //18/10/19 test - UI case (115) : //launch_new_kernel((float4 *)d_vbo_buffer, mesh_width, mesh_height, g_fAnim, 1); //runCuda(&cuda_vbo_resource, 1); if(!falling) if(UnitOfChangeOnY < 1.072f) UnitOfChangeOnY += 0.032f;// 19/10/19 test - UI //display();// 19/10/19 test - UI printf("JUST_PRESSED_s \n"); return; case (119): if (UnitOfChangeOnY > -1.072f) UnitOfChangeOnY -= 0.032f; if(falling) if(UnitOfChangeOnY > -1.072f) UnitOfChangeOnY -= 0.256f; printf("JUST_PRESSED_w \n"); return; case (97): if (!falling) if (UnitOfChangeOnX > -1.072f) UnitOfChangeOnX -= 0.032f; printf("JUST_PRESSED_a \n"); return; case (100): if (!falling) if (UnitOfChangeOnX < 1.072f) UnitOfChangeOnX += 0.032f; printf("JUST_PRESSED_d \n"); return; case (49): //if (exitTest) // exitTest = false; jitter = true; //srand(static_cast<unsigned int>(clock())); //jitterAmmountInt = 0 + (rand() %2);//(rand() / (float)RAND_MAX * 1.0f) + -1.0f;//((float(rand()) / float(RAND_MAX)) * 1.0f - -1.0f);//(1.0f - -1.0f)) + -1.0f;//* (1.0f - -1.0f)) + -1.0f; //jitterAmmountFloat = double(rand()) / (double(RAND_MAX) + 1.0);//float(5 + rand() % (150 +1 -5))/ 100; //printf("JitterAmmountInt: %d\n", jitterAmmountInt); //printf("JitterAmmountFloat: %f\n", jitterAmmountFloat); printf("JUST_PRESSED_1 \n"); return; case (50): //exitTest = true; jitter = false; printf("JUST_PRESSED_2 \n"); return; case (53): falling = true; UnitOfChangeOnX = 0.0f; UnitOfChangeOnY = 0.0f; /* while (falling) { if (fallingDistence > 1.0f) fallingDistence = 0.0f; fallingDistence += 0.016f; } */ printf("JUST_PRESSED_5 \n"); return; case(54): falling = false; //fallingDistence = 0.0f; printf("JUST_PRESSED_6 \n"); return; case(51): origionalJitter = true; printf("JUST_PRESSED_3 \n"); return; case(52): origionalJitter = false; printf("JUST_PRESSED_4 \n"); return; } } //////////////////////////////////////////////////////////////////////////////// //! 
Mouse event handlers //////////////////////////////////////////////////////////////////////////////// void mouse(int button, int state, int x, int y) { if (state == GLUT_DOWN) { mouse_buttons |= 1<<button; } else if (state == GLUT_UP) { mouse_buttons = 0; } mouse_old_x = x; mouse_old_y = y; } void motion(int x, int y) { float dx, dy; dx = (float)(x - mouse_old_x); dy = (float)(y - mouse_old_y); if (mouse_buttons & 1) { rotate_x += dy * 0.2f; rotate_y += dx * 0.2f; } else if (mouse_buttons & 4) { translate_z += dy * 0.01f; } mouse_old_x = x; mouse_old_y = y; } //////////////////////////////////////////////////////////////////////////////// //! Check if the result is correct or write data to file for external //! regression testing //////////////////////////////////////////////////////////////////////////////// void checkResultCuda(int argc, char **argv, const GLuint &vbo) { if (!d_vbo_buffer) { checkCudaErrors(hipGraphicsUnregisterResource(cuda_vbo_resource)); // map buffer object glBindBuffer(GL_ARRAY_BUFFER, vbo); float *data = (float *) glMapBuffer(GL_ARRAY_BUFFER, GL_READ_ONLY); // check result if (checkCmdLineFlag(argc, (const char **) argv, "regression")) { // write file for regression test sdkWriteFile<float>("./data/regression.dat", data, mesh_width * mesh_height * 3, 0.0, false); } // unmap GL buffer object if (!glUnmapBuffer(GL_ARRAY_BUFFER)) { fprintf(stderr, "Unmap buffer failed.\n"); fflush(stderr); } checkCudaErrors(hipGraphicsGLRegisterBuffer(&cuda_vbo_resource, vbo, hipGraphicsMapFlagsWriteDiscard)); SDK_CHECK_ERROR_GL(); } }
1ffd191656e677477b0ed5ecd0bc448be538209e.cu
//////////////////////////////////////////////////////////////////////////// // // Copyright 1993-2015 NVIDIA Corporation. All rights reserved. // // Please refer to the NVIDIA end user license agreement (EULA) associated // with this source code for terms and conditions that govern your use of // this software. Any use, reproduction, disclosure, or distribution of // this software and related documentation outside the terms of the EULA // is strictly prohibited. // //////////////////////////////////////////////////////////////////////////// /* This example demonstrates how to use the Cuda OpenGL bindings to dynamically modify a vertex buffer using a Cuda kernel. The steps are: 1. Create an empty vertex buffer object (VBO) 2. Register the VBO with Cuda 3. Map the VBO for writing from Cuda 4. Run Cuda kernel to modify the vertex positions 5. Unmap the VBO 6. Render the results using OpenGL Host code */ #define GLEW_STATIC #define FREEGLUT_STATIC // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cstdlib> #include <ctime> #include <random> #include <chrono> using namespace std::chrono; #ifdef _WIN32 # define WINDOWS_LEAN_AND_MEAN # define NOMINMAX # include <windows.h> #endif // OpenGL Graphics includes #include <helper_gl.h> #if defined (__APPLE__) || defined(MACOSX) #pragma clang diagnostic ignored "-Wdeprecated-declarations" #include <GLUT/glut.h> #ifndef glutCloseFunc #define glutCloseFunc glutWMCloseFunc #endif #else #include <GL/freeglut.h> #endif // includes, cuda #include <cuda_runtime.h> #include <cuda_gl_interop.h> // Utilities and timing functions #include <helper_functions.h> // includes cuda.h and cuda_runtime_api.h #include <timer.h> // timing functions // CUDA helper functions #include <helper_cuda.h> // helper functions for CUDA error check #include <vector_types.h> #define MAX_EPSILON_ERROR 10.0f #define THRESHOLD 0.30f #define REFRESH_DELAY 10 //ms //////////////////////////////////////////////////////////////////////////////// // constants const unsigned int window_width = 512; const unsigned int window_height = 512; const unsigned int mesh_width = 256; const unsigned int mesh_height = 256; // vbo variables GLuint vbo; struct cudaGraphicsResource *cuda_vbo_resource; void *d_vbo_buffer = NULL; /* //20/10/19 GLuint vbo2; struct cudaGraphicsResource *cuda_vbo_resource_2; void *d_vbo_buffer_2 = NULL; */ //22/10/19 test float4 *h_offsets; float4 *d_offsets; //void *offsetsAutoTestH = NULL; void *offsetsAutoTestD = NULL; float g_fAnim = 0.0; float UnitOfChangeOnY = 0.0f;// 19/10/19 test -UI float UnitOfChangeOnX = 0.0f; //int jitterAmmountInt = 0;//20/10/19 float jitterAmmountFloat1 = 0.0f;//20/10/19 float jitterAmmountFloat2 = 0.0f; float jitterAmmountFloat3 = 0.0f; bool jitter = false; bool origionalJitter = false; float jitterAmmountFloatOrigional = 0.0f; //bool exitTest = false; //21/10/19 game //float fallingDistence = 0.0f; bool falling = false; float horizontalChange = 0.0f; float randomHeightTop = 0.0f; float randomHeightBottom = 0.0f; // mouse controls int mouse_old_x, mouse_old_y; int mouse_buttons = 0; float rotate_x = 0.0, rotate_y = 0.0; float translate_z = -3.0; StopWatchInterface *timer = NULL; // Auto-Verification Code int fpsCount = 0; // FPS count for averaging int fpsLimit = 1; // FPS limit for sampling int g_Index = 0; float avgFPS = 0.0f; unsigned int frameCount = 0; unsigned int g_TotalErrors = 0; bool g_bQAReadback = false; int *pArgc = NULL; char **pArgv = NULL; #define MAX(a,b) ((a > b) ? 
a : b) //////////////////////////////////////////////////////////////////////////////// // declaration, forward bool runTest(int argc, char **argv, char *ref_file); void cleanup(); // GL functionality bool initGL(int *argc, char **argv); void createVBO(GLuint *vbo, struct cudaGraphicsResource **vbo_res, unsigned int vbo_res_flags); //20/10/19 void createVBO2(GLuint *vbo, struct cudaGraphicsResource **vbo_res, unsigned int vbo_res_flags); void deleteVBO(GLuint *vbo, struct cudaGraphicsResource *vbo_res); // rendering callbacks void display(); void keyboard(unsigned char key, int x, int y); void mouse(int button, int state, int x, int y); void motion(int x, int y); void timerEvent(int value); // Cuda functionality void runCuda(struct cudaGraphicsResource **vbo_resource, float4 **Hoffsets, float4 **Doffsets);//, float4 * h_offsets, float4 * d_offsets);//, struct cudaGraphicsResource **vbo_resource_2);//20/10/19 test void runAutoTest(int devID, char **argv, char *ref_file); void checkResultCuda(int argc, char **argv, const GLuint &vbo); const char *sSDKsample = "simpleGL (VBO)"; /////////////////////////////////////////////////////////////////////////////// //! Simple kernel to modify vertex positions in sine wave pattern //! @param data data in global memory /////////////////////////////////////////////////////////////////////////////// __global__ void simple_vbo_kernel(float4 *pos, unsigned int width, unsigned int height, float time, float UnitOfChangeOnX, float UnitOfChangeOnY, float4 *offsets, bool falling, float horizontalChange, float randomHeightTop, float randomHeightBottom)//, float FallingDistence)//, float jitterAmmountFloat) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; //printf("%d \n", x); // calculate uv coordinates float u = x / (float) width; float v = y / (float) height; u = u * 2.0f - 1.0f;//17/10/19 test - making easier to see dots to try make circle (old code - *2.0f - 1.0f;) v = v * 2.0f - 1.0f;//*2.0f - 1.0f; //16/10/19 test - Q a. 
start if(u > -0.11f + UnitOfChangeOnX & u < 0.11f + UnitOfChangeOnX){// u > -0.11f & u < 0.11f (new reduced x values) if(v > -0.125f + UnitOfChangeOnY & v < 0.125f + UnitOfChangeOnY){ pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); } else { float freq = 4.0f; float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f; if (falling) { if(u > 0.75f & u < 1.0f){//bottom block if(v > 0.90f - randomHeightBottom & v < 1.0f){ //printf("in if"); //float gameTime = time; pos[y*width + x] = make_float4(u-horizontalChange, 0.0f, v, 1.0f); } else { pos[y*width + x] = make_float4(u, -0.5f, v, 1.0f); } } else { pos[y*width + x] = make_float4(u, -0.5f, v, 1.0f); } if (u > 0.75f & u < 1.0f) //{//top block if (v > -1.1f & v < -0.90f + randomHeightTop) //{ //printf("in if"); //float gameTime = time; pos[y*width + x] = make_float4(u - horizontalChange, 0.0f, v, 1.0f); //} //else { // pos[y*width + x] = make_float4(u, -0.5f, v, 1.0f); //} //} //else { // pos[y*width + x] = make_float4(u, -0.5f, v, 1.0f); //} } else { // write output vertex pos[y*width + x] = make_float4(u + offsets[y*width + x].x, w + offsets[y*width + x].y, v + offsets[y*width + x].z, 1.0f); } } } else { //printf("*** IN ELSE *** \n"); // calculate simple sine wave pattern float freq = 4.0f; float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f; if(falling){ if(u > 0.75f & u < 1.0f){//bottom block if(v > 0.90f - randomHeightBottom & v < 1.0f){ //printf("in if"); //float gameTime = time; pos[y*width + x] = make_float4(u-horizontalChange, 0.0f, v, 1.0f); } else { pos[y*width + x] = make_float4(u, -0.5f, v, 1.0f); } } else { pos[y*width + x] = make_float4(u, -0.5f, v, 1.0f); } if (u > 0.75f & u < 1.0f) //{//top block if (v > -1.1f & v < -0.90f + randomHeightTop) //{ //printf("in if"); //float gameTime = time; pos[y*width + x] = make_float4(u - horizontalChange, 0.0f, v, 1.0f); //} //else { // pos[y*width + x] = make_float4(u, -0.5f, v, 1.0f); //} //} //else { // pos[y*width + x] = make_float4(u, -0.5f, v, 1.0f); //} } else { // write output vertex pos[y*width + x] = make_float4(u + offsets[y*width + x].x, w + offsets[y*width + x].y, v + offsets[y*width + x].z, 1.0f); } } //16/10/19 test - Q a. end __syncthreads(); //17/10/19 test - Q a. 
start //__syncthreads();//extra top part //if (u > -0.109f & u < 0.109f) // if (v > -0.126f & v < 0.126f) // pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); if (u > -0.111f + UnitOfChangeOnX & u < 0.111f + UnitOfChangeOnX)//1 - u > -0.111f & u < 0.111f (new reduced x values) if (v > -0.109f + UnitOfChangeOnY & v < 0.109f + UnitOfChangeOnY) pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); //__syncthreads(); //if (u > -0.126f & u < 0.126f)//test // if (v > -0.125f & v < 0.125f) // pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); __syncthreads();//2 if (u > -0.127f + UnitOfChangeOnX & u < 0.127f + UnitOfChangeOnX)// u > -0.127f & u < 0.127f (new reduced x values) if (v > -0.093f + UnitOfChangeOnY & v < 0.093f + UnitOfChangeOnY) pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); __syncthreads();//3 if (u > -0.143f + UnitOfChangeOnX & u < 0.143f + UnitOfChangeOnX)// u > -0.143f & u < 0.143f (new reduced x values) if (v > -0.077f + UnitOfChangeOnY & v < 0.077f + UnitOfChangeOnY) pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); __syncthreads();//4 if (u > -0.148f + UnitOfChangeOnX & u < 0.148f + UnitOfChangeOnX)// u > -0.148f & u < 0.148f (new reduced x values) if (v > -0.061f + UnitOfChangeOnY & v < 0.061f + UnitOfChangeOnY) pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); __syncthreads();//5 if (u > -0.164f + UnitOfChangeOnX & u < 0.164f + UnitOfChangeOnX)// u > -0.164f & u < 0.164f (new reduced x values) if (v > -0.045f + UnitOfChangeOnY & v < 0.045f + UnitOfChangeOnY) pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); __syncthreads();// "top part" 1 if (u > -0.094f + UnitOfChangeOnX & u < 0.094f + UnitOfChangeOnX) if (v > -0.141f + UnitOfChangeOnY & v < 0.141f + UnitOfChangeOnY) pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); __syncthreads();// "top part" 2 if (u > -0.078f + UnitOfChangeOnX & u < 0.078f + UnitOfChangeOnX) if (v > -0.157f + UnitOfChangeOnY & v < 0.157f + UnitOfChangeOnY) pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); __syncthreads();// "top part" 3 if (u > -0.062f + UnitOfChangeOnX & u < 0.062f + UnitOfChangeOnX) if (v > -0.173f + UnitOfChangeOnY& v < 0.173f + UnitOfChangeOnY) pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); //17/10/19 test - Q a. 
end } __global__ void new_vbo_x_kernel(float4 *pos, unsigned int width, unsigned int height, float time, float jitterAmmountFloatOrigional)//, float4 *randNum) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; //printf("JitterAmmount: %f\n", jitterAmmountFloat); //printf("") /* float u = x / (float)width; float v = y / (float)height; u = u * 2.0f - 1.0f; v = v * 2.0f - 1.0f; //pos[y*width + x] = make_float4(u + jitterAmmountFloat, 0.0f, v + jitterAmmountFloat, 1.0f); */ //pos[y*width + x].x = u + jitterAmmountFloat;//make_float4(u + jitterAmmountFloat, 0.0f + jitterAmmountFloat, v + jitterAmmountFloat, 1.0f); pos[y*width + x].x += pos[y*width + x].x * jitterAmmountFloatOrigional; //pos[y*width + x].y += pos[y*width + x].y * jitterAmmountFloat; //pos[y*width + x].z += pos[y*width + x].z * jitterAmmountFloat; } __global__ void new_vbo_y_kernel(float4 *pos, unsigned int width, unsigned int height, float time, float jitterAmmountFloatOrigional)//, float4 *randNum) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; /* float u = x / (float)width; float v = y / (float)height; u = u * 2.0f - 1.0f; v = v * 2.0f - 1.0f; //pos[y*width + x].y = u + jitterAmmountFloat; */ /* float freq = 4.0f; float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f; pos[y*width + x].y = w + jitterAmmountFloat;//make_float4(u, w + jitterAmmountFloat, v, 1.0f); */ pos[y*width + x].y += pos[y*width + x].y * jitterAmmountFloatOrigional; } __global__ void new_vbo_z_kernel(float4 *pos, unsigned int width, unsigned int height, float time, float jitterAmmountFloatOrigional)//, float4 *randNum) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; /* float u = x / (float)width; float v = y / (float)height; u = u * 2.0f - 1.0f; v = v * 2.0f - 1.0f; pos[y*width + x].z = v + jitterAmmountFloat; */ pos[y*width + x].z += pos[y*width + x].z * jitterAmmountFloatOrigional; } __global__ void game_kernel(float4 *pos, unsigned int width, unsigned int height, float time, bool falling, float UnitOfChangeOnX, float UnitOfChangeOnY)//, float jitterAmmountFloatOrigional)//, float seedTest)//, float4 *randNum) { /* if (falling) { unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; float u = x / (float)width; float v = y / (float)height; u = u * 2.0f - 1.0f; v = v * 2.0f - 1.0f; if (u > 0.0f & u < 1.0f) if (v > 0.0f & v < -1.0f) pos[y*width + x] = make_float4(u, 0.0f, v, 1.0f); } */ } void launch_kernel(float4 *pos, unsigned int mesh_width, unsigned int mesh_height, float time, float4 *offsets)//, float4 *randNum) { // execute the kernel dim3 block(8, 8, 1); dim3 grid(mesh_width / block.x, mesh_height / block.y, 1); auto seedTest = std::chrono::high_resolution_clock::now().time_since_epoch().count(); std::mt19937 generatorTest; generatorTest.seed(seedTest); std::uniform_real_distribution<double> distributionTest(0.0, 0.75); auto seedTest2 = std::chrono::high_resolution_clock::now().time_since_epoch().count(); std::mt19937 generatorTest2; generatorTest2.seed(seedTest2); if (falling) UnitOfChangeOnY += 0.016; if (falling & UnitOfChangeOnY > 1.0f) falling = false;//UnitOfChangeOnY = 0.0f; if (falling) horizontalChange += 0.016f; if (falling & horizontalChange > 2.0f) horizontalChange = 0.0f; if (horizontalChange == 0.0f) randomHeightTop = distributionTest(generatorTest); if (horizontalChange == 0.0f) randomHeightBottom 
= distributionTest(generatorTest2); //auto seedTest = std::chrono::high_resolution_clock::now().time_since_epoch().count(); game_kernel << < grid, block >> > (pos, mesh_width, mesh_height, time, falling, UnitOfChangeOnX, UnitOfChangeOnY); simple_vbo_kernel << < grid, block >> > (pos, mesh_width, mesh_height, time, UnitOfChangeOnX, UnitOfChangeOnY, offsets,falling, horizontalChange, randomHeightTop, randomHeightBottom);//, fallingDistence);//, jitterAmmountFloat); //20/10/19 test - jitter //if (exitTest != true) { if (origionalJitter) { //srand(static_cast<unsigned int>(clock())); //jitterAmmountFloat = float(rand()) / (float(RAND_MAX) * 2.0f - 1.0f);//+ 1.0); //std::default_random_engine generator; //milliseconds ms = duration_cast<milliseconds>(system_clock::now().time_since_epoch()); //float testFloat = (float)ms; auto seed = std::chrono::high_resolution_clock::now().time_since_epoch().count(); std::mt19937 generator; generator.seed(seed);//std::time(0));// * 1000); std::uniform_real_distribution<double> distribution(-0.0625 , 0.0652);//-0.03125f, 0.03125f); //doubles from jitterAmmountFloatOrigional = distribution(generator); //printf("JitterAmmount x: %f\n", jitterAmmountFloatOrigional); new_vbo_x_kernel << <grid, block >> > (pos, mesh_width, mesh_height, time, jitterAmmountFloatOrigional);//, randNum); auto seed2 = std::chrono::high_resolution_clock::now().time_since_epoch().count(); generator.seed(seed2);//std::time(0));// * 1000); jitterAmmountFloatOrigional = distribution(generator); //printf("JitterAmmount y: %f\n", jitterAmmountFloatOrigional); new_vbo_y_kernel << <grid, block >> > (pos, mesh_width, mesh_height, time, jitterAmmountFloatOrigional); auto seed3 = std::chrono::high_resolution_clock::now().time_since_epoch().count(); generator.seed(seed3);//std::time(0));// * 1000); jitterAmmountFloatOrigional = distribution(generator); //printf("JitterAmmount z: %f\n", jitterAmmountFloatOrigional); new_vbo_z_kernel << <grid, block >> > (pos, mesh_width, mesh_height, time, jitterAmmountFloatOrigional); } //} } bool checkHW(char *name, const char *gpuType, int dev) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); strcpy(name, deviceProp.name); if (!STRNCASECMP(deviceProp.name, gpuType, strlen(gpuType))) { return true; } else { return false; } } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { char *ref_file = NULL; pArgc = &argc; pArgv = argv; #if defined(__linux__) setenv ("DISPLAY", ":0", 0); #endif printf("%s starting...\n", sSDKsample); if (argc > 1) { if (checkCmdLineFlag(argc, (const char **)argv, "file")) { // In this mode, we are running non-OpenGL and doing a compare of the VBO was generated correctly getCmdLineArgumentString(argc, (const char **)argv, "file", (char **)&ref_file); } } printf("\n"); runTest(argc, argv, ref_file); printf("%s completed, returned %s\n", sSDKsample, (g_TotalErrors == 0) ? "OK" : "ERROR!"); exit(g_TotalErrors == 0 ? EXIT_SUCCESS : EXIT_FAILURE); } void computeFPS() { frameCount++; fpsCount++; if (fpsCount == fpsLimit) { avgFPS = 1.f / (sdkGetAverageTimerValue(&timer) / 1000.f); fpsCount = 0; fpsLimit = (int)MAX(avgFPS, 1.f); sdkResetTimer(&timer); } char fps[256]; sprintf(fps, "Cuda GL Interop (VBO): %3.1f fps (Max 100Hz)", avgFPS); glutSetWindowTitle(fps); } //////////////////////////////////////////////////////////////////////////////// //! 
Initialize GL //////////////////////////////////////////////////////////////////////////////// bool initGL(int *argc, char **argv) { glutInit(argc, argv); glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE); glutInitWindowSize(window_width, window_height); glutCreateWindow("Cuda GL Interop (VBO)"); glutDisplayFunc(display); glutKeyboardFunc(keyboard); glutMotionFunc(motion); glutTimerFunc(REFRESH_DELAY, timerEvent,0); // initialize necessary OpenGL extensions if (! isGLVersionSupported(2,0)) { fprintf(stderr, "ERROR: Support for necessary OpenGL extensions missing."); fflush(stderr); return false; } // default initialization glClearColor(0.0, 0.0, 0.0, 1.0); glDisable(GL_DEPTH_TEST); // viewport glViewport(0, 0, window_width, window_height); // projection glMatrixMode(GL_PROJECTION); glLoadIdentity(); gluPerspective(60.0, (GLfloat)window_width / (GLfloat) window_height, 0.1, 10.0); SDK_CHECK_ERROR_GL(); return true; } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// bool runTest(int argc, char **argv, char *ref_file) { // Create the CUTIL timer sdkCreateTimer(&timer); // use command-line specified CUDA device, otherwise use device with highest Gflops/s int devID = findCudaDevice(argc, (const char **)argv); // command line mode only if (ref_file != NULL) { // create VBO checkCudaErrors(cudaMalloc((void **)&d_vbo_buffer, mesh_width*mesh_height*4*sizeof(float))); // run the cuda part runAutoTest(devID, argv, ref_file); // check result of Cuda step checkResultCuda(argc, argv, vbo); cudaFree(d_vbo_buffer); d_vbo_buffer = NULL; } else { // First initialize OpenGL context, so we can properly set the GL for CUDA. // This is necessary in order to achieve optimal performance with OpenGL/CUDA interop. if (false == initGL(&argc, argv)) { return false; } // register callbacks glutDisplayFunc(display); glutKeyboardFunc(keyboard); glutMouseFunc(mouse); glutMotionFunc(motion); #if defined (__APPLE__) || defined(MACOSX) atexit(cleanup); #else glutCloseFunc(cleanup); #endif // create VBO createVBO(&vbo, &cuda_vbo_resource, cudaGraphicsMapFlagsWriteDiscard); //22/10/19 - start unsigned int size = mesh_width * mesh_height * 4 * sizeof(float);//1. //float4 *h_offsets;//2. MIGHT HAVE TO MAKE THESE GLOBALS //float4 *d_offsets;// MIGHT HAVE TO MAKE THESE GLOBALS //float4 **testh = &h_offsets; //float4 **testd = &d_offsets; h_offsets = (float4*)malloc(size);//test - size);//3 //cudaMalloc(&d_offsets, size);//test - size); cudaMalloc((void **)&d_offsets, size);//exactly lab3 way //for (int i = 0; i < size; i++) { h_offsets[i] = { 0.1f, 0.1f, 0.1f, 0.1f }; }//4 - fill h_offset with random xyz THESE MIGHT HAVE TO HAPPEN IN RUNCUDA //cudaMemcpy(d_offsets, h_offsets, size, cudaMemcpyHostToDevice);//5 THESE MIGHT HAVE TO HAPPEN IN RUNCUDA /* if (jitter) { for (int i = 0; i < size; i++) { h_offsets[i] = make_float4(0.5f, 0.5f, 0.5f, 1.0f); }//4 } else { for (int i = 0; i < 20000; i++) { h_offsets[i] = make_float4(0.0f, 0.0f, 0.0f, 1.0f); }//4 } cudaMemcpy(d_offsets, h_offsets, size, cudaMemcpyHostToDevice);//5 */ //22/10/19 - end //20/10/19 //createVBO2(&vbo2, &cuda_vbo_resource_2, cudaGraphicsMapFlagsWriteDiscard); // run the cuda part runCuda(&cuda_vbo_resource, &h_offsets, &d_offsets);//, &cuda_vbo_resource_2);//, 0);// 18/10/19 test - UI // start rendering mainloop glutMainLoop(); } return true; } //////////////////////////////////////////////////////////////////////////////// //! 
Run the Cuda part of the computation
////////////////////////////////////////////////////////////////////////////////
void runCuda(struct cudaGraphicsResource **vbo_resource, float4 **Hoffsets, float4 **Doffsets)//, struct cudaGraphicsResource **vbo_resource_2)// 18/10/19 test - UI
{
    // map OpenGL buffer object for writing from CUDA
    float4 *dptr;
    checkCudaErrors(cudaGraphicsMapResources(1, vbo_resource, 0));
    size_t num_bytes;
    checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, *vbo_resource));
    //printf("CUDA mapped VBO: May access %ld bytes\n", num_bytes);

    // execute the kernel
    //    dim3 block(8, 8, 1);
    //    dim3 grid(mesh_width / block.x, mesh_height / block.y, 1);
    //    kernel<<< grid, block>>>(dptr, mesh_width, mesh_height, g_fAnim);

    /*
    //20/10/19
    float4 *dptr2;
    checkCudaErrors(cudaGraphicsMapResources(1, vbo_resource_2, 0));
    size_t num_bytes_2;
    checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void **)&dptr2, &num_bytes_2, *vbo_resource_2));
    */

    //22/10/19 test - start
    float4 *dptr2;//23/10/19 host-side offset buffer
    float4 *dptr3;//23/10/19 device-side offset buffer
    dptr2 = *Hoffsets;
    dptr3 = *Doffsets;
    unsigned int size = mesh_width * mesh_height * 4 * sizeof(float); // full offset-buffer size in bytes (allocated in runTest)

    if (jitter)
    {
        // re-seed the generator for each component so x, y and z jitter independently
        auto seed = std::chrono::high_resolution_clock::now().time_since_epoch().count();
        std::mt19937 generator;
        generator.seed(seed);
        std::uniform_real_distribution<double> distribution(-0.0625, 0.0652);
        jitterAmmountFloat1 = distribution(generator);
        //printf("JitterAmmountFloat1: %f\n", jitterAmmountFloat1);

        auto seed2 = std::chrono::high_resolution_clock::now().time_since_epoch().count();
        generator.seed(seed2);
        jitterAmmountFloat2 = distribution(generator);
        //printf("JitterAmmountFloat2: %f\n", jitterAmmountFloat2);

        auto seed3 = std::chrono::high_resolution_clock::now().time_since_epoch().count();
        generator.seed(seed3);
        jitterAmmountFloat3 = distribution(generator);
        //printf("JitterAmmountFloat3: %f\n", jitterAmmountFloat3);

        for (int i = 0; i < 60000; i++) { dptr2[i] = make_float4(jitterAmmountFloat1, jitterAmmountFloat2, jitterAmmountFloat3, 1.0f); }//4
    }
    else
    {
        for (int i = 0; i < 60000; i++) { dptr2[i] = make_float4(0.0f, 0.0f, 0.0f, 1.0f); }//4
    }

    // copy the 60000 filled offsets to the device; the byte count must include
    // sizeof(float4), otherwise only a fraction of the filled elements is transferred
    cudaMemcpy(dptr3, dptr2, 60000 * sizeof(float4), cudaMemcpyHostToDevice);//5
    //22/10/19 test - end

    launch_kernel(dptr, mesh_width, mesh_height, g_fAnim, dptr3);//, d_offsets);//, dptr2);

    // unmap buffer object
    checkCudaErrors(cudaGraphicsUnmapResources(1, vbo_resource, 0));
    //checkCudaErrors(cudaGraphicsUnmapResources(1, vbo_resource_2, 0));
}

#ifdef _WIN32
#ifndef FOPEN
#define FOPEN(fHandle,filename,mode) fopen_s(&fHandle, filename, mode)
#endif
#else
#ifndef FOPEN
#define FOPEN(fHandle,filename,mode) (fHandle = fopen(filename, mode))
#endif
#endif

void sdkDumpBin2(void *data, unsigned int bytes, const char *filename)
{
    printf("sdkDumpBin: <%s>\n", filename);
    FILE *fp;
    FOPEN(fp, filename, "wb");
    fwrite(data, bytes, 1, fp);
    fflush(fp);
    fclose(fp);
}

////////////////////////////////////////////////////////////////////////////////
//!
Run the Cuda part of the computation //////////////////////////////////////////////////////////////////////////////// void runAutoTest(int devID, char **argv, char *ref_file) { char *reference_file = NULL; void *imageData = malloc(mesh_width*mesh_height*sizeof(float)); //char ui = ' '; 18/10/19 test - UI // execute the kernel launch_kernel((float4 *)d_vbo_buffer, mesh_width, mesh_height, g_fAnim, (float4 *)offsetsAutoTestD);//, d_offsets);//, (float4 *)d_vbo_buffer_2);//, 0); cudaDeviceSynchronize(); getLastCudaError("launch_kernel failed"); checkCudaErrors(cudaMemcpy(imageData, d_vbo_buffer, mesh_width*mesh_height*sizeof(float), cudaMemcpyDeviceToHost)); sdkDumpBin2(imageData, mesh_width*mesh_height*sizeof(float), "simpleGL.bin"); reference_file = sdkFindFilePath(ref_file, argv[0]); if (reference_file && !sdkCompareBin2BinFloat("simpleGL.bin", reference_file, mesh_width*mesh_height*sizeof(float), MAX_EPSILON_ERROR, THRESHOLD, pArgv[0])) { g_TotalErrors++; } } //////////////////////////////////////////////////////////////////////////////// //! Create VBO //////////////////////////////////////////////////////////////////////////////// void createVBO(GLuint *vbo, struct cudaGraphicsResource **vbo_res, unsigned int vbo_res_flags) { assert(vbo); // create buffer object glGenBuffers(1, vbo); glBindBuffer(GL_ARRAY_BUFFER, *vbo); // initialize buffer object unsigned int size = mesh_width * mesh_height * 4 * sizeof(float); glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); // register this buffer object with CUDA checkCudaErrors(cudaGraphicsGLRegisterBuffer(vbo_res, *vbo, vbo_res_flags)); SDK_CHECK_ERROR_GL(); } /* void createVBO2(GLuint *vbo2, struct cudaGraphicsResource **vbo_res_2, unsigned int vbo_res_flags_2) { assert(vbo2); // create buffer object glGenBuffers(1, vbo2); glBindBuffer(GL_ARRAY_BUFFER, *vbo2); // initialize buffer object unsigned int size = ((float(rand()) / float(RAND_MAX)) * (1.0f - -1.0f)) + -1.0f; glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 1); // register this buffer object with CUDA checkCudaErrors(cudaGraphicsGLRegisterBuffer(vbo_res_2, *vbo2, vbo_res_flags_2)); SDK_CHECK_ERROR_GL(); } */ //////////////////////////////////////////////////////////////////////////////// //! Delete VBO //////////////////////////////////////////////////////////////////////////////// void deleteVBO(GLuint *vbo, struct cudaGraphicsResource *vbo_res) { // unregister this buffer object with CUDA checkCudaErrors(cudaGraphicsUnregisterResource(vbo_res)); glBindBuffer(1, *vbo); glDeleteBuffers(1, vbo); *vbo = 0; } //////////////////////////////////////////////////////////////////////////////// //! 
Display callback //////////////////////////////////////////////////////////////////////////////// void display() { sdkStartTimer(&timer); // run CUDA kernel to generate vertex positions runCuda(&cuda_vbo_resource, &h_offsets, &d_offsets);//, h_offsets, d_offsets);//, &cuda_vbo_resource_2); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // set view matrix glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glTranslatef(0.0, 0.0, translate_z); glRotatef(rotate_x, 1.0, 0.0, 0.0); glRotatef(rotate_y, 0.0, 1.0, 0.0); // render from the vbo glBindBuffer(GL_ARRAY_BUFFER, vbo); glVertexPointer(4, GL_FLOAT, 0, 0); glEnableClientState(GL_VERTEX_ARRAY); glColor3f(1.0, 0.0, 0.0); glDrawArrays(GL_POINTS, 0, mesh_width * mesh_height); glDisableClientState(GL_VERTEX_ARRAY); glutSwapBuffers(); g_fAnim += 0.01f; sdkStopTimer(&timer); computeFPS(); } void timerEvent(int value) { if (glutGetWindow()) { glutPostRedisplay(); glutTimerFunc(REFRESH_DELAY, timerEvent,0); } } void cleanup() { sdkDeleteTimer(&timer); if (vbo) { deleteVBO(&vbo, cuda_vbo_resource); //deleteVBO(&vbo2, cuda_vbo_resource_2);//20/10/19 } } //////////////////////////////////////////////////////////////////////////////// //! Keyboard events handler //////////////////////////////////////////////////////////////////////////////// void keyboard(unsigned char key, int /*x*/, int /*y*/) { //float4 * dptr;//18/10/19 test - UI switch (key) { case (27) : #if defined(__APPLE__) || defined(MACOSX) exit(EXIT_SUCCESS); #else glutDestroyWindow(glutGetWindow()); return; #endif //18/10/19 test - UI case (115) : //launch_new_kernel((float4 *)d_vbo_buffer, mesh_width, mesh_height, g_fAnim, 1); //runCuda(&cuda_vbo_resource, 1); if(!falling) if(UnitOfChangeOnY < 1.072f) UnitOfChangeOnY += 0.032f;// 19/10/19 test - UI //display();// 19/10/19 test - UI printf("JUST_PRESSED_s \n"); return; case (119): if (UnitOfChangeOnY > -1.072f) UnitOfChangeOnY -= 0.032f; if(falling) if(UnitOfChangeOnY > -1.072f) UnitOfChangeOnY -= 0.256f; printf("JUST_PRESSED_w \n"); return; case (97): if (!falling) if (UnitOfChangeOnX > -1.072f) UnitOfChangeOnX -= 0.032f; printf("JUST_PRESSED_a \n"); return; case (100): if (!falling) if (UnitOfChangeOnX < 1.072f) UnitOfChangeOnX += 0.032f; printf("JUST_PRESSED_d \n"); return; case (49): //if (exitTest) // exitTest = false; jitter = true; //srand(static_cast<unsigned int>(clock())); //jitterAmmountInt = 0 + (rand() %2);//(rand() / (float)RAND_MAX * 1.0f) + -1.0f;//((float(rand()) / float(RAND_MAX)) * 1.0f - -1.0f);//(1.0f - -1.0f)) + -1.0f;//* (1.0f - -1.0f)) + -1.0f; //jitterAmmountFloat = double(rand()) / (double(RAND_MAX) + 1.0);//float(5 + rand() % (150 +1 -5))/ 100; //printf("JitterAmmountInt: %d\n", jitterAmmountInt); //printf("JitterAmmountFloat: %f\n", jitterAmmountFloat); printf("JUST_PRESSED_1 \n"); return; case (50): //exitTest = true; jitter = false; printf("JUST_PRESSED_2 \n"); return; case (53): falling = true; UnitOfChangeOnX = 0.0f; UnitOfChangeOnY = 0.0f; /* while (falling) { if (fallingDistence > 1.0f) fallingDistence = 0.0f; fallingDistence += 0.016f; } */ printf("JUST_PRESSED_5 \n"); return; case(54): falling = false; //fallingDistence = 0.0f; printf("JUST_PRESSED_6 \n"); return; case(51): origionalJitter = true; printf("JUST_PRESSED_3 \n"); return; case(52): origionalJitter = false; printf("JUST_PRESSED_4 \n"); return; } } //////////////////////////////////////////////////////////////////////////////// //! 
Mouse event handlers //////////////////////////////////////////////////////////////////////////////// void mouse(int button, int state, int x, int y) { if (state == GLUT_DOWN) { mouse_buttons |= 1<<button; } else if (state == GLUT_UP) { mouse_buttons = 0; } mouse_old_x = x; mouse_old_y = y; } void motion(int x, int y) { float dx, dy; dx = (float)(x - mouse_old_x); dy = (float)(y - mouse_old_y); if (mouse_buttons & 1) { rotate_x += dy * 0.2f; rotate_y += dx * 0.2f; } else if (mouse_buttons & 4) { translate_z += dy * 0.01f; } mouse_old_x = x; mouse_old_y = y; } //////////////////////////////////////////////////////////////////////////////// //! Check if the result is correct or write data to file for external //! regression testing //////////////////////////////////////////////////////////////////////////////// void checkResultCuda(int argc, char **argv, const GLuint &vbo) { if (!d_vbo_buffer) { checkCudaErrors(cudaGraphicsUnregisterResource(cuda_vbo_resource)); // map buffer object glBindBuffer(GL_ARRAY_BUFFER, vbo); float *data = (float *) glMapBuffer(GL_ARRAY_BUFFER, GL_READ_ONLY); // check result if (checkCmdLineFlag(argc, (const char **) argv, "regression")) { // write file for regression test sdkWriteFile<float>("./data/regression.dat", data, mesh_width * mesh_height * 3, 0.0, false); } // unmap GL buffer object if (!glUnmapBuffer(GL_ARRAY_BUFFER)) { fprintf(stderr, "Unmap buffer failed.\n"); fflush(stderr); } checkCudaErrors(cudaGraphicsGLRegisterBuffer(&cuda_vbo_resource, vbo, cudaGraphicsMapFlagsWriteDiscard)); SDK_CHECK_ERROR_GL(); } }
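// Note on the jitter code above: launch_kernel and runCuda construct and re-seed a
// fresh std::mt19937 from the clock for every component of every frame. A minimal
// sketch of an alternative, assuming only the <random> and <chrono> headers already
// included in this file, is to seed one engine once and keep drawing from it. The
// helper name below is hypothetical and is not part of the original sample:
static float nextJitterOffset()
{
    // seed once from the clock, then reuse the same engine on every call
    static std::mt19937 engine(
        static_cast<unsigned int>(std::chrono::high_resolution_clock::now().time_since_epoch().count()));
    static std::uniform_real_distribution<float> dist(-0.0625f, 0.0625f);
    return dist(engine);
}
// e.g. jitterAmmountFloat1 = nextJitterOffset(); would replace one seed/seed2/seed3 block.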
dd40b9c442868cb803d4e4dc34de56536cf540cb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This program computes the sum of two vectors of length N
// By: Nick from CoffeeBeforeArch

#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <iterator>
#include <vector>

using std::begin;
using std::copy;
using std::cout;
using std::end;
using std::generate;
using std::vector;

// CUDA kernel for vector addition
// __global__ means this is called from the CPU, and runs on the GPU
__global__ void vectorAdd(int* a, int* b, int* c, int N) {
  // Calculate global thread ID
  int tid = (blockIdx.x * blockDim.x) + threadIdx.x;

  // Boundary check
  if (tid < N) {
    // Each thread adds a single element
    c[tid] = a[tid] + b[tid];
  }
}

// Check vector add result
void verify_result(vector<int> &a, vector<int> &b, vector<int> &c) {
  for (size_t i = 0; i < a.size(); i++) {
    assert(c[i] == a[i] + b[i]);
  }
}

int main() {
  // Array size of 2^26 (67,108,864 elements)
  constexpr int N = 1 << 26;
  size_t bytes = sizeof(int) * N;

  // Vectors for holding the host-side (CPU-side) data
  vector<int> a(N);
  vector<int> b(N);
  vector<int> c(N);

  // Initialize random numbers in each array
  generate(begin(a), end(a), []() { return rand() % 100; });
  generate(begin(b), end(b), []() { return rand() % 100; });

  // Allocate memory on the device
  int *d_a, *d_b, *d_c;
  hipMalloc(&d_a, bytes);
  hipMalloc(&d_b, bytes);
  hipMalloc(&d_c, bytes);

  // Copy data from the host to the device (CPU -> GPU)
  hipMemcpy(d_a, a.data(), bytes, hipMemcpyHostToDevice);
  hipMemcpy(d_b, b.data(), bytes, hipMemcpyHostToDevice);

  // Threads per CTA (1024 threads per CTA)
  int NUM_THREADS = 1 << 10;

  // CTAs per Grid
  // We need to launch at LEAST as many threads as we have elements
  // This equation pads an extra CTA to the grid if N cannot evenly be divided
  // by NUM_THREADS (e.g. N = 1025, NUM_THREADS = 1024)
  int NUM_BLOCKS = (N + NUM_THREADS - 1) / NUM_THREADS;

  // Launch the kernel on the GPU
  // Kernel calls are asynchronous (the CPU program continues execution after
  // call, but not necessarily before the kernel finishes)
  hipLaunchKernelGGL(( vectorAdd), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, d_a, d_b, d_c, N);

  // Copy sum vector from device to host
  // hipMemcpy is a synchronous operation, and waits for the prior kernel
  // launch to complete (both go to the default stream in this case).
  // Therefore, this hipMemcpy acts as both a memcpy and synchronization
  // barrier.
  hipMemcpy(c.data(), d_c, bytes, hipMemcpyDeviceToHost);

  // Check result for errors
  verify_result(a, b, c);

  // Free memory on device
  hipFree(d_a);
  hipFree(d_b);
  hipFree(d_c);

  cout << "COMPLETED SUCCESSFULLY\n";

  return 0;
}
dd40b9c442868cb803d4e4dc34de56536cf540cb.cu
// This program computes the sum of two vectors of length N
// By: Nick from CoffeeBeforeArch

#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <iterator>
#include <vector>

using std::begin;
using std::copy;
using std::cout;
using std::end;
using std::generate;
using std::vector;

// CUDA kernel for vector addition
// __global__ means this is called from the CPU, and runs on the GPU
__global__ void vectorAdd(int* a, int* b, int* c, int N) {
  // Calculate global thread ID
  int tid = (blockIdx.x * blockDim.x) + threadIdx.x;

  // Boundary check
  if (tid < N) {
    // Each thread adds a single element
    c[tid] = a[tid] + b[tid];
  }
}

// Check vector add result
void verify_result(vector<int> &a, vector<int> &b, vector<int> &c) {
  for (size_t i = 0; i < a.size(); i++) {
    assert(c[i] == a[i] + b[i]);
  }
}

int main() {
  // Array size of 2^26 (67,108,864 elements)
  constexpr int N = 1 << 26;
  size_t bytes = sizeof(int) * N;

  // Vectors for holding the host-side (CPU-side) data
  vector<int> a(N);
  vector<int> b(N);
  vector<int> c(N);

  // Initialize random numbers in each array
  generate(begin(a), end(a), []() { return rand() % 100; });
  generate(begin(b), end(b), []() { return rand() % 100; });

  // Allocate memory on the device
  int *d_a, *d_b, *d_c;
  cudaMalloc(&d_a, bytes);
  cudaMalloc(&d_b, bytes);
  cudaMalloc(&d_c, bytes);

  // Copy data from the host to the device (CPU -> GPU)
  cudaMemcpy(d_a, a.data(), bytes, cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, b.data(), bytes, cudaMemcpyHostToDevice);

  // Threads per CTA (1024 threads per CTA)
  int NUM_THREADS = 1 << 10;

  // CTAs per Grid
  // We need to launch at LEAST as many threads as we have elements
  // This equation pads an extra CTA to the grid if N cannot evenly be divided
  // by NUM_THREADS (e.g. N = 1025, NUM_THREADS = 1024)
  int NUM_BLOCKS = (N + NUM_THREADS - 1) / NUM_THREADS;

  // Launch the kernel on the GPU
  // Kernel calls are asynchronous (the CPU program continues execution after
  // call, but not necessarily before the kernel finishes)
  vectorAdd<<<NUM_BLOCKS, NUM_THREADS>>>(d_a, d_b, d_c, N);

  // Copy sum vector from device to host
  // cudaMemcpy is a synchronous operation, and waits for the prior kernel
  // launch to complete (both go to the default stream in this case).
  // Therefore, this cudaMemcpy acts as both a memcpy and synchronization
  // barrier.
  cudaMemcpy(c.data(), d_c, bytes, cudaMemcpyDeviceToHost);

  // Check result for errors
  verify_result(a, b, c);

  // Free memory on device
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);

  cout << "COMPLETED SUCCESSFULLY\n";

  return 0;
}
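// The comments above note that the kernel launch is asynchronous and that the
// cudaMemcpy afterwards doubles as a synchronization point. As a minimal sketch
// added for illustration (not part of the original program), launch failures can
// also be surfaced explicitly with a small helper called right after the
// <<<NUM_BLOCKS, NUM_THREADS>>> launch:
static bool launchSucceeded() {
  cudaError_t launchErr = cudaGetLastError();      // bad launch configuration, etc.
  cudaError_t syncErr = cudaDeviceSynchronize();   // errors raised while the kernel ran
  if (launchErr != cudaSuccess || syncErr != cudaSuccess) {
    std::cout << "CUDA error: "
              << cudaGetErrorString(launchErr != cudaSuccess ? launchErr : syncErr) << "\n";
    return false;
  }
  return true;
}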
9dfc7efe36166c59df87ecc383f8c6bbb153d97f.hip
// !!! This is a file automatically generated by hipify!!! #include "LandauPdf.hh" // LANDAU pdf : algorithm from CERNLIB G110 denlan // same algorithm is used in GSL MEM_CONSTANT fptype p1[5] = {0.4259894875,-0.1249762550, 0.03984243700, -0.006298287635, 0.001511162253}; MEM_CONSTANT fptype q1[5] = {1.0 ,-0.3388260629, 0.09594393323, -0.01608042283, 0.003778942063}; MEM_CONSTANT fptype p2[5] = {0.1788541609, 0.1173957403, 0.01488850518, -0.001394989411, 0.0001283617211}; MEM_CONSTANT fptype q2[5] = {1.0 , 0.7428795082, 0.3153932961, 0.06694219548, 0.008790609714}; MEM_CONSTANT fptype p3[5] = {0.1788544503, 0.09359161662,0.006325387654, 0.00006611667319,-0.000002031049101}; MEM_CONSTANT fptype q3[5] = {1.0 , 0.6097809921, 0.2560616665, 0.04746722384, 0.006957301675}; MEM_CONSTANT fptype p4[5] = {0.9874054407, 118.6723273, 849.2794360, -743.7792444, 427.0262186}; MEM_CONSTANT fptype q4[5] = {1.0 , 106.8615961, 337.6496214, 2016.712389, 1597.063511}; MEM_CONSTANT fptype p5[5] = {1.003675074, 167.5702434, 4789.711289, 21217.86767, -22324.94910}; MEM_CONSTANT fptype q5[5] = {1.0 , 156.9424537, 3745.310488, 9834.698876, 66924.28357}; MEM_CONSTANT fptype p6[5] = {1.000827619, 664.9143136, 62972.92665, 475554.6998, -5743609.109}; MEM_CONSTANT fptype q6[5] = {1.0 , 651.4101098, 56974.73333, 165917.4725, -2815759.939}; MEM_CONSTANT fptype a1[3] = {0.04166666667,-0.01996527778, 0.02709538966}; MEM_CONSTANT fptype a2[2] = {-1.845568670,-4.284640743}; EXEC_TARGET fptype device_Landau (fptype* evt, fptype* p, unsigned int* indices) { fptype x = evt[indices[2 + indices[0]]]; fptype mpv = p[indices[1]]; fptype sigma = p[indices[2]]; if (sigma <= 0) return 0; fptype v = (x - mpv)/sigma; fptype u, ue, us, denlan; if (v < -5.5) { u = EXP(v+1.0); if (u < 1e-10) return 0.0; ue = EXP(-1/u); us = SQRT(u); denlan = 0.3989422803*(ue/us)*(1+(a1[0]+(a1[1]+a1[2]*u)*u)*u); } else if (v < -1) { u = EXP(-v-1); denlan = EXP(-u)*SQRT(u)* (p1[0]+(p1[1]+(p1[2]+(p1[3]+p1[4]*v)*v)*v)*v)/ (q1[0]+(q1[1]+(q1[2]+(q1[3]+q1[4]*v)*v)*v)*v); } else if (v < 1) { denlan = (p2[0]+(p2[1]+(p2[2]+(p2[3]+p2[4]*v)*v)*v)*v)/ (q2[0]+(q2[1]+(q2[2]+(q2[3]+q2[4]*v)*v)*v)*v); } else if (v < 5) { denlan = (p3[0]+(p3[1]+(p3[2]+(p3[3]+p3[4]*v)*v)*v)*v)/ (q3[0]+(q3[1]+(q3[2]+(q3[3]+q3[4]*v)*v)*v)*v); } else if (v < 12) { u = 1/v; denlan = u*u*(p4[0]+(p4[1]+(p4[2]+(p4[3]+p4[4]*u)*u)*u)*u)/ (q4[0]+(q4[1]+(q4[2]+(q4[3]+q4[4]*u)*u)*u)*u); } else if (v < 50) { u = 1/v; denlan = u*u*(p5[0]+(p5[1]+(p5[2]+(p5[3]+p5[4]*u)*u)*u)*u)/ (q5[0]+(q5[1]+(q5[2]+(q5[3]+q5[4]*u)*u)*u)*u); } else if (v < 300) { u = 1/v; denlan = u*u*(p6[0]+(p6[1]+(p6[2]+(p6[3]+p6[4]*u)*u)*u)*u)/ (q6[0]+(q6[1]+(q6[2]+(q6[3]+q6[4]*u)*u)*u)*u); } else { u = 1/(v-v*::log(v)/(v+1)); denlan = u*u*(1+(a2[0]+a2[1]*u)*u); } return denlan/sigma; } MEM_DEVICE device_function_ptr ptr_to_Landau = device_Landau; __host__ LandauPdf::LandauPdf (std::string n, Variable* _x, Variable* mpv, Variable* sigma) : GooPdf(_x, n) { std::vector<unsigned int> pindices; pindices.push_back(registerParameter(mpv)); pindices.push_back(registerParameter(sigma)); GET_FUNCTION_ADDR(ptr_to_Landau); initialise(pindices); }
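// Hedged usage sketch (not part of this file): based only on the constructor
// signature above, a LandauPdf is built from three GooFit Variable objects.
// The Variable constructor forms shown here are assumed from common GooFit
// examples and may differ between GooFit versions:
//
//   Variable* xvar  = new Variable("xvar", -10.0, 10.0);          // observable
//   Variable* mpv   = new Variable("mpv",   0.0, 0.1, -5.0, 5.0); // fit parameter
//   Variable* sigma = new Variable("sigma", 1.0, 0.1,  0.1, 5.0); // fit parameter
//   LandauPdf* landau = new LandauPdf("landau", xvar, mpv, sigma);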
9dfc7efe36166c59df87ecc383f8c6bbb153d97f.cu
#include "LandauPdf.hh" // LANDAU pdf : algorithm from CERNLIB G110 denlan // same algorithm is used in GSL MEM_CONSTANT fptype p1[5] = {0.4259894875,-0.1249762550, 0.03984243700, -0.006298287635, 0.001511162253}; MEM_CONSTANT fptype q1[5] = {1.0 ,-0.3388260629, 0.09594393323, -0.01608042283, 0.003778942063}; MEM_CONSTANT fptype p2[5] = {0.1788541609, 0.1173957403, 0.01488850518, -0.001394989411, 0.0001283617211}; MEM_CONSTANT fptype q2[5] = {1.0 , 0.7428795082, 0.3153932961, 0.06694219548, 0.008790609714}; MEM_CONSTANT fptype p3[5] = {0.1788544503, 0.09359161662,0.006325387654, 0.00006611667319,-0.000002031049101}; MEM_CONSTANT fptype q3[5] = {1.0 , 0.6097809921, 0.2560616665, 0.04746722384, 0.006957301675}; MEM_CONSTANT fptype p4[5] = {0.9874054407, 118.6723273, 849.2794360, -743.7792444, 427.0262186}; MEM_CONSTANT fptype q4[5] = {1.0 , 106.8615961, 337.6496214, 2016.712389, 1597.063511}; MEM_CONSTANT fptype p5[5] = {1.003675074, 167.5702434, 4789.711289, 21217.86767, -22324.94910}; MEM_CONSTANT fptype q5[5] = {1.0 , 156.9424537, 3745.310488, 9834.698876, 66924.28357}; MEM_CONSTANT fptype p6[5] = {1.000827619, 664.9143136, 62972.92665, 475554.6998, -5743609.109}; MEM_CONSTANT fptype q6[5] = {1.0 , 651.4101098, 56974.73333, 165917.4725, -2815759.939}; MEM_CONSTANT fptype a1[3] = {0.04166666667,-0.01996527778, 0.02709538966}; MEM_CONSTANT fptype a2[2] = {-1.845568670,-4.284640743}; EXEC_TARGET fptype device_Landau (fptype* evt, fptype* p, unsigned int* indices) { fptype x = evt[indices[2 + indices[0]]]; fptype mpv = p[indices[1]]; fptype sigma = p[indices[2]]; if (sigma <= 0) return 0; fptype v = (x - mpv)/sigma; fptype u, ue, us, denlan; if (v < -5.5) { u = EXP(v+1.0); if (u < 1e-10) return 0.0; ue = EXP(-1/u); us = SQRT(u); denlan = 0.3989422803*(ue/us)*(1+(a1[0]+(a1[1]+a1[2]*u)*u)*u); } else if (v < -1) { u = EXP(-v-1); denlan = EXP(-u)*SQRT(u)* (p1[0]+(p1[1]+(p1[2]+(p1[3]+p1[4]*v)*v)*v)*v)/ (q1[0]+(q1[1]+(q1[2]+(q1[3]+q1[4]*v)*v)*v)*v); } else if (v < 1) { denlan = (p2[0]+(p2[1]+(p2[2]+(p2[3]+p2[4]*v)*v)*v)*v)/ (q2[0]+(q2[1]+(q2[2]+(q2[3]+q2[4]*v)*v)*v)*v); } else if (v < 5) { denlan = (p3[0]+(p3[1]+(p3[2]+(p3[3]+p3[4]*v)*v)*v)*v)/ (q3[0]+(q3[1]+(q3[2]+(q3[3]+q3[4]*v)*v)*v)*v); } else if (v < 12) { u = 1/v; denlan = u*u*(p4[0]+(p4[1]+(p4[2]+(p4[3]+p4[4]*u)*u)*u)*u)/ (q4[0]+(q4[1]+(q4[2]+(q4[3]+q4[4]*u)*u)*u)*u); } else if (v < 50) { u = 1/v; denlan = u*u*(p5[0]+(p5[1]+(p5[2]+(p5[3]+p5[4]*u)*u)*u)*u)/ (q5[0]+(q5[1]+(q5[2]+(q5[3]+q5[4]*u)*u)*u)*u); } else if (v < 300) { u = 1/v; denlan = u*u*(p6[0]+(p6[1]+(p6[2]+(p6[3]+p6[4]*u)*u)*u)*u)/ (q6[0]+(q6[1]+(q6[2]+(q6[3]+q6[4]*u)*u)*u)*u); } else { u = 1/(v-v*std::log(v)/(v+1)); denlan = u*u*(1+(a2[0]+a2[1]*u)*u); } return denlan/sigma; } MEM_DEVICE device_function_ptr ptr_to_Landau = device_Landau; __host__ LandauPdf::LandauPdf (std::string n, Variable* _x, Variable* mpv, Variable* sigma) : GooPdf(_x, n) { std::vector<unsigned int> pindices; pindices.push_back(registerParameter(mpv)); pindices.push_back(registerParameter(sigma)); GET_FUNCTION_ADDR(ptr_to_Landau); initialise(pindices); }
d52900bcbfc683acc8b9acad510d597835ecd4dd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************\ * --- Practical Course: GPU Programming in Computer Vision --- * * time: summer term 2012 / September 17-24, 2012 * * project: superresolution * file: flowlib_gpu_sor.cu * * * implement all functions with ### implement me ### in the function body \****************************************************************************/ /* * flowlib_gpu_sor.cu * * Created on: Mar 14, 2012 * Author: steinbrf */ //#include <flowlib_gpu_sor.hpp> #include "flowlib.hpp" #include <auxiliary/cuda_basic.cuh> #include <linearoperations/linearoperations.cuh> #include <auxiliary/debug.hpp> hipChannelFormatDesc flow_sor_float_tex = hipCreateChannelDesc<float>(); texture<float, 2, hipReadModeElementType> tex_flow_sor_I1; texture<float, 2, hipReadModeElementType> tex_flow_sor_I2; bool textures_flow_sor_initialized = false; #define IMAGE_FILTER_METHOD hipFilterModeLinear #define SF_TEXTURE_OFFSET 0.5f #define SF_BW 16 #define SF_BH 16 FlowLibGpuSOR::FlowLibGpuSOR(int par_nx, int par_ny): FlowLib(par_nx,par_ny),FlowLibGpu(par_nx,par_ny),FlowLibSOR(par_nx,par_ny) { cuda_malloc2D((void**)&_penDat,_nx,_ny,1,sizeof(float),&_pitchf1); cuda_malloc2D((void**)&_penReg,_nx,_ny,1,sizeof(float),&_pitchf1); cuda_malloc2D((void**)&_b1,_nx,_ny,1,sizeof(float),&_pitchf1); cuda_malloc2D((void**)&_b2,_nx,_ny,1,sizeof(float),&_pitchf1); } FlowLibGpuSOR::~FlowLibGpuSOR() { if(_penDat) cutilSafeCall(hipFree(_penDat)); if(_penReg) cutilSafeCall(hipFree(_penReg)); if(_b1) cutilSafeCall(hipFree(_b1)); if(_b2) cutilSafeCall(hipFree(_b2)); } void bind_textures(const float *I1_g, const float *I2_g, int nx, int ny, int pitchf1) { tex_flow_sor_I1.addressMode[0] = hipAddressModeClamp; tex_flow_sor_I1.addressMode[1] = hipAddressModeClamp; tex_flow_sor_I1.filterMode = IMAGE_FILTER_METHOD ; tex_flow_sor_I1.normalized = false; tex_flow_sor_I2.addressMode[0] = hipAddressModeClamp; tex_flow_sor_I2.addressMode[1] = hipAddressModeClamp; tex_flow_sor_I2.filterMode = IMAGE_FILTER_METHOD; tex_flow_sor_I2.normalized = false; cutilSafeCall( hipBindTexture2D(0, &tex_flow_sor_I1, I1_g, &flow_sor_float_tex, nx, ny, pitchf1*sizeof(float)) ); cutilSafeCall( hipBindTexture2D(0, &tex_flow_sor_I2, I2_g, &flow_sor_float_tex, nx, ny, pitchf1*sizeof(float)) ); } void unbind_textures_flow_sor() { cutilSafeCall (hipUnbindTexture(tex_flow_sor_I1)); cutilSafeCall (hipUnbindTexture(tex_flow_sor_I2)); } void update_textures_flow_sor(const float *I2_resampled_warped_g, int nx_fine, int ny_fine, int pitchf1) { cutilSafeCall (hipUnbindTexture(tex_flow_sor_I2)); cutilSafeCall( hipBindTexture2D(0, &tex_flow_sor_I2, I2_resampled_warped_g, &flow_sor_float_tex, nx_fine, ny_fine, pitchf1*sizeof(float)) ); } /** * @brief Adds one flow field onto another * @param du_g Horizontal increment * @param dv_g Vertical increment * @param u_g Horizontal accumulation * @param v_g Vertical accumulation * @param nx Image width * @param ny Image height * @param pitchf1 Image pitch for single float images */ __global__ void add_flow_fields ( const float *du_g, const float *dv_g, float *u_g, float *v_g, int nx, int ny, int pitchf1 ) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < nx && y < ny){ u_g[y*pitchf1+x] = u_g[y*pitchf1+x] + du_g[y*pitchf1+x]; v_g[y*pitchf1+x] = v_g[y*pitchf1+x] + dv_g[y*pitchf1+x]; } } /** * @brief Kernel to compute the penalty values for several 
* lagged-diffusivity iterations taking into account pixel sizes for warping. * Image derivatives are read from texture, flow derivatives from shared memory * @param u_g Pointer to global device memory for the horizontal * flow component of the accumulation flow field * @param v_g Pointer to global device memory for the vertical * flow component of the accumulation flow field * @param du_g Pointer to global device memory for the horizontal * flow component of the increment flow field * @param dv_g Pointer to global device memory for the vertical * flow component of the increment flow field * @param penaltyd_g Pointer to global device memory for data term penalty * @param penaltyr_g Pointer to global device memory for regularity term * penalty * @param nx Image width * @param ny Image height * @param hx Horizontal pixel size * @param hy Vertical pixel size * @param data_epsilon Smoothing parameter for the TV Penalization of the data * term * @param diff_epsilon Smoothing parameter for the TV Penalization of the * regularity term * @param pitchf1 Image pitch for single float images */ __global__ void sorflow_update_robustifications_warp_tex_shared ( const float *u_g, const float *v_g, const float *du_g, const float *dv_g, float *penaltyd_g, float *penaltyr_g, int nx, int ny, float hx, float hy, float data_epsilon, float diff_epsilon, int pitchf1 ) { const float hx_1 = 1.0f / (2.0f*hx); const float hy_1 = 1.0f / (2.0f*hy); const int x = blockIdx.x * SF_BW + threadIdx.x; const int y = blockIdx.y * SF_BW + threadIdx.y; __shared__ float u1[SF_BW][SF_BH]; __shared__ float u2[SF_BW][SF_BH]; __shared__ float du[SF_BW][SF_BH]; __shared__ float dv[SF_BW][SF_BH]; const float xx = (float)x + SF_TEXTURE_OFFSET; const float yy = (float)y + SF_TEXTURE_OFFSET; double dxu, dxv, dyu, dyv, dataterm; float3 is; if(x < nx && y < ny){ is.x = 0.5f*( tex2D(tex_flow_sor_I1,xx+1.0f,yy) -tex2D(tex_flow_sor_I1,xx-1.0f,yy) +tex2D(tex_flow_sor_I2,xx+1.0f,yy) -tex2D(tex_flow_sor_I2,xx-1.0f,yy))*hx_1; is.y = 0.5f*( tex2D(tex_flow_sor_I1,xx,yy+1.0f) -tex2D(tex_flow_sor_I1,xx,yy-1.0f) +tex2D(tex_flow_sor_I2,xx,yy+1.0f) -tex2D(tex_flow_sor_I2,xx,yy-1.0f))*hy_1; is.z = (tex2D(tex_flow_sor_I2,xx,yy)-tex2D(tex_flow_sor_I1,xx,yy)); u1[threadIdx.x][threadIdx.y] = u_g[y*pitchf1+x]; u2[threadIdx.x][threadIdx.y] = v_g[y*pitchf1+x]; du[threadIdx.x][threadIdx.y] = du_g[y*pitchf1+x]; dv[threadIdx.x][threadIdx.y] = dv_g[y*pitchf1+x]; } __syncthreads(); if(x < nx && y < ny){ dxu = ((x<nx-1 ? (threadIdx.x<SF_BW-1 ? u1[threadIdx.x+1][threadIdx.y] : u_g[y*pitchf1+x+1]) : u1[threadIdx.x][threadIdx.y]) - (x>0 ? (threadIdx.x>0 ? u1[threadIdx.x-1][threadIdx.y] : u_g[y*pitchf1+x-1]) : u1[threadIdx.x][threadIdx.y]))*hx_1; dyu = ((y<ny-1 ? (threadIdx.y<SF_BH-1 ? u1[threadIdx.x][threadIdx.y+1] : u_g[(y+1)*pitchf1+x]) : u1[threadIdx.x][threadIdx.y]) - (y>0 ? (threadIdx.y>0 ? u1[threadIdx.x][threadIdx.y-1] : u_g[(y-1)*pitchf1+x]) : u1[threadIdx.x][threadIdx.y]))*hy_1; dxv = ((x<nx-1 ? (threadIdx.x<SF_BW-1 ? u2[threadIdx.x+1][threadIdx.y] : v_g[y*pitchf1+x+1]) : u2[threadIdx.x][threadIdx.y]) - (x>0 ? (threadIdx.x>0 ? u2[threadIdx.x-1][threadIdx.y] : v_g[y*pitchf1+x-1]) : u2[threadIdx.x][threadIdx.y]))*hx_1; dyv = ((y<ny-1 ? (threadIdx.y<SF_BH-1 ? u2[threadIdx.x][threadIdx.y+1] : v_g[(y+1)*pitchf1+x]) : u2[threadIdx.x][threadIdx.y]) - (y>0 ? (threadIdx.y>0 ? u2[threadIdx.x][threadIdx.y-1] : v_g[(y-1)*pitchf1+x]) : u2[threadIdx.x][threadIdx.y]))*hy_1; dxu += ((x<nx-1 ? (threadIdx.x<SF_BW-1 ? 
du[threadIdx.x+1][threadIdx.y] : du_g[y*pitchf1+x+1]) : du[threadIdx.x][threadIdx.y]) - (x>0 ? (threadIdx.x>0 ? du[threadIdx.x-1][threadIdx.y] : du_g[y*pitchf1+x-1]) : du[threadIdx.x][threadIdx.y]))*hx_1; dyu += ((y<ny-1 ? (threadIdx.y<SF_BH-1 ? du[threadIdx.x][threadIdx.y+1] : du_g[(y+1)*pitchf1+x]) : du[threadIdx.x][threadIdx.y]) - (y>0 ? (threadIdx.y>0 ? du[threadIdx.x][threadIdx.y-1] : du_g[(y-1)*pitchf1+x]) : du[threadIdx.x][threadIdx.y]))*hy_1; dxv += ((x<nx-1 ? (threadIdx.x<SF_BW-1 ? dv[threadIdx.x+1][threadIdx.y] : dv_g[y*pitchf1+x+1]) : dv[threadIdx.x][threadIdx.y]) - (x>0 ? (threadIdx.x>0 ? dv[threadIdx.x-1][threadIdx.y] : dv_g[y*pitchf1+x-1]) : dv[threadIdx.x][threadIdx.y]))*hx_1; dyv += ((y<ny-1 ? (threadIdx.y<SF_BH-1 ? dv[threadIdx.x][threadIdx.y+1] : dv_g[(y+1)*pitchf1+x]) : dv[threadIdx.x][threadIdx.y]) - (y>0 ? (threadIdx.y>0 ? dv[threadIdx.x][threadIdx.y-1] : dv_g[(y-1)*pitchf1+x]) : dv[threadIdx.x][threadIdx.y]))*hy_1; dataterm = du[threadIdx.x][threadIdx.y]*is.x + dv[threadIdx.x][threadIdx.y]*is.y + is.z; penaltyd_g[y*pitchf1+x] = 1.0f / sqrt(dataterm*dataterm + data_epsilon); penaltyr_g[y*pitchf1+x] = 1.0f / sqrt(dxu*dxu + dxv*dxv + dyu*dyu + dyv*dyv + diff_epsilon); } } /** * @brief Precomputes one value as the sum of all values not depending of the * current flow increment * @param u_g Pointer to global device memory for the horizontal * flow component of the accumulation flow field * @param v_g Pointer to global device memory for the vertical * flow component of the accumulation flow field * @param penaltyd_g Pointer to global device memory for data term penalty * @param penaltyr_g Pointer to global device memory for regularity term * penalty * @param bu_g Pointer to global memory for horizontal result value * @param bv_g Pointer to global memory for vertical result value * @param nx Image width * @param ny Image height * @param hx Horizontal pixel size * @param hy Vertical pixel size * @param lambda Smoothness weight * @param pitchf1 Image pitch for single float images */ __global__ void sorflow_update_righthandside_shared ( const float *u_g, const float *v_g, const float *penaltyd_g, const float *penaltyr_g, float *bu_g, float *bv_g, int nx, int ny, float hx, float hy, float lambda, int pitchf1 ) { const float hx_1 = 1.0f / (2.0f*hx); const float hy_1 = 1.0f / (2.0f*hy); const float hx_2=lambda/(hx*hx); const float hy_2=lambda/(hy*hy); const int x = blockIdx.x * SF_BW + threadIdx.x; const int y = blockIdx.y * SF_BW + threadIdx.y; __shared__ float u1[SF_BW][SF_BH]; __shared__ float u2[SF_BW][SF_BH]; __shared__ float pend[SF_BW][SF_BH]; __shared__ float penr[SF_BW][SF_BH]; float IxIt, IyIt; const float xx = (float)x + SF_TEXTURE_OFFSET; const float yy = (float)y + SF_TEXTURE_OFFSET; float3 is; float xp, xm, yp, ym, sum; if(x < nx && y < ny){ is.x = 0.5f*(tex2D(tex_flow_sor_I1,xx+1.0f,yy) -tex2D(tex_flow_sor_I1,xx-1.0f,yy) +tex2D(tex_flow_sor_I2,xx+1.0f,yy) -tex2D(tex_flow_sor_I2,xx-1.0f,yy))*hx_1; is.y = 0.5f*(tex2D(tex_flow_sor_I1,xx,yy+1.0f) -tex2D(tex_flow_sor_I1,xx,yy-1.0f) +tex2D(tex_flow_sor_I2,xx,yy+1.0f) -tex2D(tex_flow_sor_I2,xx,yy-1.0f))*hy_1; is.z = (tex2D(tex_flow_sor_I2,xx,yy)-tex2D(tex_flow_sor_I1,xx,yy)); IxIt = is.x*is.z; IyIt = is.y*is.z; u1[threadIdx.x][threadIdx.y] = u_g[y*pitchf1+x]; u2[threadIdx.x][threadIdx.y] = v_g[y*pitchf1+x]; pend[threadIdx.x][threadIdx.y] = penaltyd_g[y*pitchf1+x]; penr[threadIdx.x][threadIdx.y] = penaltyr_g[y*pitchf1+x]; } __syncthreads(); if(x < nx && y < ny){ xp = (x<nx-1 ? ((threadIdx.x<SF_BW-1 ? 
penr[threadIdx.x+1][threadIdx.y] : penaltyr_g[y*pitchf1+x+1]) + penr[threadIdx.x][threadIdx.y])*0.5f : 0.0f)*hx_2; xm = (x>0 ? ((threadIdx.x>0 ? penr[threadIdx.x-1][threadIdx.y] : penaltyr_g[y*pitchf1+x-1]) + penr[threadIdx.x][threadIdx.y])*0.5f : 0.0f)*hx_2; yp = (y<ny-1 ? ((threadIdx.y<SF_BH-1 ? penr[threadIdx.x][threadIdx.y+1] : penaltyr_g[(y+1)*pitchf1+x])+ penr[threadIdx.x][threadIdx.y])*0.5f : 0.0f)*hy_2; ym = (y>0 ? ((threadIdx.y>0 ? penr[threadIdx.x][threadIdx.y-1] : penaltyr_g[(y-1)*pitchf1+x])+ penr[threadIdx.x][threadIdx.y])*0.5f : 0.0f)*hy_2; sum = xp + xm + yp + ym; bu_g[y*pitchf1+x] = - pend[threadIdx.x][threadIdx.y] * IxIt + (x>0 ? xm*(threadIdx.x>0 ? u1[threadIdx.x-1][threadIdx.y] : u_g[y*pitchf1+x-1]) : 0.0f) + (x<nx-1 ? xp*(threadIdx.x<SF_BW-1 ? u1[threadIdx.x+1][threadIdx.y] : u_g[y*pitchf1+x+1]) : 0.0f) + (y>0 ? ym*(threadIdx.y>0 ? u1[threadIdx.x][threadIdx.y-1] : u_g[(y-1)*pitchf1+x]) : 0.0f) + (y<ny-1 ? yp*(threadIdx.y<SF_BH-1 ? u1[threadIdx.x][threadIdx.y+1] : u_g[(y+1)*pitchf1+x]) : 0.0f) - sum * u1[threadIdx.x][threadIdx.y]; bv_g[y*pitchf1+x] = - pend[threadIdx.x][threadIdx.y] * IyIt +(x>0 ? xm*(threadIdx.x>0 ? u2[threadIdx.x-1][threadIdx.y] : v_g[y*pitchf1+x-1]) : 0.0f) + (x<nx-1 ? xp*(threadIdx.x<SF_BW-1 ? u2[threadIdx.x+1][threadIdx.y] : v_g[y*pitchf1+x+1]) : 0.0f) + (y>0 ? ym*(threadIdx.y>0 ? u2[threadIdx.x][threadIdx.y-1] : v_g[(y-1)*pitchf1+x]) : 0.0f) + (y<ny-1 ? yp*(threadIdx.y<SF_BH-1 ? u2[threadIdx.x][threadIdx.y+1] : v_g[(y+1)*pitchf1+x]) : 0.0f) - sum * u2[threadIdx.x][threadIdx.y]; } } /** * @brief Kernel to compute one Red-Black-SOR iteration for the nonlinear * Euler-Lagrange equation taking into account penalty values and pixel * size for warping * @param bu_g Right-Hand-Side values for horizontal flow * @param bv_g Right-Hand-Side values for vertical flow * @param penaltyd_g Pointer to global device memory holding data term penalization * @param penaltyr_g Pointer to global device memory holding regularity term * penalization * @param du_g Pointer to global device memory for the horizontal * flow component increment * @param dv_g Pointer to global device memory for the vertical * flow component increment * @param nx Image width * @param ny Image height * @param hx Horizontal pixel size * @param hy Vertical pixel size * @param lambda Smoothness weight * @param relaxation Overrelaxation for the SOR-solver * @param red Parameter deciding whether the red or black fields of a * checkerboard pattern are being updated * @param pitchf1 Image pitch for single float images */ __global__ void sorflow_nonlinear_warp_sor_shared ( const float *bu_g, const float *bv_g, const float *penaltyd_g, const float *penaltyr_g, float *du_g, float *dv_g, int nx, int ny, float hx, float hy, float lambda, float relaxation, int red, int pitchf1 ) { const float hx_1 = 1.0f / (2.0f*hx); const float hy_1 = 1.0f / (2.0f*hy); const float hx_2=lambda/(hx*hx); const float hy_2=lambda/(hy*hy); const int x = blockIdx.x * SF_BW + threadIdx.x; const int y = blockIdx.y * SF_BW + threadIdx.y; __shared__ float du[SF_BW][SF_BH]; __shared__ float dv[SF_BW][SF_BH]; __shared__ float pend[SF_BW][SF_BH]; __shared__ float penr[SF_BW][SF_BH]; const float xx = (float)x + SF_TEXTURE_OFFSET; const float yy = (float)y + SF_TEXTURE_OFFSET; float IxIx, IxIy, IyIy; float bu, bv; float3 is; float xp, xm, yp, ym, sum; if(x < nx && y < ny){ is.x = 0.5f*(tex2D(tex_flow_sor_I1,xx+1.0f,yy) -tex2D(tex_flow_sor_I1,xx-1.0f,yy) +tex2D(tex_flow_sor_I2,xx+1.0f,yy) -tex2D(tex_flow_sor_I2,xx-1.0f,yy))*hx_1; is.y = 
0.5f*(tex2D(tex_flow_sor_I1,xx,yy+1.0f) -tex2D(tex_flow_sor_I1,xx,yy-1.0f) +tex2D(tex_flow_sor_I2,xx,yy+1.0f) -tex2D(tex_flow_sor_I2,xx,yy-1.0f))*hy_1; is.z = (tex2D(tex_flow_sor_I2,xx,yy)-tex2D(tex_flow_sor_I1,xx,yy)); IxIx = is.x*is.x; IxIy = is.x*is.y; IyIy = is.y*is.y; bu = bu_g[y*pitchf1+x]; bv = bv_g[y*pitchf1+x]; du[threadIdx.x][threadIdx.y] = du_g[y*pitchf1+x]; dv[threadIdx.x][threadIdx.y] = dv_g[y*pitchf1+x]; pend[threadIdx.x][threadIdx.y] = penaltyd_g[y*pitchf1+x]; penr[threadIdx.x][threadIdx.y] = penaltyr_g[y*pitchf1+x]; } __syncthreads(); if(x < nx && y < ny && ((x+y)%2) == red){ xp = (x<nx-1 ? ((threadIdx.x<SF_BW-1 ? penr[threadIdx.x+1][threadIdx.y] : penaltyr_g[y*pitchf1+x+1]) + penr[threadIdx.x][threadIdx.y])*0.5f : 0.0f)*hx_2; xm = (x>0 ? ((threadIdx.x>0 ? penr[threadIdx.x-1][threadIdx.y] : penaltyr_g[y*pitchf1+x-1]) + penr[threadIdx.x][threadIdx.y])*0.5f : 0.0f)*hx_2; yp = (y<ny-1 ? ((threadIdx.y<SF_BH-1 ? penr[threadIdx.x][threadIdx.y+1] : penaltyr_g[(y+1)*pitchf1+x])+ penr[threadIdx.x][threadIdx.y])*0.5f : 0.0f)*hy_2; ym = (y>0 ? ((threadIdx.y>0 ? penr[threadIdx.x][threadIdx.y-1] : penaltyr_g[(y-1)*pitchf1+x])+ penr[threadIdx.x][threadIdx.y])*0.5f : 0.0f)*hy_2; sum = xp + xm + yp + ym; du_g[y*pitchf1+x] = (1.0f-relaxation)*du[threadIdx.x][threadIdx.y] + relaxation * ( bu - pend[threadIdx.x][threadIdx.y]*IxIy*dv[threadIdx.x][threadIdx.y] + (x>0 ? xm*(threadIdx.x>0 ? du[threadIdx.x-1][threadIdx.y] : du_g[y*pitchf1+x-1]) : 0.0f) + (x<nx-1 ? xp*(threadIdx.x<SF_BW-1 ? du[threadIdx.x+1][threadIdx.y] : du_g[y*pitchf1+x+1]) : 0.0f) + (y>0 ? ym*(threadIdx.y>0 ? du[threadIdx.x][threadIdx.y-1] : du_g[(y-1)*pitchf1+x]) : 0.0f) + (y<ny-1 ? yp*(threadIdx.y<SF_BH-1 ? du[threadIdx.x][threadIdx.y+1] : du_g[(y+1)*pitchf1+x]) : 0.0f) ) / (pend[threadIdx.x][threadIdx.y]*IxIx + sum); dv_g[y*pitchf1+x] = (1.0f-relaxation)*dv[threadIdx.x][threadIdx.y] + relaxation * (bv - pend[threadIdx.x][threadIdx.y]*IxIy*du[threadIdx.x][threadIdx.y] + (x>0 ? xm*(threadIdx.x>0 ? dv[threadIdx.x-1][threadIdx.y] : dv_g[y*pitchf1+x-1]) : 0.0f) + (x<nx-1 ? xp*(threadIdx.x<SF_BW-1 ? dv[threadIdx.x+1][threadIdx.y] : dv_g[y*pitchf1+x+1]) : 0.0f) + (y>0 ? ym*(threadIdx.y>0 ? dv[threadIdx.x][threadIdx.y-1] : dv_g[(y-1)*pitchf1+x]) : 0.0f) + (y<ny-1 ? yp*(threadIdx.y<SF_BH-1 ? dv[threadIdx.x][threadIdx.y+1] : dv_g[(y+1)*pitchf1+x]) : 0.0f) )/ (pend[threadIdx.x][threadIdx.y]*IyIy + sum); } } /** * @brief Method that calls the sorflow_nonlinear_warp_sor_shared in a loop, * with an outer loop for computing the diffisivity values for * one level of a coarse-to-fine implementation. 
* @param u_g Pointer to global device memory for the horizontal * flow component * @param v_g Pointer to global device memory for the vertical * flow component * @param du_g Pointer to global device memory for the horizontal * flow component increment * @param dv_g Pointer to global device memory for the vertical * flow component increment * @param bu_g Right-Hand-Side values for horizontal flow * @param bv_g Right-Hand-Side values for vertical flow * @param penaltyd_g Pointer to global device memory holding data term penalization * @param penaltyr_g Pointer to global device memory holding regularity term * penalization * @param nx Image width * @param ny Image height * @param pitchf1 Image pitch for single float images * @param hx Horizontal pixel size * @param hy Vertical pixel size * @param lambda Smoothness weight * @param outer_iterations Number of iterations of the penalty computation * @param inner_iterations Number of iterations for the SOR-solver * @param relaxation Overrelaxation for the SOR-solver * @param data_epsilon Smoothing parameter for the TV Penalization of the data * term * @param diff_epsilon Smoothing parameter for the TV Penalization of the * regularity term */ void sorflow_gpu_nonlinear_warp_level ( const float *u_g, const float *v_g, float *du_g, float *dv_g, float *bu_g, float *bv_g, float *penaltyd_g, float *penaltyr_g, int nx, int ny, int pitchf1, float hx, float hy, float lambda, float overrelaxation, int outer_iterations, int inner_iterations, float data_epsilon, float diff_epsilon ) { int i, j; int ngx = (nx%SF_BW) ? ((nx/SF_BW)+1) : (nx/SF_BW); int ngy = (ny%SF_BW) ? ((ny/SF_BW)+1) : (ny/SF_BW); dim3 dimGrid(ngx,ngy); dim3 dimBlock(SF_BW,SF_BH); cutilSafeCall( hipMemset(du_g,0,pitchf1*ny*sizeof(float))); cutilSafeCall( hipMemset(dv_g,0,pitchf1*ny*sizeof(float))); for(i=0;i<outer_iterations;i++){ hipLaunchKernelGGL(( sorflow_update_robustifications_warp_tex_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, u_g,v_g,du_g,dv_g,penaltyd_g,penaltyr_g,nx,ny,hx,hy, data_epsilon,diff_epsilon,pitchf1); catchkernel; hipLaunchKernelGGL(( sorflow_update_righthandside_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, u_g,v_g,penaltyd_g,penaltyr_g,bu_g,bv_g,nx,ny,hx,hy,lambda,pitchf1); catchkernel; for(j=0;j<inner_iterations;j++){ hipLaunchKernelGGL(( sorflow_nonlinear_warp_sor_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, bu_g,bv_g,penaltyd_g,penaltyr_g,du_g,dv_g,nx,ny,hx,hy,lambda,overrelaxation,0,pitchf1); catchkernel; hipLaunchKernelGGL(( sorflow_nonlinear_warp_sor_shared), dim3(dimGrid),dim3(dimBlock), 0, 0, bu_g,bv_g,penaltyd_g,penaltyr_g,du_g,dv_g,nx,ny,hx,hy,lambda,overrelaxation,1,pitchf1); catchkernel; } } } float FlowLibGpuSOR::computeFlow() { float lambda = _lambda * 255.0f; int max_rec_depth; int warp_max_levels; int rec_depth; int ngx = (_nx%SF_BW) ? ((_nx/SF_BW)+1) : (_nx/SF_BW); int ngy = (_ny%SF_BH) ? ((_ny/SF_BH)+1) : (_ny/SF_BH); dim3 dimGrid(ngx,ngy); dim3 dimBlock(SF_BW,SF_BH); warp_max_levels = computeMaxWarpLevels(); max_rec_depth = (((_start_level+1) < warp_max_levels) ? 
(_start_level+1) : warp_max_levels) -1; if(max_rec_depth >= _I1pyramid->nl){ max_rec_depth = _I1pyramid->nl-1; } if(!textures_flow_sor_initialized){ tex_flow_sor_I1.addressMode[0] = hipAddressModeClamp; tex_flow_sor_I1.addressMode[1] = hipAddressModeClamp; tex_flow_sor_I1.filterMode = IMAGE_FILTER_METHOD; tex_flow_sor_I1.normalized = false; tex_flow_sor_I2.addressMode[0] = hipAddressModeClamp; tex_flow_sor_I2.addressMode[1] = hipAddressModeClamp; tex_flow_sor_I2.filterMode = IMAGE_FILTER_METHOD; tex_flow_sor_I2.normalized = false; textures_flow_sor_initialized = true; } int nx_fine, ny_fine, nx_coarse=0, ny_coarse=0; float hx_fine; float hy_fine; cutilSafeCall( hipMemset(_u1_g,0,_pitchf1*_ny*sizeof(float))); cutilSafeCall( hipMemset(_u2_g,0,_pitchf1*_ny*sizeof(float))); for(rec_depth = max_rec_depth; rec_depth >= 0; rec_depth--){ if(_verbose) fprintf(stderr," Level %i",rec_depth); nx_fine = _I1pyramid->nx[rec_depth]; ny_fine = _I1pyramid->ny[rec_depth]; hx_fine=(float)_nx/(float)nx_fine; hy_fine=(float)_ny/(float)ny_fine; cutilSafeCall( hipBindTexture2D(0, &tex_flow_sor_I1, _I1pyramid->level[rec_depth], &flow_sor_float_tex, nx_fine, ny_fine, _I1pyramid->pitch[rec_depth]*sizeof(float)) ); if(_debug){ sprintf(_debugbuffer,"debug/GI1 %i.png",rec_depth); saveCudaImage(_debugbuffer,_I1pyramid->level[rec_depth],nx_fine,ny_fine,_I1pyramid->pitch[rec_depth],1,1.0f,-1.0f); sprintf(_debugbuffer,"debug/GI2 %i.png",rec_depth); saveCudaImage(_debugbuffer,_I2pyramid->level[rec_depth],nx_fine,ny_fine,_I2pyramid->pitch[rec_depth],1,1.0f,-1.0f); } if(rec_depth < max_rec_depth){ resampleAreaParallelSeparate(_u1_g,_u1_g,nx_coarse,ny_coarse,_pitchf1,nx_fine,ny_fine,_pitchf1,_b1); resampleAreaParallelSeparate(_u2_g,_u2_g,nx_coarse,ny_coarse,_pitchf1,nx_fine,ny_fine,_pitchf1,_b2); } if(rec_depth >= _end_level){ backwardRegistrationBilinearFunctionTex(_I2pyramid->level[rec_depth],_u1_g,_u2_g, _I2warp,_I1pyramid->level[rec_depth], nx_fine,ny_fine,_I2pyramid->pitch[rec_depth],_pitchf1,hx_fine,hy_fine); if(_debug){ sprintf(_debugbuffer,"debug/GW2 %i.png",rec_depth); saveCudaImage(_debugbuffer,_I2warp,nx_fine,ny_fine,_pitchf1,1,1.0f,-1.0f); } cutilSafeCall (hipUnbindTexture(tex_flow_sor_I2)); cutilSafeCall( hipBindTexture2D(0, &tex_flow_sor_I2, _I2warp, &flow_sor_float_tex, nx_fine, ny_fine, _pitchf1*sizeof(float)) ); sorflow_gpu_nonlinear_warp_level (_u1_g,_u2_g,_u1lvl,_u2lvl,_b1,_b2,_penDat,_penReg, nx_fine,ny_fine,_pitchf1, hx_fine,hy_fine, lambda,_overrelaxation, _oi,_ii, _dat_epsilon,_reg_epsilon); hipLaunchKernelGGL(( add_flow_fields), dim3(dimGrid),dim3(dimBlock), 0, 0, _u1lvl,_u2lvl,_u1_g,_u2_g,nx_fine,ny_fine,_pitchf1); catchkernel; } else{ if(_verbose) fprintf(stderr," skipped"); } nx_coarse = nx_fine; ny_coarse = ny_fine; } if(_debug) delete [] _debugbuffer; unbind_textures_flow_sor(); //TODO: Timer return -1.0f; }
d52900bcbfc683acc8b9acad510d597835ecd4dd.cu
/****************************************************************************\ * --- Practical Course: GPU Programming in Computer Vision --- * * time: summer term 2012 / September 17-24, 2012 * * project: superresolution * file: flowlib_gpu_sor.cu * * * implement all functions with ### implement me ### in the function body \****************************************************************************/ /* * flowlib_gpu_sor.cu * * Created on: Mar 14, 2012 * Author: steinbrf */ //#include <flowlib_gpu_sor.hpp> #include "flowlib.hpp" #include <auxiliary/cuda_basic.cuh> #include <linearoperations/linearoperations.cuh> #include <auxiliary/debug.hpp> cudaChannelFormatDesc flow_sor_float_tex = cudaCreateChannelDesc<float>(); texture<float, 2, cudaReadModeElementType> tex_flow_sor_I1; texture<float, 2, cudaReadModeElementType> tex_flow_sor_I2; bool textures_flow_sor_initialized = false; #define IMAGE_FILTER_METHOD cudaFilterModeLinear #define SF_TEXTURE_OFFSET 0.5f #define SF_BW 16 #define SF_BH 16 FlowLibGpuSOR::FlowLibGpuSOR(int par_nx, int par_ny): FlowLib(par_nx,par_ny),FlowLibGpu(par_nx,par_ny),FlowLibSOR(par_nx,par_ny) { cuda_malloc2D((void**)&_penDat,_nx,_ny,1,sizeof(float),&_pitchf1); cuda_malloc2D((void**)&_penReg,_nx,_ny,1,sizeof(float),&_pitchf1); cuda_malloc2D((void**)&_b1,_nx,_ny,1,sizeof(float),&_pitchf1); cuda_malloc2D((void**)&_b2,_nx,_ny,1,sizeof(float),&_pitchf1); } FlowLibGpuSOR::~FlowLibGpuSOR() { if(_penDat) cutilSafeCall(cudaFree(_penDat)); if(_penReg) cutilSafeCall(cudaFree(_penReg)); if(_b1) cutilSafeCall(cudaFree(_b1)); if(_b2) cutilSafeCall(cudaFree(_b2)); } void bind_textures(const float *I1_g, const float *I2_g, int nx, int ny, int pitchf1) { tex_flow_sor_I1.addressMode[0] = cudaAddressModeClamp; tex_flow_sor_I1.addressMode[1] = cudaAddressModeClamp; tex_flow_sor_I1.filterMode = IMAGE_FILTER_METHOD ; tex_flow_sor_I1.normalized = false; tex_flow_sor_I2.addressMode[0] = cudaAddressModeClamp; tex_flow_sor_I2.addressMode[1] = cudaAddressModeClamp; tex_flow_sor_I2.filterMode = IMAGE_FILTER_METHOD; tex_flow_sor_I2.normalized = false; cutilSafeCall( cudaBindTexture2D(0, &tex_flow_sor_I1, I1_g, &flow_sor_float_tex, nx, ny, pitchf1*sizeof(float)) ); cutilSafeCall( cudaBindTexture2D(0, &tex_flow_sor_I2, I2_g, &flow_sor_float_tex, nx, ny, pitchf1*sizeof(float)) ); } void unbind_textures_flow_sor() { cutilSafeCall (cudaUnbindTexture(tex_flow_sor_I1)); cutilSafeCall (cudaUnbindTexture(tex_flow_sor_I2)); } void update_textures_flow_sor(const float *I2_resampled_warped_g, int nx_fine, int ny_fine, int pitchf1) { cutilSafeCall (cudaUnbindTexture(tex_flow_sor_I2)); cutilSafeCall( cudaBindTexture2D(0, &tex_flow_sor_I2, I2_resampled_warped_g, &flow_sor_float_tex, nx_fine, ny_fine, pitchf1*sizeof(float)) ); } /** * @brief Adds one flow field onto another * @param du_g Horizontal increment * @param dv_g Vertical increment * @param u_g Horizontal accumulation * @param v_g Vertical accumulation * @param nx Image width * @param ny Image height * @param pitchf1 Image pitch for single float images */ __global__ void add_flow_fields ( const float *du_g, const float *dv_g, float *u_g, float *v_g, int nx, int ny, int pitchf1 ) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < nx && y < ny){ u_g[y*pitchf1+x] = u_g[y*pitchf1+x] + du_g[y*pitchf1+x]; v_g[y*pitchf1+x] = v_g[y*pitchf1+x] + dv_g[y*pitchf1+x]; } } /** * @brief Kernel to compute the penalty values for several * lagged-diffusivity iterations taking into account pixel sizes for 
warping. * Image derivatives are read from texture, flow derivatives from shared memory * @param u_g Pointer to global device memory for the horizontal * flow component of the accumulation flow field * @param v_g Pointer to global device memory for the vertical * flow component of the accumulation flow field * @param du_g Pointer to global device memory for the horizontal * flow component of the increment flow field * @param dv_g Pointer to global device memory for the vertical * flow component of the increment flow field * @param penaltyd_g Pointer to global device memory for data term penalty * @param penaltyr_g Pointer to global device memory for regularity term * penalty * @param nx Image width * @param ny Image height * @param hx Horizontal pixel size * @param hy Vertical pixel size * @param data_epsilon Smoothing parameter for the TV Penalization of the data * term * @param diff_epsilon Smoothing parameter for the TV Penalization of the * regularity term * @param pitchf1 Image pitch for single float images */ __global__ void sorflow_update_robustifications_warp_tex_shared ( const float *u_g, const float *v_g, const float *du_g, const float *dv_g, float *penaltyd_g, float *penaltyr_g, int nx, int ny, float hx, float hy, float data_epsilon, float diff_epsilon, int pitchf1 ) { const float hx_1 = 1.0f / (2.0f*hx); const float hy_1 = 1.0f / (2.0f*hy); const int x = blockIdx.x * SF_BW + threadIdx.x; const int y = blockIdx.y * SF_BW + threadIdx.y; __shared__ float u1[SF_BW][SF_BH]; __shared__ float u2[SF_BW][SF_BH]; __shared__ float du[SF_BW][SF_BH]; __shared__ float dv[SF_BW][SF_BH]; const float xx = (float)x + SF_TEXTURE_OFFSET; const float yy = (float)y + SF_TEXTURE_OFFSET; double dxu, dxv, dyu, dyv, dataterm; float3 is; if(x < nx && y < ny){ is.x = 0.5f*( tex2D(tex_flow_sor_I1,xx+1.0f,yy) -tex2D(tex_flow_sor_I1,xx-1.0f,yy) +tex2D(tex_flow_sor_I2,xx+1.0f,yy) -tex2D(tex_flow_sor_I2,xx-1.0f,yy))*hx_1; is.y = 0.5f*( tex2D(tex_flow_sor_I1,xx,yy+1.0f) -tex2D(tex_flow_sor_I1,xx,yy-1.0f) +tex2D(tex_flow_sor_I2,xx,yy+1.0f) -tex2D(tex_flow_sor_I2,xx,yy-1.0f))*hy_1; is.z = (tex2D(tex_flow_sor_I2,xx,yy)-tex2D(tex_flow_sor_I1,xx,yy)); u1[threadIdx.x][threadIdx.y] = u_g[y*pitchf1+x]; u2[threadIdx.x][threadIdx.y] = v_g[y*pitchf1+x]; du[threadIdx.x][threadIdx.y] = du_g[y*pitchf1+x]; dv[threadIdx.x][threadIdx.y] = dv_g[y*pitchf1+x]; } __syncthreads(); if(x < nx && y < ny){ dxu = ((x<nx-1 ? (threadIdx.x<SF_BW-1 ? u1[threadIdx.x+1][threadIdx.y] : u_g[y*pitchf1+x+1]) : u1[threadIdx.x][threadIdx.y]) - (x>0 ? (threadIdx.x>0 ? u1[threadIdx.x-1][threadIdx.y] : u_g[y*pitchf1+x-1]) : u1[threadIdx.x][threadIdx.y]))*hx_1; dyu = ((y<ny-1 ? (threadIdx.y<SF_BH-1 ? u1[threadIdx.x][threadIdx.y+1] : u_g[(y+1)*pitchf1+x]) : u1[threadIdx.x][threadIdx.y]) - (y>0 ? (threadIdx.y>0 ? u1[threadIdx.x][threadIdx.y-1] : u_g[(y-1)*pitchf1+x]) : u1[threadIdx.x][threadIdx.y]))*hy_1; dxv = ((x<nx-1 ? (threadIdx.x<SF_BW-1 ? u2[threadIdx.x+1][threadIdx.y] : v_g[y*pitchf1+x+1]) : u2[threadIdx.x][threadIdx.y]) - (x>0 ? (threadIdx.x>0 ? u2[threadIdx.x-1][threadIdx.y] : v_g[y*pitchf1+x-1]) : u2[threadIdx.x][threadIdx.y]))*hx_1; dyv = ((y<ny-1 ? (threadIdx.y<SF_BH-1 ? u2[threadIdx.x][threadIdx.y+1] : v_g[(y+1)*pitchf1+x]) : u2[threadIdx.x][threadIdx.y]) - (y>0 ? (threadIdx.y>0 ? u2[threadIdx.x][threadIdx.y-1] : v_g[(y-1)*pitchf1+x]) : u2[threadIdx.x][threadIdx.y]))*hy_1; dxu += ((x<nx-1 ? (threadIdx.x<SF_BW-1 ? du[threadIdx.x+1][threadIdx.y] : du_g[y*pitchf1+x+1]) : du[threadIdx.x][threadIdx.y]) - (x>0 ? (threadIdx.x>0 ? 
du[threadIdx.x-1][threadIdx.y] : du_g[y*pitchf1+x-1]) : du[threadIdx.x][threadIdx.y]))*hx_1; dyu += ((y<ny-1 ? (threadIdx.y<SF_BH-1 ? du[threadIdx.x][threadIdx.y+1] : du_g[(y+1)*pitchf1+x]) : du[threadIdx.x][threadIdx.y]) - (y>0 ? (threadIdx.y>0 ? du[threadIdx.x][threadIdx.y-1] : du_g[(y-1)*pitchf1+x]) : du[threadIdx.x][threadIdx.y]))*hy_1; dxv += ((x<nx-1 ? (threadIdx.x<SF_BW-1 ? dv[threadIdx.x+1][threadIdx.y] : dv_g[y*pitchf1+x+1]) : dv[threadIdx.x][threadIdx.y]) - (x>0 ? (threadIdx.x>0 ? dv[threadIdx.x-1][threadIdx.y] : dv_g[y*pitchf1+x-1]) : dv[threadIdx.x][threadIdx.y]))*hx_1; dyv += ((y<ny-1 ? (threadIdx.y<SF_BH-1 ? dv[threadIdx.x][threadIdx.y+1] : dv_g[(y+1)*pitchf1+x]) : dv[threadIdx.x][threadIdx.y]) - (y>0 ? (threadIdx.y>0 ? dv[threadIdx.x][threadIdx.y-1] : dv_g[(y-1)*pitchf1+x]) : dv[threadIdx.x][threadIdx.y]))*hy_1; dataterm = du[threadIdx.x][threadIdx.y]*is.x + dv[threadIdx.x][threadIdx.y]*is.y + is.z; penaltyd_g[y*pitchf1+x] = 1.0f / sqrt(dataterm*dataterm + data_epsilon); penaltyr_g[y*pitchf1+x] = 1.0f / sqrt(dxu*dxu + dxv*dxv + dyu*dyu + dyv*dyv + diff_epsilon); } } /** * @brief Precomputes one value as the sum of all values not depending of the * current flow increment * @param u_g Pointer to global device memory for the horizontal * flow component of the accumulation flow field * @param v_g Pointer to global device memory for the vertical * flow component of the accumulation flow field * @param penaltyd_g Pointer to global device memory for data term penalty * @param penaltyr_g Pointer to global device memory for regularity term * penalty * @param bu_g Pointer to global memory for horizontal result value * @param bv_g Pointer to global memory for vertical result value * @param nx Image width * @param ny Image height * @param hx Horizontal pixel size * @param hy Vertical pixel size * @param lambda Smoothness weight * @param pitchf1 Image pitch for single float images */ __global__ void sorflow_update_righthandside_shared ( const float *u_g, const float *v_g, const float *penaltyd_g, const float *penaltyr_g, float *bu_g, float *bv_g, int nx, int ny, float hx, float hy, float lambda, int pitchf1 ) { const float hx_1 = 1.0f / (2.0f*hx); const float hy_1 = 1.0f / (2.0f*hy); const float hx_2=lambda/(hx*hx); const float hy_2=lambda/(hy*hy); const int x = blockIdx.x * SF_BW + threadIdx.x; const int y = blockIdx.y * SF_BW + threadIdx.y; __shared__ float u1[SF_BW][SF_BH]; __shared__ float u2[SF_BW][SF_BH]; __shared__ float pend[SF_BW][SF_BH]; __shared__ float penr[SF_BW][SF_BH]; float IxIt, IyIt; const float xx = (float)x + SF_TEXTURE_OFFSET; const float yy = (float)y + SF_TEXTURE_OFFSET; float3 is; float xp, xm, yp, ym, sum; if(x < nx && y < ny){ is.x = 0.5f*(tex2D(tex_flow_sor_I1,xx+1.0f,yy) -tex2D(tex_flow_sor_I1,xx-1.0f,yy) +tex2D(tex_flow_sor_I2,xx+1.0f,yy) -tex2D(tex_flow_sor_I2,xx-1.0f,yy))*hx_1; is.y = 0.5f*(tex2D(tex_flow_sor_I1,xx,yy+1.0f) -tex2D(tex_flow_sor_I1,xx,yy-1.0f) +tex2D(tex_flow_sor_I2,xx,yy+1.0f) -tex2D(tex_flow_sor_I2,xx,yy-1.0f))*hy_1; is.z = (tex2D(tex_flow_sor_I2,xx,yy)-tex2D(tex_flow_sor_I1,xx,yy)); IxIt = is.x*is.z; IyIt = is.y*is.z; u1[threadIdx.x][threadIdx.y] = u_g[y*pitchf1+x]; u2[threadIdx.x][threadIdx.y] = v_g[y*pitchf1+x]; pend[threadIdx.x][threadIdx.y] = penaltyd_g[y*pitchf1+x]; penr[threadIdx.x][threadIdx.y] = penaltyr_g[y*pitchf1+x]; } __syncthreads(); if(x < nx && y < ny){ xp = (x<nx-1 ? ((threadIdx.x<SF_BW-1 ? penr[threadIdx.x+1][threadIdx.y] : penaltyr_g[y*pitchf1+x+1]) + penr[threadIdx.x][threadIdx.y])*0.5f : 0.0f)*hx_2; xm = (x>0 ? 
((threadIdx.x>0 ? penr[threadIdx.x-1][threadIdx.y] : penaltyr_g[y*pitchf1+x-1]) + penr[threadIdx.x][threadIdx.y])*0.5f : 0.0f)*hx_2; yp = (y<ny-1 ? ((threadIdx.y<SF_BH-1 ? penr[threadIdx.x][threadIdx.y+1] : penaltyr_g[(y+1)*pitchf1+x])+ penr[threadIdx.x][threadIdx.y])*0.5f : 0.0f)*hy_2; ym = (y>0 ? ((threadIdx.y>0 ? penr[threadIdx.x][threadIdx.y-1] : penaltyr_g[(y-1)*pitchf1+x])+ penr[threadIdx.x][threadIdx.y])*0.5f : 0.0f)*hy_2; sum = xp + xm + yp + ym; bu_g[y*pitchf1+x] = - pend[threadIdx.x][threadIdx.y] * IxIt + (x>0 ? xm*(threadIdx.x>0 ? u1[threadIdx.x-1][threadIdx.y] : u_g[y*pitchf1+x-1]) : 0.0f) + (x<nx-1 ? xp*(threadIdx.x<SF_BW-1 ? u1[threadIdx.x+1][threadIdx.y] : u_g[y*pitchf1+x+1]) : 0.0f) + (y>0 ? ym*(threadIdx.y>0 ? u1[threadIdx.x][threadIdx.y-1] : u_g[(y-1)*pitchf1+x]) : 0.0f) + (y<ny-1 ? yp*(threadIdx.y<SF_BH-1 ? u1[threadIdx.x][threadIdx.y+1] : u_g[(y+1)*pitchf1+x]) : 0.0f) - sum * u1[threadIdx.x][threadIdx.y]; bv_g[y*pitchf1+x] = - pend[threadIdx.x][threadIdx.y] * IyIt +(x>0 ? xm*(threadIdx.x>0 ? u2[threadIdx.x-1][threadIdx.y] : v_g[y*pitchf1+x-1]) : 0.0f) + (x<nx-1 ? xp*(threadIdx.x<SF_BW-1 ? u2[threadIdx.x+1][threadIdx.y] : v_g[y*pitchf1+x+1]) : 0.0f) + (y>0 ? ym*(threadIdx.y>0 ? u2[threadIdx.x][threadIdx.y-1] : v_g[(y-1)*pitchf1+x]) : 0.0f) + (y<ny-1 ? yp*(threadIdx.y<SF_BH-1 ? u2[threadIdx.x][threadIdx.y+1] : v_g[(y+1)*pitchf1+x]) : 0.0f) - sum * u2[threadIdx.x][threadIdx.y]; } } /** * @brief Kernel to compute one Red-Black-SOR iteration for the nonlinear * Euler-Lagrange equation taking into account penalty values and pixel * size for warping * @param bu_g Right-Hand-Side values for horizontal flow * @param bv_g Right-Hand-Side values for vertical flow * @param penaltyd_g Pointer to global device memory holding data term penalization * @param penaltyr_g Pointer to global device memory holding regularity term * penalization * @param du_g Pointer to global device memory for the horizontal * flow component increment * @param dv_g Pointer to global device memory for the vertical * flow component increment * @param nx Image width * @param ny Image height * @param hx Horizontal pixel size * @param hy Vertical pixel size * @param lambda Smoothness weight * @param relaxation Overrelaxation for the SOR-solver * @param red Parameter deciding whether the red or black fields of a * checkerboard pattern are being updated * @param pitchf1 Image pitch for single float images */ __global__ void sorflow_nonlinear_warp_sor_shared ( const float *bu_g, const float *bv_g, const float *penaltyd_g, const float *penaltyr_g, float *du_g, float *dv_g, int nx, int ny, float hx, float hy, float lambda, float relaxation, int red, int pitchf1 ) { const float hx_1 = 1.0f / (2.0f*hx); const float hy_1 = 1.0f / (2.0f*hy); const float hx_2=lambda/(hx*hx); const float hy_2=lambda/(hy*hy); const int x = blockIdx.x * SF_BW + threadIdx.x; const int y = blockIdx.y * SF_BW + threadIdx.y; __shared__ float du[SF_BW][SF_BH]; __shared__ float dv[SF_BW][SF_BH]; __shared__ float pend[SF_BW][SF_BH]; __shared__ float penr[SF_BW][SF_BH]; const float xx = (float)x + SF_TEXTURE_OFFSET; const float yy = (float)y + SF_TEXTURE_OFFSET; float IxIx, IxIy, IyIy; float bu, bv; float3 is; float xp, xm, yp, ym, sum; if(x < nx && y < ny){ is.x = 0.5f*(tex2D(tex_flow_sor_I1,xx+1.0f,yy) -tex2D(tex_flow_sor_I1,xx-1.0f,yy) +tex2D(tex_flow_sor_I2,xx+1.0f,yy) -tex2D(tex_flow_sor_I2,xx-1.0f,yy))*hx_1; is.y = 0.5f*(tex2D(tex_flow_sor_I1,xx,yy+1.0f) -tex2D(tex_flow_sor_I1,xx,yy-1.0f) +tex2D(tex_flow_sor_I2,xx,yy+1.0f) 
-tex2D(tex_flow_sor_I2,xx,yy-1.0f))*hy_1; is.z = (tex2D(tex_flow_sor_I2,xx,yy)-tex2D(tex_flow_sor_I1,xx,yy)); IxIx = is.x*is.x; IxIy = is.x*is.y; IyIy = is.y*is.y; bu = bu_g[y*pitchf1+x]; bv = bv_g[y*pitchf1+x]; du[threadIdx.x][threadIdx.y] = du_g[y*pitchf1+x]; dv[threadIdx.x][threadIdx.y] = dv_g[y*pitchf1+x]; pend[threadIdx.x][threadIdx.y] = penaltyd_g[y*pitchf1+x]; penr[threadIdx.x][threadIdx.y] = penaltyr_g[y*pitchf1+x]; } __syncthreads(); if(x < nx && y < ny && ((x+y)%2) == red){ xp = (x<nx-1 ? ((threadIdx.x<SF_BW-1 ? penr[threadIdx.x+1][threadIdx.y] : penaltyr_g[y*pitchf1+x+1]) + penr[threadIdx.x][threadIdx.y])*0.5f : 0.0f)*hx_2; xm = (x>0 ? ((threadIdx.x>0 ? penr[threadIdx.x-1][threadIdx.y] : penaltyr_g[y*pitchf1+x-1]) + penr[threadIdx.x][threadIdx.y])*0.5f : 0.0f)*hx_2; yp = (y<ny-1 ? ((threadIdx.y<SF_BH-1 ? penr[threadIdx.x][threadIdx.y+1] : penaltyr_g[(y+1)*pitchf1+x])+ penr[threadIdx.x][threadIdx.y])*0.5f : 0.0f)*hy_2; ym = (y>0 ? ((threadIdx.y>0 ? penr[threadIdx.x][threadIdx.y-1] : penaltyr_g[(y-1)*pitchf1+x])+ penr[threadIdx.x][threadIdx.y])*0.5f : 0.0f)*hy_2; sum = xp + xm + yp + ym; du_g[y*pitchf1+x] = (1.0f-relaxation)*du[threadIdx.x][threadIdx.y] + relaxation * ( bu - pend[threadIdx.x][threadIdx.y]*IxIy*dv[threadIdx.x][threadIdx.y] + (x>0 ? xm*(threadIdx.x>0 ? du[threadIdx.x-1][threadIdx.y] : du_g[y*pitchf1+x-1]) : 0.0f) + (x<nx-1 ? xp*(threadIdx.x<SF_BW-1 ? du[threadIdx.x+1][threadIdx.y] : du_g[y*pitchf1+x+1]) : 0.0f) + (y>0 ? ym*(threadIdx.y>0 ? du[threadIdx.x][threadIdx.y-1] : du_g[(y-1)*pitchf1+x]) : 0.0f) + (y<ny-1 ? yp*(threadIdx.y<SF_BH-1 ? du[threadIdx.x][threadIdx.y+1] : du_g[(y+1)*pitchf1+x]) : 0.0f) ) / (pend[threadIdx.x][threadIdx.y]*IxIx + sum); dv_g[y*pitchf1+x] = (1.0f-relaxation)*dv[threadIdx.x][threadIdx.y] + relaxation * (bv - pend[threadIdx.x][threadIdx.y]*IxIy*du[threadIdx.x][threadIdx.y] + (x>0 ? xm*(threadIdx.x>0 ? dv[threadIdx.x-1][threadIdx.y] : dv_g[y*pitchf1+x-1]) : 0.0f) + (x<nx-1 ? xp*(threadIdx.x<SF_BW-1 ? dv[threadIdx.x+1][threadIdx.y] : dv_g[y*pitchf1+x+1]) : 0.0f) + (y>0 ? ym*(threadIdx.y>0 ? dv[threadIdx.x][threadIdx.y-1] : dv_g[(y-1)*pitchf1+x]) : 0.0f) + (y<ny-1 ? yp*(threadIdx.y<SF_BH-1 ? dv[threadIdx.x][threadIdx.y+1] : dv_g[(y+1)*pitchf1+x]) : 0.0f) )/ (pend[threadIdx.x][threadIdx.y]*IyIy + sum); } } /** * @brief Method that calls the sorflow_nonlinear_warp_sor_shared in a loop, * with an outer loop for computing the diffisivity values for * one level of a coarse-to-fine implementation. 
* @param u_g Pointer to global device memory for the horizontal * flow component * @param v_g Pointer to global device memory for the vertical * flow component * @param du_g Pointer to global device memory for the horizontal * flow component increment * @param dv_g Pointer to global device memory for the vertical * flow component increment * @param bu_g Right-Hand-Side values for horizontal flow * @param bv_g Right-Hand-Side values for vertical flow * @param penaltyd_g Pointer to global device memory holding data term penalization * @param penaltyr_g Pointer to global device memory holding regularity term * penalization * @param nx Image width * @param ny Image height * @param pitchf1 Image pitch for single float images * @param hx Horizontal pixel size * @param hy Vertical pixel size * @param lambda Smoothness weight * @param outer_iterations Number of iterations of the penalty computation * @param inner_iterations Number of iterations for the SOR-solver * @param relaxation Overrelaxation for the SOR-solver * @param data_epsilon Smoothing parameter for the TV Penalization of the data * term * @param diff_epsilon Smoothing parameter for the TV Penalization of the * regularity term */ void sorflow_gpu_nonlinear_warp_level ( const float *u_g, const float *v_g, float *du_g, float *dv_g, float *bu_g, float *bv_g, float *penaltyd_g, float *penaltyr_g, int nx, int ny, int pitchf1, float hx, float hy, float lambda, float overrelaxation, int outer_iterations, int inner_iterations, float data_epsilon, float diff_epsilon ) { int i, j; int ngx = (nx%SF_BW) ? ((nx/SF_BW)+1) : (nx/SF_BW); int ngy = (ny%SF_BW) ? ((ny/SF_BW)+1) : (ny/SF_BW); dim3 dimGrid(ngx,ngy); dim3 dimBlock(SF_BW,SF_BH); cutilSafeCall( cudaMemset(du_g,0,pitchf1*ny*sizeof(float))); cutilSafeCall( cudaMemset(dv_g,0,pitchf1*ny*sizeof(float))); for(i=0;i<outer_iterations;i++){ sorflow_update_robustifications_warp_tex_shared<<<dimGrid,dimBlock>>> (u_g,v_g,du_g,dv_g,penaltyd_g,penaltyr_g,nx,ny,hx,hy, data_epsilon,diff_epsilon,pitchf1); catchkernel; sorflow_update_righthandside_shared<<<dimGrid,dimBlock>>> (u_g,v_g,penaltyd_g,penaltyr_g,bu_g,bv_g,nx,ny,hx,hy,lambda,pitchf1); catchkernel; for(j=0;j<inner_iterations;j++){ sorflow_nonlinear_warp_sor_shared<<<dimGrid,dimBlock>>> (bu_g,bv_g,penaltyd_g,penaltyr_g,du_g,dv_g,nx,ny,hx,hy,lambda,overrelaxation,0,pitchf1); catchkernel; sorflow_nonlinear_warp_sor_shared<<<dimGrid,dimBlock>>> (bu_g,bv_g,penaltyd_g,penaltyr_g,du_g,dv_g,nx,ny,hx,hy,lambda,overrelaxation,1,pitchf1); catchkernel; } } } float FlowLibGpuSOR::computeFlow() { float lambda = _lambda * 255.0f; int max_rec_depth; int warp_max_levels; int rec_depth; int ngx = (_nx%SF_BW) ? ((_nx/SF_BW)+1) : (_nx/SF_BW); int ngy = (_ny%SF_BH) ? ((_ny/SF_BH)+1) : (_ny/SF_BH); dim3 dimGrid(ngx,ngy); dim3 dimBlock(SF_BW,SF_BH); warp_max_levels = computeMaxWarpLevels(); max_rec_depth = (((_start_level+1) < warp_max_levels) ? 
(_start_level+1) : warp_max_levels) -1; if(max_rec_depth >= _I1pyramid->nl){ max_rec_depth = _I1pyramid->nl-1; } if(!textures_flow_sor_initialized){ tex_flow_sor_I1.addressMode[0] = cudaAddressModeClamp; tex_flow_sor_I1.addressMode[1] = cudaAddressModeClamp; tex_flow_sor_I1.filterMode = IMAGE_FILTER_METHOD; tex_flow_sor_I1.normalized = false; tex_flow_sor_I2.addressMode[0] = cudaAddressModeClamp; tex_flow_sor_I2.addressMode[1] = cudaAddressModeClamp; tex_flow_sor_I2.filterMode = IMAGE_FILTER_METHOD; tex_flow_sor_I2.normalized = false; textures_flow_sor_initialized = true; } int nx_fine, ny_fine, nx_coarse=0, ny_coarse=0; float hx_fine; float hy_fine; cutilSafeCall( cudaMemset(_u1_g,0,_pitchf1*_ny*sizeof(float))); cutilSafeCall( cudaMemset(_u2_g,0,_pitchf1*_ny*sizeof(float))); for(rec_depth = max_rec_depth; rec_depth >= 0; rec_depth--){ if(_verbose) fprintf(stderr," Level %i",rec_depth); nx_fine = _I1pyramid->nx[rec_depth]; ny_fine = _I1pyramid->ny[rec_depth]; hx_fine=(float)_nx/(float)nx_fine; hy_fine=(float)_ny/(float)ny_fine; cutilSafeCall( cudaBindTexture2D(0, &tex_flow_sor_I1, _I1pyramid->level[rec_depth], &flow_sor_float_tex, nx_fine, ny_fine, _I1pyramid->pitch[rec_depth]*sizeof(float)) ); if(_debug){ sprintf(_debugbuffer,"debug/GI1 %i.png",rec_depth); saveCudaImage(_debugbuffer,_I1pyramid->level[rec_depth],nx_fine,ny_fine,_I1pyramid->pitch[rec_depth],1,1.0f,-1.0f); sprintf(_debugbuffer,"debug/GI2 %i.png",rec_depth); saveCudaImage(_debugbuffer,_I2pyramid->level[rec_depth],nx_fine,ny_fine,_I2pyramid->pitch[rec_depth],1,1.0f,-1.0f); } if(rec_depth < max_rec_depth){ resampleAreaParallelSeparate(_u1_g,_u1_g,nx_coarse,ny_coarse,_pitchf1,nx_fine,ny_fine,_pitchf1,_b1); resampleAreaParallelSeparate(_u2_g,_u2_g,nx_coarse,ny_coarse,_pitchf1,nx_fine,ny_fine,_pitchf1,_b2); } if(rec_depth >= _end_level){ backwardRegistrationBilinearFunctionTex(_I2pyramid->level[rec_depth],_u1_g,_u2_g, _I2warp,_I1pyramid->level[rec_depth], nx_fine,ny_fine,_I2pyramid->pitch[rec_depth],_pitchf1,hx_fine,hy_fine); if(_debug){ sprintf(_debugbuffer,"debug/GW2 %i.png",rec_depth); saveCudaImage(_debugbuffer,_I2warp,nx_fine,ny_fine,_pitchf1,1,1.0f,-1.0f); } cutilSafeCall (cudaUnbindTexture(tex_flow_sor_I2)); cutilSafeCall( cudaBindTexture2D(0, &tex_flow_sor_I2, _I2warp, &flow_sor_float_tex, nx_fine, ny_fine, _pitchf1*sizeof(float)) ); sorflow_gpu_nonlinear_warp_level (_u1_g,_u2_g,_u1lvl,_u2lvl,_b1,_b2,_penDat,_penReg, nx_fine,ny_fine,_pitchf1, hx_fine,hy_fine, lambda,_overrelaxation, _oi,_ii, _dat_epsilon,_reg_epsilon); add_flow_fields<<<dimGrid,dimBlock>>>(_u1lvl,_u2lvl,_u1_g,_u2_g,nx_fine,ny_fine,_pitchf1); catchkernel; } else{ if(_verbose) fprintf(stderr," skipped"); } nx_coarse = nx_fine; ny_coarse = ny_fine; } if(_debug) delete [] _debugbuffer; unbind_textures_flow_sor(); //TODO: Timer return -1.0f; }
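The SOR kernel in the pair above updates only one colour of a checkerboard per launch (`((x+y)%2) == red`), and sorflow_gpu_nonlinear_warp_level issues two launches per inner iteration, one per colour. The sketch below (toy 5-point Laplacian, illustrative names and sizes, not part of flowlib) shows that red-black over-relaxation pattern in isolation.

// Sketch only: red-black SOR on a plain Poisson problem with unit grid spacing.
// The parity test mirrors the one used by sorflow_nonlinear_warp_sor_shared.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void rb_sor(float* u, const float* f, int nx, int ny, float omega, int red) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x <= 0 || y <= 0 || x >= nx - 1 || y >= ny - 1) return;  // keep Dirichlet boundary fixed
  if (((x + y) & 1) != red) return;                            // checkerboard parity
  int i = y * nx + x;
  float gs = 0.25f * (u[i - 1] + u[i + 1] + u[i - nx] + u[i + nx] - f[i]);
  u[i] = (1.0f - omega) * u[i] + omega * gs;                   // over-relaxed update
}

int main() {
  const int nx = 64, ny = 64;
  float *u, *f;
  cudaMallocManaged((void**)&u, nx * ny * sizeof(float));
  cudaMallocManaged((void**)&f, nx * ny * sizeof(float));
  for (int i = 0; i < nx * ny; ++i) { u[i] = 0.0f; f[i] = 1.0f; }
  dim3 block(16, 16), grid((nx + 15) / 16, (ny + 15) / 16);
  for (int it = 0; it < 100; ++it) {
    rb_sor<<<grid, block>>>(u, f, nx, ny, 1.9f, 0);  // red points
    rb_sor<<<grid, block>>>(u, f, nx, ny, 1.9f, 1);  // black points
  }
  cudaDeviceSynchronize();
  printf("u at centre: %f\n", u[(ny / 2) * nx + nx / 2]);
  cudaFree(u); cudaFree(f);
  return 0;
}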
59517c5b4122ec449901bd458776b0f941d50569.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <math.h> #include <float.h> #include <hip/hip_vector_types.h> #include <vector_functions.h> #include "cutil_math.h" #define KSSTRUCT 50.75f #define KDSTRUCT -0.25f #define KSSHEAR 50.75f #define KDSHEAR -0.25f #define KSBEND 50.95f #define KDBEND -0.25f __constant__ float KsStruct = KSSTRUCT; __constant__ float KdStruct = KDSTRUCT; __constant__ float KsShear = KSSHEAR; __constant__ float KdShear = KDSHEAR; __constant__ float KsBend = KSBEND; __constant__ float KdBend = KDBEND; __constant__ float springStiffnes[12] = { KSSTRUCT, KSSTRUCT, KSSTRUCT, KSSTRUCT, KSSHEAR, KSSHEAR, KSSHEAR, KSSHEAR, KSBEND, KSBEND, KSBEND, KSBEND }; __constant__ float springDamping[12] = { KDSTRUCT, KDSTRUCT, KDSTRUCT, KDSTRUCT, KDSHEAR, KDSHEAR, KDSHEAR, KDSHEAR, KDBEND, KDBEND, KDBEND, KDBEND }; __constant__ int springCoord[2 * 12] = { 1, 0, 0, -1, -1, 0, 0, 1, 1, -1, -1, -1, -1, 1, 1, 1, 2, 0, 0, -2, -2, 0, 0, 2 }; //structural springs (adjacent neighbors) // o // | // o--m--o // | // o //shear springs (diagonal neighbors) // o o o // \ / // o m o // / \ // o o o //bend spring (adjacent neighbors 1 node away) // //o o o o o // | //o o | o o // | //o-------m-------o // | //o o | o o // | //o o o o o __forceinline__ __device__ void getSpringCoord(int k, int* x, int* y) { *x = springCoord[k * 2 + 0]; *y = springCoord[k * 2 + 1]; } __forceinline__ __device__ void getSpringCoeff(int k, float* ks, float* kd) { *ks = springStiffnes[k]; *kd = springDamping[k]; } extern "C" __global__ void verlet(const float4* __restrict__ g_pos_in, const float4* __restrict__ g_pos_old_in, float4* __restrict__ g_pos_out, float4* __restrict__ g_pos_old_out, float4* __restrict__ g_normals_out, const float* __restrict__ g_mass_in, float damp, float dt, float stepX, float stepY) { // workitem/worksize info int idX = blockIdx.x * blockDim.x + threadIdx.x; int idY = blockIdx.y * blockDim.y + threadIdx.y; int sizeX = gridDim.x*blockDim.x; int sizeY = gridDim.y*blockDim.y; int index = (idY * sizeX) + idX; float mass = g_mass_in[index]; float4 pos = g_pos_in[index]; float4 pos_old = g_pos_old_in[index]; float4 vel = (pos - pos_old) / dt; const float4 gravity = make_float4(0.0f, -0.00981f, 0.0f, 0.0f); float4 force = gravity*mass + vel*damp; float ks, kd; int x, y; for (int k = 0; k < 12; ++k) { getSpringCoord(k, &x, &y); getSpringCoeff(k, &ks, &kd); if (((idX + x) < (int)0) || ((idX + x) > (sizeX - 1))) continue; if (((idY + y) < (int)0) || ((idY + y) > (sizeY - 1))) continue; int index_neigh = (idY + y) * sizeX + (idX + x); float rest_length = length(make_float2(x * stepX, y * stepY)); float4 pos2 = g_pos_in[index_neigh]; float4 pos2_old = g_pos_old_in[index_neigh]; float4 vel2 = (pos2 - pos2_old) / dt; float4 deltaP = pos - pos2; float4 deltaV = vel - vel2; float dist = length(deltaP); float leftTerm = -ks * (dist - rest_length); float rightTerm = kd * (dot(deltaV, deltaP) / dist); float4 springForce = (leftTerm + rightTerm)*normalize(deltaP); force += springForce; } float4 normal; { int index_neigh_left = (idY)* sizeX + max((idX - 1), 0); int index_neigh_right = (idY)* sizeX + min((idX + 1), sizeX - 1); int index_neigh_bottom = max((idY - 1), 0) * sizeX + (idX); int index_neigh_top = min((idY + 1), sizeY - 1) * sizeX + (idX); float4 left = g_pos_in[index_neigh_left]; float4 right = g_pos_in[index_neigh_right]; float4 bottom = g_pos_in[index_neigh_bottom]; float4 top = g_pos_in[index_neigh_top]; float4 tangentX = 
right - left; if (dot(tangentX, tangentX) < 1e-10f) tangentX = make_float4(1.0f, 0.0f, 0.0f, 0.0f); else tangentX = normalize(tangentX); float4 tangentZ = bottom - top; if (dot(tangentZ, tangentZ) < 1e-10f) tangentZ = make_float4(0.0f, 0.0f, 1.0f, 0.0f); else tangentZ = normalize(tangentZ); normal = make_float4(cross(make_float3(tangentX), make_float3(tangentZ)), 0.0f); } float4 acc = make_float4(0.0f, 0.0f, 0.0f, 0.0f); if (mass != 0.0f) acc = force / mass; // verlet float4 tmp = pos; pos = pos * 2.0f - pos_old + acc * dt * dt; pos_old = tmp; float cf = 0.75f; float4 d = pos - pos_old; float4 rt = make_float4(0.0f, 0.0f, 0.0f, 0.0f); // y-up world plane { if (pos.y < 0.0f) { // collision float4 coll_dir = make_float4(0.0f, 1.0f, 0.0f, 0.0f); pos.y = 0.0f; float4 dt = d - coll_dir * dot(d, coll_dir); rt += -cf*dt; } } // sphere { float4 center = make_float4(0.0f, 2.0f, 0.0f, 1.0f); float radius = 1.75f; if (length(pos - center) < radius) { // collision float4 coll_dir = normalize(pos - center); pos = center + coll_dir * radius; float4 dt = d - coll_dir * dot(d, coll_dir); rt += -cf*dt; } } g_pos_out[index] = pos + rt; g_pos_old_out[index] = pos_old; g_normals_out[index] = normalize(normal); } extern "C" void cuK_cloth ( void* g_pos_in, void* g_pos_old_in, void* g_pos_out, void* g_pos_old_out, void* g_normals_out, void* mass, float damp, float dt, float stepX, float stepY, int gSizeX, int gSizeY, int lSizeX, int lSizeY ) { dim3 block(lSizeX, lSizeY, 1); dim3 grid(gSizeX / block.x, gSizeY / block.y, 1); hipLaunchKernelGGL(( verlet) , dim3(grid), dim3(block) , 0, 0, (float4*)g_pos_in, (float4*)g_pos_old_in, (float4*)g_pos_out, (float4*)g_pos_old_out, (float4*)g_normals_out, (float*)mass, damp, dt, stepX, stepY); }
59517c5b4122ec449901bd458776b0f941d50569.cu
#include <cuda.h> #include <cuda_runtime_api.h> #include <math.h> #include <float.h> #include <vector_types.h> #include <vector_functions.h> #include "cutil_math.h" #define KSSTRUCT 50.75f #define KDSTRUCT -0.25f #define KSSHEAR 50.75f #define KDSHEAR -0.25f #define KSBEND 50.95f #define KDBEND -0.25f __constant__ float KsStruct = KSSTRUCT; __constant__ float KdStruct = KDSTRUCT; __constant__ float KsShear = KSSHEAR; __constant__ float KdShear = KDSHEAR; __constant__ float KsBend = KSBEND; __constant__ float KdBend = KDBEND; __constant__ float springStiffnes[12] = { KSSTRUCT, KSSTRUCT, KSSTRUCT, KSSTRUCT, KSSHEAR, KSSHEAR, KSSHEAR, KSSHEAR, KSBEND, KSBEND, KSBEND, KSBEND }; __constant__ float springDamping[12] = { KDSTRUCT, KDSTRUCT, KDSTRUCT, KDSTRUCT, KDSHEAR, KDSHEAR, KDSHEAR, KDSHEAR, KDBEND, KDBEND, KDBEND, KDBEND }; __constant__ int springCoord[2 * 12] = { 1, 0, 0, -1, -1, 0, 0, 1, 1, -1, -1, -1, -1, 1, 1, 1, 2, 0, 0, -2, -2, 0, 0, 2 }; //structural springs (adjacent neighbors) // o // | // o--m--o // | // o //shear springs (diagonal neighbors) // o o o // \ / // o m o // / \ // o o o //bend spring (adjacent neighbors 1 node away) // //o o o o o // | //o o | o o // | //o-------m-------o // | //o o | o o // | //o o o o o __forceinline__ __device__ void getSpringCoord(int k, int* x, int* y) { *x = springCoord[k * 2 + 0]; *y = springCoord[k * 2 + 1]; } __forceinline__ __device__ void getSpringCoeff(int k, float* ks, float* kd) { *ks = springStiffnes[k]; *kd = springDamping[k]; } extern "C" __global__ void verlet(const float4* __restrict__ g_pos_in, const float4* __restrict__ g_pos_old_in, float4* __restrict__ g_pos_out, float4* __restrict__ g_pos_old_out, float4* __restrict__ g_normals_out, const float* __restrict__ g_mass_in, float damp, float dt, float stepX, float stepY) { // workitem/worksize info int idX = blockIdx.x * blockDim.x + threadIdx.x; int idY = blockIdx.y * blockDim.y + threadIdx.y; int sizeX = gridDim.x*blockDim.x; int sizeY = gridDim.y*blockDim.y; int index = (idY * sizeX) + idX; float mass = g_mass_in[index]; float4 pos = g_pos_in[index]; float4 pos_old = g_pos_old_in[index]; float4 vel = (pos - pos_old) / dt; const float4 gravity = make_float4(0.0f, -0.00981f, 0.0f, 0.0f); float4 force = gravity*mass + vel*damp; float ks, kd; int x, y; for (int k = 0; k < 12; ++k) { getSpringCoord(k, &x, &y); getSpringCoeff(k, &ks, &kd); if (((idX + x) < (int)0) || ((idX + x) > (sizeX - 1))) continue; if (((idY + y) < (int)0) || ((idY + y) > (sizeY - 1))) continue; int index_neigh = (idY + y) * sizeX + (idX + x); float rest_length = length(make_float2(x * stepX, y * stepY)); float4 pos2 = g_pos_in[index_neigh]; float4 pos2_old = g_pos_old_in[index_neigh]; float4 vel2 = (pos2 - pos2_old) / dt; float4 deltaP = pos - pos2; float4 deltaV = vel - vel2; float dist = length(deltaP); float leftTerm = -ks * (dist - rest_length); float rightTerm = kd * (dot(deltaV, deltaP) / dist); float4 springForce = (leftTerm + rightTerm)*normalize(deltaP); force += springForce; } float4 normal; { int index_neigh_left = (idY)* sizeX + max((idX - 1), 0); int index_neigh_right = (idY)* sizeX + min((idX + 1), sizeX - 1); int index_neigh_bottom = max((idY - 1), 0) * sizeX + (idX); int index_neigh_top = min((idY + 1), sizeY - 1) * sizeX + (idX); float4 left = g_pos_in[index_neigh_left]; float4 right = g_pos_in[index_neigh_right]; float4 bottom = g_pos_in[index_neigh_bottom]; float4 top = g_pos_in[index_neigh_top]; float4 tangentX = right - left; if (dot(tangentX, tangentX) < 1e-10f) tangentX = make_float4(1.0f, 
0.0f, 0.0f, 0.0f); else tangentX = normalize(tangentX); float4 tangentZ = bottom - top; if (dot(tangentZ, tangentZ) < 1e-10f) tangentZ = make_float4(0.0f, 0.0f, 1.0f, 0.0f); else tangentZ = normalize(tangentZ); normal = make_float4(cross(make_float3(tangentX), make_float3(tangentZ)), 0.0f); } float4 acc = make_float4(0.0f, 0.0f, 0.0f, 0.0f); if (mass != 0.0f) acc = force / mass; // verlet float4 tmp = pos; pos = pos * 2.0f - pos_old + acc * dt * dt; pos_old = tmp; float cf = 0.75f; float4 d = pos - pos_old; float4 rt = make_float4(0.0f, 0.0f, 0.0f, 0.0f); // y-up world plane { if (pos.y < 0.0f) { // collision float4 coll_dir = make_float4(0.0f, 1.0f, 0.0f, 0.0f); pos.y = 0.0f; float4 dt = d - coll_dir * dot(d, coll_dir); rt += -cf*dt; } } // sphere { float4 center = make_float4(0.0f, 2.0f, 0.0f, 1.0f); float radius = 1.75f; if (length(pos - center) < radius) { // collision float4 coll_dir = normalize(pos - center); pos = center + coll_dir * radius; float4 dt = d - coll_dir * dot(d, coll_dir); rt += -cf*dt; } } g_pos_out[index] = pos + rt; g_pos_old_out[index] = pos_old; g_normals_out[index] = normalize(normal); } extern "C" void cuK_cloth ( void* g_pos_in, void* g_pos_old_in, void* g_pos_out, void* g_pos_old_out, void* g_normals_out, void* mass, float damp, float dt, float stepX, float stepY, int gSizeX, int gSizeY, int lSizeX, int lSizeY ) { dim3 block(lSizeX, lSizeY, 1); dim3 grid(gSizeX / block.x, gSizeY / block.y, 1); verlet <<< grid, block >>>((float4*)g_pos_in, (float4*)g_pos_old_in, (float4*)g_pos_out, (float4*)g_pos_old_out, (float4*)g_normals_out, (float*)mass, damp, dt, stepX, stepY); }
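The cloth kernel above advances positions with position Verlet, `pos = pos*2 - pos_old + acc*dt*dt`, keeping the velocity implicit in the difference of the last two stored positions. A minimal sketch of just that integration step, under the same scaled gravity (single particle, illustrative names, not part of the cloth solver), follows.

// Sketch only: position-Verlet free fall for one particle in managed memory.
#include <cstdio>
#include <cuda_runtime.h>
#include <vector_functions.h>

__global__ void verlet_step(float3* pos, float3* pos_old, float3 acc, float dt, int steps) {
  float3 p = *pos, q = *pos_old;
  for (int i = 0; i < steps; ++i) {
    float3 next = make_float3(2.0f * p.x - q.x + acc.x * dt * dt,
                              2.0f * p.y - q.y + acc.y * dt * dt,
                              2.0f * p.z - q.z + acc.z * dt * dt);
    q = p;   // previous position becomes the "old" state
    p = next; // velocity stays implicit in (p - q)/dt
  }
  *pos = p; *pos_old = q;
}

int main() {
  float3 *pos, *pos_old;
  cudaMallocManaged((void**)&pos, sizeof(float3));
  cudaMallocManaged((void**)&pos_old, sizeof(float3));
  *pos = make_float3(0.0f, 10.0f, 0.0f);
  *pos_old = *pos;  // particle starts at rest
  verlet_step<<<1, 1>>>(pos, pos_old, make_float3(0.0f, -0.00981f, 0.0f), 0.1f, 100);
  cudaDeviceSynchronize();
  printf("height after 100 steps: %f\n", pos->y);
  cudaFree(pos); cudaFree(pos_old);
  return 0;
}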
1888d8bde807ee1e66fcf2526fd486edcb0dac68.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <iostream> #include <linalg/reduce_rows_by_key.cuh> #include <raft/random/rng.cuh> #include "test_utils.h" namespace MLCommon { namespace LinAlg { template <typename Type> __global__ void naiveReduceRowsByKeyKernel(const Type* d_A, int lda, uint32_t* d_keys, const Type* d_weight, char* d_char_keys, int nrows, int ncols, int nkeys, Type* d_sums) { int c = threadIdx.x + blockIdx.x * blockDim.x; if (c >= ncols) return; int this_key = threadIdx.y + blockIdx.y * blockDim.y; Type sum = 0.0; for (int r = 0; r < nrows; r++) { if (this_key != d_keys[r]) continue; Type wt = 1; if (d_weight) wt = d_weight[r]; sum += d_A[lda * r + c] * wt; } d_sums[this_key * ncols + c] = sum; } template <typename Type> void naiveReduceRowsByKey(const Type* d_A, int lda, uint32_t* d_keys, const Type* d_weight, char* d_char_keys, int nrows, int ncols, int nkeys, Type* d_sums, hipStream_t stream) { hipMemset(d_sums, 0, sizeof(Type) * nkeys * ncols); hipLaunchKernelGGL(( naiveReduceRowsByKeyKernel), dim3(dim3((ncols + 31) / 32, nkeys)), dim3(dim3(32, 1)), 0, stream, d_A, lda, d_keys, d_weight, d_char_keys, nrows, ncols, nkeys, d_sums); } template <typename T> struct ReduceRowsInputs { T tolerance; int nobs; uint32_t cols; uint32_t nkeys; unsigned long long int seed; bool weighted; T max_weight; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const ReduceRowsInputs<T>& dims) { return os; } template <typename T> class ReduceRowTest : public ::testing::TestWithParam<ReduceRowsInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<ReduceRowsInputs<T>>::GetParam(); raft::random::Rng r(params.seed); raft::random::Rng r_int(params.seed); CUDA_CHECK(hipStreamCreate(&stream)); int nobs = params.nobs; uint32_t cols = params.cols; uint32_t nkeys = params.nkeys; raft::allocate(in, nobs * cols, stream); raft::allocate(keys, nobs, stream); raft::allocate(scratch_buf, nobs, stream); raft::allocate(out_ref, nkeys * cols, stream); raft::allocate(out, nkeys * cols, stream); r.uniform(in, nobs * cols, T(0.0), T(2.0 / nobs), stream); r_int.uniformInt(keys, nobs, (uint32_t)0, nkeys, stream); if (params.weighted) { raft::allocate(weight, nobs, stream); raft::random::Rng r(params.seed, raft::random::GeneratorType::GenPhilox); r.uniform(weight, nobs, T(1), params.max_weight, stream); } else { weight = nullptr; } naiveReduceRowsByKey(in, cols, keys, weight, scratch_buf, nobs, cols, nkeys, out_ref, stream); if (params.weighted) { reduce_rows_by_key(in, cols, keys, weight, scratch_buf, nobs, cols, nkeys, out, stream); } else { reduce_rows_by_key(in, cols, keys, scratch_buf, nobs, cols, nkeys, out, stream); } CUDA_CHECK(hipStreamSynchronize(stream)); } void TearDown() override { CUDA_CHECK(hipFree(in)); CUDA_CHECK(hipFree(keys)); CUDA_CHECK(hipFree(scratch_buf)); 
CUDA_CHECK(hipFree(out_ref)); CUDA_CHECK(hipFree(out)); CUDA_CHECK(hipStreamDestroy(stream)); } protected: hipStream_t stream = 0; ReduceRowsInputs<T> params; T *in, *out_ref, *out; T* weight; uint32_t* keys; char* scratch_buf; int device_count = 0; }; // ReduceRowTestF // 128 Obs, 32 cols, 6 clusters const std::vector<ReduceRowsInputs<float>> inputsf2 = {{0.000001f, 128, 32, 6, 1234ULL, false}, {0.000001f, 128, 32, 6, 1234ULL, true, 1.0}, {0.000001f, 128, 32, 6, 1234ULL, true, 2.0}}; typedef ReduceRowTest<float> ReduceRowTestF; TEST_P(ReduceRowTestF, Result) { ASSERT_TRUE(raft::devArrMatch( out_ref, out, params.cols * params.nkeys, raft::CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestF, ::testing::ValuesIn(inputsf2)); // ReduceRowTestD // 128 Obs, 32 cols, 6 clusters, double precision const std::vector<ReduceRowsInputs<double>> inputsd2 = { {0.00000001, 128, 32, 6, 1234ULL, false}, {0.00000001, 128, 32, 6, 1234ULL, true, 2.0}, {0.00000001, 128, 32, 6, 1234ULL, true, 8.0}}; typedef ReduceRowTest<double> ReduceRowTestD; TEST_P(ReduceRowTestD, Result) { ASSERT_TRUE(raft::devArrMatch( out_ref, out, params.cols * params.nkeys, raft::CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestD, ::testing::ValuesIn(inputsd2)); // ReduceRowTestSmallnKey // 128 Obs, 32 cols, 3 clusters const std::vector<ReduceRowsInputs<float>> inputsf_small_nkey = { {0.000001f, 128, 32, 3, 1234ULL, false}, {0.000001f, 128, 32, 3, 1234ULL, true, 5.0}, {0.000001f, 128, 32, 3, 1234ULL, true, 8.0}}; typedef ReduceRowTest<float> ReduceRowTestSmallnKey; TEST_P(ReduceRowTestSmallnKey, Result) { ASSERT_TRUE(raft::devArrMatch( out_ref, out, params.cols * params.nkeys, raft::CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestSmallnKey, ::testing::ValuesIn(inputsf_small_nkey)); // ReduceRowTestBigSpace // 512 Obs, 1024 cols, 32 clusters, double precision const std::vector<ReduceRowsInputs<double>> inputsd_big_space = { {0.00000001, 512, 1024, 40, 1234ULL, false}, {0.00000001, 512, 1024, 40, 1234ULL, true, 4.0}, {0.00000001, 512, 1024, 40, 1234ULL, true, 16.0}}; typedef ReduceRowTest<double> ReduceRowTestBigSpace; TEST_P(ReduceRowTestBigSpace, Result) { ASSERT_TRUE(raft::devArrMatch( out_ref, out, params.cols * params.nkeys, raft::CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestBigSpace, ::testing::ValuesIn(inputsd_big_space)); // ReduceRowTestManyObs // 100000 Obs, 37 cols, 32 clusters const std::vector<ReduceRowsInputs<float>> inputsf_many_obs = { {0.00001f, 100000, 37, 32, 1234ULL, false}, {0.00001f, 100000, 37, 32, 1234ULL, true, 4.0}, {0.00001f, 100000, 37, 32, 1234ULL, true, 16.0}}; typedef ReduceRowTest<float> ReduceRowTestManyObs; TEST_P(ReduceRowTestManyObs, Result) { ASSERT_TRUE(raft::devArrMatch( out_ref, out, params.cols * params.nkeys, raft::CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestManyObs, ::testing::ValuesIn(inputsf_many_obs)); // ReduceRowTestManyClusters // 100000 Obs, 37 cols, 2048 clusters const std::vector<ReduceRowsInputs<float>> inputsf_many_cluster = { {0.00001f, 100000, 37, 2048, 1234ULL, false}, {0.00001f, 100000, 37, 2048, 1234ULL, true, 32.0}, {0.00001f, 100000, 37, 2048, 1234ULL, true, 16.0}}; typedef ReduceRowTest<float> ReduceRowTestManyClusters; TEST_P(ReduceRowTestManyClusters, Result) { ASSERT_TRUE(raft::devArrMatch( out_ref, out, params.cols * 
params.nkeys, raft::CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestManyClusters, ::testing::ValuesIn(inputsf_many_cluster)); } // end namespace LinAlg } // end namespace MLCommon
1888d8bde807ee1e66fcf2526fd486edcb0dac68.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <iostream> #include <linalg/reduce_rows_by_key.cuh> #include <raft/random/rng.cuh> #include "test_utils.h" namespace MLCommon { namespace LinAlg { template <typename Type> __global__ void naiveReduceRowsByKeyKernel(const Type* d_A, int lda, uint32_t* d_keys, const Type* d_weight, char* d_char_keys, int nrows, int ncols, int nkeys, Type* d_sums) { int c = threadIdx.x + blockIdx.x * blockDim.x; if (c >= ncols) return; int this_key = threadIdx.y + blockIdx.y * blockDim.y; Type sum = 0.0; for (int r = 0; r < nrows; r++) { if (this_key != d_keys[r]) continue; Type wt = 1; if (d_weight) wt = d_weight[r]; sum += d_A[lda * r + c] * wt; } d_sums[this_key * ncols + c] = sum; } template <typename Type> void naiveReduceRowsByKey(const Type* d_A, int lda, uint32_t* d_keys, const Type* d_weight, char* d_char_keys, int nrows, int ncols, int nkeys, Type* d_sums, cudaStream_t stream) { cudaMemset(d_sums, 0, sizeof(Type) * nkeys * ncols); naiveReduceRowsByKeyKernel<<<dim3((ncols + 31) / 32, nkeys), dim3(32, 1), 0, stream>>>( d_A, lda, d_keys, d_weight, d_char_keys, nrows, ncols, nkeys, d_sums); } template <typename T> struct ReduceRowsInputs { T tolerance; int nobs; uint32_t cols; uint32_t nkeys; unsigned long long int seed; bool weighted; T max_weight; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const ReduceRowsInputs<T>& dims) { return os; } template <typename T> class ReduceRowTest : public ::testing::TestWithParam<ReduceRowsInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<ReduceRowsInputs<T>>::GetParam(); raft::random::Rng r(params.seed); raft::random::Rng r_int(params.seed); CUDA_CHECK(cudaStreamCreate(&stream)); int nobs = params.nobs; uint32_t cols = params.cols; uint32_t nkeys = params.nkeys; raft::allocate(in, nobs * cols, stream); raft::allocate(keys, nobs, stream); raft::allocate(scratch_buf, nobs, stream); raft::allocate(out_ref, nkeys * cols, stream); raft::allocate(out, nkeys * cols, stream); r.uniform(in, nobs * cols, T(0.0), T(2.0 / nobs), stream); r_int.uniformInt(keys, nobs, (uint32_t)0, nkeys, stream); if (params.weighted) { raft::allocate(weight, nobs, stream); raft::random::Rng r(params.seed, raft::random::GeneratorType::GenPhilox); r.uniform(weight, nobs, T(1), params.max_weight, stream); } else { weight = nullptr; } naiveReduceRowsByKey(in, cols, keys, weight, scratch_buf, nobs, cols, nkeys, out_ref, stream); if (params.weighted) { reduce_rows_by_key(in, cols, keys, weight, scratch_buf, nobs, cols, nkeys, out, stream); } else { reduce_rows_by_key(in, cols, keys, scratch_buf, nobs, cols, nkeys, out, stream); } CUDA_CHECK(cudaStreamSynchronize(stream)); } void TearDown() override { CUDA_CHECK(cudaFree(in)); CUDA_CHECK(cudaFree(keys)); CUDA_CHECK(cudaFree(scratch_buf)); CUDA_CHECK(cudaFree(out_ref)); CUDA_CHECK(cudaFree(out)); CUDA_CHECK(cudaStreamDestroy(stream)); } protected: 
cudaStream_t stream = 0; ReduceRowsInputs<T> params; T *in, *out_ref, *out; T* weight; uint32_t* keys; char* scratch_buf; int device_count = 0; }; // ReduceRowTestF // 128 Obs, 32 cols, 6 clusters const std::vector<ReduceRowsInputs<float>> inputsf2 = {{0.000001f, 128, 32, 6, 1234ULL, false}, {0.000001f, 128, 32, 6, 1234ULL, true, 1.0}, {0.000001f, 128, 32, 6, 1234ULL, true, 2.0}}; typedef ReduceRowTest<float> ReduceRowTestF; TEST_P(ReduceRowTestF, Result) { ASSERT_TRUE(raft::devArrMatch( out_ref, out, params.cols * params.nkeys, raft::CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestF, ::testing::ValuesIn(inputsf2)); // ReduceRowTestD // 128 Obs, 32 cols, 6 clusters, double precision const std::vector<ReduceRowsInputs<double>> inputsd2 = { {0.00000001, 128, 32, 6, 1234ULL, false}, {0.00000001, 128, 32, 6, 1234ULL, true, 2.0}, {0.00000001, 128, 32, 6, 1234ULL, true, 8.0}}; typedef ReduceRowTest<double> ReduceRowTestD; TEST_P(ReduceRowTestD, Result) { ASSERT_TRUE(raft::devArrMatch( out_ref, out, params.cols * params.nkeys, raft::CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestD, ::testing::ValuesIn(inputsd2)); // ReduceRowTestSmallnKey // 128 Obs, 32 cols, 3 clusters const std::vector<ReduceRowsInputs<float>> inputsf_small_nkey = { {0.000001f, 128, 32, 3, 1234ULL, false}, {0.000001f, 128, 32, 3, 1234ULL, true, 5.0}, {0.000001f, 128, 32, 3, 1234ULL, true, 8.0}}; typedef ReduceRowTest<float> ReduceRowTestSmallnKey; TEST_P(ReduceRowTestSmallnKey, Result) { ASSERT_TRUE(raft::devArrMatch( out_ref, out, params.cols * params.nkeys, raft::CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestSmallnKey, ::testing::ValuesIn(inputsf_small_nkey)); // ReduceRowTestBigSpace // 512 Obs, 1024 cols, 32 clusters, double precision const std::vector<ReduceRowsInputs<double>> inputsd_big_space = { {0.00000001, 512, 1024, 40, 1234ULL, false}, {0.00000001, 512, 1024, 40, 1234ULL, true, 4.0}, {0.00000001, 512, 1024, 40, 1234ULL, true, 16.0}}; typedef ReduceRowTest<double> ReduceRowTestBigSpace; TEST_P(ReduceRowTestBigSpace, Result) { ASSERT_TRUE(raft::devArrMatch( out_ref, out, params.cols * params.nkeys, raft::CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestBigSpace, ::testing::ValuesIn(inputsd_big_space)); // ReduceRowTestManyObs // 100000 Obs, 37 cols, 32 clusters const std::vector<ReduceRowsInputs<float>> inputsf_many_obs = { {0.00001f, 100000, 37, 32, 1234ULL, false}, {0.00001f, 100000, 37, 32, 1234ULL, true, 4.0}, {0.00001f, 100000, 37, 32, 1234ULL, true, 16.0}}; typedef ReduceRowTest<float> ReduceRowTestManyObs; TEST_P(ReduceRowTestManyObs, Result) { ASSERT_TRUE(raft::devArrMatch( out_ref, out, params.cols * params.nkeys, raft::CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(ReduceRowTests, ReduceRowTestManyObs, ::testing::ValuesIn(inputsf_many_obs)); // ReduceRowTestManyClusters // 100000 Obs, 37 cols, 2048 clusters const std::vector<ReduceRowsInputs<float>> inputsf_many_cluster = { {0.00001f, 100000, 37, 2048, 1234ULL, false}, {0.00001f, 100000, 37, 2048, 1234ULL, true, 32.0}, {0.00001f, 100000, 37, 2048, 1234ULL, true, 16.0}}; typedef ReduceRowTest<float> ReduceRowTestManyClusters; TEST_P(ReduceRowTestManyClusters, Result) { ASSERT_TRUE(raft::devArrMatch( out_ref, out, params.cols * params.nkeys, raft::CompareApprox<float>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(ReduceRowTests, 
ReduceRowTestManyClusters, ::testing::ValuesIn(inputsf_many_cluster)); } // end namespace LinAlg } // end namespace MLCommon
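The pair above shows the mechanical rewrites hipify applies to a CUDA test file: runtime identifiers gain a hip prefix (cudaStream_t -> hipStream_t, cudaMemset -> hipMemset, cudaFree -> hipFree, cudaStreamSynchronize -> hipStreamSynchronize) while the surrounding gtest/raft code is left untouched, and the triple-chevron kernel launch becomes the hipLaunchKernelGGL macro with explicit dim3 arguments. A minimal sketch of that launch rewrite, using a hypothetical scaleKernel and launchScale (the names and launch geometry are illustrative, not taken from the files above):

#include <cuda_runtime.h>

// Trivial kernel used only to illustrate the launch-syntax conversion.
__global__ void scaleKernel(float* data, float alpha, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= alpha;
}

void launchScale(float* d_data, float alpha, int n, cudaStream_t stream)
{
  const dim3 block(256);
  const dim3 grid((n + block.x - 1) / block.x);

  // CUDA form, as in the .cu files of this corpus:
  scaleKernel<<<grid, block, 0, stream>>>(d_data, alpha, n);

  // hipify-generated form, as in the corresponding .hip files
  // (requires "hip/hip_runtime.h" instead of cuda_runtime.h):
  //   hipLaunchKernelGGL((scaleKernel), dim3(grid), dim3(block), 0, stream,
  //                      d_data, alpha, n);
}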
78a6c9bdcfd8c1df9a0549c53f9c66bef38be4a6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "stridedSlice.h" #include <iostream> #include <cassert> // gpu kernel for the strided slice (copies all but the last row of each channel) template <typename T> __global__ void gpuSlice( T* input, int nChannels, int iHeight, int iWidth, int oHeight, int oWidth, T* output) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int z = blockIdx.z * blockDim.z + threadIdx.z; //printf("fill %d %d %d\n", x, y, z); if( x >= nChannels || y >= oHeight || z >= oWidth ) return; const T px = input[x * iWidth * iHeight + y * iWidth + z]; output[x * oWidth * oHeight + y * oWidth + z] = px; } // strided slice along the height dimension template <typename T> hipError_t cudaSlice( T* input, int nChannels, int inputHeight, int inputWidth, T* output, hipStream_t stream ) { if( !input || !output ) { std::cout << "No input or no output" << std::endl; return hipErrorInvalidDevicePointer; } // launch kernel //std::cout << "input2: " << input2[0] << std::endl; const int outputHeight = inputHeight - 1; const int outputWidth = inputWidth; const dim3 blockDim(1, 16, 16); const dim3 gridDim(iDivUp(nChannels, blockDim.x), iDivUp(outputHeight, blockDim.y), iDivUp(outputWidth, blockDim.z)); hipLaunchKernelGGL(( gpuSlice<T>), dim3(gridDim), dim3(blockDim), 0, stream, input, nChannels, inputHeight, inputWidth, outputHeight, outputWidth, output); return CUDA(hipGetLastError()); } template hipError_t cudaSlice<float>(float*, int, int, int, float*, hipStream_t); template hipError_t cudaSlice<__half>(__half*, int, int, int, __half*, hipStream_t);
78a6c9bdcfd8c1df9a0549c53f9c66bef38be4a6.cu
#include "stridedSlice.h" #include <iostream> #include <cassert> // gpu kernel for the strided slice (copies all but the last row of each channel) template <typename T> __global__ void gpuSlice( T* input, int nChannels, int iHeight, int iWidth, int oHeight, int oWidth, T* output) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; const int z = blockIdx.z * blockDim.z + threadIdx.z; //printf("fill %d %d %d\n", x, y, z); if( x >= nChannels || y >= oHeight || z >= oWidth ) return; const T px = input[x * iWidth * iHeight + y * iWidth + z]; output[x * oWidth * oHeight + y * oWidth + z] = px; } // strided slice along the height dimension template <typename T> cudaError_t cudaSlice( T* input, int nChannels, int inputHeight, int inputWidth, T* output, cudaStream_t stream ) { if( !input || !output ) { std::cout << "No input or no output" << std::endl; return cudaErrorInvalidDevicePointer; } // launch kernel //std::cout << "input2: " << input2[0] << std::endl; const int outputHeight = inputHeight - 1; const int outputWidth = inputWidth; const dim3 blockDim(1, 16, 16); const dim3 gridDim(iDivUp(nChannels, blockDim.x), iDivUp(outputHeight, blockDim.y), iDivUp(outputWidth, blockDim.z)); gpuSlice<T><<<gridDim, blockDim, 0, stream>>>(input, nChannels, inputHeight, inputWidth, outputHeight, outputWidth, output); return CUDA(cudaGetLastError()); } template cudaError_t cudaSlice<float>(float*, int, int, int, float*, cudaStream_t); template cudaError_t cudaSlice<__half>(__half*, int, int, int, __half*, cudaStream_t);
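gpuSlice copies an nChannels x (inputHeight-1) x inputWidth block out of the input, dropping the last row of every channel; the layout is channel-major, then row, then column. A host-side reference of the same indexing can be handy when checking the kernel. This is a minimal sketch under the assumption that iDivUp in the file above is the usual ceiling-division helper; sliceReference is a hypothetical name, not part of the original source:

#include <vector>

// CPU reference for gpuSlice: the output has one fewer row per channel than the input.
// Indexing mirrors the kernel: channel-major, then row, then column.
template <typename T>
void sliceReference(const std::vector<T>& input, int nChannels, int iHeight, int iWidth,
                    std::vector<T>& output)
{
  const int oHeight = iHeight - 1;   // same as outputHeight in cudaSlice
  const int oWidth  = iWidth;
  output.resize((size_t)nChannels * oHeight * oWidth);

  for (int c = 0; c < nChannels; ++c)          // kernel coordinate x
    for (int y = 0; y < oHeight; ++y)          // kernel coordinate y
      for (int x = 0; x < oWidth; ++x)         // kernel coordinate z
        output[(size_t)c * oHeight * oWidth + y * oWidth + x] =
            input[(size_t)c * iHeight * iWidth + y * iWidth + x];
}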
19e5cd240fe06ffa00c6a11aeead5989f373e5aa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author GS <[email protected]>, created on 21.01.2019 // #include <array/NDArray.h> #include <loops/special_kernels.h> namespace sd { //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // set up given value to upper diagonal given // buffer - input buffer // shape - input shape // value - given value // diagonal - given upper diagonal (acceptable negative values also, 0 - the main diagonal) // row, cols - height and width of given matrix (MxN, rows = M, cols = N) // template <typename T> static SD_KERNEL void setDiagValueUpperKernel(void* buffer, sd::LongType* shape, T value, int diagonal, sd::LongType rows, sd::LongType cols) { __shared__ sd::LongType rank; __shared__ T* array; if (0 == threadIdx.x) { rank = shape::rank(shape); array = reinterpret_cast<T*>(buffer); } __syncthreads(); for (sd::LongType i = blockIdx.x; i < rows; i += gridDim.x) { for (int j = threadIdx.x; j < cols; j += blockDim.x) { sd::LongType coords[2] = {i, j}; sd::LongType xOffset = shape::getOffset(shape, coords); if (i + diagonal <= j) array[xOffset] = value; } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // set up given value to lower given diagonal // buffer - input buffer // shape - input shape // value - given value // diagonal - given lower diagonal (acceptable negative values also, 0 - the main diagonal) // row, cols - height and width of given matrix (MxN, rows = M, cols = N) // template <typename T> static SD_KERNEL void setDiagValueLowerKernel(void* buffer, sd::LongType* shape, T value, int diagonal, sd::LongType rows, sd::LongType cols) { sd::LongType rank = shape::rank(shape); int totalThreads = blockDim.x; for (sd::LongType i = blockIdx.x; i < rows; i += gridDim.x) { for (int j = threadIdx.x; j < cols; j += totalThreads) { sd::LongType coords[2] = {i, j}; auto xOffset = shape::getOffset(shape, coords); if (i + diagonal >= j) *(reinterpret_cast<T*>(buffer) + xOffset) = value; } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template SD_KERNEL void setDiagValueLowerKernel(void* buffer, sd::LongType* shape, double value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueUpperKernel(void* buffer, sd::LongType* shape, double value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueLowerKernel(void* buffer, sd::LongType* shape, float value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL 
void setDiagValueUpperKernel(void* buffer, sd::LongType* shape, float value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueLowerKernel(void* buffer, sd::LongType* shape, int value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueUpperKernel(void* buffer, sd::LongType* shape, int value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueLowerKernel(void* buffer, sd::LongType* shape, float16 value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueUpperKernel(void* buffer, sd::LongType* shape, float16 value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueLowerKernel(void* buffer, sd::LongType* shape, bfloat16 value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueUpperKernel(void* buffer, sd::LongType* shape, bfloat16 value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueLowerKernel(void* buffer, sd::LongType* shape, sd::LongType value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueUpperKernel(void* buffer, sd::LongType* shape, sd::LongType value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueLowerKernel(void* buffer, sd::LongType* shape, int16_t value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueUpperKernel(void* buffer, sd::LongType* shape, int16_t value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueLowerKernel(void* buffer, sd::LongType* shape, uint8_t value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueUpperKernel(void* buffer, sd::LongType* shape, uint8_t value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueLowerKernel(void* buffer, sd::LongType* shape, int8_t value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueUpperKernel(void* buffer, sd::LongType* shape, int8_t value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueLowerKernel(void* buffer, sd::LongType* shape, bool value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueUpperKernel(void* buffer, sd::LongType* shape, bool value, int diagonal, sd::LongType rows, sd::LongType cols); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> static void setDiagonalValueUpper(void* buffer, sd::LongType* shape, NDArray const& value, int diagonal, sd::LongType rows, sd::LongType cols, hipStream_t& stream) { dim3 launchDims(256, 512, 8192); hipLaunchKernelGGL(( setDiagValueUpperKernel<T>) , dim3(launchDims.x), dim3(launchDims.y), launchDims.z, stream, buffer, shape, value.e<T>(0), diagonal, rows, cols); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> static void setDiagonalValueLower(void* buffer, sd::LongType* shape, NDArray const& value, int diagonal, sd::LongType rows, sd::LongType cols, hipStream_t& stream) { dim3 launchDims(256, 512, 8192); hipLaunchKernelGGL(( setDiagValueLowerKernel<T>) , dim3(launchDims.x), dim3(launchDims.y), launchDims.z, stream, buffer, shape, value.e<T>(0), diagonal, rows, 
cols); } BUILD_SINGLE_TEMPLATE(template void setDiagonalValueUpper, (void* buffer, sd::LongType* shape, NDArray const& value, int diagonal, sd::LongType rows, sd::LongType cols, hipStream_t& stream), SD_COMMON_TYPES); BUILD_SINGLE_TEMPLATE(template void setDiagonalValueLower, (void* buffer, sd::LongType* shape, NDArray const& value, int diagonal, sd::LongType rows, sd::LongType cols, hipStream_t& stream), SD_COMMON_TYPES); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace sd
19e5cd240fe06ffa00c6a11aeead5989f373e5aa.cu
/* ****************************************************************************** * * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * See the NOTICE file distributed with this work for additional * information regarding copyright ownership. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author GS <[email protected]>, created on 21.01.2019 // #include <array/NDArray.h> #include <loops/special_kernels.h> namespace sd { //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // set up given value to upper diagonal given // buffer - input buffer // shape - input shape // value - given value // diagonal - given upper diagonal (acceptable negative values also, 0 - the main diagonal) // row, cols - height and width of given matrix (MxN, rows = M, cols = N) // template <typename T> static SD_KERNEL void setDiagValueUpperKernel(void* buffer, sd::LongType* shape, T value, int diagonal, sd::LongType rows, sd::LongType cols) { __shared__ sd::LongType rank; __shared__ T* array; if (0 == threadIdx.x) { rank = shape::rank(shape); array = reinterpret_cast<T*>(buffer); } __syncthreads(); for (sd::LongType i = blockIdx.x; i < rows; i += gridDim.x) { for (int j = threadIdx.x; j < cols; j += blockDim.x) { sd::LongType coords[2] = {i, j}; sd::LongType xOffset = shape::getOffset(shape, coords); if (i + diagonal <= j) array[xOffset] = value; } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // set up given value to lower given diagonal // buffer - input buffer // shape - input shape // value - given value // diagonal - given lower diagonal (acceptable negative values also, 0 - the main diagonal) // row, cols - height and width of given matrix (MxN, rows = M, cols = N) // template <typename T> static SD_KERNEL void setDiagValueLowerKernel(void* buffer, sd::LongType* shape, T value, int diagonal, sd::LongType rows, sd::LongType cols) { sd::LongType rank = shape::rank(shape); int totalThreads = blockDim.x; for (sd::LongType i = blockIdx.x; i < rows; i += gridDim.x) { for (int j = threadIdx.x; j < cols; j += totalThreads) { sd::LongType coords[2] = {i, j}; auto xOffset = shape::getOffset(shape, coords); if (i + diagonal >= j) *(reinterpret_cast<T*>(buffer) + xOffset) = value; } } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template SD_KERNEL void setDiagValueLowerKernel(void* buffer, sd::LongType* shape, double value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueUpperKernel(void* buffer, sd::LongType* shape, double value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueLowerKernel(void* buffer, sd::LongType* shape, float value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueUpperKernel(void* buffer, sd::LongType* shape, float value, int diagonal, 
sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueLowerKernel(void* buffer, sd::LongType* shape, int value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueUpperKernel(void* buffer, sd::LongType* shape, int value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueLowerKernel(void* buffer, sd::LongType* shape, float16 value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueUpperKernel(void* buffer, sd::LongType* shape, float16 value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueLowerKernel(void* buffer, sd::LongType* shape, bfloat16 value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueUpperKernel(void* buffer, sd::LongType* shape, bfloat16 value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueLowerKernel(void* buffer, sd::LongType* shape, sd::LongType value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueUpperKernel(void* buffer, sd::LongType* shape, sd::LongType value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueLowerKernel(void* buffer, sd::LongType* shape, int16_t value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueUpperKernel(void* buffer, sd::LongType* shape, int16_t value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueLowerKernel(void* buffer, sd::LongType* shape, uint8_t value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueUpperKernel(void* buffer, sd::LongType* shape, uint8_t value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueLowerKernel(void* buffer, sd::LongType* shape, int8_t value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueUpperKernel(void* buffer, sd::LongType* shape, int8_t value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueLowerKernel(void* buffer, sd::LongType* shape, bool value, int diagonal, sd::LongType rows, sd::LongType cols); template SD_KERNEL void setDiagValueUpperKernel(void* buffer, sd::LongType* shape, bool value, int diagonal, sd::LongType rows, sd::LongType cols); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> static void setDiagonalValueUpper(void* buffer, sd::LongType* shape, NDArray const& value, int diagonal, sd::LongType rows, sd::LongType cols, cudaStream_t& stream) { dim3 launchDims(256, 512, 8192); setDiagValueUpperKernel<T> <<<launchDims.x, launchDims.y, launchDims.z, stream>>>(buffer, shape, value.e<T>(0), diagonal, rows, cols); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename T> static void setDiagonalValueLower(void* buffer, sd::LongType* shape, NDArray const& value, int diagonal, sd::LongType rows, sd::LongType cols, cudaStream_t& stream) { dim3 launchDims(256, 512, 8192); setDiagValueLowerKernel<T> <<<launchDims.x, launchDims.y, launchDims.z, stream>>>(buffer, shape, value.e<T>(0), diagonal, rows, cols); } BUILD_SINGLE_TEMPLATE(template void setDiagonalValueUpper, (void* buffer, sd::LongType* shape, NDArray const& value, int diagonal, sd::LongType 
rows, sd::LongType cols, cudaStream_t& stream), SD_COMMON_TYPES); BUILD_SINGLE_TEMPLATE(template void setDiagonalValueLower, (void* buffer, sd::LongType* shape, NDArray const& value, int diagonal, sd::LongType rows, sd::LongType cols, cudaStream_t& stream), SD_COMMON_TYPES); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace sd
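The two kernels in this pair differ only in the per-element predicate: setDiagValueUpperKernel writes value where i + diagonal <= j, setDiagValueLowerKernel where i + diagonal >= j, with i the row index and j the column index, so diagonal = 0 means the main diagonal and negative values shift the boundary downwards. A plain host-side sketch of the same rule, assuming a simple row-major layout purely for illustration (the kernels resolve offsets through shape::getOffset instead); setDiagValueHost is a hypothetical helper name:

// Host illustration of the diagonal predicates used by the kernels above.
// upper == true mirrors setDiagValueUpperKernel, upper == false mirrors setDiagValueLowerKernel.
template <typename T>
void setDiagValueHost(T* a, long rows, long cols, T value, int diagonal, bool upper)
{
  for (long i = 0; i < rows; ++i)
    for (long j = 0; j < cols; ++j)
      if (upper ? (i + diagonal <= j) : (i + diagonal >= j))
        a[i * cols + j] = value;   // row-major offset; the CUDA code goes through shape::getOffset
}

// Example: with diagonal = 0 and upper = true the main diagonal and everything above it is set;
// with diagonal = 1 the filled region starts one column to the right of the main diagonal.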
2dcf70b4483886a6a88c6b8442475bb4a83d8e96.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void ForwardWarpKernel_PSF2x2(const float *u, const float *v, const float *src, const int w, const int h, const int flow_stride, const int image_stride, const float time_scale, float *normalization_factor, float *dst) { int j = threadIdx.x + blockDim.x * blockIdx.x; int i = threadIdx.y + blockDim.y * blockIdx.y; if (i >= h || j >= w) return; int flow_row_offset = i * flow_stride; int image_row_offset = i * image_stride; //bottom left corner of a target pixel float cx = u[flow_row_offset + j] * time_scale + (float)j + 1.0f; float cy = v[flow_row_offset + j] * time_scale + (float)i + 1.0f; // pixel containing bottom left corner float px; float py; float dx = modff (cx, &px); float dy = modff (cy, &py); // target pixel integer coords int tx; int ty; tx = (int) px; ty = (int) py; float value = src[image_row_offset + j]; float weight; // fill pixel containing bottom right corner if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = dx * dy; _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing bottom left corner tx -= 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = (1.0f - dx) * dy; _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing upper left corner ty -= 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = (1.0f - dx) * (1.0f - dy); _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing upper right corner tx += 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = dx * (1.0f - dy); _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } }
2dcf70b4483886a6a88c6b8442475bb4a83d8e96.cu
#include "includes.h" __global__ void ForwardWarpKernel_PSF2x2(const float *u, const float *v, const float *src, const int w, const int h, const int flow_stride, const int image_stride, const float time_scale, float *normalization_factor, float *dst) { int j = threadIdx.x + blockDim.x * blockIdx.x; int i = threadIdx.y + blockDim.y * blockIdx.y; if (i >= h || j >= w) return; int flow_row_offset = i * flow_stride; int image_row_offset = i * image_stride; //bottom left corner of a target pixel float cx = u[flow_row_offset + j] * time_scale + (float)j + 1.0f; float cy = v[flow_row_offset + j] * time_scale + (float)i + 1.0f; // pixel containing bottom left corner float px; float py; float dx = modff (cx, &px); float dy = modff (cy, &py); // target pixel integer coords int tx; int ty; tx = (int) px; ty = (int) py; float value = src[image_row_offset + j]; float weight; // fill pixel containing bottom right corner if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = dx * dy; _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing bottom left corner tx -= 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = (1.0f - dx) * dy; _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing upper left corner ty -= 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = (1.0f - dx) * (1.0f - dy); _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } // fill pixel containing upper right corner tx += 1; if (!((tx >= w) || (tx < 0) || (ty >= h) || (ty < 0))) { weight = dx * (1.0f - dy); _atomicAdd (dst + ty * image_stride + tx, value * weight); _atomicAdd (normalization_factor + ty * image_stride + tx, weight); } }
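ForwardWarpKernel_PSF2x2 performs a forward (scatter) warp: each source pixel is pushed to its warped position (cx, cy) and splatted into the four surrounding integer pixels with bilinear weights built from the fractional parts dx and dy; the same weights are accumulated into normalization_factor so that each destination pixel can later be divided by the total weight it received. The four weights always sum to one. A small host-side sketch of the weight computation (the coordinate values and the standalone main are illustrative only):

#include <math.h>
#include <stdio.h>

int main(void)
{
  float cx = 12.3f, cy = 7.8f;            // example warped coordinates
  float px, py;
  float dx = modff(cx, &px);              // dx ~ 0.3, px = 12
  float dy = modff(cy, &py);              // dy ~ 0.8, py = 7

  // Same weights the kernel uses, in the order it applies them:
  float w_br = dx * dy;                   // pixel (px,     py)
  float w_bl = (1.0f - dx) * dy;          // pixel (px - 1, py)
  float w_ul = (1.0f - dx) * (1.0f - dy); // pixel (px - 1, py - 1)
  float w_ur = dx * (1.0f - dy);          // pixel (px,     py - 1)

  printf("weights: %f %f %f %f (sum %f)\n",
         w_br, w_bl, w_ul, w_ur, w_br + w_bl + w_ul + w_ur);
  return 0;
}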
e15471f7840c99facbbacd91b88a15126121864b.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2015 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mse_layer_updater_cuda.h" #include <hip/hip_runtime.h> #include "util_cuda.h" #include "../mse_layer.h" #include "../neural_network_exception.h" namespace nnforge { namespace cuda { extern __shared__ float arr_sh[]; __global__ void mse_upd_kernel( float * __restrict output, const float * __restrict input0, const float * __restrict input1, const float * __restrict scale_mask, int input_feature_map_count, int elem_count_per_feature_map, float scale, int entry_count) { int feature_map_id = threadIdx.x; int neuron_id = blockIdx.x; int entry_id = blockIdx.y; int threadblock_size = blockDim.x; float err = 0.0F; int output_offset = entry_id * elem_count_per_feature_map + neuron_id; float mask = 1.0F; if (scale_mask) mask = scale_mask[output_offset]; int thread_id = threadIdx.x; if (mask != 0.0F) { int input_offset = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id; while (feature_map_id < input_feature_map_count) { float local_err = input0[input_offset] - input1[input_offset]; err += local_err * local_err; feature_map_id += threadblock_size; input_offset += threadblock_size * elem_count_per_feature_map; } int lane_id = thread_id & 31; #pragma unroll for(int tx = 16; tx > 0; tx >>= 1) err += __shfl_down(err, tx); int warp_count = threadblock_size >> 5; if (warp_count > 1) { if (lane_id == 0) arr_sh[thread_id >> 5] = err; __syncthreads(); if (thread_id < 32) { err = 0.0F; if (thread_id < warp_count) err = arr_sh[thread_id]; #pragma unroll for(int tx = 4; tx > 0; tx >>= 1) err += __shfl_down(err, tx); } } } if (thread_id == 0) output[output_offset] = err * (mask * scale); } template<bool add_update_to_destination> __global__ void mse_backprop_upd_kernel( float * __restrict output, const float * __restrict deriv_input_neurons, const float * __restrict target_input_neurons, float scale2, int elem_count) { int elem_id = blockDim.x * blockIdx.x + threadIdx.x; if (elem_id < elem_count) { if (add_update_to_destination) output[elem_id] += scale2 * (target_input_neurons[elem_id] - deriv_input_neurons[elem_id]); else output[elem_id] = scale2 * (target_input_neurons[elem_id] - deriv_input_neurons[elem_id]); } } template<bool add_update_to_destination> __global__ void mse_backprop_upd_kernel( float * __restrict output, const float * __restrict deriv_input_neurons, const float * __restrict target_input_neurons, const float * __restrict scale_mask, float scale2, int elem_count_per_feature_map, int input_feature_map_count, int entry_count) { int neuron_id = blockDim.x * blockIdx.x + threadIdx.x; int feature_map_id = blockDim.y * blockIdx.y + threadIdx.y; int entry_id = blockDim.z * blockIdx.z + threadIdx.z; if ((neuron_id < elem_count_per_feature_map) && (feature_map_id < input_feature_map_count) && (entry_id < entry_count)) { int elem_id = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + 
neuron_id; float mask = scale_mask[entry_id * elem_count_per_feature_map + neuron_id]; float gradient = 0.0F; if (mask != 0.0F) { float actual_val = target_input_neurons[elem_id]; float predicted_val = deriv_input_neurons[elem_id]; gradient = actual_val - predicted_val; } if (add_update_to_destination) output[elem_id] += gradient * (mask * scale2); else output[elem_id] = gradient * (mask * scale2); } } mse_layer_updater_cuda::mse_layer_updater_cuda() { } mse_layer_updater_cuda::~mse_layer_updater_cuda() { } void mse_layer_updater_cuda::enqueue_forward_propagation( hipStream_t stream_id, cuda_linear_buffer_device::ptr output_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, cuda_linear_buffer_device::ptr temporary_per_entry_buffer, unsigned int entry_count) { int threadblock_size = get_threadblock_size(input_configuration_specific_list[0].feature_map_count); const float * scale_mask = 0; if (input_buffers.size() > 2) scale_mask = *input_buffers[2]; int smem_size = ((threadblock_size + 32 - 1) / 32) * sizeof(float); hipLaunchKernelGGL(( mse_upd_kernel), dim3(dim3(input_elem_count_per_feature_map_list[0], entry_count)), dim3(threadblock_size), smem_size, stream_id, *output_buffer, *input_buffers[0], *input_buffers[1], scale_mask, input_configuration_specific_list[0].feature_map_count, input_elem_count_per_feature_map_list[0], scale, entry_count); } void mse_layer_updater_cuda::enqueue_backward_data_propagation( hipStream_t stream_id, unsigned int input_index, cuda_linear_buffer_device::ptr input_errors_buffer, cuda_linear_buffer_device::const_ptr output_errors_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers, cuda_linear_buffer_device::const_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer, bool add_update_to_destination, unsigned int entry_count) { if (input_neurons_buffers.size() > 2) { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, input_elem_count_per_feature_map_list[0], input_configuration_specific_list[0].feature_map_count, entry_count); if (add_update_to_destination) hipLaunchKernelGGL(( mse_backprop_upd_kernel<true>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, *input_neurons_buffers[input_index], *input_neurons_buffers[1 - input_index], *input_neurons_buffers[2], scale * 2.0F, input_elem_count_per_feature_map_list[0], input_configuration_specific_list[0].feature_map_count, entry_count); else hipLaunchKernelGGL(( mse_backprop_upd_kernel<false>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, *input_neurons_buffers[input_index], 
*input_neurons_buffers[1 - input_index], *input_neurons_buffers[2], scale * 2.0F, input_elem_count_per_feature_map_list[0], input_configuration_specific_list[0].feature_map_count, entry_count); } else { int elem_count = entry_count * input_elem_count_per_entry_list[0]; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); if (add_update_to_destination) hipLaunchKernelGGL(( mse_backprop_upd_kernel<true>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, *input_neurons_buffers[input_index], *input_neurons_buffers[1 - input_index], scale * 2.0F, elem_count); else hipLaunchKernelGGL(( mse_backprop_upd_kernel<false>), dim3(kernel_dims.first), dim3(kernel_dims.second), 0, stream_id, *input_errors_buffer, *input_neurons_buffers[input_index], *input_neurons_buffers[1 - input_index], scale * 2.0F, elem_count); } } void mse_layer_updater_cuda::updater_configured() { if (actions.find(layer_action(layer_action::backward_data, 2)) != actions.end()) throw neural_network_exception("mse_layer_updater_cuda cannot do backward propagation for scale mask"); nnforge_shared_ptr<const mse_layer> layer_derived = nnforge_dynamic_pointer_cast<const mse_layer>(layer_schema); scale = layer_derived->scale; } bool mse_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const { return false; } int mse_layer_updater_cuda::get_threadblock_size(int input_feature_map_count) { int threadblock_size; if (input_feature_map_count < 256) { threadblock_size = (input_feature_map_count + 32 - 1) / 32 * 32; } else { int threadblock_count = (input_feature_map_count + 256 - 1) / 256; threadblock_size = (input_feature_map_count + threadblock_count - 1) / threadblock_count; threadblock_size = (threadblock_size + 32 - 1) / 32 * 32; } return threadblock_size; } } }
e15471f7840c99facbbacd91b88a15126121864b.cu
/* * Copyright 2011-2015 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mse_layer_updater_cuda.h" #include <cuda_runtime.h> #include "util_cuda.h" #include "../mse_layer.h" #include "../neural_network_exception.h" namespace nnforge { namespace cuda { extern __shared__ float arr_sh[]; __global__ void mse_upd_kernel( float * __restrict output, const float * __restrict input0, const float * __restrict input1, const float * __restrict scale_mask, int input_feature_map_count, int elem_count_per_feature_map, float scale, int entry_count) { int feature_map_id = threadIdx.x; int neuron_id = blockIdx.x; int entry_id = blockIdx.y; int threadblock_size = blockDim.x; float err = 0.0F; int output_offset = entry_id * elem_count_per_feature_map + neuron_id; float mask = 1.0F; if (scale_mask) mask = scale_mask[output_offset]; int thread_id = threadIdx.x; if (mask != 0.0F) { int input_offset = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id; while (feature_map_id < input_feature_map_count) { float local_err = input0[input_offset] - input1[input_offset]; err += local_err * local_err; feature_map_id += threadblock_size; input_offset += threadblock_size * elem_count_per_feature_map; } int lane_id = thread_id & 31; #pragma unroll for(int tx = 16; tx > 0; tx >>= 1) err += __shfl_down(err, tx); int warp_count = threadblock_size >> 5; if (warp_count > 1) { if (lane_id == 0) arr_sh[thread_id >> 5] = err; __syncthreads(); if (thread_id < 32) { err = 0.0F; if (thread_id < warp_count) err = arr_sh[thread_id]; #pragma unroll for(int tx = 4; tx > 0; tx >>= 1) err += __shfl_down(err, tx); } } } if (thread_id == 0) output[output_offset] = err * (mask * scale); } template<bool add_update_to_destination> __global__ void mse_backprop_upd_kernel( float * __restrict output, const float * __restrict deriv_input_neurons, const float * __restrict target_input_neurons, float scale2, int elem_count) { int elem_id = blockDim.x * blockIdx.x + threadIdx.x; if (elem_id < elem_count) { if (add_update_to_destination) output[elem_id] += scale2 * (target_input_neurons[elem_id] - deriv_input_neurons[elem_id]); else output[elem_id] = scale2 * (target_input_neurons[elem_id] - deriv_input_neurons[elem_id]); } } template<bool add_update_to_destination> __global__ void mse_backprop_upd_kernel( float * __restrict output, const float * __restrict deriv_input_neurons, const float * __restrict target_input_neurons, const float * __restrict scale_mask, float scale2, int elem_count_per_feature_map, int input_feature_map_count, int entry_count) { int neuron_id = blockDim.x * blockIdx.x + threadIdx.x; int feature_map_id = blockDim.y * blockIdx.y + threadIdx.y; int entry_id = blockDim.z * blockIdx.z + threadIdx.z; if ((neuron_id < elem_count_per_feature_map) && (feature_map_id < input_feature_map_count) && (entry_id < entry_count)) { int elem_id = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id; float mask = scale_mask[entry_id * 
elem_count_per_feature_map + neuron_id]; float gradient = 0.0F; if (mask != 0.0F) { float actual_val = target_input_neurons[elem_id]; float predicted_val = deriv_input_neurons[elem_id]; gradient = actual_val - predicted_val; } if (add_update_to_destination) output[elem_id] += gradient * (mask * scale2); else output[elem_id] = gradient * (mask * scale2); } } mse_layer_updater_cuda::mse_layer_updater_cuda() { } mse_layer_updater_cuda::~mse_layer_updater_cuda() { } void mse_layer_updater_cuda::enqueue_forward_propagation( cudaStream_t stream_id, cuda_linear_buffer_device::ptr output_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, cuda_linear_buffer_device::ptr temporary_per_entry_buffer, unsigned int entry_count) { int threadblock_size = get_threadblock_size(input_configuration_specific_list[0].feature_map_count); const float * scale_mask = 0; if (input_buffers.size() > 2) scale_mask = *input_buffers[2]; int smem_size = ((threadblock_size + 32 - 1) / 32) * sizeof(float); mse_upd_kernel<<<dim3(input_elem_count_per_feature_map_list[0], entry_count), threadblock_size, smem_size, stream_id>>>( *output_buffer, *input_buffers[0], *input_buffers[1], scale_mask, input_configuration_specific_list[0].feature_map_count, input_elem_count_per_feature_map_list[0], scale, entry_count); } void mse_layer_updater_cuda::enqueue_backward_data_propagation( cudaStream_t stream_id, unsigned int input_index, cuda_linear_buffer_device::ptr input_errors_buffer, cuda_linear_buffer_device::const_ptr output_errors_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_neurons_buffers, cuda_linear_buffer_device::const_ptr output_neurons_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, cuda_linear_buffer_device::const_ptr temporary_per_entry_buffer, bool add_update_to_destination, unsigned int entry_count) { if (input_neurons_buffers.size() > 2) { std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, input_elem_count_per_feature_map_list[0], input_configuration_specific_list[0].feature_map_count, entry_count); if (add_update_to_destination) mse_backprop_upd_kernel<true><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_errors_buffer, *input_neurons_buffers[input_index], *input_neurons_buffers[1 - input_index], *input_neurons_buffers[2], scale * 2.0F, input_elem_count_per_feature_map_list[0], input_configuration_specific_list[0].feature_map_count, entry_count); else mse_backprop_upd_kernel<false><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_errors_buffer, *input_neurons_buffers[input_index], *input_neurons_buffers[1 - input_index], *input_neurons_buffers[2], scale * 2.0F, input_elem_count_per_feature_map_list[0], 
input_configuration_specific_list[0].feature_map_count, entry_count); } else { int elem_count = entry_count * input_elem_count_per_entry_list[0]; std::pair<dim3, dim3> kernel_dims = cuda_util::get_grid_and_threadblock_sizes_sequential_access( *cuda_config, elem_count); if (add_update_to_destination) mse_backprop_upd_kernel<true><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_errors_buffer, *input_neurons_buffers[input_index], *input_neurons_buffers[1 - input_index], scale * 2.0F, elem_count); else mse_backprop_upd_kernel<false><<<kernel_dims.first, kernel_dims.second, 0, stream_id>>>( *input_errors_buffer, *input_neurons_buffers[input_index], *input_neurons_buffers[1 - input_index], scale * 2.0F, elem_count); } } void mse_layer_updater_cuda::updater_configured() { if (actions.find(layer_action(layer_action::backward_data, 2)) != actions.end()) throw neural_network_exception("mse_layer_updater_cuda cannot do backward propagation for scale mask"); nnforge_shared_ptr<const mse_layer> layer_derived = nnforge_dynamic_pointer_cast<const mse_layer>(layer_schema); scale = layer_derived->scale; } bool mse_layer_updater_cuda::is_backward_data_dependent_on_output_buffer(unsigned int action_input_index) const { return false; } int mse_layer_updater_cuda::get_threadblock_size(int input_feature_map_count) { int threadblock_size; if (input_feature_map_count < 256) { threadblock_size = (input_feature_map_count + 32 - 1) / 32 * 32; } else { int threadblock_count = (input_feature_map_count + 256 - 1) / 256; threadblock_size = (input_feature_map_count + threadblock_count - 1) / threadblock_count; threadblock_size = (threadblock_size + 32 - 1) / 32 * 32; } return threadblock_size; } } }
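mse_upd_kernel reduces the per-feature-map squared errors with warp shuffles: each lane accumulates a strided partial sum, five __shfl_down steps collapse a warp onto its lane 0, and, when the block holds more than one warp, the per-warp results pass through shared memory and a second shuffle pass. A minimal sketch of that intra-warp step follows; __shfl_down, used in this 2015-era code, has since been deprecated in favour of __shfl_down_sync, which is what the sketch uses (warpReduceSum is a hypothetical helper name):

#include <cuda_runtime.h>

// Sum a value across the 32 lanes of a warp; after the loop, lane 0 holds the total.
// This mirrors the first reduction stage of mse_upd_kernel, with the modern _sync intrinsic.
__device__ float warpReduceSum(float val)
{
  for (int offset = 16; offset > 0; offset >>= 1)
    val += __shfl_down_sync(0xffffffffu, val, offset);
  return val;
}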
00048bd1fde7524fe298321854e8e327f9a5c076.hip
// !!! This is a file automatically generated by hipify!!! /* Vector Dot Product, using multiple GPUs with OpenMP C = A.B */ #include <stdio.h> #include <stdlib.h> #include <omp.h> // header for OpenMP #include <hip/hip_runtime.h> // Variables float* h_A; // host vectors float* h_B; float* h_C; float* h_G; // to store partial sum by process in each GPU, to prevent race condition // Functions void RandomInit(float*, int); // Device code __global__ void VecDot(const float* A, const float* B, float* C, int N) { extern __shared__ float cache[]; int i = blockDim.x * blockIdx.x + threadIdx.x; int cacheIndex = threadIdx.x; float temp = 0.0; // register for each thread while (i < N) { temp += A[i]*B[i]; i += blockDim.x*gridDim.x; } cache[cacheIndex] = temp; // set the cache value __syncthreads(); // perform parallel reduction, threadsPerBlock must be 2^m int ib = blockDim.x/2; while (ib != 0) { if(cacheIndex < ib) cache[cacheIndex] += cache[cacheIndex + ib]; __syncthreads(); ib /=2; } if(cacheIndex == 0) C[blockIdx.x] = cache[0]; } // Host code int main(void) { printf("Vector Dot Product with multiple GPUs \n"); int N, NGPU, cpu_thread_id=0; int *Dev; long mem = 1024*1024*1024; // 4 Giga for float data type. printf("Enter the number of GPUs: "); scanf("%d", &NGPU); printf("%d\n", NGPU); Dev = (int *)malloc(sizeof(int)*NGPU); int numDev = 0; printf("GPU device number: "); for(int i = 0; i < NGPU; i++) { scanf("%d", &Dev[i]); printf("%d ",Dev[i]); numDev++; if(getchar() == '\n') break; } printf("\n"); if(numDev != NGPU) { fprintf(stderr,"Should input %d GPU device numbers\n", NGPU); exit(1); } printf("Enter the size of the vectors: "); scanf("%d", &N); printf("%d\n", N); if (3*N > mem) { printf("The size of these 3 vectors cannot be fitted into 4 Gbyte\n"); exit(1); } // Set the sizes of threads and blocks int threadsPerBlock, m; printf("Enter the number of threads per block (2^m), m : "); scanf("%d", &m); threadsPerBlock = pow(2, m); printf("Block Size = %d\n", threadsPerBlock); if(threadsPerBlock > 1024) { printf("The number of threads per block must be less than 1024 ! \n"); exit(1); } int blocksPerGrid; printf("Enter the number of blocks per grid: "); scanf("%d",&blocksPerGrid); printf("Grid size = %d\n", blocksPerGrid); if(blocksPerGrid > 2147483647) { printf("The number of blocks must be less than 2147483647 ! \n"); exit(1); } long size = N * sizeof(float); int sb = blocksPerGrid * sizeof(float); // Output array from GPU int sm = threadsPerBlock*sizeof(float); // GPU Shared Memory Size // Allocate input vectors h_A and h_B in host memory h_A = (float*)malloc(size); h_B = (float*)malloc(size); h_C = (float*)malloc(sb * NGPU); h_G = (float*)malloc(sizeof(float) * NGPU); if (! h_A || ! h_B || ! h_C) { printf("!!! Not enough memory.\n"); exit(1); } // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); for(int i = 0; i < NGPU; i = i+1){ h_G[i] = 0.0; } // declare cuda event for timer hipEvent_t start, stop; // hipEventCreate(&start); // events must be created after devices are set // hipEventCreate(&stop); float Intime,gputime,Outime; // Set numbers of threads = numbers of GPU omp_set_num_threads(NGPU); // So that "cpu_thread_id" is declared under each threads, and they are independent. // All omp thread do the same code in this block. #pragma omp parallel private(cpu_thread_id) { float *d_A, *d_B, *d_C; cpu_thread_id = omp_get_thread_num(); hipSetDevice(Dev[cpu_thread_id]); // start the timer // And maybe since OpenMP thread id = 0 , start the first (?) 
// Start the clock here, to see how much time it takes to input array. // And also, we use a thread (here '0') to track the clock. if(cpu_thread_id == 0) { hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); } // Allocate vectors in device memory // N / NGPU should be divisible. hipMalloc((void**)&d_A, size/NGPU); hipMalloc((void**)&d_B, size/NGPU); // Since one threads handles one GPU hipMalloc((void**)&d_C, sb); // Copy vectors from host memory to device memory hipMemcpy(d_A, h_A+N/NGPU*cpu_thread_id, size/NGPU, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B+N/NGPU*cpu_thread_id, size/NGPU, hipMemcpyHostToDevice); // Wait until all threads come to this step, synchronizes all threads on OpenMP #pragma omp barrier // stop the timer if(cpu_thread_id == 0) { hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime( &Intime, start, stop); printf("Data input time for GPU: %f (ms) \n",Intime); } // start the timer if(cpu_thread_id == 0) hipEventRecord(start,0); hipLaunchKernelGGL(( VecDot), dim3(blocksPerGrid), dim3(threadsPerBlock), sm, 0, d_A, d_B, d_C, N/NGPU); // Blocks until the device has completed all the preceding requested task. hipDeviceSynchronize(); // stop the timer if(cpu_thread_id == 0) { hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime( &gputime, start, stop); printf("Processing time for GPU: %f (ms) \n",gputime); printf("GPU Gflops: %f\n",3*N/(1000000.0*gputime)); } // Copy result from device memory to host memory // h_C contains the result in host memory // start the timer if(cpu_thread_id == 0){ hipEventRecord(start,0); } hipMemcpy(h_C+blocksPerGrid*cpu_thread_id, d_C, sb, hipMemcpyDeviceToHost); hipFree(d_A); hipFree(d_B); hipFree(d_C); for(int i = blocksPerGrid * cpu_thread_id; i < (blocksPerGrid * cpu_thread_id) + blocksPerGrid; i = i+1) { h_G[cpu_thread_id] = h_G[cpu_thread_id] + h_C[i]; } // Wait till OpenMP threads are finish! #pragma omp barrier } // Calculate the final result float DotGPU = 0.0; for(int i = 0; i < NGPU; i = i+1){ DotGPU = DotGPU + h_G[i]; } // stop the timer hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime( &Outime, start, stop); printf("Data output time for GPU: %f (ms) \n",Outime); float gputime_tot; gputime_tot = Intime + gputime + Outime; printf("Total time for GPU: %f (ms) \n",gputime_tot); // Compute Dot Product by CPU // start the timer hipEventRecord(start,0); double DotCPU = 0.0; // compute the reference solution for (int i = 0; i < N; i = i+1) { DotCPU = DotCPU + (double)(h_A[i] * h_B[i]); } // stop the timer hipEventRecord(stop,0); hipEventSynchronize(stop); float cputime; hipEventElapsedTime( &cputime, start, stop); printf("Processing time for CPU: %f (ms) \n",cputime); printf("CPU Gflops: %f\n",3*N/(1000000.0*cputime)); printf("Speed up of GPU = %f\n", cputime/gputime_tot); // Destroy timer hipEventDestroy(start); hipEventDestroy(stop); // check result printf("Check result:\n"); printf("DotGPU = %f\n", DotGPU); printf("DotCPU = %f\n", DotCPU); double diff; diff = abs(DotCPU - (double)DotGPU); printf("abs(DotCPU - DotGPU)=%20.15e\n",diff); printf("error = abs(DotCPU - DotGPU) / DotCPU = %20.15e\n", diff / DotCPU); for (int i=0; i < NGPU; i++) { hipSetDevice(Dev[i]); hipDeviceReset(); } // Free all the vectors free(h_A); free(h_B); free(h_C); free(h_G); return 0; } // Allocates an array with random float entries. // From (0, 1) void RandomInit(float* data, int n) { for (int i = 0; i < n; ++i) data[i] = rand() / (float)RAND_MAX; }
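The dot-product program above drives one GPU per OpenMP thread: omp_set_num_threads(NGPU), each thread calls hipSetDevice/cudaSetDevice on its own entry of Dev, works on its N/NGPU slice of the vectors, and writes its partial sum into its own slot of h_G so the final host-side accumulation is race-free. A minimal sketch of that pattern, stripped of the timing and reduction details (perDeviceWork is a hypothetical name, N is assumed divisible by NGPU as in the program, and error checking is omitted):

#include <cuda_runtime.h>
#include <omp.h>

void perDeviceWork(const int* Dev, int NGPU, const float* h_A, long N)
{
  omp_set_num_threads(NGPU);          // one OpenMP thread per GPU
  #pragma omp parallel
  {
    int tid = omp_get_thread_num();
    cudaSetDevice(Dev[tid]);          // bind this thread to its device

    long chunk = N / NGPU;            // this thread's slice of the input
    float* d_A = NULL;
    cudaMalloc((void**)&d_A, chunk * sizeof(float));
    cudaMemcpy(d_A, h_A + tid * chunk, chunk * sizeof(float), cudaMemcpyHostToDevice);

    // ... launch the reduction kernel on this slice and copy the
    //     per-block partial sums back into a per-thread region ...

    cudaFree(d_A);
  }                                   // implicit barrier: all devices are done here
}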
00048bd1fde7524fe298321854e8e327f9a5c076.cu
/* Vector Dot Product, using multiple GPUs with OpenMP C = A.B */ #include <stdio.h> #include <stdlib.h> #include <omp.h> // header for OpenMP #include <cuda_runtime.h> // Variables float* h_A; // host vectors float* h_B; float* h_C; float* h_G; // to store partial sum by process in each GPU, to prevent race condition // Functions void RandomInit(float*, int); // Device code __global__ void VecDot(const float* A, const float* B, float* C, int N) { extern __shared__ float cache[]; int i = blockDim.x * blockIdx.x + threadIdx.x; int cacheIndex = threadIdx.x; float temp = 0.0; // register for each thread while (i < N) { temp += A[i]*B[i]; i += blockDim.x*gridDim.x; } cache[cacheIndex] = temp; // set the cache value __syncthreads(); // perform parallel reduction, threadsPerBlock must be 2^m int ib = blockDim.x/2; while (ib != 0) { if(cacheIndex < ib) cache[cacheIndex] += cache[cacheIndex + ib]; __syncthreads(); ib /=2; } if(cacheIndex == 0) C[blockIdx.x] = cache[0]; } // Host code int main(void) { printf("Vector Dot Product with multiple GPUs \n"); int N, NGPU, cpu_thread_id=0; int *Dev; long mem = 1024*1024*1024; // 4 Giga for float data type. printf("Enter the number of GPUs: "); scanf("%d", &NGPU); printf("%d\n", NGPU); Dev = (int *)malloc(sizeof(int)*NGPU); int numDev = 0; printf("GPU device number: "); for(int i = 0; i < NGPU; i++) { scanf("%d", &Dev[i]); printf("%d ",Dev[i]); numDev++; if(getchar() == '\n') break; } printf("\n"); if(numDev != NGPU) { fprintf(stderr,"Should input %d GPU device numbers\n", NGPU); exit(1); } printf("Enter the size of the vectors: "); scanf("%d", &N); printf("%d\n", N); if (3*N > mem) { printf("The size of these 3 vectors cannot be fitted into 4 Gbyte\n"); exit(1); } // Set the sizes of threads and blocks int threadsPerBlock, m; printf("Enter the number of threads per block (2^m), m : "); scanf("%d", &m); threadsPerBlock = pow(2, m); printf("Block Size = %d\n", threadsPerBlock); if(threadsPerBlock > 1024) { printf("The number of threads per block must be less than 1024 ! \n"); exit(1); } int blocksPerGrid; printf("Enter the number of blocks per grid: "); scanf("%d",&blocksPerGrid); printf("Grid size = %d\n", blocksPerGrid); if(blocksPerGrid > 2147483647) { printf("The number of blocks must be less than 2147483647 ! \n"); exit(1); } long size = N * sizeof(float); int sb = blocksPerGrid * sizeof(float); // Output array from GPU int sm = threadsPerBlock*sizeof(float); // GPU Shared Memory Size // Allocate input vectors h_A and h_B in host memory h_A = (float*)malloc(size); h_B = (float*)malloc(size); h_C = (float*)malloc(sb * NGPU); h_G = (float*)malloc(sizeof(float) * NGPU); if (! h_A || ! h_B || ! h_C) { printf("!!! Not enough memory.\n"); exit(1); } // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); for(int i = 0; i < NGPU; i = i+1){ h_G[i] = 0.0; } // declare cuda event for timer cudaEvent_t start, stop; // cudaEventCreate(&start); // events must be created after devices are set // cudaEventCreate(&stop); float Intime,gputime,Outime; // Set numbers of threads = numbers of GPU omp_set_num_threads(NGPU); // So that "cpu_thread_id" is declared under each threads, and they are independent. // All omp thread do the same code in this block. #pragma omp parallel private(cpu_thread_id) { float *d_A, *d_B, *d_C; cpu_thread_id = omp_get_thread_num(); cudaSetDevice(Dev[cpu_thread_id]); // start the timer // And maybe since OpenMP thread id = 0 , start the first (?) // Start the clock here, to see how much time it takes to input array. 
// And also, we use a thread (here '0') to track the clock. if(cpu_thread_id == 0) { cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); } // Allocate vectors in device memory // N / NGPU should be divisible. cudaMalloc((void**)&d_A, size/NGPU); cudaMalloc((void**)&d_B, size/NGPU); // Since one threads handles one GPU cudaMalloc((void**)&d_C, sb); // Copy vectors from host memory to device memory cudaMemcpy(d_A, h_A+N/NGPU*cpu_thread_id, size/NGPU, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B+N/NGPU*cpu_thread_id, size/NGPU, cudaMemcpyHostToDevice); // Wait until all threads come to this step, synchronizes all threads on OpenMP #pragma omp barrier // stop the timer if(cpu_thread_id == 0) { cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime( &Intime, start, stop); printf("Data input time for GPU: %f (ms) \n",Intime); } // start the timer if(cpu_thread_id == 0) cudaEventRecord(start,0); VecDot<<<blocksPerGrid, threadsPerBlock, sm>>>(d_A, d_B, d_C, N/NGPU); // Blocks until the device has completed all the preceding requested task. cudaDeviceSynchronize(); // stop the timer if(cpu_thread_id == 0) { cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime( &gputime, start, stop); printf("Processing time for GPU: %f (ms) \n",gputime); printf("GPU Gflops: %f\n",3*N/(1000000.0*gputime)); } // Copy result from device memory to host memory // h_C contains the result in host memory // start the timer if(cpu_thread_id == 0){ cudaEventRecord(start,0); } cudaMemcpy(h_C+blocksPerGrid*cpu_thread_id, d_C, sb, cudaMemcpyDeviceToHost); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); for(int i = blocksPerGrid * cpu_thread_id; i < (blocksPerGrid * cpu_thread_id) + blocksPerGrid; i = i+1) { h_G[cpu_thread_id] = h_G[cpu_thread_id] + h_C[i]; } // Wait till OpenMP threads are finish! #pragma omp barrier } // Calculate the final result float DotGPU = 0.0; for(int i = 0; i < NGPU; i = i+1){ DotGPU = DotGPU + h_G[i]; } // stop the timer cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime( &Outime, start, stop); printf("Data output time for GPU: %f (ms) \n",Outime); float gputime_tot; gputime_tot = Intime + gputime + Outime; printf("Total time for GPU: %f (ms) \n",gputime_tot); // Compute Dot Product by CPU // start the timer cudaEventRecord(start,0); double DotCPU = 0.0; // compute the reference solution for (int i = 0; i < N; i = i+1) { DotCPU = DotCPU + (double)(h_A[i] * h_B[i]); } // stop the timer cudaEventRecord(stop,0); cudaEventSynchronize(stop); float cputime; cudaEventElapsedTime( &cputime, start, stop); printf("Processing time for CPU: %f (ms) \n",cputime); printf("CPU Gflops: %f\n",3*N/(1000000.0*cputime)); printf("Speed up of GPU = %f\n", cputime/gputime_tot); // Destroy timer cudaEventDestroy(start); cudaEventDestroy(stop); // check result printf("Check result:\n"); printf("DotGPU = %f\n", DotGPU); printf("DotCPU = %f\n", DotCPU); double diff; diff = abs(DotCPU - (double)DotGPU); printf("abs(DotCPU - DotGPU)=%20.15e\n",diff); printf("error = abs(DotCPU - DotGPU) / DotCPU = %20.15e\n", diff / DotCPU); for (int i=0; i < NGPU; i++) { cudaSetDevice(Dev[i]); cudaDeviceReset(); } // Free all the vectors free(h_A); free(h_B); free(h_C); free(h_G); return 0; } // Allocates an array with random float entries. // From (0, 1) void RandomInit(float* data, int n) { for (int i = 0; i < n; ++i) data[i] = rand() / (float)RAND_MAX; }
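Both versions of VecDot above use the same two-stage pattern: each block reduces its threads' partial products in shared memory and writes one value per block to C, then the host sums those per-block partials (per OpenMP thread, then across GPUs). A minimal single-GPU sketch of that pattern, with placeholder sizes rather than the interactive input used above:

```cpp
// Minimal single-GPU sketch of the two-stage dot product used above:
// stage 1 reduces within each block in shared memory, stage 2 sums the
// per-block partials on the host. Sizes below are placeholders.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

__global__ void DotPartial(const float* A, const float* B, float* C, int N) {
    extern __shared__ float cache[];
    int tid = threadIdx.x;
    float t = 0.0f;
    // grid-stride loop: each thread accumulates a private partial product
    for (int i = blockIdx.x * blockDim.x + tid; i < N; i += blockDim.x * gridDim.x)
        t += A[i] * B[i];
    cache[tid] = t;
    __syncthreads();
    // tree reduction; blockDim.x must be a power of two, as in the files above
    for (int ib = blockDim.x / 2; ib > 0; ib >>= 1) {
        if (tid < ib) cache[tid] += cache[tid + ib];
        __syncthreads();
    }
    if (tid == 0) C[blockIdx.x] = cache[0];
}

int main() {
    const int N = 1 << 20, threads = 256, blocks = 64;   // placeholder sizes
    size_t size = N * sizeof(float), sb = blocks * sizeof(float);
    float *h_A = (float*)malloc(size), *h_B = (float*)malloc(size), *h_C = (float*)malloc(sb);
    for (int i = 0; i < N; ++i) { h_A[i] = 1.0f; h_B[i] = 2.0f; }

    float *d_A, *d_B, *d_C;
    cudaMalloc((void**)&d_A, size); cudaMalloc((void**)&d_B, size); cudaMalloc((void**)&d_C, sb);
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    DotPartial<<<blocks, threads, threads * sizeof(float)>>>(d_A, d_B, d_C, N);
    cudaMemcpy(h_C, d_C, sb, cudaMemcpyDeviceToHost);

    double dot = 0.0;                       // stage 2: host-side accumulation
    for (int b = 0; b < blocks; ++b) dot += h_C[b];
    printf("dot = %f (expected %f)\n", dot, 2.0 * N);

    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    free(h_A); free(h_B); free(h_C);
    return 0;
}
```

In the multi-GPU files above, this host-side accumulation is done twice: each OpenMP thread sums its own GPU's block partials into h_G[cpu_thread_id], and the partials in h_G are then summed into DotGPU after the parallel region.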
9c861f4529f621e19f4e2ae09546c25de2011eb3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* FLUIDS v.1 - SPH Fluid Simulator for CPU and GPU Copyright (C) 2008. Rama Hoetzlein, http://www.rchoetzlein.com ZLib license This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #define COLLI_DET 0.0001f #ifndef _PARTICLES_KERNEL_H_ #define _PARTICLES_KERNEL_H_ #include <stdio.h> #include "cutil_math.h" #define EPS 0.00001 #include "fluid_system_host.cuh" //#include <thrust/scan.h> #define TOTAL_THREADS 65536 #define BLOCK_THREADS 256 #define MAX_NBR 80 __constant__ FluidParams simData; // simulation data (on device) __device__ int bufNeighbor[ TOTAL_THREADS*MAX_NBR ]; __device__ float bufNdist[ TOTAL_THREADS*MAX_NBR ]; #define COLOR(r,g,b) ( (uint((r)*255.0f)<<24) | (uint((g)*255.0f)<<16) | (uint((b)*255.0f)<<8) ) #define COLORA(r,g,b,a) ( (uint((r)*255.0f)<<24) | (uint((g)*255.0f)<<16) | (uint((b)*255.0f)<<8) | uint((a)*255.0f) ) #define NULL_HASH 333333 #define OFFSET_CLR 12 #define OFFSET_NEXT 16 #define OFFSET_VEL 20 #define OFFSET_VEVAL 32 #define OFFSET_PRESS 48 #define OFFSET_DENS 52 #define OFFSET_FORCE 56 __device__ void mul_Matrix_Vector(float * matrix ,float3 * vector, float3 * result) { result->x=matrix[0]*vector->x+matrix[1]*vector->y+matrix[2]*vector->z; result->y=matrix[3]*vector->x+matrix[4]*vector->y+matrix[5]*vector->z; result->z=matrix[6]*vector->x+matrix[7]*vector->y+matrix[8]*vector->z; } __device__ void compute_matrix_value(float * matrix, float * result) { (*result)=matrix[0]*matrix[4]*matrix[8]+ matrix[1]*matrix[5]*matrix[6]+ matrix[2]*matrix[3]*matrix[7]- matrix[2]*matrix[4]*matrix[6]- matrix[0]*matrix[5]*matrix[7]- matrix[1]*matrix[3]*matrix[8]; } __global__ void computeDensity(float * out_density, float *in_G[], float3 * in_mean_p,char * bufPnts ,int numPnt,int x, int y,int z,float size_voxel)// { uint n = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; float3 min_=make_float3(-25,-20,0); float3 max_=make_float3(25,20,40); uint ndz =n/(x*y); uint ndy = (n-ndz*x*y)/x; uint ndx = (n-ndz*x*y-ndy*x); if(ndx<x&&ndy<y&&ndz<z) { int index=n; float den=0.0; float3 position; position.x=ndx*size_voxel+min_.x; position.y=ndy*size_voxel+min_.y; position.z=ndz*size_voxel+min_.z; for(int i=0;i<numPnt;i++) { char * data=bufPnts + i * simData.stride; float3 posi = *(float3 *)data; float3 dis=(posi-in_mean_p[i])*simData.sim_scale ; float3 Gr; mul_Matrix_Vector(in_G[i], & dis, &Gr); float l=length(Gr); if(l<simData.smooth_rad) { float G; compute_matrix_value(in_G[i],& G); float m=simData.smooth_rad-length(Gr); den+=G*m*m*m*m/(*(float*) (data + OFFSET_DENS)); } } den*=simData.pmass*simData.poly6kern; if(position.z>10) out_density[index]=1; else out_density[index]=0.0; } } __global__ void compute_vertices_number(int * case_number,int * vertices_number, 
float * color_density, int * d_numVertsTable, int x, int y, int z, float isolevel) { uint index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; uint ndz =index/(x*y); uint ndy = (index-ndz*x*y)/x; uint ndx = (index-ndz*x*y-ndy*x); if(ndx<x-1&&ndx>0&&ndy<y-1&&ndx>0&&ndz<z-1&&ndz>0) { //no intersections float color[8]; color[0]=color_density[index]; color[1]=color_density[index+x]; color[2]=color_density[index+x+1]; color[3]=color_density[index+1]; color[4]=color_density[index+x*y]; color[5]=color_density[index+x+x*y]; color[6]=color_density[index+x+1+x*y]; color[7]=color_density[index+x*y]; uint cubeindex=0; if(color[0]<isolevel) cubeindex+=1; if(color[1]<isolevel) cubeindex+=2; if(color[2]<isolevel) cubeindex+=4; if(color[3]<isolevel) cubeindex+=8; if(color[4]<isolevel) cubeindex+=16; if(color[5]<isolevel) cubeindex+=32; if(color[6]<isolevel) cubeindex+=64; if(color[7]<isolevel) cubeindex+=128; case_number[index]=cubeindex; vertices_number[index]=d_numVertsTable[cubeindex]; } } __device__ void computeVetexNormal(int edge, float * color_density, int ** d_edgeVertices, float3 * v,float3 * normals, float3 * normal, float3 * position, float size_voxel, float isolevels) { int x=d_edgeVertices[edge][0]; int y=d_edgeVertices[edge][1]; float density1=color_density[x]; float density2=color_density[y]; float3 normal1=normals[x]; float3 normal2=normals[y]; float3 v1=v[x]; float3 v2=v[y]; if(fabs(density2-isolevels)<EPS) { *normal=normal2; *position=v2; return; } if(abs(density1-isolevels)<EPS) { *normal=normal1; *position=v1; return; } float weight=(isolevels-density2)/(density1-density2); *normal=lerp(normal1,normal2,weight); *position=lerp(v1,v2,weight); } __global__ void marching_cube(float3 * points, float3 * normals, int * d_numVertsTable, int ** d_edgeVertices, int ** d_triTable, float * color_density,int x, int y, int z, float size_voxel, float isolevels, int * vertices_number,int * case_number ) { int index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int ndz =index/(x*y); int ndy = (index-ndz*x*y)/x; int ndx = (index-ndz*x*y-ndy*x); float3 min_=make_float3(-25,-20,0); //float3 max_=make_float3(25,20,40); if(ndx<x-1&&ndx>0&&ndy<y-1&&ndy>0&&ndz<z-1&&ndz>0) { int cases=case_number[index]; int numberVer=d_numVertsTable[cases]; if(numberVer==0) return; float color[8]; color[0]=color_density[index]; color[1]=color_density[index+x]; color[2]=color_density[index+x+1]; color[3]=color_density[index+1]; color[4]=color_density[index+x*y]; color[5]=color_density[index+x+x*y]; color[6]=color_density[index+x+1+x*y]; color[7]=color_density[index+x*y]; //compute vertices positon and normals float3 v[8]; v[0].x=ndx*size_voxel+min_.x; v[0].y=ndy*size_voxel+min_.y; v[0].z= ndz*size_voxel+min_.z; v[1].x=ndx*size_voxel+min_.x; v[1].y=(ndy+1)*size_voxel+min_.y; v[1].z=ndz*size_voxel+min_.z; v[2].x=(ndx+1)*size_voxel+min_.x; v[2].y=(ndy+1)*size_voxel+min_.y; v[2].z=ndz*size_voxel+min_.z; v[3].x=(ndx+1)*size_voxel+min_.x; v[3].y=ndy*size_voxel+min_.y; v[3].z=ndz*size_voxel+min_.z; v[4].x=ndx*size_voxel+min_.x; v[4].y=ndy*size_voxel+min_.y; v[4].z= (ndz+1)*size_voxel+min_.z; v[5].x=ndx*size_voxel+min_.x; v[5].y=(ndy+1)*size_voxel+min_.y; v[5].z=(ndz+1)*size_voxel+min_.z; v[6].x=(ndx+1)*size_voxel+min_.x; v[6].y=(ndy+1)*size_voxel+min_.y; v[6].z=(ndz+1)*size_voxel+min_.z; v[7].x=(ndx+1)*size_voxel+min_.x; v[7].y=ndy*size_voxel+min_.y; v[7].z=(ndz+1)*size_voxel+min_.z; float3 normal[8]; ///add two columns and rows to the normal[0].x=(color_density[index+1]-color_density[index-1])/size_voxel; 
normal[0].y=(color_density[index+x]-color_density[index-x])/size_voxel; normal[0].z=(color_density[index+x*y]-color_density[index-x*y])/size_voxel; normal[1].x=(color_density[index+1+x]-color_density[index-1+x])/size_voxel; normal[1].y=(color_density[index+x+x]-color_density[index-x+x])/size_voxel; normal[1].z=(color_density[index+x*y+x]-color_density[index-x*y+x])/size_voxel; normal[2].x=(color_density[index+1+x+1]-color_density[index-1+x+1])/size_voxel; normal[2].y=(color_density[index+x+x+1]-color_density[index-x+x+1])/size_voxel; normal[2].z=(color_density[index+x*y+x+1]-color_density[index-x*y+x+1])/size_voxel; normal[3].x=(color_density[index+1+1]-color_density[index-1+1])/size_voxel; normal[3].y=(color_density[index+x+1]-color_density[index-x+1])/size_voxel; normal[3].z=(color_density[index+x*y+1]-color_density[index-x*y+1])/size_voxel; normal[4].x=(color_density[index+1+x*y]-color_density[index-1+x*y])/size_voxel; normal[4].y=(color_density[index+x+x*y]-color_density[index-x+x*y])/size_voxel; normal[4].z= (color_density[index+x*y+x*y]-color_density[index-x*y+x*y])/size_voxel; normal[5].x=(color_density[index+1+x+x*y]-color_density[index-1+x+x*y])/size_voxel; normal[5].y=(color_density[index+x+x+x*y]-color_density[index-x+x+x*y])/size_voxel; normal[5].z=(color_density[index+x*y+x+x*y]-color_density[index-x*y+x+x*y])/size_voxel; normal[6].x=(color_density[index+1+x+1+x*y]-color_density[index-1+x+1+x*y])/size_voxel; normal[6].y=(color_density[index+x+x+1+x*y]-color_density[index-x+x+1+x*y])/size_voxel; normal[6].z=(color_density[index+x*y+x+1+x*y]-color_density[index-x*y+x+1+x*y])/size_voxel; normal[7].x=(color_density[index+1+1+x*y]-color_density[index-1+1+x*y])/size_voxel; normal[7].y=(color_density[index+x+1+x*y]-color_density[index-x+1+x*y])/size_voxel; normal[7].z=(color_density[index+x*y+1+x*y]-color_density[index-x*y+1+x*y])/size_voxel; //interplate int ver_index=vertices_number[index]; int i=0; while(numberVer!=0) { if(d_triTable[cases][i]!=-1)////// { //computeVetexNormal(d_triTable[cases][i], color, d_edgeVertices, v, normal, &normals[ver_index++], &points[ver_index++], size_voxel, isolevels); computeVetexNormal(d_triTable[cases][i], color, d_edgeVertices, v, normal, &normals[ver_index], &points[ver_index], size_voxel, isolevels); ver_index++; numberVer--; } i++; } } } __device__ void computeGi( float * B) { int m=3,n=3; // particle index float S[3]; float U[9]; float V[9]; float e[3]; float work[3]; int nct = min( m-1, n ); int nrt = max( 0, n-2 ); int i=0, j=0, k=0; for( k=0; k<max(nct,nrt); ++k ) { if( k < nct ) { // Compute the transformation for the k-th column and // place the k-th diagonal in s[k]. // Compute 2-norm of k-th column without under/overflow. S[k] = 0; for( i=k; i<m; ++i ) S[k] = hypot( S[k], B[i*n+k] ); if( S[k] != 0 ) { if( B[k*n+k] < 0 ) S[k] = -S[k]; for( i=k; i<m; ++i ) B[i*n+k] /= S[k]; B[k*n+k] += 1; } S[k] = -S[k]; } for( j=k+1; j<n; ++j ) { if( (k < nct) && ( S[k] != 0 ) ) { // apply the transformation float t = 0; for( i=k; i<m; ++i ) t += B[i*n+k] * B[i*n+j]; t = -t / B[k*n+k]; for( i=k; i<m; ++i ) B[i*n+j] += t*B[i*n+k]; } e[j] = B[k*n+j]; } // Place the transformation in U for subsequent back // multiplication. if( (k < nct) ) for( i=k; i<m; ++i ) U[i*n+k] = B[i*n+k]; if( k < nrt ) { // Compute the k-th row transformation and place the // k-th super-diagonal in e[k]. // Compute 2-norm without under/overflow. 
e[k] = 0; for( i=k+1; i<n; ++i ) e[k] = hypot( e[k], e[i] ); if( e[k] != 0 ) { if( e[k+1] < 0 ) e[k] = -e[k]; for( i=k+1; i<n; ++i ) e[i] /= e[k]; e[k+1] += 1; } e[k] = -e[k]; if( (k+1 < m) && ( e[k] != 0 ) ) { // apply the transformation for( i=k+1; i<m; ++i ) work[i] = 0; for( j=k+1; j<n; ++j ) for( i=k+1; i<m; ++i ) work[i] += e[j] * B[i*n+j]; for( j=k+1; j<n; ++j ) { float t = -e[j]/e[k+1]; for( i=k+1; i<m; ++i ) B[i*n+j] += t * work[i]; } } // Place the transformation in V for subsequent // back multiplication. for( i=k+1; i<n; ++i ) V[i*n+k] = e[i]; } } // Set up the final bidiagonal matrix or order p. //cout<<B<<endl; int p = n; if( nct < n ) S[nct] = B[nct*n+nct]; if( m < p ) S[p-1] = 0; if( nrt+1 < p ) e[nrt] = B[nrt*n+p-1]; e[p-1] = 0; // if required, generate U for( j=nct; j<n; ++j ) { for( i=0; i<m; ++i ) U[i*n+j] = 0; U[j*n+j] = 1; } for( k=nct-1; k>=0; --k ) if( S[k] != 0 ) { for( j=k+1; j<n; ++j ) { float t = 0; for( i=k; i<m; ++i ) t += U[i*n+k] * U[i*n+j]; t = -t / U[k*n+k]; for( i=k; i<m; ++i ) U[i*n+j] += t * U[i*n+k]; } for( i=k; i<m; ++i ) U[i*n+k] = -U[i*n+k]; U[k*n+k] = 1 + U[k*n+k]; for( i=0; i<k-1; ++i ) U[i*n+k] = 0; } else { for( i=0; i<m; ++i ) U[i*n+k] = 0; U[k*n+k] = 1; } // if required, generate V for( k=n-1; k>=0; --k ) { if( (k < nrt) && ( e[k] != 0 ) ) for( j=k+1; j<n; ++j ) { float t = 0; for( i=k+1; i<n; ++i ) t += V[i*n+k] * V[i*n+j]; t = -t / V[(k+1)*n+k]; for( i=k+1; i<n; ++i ) V[i*n+j] += t * V[i*n+k]; } for( i=0; i<n; ++i ) V[i*n+k] = 0; V[k*n+k] = 1; } int pp = p-1; int iter = 0; double eps = pow( 2.0, -5); while( p > 0 ) { int k = 0; int kase = 0; // Here is where a test for too many iterations would go. // This section of the program inspects for negligible // elements in the s and e arrays. On completion the // variables kase and k are set as follows. // kase = 1 if s(p) and e[k-1] are negligible and k<p // kase = 2 if s(k) is negligible and k<p // kase = 3 if e[k-1] is negligible, k<p, and // s(k), ..., s(p) are not negligible // kase = 4 if e(p-1) is negligible (convergence). for( k=p-2; k>=-1; --k ) { if( k == -1 ) break; if( abs(e[k]) <= eps*( abs(S[k])+abs(S[k+1]) ) ) { e[k] = 0; break; } } if( k == p-2 ) kase = 4; else { int ks; for( ks=p-1; ks>=k; --ks ) { if( ks == k ) break; float t = ( (ks != p) ? abs(e[ks]) : 0 ) + ( (ks != k+1) ? abs(e[ks-1]) : 0 ); if( abs(S[ks]) <= eps*t ) { S[ks] = 0; break; } } if( ks == k ) kase = 3; else if( ks == p-1 ) kase = 1; else { kase = 2; k = ks; } } k++; // Perform the task indicated by kase. 
switch( kase ) { // deflate negligible s(p) case 1: { float f = e[p-2]; e[p-2] = 0; for( j=p-2; j>=k; --j ) { float t = hypot( S[j], f ); float cs = S[j] / t; float sn = f / t; S[j] = t; if( j != k ) { f = -sn * e[j-1]; e[j-1] = cs * e[j-1]; } for( i=0; i<n; ++i ) { t = cs*V[i*n+j] + sn*V[i*n+p-1]; V[i*n+p-1] = -sn*V[i*n+j] + cs*V[i*n+p-1]; V[i*n+j] = t; } } } break; // split at negligible s(k) case 2: { float f = e[k-1]; e[k-1] = 0; for( j=k; j<p; ++j ) { float t = hypot( S[j], f ); float cs = S[j] / t; float sn = f / t; S[j] = t; f = -sn * e[j]; e[j] = cs * e[j]; for( i=0; i<m; ++i ) { t = cs*U[i*n+j] + sn*U[i*n+k-1]; U[i*n+k-1] = -sn*U[i*n+j] + cs*U[i*n+k-1]; U[i*n+j] = t; } } } break; // perform one qr step case 3: { // calculate the shift float scale = max( max( max( max( abs(S[p-1]), abs(S[p-2]) ), abs(e[p-2]) ), abs(S[k]) ), abs(e[k]) ); float sp = S[p-1] / scale; float spm1 = S[p-2] / scale; float epm1 = e[p-2] / scale; float sk = S[k] / scale; float ek = e[k] / scale; float b = ( (spm1+sp)*(spm1-sp) + epm1*epm1 ) / 2.0; float c = (sp*epm1) * (sp*epm1); float shift = 0; if( ( b != 0 ) || ( c != 0 ) ) { shift = sqrt( b*b+c ); if( b < 0 ) shift = -shift; shift = c / ( b+shift ); } float f = (sk+sp)*(sk-sp) + shift; float g = sk * ek; // chase zeros for( j=k; j<p-1; ++j ) { float t = hypot( f, g ); float cs = f / t; float sn = g / t; if( j != k ) e[j-1] = t; f = cs*S[j] + sn*e[j]; e[j] = cs*e[j] - sn*S[j]; g = sn * S[j+1]; S[j+1] = cs * S[j+1]; //Forward transformation of YT for( i=0; i<n; ++i ) { t = cs*V[i*n+j] + sn*V[i*n+j+1]; V[i*n+j+1] = -sn*V[i*n+j] + cs*V[i*n+j+1]; V[i*n+j] = t; } t = hypot( f, g ); cs = f / t; sn = g / t; S[j] = t; f = cs*e[j] + sn*S[j+1]; S[j+1] = -sn*e[j] + cs*S[j+1]; g = sn * e[j+1]; e[j+1] = cs * e[j+1]; if( ( j < m-1 ) ) for( i=0; i<m; ++i ) { t = cs*U[i*n+j] + sn*U[i*n+j+1]; U[i*n+j+1] = -sn*U[i*n+j] + cs*U[i*n+j+1]; U[i*n+j] = t; } } e[p-2] = f; iter = iter + 1; } break; // convergence case 4: { // Make the singular values positive. if( S[k] <= 0 ) { S[k] = ( S[k] < 0 ) ? -S[k] : 0; for( i=0; i<=pp; ++i ) V[i*n+k] = -V[i*n+k]; } // Order the singular values. 
while( k < pp ) { if( S[k] >= S[k+1] ) break; float t = S[k]; S[k] = S[k+1]; S[k+1] = t; if(( k < n-1 ) ) for( i=0; i<n; ++i ) { float temp=V[i*n+k]; V[i*n+k]=V[i*n+k+1]; V[i*n+k+1] =temp; }; if( ( k < m-1 ) ) for( i=0; i<m; ++i ) { float temp=U[i*n+k]; U[i*n+k]=U[i*n+k+1]; U[i*n+k+1] =temp; } k++; } iter = 0; p--; } break; } } float kr=4.0; float ks=1400.0; //float kn=.5; for(int i=2;i<3;i++) { S[i]=1/(ks*max(S[i],S[1]/kr)); } for(int i=0;i<9;i++) { B[i]=0.0; } for(int i=0;i<3;i++) { for(int j=0;j<3;j++) { for(int k=0;k<3;k++) { B[i*3+j]+=S[k]*U[i*3+k]*V[k*3+j]; } } } } __global__ void computePressure ( char* bufPnts, int numPnt) // bufPnts = mBuf[0].data; { uint ndx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if(ndx<numPnt) { float3* pos = (float3*) (bufPnts + __mul24(ndx, simData.stride)); float3* posi; char *dat2; //char *length; //Fluid *p, *q; int cnt = 0; float3 dist; double sum, dsq, c; double d,/* d2,*/ mR, mR2; d = simData.sim_scale; mR =simData.smooth_rad; mR2 = mR*mR; sum = 0.0; for(int i = 0; i < numPnt; i ++){ dat2 = bufPnts + i*simData.stride; posi = (float3*)(dat2); if ( pos==posi ) continue; // dist.x = ( pos->x - posi->x)*d; // dist in cm // dist.y = ( pos->y - posi->y)*d; // dist.z = ( pos->z - posi->z)*d; //dist = (pos - posi)*make_float3(d); dist = (*pos - *posi)*d; dsq = dot(dist,dist); if ( mR2 > dsq ) { c = mR2 - dsq; sum += c * c * c; cnt++; //if ( p == m_CurrP ) q->tag = true; } } sum = sum * simData.pmass * simData.poly6kern; if( sum ==0.0) sum = 1.0; *(float*) ((char*)pos + OFFSET_PRESS) = ( sum - simData.rest_dens ) * simData.stiffness; *(float*) ((char*)pos + OFFSET_DENS) = 1.0f / sum; } //} //__syncthreads (); } __global__ void computeForce ( char* bufPnts,int numPnt, float ** out_matrix, float3 * mean_points) { uint ndx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( ndx < numPnt ) { float3* pos = (float3*) (bufPnts + __mul24(ndx, simData.stride)); float3* posi; float3 force; register double termPressure, termVelo, termDist; double /*c,*/ r, /*d, sum,*/ dsq; float3 dist; double mR, mR2; float press = *(float*) ((char*)pos + OFFSET_PRESS); float dens = *(float*) ((char*)pos + OFFSET_DENS); float3 veval = *(float3*) ((char*)pos + OFFSET_VEVAL ); float3 qeval; mR =simData.smooth_rad;// m_Param[SPH_SMOOTHRADIUS]; mR2 = (mR*mR); termVelo = simData.lapkern * simData.visc; force = make_float3(0,0,0); float weight_sum = 0.0; float3 mean_point=make_float3(0,0,0); float weights[2047]; for(int i = 0; i<numPnt;i++) { posi = (float3 *)(bufPnts + i * simData.stride); weights[i]=0.0; if ( pos == posi ) continue; dist = (*pos - *posi)*simData.sim_scale; dsq = dot(dist,dist);//(dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); if ( mR2 > dsq ) { r = sqrtf ( dsq ); //Force termPressure = -0.5f * (mR - r) * simData.spikykern * ( press + *(float*)((char*)posi+OFFSET_PRESS)) / r; termDist = (mR - r) * dens * *(float*)((char*)posi+OFFSET_DENS); qeval = *(float3*)((char*)posi+OFFSET_VEVAL); force.x += ( termPressure * dist.x + termVelo * (qeval.x - veval.x) ) * termDist; force.y += ( termPressure * dist.y + termVelo * (qeval.y - veval.y) ) * termDist; force.z += ( termPressure * dist.z + termVelo * (qeval.z - veval.z) ) * termDist; //MeanPoint float weight=1.0-r*r*r/mR/mR/mR; mean_point+=weight*(*posi); weight_sum+=weight; weights[i]=weight; } } *(float3*) ((char*)pos + OFFSET_FORCE ) = force; ///// mean matri if (weight_sum<pow( 2.0, -5)) mean_point=*pos; else mean_point/=weight_sum; mean_points[ndx]=mean_point; float 
*out=out_matrix[ndx]; // float out[9]; for(int i=0;i<9;i++) { out[i]=0.0; } for(int i=0;i<numPnt;i++) { float3 data = *((float3 *)(bufPnts+__mul24(i, simData.stride))); data=data-mean_point; out[0]+=weights[i]*data.x*data.x; out[1]+=weights[i]*data.x*data.y; out[2]+=weights[i]*data.x*data.z; out[3]+=weights[i]*data.y*data.x; out[4]+=weights[i]*data.y*data.y; out[5]+=weights[i]*data.y*data.z; out[6]+=weights[i]*data.z*data.x; out[7]+=weights[i]*data.z*data.y; out[8]+=weights[i]*data.z*data.z; /*out[0]=data.x; out[1]=data.y; out[2]=data.z; out[3]=data.x; out[4]=data.y; out[5]=data.z; out[6]=data.x; out[7]=data.y; out[8]=data.z;*/ /*out[0]=weights[i]; out[1]=weights[i]; out[2]=weights[i]; out[3]=weights[i]; out[4]=weights[i]; out[5]=weights[i]; out[6]=weights[i]; out[7]=weights[i];*/ } if(weight_sum>pow( 2.0, -5)) { for(int i=0;i<9;i++) { out[i]/=weight_sum; //out[i]=weights[i]; } } ////// computeGi(out); //float * o=out_matrix[ndx]; //for(int i=0;i<9;i++) //{ // o[i]=out[i]; //} } } __global__ void advanceParticles ( char* bufPnts, int numPnt, float dt,float ss,float time ) { // uint ndx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index /* if ( ndx < numPnt ) { char *ptnData_start, *ptnData_end; // Fluid* pnt; float3 norm, z, dir, acc, vnext, min, max; double adj; float speedLimit, speedLimit2, simScale, particleRadius; float stiff, damp, speed, diff; speedLimit = simData.speedLimit; speedLimit2 = speedLimit*speedLimit; stiff = simData.stiffness; damp = simData.damp; particleRadius = simData.particleRadius;// m_Param[SPH_PRADIUS]; min = simData.min;//m_Vec[SPH_VOLMIN]; max = simData.max;//m_Vec[SPH_VOLMAX]; simScale = ss;//m_Param[SPH_SIMSCALE]; float3* pos = (float3*) (bufPnts + __mul24(ndx, simData.stride)); float3* veval =(float3*) ((char*)pos + OFFSET_VEVAL ); float3* vel = (float3*) ((char*)pos + OFFSET_VEL ); // Compute acceration acc = *(float3*) ((char*)pos + OFFSET_FORCE ); //acc *= simData.pmass; // acc *= (1/m_Param[SPH_PMASS]); // Velocity limiting speed = acc.x*acc.x + acc.y*acc.y + acc.z*acc.z; if ( speed > speedLimit2 ) { acc.x *= speedLimit / sqrt(speed); acc.y *= speedLimit / sqrt(speed); acc.z *= speedLimit / sqrt(speed); } // Boundary Conditions // Z-axis walls diff = 2 * particleRadius - ( pos->z - min.z - (pos->x - min.x) * simData.slope )*simScale; if (diff > COLLI_DET ) { norm = make_float3 ( -simData.slope, 0, 1.0 - simData.slope ); adj = stiff * diff - damp * (norm.x*veval.x+norm.y*veval.y+norm.z+veval.z);//norm.Dot ( veval ); acc.x += adj * norm.x; acc.y += adj * norm.y; acc.z += adj * norm.z; } diff = 2 * particleRadius - ( max.z - pos->z )*simScale; if (diff > COLLI_DET) { norm = make_float3 ( 0, 0, -1 ); adj = stiff * diff - damp * (norm.x*veval.x+norm.y*veval.y+norm.z*veval.z);//norm.Dot ( pnt->vel_eval ); acc.x += adj * norm.x; acc.y += adj * norm.y; acc.z += adj * norm.z; } // X-axis walls diff = 2 * particleRadius - ( pos->x - min.x + (sin(time*10.0)-1+(pos->y*0.025)*0.25) * simData.xminSin )*simScale; //diff = 2 * particleRadius - ( pnt->pos.x - min.x + (sin(time*10.0)-1) * m_Param[FORCE_XMIN_SIN] )*simScale; if (diff > COLLI_DET ) { norm = make_float3( 1.0, 0, 0 ); adj = (simData.xminSin + 1) * stiff * diff - damp * (norm.x*veval.x+norm.y*veval.y+norm.z*veval.z);//norm.Dot ( pnt->vel_eval ) ; acc.x += adj * norm.x; acc.y += adj * norm.y; acc.z += adj * norm.z; } diff = 2 * particleRadius - ( max.x - pos->x + (sin(time*10.0)-1) * simData.xmaxSin )*simScale; if (diff > COLLI_DET) { norm = make_float3( -1, 0, 0 ); adj = (simData.xmaxSin+1) * 
stiff * diff - damp * (norm.x*veval.x+norm.y*veval.y+norm.z*veval.z);//.Dot ( pnt->vel_eval ); acc.x += adj * norm.x; acc.y += adj * norm.y; acc.z += adj * norm.z; } // Y-axis walls diff = 2 * particleRadius - ( pos->y - min.y )*simScale; if (diff > COLLI_DET) { norm = make_float3 ( 0, 1, 0 ); adj = stiff * diff - damp * (norm.x*veval.x+norm.y*veval.y+norm.z*veval.z);//norm.Dot ( pnt->vel_eval ); acc.x += adj * norm.x; acc.y += adj * norm.y; acc.z += adj * norm.z; } diff = 2 * particleRadius - ( max.y - pos->y )*simScale; if (diff > COLLI_DET) { norm = make_float3 ( 0, -1, 0 ); adj = stiff * diff - damp * (norm.x*veval.x+norm.y*veval.y+norm.z*veval.z);//norm.Dot ( pnt->vel_eval ); acc.x += adj * norm.x; acc.y += adj * norm.y; acc.z += adj * norm.z; } // Plane gravity //if ( simData.gravity > 0) acc.z += simData.gravity; // Leapfrog Integration ---------------------------- vnext = acc; vnext.x *= dt; vnext.y *= dt; vnext.z *= dt; vnext.x += vel.x; // v(t+1/2) = v(t-1/2) + a(t) dt vnext.y += vel.y; vnext.z += vel.z; veval = vel; veval += vnext; veval *= 0.5; // v(t+1) = [v(t-1/2) + v(t+1/2)] * 0.5 used to compute forces later vel = vnext; vnext *= dt/simScale; pos += vnext; // p(t+1) = p(t) + v(t+1/2) dt */ /* if ( m_Param[CLR_MODE]==1.0 ) { adj = fabs(vnext.x)+fabs(vnext.y)+fabs(vnext.z) / 7000.0; adj = (adj > 1.0) ? 1.0 : adj; pnt->clr = COLORA( 0, adj, adj, 1 ); } if ( m_Param[CLR_MODE]==2.0 ) { float v = 0.5 + ( pnt->pressure / 1500.0); if ( v < 0.1 ) v = 0.1; if ( v > 1.0 ) v = 1.0; pnt->clr = COLORA ( v, 1-v, 0, 1 ); } */ // Euler integration ------------------------------- /* acc += m_Gravity; acc *= m_DT; pnt->vel += acc; // v(t+1) = v(t) + a(t) dt pnt->vel_eval += acc; pnt->vel_eval *= m_DT/d; pnt->pos += pnt->vel_eval; pnt->vel_eval = pnt->vel; */ /* if ( m_Toggle[WRAP_X] ) { diff = pnt->pos.x - (m_Vec[SPH_VOLMIN].x + 2); // -- Simulates object in center of flow if ( diff <= 0 ) { pnt->pos.x = (m_Vec[SPH_VOLMAX].x - 2) + diff*2; pnt->pos.z = 10; } } } */ // time += m_DT; uint ndx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( ndx < numPnt ) { // Get particle vars float3* pos = (float3*) (bufPnts + __mul24(ndx, simData.stride)); float3* vel = (float3*) ((char*)pos + OFFSET_VEL ); float3* veval = (float3*) ((char*)pos + OFFSET_VEVAL ); float3 accel = *(float3*) ((char*)pos + OFFSET_FORCE ); float3 vcurr, vnext; float3 norm,/* z, dir,*/ min, max; double adj; float speedLimit, speedLimit2, simScale, particleRadius; float stiff, damp, speed, diff; speedLimit = 200.0;//simData.speedLimit; speedLimit2 = speedLimit*speedLimit; particleRadius = 0.004;//simData.particleRadius; stiff = 10000.0;//simData.stiffness; damp = 256;//simData.damp; simScale = simData.sim_scale; min = /*simData.min;*/make_float3(-25,-20,0); max = /*simData.max;*/make_float3(25,20,40);//simData.max; // Leapfrog integration accel.x *= 0.00020543; // NOTE - To do: SPH_PMASS should be passed in accel.y *= 0.00020543; accel.z *= 0.00020543; speed = accel.x*accel.x + accel.y*accel.y + accel.z*accel.z; if ( speed > speedLimit2 ) { accel.x *= speedLimit/sqrt(speed); accel.y *= speedLimit/sqrt(speed); accel.z *= speedLimit/sqrt(speed); } diff = 2 * particleRadius - ( pos->z - min.z - (pos->x - min.x) *simData.slope )*simScale; if (diff > COLLI_DET ) { norm = make_float3 ( -simData.slope, 0, 1.0 - simData.slope ); adj = stiff * diff - damp * (norm.x*veval->x+norm.y*veval->y+norm.z*veval->z);//norm.Dot ( veval ); //accel.x = -accel.x;accel.y=-accel.y;accel.z=-accel.z; accel.x += adj * norm.x; accel.y += 
adj * norm.y; accel.z += adj * norm.z; } diff = 2 * particleRadius - ( max.z - pos->z )*simScale; if (diff > COLLI_DET) { norm = make_float3 ( 0, 0, -1 ); adj = stiff * diff - damp * (norm.x*veval->x+norm.y*veval->y+norm.z*veval->z);//norm.Dot ( pnt->vel_eval ); accel.x += adj * norm.x; accel.y += adj * norm.y; accel.z += adj * norm.z; } // X-axis walls diff = 2 * particleRadius - ( pos->x - min.x + (sin(time*10.0)-1+(pos->y*0.025)*0.25) * simData.xminSin )*simScale; //diff = 2 * particleRadius - ( pnt->pos.x - min.x + (sin(time*10.0)-1) * m_Param[FORCE_XMIN_SIN] )*simScale; if (diff > COLLI_DET ) { norm = make_float3( 1.0, 0, 0 ); adj = (simData.xminSin + 1) * stiff * diff - damp * (norm.x*veval->x+norm.y*veval->y+norm.z*veval->z);//norm.Dot ( pnt->vel_eval ) ; accel.x += adj * norm.x; accel.y += adj * norm.y; accel.z += adj * norm.z; } diff = 2 * particleRadius - ( max.x - pos->x + (sin(time*10.0)-1) * simData.xmaxSin )*simScale; if (diff > COLLI_DET) { norm = make_float3( -1, 0, 0 ); adj = (simData.xmaxSin+1) * stiff * diff - damp * (norm.x*veval->x+norm.y*veval->y+norm.z*veval->z);//.Dot ( pnt->vel_eval ); accel.x += adj * norm.x; accel.y += adj * norm.y; accel.z += adj * norm.z; } // Y-axis walls diff = 2 * particleRadius - ( pos->y - min.y )*simScale; if (diff > COLLI_DET) { norm = make_float3 ( 0, 1, 0 ); adj = stiff * diff - damp * (norm.x*veval->x+norm.y*veval->y+norm.z*veval->z);//norm.Dot ( pnt->vel_eval ); accel.x += adj * norm.x; accel.y += adj * norm.y; accel.z += adj * norm.z; } diff = 2 * particleRadius - ( max.y - pos->y )*simScale; if (diff > COLLI_DET) { norm = make_float3 ( 0, -1, 0 ); adj = stiff * diff - damp * (norm.x*veval->x+norm.y*veval->y+norm.z*veval->z);//norm.Dot ( pnt->vel_eval ); accel.x += adj * norm.x; accel.y += adj * norm.y; accel.z += adj * norm.z; } accel.z -= 9.8; vcurr = *vel; vnext.x = accel.x*dt+vcurr.x; vnext.y = accel.y*dt+vcurr.y; vnext.z = accel.z*dt+vcurr.z; accel.x = (vcurr.x+vnext.x)*0.5; accel.y = (vcurr.y+vnext.y)*0.5; accel.z = (vcurr.z+vnext.z)*0.5; *veval = accel; *vel =vnext; dt /= simData.sim_scale; vnext.x = pos->x + vnext.x*dt; vnext.y = pos->y + vnext.y*dt; vnext.z = pos->z + vnext.z*dt; *pos = vnext; } /* vcurr = *vel; vnext.x = accel.x*dt + vcurr.x; vnext.y = accel.y*dt + vcurr.y; vnext.z = accel.z*dt + vcurr.z; // v(t+1/2) = v(t-1/2) + a(t) dt accel.x = (vcurr.x + vnext.x) * 0.5; // v(t+1) = [v(t-1/2) + v(t+1/2)] * 0.5 used to compute forces later accel.y = (vcurr.y + vnext.y) * 0.5; // v(t+1) = [v(t-1/2) + v(t+1/2)] * 0.5 used to compute forces later accel.z = (vcurr.z + vnext.z) * 0.5; // v(t+1) = [v(t-1/2) + v(t+1/2)] * 0.5 used to compute forces later *veval = accel; *vel = vnext; dt /= simData.sim_scale; vnext.x = pos->x + vnext.x*dt; vnext.y = pos->y + vnext.y*dt; vnext.z = pos->z + vnext.z*dt; *pos = vnext; // p(t+1) = p(t) + v(t+1/2) dt } */ __syncthreads (); } #endif
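The computePressure kernel in the file above evaluates, for every particle, the standard poly6 density estimate (a brute-force O(N^2) sum of (h^2 - r^2)^3 over neighbors with r < h, scaled by pmass and the poly6 constant) and then derives pressure as (density - rest_density) * stiffness, storing the reciprocal density for the force pass. A minimal host-side sketch of that same step follows; the constants are assumed placeholders, not the simulator's tuned values from simData:

```cpp
// Host-side sketch of the poly6 density / pressure step performed per particle
// in computePressure above. All constants here are illustrative assumptions.
#include <cmath>
#include <vector>

struct P3 { float x, y, z; };

void density_pressure_sketch(const std::vector<P3>& pos,
                             std::vector<float>& press,
                             std::vector<float>& inv_dens) {
    const float pmass = 0.00020543f;  // assumed particle mass
    const float h     = 0.01f;        // assumed smoothing radius
    const float rest  = 600.0f;       // assumed rest density
    const float stiff = 1.5f;         // assumed gas stiffness
    const float poly6 = 315.0f / (64.0f * 3.141592f * powf(h, 9.0f)); // poly6 kernel constant
    const float h2 = h * h;

    press.resize(pos.size());
    inv_dens.resize(pos.size());

    for (size_t i = 0; i < pos.size(); ++i) {
        double sum = 0.0;
        for (size_t j = 0; j < pos.size(); ++j) {   // brute-force O(N^2), as in the kernel
            if (i == j) continue;
            float dx = pos[i].x - pos[j].x, dy = pos[i].y - pos[j].y, dz = pos[i].z - pos[j].z;
            float r2 = dx * dx + dy * dy + dz * dz;
            if (r2 < h2) { float c = h2 - r2; sum += c * c * c; }
        }
        float dens = (float)(sum * pmass * poly6);
        if (dens == 0.0f) dens = 1.0f;              // same guard as the kernel
        press[i]    = (dens - rest) * stiff;
        inv_dens[i] = 1.0f / dens;                  // stored as 1/density, as in OFFSET_DENS
    }
}
```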
9c861f4529f621e19f4e2ae09546c25de2011eb3.cu
/* FLUIDS v.1 - SPH Fluid Simulator for CPU and GPU Copyright (C) 2008. Rama Hoetzlein, http://www.rchoetzlein.com ZLib license This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #define COLLI_DET 0.0001f #ifndef _PARTICLES_KERNEL_H_ #define _PARTICLES_KERNEL_H_ #include <stdio.h> #include "cutil_math.h" #define EPS 0.00001 #include "fluid_system_host.cuh" //#include <thrust/scan.h> #define TOTAL_THREADS 65536 #define BLOCK_THREADS 256 #define MAX_NBR 80 __constant__ FluidParams simData; // simulation data (on device) __device__ int bufNeighbor[ TOTAL_THREADS*MAX_NBR ]; __device__ float bufNdist[ TOTAL_THREADS*MAX_NBR ]; #define COLOR(r,g,b) ( (uint((r)*255.0f)<<24) | (uint((g)*255.0f)<<16) | (uint((b)*255.0f)<<8) ) #define COLORA(r,g,b,a) ( (uint((r)*255.0f)<<24) | (uint((g)*255.0f)<<16) | (uint((b)*255.0f)<<8) | uint((a)*255.0f) ) #define NULL_HASH 333333 #define OFFSET_CLR 12 #define OFFSET_NEXT 16 #define OFFSET_VEL 20 #define OFFSET_VEVAL 32 #define OFFSET_PRESS 48 #define OFFSET_DENS 52 #define OFFSET_FORCE 56 __device__ void mul_Matrix_Vector(float * matrix ,float3 * vector, float3 * result) { result->x=matrix[0]*vector->x+matrix[1]*vector->y+matrix[2]*vector->z; result->y=matrix[3]*vector->x+matrix[4]*vector->y+matrix[5]*vector->z; result->z=matrix[6]*vector->x+matrix[7]*vector->y+matrix[8]*vector->z; } __device__ void compute_matrix_value(float * matrix, float * result) { (*result)=matrix[0]*matrix[4]*matrix[8]+ matrix[1]*matrix[5]*matrix[6]+ matrix[2]*matrix[3]*matrix[7]- matrix[2]*matrix[4]*matrix[6]- matrix[0]*matrix[5]*matrix[7]- matrix[1]*matrix[3]*matrix[8]; } __global__ void computeDensity(float * out_density, float *in_G[], float3 * in_mean_p,char * bufPnts ,int numPnt,int x, int y,int z,float size_voxel)// { uint n = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; float3 min_=make_float3(-25,-20,0); float3 max_=make_float3(25,20,40); uint ndz =n/(x*y); uint ndy = (n-ndz*x*y)/x; uint ndx = (n-ndz*x*y-ndy*x); if(ndx<x&&ndy<y&&ndz<z) { int index=n; float den=0.0; float3 position; position.x=ndx*size_voxel+min_.x; position.y=ndy*size_voxel+min_.y; position.z=ndz*size_voxel+min_.z; for(int i=0;i<numPnt;i++) { char * data=bufPnts + i * simData.stride; float3 posi = *(float3 *)data; float3 dis=(posi-in_mean_p[i])*simData.sim_scale ; float3 Gr; mul_Matrix_Vector(in_G[i], & dis, &Gr); float l=length(Gr); if(l<simData.smooth_rad) { float G; compute_matrix_value(in_G[i],& G); float m=simData.smooth_rad-length(Gr); den+=G*m*m*m*m/(*(float*) (data + OFFSET_DENS)); } } den*=simData.pmass*simData.poly6kern; if(position.z>10) out_density[index]=1; else out_density[index]=0.0; } } __global__ void compute_vertices_number(int * case_number,int * vertices_number, float * color_density, int * d_numVertsTable, int x, int y, int z, float isolevel) { uint 
index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; uint ndz =index/(x*y); uint ndy = (index-ndz*x*y)/x; uint ndx = (index-ndz*x*y-ndy*x); if(ndx<x-1&&ndx>0&&ndy<y-1&&ndx>0&&ndz<z-1&&ndz>0) { //no intersections float color[8]; color[0]=color_density[index]; color[1]=color_density[index+x]; color[2]=color_density[index+x+1]; color[3]=color_density[index+1]; color[4]=color_density[index+x*y]; color[5]=color_density[index+x+x*y]; color[6]=color_density[index+x+1+x*y]; color[7]=color_density[index+x*y]; uint cubeindex=0; if(color[0]<isolevel) cubeindex+=1; if(color[1]<isolevel) cubeindex+=2; if(color[2]<isolevel) cubeindex+=4; if(color[3]<isolevel) cubeindex+=8; if(color[4]<isolevel) cubeindex+=16; if(color[5]<isolevel) cubeindex+=32; if(color[6]<isolevel) cubeindex+=64; if(color[7]<isolevel) cubeindex+=128; case_number[index]=cubeindex; vertices_number[index]=d_numVertsTable[cubeindex]; } } __device__ void computeVetexNormal(int edge, float * color_density, int ** d_edgeVertices, float3 * v,float3 * normals, float3 * normal, float3 * position, float size_voxel, float isolevels) { int x=d_edgeVertices[edge][0]; int y=d_edgeVertices[edge][1]; float density1=color_density[x]; float density2=color_density[y]; float3 normal1=normals[x]; float3 normal2=normals[y]; float3 v1=v[x]; float3 v2=v[y]; if(fabs(density2-isolevels)<EPS) { *normal=normal2; *position=v2; return; } if(abs(density1-isolevels)<EPS) { *normal=normal1; *position=v1; return; } float weight=(isolevels-density2)/(density1-density2); *normal=lerp(normal1,normal2,weight); *position=lerp(v1,v2,weight); } __global__ void marching_cube(float3 * points, float3 * normals, int * d_numVertsTable, int ** d_edgeVertices, int ** d_triTable, float * color_density,int x, int y, int z, float size_voxel, float isolevels, int * vertices_number,int * case_number ) { int index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; int ndz =index/(x*y); int ndy = (index-ndz*x*y)/x; int ndx = (index-ndz*x*y-ndy*x); float3 min_=make_float3(-25,-20,0); //float3 max_=make_float3(25,20,40); if(ndx<x-1&&ndx>0&&ndy<y-1&&ndy>0&&ndz<z-1&&ndz>0) { int cases=case_number[index]; int numberVer=d_numVertsTable[cases]; if(numberVer==0) return; float color[8]; color[0]=color_density[index]; color[1]=color_density[index+x]; color[2]=color_density[index+x+1]; color[3]=color_density[index+1]; color[4]=color_density[index+x*y]; color[5]=color_density[index+x+x*y]; color[6]=color_density[index+x+1+x*y]; color[7]=color_density[index+x*y]; //compute vertices positon and normals float3 v[8]; v[0].x=ndx*size_voxel+min_.x; v[0].y=ndy*size_voxel+min_.y; v[0].z= ndz*size_voxel+min_.z; v[1].x=ndx*size_voxel+min_.x; v[1].y=(ndy+1)*size_voxel+min_.y; v[1].z=ndz*size_voxel+min_.z; v[2].x=(ndx+1)*size_voxel+min_.x; v[2].y=(ndy+1)*size_voxel+min_.y; v[2].z=ndz*size_voxel+min_.z; v[3].x=(ndx+1)*size_voxel+min_.x; v[3].y=ndy*size_voxel+min_.y; v[3].z=ndz*size_voxel+min_.z; v[4].x=ndx*size_voxel+min_.x; v[4].y=ndy*size_voxel+min_.y; v[4].z= (ndz+1)*size_voxel+min_.z; v[5].x=ndx*size_voxel+min_.x; v[5].y=(ndy+1)*size_voxel+min_.y; v[5].z=(ndz+1)*size_voxel+min_.z; v[6].x=(ndx+1)*size_voxel+min_.x; v[6].y=(ndy+1)*size_voxel+min_.y; v[6].z=(ndz+1)*size_voxel+min_.z; v[7].x=(ndx+1)*size_voxel+min_.x; v[7].y=ndy*size_voxel+min_.y; v[7].z=(ndz+1)*size_voxel+min_.z; float3 normal[8]; ///add two columns and rows to the normal[0].x=(color_density[index+1]-color_density[index-1])/size_voxel; normal[0].y=(color_density[index+x]-color_density[index-x])/size_voxel; 
normal[0].z=(color_density[index+x*y]-color_density[index-x*y])/size_voxel; normal[1].x=(color_density[index+1+x]-color_density[index-1+x])/size_voxel; normal[1].y=(color_density[index+x+x]-color_density[index-x+x])/size_voxel; normal[1].z=(color_density[index+x*y+x]-color_density[index-x*y+x])/size_voxel; normal[2].x=(color_density[index+1+x+1]-color_density[index-1+x+1])/size_voxel; normal[2].y=(color_density[index+x+x+1]-color_density[index-x+x+1])/size_voxel; normal[2].z=(color_density[index+x*y+x+1]-color_density[index-x*y+x+1])/size_voxel; normal[3].x=(color_density[index+1+1]-color_density[index-1+1])/size_voxel; normal[3].y=(color_density[index+x+1]-color_density[index-x+1])/size_voxel; normal[3].z=(color_density[index+x*y+1]-color_density[index-x*y+1])/size_voxel; normal[4].x=(color_density[index+1+x*y]-color_density[index-1+x*y])/size_voxel; normal[4].y=(color_density[index+x+x*y]-color_density[index-x+x*y])/size_voxel; normal[4].z= (color_density[index+x*y+x*y]-color_density[index-x*y+x*y])/size_voxel; normal[5].x=(color_density[index+1+x+x*y]-color_density[index-1+x+x*y])/size_voxel; normal[5].y=(color_density[index+x+x+x*y]-color_density[index-x+x+x*y])/size_voxel; normal[5].z=(color_density[index+x*y+x+x*y]-color_density[index-x*y+x+x*y])/size_voxel; normal[6].x=(color_density[index+1+x+1+x*y]-color_density[index-1+x+1+x*y])/size_voxel; normal[6].y=(color_density[index+x+x+1+x*y]-color_density[index-x+x+1+x*y])/size_voxel; normal[6].z=(color_density[index+x*y+x+1+x*y]-color_density[index-x*y+x+1+x*y])/size_voxel; normal[7].x=(color_density[index+1+1+x*y]-color_density[index-1+1+x*y])/size_voxel; normal[7].y=(color_density[index+x+1+x*y]-color_density[index-x+1+x*y])/size_voxel; normal[7].z=(color_density[index+x*y+1+x*y]-color_density[index-x*y+1+x*y])/size_voxel; //interplate int ver_index=vertices_number[index]; int i=0; while(numberVer!=0) { if(d_triTable[cases][i]!=-1)////// { //computeVetexNormal(d_triTable[cases][i], color, d_edgeVertices, v, normal, &normals[ver_index++], &points[ver_index++], size_voxel, isolevels); computeVetexNormal(d_triTable[cases][i], color, d_edgeVertices, v, normal, &normals[ver_index], &points[ver_index], size_voxel, isolevels); ver_index++; numberVer--; } i++; } } } __device__ void computeGi( float * B) { int m=3,n=3; // particle index float S[3]; float U[9]; float V[9]; float e[3]; float work[3]; int nct = min( m-1, n ); int nrt = max( 0, n-2 ); int i=0, j=0, k=0; for( k=0; k<max(nct,nrt); ++k ) { if( k < nct ) { // Compute the transformation for the k-th column and // place the k-th diagonal in s[k]. // Compute 2-norm of k-th column without under/overflow. S[k] = 0; for( i=k; i<m; ++i ) S[k] = hypot( S[k], B[i*n+k] ); if( S[k] != 0 ) { if( B[k*n+k] < 0 ) S[k] = -S[k]; for( i=k; i<m; ++i ) B[i*n+k] /= S[k]; B[k*n+k] += 1; } S[k] = -S[k]; } for( j=k+1; j<n; ++j ) { if( (k < nct) && ( S[k] != 0 ) ) { // apply the transformation float t = 0; for( i=k; i<m; ++i ) t += B[i*n+k] * B[i*n+j]; t = -t / B[k*n+k]; for( i=k; i<m; ++i ) B[i*n+j] += t*B[i*n+k]; } e[j] = B[k*n+j]; } // Place the transformation in U for subsequent back // multiplication. if( (k < nct) ) for( i=k; i<m; ++i ) U[i*n+k] = B[i*n+k]; if( k < nrt ) { // Compute the k-th row transformation and place the // k-th super-diagonal in e[k]. // Compute 2-norm without under/overflow. 
e[k] = 0; for( i=k+1; i<n; ++i ) e[k] = hypot( e[k], e[i] ); if( e[k] != 0 ) { if( e[k+1] < 0 ) e[k] = -e[k]; for( i=k+1; i<n; ++i ) e[i] /= e[k]; e[k+1] += 1; } e[k] = -e[k]; if( (k+1 < m) && ( e[k] != 0 ) ) { // apply the transformation for( i=k+1; i<m; ++i ) work[i] = 0; for( j=k+1; j<n; ++j ) for( i=k+1; i<m; ++i ) work[i] += e[j] * B[i*n+j]; for( j=k+1; j<n; ++j ) { float t = -e[j]/e[k+1]; for( i=k+1; i<m; ++i ) B[i*n+j] += t * work[i]; } } // Place the transformation in V for subsequent // back multiplication. for( i=k+1; i<n; ++i ) V[i*n+k] = e[i]; } } // Set up the final bidiagonal matrix or order p. //cout<<B<<endl; int p = n; if( nct < n ) S[nct] = B[nct*n+nct]; if( m < p ) S[p-1] = 0; if( nrt+1 < p ) e[nrt] = B[nrt*n+p-1]; e[p-1] = 0; // if required, generate U for( j=nct; j<n; ++j ) { for( i=0; i<m; ++i ) U[i*n+j] = 0; U[j*n+j] = 1; } for( k=nct-1; k>=0; --k ) if( S[k] != 0 ) { for( j=k+1; j<n; ++j ) { float t = 0; for( i=k; i<m; ++i ) t += U[i*n+k] * U[i*n+j]; t = -t / U[k*n+k]; for( i=k; i<m; ++i ) U[i*n+j] += t * U[i*n+k]; } for( i=k; i<m; ++i ) U[i*n+k] = -U[i*n+k]; U[k*n+k] = 1 + U[k*n+k]; for( i=0; i<k-1; ++i ) U[i*n+k] = 0; } else { for( i=0; i<m; ++i ) U[i*n+k] = 0; U[k*n+k] = 1; } // if required, generate V for( k=n-1; k>=0; --k ) { if( (k < nrt) && ( e[k] != 0 ) ) for( j=k+1; j<n; ++j ) { float t = 0; for( i=k+1; i<n; ++i ) t += V[i*n+k] * V[i*n+j]; t = -t / V[(k+1)*n+k]; for( i=k+1; i<n; ++i ) V[i*n+j] += t * V[i*n+k]; } for( i=0; i<n; ++i ) V[i*n+k] = 0; V[k*n+k] = 1; } int pp = p-1; int iter = 0; double eps = pow( 2.0, -5); while( p > 0 ) { int k = 0; int kase = 0; // Here is where a test for too many iterations would go. // This section of the program inspects for negligible // elements in the s and e arrays. On completion the // variables kase and k are set as follows. // kase = 1 if s(p) and e[k-1] are negligible and k<p // kase = 2 if s(k) is negligible and k<p // kase = 3 if e[k-1] is negligible, k<p, and // s(k), ..., s(p) are not negligible // kase = 4 if e(p-1) is negligible (convergence). for( k=p-2; k>=-1; --k ) { if( k == -1 ) break; if( abs(e[k]) <= eps*( abs(S[k])+abs(S[k+1]) ) ) { e[k] = 0; break; } } if( k == p-2 ) kase = 4; else { int ks; for( ks=p-1; ks>=k; --ks ) { if( ks == k ) break; float t = ( (ks != p) ? abs(e[ks]) : 0 ) + ( (ks != k+1) ? abs(e[ks-1]) : 0 ); if( abs(S[ks]) <= eps*t ) { S[ks] = 0; break; } } if( ks == k ) kase = 3; else if( ks == p-1 ) kase = 1; else { kase = 2; k = ks; } } k++; // Perform the task indicated by kase. 
switch( kase ) { // deflate negligible s(p) case 1: { float f = e[p-2]; e[p-2] = 0; for( j=p-2; j>=k; --j ) { float t = hypot( S[j], f ); float cs = S[j] / t; float sn = f / t; S[j] = t; if( j != k ) { f = -sn * e[j-1]; e[j-1] = cs * e[j-1]; } for( i=0; i<n; ++i ) { t = cs*V[i*n+j] + sn*V[i*n+p-1]; V[i*n+p-1] = -sn*V[i*n+j] + cs*V[i*n+p-1]; V[i*n+j] = t; } } } break; // split at negligible s(k) case 2: { float f = e[k-1]; e[k-1] = 0; for( j=k; j<p; ++j ) { float t = hypot( S[j], f ); float cs = S[j] / t; float sn = f / t; S[j] = t; f = -sn * e[j]; e[j] = cs * e[j]; for( i=0; i<m; ++i ) { t = cs*U[i*n+j] + sn*U[i*n+k-1]; U[i*n+k-1] = -sn*U[i*n+j] + cs*U[i*n+k-1]; U[i*n+j] = t; } } } break; // perform one qr step case 3: { // calculate the shift float scale = max( max( max( max( abs(S[p-1]), abs(S[p-2]) ), abs(e[p-2]) ), abs(S[k]) ), abs(e[k]) ); float sp = S[p-1] / scale; float spm1 = S[p-2] / scale; float epm1 = e[p-2] / scale; float sk = S[k] / scale; float ek = e[k] / scale; float b = ( (spm1+sp)*(spm1-sp) + epm1*epm1 ) / 2.0; float c = (sp*epm1) * (sp*epm1); float shift = 0; if( ( b != 0 ) || ( c != 0 ) ) { shift = sqrt( b*b+c ); if( b < 0 ) shift = -shift; shift = c / ( b+shift ); } float f = (sk+sp)*(sk-sp) + shift; float g = sk * ek; // chase zeros for( j=k; j<p-1; ++j ) { float t = hypot( f, g ); float cs = f / t; float sn = g / t; if( j != k ) e[j-1] = t; f = cs*S[j] + sn*e[j]; e[j] = cs*e[j] - sn*S[j]; g = sn * S[j+1]; S[j+1] = cs * S[j+1]; //Forward transformation of YT for( i=0; i<n; ++i ) { t = cs*V[i*n+j] + sn*V[i*n+j+1]; V[i*n+j+1] = -sn*V[i*n+j] + cs*V[i*n+j+1]; V[i*n+j] = t; } t = hypot( f, g ); cs = f / t; sn = g / t; S[j] = t; f = cs*e[j] + sn*S[j+1]; S[j+1] = -sn*e[j] + cs*S[j+1]; g = sn * e[j+1]; e[j+1] = cs * e[j+1]; if( ( j < m-1 ) ) for( i=0; i<m; ++i ) { t = cs*U[i*n+j] + sn*U[i*n+j+1]; U[i*n+j+1] = -sn*U[i*n+j] + cs*U[i*n+j+1]; U[i*n+j] = t; } } e[p-2] = f; iter = iter + 1; } break; // convergence case 4: { // Make the singular values positive. if( S[k] <= 0 ) { S[k] = ( S[k] < 0 ) ? -S[k] : 0; for( i=0; i<=pp; ++i ) V[i*n+k] = -V[i*n+k]; } // Order the singular values. 
while( k < pp ) { if( S[k] >= S[k+1] ) break; float t = S[k]; S[k] = S[k+1]; S[k+1] = t; if(( k < n-1 ) ) for( i=0; i<n; ++i ) { float temp=V[i*n+k]; V[i*n+k]=V[i*n+k+1]; V[i*n+k+1] =temp; }; if( ( k < m-1 ) ) for( i=0; i<m; ++i ) { float temp=U[i*n+k]; U[i*n+k]=U[i*n+k+1]; U[i*n+k+1] =temp; } k++; } iter = 0; p--; } break; } } float kr=4.0; float ks=1400.0; //float kn=.5; for(int i=2;i<3;i++) { S[i]=1/(ks*max(S[i],S[1]/kr)); } for(int i=0;i<9;i++) { B[i]=0.0; } for(int i=0;i<3;i++) { for(int j=0;j<3;j++) { for(int k=0;k<3;k++) { B[i*3+j]+=S[k]*U[i*3+k]*V[k*3+j]; } } } } __global__ void computePressure ( char* bufPnts, int numPnt) // bufPnts = mBuf[0].data; { uint ndx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if(ndx<numPnt) { float3* pos = (float3*) (bufPnts + __mul24(ndx, simData.stride)); float3* posi; char *dat2; //char *length; //Fluid *p, *q; int cnt = 0; float3 dist; double sum, dsq, c; double d,/* d2,*/ mR, mR2; d = simData.sim_scale; mR =simData.smooth_rad; mR2 = mR*mR; sum = 0.0; for(int i = 0; i < numPnt; i ++){ dat2 = bufPnts + i*simData.stride; posi = (float3*)(dat2); if ( pos==posi ) continue; // dist.x = ( pos->x - posi->x)*d; // dist in cm // dist.y = ( pos->y - posi->y)*d; // dist.z = ( pos->z - posi->z)*d; //dist = (pos - posi)*make_float3(d); dist = (*pos - *posi)*d; dsq = dot(dist,dist); if ( mR2 > dsq ) { c = mR2 - dsq; sum += c * c * c; cnt++; //if ( p == m_CurrP ) q->tag = true; } } sum = sum * simData.pmass * simData.poly6kern; if( sum ==0.0) sum = 1.0; *(float*) ((char*)pos + OFFSET_PRESS) = ( sum - simData.rest_dens ) * simData.stiffness; *(float*) ((char*)pos + OFFSET_DENS) = 1.0f / sum; } //} //__syncthreads (); } __global__ void computeForce ( char* bufPnts,int numPnt, float ** out_matrix, float3 * mean_points) { uint ndx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( ndx < numPnt ) { float3* pos = (float3*) (bufPnts + __mul24(ndx, simData.stride)); float3* posi; float3 force; register double termPressure, termVelo, termDist; double /*c,*/ r, /*d, sum,*/ dsq; float3 dist; double mR, mR2; float press = *(float*) ((char*)pos + OFFSET_PRESS); float dens = *(float*) ((char*)pos + OFFSET_DENS); float3 veval = *(float3*) ((char*)pos + OFFSET_VEVAL ); float3 qeval; mR =simData.smooth_rad;// m_Param[SPH_SMOOTHRADIUS]; mR2 = (mR*mR); termVelo = simData.lapkern * simData.visc; force = make_float3(0,0,0); float weight_sum = 0.0; float3 mean_point=make_float3(0,0,0); float weights[2047]; for(int i = 0; i<numPnt;i++) { posi = (float3 *)(bufPnts + i * simData.stride); weights[i]=0.0; if ( pos == posi ) continue; dist = (*pos - *posi)*simData.sim_scale; dsq = dot(dist,dist);//(dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); if ( mR2 > dsq ) { r = sqrtf ( dsq ); //Force termPressure = -0.5f * (mR - r) * simData.spikykern * ( press + *(float*)((char*)posi+OFFSET_PRESS)) / r; termDist = (mR - r) * dens * *(float*)((char*)posi+OFFSET_DENS); qeval = *(float3*)((char*)posi+OFFSET_VEVAL); force.x += ( termPressure * dist.x + termVelo * (qeval.x - veval.x) ) * termDist; force.y += ( termPressure * dist.y + termVelo * (qeval.y - veval.y) ) * termDist; force.z += ( termPressure * dist.z + termVelo * (qeval.z - veval.z) ) * termDist; //MeanPoint float weight=1.0-r*r*r/mR/mR/mR; mean_point+=weight*(*posi); weight_sum+=weight; weights[i]=weight; } } *(float3*) ((char*)pos + OFFSET_FORCE ) = force; ///// mean matri if (weight_sum<pow( 2.0, -5)) mean_point=*pos; else mean_point/=weight_sum; mean_points[ndx]=mean_point; float 
*out=out_matrix[ndx]; // float out[9]; for(int i=0;i<9;i++) { out[i]=0.0; } for(int i=0;i<numPnt;i++) { float3 data = *((float3 *)(bufPnts+__mul24(i, simData.stride))); data=data-mean_point; out[0]+=weights[i]*data.x*data.x; out[1]+=weights[i]*data.x*data.y; out[2]+=weights[i]*data.x*data.z; out[3]+=weights[i]*data.y*data.x; out[4]+=weights[i]*data.y*data.y; out[5]+=weights[i]*data.y*data.z; out[6]+=weights[i]*data.z*data.x; out[7]+=weights[i]*data.z*data.y; out[8]+=weights[i]*data.z*data.z; /*out[0]=data.x; out[1]=data.y; out[2]=data.z; out[3]=data.x; out[4]=data.y; out[5]=data.z; out[6]=data.x; out[7]=data.y; out[8]=data.z;*/ /*out[0]=weights[i]; out[1]=weights[i]; out[2]=weights[i]; out[3]=weights[i]; out[4]=weights[i]; out[5]=weights[i]; out[6]=weights[i]; out[7]=weights[i];*/ } if(weight_sum>pow( 2.0, -5)) { for(int i=0;i<9;i++) { out[i]/=weight_sum; //out[i]=weights[i]; } } ////// computeGi(out); //float * o=out_matrix[ndx]; //for(int i=0;i<9;i++) //{ // o[i]=out[i]; //} } } __global__ void advanceParticles ( char* bufPnts, int numPnt, float dt,float ss,float time ) { // uint ndx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index /* if ( ndx < numPnt ) { char *ptnData_start, *ptnData_end; // Fluid* pnt; float3 norm, z, dir, acc, vnext, min, max; double adj; float speedLimit, speedLimit2, simScale, particleRadius; float stiff, damp, speed, diff; speedLimit = simData.speedLimit; speedLimit2 = speedLimit*speedLimit; stiff = simData.stiffness; damp = simData.damp; particleRadius = simData.particleRadius;// m_Param[SPH_PRADIUS]; min = simData.min;//m_Vec[SPH_VOLMIN]; max = simData.max;//m_Vec[SPH_VOLMAX]; simScale = ss;//m_Param[SPH_SIMSCALE]; float3* pos = (float3*) (bufPnts + __mul24(ndx, simData.stride)); float3* veval =(float3*) ((char*)pos + OFFSET_VEVAL ); float3* vel = (float3*) ((char*)pos + OFFSET_VEL ); // Compute acceration acc = *(float3*) ((char*)pos + OFFSET_FORCE ); //acc *= simData.pmass; // acc *= (1/m_Param[SPH_PMASS]); // Velocity limiting speed = acc.x*acc.x + acc.y*acc.y + acc.z*acc.z; if ( speed > speedLimit2 ) { acc.x *= speedLimit / sqrt(speed); acc.y *= speedLimit / sqrt(speed); acc.z *= speedLimit / sqrt(speed); } // Boundary Conditions // Z-axis walls diff = 2 * particleRadius - ( pos->z - min.z - (pos->x - min.x) * simData.slope )*simScale; if (diff > COLLI_DET ) { norm = make_float3 ( -simData.slope, 0, 1.0 - simData.slope ); adj = stiff * diff - damp * (norm.x*veval.x+norm.y*veval.y+norm.z+veval.z);//norm.Dot ( veval ); acc.x += adj * norm.x; acc.y += adj * norm.y; acc.z += adj * norm.z; } diff = 2 * particleRadius - ( max.z - pos->z )*simScale; if (diff > COLLI_DET) { norm = make_float3 ( 0, 0, -1 ); adj = stiff * diff - damp * (norm.x*veval.x+norm.y*veval.y+norm.z*veval.z);//norm.Dot ( pnt->vel_eval ); acc.x += adj * norm.x; acc.y += adj * norm.y; acc.z += adj * norm.z; } // X-axis walls diff = 2 * particleRadius - ( pos->x - min.x + (sin(time*10.0)-1+(pos->y*0.025)*0.25) * simData.xminSin )*simScale; //diff = 2 * particleRadius - ( pnt->pos.x - min.x + (sin(time*10.0)-1) * m_Param[FORCE_XMIN_SIN] )*simScale; if (diff > COLLI_DET ) { norm = make_float3( 1.0, 0, 0 ); adj = (simData.xminSin + 1) * stiff * diff - damp * (norm.x*veval.x+norm.y*veval.y+norm.z*veval.z);//norm.Dot ( pnt->vel_eval ) ; acc.x += adj * norm.x; acc.y += adj * norm.y; acc.z += adj * norm.z; } diff = 2 * particleRadius - ( max.x - pos->x + (sin(time*10.0)-1) * simData.xmaxSin )*simScale; if (diff > COLLI_DET) { norm = make_float3( -1, 0, 0 ); adj = (simData.xmaxSin+1) * 
stiff * diff - damp * (norm.x*veval.x+norm.y*veval.y+norm.z*veval.z);//.Dot ( pnt->vel_eval ); acc.x += adj * norm.x; acc.y += adj * norm.y; acc.z += adj * norm.z; } // Y-axis walls diff = 2 * particleRadius - ( pos->y - min.y )*simScale; if (diff > COLLI_DET) { norm = make_float3 ( 0, 1, 0 ); adj = stiff * diff - damp * (norm.x*veval.x+norm.y*veval.y+norm.z*veval.z);//norm.Dot ( pnt->vel_eval ); acc.x += adj * norm.x; acc.y += adj * norm.y; acc.z += adj * norm.z; } diff = 2 * particleRadius - ( max.y - pos->y )*simScale; if (diff > COLLI_DET) { norm = make_float3 ( 0, -1, 0 ); adj = stiff * diff - damp * (norm.x*veval.x+norm.y*veval.y+norm.z*veval.z);//norm.Dot ( pnt->vel_eval ); acc.x += adj * norm.x; acc.y += adj * norm.y; acc.z += adj * norm.z; } // Plane gravity //if ( simData.gravity > 0) acc.z += simData.gravity; // Leapfrog Integration ---------------------------- vnext = acc; vnext.x *= dt; vnext.y *= dt; vnext.z *= dt; vnext.x += vel.x; // v(t+1/2) = v(t-1/2) + a(t) dt vnext.y += vel.y; vnext.z += vel.z; veval = vel; veval += vnext; veval *= 0.5; // v(t+1) = [v(t-1/2) + v(t+1/2)] * 0.5 used to compute forces later vel = vnext; vnext *= dt/simScale; pos += vnext; // p(t+1) = p(t) + v(t+1/2) dt */ /* if ( m_Param[CLR_MODE]==1.0 ) { adj = fabs(vnext.x)+fabs(vnext.y)+fabs(vnext.z) / 7000.0; adj = (adj > 1.0) ? 1.0 : adj; pnt->clr = COLORA( 0, adj, adj, 1 ); } if ( m_Param[CLR_MODE]==2.0 ) { float v = 0.5 + ( pnt->pressure / 1500.0); if ( v < 0.1 ) v = 0.1; if ( v > 1.0 ) v = 1.0; pnt->clr = COLORA ( v, 1-v, 0, 1 ); } */ // Euler integration ------------------------------- /* acc += m_Gravity; acc *= m_DT; pnt->vel += acc; // v(t+1) = v(t) + a(t) dt pnt->vel_eval += acc; pnt->vel_eval *= m_DT/d; pnt->pos += pnt->vel_eval; pnt->vel_eval = pnt->vel; */ /* if ( m_Toggle[WRAP_X] ) { diff = pnt->pos.x - (m_Vec[SPH_VOLMIN].x + 2); // -- Simulates object in center of flow if ( diff <= 0 ) { pnt->pos.x = (m_Vec[SPH_VOLMAX].x - 2) + diff*2; pnt->pos.z = 10; } } } */ // time += m_DT; uint ndx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( ndx < numPnt ) { // Get particle vars float3* pos = (float3*) (bufPnts + __mul24(ndx, simData.stride)); float3* vel = (float3*) ((char*)pos + OFFSET_VEL ); float3* veval = (float3*) ((char*)pos + OFFSET_VEVAL ); float3 accel = *(float3*) ((char*)pos + OFFSET_FORCE ); float3 vcurr, vnext; float3 norm,/* z, dir,*/ min, max; double adj; float speedLimit, speedLimit2, simScale, particleRadius; float stiff, damp, speed, diff; speedLimit = 200.0;//simData.speedLimit; speedLimit2 = speedLimit*speedLimit; particleRadius = 0.004;//simData.particleRadius; stiff = 10000.0;//simData.stiffness; damp = 256;//simData.damp; simScale = simData.sim_scale; min = /*simData.min;*/make_float3(-25,-20,0); max = /*simData.max;*/make_float3(25,20,40);//simData.max; // Leapfrog integration accel.x *= 0.00020543; // NOTE - To do: SPH_PMASS should be passed in accel.y *= 0.00020543; accel.z *= 0.00020543; speed = accel.x*accel.x + accel.y*accel.y + accel.z*accel.z; if ( speed > speedLimit2 ) { accel.x *= speedLimit/sqrt(speed); accel.y *= speedLimit/sqrt(speed); accel.z *= speedLimit/sqrt(speed); } diff = 2 * particleRadius - ( pos->z - min.z - (pos->x - min.x) *simData.slope )*simScale; if (diff > COLLI_DET ) { norm = make_float3 ( -simData.slope, 0, 1.0 - simData.slope ); adj = stiff * diff - damp * (norm.x*veval->x+norm.y*veval->y+norm.z*veval->z);//norm.Dot ( veval ); //accel.x = -accel.x;accel.y=-accel.y;accel.z=-accel.z; accel.x += adj * norm.x; accel.y += 
adj * norm.y; accel.z += adj * norm.z; } diff = 2 * particleRadius - ( max.z - pos->z )*simScale; if (diff > COLLI_DET) { norm = make_float3 ( 0, 0, -1 ); adj = stiff * diff - damp * (norm.x*veval->x+norm.y*veval->y+norm.z*veval->z);//norm.Dot ( pnt->vel_eval ); accel.x += adj * norm.x; accel.y += adj * norm.y; accel.z += adj * norm.z; } // X-axis walls diff = 2 * particleRadius - ( pos->x - min.x + (sin(time*10.0)-1+(pos->y*0.025)*0.25) * simData.xminSin )*simScale; //diff = 2 * particleRadius - ( pnt->pos.x - min.x + (sin(time*10.0)-1) * m_Param[FORCE_XMIN_SIN] )*simScale; if (diff > COLLI_DET ) { norm = make_float3( 1.0, 0, 0 ); adj = (simData.xminSin + 1) * stiff * diff - damp * (norm.x*veval->x+norm.y*veval->y+norm.z*veval->z);//norm.Dot ( pnt->vel_eval ) ; accel.x += adj * norm.x; accel.y += adj * norm.y; accel.z += adj * norm.z; } diff = 2 * particleRadius - ( max.x - pos->x + (sin(time*10.0)-1) * simData.xmaxSin )*simScale; if (diff > COLLI_DET) { norm = make_float3( -1, 0, 0 ); adj = (simData.xmaxSin+1) * stiff * diff - damp * (norm.x*veval->x+norm.y*veval->y+norm.z*veval->z);//.Dot ( pnt->vel_eval ); accel.x += adj * norm.x; accel.y += adj * norm.y; accel.z += adj * norm.z; } // Y-axis walls diff = 2 * particleRadius - ( pos->y - min.y )*simScale; if (diff > COLLI_DET) { norm = make_float3 ( 0, 1, 0 ); adj = stiff * diff - damp * (norm.x*veval->x+norm.y*veval->y+norm.z*veval->z);//norm.Dot ( pnt->vel_eval ); accel.x += adj * norm.x; accel.y += adj * norm.y; accel.z += adj * norm.z; } diff = 2 * particleRadius - ( max.y - pos->y )*simScale; if (diff > COLLI_DET) { norm = make_float3 ( 0, -1, 0 ); adj = stiff * diff - damp * (norm.x*veval->x+norm.y*veval->y+norm.z*veval->z);//norm.Dot ( pnt->vel_eval ); accel.x += adj * norm.x; accel.y += adj * norm.y; accel.z += adj * norm.z; } accel.z -= 9.8; vcurr = *vel; vnext.x = accel.x*dt+vcurr.x; vnext.y = accel.y*dt+vcurr.y; vnext.z = accel.z*dt+vcurr.z; accel.x = (vcurr.x+vnext.x)*0.5; accel.y = (vcurr.y+vnext.y)*0.5; accel.z = (vcurr.z+vnext.z)*0.5; *veval = accel; *vel =vnext; dt /= simData.sim_scale; vnext.x = pos->x + vnext.x*dt; vnext.y = pos->y + vnext.y*dt; vnext.z = pos->z + vnext.z*dt; *pos = vnext; } /* vcurr = *vel; vnext.x = accel.x*dt + vcurr.x; vnext.y = accel.y*dt + vcurr.y; vnext.z = accel.z*dt + vcurr.z; // v(t+1/2) = v(t-1/2) + a(t) dt accel.x = (vcurr.x + vnext.x) * 0.5; // v(t+1) = [v(t-1/2) + v(t+1/2)] * 0.5 used to compute forces later accel.y = (vcurr.y + vnext.y) * 0.5; // v(t+1) = [v(t-1/2) + v(t+1/2)] * 0.5 used to compute forces later accel.z = (vcurr.z + vnext.z) * 0.5; // v(t+1) = [v(t-1/2) + v(t+1/2)] * 0.5 used to compute forces later *veval = accel; *vel = vnext; dt /= simData.sim_scale; vnext.x = pos->x + vnext.x*dt; vnext.y = pos->y + vnext.y*dt; vnext.z = pos->z + vnext.z*dt; *pos = vnext; // p(t+1) = p(t) + v(t+1/2) dt } */ __syncthreads (); } #endif
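The advanceParticles kernel above applies a leapfrog step with hard-coded mass, stiffness, damping and domain bounds. A minimal sketch of that update as a standalone device helper, assuming the usual CUDA float3 helpers; the function name and signature are illustrative, not taken from the file:

__device__ void leapfrogStep(float3* pos, float3* vel, float3* veval,
                             float3 accel, float dt, float simScale)
{
    // v(t+1/2) = v(t-1/2) + a(t) dt
    float3 vnext = make_float3(vel->x + accel.x * dt,
                               vel->y + accel.y * dt,
                               vel->z + accel.z * dt);
    // v(t) estimate, used when forces are evaluated on the next step
    *veval = make_float3(0.5f * (vel->x + vnext.x),
                         0.5f * (vel->y + vnext.y),
                         0.5f * (vel->z + vnext.z));
    *vel = vnext;
    // p(t+1) = p(t) + v(t+1/2) dt, converted back to world units
    float s = dt / simScale;
    *pos = make_float3(pos->x + vnext.x * s,
                       pos->y + vnext.y * s,
                       pos->z + vnext.z * s);
}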
11424f50ea5514e5aa4a89d501e12816d273d4fc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void kernel(int* a) { int idx = blockIdx.x * blockDim.x + threadIdx.x; // a[idx] = 7; a[idx] = blockIdx.x; // a[idx] = threadIdx.x; } int main() { int dc = -1; hipGetDeviceCount(&dc); printf("%d device(s) present\n", dc); int cd = -1; hipGetDevice(&cd); printf("device id used: %d\n", cd); int dimx = 32; int num_bytes = dimx * sizeof(int); int *d_a = 0, *h_a = 0; h_a = (int*)malloc(num_bytes); int rv = hipMalloc((void**)&d_a, num_bytes); if (0==h_a) { printf("couldn't allocate host memory\n"); } if (0==d_a) { printf("couldn't allocate device memory\n"); } hipMemset(d_a, 0, num_bytes); hipMemcpy(d_a, h_a, num_bytes, hipMemcpyHostToDevice); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( kernel), dim3(8), dim3(8), 0, 0, d_a); hipEventRecord(stop, 0); hipEventSynchronize(stop); float et; hipEventElapsedTime(&et, start, stop); hipEventDestroy(start); hipEventDestroy(stop); printf("kernel execution time: %8.6fms\n", et); hipMemcpy(h_a, d_a, num_bytes, hipMemcpyDeviceToHost); for(int i=0; i<dimx; i++) { printf("%d ", h_a[i]); } printf("\n"); free(h_a); hipFree(d_a); return 0; }
11424f50ea5514e5aa4a89d501e12816d273d4fc.cu
#include <stdio.h> __global__ void kernel(int* a) { int idx = blockIdx.x * blockDim.x + threadIdx.x; // a[idx] = 7; a[idx] = blockIdx.x; // a[idx] = threadIdx.x; } int main() { int dc = -1; cudaGetDeviceCount(&dc); printf("%d device(s) present\n", dc); int cd = -1; cudaGetDevice(&cd); printf("device id used: %d\n", cd); int dimx = 32; int num_bytes = dimx * sizeof(int); int *d_a = 0, *h_a = 0; h_a = (int*)malloc(num_bytes); int rv = cudaMalloc((void**)&d_a, num_bytes); if (0==h_a) { printf("couldn't allocate host memory\n"); } if (0==d_a) { printf("couldn't allocate device memory\n"); } cudaMemset(d_a, 0, num_bytes); cudaMemcpy(d_a, h_a, num_bytes, cudaMemcpyHostToDevice); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); kernel<<<8, 8>>>(d_a); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float et; cudaEventElapsedTime(&et, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("kernel execution time: %8.6fms\n", et); cudaMemcpy(h_a, d_a, num_bytes, cudaMemcpyDeviceToHost); for(int i=0; i<dimx; i++) { printf("%d ", h_a[i]); } printf("\n"); free(h_a); cudaFree(d_a); return 0; }
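Both versions of this small test store the allocation's return code in rv but never inspect it, and the later runtime calls are unchecked as well. A hedged sketch of a checking macro that could wrap those calls (hypothetical helper, not part of either file):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Fail loudly on any CUDA runtime error instead of ignoring the return code.
#define CHECK_CUDA(call)                                                   \
    do {                                                                   \
        cudaError_t err_ = (call);                                         \
        if (err_ != cudaSuccess) {                                         \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,             \
                    cudaGetErrorString(err_));                             \
            exit(EXIT_FAILURE);                                            \
        }                                                                  \
    } while (0)

// usage: CHECK_CUDA(cudaMalloc((void**)&d_a, num_bytes));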
b80031f4077a7b102281aeea80b6a918ac1ce8b2.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "Initialize_Kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int size = XSIZE*YSIZE; unsigned int *randoms = NULL; hipMalloc(&randoms, XSIZE*YSIZE); int *bestSeen = NULL; hipMalloc(&bestSeen, XSIZE*YSIZE); int *origin = NULL; hipMalloc(&origin, XSIZE*YSIZE); int *mis = NULL; hipMalloc(&mis, XSIZE*YSIZE); int *incomplete = NULL; hipMalloc(&incomplete, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( Initialize_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,randoms,bestSeen,origin,mis,incomplete); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( Initialize_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,randoms,bestSeen,origin,mis,incomplete); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( Initialize_Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, size,randoms,bestSeen,origin,mis,incomplete); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
b80031f4077a7b102281aeea80b6a918ac1ce8b2.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "Initialize_Kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int size = XSIZE*YSIZE; unsigned int *randoms = NULL; cudaMalloc(&randoms, XSIZE*YSIZE); int *bestSeen = NULL; cudaMalloc(&bestSeen, XSIZE*YSIZE); int *origin = NULL; cudaMalloc(&origin, XSIZE*YSIZE); int *mis = NULL; cudaMalloc(&mis, XSIZE*YSIZE); int *incomplete = NULL; cudaMalloc(&incomplete, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); Initialize_Kernel<<<gridBlock,threadBlock>>>(size,randoms,bestSeen,origin,mis,incomplete); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { Initialize_Kernel<<<gridBlock,threadBlock>>>(size,randoms,bestSeen,origin,mis,incomplete); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { Initialize_Kernel<<<gridBlock,threadBlock>>>(size,randoms,bestSeen,origin,mis,incomplete); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
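cudaMalloc and hipMalloc take a size in bytes, while this harness passes XSIZE*YSIZE directly, so each buffer gets one byte per element rather than one int; whether that matters depends on how Initialize_Kernel (not shown) indexes the buffers. The conventional sizeof-based form, for comparison (names follow the harness):

// Allocation sketch with explicit element sizes.
unsigned int* randoms = nullptr;
cudaMalloc(&randoms, (size_t)XSIZE * YSIZE * sizeof(unsigned int));
int* bestSeen = nullptr;
cudaMalloc(&bestSeen, (size_t)XSIZE * YSIZE * sizeof(int));
int* origin = nullptr;
cudaMalloc(&origin, (size_t)XSIZE * YSIZE * sizeof(int));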
1532243510b542008ee6b91494a5db08ea68d8e7.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <hip/hip_runtime.h> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include <thrust/device_vector.h> #include <thrust/for_each.h> #include <cudf/utilities/error.hpp> #include "nvstrings/NVStrings.h" #include "../custring_view.cuh" #include "../util.h" #include "./NVStringsImpl.h" // remove the target characters from the beginning of each string NVStrings* NVStrings::lstrip(const char* to_strip) { unsigned int count = size(); custring_view_array d_strings = pImpl->getStringsPtr(); auto execpol = rmm::exec_policy(0); char* d_strip = nullptr; if (to_strip) { int len = (int)strlen(to_strip) + 1; // include null d_strip = device_alloc<char>(len, 0); CUDA_TRY(hipMemcpyAsync(d_strip, to_strip, len, hipMemcpyHostToDevice)) } // compute size of output buffer rmm::device_vector<size_t> lengths(count, 0); size_t* d_lengths = lengths.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_strip, d_lengths] __device__(unsigned int idx) { custring_view* dstr = d_strings[idx]; if (!dstr) return; unsigned int len = dstr->lstrip_size(d_strip); len = ALIGN_SIZE(len); d_lengths[idx] = (size_t)len; }); // create output object NVStrings* rtn = new NVStrings(count); char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths); if (d_buffer == 0) { if (d_strip) RMM_FREE(d_strip, 0); return rtn; // all strings are null } // create offsets rmm::device_vector<size_t> offsets(count, 0); thrust::exclusive_scan(execpol->on(0), lengths.begin(), lengths.end(), offsets.begin()); // do the strip custring_view** d_results = rtn->pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_strip, d_buffer, d_offsets, d_results] __device__(unsigned int idx) { custring_view* dstr = d_strings[idx]; if (!dstr) return; char* buffer = d_buffer + d_offsets[idx]; d_results[idx] = dstr->lstrip(d_strip, buffer); }); // if (d_strip) RMM_FREE(d_strip, 0); return rtn; } // remove the target character from the beginning and the end of each string NVStrings* NVStrings::strip(const char* to_strip) { unsigned int count = size(); custring_view_array d_strings = pImpl->getStringsPtr(); auto execpol = rmm::exec_policy(0); char* d_strip = nullptr; if (to_strip) { int len = (int)strlen(to_strip) + 1; // include null d_strip = device_alloc<char>(len, 0); CUDA_TRY(hipMemcpyAsync(d_strip, to_strip, len, hipMemcpyHostToDevice)) } // compute size of output buffer rmm::device_vector<size_t> lengths(count, 0); size_t* d_lengths = lengths.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_strip, d_lengths] __device__(unsigned int idx) { custring_view* dstr = d_strings[idx]; if (!dstr) return; unsigned int len = dstr->strip_size(d_strip); 
len = ALIGN_SIZE(len); d_lengths[idx] = (size_t)len; }); // create output object NVStrings* rtn = new NVStrings(count); char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths); if (d_buffer == 0) { if (d_strip) RMM_FREE(d_strip, 0); return rtn; } // create offsets rmm::device_vector<size_t> offsets(count, 0); thrust::exclusive_scan(execpol->on(0), lengths.begin(), lengths.end(), offsets.begin()); // do the strip custring_view_array d_results = rtn->pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_strip, d_buffer, d_offsets, d_results] __device__(unsigned int idx) { custring_view* dstr = d_strings[idx]; if (!dstr) return; char* buffer = d_buffer + d_offsets[idx]; d_results[idx] = dstr->strip(d_strip, buffer); }); // if (d_strip) RMM_FREE(d_strip, 0); return rtn; } // remove the target character from the end of each string NVStrings* NVStrings::rstrip(const char* to_strip) { unsigned int count = size(); custring_view_array d_strings = pImpl->getStringsPtr(); auto execpol = rmm::exec_policy(0); char* d_strip = nullptr; if (to_strip) { int len = (int)strlen(to_strip) + 1; // include null d_strip = device_alloc<char>(len, 0); CUDA_TRY(hipMemcpyAsync(d_strip, to_strip, len, hipMemcpyHostToDevice)) } // compute size of output buffer rmm::device_vector<size_t> lengths(count, 0); size_t* d_lengths = lengths.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_strip, d_lengths] __device__(unsigned int idx) { custring_view* dstr = d_strings[idx]; if (!dstr) return; unsigned int len = dstr->rstrip_size(d_strip); len = ALIGN_SIZE(len); d_lengths[idx] = (size_t)len; }); // create output object NVStrings* rtn = new NVStrings(count); char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths); if (d_buffer == 0) { if (d_strip) RMM_FREE(d_strip, 0); return rtn; // all strings are null } // create offsets rmm::device_vector<size_t> offsets(count, 0); thrust::exclusive_scan(execpol->on(0), lengths.begin(), lengths.end(), offsets.begin()); // do the strip custring_view_array d_results = rtn->pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_strip, d_buffer, d_offsets, d_results] __device__(unsigned int idx) { custring_view* dstr = d_strings[idx]; if (!dstr) return; char* buffer = d_buffer + d_offsets[idx]; d_results[idx] = dstr->rstrip(d_strip, buffer); }); // if (d_strip) RMM_FREE(d_strip, 0); return rtn; }
1532243510b542008ee6b91494a5db08ea68d8e7.cu
/* * Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuda_runtime.h> #include <rmm/rmm.h> #include <rmm/thrust_rmm_allocator.h> #include <thrust/device_vector.h> #include <thrust/for_each.h> #include <cudf/utilities/error.hpp> #include "nvstrings/NVStrings.h" #include "../custring_view.cuh" #include "../util.h" #include "./NVStringsImpl.h" // remove the target characters from the beginning of each string NVStrings* NVStrings::lstrip(const char* to_strip) { unsigned int count = size(); custring_view_array d_strings = pImpl->getStringsPtr(); auto execpol = rmm::exec_policy(0); char* d_strip = nullptr; if (to_strip) { int len = (int)strlen(to_strip) + 1; // include null d_strip = device_alloc<char>(len, 0); CUDA_TRY(cudaMemcpyAsync(d_strip, to_strip, len, cudaMemcpyHostToDevice)) } // compute size of output buffer rmm::device_vector<size_t> lengths(count, 0); size_t* d_lengths = lengths.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_strip, d_lengths] __device__(unsigned int idx) { custring_view* dstr = d_strings[idx]; if (!dstr) return; unsigned int len = dstr->lstrip_size(d_strip); len = ALIGN_SIZE(len); d_lengths[idx] = (size_t)len; }); // create output object NVStrings* rtn = new NVStrings(count); char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths); if (d_buffer == 0) { if (d_strip) RMM_FREE(d_strip, 0); return rtn; // all strings are null } // create offsets rmm::device_vector<size_t> offsets(count, 0); thrust::exclusive_scan(execpol->on(0), lengths.begin(), lengths.end(), offsets.begin()); // do the strip custring_view** d_results = rtn->pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_strip, d_buffer, d_offsets, d_results] __device__(unsigned int idx) { custring_view* dstr = d_strings[idx]; if (!dstr) return; char* buffer = d_buffer + d_offsets[idx]; d_results[idx] = dstr->lstrip(d_strip, buffer); }); // if (d_strip) RMM_FREE(d_strip, 0); return rtn; } // remove the target character from the beginning and the end of each string NVStrings* NVStrings::strip(const char* to_strip) { unsigned int count = size(); custring_view_array d_strings = pImpl->getStringsPtr(); auto execpol = rmm::exec_policy(0); char* d_strip = nullptr; if (to_strip) { int len = (int)strlen(to_strip) + 1; // include null d_strip = device_alloc<char>(len, 0); CUDA_TRY(cudaMemcpyAsync(d_strip, to_strip, len, cudaMemcpyHostToDevice)) } // compute size of output buffer rmm::device_vector<size_t> lengths(count, 0); size_t* d_lengths = lengths.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_strip, d_lengths] __device__(unsigned int idx) { custring_view* dstr = d_strings[idx]; if (!dstr) return; unsigned int len = dstr->strip_size(d_strip); len = ALIGN_SIZE(len); d_lengths[idx] = (size_t)len; }); 
// create output object NVStrings* rtn = new NVStrings(count); char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths); if (d_buffer == 0) { if (d_strip) RMM_FREE(d_strip, 0); return rtn; } // create offsets rmm::device_vector<size_t> offsets(count, 0); thrust::exclusive_scan(execpol->on(0), lengths.begin(), lengths.end(), offsets.begin()); // do the strip custring_view_array d_results = rtn->pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_strip, d_buffer, d_offsets, d_results] __device__(unsigned int idx) { custring_view* dstr = d_strings[idx]; if (!dstr) return; char* buffer = d_buffer + d_offsets[idx]; d_results[idx] = dstr->strip(d_strip, buffer); }); // if (d_strip) RMM_FREE(d_strip, 0); return rtn; } // remove the target character from the end of each string NVStrings* NVStrings::rstrip(const char* to_strip) { unsigned int count = size(); custring_view_array d_strings = pImpl->getStringsPtr(); auto execpol = rmm::exec_policy(0); char* d_strip = nullptr; if (to_strip) { int len = (int)strlen(to_strip) + 1; // include null d_strip = device_alloc<char>(len, 0); CUDA_TRY(cudaMemcpyAsync(d_strip, to_strip, len, cudaMemcpyHostToDevice)) } // compute size of output buffer rmm::device_vector<size_t> lengths(count, 0); size_t* d_lengths = lengths.data().get(); thrust::for_each_n(execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_strip, d_lengths] __device__(unsigned int idx) { custring_view* dstr = d_strings[idx]; if (!dstr) return; unsigned int len = dstr->rstrip_size(d_strip); len = ALIGN_SIZE(len); d_lengths[idx] = (size_t)len; }); // create output object NVStrings* rtn = new NVStrings(count); char* d_buffer = rtn->pImpl->createMemoryFor(d_lengths); if (d_buffer == 0) { if (d_strip) RMM_FREE(d_strip, 0); return rtn; // all strings are null } // create offsets rmm::device_vector<size_t> offsets(count, 0); thrust::exclusive_scan(execpol->on(0), lengths.begin(), lengths.end(), offsets.begin()); // do the strip custring_view_array d_results = rtn->pImpl->getStringsPtr(); size_t* d_offsets = offsets.data().get(); thrust::for_each_n( execpol->on(0), thrust::make_counting_iterator<unsigned int>(0), count, [d_strings, d_strip, d_buffer, d_offsets, d_results] __device__(unsigned int idx) { custring_view* dstr = d_strings[idx]; if (!dstr) return; char* buffer = d_buffer + d_offsets[idx]; d_results[idx] = dstr->rstrip(d_strip, buffer); }); // if (d_strip) RMM_FREE(d_strip, 0); return rtn; }
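All three strip variants above follow the same two-pass layout: measure each output string, exclusive-scan the lengths into offsets, then write every result at its own offset into one shared buffer. A minimal sketch of the offset step using the default Thrust policy (the file itself runs it through rmm::exec_policy):

#include <thrust/device_vector.h>
#include <thrust/scan.h>

// Per-string byte counts in, starting offsets out; offsets[0] becomes 0 and
// offsets[i] is the sum of lengths[0..i-1].
void buildOffsets(const thrust::device_vector<size_t>& lengths,
                  thrust::device_vector<size_t>& offsets)
{
    offsets.resize(lengths.size());
    thrust::exclusive_scan(lengths.begin(), lengths.end(), offsets.begin());
}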
a9ef198500fe13182364004dbd4ecb1ca47726eb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdio.h> __global__ void VecAdd(int n, const float *A, const float *B, float* C) { /******************************************************************** * * Compute C = A + B * where A is a (1 * n) vector * where B is a (1 * n) vector * where C is a (1 * n) vector * ********************************************************************/ // INSERT KERNEL CODE HERE int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < n) C[i] = A[i] + B[i]; } void basicVecAdd( float *A, float *B, float *C, int n) { // Initialize thread block and kernel grid dimensions --------------------- const unsigned int BLOCK_SIZE = 256; //INSERT CODE HERE dim3 DimGrid((n-1)/256 + 1, 1, 1); dim3 DimBlock(256, 1, 1); hipLaunchKernelGGL(( VecAdd) , dim3(DimGrid), dim3(DimBlock), 0, 0, n, A, B, C); }
a9ef198500fe13182364004dbd4ecb1ca47726eb.cu
/****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdio.h> __global__ void VecAdd(int n, const float *A, const float *B, float* C) { /******************************************************************** * * Compute C = A + B * where A is a (1 * n) vector * where B is a (1 * n) vector * where C is a (1 * n) vector * ********************************************************************/ // INSERT KERNEL CODE HERE int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < n) C[i] = A[i] + B[i]; } void basicVecAdd( float *A, float *B, float *C, int n) { // Initialize thread block and kernel grid dimensions --------------------- const unsigned int BLOCK_SIZE = 256; //INSERT CODE HERE dim3 DimGrid((n-1)/256 + 1, 1, 1); dim3 DimBlock(256, 1, 1); VecAdd <<<DimGrid, DimBlock>>> (n, A, B, C); }
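basicVecAdd declares BLOCK_SIZE but then hardcodes 256 in the launch configuration. A tiny helper showing the usual ceiling division, which gives the same grid size as (n-1)/256 + 1 for any n >= 1 (helper name is illustrative):

// Ceiling division for grid sizing.
static inline unsigned int gridSizeFor(unsigned int n, unsigned int block)
{
    return (n + block - 1) / block;
}
// e.g. dim3 DimGrid(gridSizeFor(n, BLOCK_SIZE), 1, 1), DimBlock(BLOCK_SIZE, 1, 1);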
c0a3084aee9e4c8e1ae6e4de819102729ad299c6.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "amg_config.h" #include "convergence/combined_rel_ini_abs.h" #include <algorithm> namespace amgx { template<typename TConfig> RelativeAbsoluteCombinedConvergence<TConfig>::RelativeAbsoluteCombinedConvergence(AMG_Config &cfg, const std::string &cfg_scope) : Convergence<TConfig>(cfg, cfg_scope) { } template<class TConfig> void RelativeAbsoluteCombinedConvergence<TConfig>::convergence_init() { this->setTolerance(this->m_cfg->template getParameter<double>("tolerance", this->m_cfg_scope)); this->m_alt_rel_tolerance = this->m_cfg->template getParameter<double>("alt_rel_tolerance", this->m_cfg_scope); } template<class TConfig> bool RelativeAbsoluteCombinedConvergence<TConfig>::convergence_update_and_check(const PODVec_h &nrm, const PODVec_h &nrm_ini) { bool res_converged = true; bool res_converged_abs = true; bool res_converged_abs_precision = true; for (int i = 0; i < nrm.size(); i++) { bool conv_abs = nrm[i] < this->m_tolerance; res_converged_abs = res_converged_abs && conv_abs; bool conv = (nrm[i] / nrm_ini[i] <= this->m_alt_rel_tolerance); res_converged = res_converged && conv; bool conv_abs_precision = (nrm[i] <= ::max(nrm_ini[i] * Epsilon_conv<ValueTypeB>::value(), (PODValueTypeB)(1e-20))); res_converged_abs_precision = res_converged_abs_precision && conv_abs_precision; } if (res_converged_abs_precision) { return true; } return res_converged || res_converged_abs; } /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class RelativeAbsoluteCombinedConvergence<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // end namespace
c0a3084aee9e4c8e1ae6e4de819102729ad299c6.cu
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "amg_config.h" #include "convergence/combined_rel_ini_abs.h" #include <algorithm> namespace amgx { template<typename TConfig> RelativeAbsoluteCombinedConvergence<TConfig>::RelativeAbsoluteCombinedConvergence(AMG_Config &cfg, const std::string &cfg_scope) : Convergence<TConfig>(cfg, cfg_scope) { } template<class TConfig> void RelativeAbsoluteCombinedConvergence<TConfig>::convergence_init() { this->setTolerance(this->m_cfg->template getParameter<double>("tolerance", this->m_cfg_scope)); this->m_alt_rel_tolerance = this->m_cfg->template getParameter<double>("alt_rel_tolerance", this->m_cfg_scope); } template<class TConfig> bool RelativeAbsoluteCombinedConvergence<TConfig>::convergence_update_and_check(const PODVec_h &nrm, const PODVec_h &nrm_ini) { bool res_converged = true; bool res_converged_abs = true; bool res_converged_abs_precision = true; for (int i = 0; i < nrm.size(); i++) { bool conv_abs = nrm[i] < this->m_tolerance; res_converged_abs = res_converged_abs && conv_abs; bool conv = (nrm[i] / nrm_ini[i] <= this->m_alt_rel_tolerance); res_converged = res_converged && conv; bool conv_abs_precision = (nrm[i] <= std::max(nrm_ini[i] * Epsilon_conv<ValueTypeB>::value(), (PODValueTypeB)(1e-20))); res_converged_abs_precision = res_converged_abs_precision && conv_abs_precision; } if (res_converged_abs_precision) { return true; } return res_converged || res_converged_abs; } /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class RelativeAbsoluteCombinedConvergence<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } // end namespace
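Apart from the hipified include and ::max versus std::max, both versions implement the same test: every norm must pass either the absolute tolerance or the alt_rel_tolerance ratio, with an immediate pass once the residual reaches the floating-point floor of its initial norm. A scalar sketch of the per-norm criterion (the class applies each criterion across all norms before combining them):

#include <algorithm>

// Per-norm version of the combined check, for one residual value.
bool normConverged(double nrm, double nrm_ini,
                   double abs_tol, double rel_tol, double eps_conv)
{
    bool abs_ok  = nrm < abs_tol;                               // absolute tolerance
    bool rel_ok  = nrm / nrm_ini <= rel_tol;                    // relative drop
    bool prec_ok = nrm <= std::max(nrm_ini * eps_conv, 1e-20);  // precision floor
    return prec_ok || abs_ok || rel_ok;
}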
bb80dabdf1bec6a936a081097309b8972a2b30ac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "bpn_cuda.h" #define THRES 0.0001 /*Threshold of training error*/ __device__ double computeFunc(double x , Type t){ if(t == Linear) return x; if(t == Sigmoidal){ double out = 1 + exp(-x); return 1 / out; } } __device__ double computeDiff(double x , Type t){ if(t == Linear) return 1; if(t == Sigmoidal){ double out = computeFunc(x , t); return out * (1 - out); } } /*CUDA function to feed z value at the input*/ __global__ void forward_propagate_input(double* z_curr , double* bias , int size ){ int id = threadIdx.x + blockIdx.x * blockDim.x; if(id >= size) return; z_curr[id] += bias[id]; return; } /* CUDA function to propagate z and a values from the input level(level[size - 1]) to the output level(level[0])*/ __global__ void forward_propagate_level(double* a_curr /*a values of the nodes of current level*/ , double* z_curr /*z values of the nodes of the current level*/ , double* weight /*weight connections between current(l) and previous(l + 1) level*/ , double* z_prev /*z values of the nodes of the previous(l + 1) level*/ , double* bias_curr /*bias value of the current level*/ , int prev /*size of previous level*/ , int curr /*size of current level*/ , Type t /*threshold function type of the current level*/ ){ int id = threadIdx.x + blockDim.x * blockIdx.x; if(id >= curr) return; int i; a_curr[id] = 0; for(i = 0 ; i < prev ; i ++) a_curr[id] += weight[id * prev + i] * z_prev[i]; a_curr[id] += bias_curr[id]; z_curr[id] = computeFunc(a_curr[id] , t); return; } /*Function to initialize the delta-values at the output*/ __global__ void reverse_propagate_output(double *delta_curr /*delta value of the current level*/ , double* z_curr /*z value of the current level*/ , double* target /*target value at the output*/ , double* a_curr /*a values at the output*/ , int size /*size of the output level*/ , Type t /*threshold value at the output*/ ){ int id = threadIdx.x + blockIdx.x * blockDim.x; if(id >= size) return; delta_curr[id] = (z_curr[id] - target[id]) * computeDiff(a_curr[id] , t); return; } /*Function to propagate the delta-values from one level to another*/ __global__ void reverse_propagate_level(double *delta_curr , double *delta_next , double* weight_next , double* a_curr , Type t /*Type of threshold function at the current level*/ , int size_next /*Number of nodes at the next level*/ , int size /*Number of nodes at the current level*/ ){ int id = threadIdx.x + blockIdx.x * blockDim.x; if(id >= size) return; int i; delta_curr[id] = 0; for(i = 0 ; i < size_next ; i ++) delta_curr[id] += delta_next[i] * weight_next[i * size + id]; delta_curr[id] *= computeDiff(a_curr[id] , t); return; } /*Function that updates weight between two levels*/ __global__ void weight_update(double* weight /*Weight between current and previous level*/ , double* delta_curr , double* z_prev , int size /*Size of current level*/ , int size_prev , double rate /*Rate at which the weights are updated*/ ){ int id = threadIdx.x + blockIdx.x * blockDim.x; if(id >= size * size_prev) return; int curr_node = id / size_prev; int prev_node = id % size_prev; weight[curr_node * size_prev + prev_node] -= rate * delta_curr[curr_node] * z_prev[prev_node]; return; } /*Function that updates the bias of a level*/ __global__ void bias_update(double* delta_curr , double* bias /*Bias of the current level*/ , int size , double rate){ int id = threadIdx.x + blockIdx.x * blockDim.x; if(id >= size) return; bias[id] -= rate * delta_curr[id]; return; } void 
copyBPNinput(BPN_CUDA* network , double *input){ int inputLevel = network->noLevels - 1; hipMemcpy(network->z_val + network->noNodes - network->nodeSize[inputLevel] , input , network->nodeSize[network->noLevels - 1] * sizeof(double) , hipMemcpyHostToDevice); hipMemcpy(network->a_val + network->noNodes - network->nodeSize[inputLevel], input , network->nodeSize[network->noLevels - 1] * sizeof(double) , hipMemcpyHostToDevice); return; } /*Function to compute the forward propagation of values*/ void forward(BPN_CUDA* network , double *input){ int i , sizePrev , sizeCurr; double* a_curr , double* z_curr , double* weight , double* z_prev , double* bias_curr; Type t = network->type[network->noLevels - 2]; copyBPNinput(network , input); sizePrev = network->nodeSize[network->noLevels - 1]; sizeCurr = network->nodeSize[network->noLevels - 2]; a_curr = network->a_val + network->noNodes - sizePrev - sizeCurr; z_curr = network->z_val + network->noNodes - sizePrev - sizeCurr; weight = network->weight + network->noWeight - sizePrev * sizeCurr; z_prev = network->z_val + network->noNodes - sizePrev; bias_curr = network->bias + network->noNodes - sizePrev - sizeCurr; hipLaunchKernelGGL(( forward_propagate_input), dim3((sizePrev / 1024 + 1)) , dim3((sizePrev > 1024 ? 1024 : sizePrev)), 0, 0, z_prev , bias_curr + sizeCurr , sizePrev); for(i = network->noLevels - 2 ; i > -1 ; i --){ hipLaunchKernelGGL(( forward_propagate_level), dim3((sizeCurr / 1024 + 1)) , dim3((sizeCurr > 1024 ? 1024 : sizeCurr)), 0, 0, a_curr , z_curr , weight , z_prev , bias_curr , sizePrev , sizeCurr , t); if(i == 0) break; if(i == 0) break; z_prev = z_curr; sizePrev = sizeCurr; sizeCurr = network->nodeSize[i - 1]; a_curr = a_curr - sizeCurr; z_curr = z_curr - sizeCurr; bias_curr = bias_curr - sizeCurr; weight = weight - sizePrev * sizeCurr; } } /*Function to compute the reverse propagation of values*/ double reverse(BPN_CUDA* network , double* target){ double *delta_curr , *z_curr , *a_curr , *delta_next , *weight_next , *z_curr_h , *target_d; delta_curr = network->delta; z_curr = network->z_val; a_curr = network->a_val; int size = network->nodeSize[0] , size_next; Type t = network->type[0]; double error = 0; z_curr_h = new double[size]; hipMemcpy(z_curr_h , z_curr , size * sizeof(double) , hipMemcpyDeviceToHost); hipMalloc((void**)&target_d , size * sizeof(double)); hipMemcpy(target_d , target , size * sizeof(double) , hipMemcpyHostToDevice); for(int i = 0 ; i < network->nodeSize[0] ; i ++) error += (target[i] - z_curr_h[i]) * (target[i] - z_curr_h[i]); hipLaunchKernelGGL(( reverse_propagate_output), dim3((size / 1024 + 1)) , dim3((size > 1024 ? 1024 : size)), 0, 0, delta_curr , z_curr , target_d , a_curr , size , t); delta_next = delta_curr; weight_next = network->weight; z_curr = NULL; delta_curr = delta_curr + size; a_curr = a_curr + size; t = network->type[1]; size_next = size; size = network->nodeSize[1]; for(int i = 1 ; i < network->noLevels ; i ++){ hipLaunchKernelGGL(( reverse_propagate_level), dim3((size / 1024 + 1)) , dim3((size > 1024 ? 
1024 : size)), 0, 0, delta_curr , delta_next , weight_next , a_curr , t , size_next , size); if(i == network->noLevels - 1) break; delta_next = delta_curr; weight_next = weight_next + size * size_next; a_curr = a_curr + size; delta_curr = delta_curr + size; size_next = size; size = network->nodeSize[i + 1]; t = network->type[i + 1]; } return error; } void weight_bias_update(BPN_CUDA* network , double rate){ double* weight = network->weight; double* delta_curr = network->delta; int size = network->nodeSize[0]; double* z_prev = network->z_val + size; double* bias = network->bias; int size_prev = network->nodeSize[1]; for(int i = 1 ; i < network->noLevels ; i ++){ hipLaunchKernelGGL(( weight_update), dim3((size * size_prev / 1024 + 1)) , dim3((size * size_prev > 1024 ? 1024 : size * size_prev)), 0, 0, weight , delta_curr , z_prev , size , size_prev , rate); hipLaunchKernelGGL(( bias_update), dim3((size / 1024 + 1)) , dim3((size > 1024 ? 1024 : size)), 0, 0, delta_curr , bias , size , rate); if(i == network->noLevels - 1) break; weight = weight + size * size_prev; delta_curr = delta_curr + size; z_prev = z_prev + size_prev; bias = bias + size; size = size_prev; size_prev = network->nodeSize[i + 1]; } } int train(BPN_CUDA* network , double* input , double* output , int dataset_no , int input_size , int output_size , int total_iterations){ double error; double *ip , *op; int count = 0; if(total_iterations == -1) total_iterations = 1000; while(true){ error = 0; ip = input; op = output; for(int i = 0 ; i < dataset_no ; i ++){ forward(network , ip); error += reverse(network , op); weight_bias_update(network , network->training_rate); ip = ip + input_size; op = op + output_size; } //printf("%f\n" , error); if(error < THRES || count == total_iterations) break; count ++; } return count; } void initialize(BPN_CUDA* network , int* noNodes , int levels , Type* type , double rate){ network->noLevels = levels; network->nodeSize = new int[levels]; network->type = new Type[levels]; network->training_rate = rate; for(int i = 0 ; i < levels ; i ++){ network->nodeSize[i] = noNodes[i]; network->type[i] = type[i]; } int numNodes = 0; int numWeights = 0; for(int i = 0 ; i < levels ; i ++){ numNodes += noNodes[i]; if(i == 0) continue; numWeights += noNodes[i] * noNodes[i - 1]; } double *device_mem; hipMalloc((void**)&network->a_val , numNodes * sizeof(double)); hipMalloc((void**)&network->z_val , numNodes * sizeof(double)); hipMalloc((void**)&network->delta , numNodes * sizeof(double)); hipMalloc((void**)&network->bias , numNodes * sizeof(double)); hipMalloc((void**)&network->weight , numWeights * sizeof(double)); network->noNodes = numNodes; network->noWeight = numWeights; double* initval = new double[numNodes]; double* initweight = new double[numWeights]; time_t t; srand((unsigned)time(&t)); for(int i = 0 ; i < numWeights ; i ++){ initweight[i] = (double)(rand() % 50) / 100000.0; initweight[i] = initweight[i] == 0.0 ? 0.0001 : initweight[i]; if(i < numNodes){ initval[i] = (double)(rand() % 50) / 100000.0; initval[i] = initval[i] == 0.0 ? 
0.0001 : initval[i]; } } if(numWeights == 2)//If number of weight connections is true, then no-weights = no-nodes + 1 initval[2] = 0.0001; hipMemcpy(network->a_val , initval , numNodes * sizeof(double) , hipMemcpyHostToDevice); hipMemcpy(network->z_val , initval , numNodes * sizeof(double) , hipMemcpyHostToDevice); hipMemcpy(network->delta , initval , numNodes * sizeof(double) , hipMemcpyHostToDevice); hipMemcpy(network->bias , initval , numNodes * sizeof(double) , hipMemcpyHostToDevice); hipMemcpy(network->weight , initweight , numWeights * sizeof(double) , hipMemcpyHostToDevice); } void returnOutput(BPN_CUDA* network , double* input , double* output){ int size = network->nodeSize[0]; forward(network , input); hipMemcpy(output , network->z_val , size * sizeof(double) , hipMemcpyDeviceToHost); return; }
bb80dabdf1bec6a936a081097309b8972a2b30ac.cu
#include "bpn_cuda.h" #define THRES 0.0001 /*Threshold of training error*/ __device__ double computeFunc(double x , Type t){ if(t == Linear) return x; if(t == Sigmoidal){ double out = 1 + exp(-x); return 1 / out; } } __device__ double computeDiff(double x , Type t){ if(t == Linear) return 1; if(t == Sigmoidal){ double out = computeFunc(x , t); return out * (1 - out); } } /*CUDA function to feed z value at the input*/ __global__ void forward_propagate_input(double* z_curr , double* bias , int size ){ int id = threadIdx.x + blockIdx.x * blockDim.x; if(id >= size) return; z_curr[id] += bias[id]; return; } /* CUDA function to propagate z and a values from the input level(level[size - 1]) to the output level(level[0])*/ __global__ void forward_propagate_level(double* a_curr /*a values of the nodes of current level*/ , double* z_curr /*z values of the nodes of the current level*/ , double* weight /*weight connections between current(l) and previous(l + 1) level*/ , double* z_prev /*z values of the nodes of the previous(l + 1) level*/ , double* bias_curr /*bias value of the current level*/ , int prev /*size of previous level*/ , int curr /*size of current level*/ , Type t /*threshold function type of the current level*/ ){ int id = threadIdx.x + blockDim.x * blockIdx.x; if(id >= curr) return; int i; a_curr[id] = 0; for(i = 0 ; i < prev ; i ++) a_curr[id] += weight[id * prev + i] * z_prev[i]; a_curr[id] += bias_curr[id]; z_curr[id] = computeFunc(a_curr[id] , t); return; } /*Function to initialize the delta-values at the output*/ __global__ void reverse_propagate_output(double *delta_curr /*delta value of the current level*/ , double* z_curr /*z value of the current level*/ , double* target /*target value at the output*/ , double* a_curr /*a values at the output*/ , int size /*size of the output level*/ , Type t /*threshold value at the output*/ ){ int id = threadIdx.x + blockIdx.x * blockDim.x; if(id >= size) return; delta_curr[id] = (z_curr[id] - target[id]) * computeDiff(a_curr[id] , t); return; } /*Function to propagate the delta-values from one level to another*/ __global__ void reverse_propagate_level(double *delta_curr , double *delta_next , double* weight_next , double* a_curr , Type t /*Type of threshold function at the current level*/ , int size_next /*Number of nodes at the next level*/ , int size /*Number of nodes at the current level*/ ){ int id = threadIdx.x + blockIdx.x * blockDim.x; if(id >= size) return; int i; delta_curr[id] = 0; for(i = 0 ; i < size_next ; i ++) delta_curr[id] += delta_next[i] * weight_next[i * size + id]; delta_curr[id] *= computeDiff(a_curr[id] , t); return; } /*Function that updates weight between two levels*/ __global__ void weight_update(double* weight /*Weight between current and previous level*/ , double* delta_curr , double* z_prev , int size /*Size of current level*/ , int size_prev , double rate /*Rate at which the weights are updated*/ ){ int id = threadIdx.x + blockIdx.x * blockDim.x; if(id >= size * size_prev) return; int curr_node = id / size_prev; int prev_node = id % size_prev; weight[curr_node * size_prev + prev_node] -= rate * delta_curr[curr_node] * z_prev[prev_node]; return; } /*Function that updates the bias of a level*/ __global__ void bias_update(double* delta_curr , double* bias /*Bias of the current level*/ , int size , double rate){ int id = threadIdx.x + blockIdx.x * blockDim.x; if(id >= size) return; bias[id] -= rate * delta_curr[id]; return; } void copyBPNinput(BPN_CUDA* network , double *input){ int inputLevel = network->noLevels - 
1; cudaMemcpy(network->z_val + network->noNodes - network->nodeSize[inputLevel] , input , network->nodeSize[network->noLevels - 1] * sizeof(double) , cudaMemcpyHostToDevice); cudaMemcpy(network->a_val + network->noNodes - network->nodeSize[inputLevel], input , network->nodeSize[network->noLevels - 1] * sizeof(double) , cudaMemcpyHostToDevice); return; } /*Function to compute the forward propagation of values*/ void forward(BPN_CUDA* network , double *input){ int i , sizePrev , sizeCurr; double* a_curr , double* z_curr , double* weight , double* z_prev , double* bias_curr; Type t = network->type[network->noLevels - 2]; copyBPNinput(network , input); sizePrev = network->nodeSize[network->noLevels - 1]; sizeCurr = network->nodeSize[network->noLevels - 2]; a_curr = network->a_val + network->noNodes - sizePrev - sizeCurr; z_curr = network->z_val + network->noNodes - sizePrev - sizeCurr; weight = network->weight + network->noWeight - sizePrev * sizeCurr; z_prev = network->z_val + network->noNodes - sizePrev; bias_curr = network->bias + network->noNodes - sizePrev - sizeCurr; forward_propagate_input<<<(sizePrev / 1024 + 1) , (sizePrev > 1024 ? 1024 : sizePrev)>>>(z_prev , bias_curr + sizeCurr , sizePrev); for(i = network->noLevels - 2 ; i > -1 ; i --){ forward_propagate_level<<<(sizeCurr / 1024 + 1) , (sizeCurr > 1024 ? 1024 : sizeCurr)>>>(a_curr , z_curr , weight , z_prev , bias_curr , sizePrev , sizeCurr , t); if(i == 0) break; if(i == 0) break; z_prev = z_curr; sizePrev = sizeCurr; sizeCurr = network->nodeSize[i - 1]; a_curr = a_curr - sizeCurr; z_curr = z_curr - sizeCurr; bias_curr = bias_curr - sizeCurr; weight = weight - sizePrev * sizeCurr; } } /*Function to compute the reverse propagation of values*/ double reverse(BPN_CUDA* network , double* target){ double *delta_curr , *z_curr , *a_curr , *delta_next , *weight_next , *z_curr_h , *target_d; delta_curr = network->delta; z_curr = network->z_val; a_curr = network->a_val; int size = network->nodeSize[0] , size_next; Type t = network->type[0]; double error = 0; z_curr_h = new double[size]; cudaMemcpy(z_curr_h , z_curr , size * sizeof(double) , cudaMemcpyDeviceToHost); cudaMalloc((void**)&target_d , size * sizeof(double)); cudaMemcpy(target_d , target , size * sizeof(double) , cudaMemcpyHostToDevice); for(int i = 0 ; i < network->nodeSize[0] ; i ++) error += (target[i] - z_curr_h[i]) * (target[i] - z_curr_h[i]); reverse_propagate_output<<<(size / 1024 + 1) , (size > 1024 ? 1024 : size)>>>(delta_curr , z_curr , target_d , a_curr , size , t); delta_next = delta_curr; weight_next = network->weight; z_curr = NULL; delta_curr = delta_curr + size; a_curr = a_curr + size; t = network->type[1]; size_next = size; size = network->nodeSize[1]; for(int i = 1 ; i < network->noLevels ; i ++){ reverse_propagate_level<<<(size / 1024 + 1) , (size > 1024 ? 
1024 : size)>>>(delta_curr , delta_next , weight_next , a_curr , t , size_next , size); if(i == network->noLevels - 1) break; delta_next = delta_curr; weight_next = weight_next + size * size_next; a_curr = a_curr + size; delta_curr = delta_curr + size; size_next = size; size = network->nodeSize[i + 1]; t = network->type[i + 1]; } return error; } void weight_bias_update(BPN_CUDA* network , double rate){ double* weight = network->weight; double* delta_curr = network->delta; int size = network->nodeSize[0]; double* z_prev = network->z_val + size; double* bias = network->bias; int size_prev = network->nodeSize[1]; for(int i = 1 ; i < network->noLevels ; i ++){ weight_update<<<(size * size_prev / 1024 + 1) , (size * size_prev > 1024 ? 1024 : size * size_prev)>>>(weight , delta_curr , z_prev , size , size_prev , rate); bias_update<<<(size / 1024 + 1) , (size > 1024 ? 1024 : size)>>>(delta_curr , bias , size , rate); if(i == network->noLevels - 1) break; weight = weight + size * size_prev; delta_curr = delta_curr + size; z_prev = z_prev + size_prev; bias = bias + size; size = size_prev; size_prev = network->nodeSize[i + 1]; } } int train(BPN_CUDA* network , double* input , double* output , int dataset_no , int input_size , int output_size , int total_iterations){ double error; double *ip , *op; int count = 0; if(total_iterations == -1) total_iterations = 1000; while(true){ error = 0; ip = input; op = output; for(int i = 0 ; i < dataset_no ; i ++){ forward(network , ip); error += reverse(network , op); weight_bias_update(network , network->training_rate); ip = ip + input_size; op = op + output_size; } //printf("%f\n" , error); if(error < THRES || count == total_iterations) break; count ++; } return count; } void initialize(BPN_CUDA* network , int* noNodes , int levels , Type* type , double rate){ network->noLevels = levels; network->nodeSize = new int[levels]; network->type = new Type[levels]; network->training_rate = rate; for(int i = 0 ; i < levels ; i ++){ network->nodeSize[i] = noNodes[i]; network->type[i] = type[i]; } int numNodes = 0; int numWeights = 0; for(int i = 0 ; i < levels ; i ++){ numNodes += noNodes[i]; if(i == 0) continue; numWeights += noNodes[i] * noNodes[i - 1]; } double *device_mem; cudaMalloc((void**)&network->a_val , numNodes * sizeof(double)); cudaMalloc((void**)&network->z_val , numNodes * sizeof(double)); cudaMalloc((void**)&network->delta , numNodes * sizeof(double)); cudaMalloc((void**)&network->bias , numNodes * sizeof(double)); cudaMalloc((void**)&network->weight , numWeights * sizeof(double)); network->noNodes = numNodes; network->noWeight = numWeights; double* initval = new double[numNodes]; double* initweight = new double[numWeights]; time_t t; srand((unsigned)time(&t)); for(int i = 0 ; i < numWeights ; i ++){ initweight[i] = (double)(rand() % 50) / 100000.0; initweight[i] = initweight[i] == 0.0 ? 0.0001 : initweight[i]; if(i < numNodes){ initval[i] = (double)(rand() % 50) / 100000.0; initval[i] = initval[i] == 0.0 ? 
0.0001 : initval[i]; } } if(numWeights == 2)//If number of weight connections is true, then no-weights = no-nodes + 1 initval[2] = 0.0001; cudaMemcpy(network->a_val , initval , numNodes * sizeof(double) , cudaMemcpyHostToDevice); cudaMemcpy(network->z_val , initval , numNodes * sizeof(double) , cudaMemcpyHostToDevice); cudaMemcpy(network->delta , initval , numNodes * sizeof(double) , cudaMemcpyHostToDevice); cudaMemcpy(network->bias , initval , numNodes * sizeof(double) , cudaMemcpyHostToDevice); cudaMemcpy(network->weight , initweight , numWeights * sizeof(double) , cudaMemcpyHostToDevice); } void returnOutput(BPN_CUDA* network , double* input , double* output){ int size = network->nodeSize[0]; forward(network , input); cudaMemcpy(output , network->z_val , size * sizeof(double) , cudaMemcpyDeviceToHost); return; }
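One detail worth flagging in both copies of this file: the pointer declaration inside forward() repeats the type within a single statement (double* a_curr , double* z_curr , ...), which is not valid C++ and will not compile as written, and the same loop contains a duplicated if(i == 0) break;. The usual spelling of that declaration:

// Declare all five working pointers in one statement (or on separate lines).
double *a_curr, *z_curr, *weight, *z_prev, *bias_curr;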
c743ca658955bf2a369e1644204b74248c82418a.hip
// !!! This is a file automatically generated by hipify!!! #include "point.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "point.h" #include<iostream> #include "pointExtensions.cuh" #include <time.h> #include <math.h> #include "constants.cuh" #include "sortParameters.h" #include "sortingFramework.cuh" #include "launchHelper.cuh" #include "processingUtils.cuh" #include "distanceFunctions.cuh" #include "sketchedDistanceScanners.cuh" #include "cudaHelpers.cuh" #include "resultDTO.h" __global__ void knn(float* queryPoints, float* dataPoints, int nQueries, int nData, int dimensions, int k, Point* result, int func) { Point threadQueue[THREAD_QUEUE_SIZE]; int lane = threadIdx.x % WARPSIZE; Parameters params; params.lane = lane; int warpId = (blockIdx.x * blockDim.x + threadIdx.x) / WARPSIZE; int resultIdx = warpId * k; int queryId = warpId * dimensions; if (warpId >= nQueries) return; float maxKDistance = (float)INT_MAX; int warpQueueSize = k / WARPSIZE; int candidateSetSize = THREAD_QUEUE_SIZE - warpQueueSize; int localMaxKDistanceIdx = THREAD_QUEUE_SIZE - warpQueueSize; Point swapPoint; int queuePosition = 0; //Fill thread queue with defaults for (int i = 0; i < THREAD_QUEUE_SIZE; i++) { threadQueue[i] = createPoint(-1, maxKDistance); } float magnitude_query = 0; for (int j = 0; j < dimensions; j++) { magnitude_query += queryPoints[queryId + j] * queryPoints[queryId + j]; } magnitude_query = sqrt(magnitude_query); //Iterate over data; for (int i = lane; i < nData; i += WARPSIZE) { float distance = 0.0; distance = runDistanceFunction(func, &dataPoints[i*dimensions], &queryPoints[queryId], dimensions, magnitude_query); Point currentPoint = createPoint(i, distance); if (WITH_TQ_OR_BUFFER) { //run TQ for (int j = candidateSetSize - 1; j >= 0; j--) { // simple sorting. if (currentPoint.distance < threadQueue[j].distance) { swapPoint = threadQueue[j]; threadQueue[j] = currentPoint; currentPoint = swapPoint; } } //Verify that head of thread queue is not smaller than biggest k distance. if (__ballot_sync(FULL_MASK, threadQueue[0].distance < maxKDistance) && __activemask() == FULL_MASK) { startSort(threadQueue, swapPoint, params); maxKDistance = broadCastMaxK(threadQueue[candidateSetSize].distance); } } else { //run buffer if (currentPoint.distance < maxKDistance || same(currentPoint, maxKDistance)) { threadQueue[queuePosition++] = currentPoint; } if (__ballot_sync(FULL_MASK, queuePosition >= candidateSetSize) && __activemask() == FULL_MASK) { startSort(threadQueue, swapPoint, params); maxKDistance = broadCastMaxK(threadQueue[candidateSetSize].distance); //printQueue(threadQueue); queuePosition = 0; } } } startSort(threadQueue, swapPoint, params); //Copy result from warp queues to result array in reverse order. 
int kIdx = (WARPSIZE - lane) - 1; int warpQueueIdx = THREAD_QUEUE_SIZE - 1; for (int i = kIdx; i < k; i += WARPSIZE) { result[resultIdx + i] = threadQueue[warpQueueIdx--]; } } __global__ void normalizeData(float* queryPoints, float* dataPoints, int nQueries, int nData, int dimensions) { transformToUnitVectors(queryPoints, nQueries, dimensions); transformToUnitVectors(dataPoints, nData, dimensions); } __global__ void preprocess(float* queryPoints, float* dataPoints, int nQueries, int nData, int dimensions, int* minValues) { transformData(dataPoints, queryPoints, nData, nQueries, dimensions, minValues); } __global__ void runScan(float* queryPoints, float* dataPoints, int nQueries, int nData, int dimensions, int k, Point* result, int func) { int warpId = (blockIdx.x * blockDim.x + threadIdx.x) / WARPSIZE; int queryIndex = warpId * dimensions; if (warpId < nQueries) { scanHammingDistance(dataPoints, &queryPoints[queryIndex], dimensions, nullptr,nullptr, dimensions, nData, nQueries, k, func, 2, result); } } Result runMemOptimizedLinearScan(int k, int d, int N_query, int N_data, float* data, float* queries, int distanceFunc) { setDevice(); int numberOfThreads = calculateThreadsLocal(N_query); int numberOfBlocks = calculateBlocksLocal(N_query); if (THREAD_QUEUE_SIZE <= 8 || THREAD_QUEUE_SIZE > 64) { numberOfThreads /= 2; numberOfBlocks *= 2; } int resultSize = N_query * k; Point *resultArray = (Point*)malloc(resultSize * sizeof(Point)); Result res; res.setupResult(N_query, k); // queries float* dev_query_points = mallocArray(queries, N_query * d, true); // data float* dev_data_points = mallocArray(data, N_data * d, true); // result Point* dev_result = mallocArray(resultArray, resultSize); if (distanceFunc == 2) { printf("Starting preprocess \n"); int* minValues = (int*)malloc(d * sizeof(int)); for (int i = 0; i < d; i++) { minValues[i] = 0; } int* dev_minValues = mallocArray<int>(minValues, d, true); preprocess << <1, numberOfThreads >> > (dev_query_points, dev_data_points, N_query, N_data, d, dev_minValues); waitForKernel(); normalizeData << < numberOfBlocks, numberOfThreads >> > (dev_query_points, dev_data_points, N_query, N_data, d); waitForKernel(); printf("Done preprocessing \n"); } printf("Launching KNN \n"); size_t free_byte; size_t total_byte; hipMemGetInfo(&free_byte, &total_byte); double free_byte_double = (double)free_byte; double totals_byte_double = (double)total_byte; double used_bytes = totals_byte_double - free_byte_double; printf("Free bytes: %f, total_bytes: %f, used bytes %f \n", ((free_byte_double / 1024) / 1024), ((totals_byte_double / 1024) / 1024), ((used_bytes/1024)/1024)); clock_t before = clock(); knn << <numberOfBlocks, numberOfThreads>> > (dev_query_points, dev_data_points, N_query, N_data, d, k, dev_result, distanceFunc); waitForKernel(); clock_t time_lapsed = clock() - before; printf("Time calculate on the GPU: %d \n", (time_lapsed * 1000 / CLOCKS_PER_SEC)); res.scanTime = (time_lapsed * 1000 / CLOCKS_PER_SEC); copyArrayToHost(resultArray, dev_result, resultSize); res.copyResultPoints(resultArray, N_query, k); //Free memory... freeDeviceArray(dev_query_points); freeDeviceArray(dev_data_points); freeDeviceArray(dev_result); free(resultArray); resetDevice(); return res; }
c743ca658955bf2a369e1644204b74248c82418a.cu
#include "point.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "point.h" #include<iostream> #include "pointExtensions.cuh" #include <time.h> #include <math.h> #include "constants.cuh" #include "sortParameters.h" #include "sortingFramework.cuh" #include "launchHelper.cuh" #include "processingUtils.cuh" #include "distanceFunctions.cuh" #include "sketchedDistanceScanners.cuh" #include "cudaHelpers.cuh" #include "resultDTO.h" __global__ void knn(float* queryPoints, float* dataPoints, int nQueries, int nData, int dimensions, int k, Point* result, int func) { Point threadQueue[THREAD_QUEUE_SIZE]; int lane = threadIdx.x % WARPSIZE; Parameters params; params.lane = lane; int warpId = (blockIdx.x * blockDim.x + threadIdx.x) / WARPSIZE; int resultIdx = warpId * k; int queryId = warpId * dimensions; if (warpId >= nQueries) return; float maxKDistance = (float)INT_MAX; int warpQueueSize = k / WARPSIZE; int candidateSetSize = THREAD_QUEUE_SIZE - warpQueueSize; int localMaxKDistanceIdx = THREAD_QUEUE_SIZE - warpQueueSize; Point swapPoint; int queuePosition = 0; //Fill thread queue with defaults for (int i = 0; i < THREAD_QUEUE_SIZE; i++) { threadQueue[i] = createPoint(-1, maxKDistance); } float magnitude_query = 0; for (int j = 0; j < dimensions; j++) { magnitude_query += queryPoints[queryId + j] * queryPoints[queryId + j]; } magnitude_query = sqrt(magnitude_query); //Iterate over data; for (int i = lane; i < nData; i += WARPSIZE) { float distance = 0.0; distance = runDistanceFunction(func, &dataPoints[i*dimensions], &queryPoints[queryId], dimensions, magnitude_query); Point currentPoint = createPoint(i, distance); if (WITH_TQ_OR_BUFFER) { //run TQ for (int j = candidateSetSize - 1; j >= 0; j--) { // simple sorting. if (currentPoint.distance < threadQueue[j].distance) { swapPoint = threadQueue[j]; threadQueue[j] = currentPoint; currentPoint = swapPoint; } } //Verify that head of thread queue is not smaller than biggest k distance. if (__ballot_sync(FULL_MASK, threadQueue[0].distance < maxKDistance) && __activemask() == FULL_MASK) { startSort(threadQueue, swapPoint, params); maxKDistance = broadCastMaxK(threadQueue[candidateSetSize].distance); } } else { //run buffer if (currentPoint.distance < maxKDistance || same(currentPoint, maxKDistance)) { threadQueue[queuePosition++] = currentPoint; } if (__ballot_sync(FULL_MASK, queuePosition >= candidateSetSize) && __activemask() == FULL_MASK) { startSort(threadQueue, swapPoint, params); maxKDistance = broadCastMaxK(threadQueue[candidateSetSize].distance); //printQueue(threadQueue); queuePosition = 0; } } } startSort(threadQueue, swapPoint, params); //Copy result from warp queues to result array in reverse order. 
int kIdx = (WARPSIZE - lane) - 1; int warpQueueIdx = THREAD_QUEUE_SIZE - 1; for (int i = kIdx; i < k; i += WARPSIZE) { result[resultIdx + i] = threadQueue[warpQueueIdx--]; } } __global__ void normalizeData(float* queryPoints, float* dataPoints, int nQueries, int nData, int dimensions) { transformToUnitVectors(queryPoints, nQueries, dimensions); transformToUnitVectors(dataPoints, nData, dimensions); } __global__ void preprocess(float* queryPoints, float* dataPoints, int nQueries, int nData, int dimensions, int* minValues) { transformData(dataPoints, queryPoints, nData, nQueries, dimensions, minValues); } __global__ void runScan(float* queryPoints, float* dataPoints, int nQueries, int nData, int dimensions, int k, Point* result, int func) { int warpId = (blockIdx.x * blockDim.x + threadIdx.x) / WARPSIZE; int queryIndex = warpId * dimensions; if (warpId < nQueries) { scanHammingDistance(dataPoints, &queryPoints[queryIndex], dimensions, nullptr,nullptr, dimensions, nData, nQueries, k, func, 2, result); } } Result runMemOptimizedLinearScan(int k, int d, int N_query, int N_data, float* data, float* queries, int distanceFunc) { setDevice(); int numberOfThreads = calculateThreadsLocal(N_query); int numberOfBlocks = calculateBlocksLocal(N_query); if (THREAD_QUEUE_SIZE <= 8 || THREAD_QUEUE_SIZE > 64) { numberOfThreads /= 2; numberOfBlocks *= 2; } int resultSize = N_query * k; Point *resultArray = (Point*)malloc(resultSize * sizeof(Point)); Result res; res.setupResult(N_query, k); // queries float* dev_query_points = mallocArray(queries, N_query * d, true); // data float* dev_data_points = mallocArray(data, N_data * d, true); // result Point* dev_result = mallocArray(resultArray, resultSize); if (distanceFunc == 2) { printf("Starting preprocess \n"); int* minValues = (int*)malloc(d * sizeof(int)); for (int i = 0; i < d; i++) { minValues[i] = 0; } int* dev_minValues = mallocArray<int>(minValues, d, true); preprocess << <1, numberOfThreads >> > (dev_query_points, dev_data_points, N_query, N_data, d, dev_minValues); waitForKernel(); normalizeData << < numberOfBlocks, numberOfThreads >> > (dev_query_points, dev_data_points, N_query, N_data, d); waitForKernel(); printf("Done preprocessing \n"); } printf("Launching KNN \n"); size_t free_byte; size_t total_byte; cudaMemGetInfo(&free_byte, &total_byte); double free_byte_double = (double)free_byte; double totals_byte_double = (double)total_byte; double used_bytes = totals_byte_double - free_byte_double; printf("Free bytes: %f, total_bytes: %f, used bytes %f \n", ((free_byte_double / 1024) / 1024), ((totals_byte_double / 1024) / 1024), ((used_bytes/1024)/1024)); clock_t before = clock(); knn << <numberOfBlocks, numberOfThreads>> > (dev_query_points, dev_data_points, N_query, N_data, d, k, dev_result, distanceFunc); waitForKernel(); clock_t time_lapsed = clock() - before; printf("Time calculate on the GPU: %d \n", (time_lapsed * 1000 / CLOCKS_PER_SEC)); res.scanTime = (time_lapsed * 1000 / CLOCKS_PER_SEC); copyArrayToHost(resultArray, dev_result, resultSize); res.copyResultPoints(resultArray, N_query, k); //Free memory... freeDeviceArray(dev_query_points); freeDeviceArray(dev_data_points); freeDeviceArray(dev_result); free(resultArray); resetDevice(); return res; }
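A hedged usage sketch for runMemOptimizedLinearScan() above (not from the original sources): data is laid out so that point i occupies data[i*d .. i*d + d - 1], matching the kernel's indexing, and k should be a multiple of the warp size since the kernel derives its per-warp queue size as k / WARPSIZE. The random fill, the distanceFunc value of 1 and the helper name exampleScan are illustrative assumptions; passing distanceFunc == 2 additionally triggers the preprocessing path shown above. The sketch assumes it is compiled together with the file above so that Result and runMemOptimizedLinearScan are visible.

#include <cstdlib>

Result exampleScan(int k, int d, int nQueries, int nData) {
    float* data    = (float*)malloc((size_t)nData * d * sizeof(float));
    float* queries = (float*)malloc((size_t)nQueries * d * sizeof(float));
    for (int i = 0; i < nData * d; i++)    data[i]    = (float)rand() / RAND_MAX;
    for (int i = 0; i < nQueries * d; i++) queries[i] = (float)rand() / RAND_MAX;
    // Runs the brute-force k-NN scan on the GPU; res.scanTime holds the kernel time in milliseconds.
    Result res = runMemOptimizedLinearScan(k, d, nQueries, nData, data, queries, /*distanceFunc=*/1);
    free(data);
    free(queries);
    return res;
}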
eb5e56078e915f88ea73091c8d7711bd0c3cc72f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math.h> #include <stdio.h> // Array access macros #define INPUT(i,j) A[(i) + (j)*(m+patchSize-1)] #define OUTPUT(i,j) B[(i) + (j)*m*m] #define FILTER(i) H[(i)] __global__ void sampleAdd(double const * const A, double *B, double *H, int m, int n, int patchSize) { // Get pixel (x,y) in input int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i>=((patchSize - 1) / 2) && i<m+((patchSize - 1) / 2) && j>=((patchSize - 1) / 2) && j<m+((patchSize - 1) / 2)) { //do not scan pixels in the pad for (int k = -(patchSize - 1) / 2; k <= (patchSize - 1) / 2; k++) { //scan the neighbours in an area patchSize x patchSize for (int l = -(patchSize - 1) / 2; l <= (patchSize - 1) / 2; l++) { OUTPUT(i - ((patchSize - 1) / 2) + m * (j - ((patchSize - 1) / 2)), k + ((patchSize - 1) / 2) + (l + ((patchSize - 1) / 2)) * patchSize) = INPUT(k + i, l + j); //assign the neighbors' value OUTPUT(i - ((patchSize - 1) / 2) + m * (j - ((patchSize - 1) / 2)), k + ((patchSize - 1) / 2) + (l + ((patchSize - 1) / 2)) * patchSize) *= (FILTER(k + ((patchSize - 1) / 2) + (l + ((patchSize - 1) / 2)) * patchSize)); //multiply that value with a filter } } } }
eb5e56078e915f88ea73091c8d7711bd0c3cc72f.cu
#include <math.h> #include <stdio.h> // Array access macros #define INPUT(i,j) A[(i) + (j)*(m+patchSize-1)] #define OUTPUT(i,j) B[(i) + (j)*m*m] #define FILTER(i) H[(i)] __global__ void sampleAdd(double const * const A, double *B, double *H, int m, int n, int patchSize) { // Get pixel (x,y) in input int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i>=((patchSize - 1) / 2) && i<m+((patchSize - 1) / 2) && j>=((patchSize - 1) / 2) && j<m+((patchSize - 1) / 2)) { //do not scan pixels in the pad for (int k = -(patchSize - 1) / 2; k <= (patchSize - 1) / 2; k++) { //scan the neighbours in an area patchSize x patchSize for (int l = -(patchSize - 1) / 2; l <= (patchSize - 1) / 2; l++) { OUTPUT(i - ((patchSize - 1) / 2) + m * (j - ((patchSize - 1) / 2)), k + ((patchSize - 1) / 2) + (l + ((patchSize - 1) / 2)) * patchSize) = INPUT(k + i, l + j); //assign the neighbors' value OUTPUT(i - ((patchSize - 1) / 2) + m * (j - ((patchSize - 1) / 2)), k + ((patchSize - 1) / 2) + (l + ((patchSize - 1) / 2)) * patchSize) *= (FILTER(k + ((patchSize - 1) / 2) + (l + ((patchSize - 1) / 2)) * patchSize)); //multiply that value with a filter } } } }
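A hedged host-side launch sketch for sampleAdd() above (not part of the original file): A is the padded (m+patchSize-1) x (m+patchSize-1) image, B receives one row of patchSize*patchSize weighted neighbour values per interior pixel, and H is the patchSize*patchSize filter. The 16x16 block shape and the helper name launchSampleAdd are arbitrary choices; the kernel's n parameter is unused in its body, so the value passed for it does not matter.

void launchSampleAdd(const double* d_A, double* d_B, double* d_H, int m, int patchSize) {
    int padded = m + patchSize - 1;                 // side length of the padded input image
    dim3 block(16, 16);                             // illustrative block shape
    dim3 grid((padded + block.x - 1) / block.x,
              (padded + block.y - 1) / block.y);    // cover the padded image; the kernel guards the pad
    sampleAdd<<<grid, block>>>(d_A, d_B, d_H, m, /*n (unused)=*/0, patchSize);
    cudaDeviceSynchronize();
}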
0c1c23cbea27736a84c7ed8340fb458393e300fe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void kernelB(float* r, float* x, float* y, float* z, int size) { for (int i = threadIdx.x; i < size; i += blockDim.x) { r[i] = x[i] * y[i] + z[i]; } }
0c1c23cbea27736a84c7ed8340fb458393e300fe.cu
__global__ void kernelB(float* r, float* x, float* y, float* z, int size) { for (int i = threadIdx.x; i < size; i += blockDim.x) { r[i] = x[i] * y[i] + z[i]; } }
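A hedged usage sketch for kernelB() above (not in the original file): the kernel strides only by threadIdx.x over blockDim.x, so a single block already covers the whole array and launching extra blocks would merely repeat the same work. The 256-thread block size and the helper name launchKernelB are illustrative.

void launchKernelB(float* d_r, float* d_x, float* d_y, float* d_z, int size) {
    // One block of 256 threads; each thread handles elements i, i+256, i+512, ...
    kernelB<<<1, 256>>>(d_r, d_x, d_y, d_z, size);
    cudaDeviceSynchronize();
}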
5412ddabee4891115aeffaf82c6265107a09b417.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdint.h> #include <stdlib.h> #include <string.h> #include <cutil.h> #include "util.h" #include "ref_2dhisto.h" #define H_ERROR_CHECKS 0 #if H_ERROR_CHECKS #include <assert.h> #include <stdio.h> #endif #define HBLOCK_SIZE_LOG2 7 #define HBLOCK_SIZE (1 << HBLOCK_SIZE_LOG2) // = 32 #define HMBLOCK_SIZE_LOG2 8 #define HMBLOCK_SIZE (1 << HMBLOCK_SIZE_LOG2) // = 32 #define LBLOCK_SIZE_LOG2 5 #define LBLOCK_SIZE (1 << LBLOCK_SIZE_LOG2) // = 256 #define LBLOCK_WARPS (LBLOCK_SIZE >> 5) #define USE_MEDIUM_PATH 1 #if USE_MEDIUM_PATH // For now only MEDIUM_BLOCK_SIZE_LOG2 == LBLOCK_SIZE_LOG2 works # define MEDIUM_BLOCK_SIZE_LOG2 8 # define MEDIUM_BLOCK_SIZE (1 << MEDIUM_BLOCK_SIZE_LOG2) // 128 # define MBLOCK_WARPS (MEDIUM_BLOCK_SIZE >> 5) #define MED_THREAD_DEGEN 16 #endif #define RBLOCK_SIZE 64 #define RMAXSTEPS 80 #define NHSTEPSPERKEY 32 #define MAX_NHSTEPS 1024 #define MAX_MULTISTEPS 1024 #define MAX_NLHSTEPS 2048 #define GATHER_BLOCK_SIZE_LOG2 6 #define GATHER_BLOCK_SIZE (1 << GATHER_BLOCK_SIZE_LOG2) #define STRATEGY_CHECK_INTERVAL_LOG2 7 #define STRATEGY_CHECK_INTERVAL (1 << STRATEGY_CHECK_INTERVAL_LOG2) #define HISTOGRAM_DEGEN_LIMIT 20 #define HASH_COLLISION_STEPS 2 const int numActiveUpperLimit = 24; #define USE_JENKINS_HASH 0 #define LARGE_NBIN_CHECK_INTERVAL_LOG2 5 #define LARGE_NBIN_CHECK_INTERVAL (1 << LARGE_NBIN_CHECK_INTERVAL_LOG2) #define SMALL_BLOCK_SIZE_LOG2 6 #define SMALL_BLOCK_SIZE (1 << SMALL_BLOCK_SIZE_LOG2) #define MAX_SMALL_STEPS 2040 #if __CUDA_ARCH__ >= 120 #define USE_ATOMICS_HASH 0 #else #define USE_ATOMICS_HASH 0 #endif #if (__CUDA_ARCH__ >= 200) # define USE_BALLOT_HISTOGRAM 1 #else # define USE_BALLOT_HISTOGRAM 0 #endif #ifndef __device__ #define __device__ #endif #ifndef __host__ #define __host__ #endif #ifndef __shared__ #define __shared__ #endif static unsigned int* d_Data = NULL; static unsigned int* d_Histogram = NULL; enum histogram_type { histogram_generic, histogram_atomic_inc, histogram_atomic_add, }; template <histogram_type histotype, typename OUTPUTTYPE> static int getHistogramBufSize(OUTPUTTYPE zero, int nOut); template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> static hipError_t callHistogramKernel( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut, bool outInDev = false, hipStream_t stream = 0, void* tmpBuffer = NULL, bool allowMultiPass = true); template <histogram_type histotype, int nMultires, int nDim, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> static hipError_t callHistogramKernelNDim( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT* starts, INDEXT* ends, OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut, bool outInDev = false, hipStream_t stream = 0, void* tmpBuffer = NULL, bool allowMultiPass = true); template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> hipError_t callHistogramKernel2Dim( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT x0, INDEXT x1, INDEXT y0, INDEXT y1, OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut, bool outInDev, hipStream_t stream, void* tmpBuffer, bool allowMultiPass = true); struct test_xform { __host__ __device__ void operator() (unsigned 
int* input, int i, int* res_idx, unsigned int* res, int nres) const { *res_idx++ = input[i]; *res++ = 1; } }; // Sum-functor to be used for reduction - just a normal sum of two integers struct test_sumfun { __device__ __host__ unsigned int operator() (unsigned int res1, unsigned int res2) const{ return res1 + res2; } }; __global__ void computeHistogram(unsigned int *buffer, int size, unsigned int *histo ) { __shared__ unsigned int temp[1024]; temp[threadIdx.x + 0] = 0; temp[threadIdx.x + 256] = 0; temp[threadIdx.x + 512] = 0; temp[threadIdx.x + 768] = 0; __syncthreads(); int i = threadIdx.x + blockIdx.x * blockDim.x; int offset = blockDim.x * gridDim.x; while (i < size) { atomicAdd( &temp[buffer[i]], 1); i += offset; } __syncthreads(); atomicAdd( &(histo[threadIdx.x + 0]), temp[threadIdx.x + 0] ); atomicAdd( &(histo[threadIdx.x + 256]), temp[threadIdx.x + 256] ); atomicAdd( &(histo[threadIdx.x + 512]), temp[threadIdx.x + 512] ); atomicAdd( &(histo[threadIdx.x + 768]), temp[threadIdx.x + 768] ); } extern "C" void opt_init(unsigned int** h_Data, int width, int height) { hipMalloc((void **)&d_Histogram, HISTO_HEIGHT * HISTO_WIDTH * sizeof(unsigned int)); hipMemset( d_Histogram, 0,HISTO_HEIGHT * HISTO_WIDTH * sizeof( unsigned int )); unsigned int *data = new unsigned int[width*height]; for(int j = 0;j<height;++j) { memcpy(data+j*width, h_Data[j], sizeof(unsigned int)*width); } hipMalloc((void **)&d_Data, width*height*sizeof(unsigned int)); hipMemcpy(d_Data, data, width*height*sizeof(unsigned int), hipMemcpyHostToDevice); delete []data; } extern "C" void opt_2dhisto(int size) { test_xform xform; test_sumfun sum; callHistogramKernel<histogram_atomic_inc, 1>(d_Data, xform, sum, 0, size, 0U, d_Histogram, HISTO_HEIGHT * HISTO_WIDTH, true); } extern "C" void opt_free() { hipFree(d_Histogram); hipFree(d_Data); } extern "C" void opt_copyFromDevice(unsigned char* output) { unsigned int* h_Histogram = new unsigned int[HISTO_HEIGHT * HISTO_WIDTH]; hipMemcpy(h_Histogram, d_Histogram, HISTO_HEIGHT * HISTO_WIDTH * sizeof(unsigned int), hipMemcpyDeviceToHost); for(int i = 0;i<HISTO_HEIGHT * HISTO_WIDTH;++i) { output[i] = h_Histogram[i]>255?255:h_Histogram[i]; } delete[] h_Histogram; } //#include <stdio.h> template <typename OUTPUTTYPE, typename SUMFUNTYPE> __global__ void multireduceKernel(OUTPUTTYPE* input, int n, int nOut, int nsteps, SUMFUNTYPE sumFun, OUTPUTTYPE zero, int stride, OUTPUTTYPE* initialValues) { int tid = threadIdx.x; int bidx = blockIdx.x; int bidy = blockIdx.y; OUTPUTTYPE myout = zero; int i; for (i = 0; i < nsteps; i++) { int subIndex = bidx * RBLOCK_SIZE + tid; int cidx = subIndex + i * RBLOCK_SIZE * gridDim.x; if (cidx < n) { // printf("t(%2d)b(%3d,%2d) r(%d)\n", tid, bidx, bidy, cidx + bidy * stride); myout = sumFun(myout, input[cidx + bidy * stride]); } } __shared__ OUTPUTTYPE tmp[RBLOCK_SIZE / 2]; for (int curLimit = RBLOCK_SIZE / 2; curLimit > 0; curLimit >>= 1) { // First write out the current result for threads above the limit if (tid >= curLimit && tid < (curLimit << 1)) tmp[tid - curLimit] = myout; // Otherwise wait for the write the complete and add that value to our result __syncthreads(); if (tid < curLimit) myout = sumFun(myout, tmp[tid]); // IMPORTANT: Wait before new loop for the read to complete __syncthreads(); } // Done! myout contains the result for our block for thread 0!! if (tid == 0) { // NOTE: If gridDim == 1 then we have finally reached the last iteration and // can write the result into the final result-value array // (ie. 
The same as initialvalue-array) if (gridDim.x == 1) { OUTPUTTYPE initVal = initialValues[bidy]; initialValues[bidy] = sumFun(initVal, myout); // And we are DONE! } else { // printf("t(%2d)b(%3d,%2d) w(%d)\n", tid, bidx, bidy, bidx + bidy * stride); initialValues[bidx + bidy * stride] = myout; } } } template <typename OUTPUTTYPE, typename SUMFUNTYPE> static void callMultiReduce( int arrLen, int nOut, OUTPUTTYPE* h_results, OUTPUTTYPE* input, SUMFUNTYPE sumFunObj, OUTPUTTYPE zero, hipStream_t stream, void* tmpbuf, bool outInDev) { int n = arrLen; // Set-up yet another temp buffer: (TODO: Pool alloc somehow?) OUTPUTTYPE* resultTemp = NULL; // TODO: Why do we need such a large temporary array? // Shouldn't sizeof(OUTPUTTYPE) * nOut * xblocks be enough?? if (tmpbuf) { resultTemp = (OUTPUTTYPE*)tmpbuf; } else { hipMalloc((void**)&resultTemp, sizeof(OUTPUTTYPE) * nOut * arrLen); #if H_ERROR_CHECKS //printf("resultTemp = %p\n", resultTemp); hipError_t error = hipGetLastError(); if (error != hipSuccess) printf("Cudaerror0 = %s\n", hipGetErrorString( error )); #endif } OUTPUTTYPE* output = resultTemp; enum hipMemcpyKind fromOut = outInDev ? hipMemcpyDeviceToDevice : hipMemcpyHostToDevice; enum hipMemcpyKind toOut = outInDev ? hipMemcpyDeviceToDevice : hipMemcpyDeviceToHost; // Copy initial values: do { int steps = (n + (RBLOCK_SIZE - 1)) / RBLOCK_SIZE; if (steps > RMAXSTEPS) steps = RMAXSTEPS; int yblocks = nOut; int xblocks = (n + (steps * RBLOCK_SIZE - 1)) / (steps * RBLOCK_SIZE); const dim3 block = RBLOCK_SIZE; const dim3 grid(xblocks, yblocks, 1); if (xblocks == 1) // LAST ONE to start { //printf("hipMemcpy(%p, %p, %d, %d);\n", output, h_results, sizeof(OUTPUTTYPE) * nOut, fromOut); if (stream != 0) hipMemcpyAsync(output, h_results, sizeof(OUTPUTTYPE) * nOut, fromOut, stream); else hipMemcpy(output, h_results, sizeof(OUTPUTTYPE) * nOut, fromOut); } #if H_ERROR_CHECKS hipError_t error = hipGetLastError(); if (error != hipSuccess) printf("Cudaerror1 = %s\n", hipGetErrorString( error )); #endif // Then the actual kernel call hipLaunchKernelGGL(( multireduceKernel), dim3(grid), dim3(block), 0, stream, input, n, nOut, steps, sumFunObj, zero, arrLen, output); #if H_ERROR_CHECKS error = hipGetLastError(); if (error != hipSuccess) printf("Cudaerror2 = %s\n", hipGetErrorString( error )); #endif if (xblocks > 1) { // Swap pointers: OUTPUTTYPE* tmpptr = output; output = input; input = tmpptr; } n = xblocks; } while(n > 1); // Then copy back the results: //hipMemcpyAsync(h_results, resultTemp, sizeof(OUTPUTTYPE) * nOut, hipMemcpyDeviceToHost, CURRENT_STREAM()); // TODO: Support async copy here?? 
if (outInDev && stream != 0) hipMemcpyAsync(h_results, output, sizeof(OUTPUTTYPE) * nOut, toOut, stream); else hipMemcpy(h_results, output, sizeof(OUTPUTTYPE) * nOut, toOut); #if H_ERROR_CHECKS hipError_t error = hipGetLastError(); if (error != hipSuccess) printf("Cudaerror3 = %s\n", hipGetErrorString( error )); #endif if (!tmpbuf) { hipFree(resultTemp); } #if H_ERROR_CHECKS error = hipGetLastError(); if (error != hipSuccess) printf("Cudaerror4 = %s\n", hipGetErrorString( error )); #endif } template <typename SUMFUNTYPE, typename OUTPUTTYPE> __global__ void gatherKernel(SUMFUNTYPE sumfunObj, OUTPUTTYPE* blockOut, int nOut, int nEntries, OUTPUTTYPE zero) { //int resIdx = threadIdx.x + blockDim.x * blockIdx.x; int resIdx = blockIdx.x; if (resIdx < nOut) { // Let's divide the nEntries first evenly on all threads and read 4 entries in a row int locEntries = (nEntries) >> (GATHER_BLOCK_SIZE_LOG2); // Note: Original array entry is stored in resIdx + nOut * nEntries! OUTPUTTYPE res = zero; if (threadIdx.x == 0) res = blockOut[resIdx + nOut * nEntries]; // Shift starting ptr: blockOut = &blockOut[resIdx]; int locIdx = threadIdx.x * locEntries; for (int i=0; i < locEntries/4; i++) { OUTPUTTYPE x1 = blockOut[nOut * (locIdx + (i << 2))]; OUTPUTTYPE x2 = blockOut[nOut * (locIdx + (i << 2) + 1)]; OUTPUTTYPE x3 = blockOut[nOut * (locIdx + (i << 2) + 2)]; OUTPUTTYPE x4 = blockOut[nOut * (locIdx + (i << 2) + 3)]; res = sumfunObj(res, x1); res = sumfunObj(res, x2); res = sumfunObj(res, x3); res = sumfunObj(res, x4); } // Then do the rest for (int j = (locEntries/4)*4; j < locEntries; j++) { OUTPUTTYPE x1 = blockOut[nOut * (locIdx + j)]; res = sumfunObj(res, x1); } // Still handle rest starting from index "locEntries * BLOCK_SIZE": locIdx = threadIdx.x + (locEntries << GATHER_BLOCK_SIZE_LOG2); if (locIdx < nEntries) res = sumfunObj(res, blockOut[nOut * locIdx]); // Ok - all that is left is to do the final parallel reduction between threads: { __shared__ OUTPUTTYPE data[GATHER_BLOCK_SIZE]; //volatile OUTPUTTYPE* data = (volatile OUTPUTTYPE*)&dataTmp[0]; // TODO Compiler complains with volatile from this - why? //error: no operator "=" matches these operands // operand types are: volatile myTestType_s = myTestType // Silly - does not happen with built-in types (nice...) 
data[threadIdx.x] = res; #if GATHER_BLOCK_SIZE == 512 __syncthreads(); if (threadIdx.x < 256) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 256]); #endif #if GATHER_BLOCK_SIZE >= 256 __syncthreads(); if (threadIdx.x < 128) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 128]); #endif #if GATHER_BLOCK_SIZE >= 128 __syncthreads(); if (threadIdx.x < 64) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 64]); __syncthreads(); #endif #if GATHER_BLOCK_SIZE >= 64 __syncthreads(); if (threadIdx.x < 32) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 32]); #endif __syncthreads(); if (threadIdx.x < 16) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 16]); __syncthreads(); if (threadIdx.x < 8) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 8]); __syncthreads(); if (threadIdx.x < 4) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 4]); __syncthreads(); if (threadIdx.x < 2) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 2]); __syncthreads(); if (threadIdx.x < 1) *blockOut = sumfunObj(data[threadIdx.x], data[threadIdx.x + 1]); } } } #define FREE_MUTEX_ID 0xffeecafe #define TAKE_WARP_MUTEX(ID) do { \ int warpIdWAM = threadIdx.x >> 5; \ __shared__ volatile int lockVarWarpAtomicMutex;\ bool doneWAM = false;\ bool allDone = false; \ while(!allDone){ \ __syncthreads(); \ if (!doneWAM) lockVarWarpAtomicMutex = warpIdWAM; \ __syncthreads(); \ if (lockVarWarpAtomicMutex == FREE_MUTEX_ID) allDone = true; \ __syncthreads(); \ if (lockVarWarpAtomicMutex == warpIdWAM){ /* We Won */ // User code comes here #define GIVE_WARP_MUTEX(ID) doneWAM = true; \ lockVarWarpAtomicMutex = FREE_MUTEX_ID; \ } \ } \ __syncthreads(); \ } while(0) // NOTE: Init must be called from divergent-free code (or with exited warps) #define INIT_WARP_MUTEX2(MUTEX) do { MUTEX = FREE_MUTEX_ID; __syncthreads(); } while(0) #if 0 && __CUDA_ARCH__ >= 120 // TODO: NOT WORKING THIS CODEPATH - find out why #define TAKE_WARP_MUTEX2(MUTEX) do { \ int warpIdWAM = 1000000 + threadIdx.x / 32; \ bool doneWAM = false;\ while(!doneWAM){ \ int old = -2; \ if (threadIdx.x % 32 == 0) \ old = atomicCAS(&MUTEX, FREE_MUTEX_ID, warpIdWAM); \ if (__any(old == FREE_MUTEX_ID)){ /* We Won */ // User code comes here #define GIVE_WARP_MUTEX2(MUTEX) doneWAM = true; \ atomicExch(&MUTEX, FREE_MUTEX_ID); \ } \ } \ } while(0) #else #define TAKE_WARP_MUTEX2(MUTEX) do { \ int warpIdWAM = 1000000 + threadIdx.x / 32; \ bool doneWAM = false;\ bool allDone = false; \ while(!allDone){ \ __syncthreads(); \ if (!doneWAM) MUTEX = warpIdWAM; \ __syncthreads(); \ if (MUTEX == FREE_MUTEX_ID) allDone = true; \ if (MUTEX == warpIdWAM){ /* We Won */ // User code comes here #define GIVE_WARP_MUTEX2(MUTEX) doneWAM = true; \ MUTEX = FREE_MUTEX_ID; \ } \ } \ } while(0) #endif #if USE_BALLOT_HISTOGRAM template <typename OUTPUTTYPE> static inline __device__ OUTPUTTYPE mySillyPopCount(unsigned int mymask, OUTPUTTYPE zero) { return zero; } static inline __device__ int mySillyPopCount(unsigned int mymask, int zero) { return (int)__popc(mymask); } static inline __device__ unsigned int mySillyPopCount(unsigned int mymask, unsigned int zero) { return (unsigned int)__popc(mymask); } static inline __device__ long long mySillyPopCount(unsigned int mymask, long long zero) { return (long long)__popc(mymask); } static inline __device__ unsigned long long mySillyPopCount(unsigned int mymask, unsigned long long zero) { return (unsigned long 
long)__popc(mymask); } template <histogram_type histotype, bool checkNSame, typename SUMFUNTYPE, typename OUTPUTTYPE> static inline __device__ bool ballot_makeUnique( SUMFUNTYPE sumfunObj, int myKey, OUTPUTTYPE* myOut, OUTPUTTYPE* s_vals, int* s_keys, int* nSameKeys) { unsigned int mymask; /* #if HBLOCK_SIZE != 32 #error Please use threadblocks of 32 threads #endif*/ //startKey = s_keys[startIndex]; // First dig out for each thread who are the other threads that have the same key as us... //int i = 0; if (checkNSame) { unsigned int donemask = 0; int startIndex = 32 - 1; int startKey = s_keys[startIndex]; *nSameKeys = 0; while (~donemask != 0 /*&& i++ < 32*/) { unsigned int mask = __ballot(myKey == startKey); if (myKey == startKey) mymask = mask; donemask |= mask; { int nSame = __popc(mask); if (nSame > *nSameKeys) *nSameKeys = nSame; } startIndex = 31 - __clz(~donemask); //if (myKey == 0) printf("Startindex = %d, donemask = 0x%08x, mask = 0x%08x\n", startIndex, donemask, mask); if (startIndex >= 0) startKey = s_keys[startIndex]; } } else { unsigned int donemask = 0; int startIndex = 32 - 1; while (startIndex >= 0) { int startKey = s_keys[startIndex]; unsigned int mask = __ballot(myKey == startKey); if (myKey == startKey) mymask = mask; donemask |= mask; startIndex = 31 - __clz(~donemask); } } // Ok now mymask contains those threads - now we just reduce locally - all threads run at the same // time, but reducing threads lose always half of them with each iteration - it would help // to work with more than 32 entries, but the algorithm seems to get tricky there. { // Compute the left side of the mask and the right side. rmask first will contain our thread index, but // we zero it out immediately unsigned int lmask = (mymask >> (threadIdx.x & 31)) << (threadIdx.x & 31); int IamNth = __popc(lmask) - 1; bool Iwrite = IamNth == 0; if (histotype == histogram_atomic_inc) { // Fast-path for atomic inc *myOut = mySillyPopCount(mymask, *myOut); return Iwrite && (myKey >= 0); } else { unsigned int rmask = mymask & (~lmask); // Now compute which number is our thread in the subarray of those threads that have the same key // starting from the left (ie. index == 31). So for thread 31 this will be always zero. int nextIdx = 31 - __clz(rmask); s_vals[(threadIdx.x & 31)] = *myOut; //if (myKey == 0) printf("tid = %02d, IamNth = %02d, mask = 0x%08x, rmask = 0x%08x \n", threadIdx.x, IamNth, mymask, rmask); //bool done = __all(nextIdx < 0); // TODO: Unroll 5? while (!__all(nextIdx < 0)) { // Reduce towards those threads that have lower IamNth // Our thread reads the next one if our internal ID is even if ((IamNth & 0x1) == 0) { if (nextIdx >= 0){ // if (myKey == 0) printf("tid:%02d, add with %02d\n", threadIdx.x, nextIdx); *myOut = sumfunObj(*myOut, s_vals[nextIdx]); } // And writes to the shared memory if our internal ID is third on every 4-long subarray: if ((IamNth & 0x3) == 2) { // if (myKey == 0) printf("Tid %02d, store\n", threadIdx.x); s_vals[(threadIdx.x & 31)] = *myOut; } } // Now the beautiful part: Kill every other bit in the rmask bitfield. How, you ask? // Using ballot: Every bit we want to kill has IamNth odd, or conversely, we only // want to keep those bits that have IamNth even... 
rmask &= __ballot((IamNth & 0x1) == 0); nextIdx = 31 - __clz(rmask); // if (myKey == 0) printf("tid = %02d, next = %02d, key = %d\n", threadIdx.x, rmask, nextIdx, myKey); IamNth >>= 1; //printf("i = %d\n", i); } // And voila, we are done - write out the result: return Iwrite && (myKey >= 0); } } } #endif template <bool laststeps, typename SUMFUNTYPE, typename OUTPUTTYPE> static inline __device__ void myAtomicWarpAdd(OUTPUTTYPE* addr, OUTPUTTYPE val, volatile int* keyAddr, SUMFUNTYPE sumfunObj, bool Iwrite, int* warpmutex) { // Taken from http://forums.nvidia.com/index.php?showtopic=72925 // This is a tad slow, but allows arbitrary operation // For writes of 16 bytes or less AtomicCAS could be faster // (See CUDA programming guide) TAKE_WARP_MUTEX(0); //__shared__ int warpmutex; //INIT_WARP_MUTEX2(*warpmutex); //TAKE_WARP_MUTEX2(*warpmutex); bool write = Iwrite; #define MU_TEMP_MAGIC 0xffffaaaa *keyAddr = MU_TEMP_MAGIC; while (1) { // Vote whose turn is it - remember, one thread does succeed always!: if (write) *keyAddr = threadIdx.x; if (*keyAddr == MU_TEMP_MAGIC) break; if (*keyAddr == threadIdx.x) // We won! { // Do arbitrary atomic op: *addr = sumfunObj(*addr, val); write = false; *keyAddr = MU_TEMP_MAGIC; } } GIVE_WARP_MUTEX(0); //GIVE_WARP_MUTEX2(*warpmutex); #undef MU_TEMP_MAGIC } template <typename SUMFUNTYPE, typename OUTPUTTYPE> static inline __device__ void myAtomicAdd(OUTPUTTYPE* addr, OUTPUTTYPE val, volatile int* keyAddr, SUMFUNTYPE sumfunObj) { // Taken from http://forums.nvidia.com/index.php?showtopic=72925 // This is a tad slow, but allows arbitrary operation // For writes of 16 bytes or less AtomicCAS could be faster // (See CUDA programming guide) bool write = true; #define MU_TEMP_MAGIC 0xffffaaaa *keyAddr = MU_TEMP_MAGIC; while (1) { // Vote whose turn is it - remember, one thread does succeed always!: if (write ) *keyAddr = threadIdx.x; if (*keyAddr == MU_TEMP_MAGIC) break; if (*keyAddr == threadIdx.x) // We won! { // Do arbitrary atomic op: *addr = sumfunObj(*addr, val); write = false; *keyAddr = MU_TEMP_MAGIC; } } #undef MU_TEMP_MAGIC } /*static __inline__ __device__ unsigned long long int atomicAdd(unsigned long long int *address, unsigned long long int val) { return __ullAtomicAdd(address, val); }*/ template <typename OUTPUTTYPE> static inline __device__ void atomicAdd(OUTPUTTYPE* addr, OUTPUTTYPE val) { //*addr = val; } template <typename OUTPUTTYPE> static inline __device__ void atomicAdd(OUTPUTTYPE* addr, int val) { //*addr = val; } #if 0 template <typename OUTPUTTYPE> static inline __device__ void atomicAdd(OUTPUTTYPE* addr, float val) { //*addr = val; } #endif template <typename OUTPUTTYPE> static inline __device__ void atomicAdd(OUTPUTTYPE* addr, unsigned int val) { //*addr = val; } template <typename SUMFUNTYPE, typename OUTPUTTYPE> static inline __device__ void myAtomicAddStats(OUTPUTTYPE* addr, OUTPUTTYPE val, volatile int* keyAddr, SUMFUNTYPE sumfunObj, int* nSameOut, bool Iwrite) { // Taken from http://forums.nvidia.com/index.php?showtopic=72925 bool write = true; *keyAddr = 0xffffffff; while (Iwrite) { // Vote whose turn is it - remember, one thread does succeed always!: if (write ) *keyAddr = threadIdx.x; if (*keyAddr == 0xffffffff) break; if (*keyAddr == threadIdx.x) // We won! { // Do arbitrary atomic op: *addr = sumfunObj(*addr, val); write = false; *keyAddr = 0xffffffff; } else { *nSameOut = *nSameOut + 1; } } { // Then find max __shared__ int nSame[HBLOCK_SIZE]; nSame[threadIdx.x] = *nSameOut; #define TMPMAX(A,B) (A) > (B) ? 
(A) : (B) #define tidx threadIdx.x if (tidx < 16) nSame[tidx] = TMPMAX(nSame[tidx] , nSame[tidx + 16]); if (tidx < 8) nSame[tidx] = TMPMAX(nSame[tidx] , nSame[tidx + 8]); if (tidx < 4) nSame[tidx] = TMPMAX(nSame[tidx] , nSame[tidx + 4]); if (tidx < 2) nSame[tidx] = TMPMAX(nSame[tidx] , nSame[tidx + 2]); if (tidx < 1) nSame[tidx] = TMPMAX(nSame[tidx] , nSame[tidx + 1]); #undef TMPMAX #undef tidx // Broadcast to all threads *nSameOut = nSame[0]; } } // TODO: Make unique within one warp? template<histogram_type histotype, bool checkNSame, typename SUMFUNTYPE, typename OUTPUTTYPE> static inline __device__ bool reduceToUnique(OUTPUTTYPE* res, int myKey, int* nSame, SUMFUNTYPE sumfunObj, int* keys, OUTPUTTYPE* outputs) { keys[(threadIdx.x & 31)] = myKey; #if USE_BALLOT_HISTOGRAM return ballot_makeUnique<histotype, checkNSame>(sumfunObj, myKey, res, outputs, keys, nSame); #else { int i; bool writeResult = myKey >= 0; int myIdx = (threadIdx.x & 31) + 1; outputs[(threadIdx.x & 31)] = *res; // The assumption for sanity of this loop here is that all the data is in registers or shared memory and // hence this loop will not actually be __that__ slow.. Also it helps if the data is spread out (ie. there are // a lot of different indices here) for (i = 1; i < 32 && writeResult; i++) { if (myIdx >= 32) myIdx = 0; // Is my index the same as the index on the index-list? if (keys[myIdx] == myKey /*&& threadIdx.x != myIdx*/) { if (checkNSame) (*nSame)++; // If yes, then we can sum up the result using users sum-functor *res = sumfunObj(*res, outputs[myIdx]); // But if somebody else is summing up this index already, we don't need to (wasted effort done here) if (myIdx < threadIdx.x) writeResult = false; } myIdx++; } // Ok - we are done - now we can proceed in writing the result (if some other thread isn't doing it already) if (checkNSame) { // Manual reduce int tid = threadIdx.x; keys[tid] = *nSame; if (tid < 16) keys[tid] = keys[tid] > keys[tid + 16] ? keys[tid] : keys[tid+16]; if (tid < 8) keys[tid] = keys[tid] > keys[tid + 8] ? keys[tid] : keys[tid+8]; if (tid < 4) keys[tid] = keys[tid] > keys[tid + 4] ? keys[tid] : keys[tid+4]; if (tid < 2) keys[tid] = keys[tid] > keys[tid + 2] ? keys[tid] : keys[tid+2]; if (tid < 1) keys[tid] = keys[tid] > keys[tid + 1] ? keys[tid] : keys[tid+1]; *nSame = keys[0]; } return writeResult; } #endif } static inline __host__ __device__ void checkStrategyFun(bool *reduce, int nSame, int nSameTot, int step, int nBinSetslog2) { #if __CUDA_ARCH__ >= 200 #define STR_LIMIT 12 #else #define STR_LIMIT 24 #endif // TODO: Fix average case - a lot of things to tune here... 
if ((nSameTot > STR_LIMIT * step || nSame > STR_LIMIT)) *reduce = true; else *reduce = false; #undef STR_LIMIT } // Special case for floats (atomicAdd works only from __CUDA_ARCH__ 200 and up) template <typename SUMFUNTYPE> static inline __device__ void wrapAtomicAdd2(float* addr, float val, int* key, SUMFUNTYPE sumFunObj) { //*addr = val; #if __CUDA_ARCH__ >= 200 atomicAdd(addr, val); #else myAtomicAdd(addr, val, key, sumFunObj); #endif } template <typename SUMFUNTYPE,typename OUTPUTTYPE> static inline __device__ void wrapAtomicAdd2(OUTPUTTYPE* addr, OUTPUTTYPE val, int* key, SUMFUNTYPE sumFunObj) { atomicAdd(addr, val); } // Special case for floats (atomicAdd works only from __CUDA_ARCH__ 200 and up) template <bool laststeps, typename SUMFUNTYPE> static inline __device__ void wrapAtomicAdd2Warp(float* addr, float val, int* key, SUMFUNTYPE sumFunObj, bool Iwrite, int* warpmutex) { //*addr = val; #if __CUDA_ARCH__ >= 200 if (Iwrite) atomicAdd(addr, val); #else myAtomicWarpAdd<laststeps>(addr, val, key, sumFunObj, Iwrite, warpmutex); #endif } template <bool laststeps, typename SUMFUNTYPE,typename OUTPUTTYPE> static inline __device__ void wrapAtomicAdd2Warp(OUTPUTTYPE* addr, OUTPUTTYPE val, int* key, SUMFUNTYPE sumFunObj, bool Iwrite, int* warpmutex) { if (Iwrite) atomicAdd(addr, val); } template <typename OUTPUTTYPE, typename SUMFUNTYPE> static inline __device__ void wrapAtomicAdd(OUTPUTTYPE* addr, OUTPUTTYPE val, int* key, SUMFUNTYPE sumFunObj) { //*addr = val; #if __CUDA_ARCH__ >= 120 wrapAtomicAdd2(addr, val, key, sumFunObj); #else myAtomicAdd(addr, val, key, sumFunObj); #endif } template <typename OUTPUTTYPE, typename SUMFUNTYPE> static inline __device__ void wrapAtomicInc(OUTPUTTYPE* addr, int* key, SUMFUNTYPE sumFunObj) { //*addr = val; #if __CUDA_ARCH__ >= 120 wrapAtomicAdd2((int*)addr, 1, key, sumFunObj); #else //myAtomicAdd((int*)addr, 1, key, sumFunObj); #endif } template <typename SUMFUNTYPE> static inline __device__ void wrapAtomicInc(int* addr, int* key, SUMFUNTYPE sumFunObj) { //*addr = val; #if __CUDA_ARCH__ >= 120 wrapAtomicAdd2(addr, 1, key, sumFunObj); #else myAtomicAdd(addr, 1, key, sumFunObj); #endif } template <bool laststeps, typename OUTPUTTYPE, typename SUMFUNTYPE> static inline __device__ void wrapAtomicAddWarp(OUTPUTTYPE* addr, OUTPUTTYPE val, int* key, SUMFUNTYPE sumFunObj, bool Iwrite, int* warpmutex) { //*addr = val; #if __CUDA_ARCH__ >= 120 wrapAtomicAdd2Warp<laststeps>(addr, val, key, sumFunObj, Iwrite, warpmutex); #else myAtomicWarpAdd<laststeps>(addr, val, key, sumFunObj, Iwrite, warpmutex); #endif } template <bool laststeps, typename OUTPUTTYPE, typename SUMFUNTYPE> static inline __device__ void wrapAtomicIncWarp(OUTPUTTYPE* addr, int* key, SUMFUNTYPE sumFunObj, bool Iwrite, int* warpmutex) { //*addr = val; #if __CUDA_ARCH__ >= 120 wrapAtomicAdd2Warp<laststeps>((int*)addr, 1, key, sumFunObj, Iwrite, warpmutex); #else //myAtomicAdd((int*)addr, 1, key, sumFunObj); #endif } template <bool laststeps, typename SUMFUNTYPE> static inline __device__ void wrapAtomicIncWarp(int* addr, int* key, SUMFUNTYPE sumFunObj, bool Iwrite, int* warpmutex) { //*addr = val; #if __CUDA_ARCH__ >= 120 wrapAtomicAdd2Warp<laststeps>(addr, 1, key, sumFunObj, Iwrite, warpmutex); #else myAtomicWarpAdd<laststeps>(addr, 1, key, sumFunObj, Iwrite, warpmutex); #endif } // TODO: Consider the following: // First private hash for each warp - later, share hash-tables between warps // Try also: private hashes for some threads of one warp etc template <typename OUTPUTTYPE> struct myHash { int* 
keys; #if !USE_ATOMICS_HASH int* locks; #endif OUTPUTTYPE* vals; OUTPUTTYPE* myBlockOut; }; template <typename OUTPUTTYPE> static inline __device__ void InitHash(struct myHash<OUTPUTTYPE> *hash, OUTPUTTYPE zero, int hashSizelog2) { int nloops = (1 << hashSizelog2) >> LBLOCK_SIZE_LOG2; int* myEntry = &hash->keys[threadIdx.x]; for (int i = 0; i < nloops; i++) { *myEntry = -1; myEntry += LBLOCK_SIZE; } if ((nloops << LBLOCK_SIZE_LOG2) + threadIdx.x < (1 << hashSizelog2)) { *myEntry = -1; } // Done } #if 0 // OLD code template <typename SUMFUNTYPE, typename OUTPUTTYPE> static inline __device__ void FlushHash(struct myHash<OUTPUTTYPE> *hash, SUMFUNTYPE sumfunObj, int hashSizelog2) { int nloops = (1 << hashSizelog2) >> LBLOCK_SIZE_LOG2; OUTPUTTYPE* myVal = &hash->vals[threadIdx.x]; int* key = &hash->keys[threadIdx.x]; for (int i = 0; i < nloops; i++) { int keyIndex = *key; if (keyIndex >= 0) { hash->myBlockOut[keyIndex] = sumfunObj(*myVal, hash->myBlockOut[keyIndex]); *key = -1; } key += LBLOCK_SIZE; myVal += LBLOCK_SIZE; } if ((nloops << LBLOCK_SIZE_LOG2) + threadIdx.x < (1 << hashSizelog2)) { int keyIndex = *key; if (keyIndex >= 0){ hash->myBlockOut[keyIndex] = sumfunObj(*myVal, hash->myBlockOut[keyIndex]); *key = -1; } } } #endif // 0 // See: http://www.burtleburtle.net/bob/hash/doobs.html // Mix by Bob Jenkins #define HISTO_JENKINS_MIX(A, B, C) \ do { \ A -= B; A -= C; A ^= (C>>13); \ B -= C; B -= A; B ^= (A<<8); \ C -= A; C -= B; C ^= (B>>13); \ A -= B; A -= C; A ^= (C>>12); \ B -= C; B -= A; B ^= (A<<16); \ C -= A; C -= B; C ^= (B>>5); \ A -= B; A -= C; A ^= (C>>3); \ B -= C; B -= A; B ^= (A<<10); \ C -= A; C -= B; C ^= (B>>15); \ } while (0) static inline __device__ unsigned int histogramHashFunction(int key) { #if USE_JENKINS_HASH unsigned int a = (unsigned int)key; unsigned int c,b; // TODO: What are good constants? b = 0x9e3779b9; c = 0xf1232345; HISTO_JENKINS_MIX(a, b, c); return c; #else // Golden ratio hash return (0x9e3779b9u * (unsigned int)key); #endif } #if USE_ATOMICS_HASH template <typename SUMFUNTYPE, typename OUTPUTTYPE> static inline __device__ void AddToHash(OUTPUTTYPE res, int myKey, struct myHash<OUTPUTTYPE> *hash, SUMFUNTYPE sumfunObj, int hashSizelog2, bool Iwrite, bool unique) { if (unique) { if (Iwrite) { hash->myBlockOut[myKey] = sumfunObj(res, hash->myBlockOut[myKey]); } return; } unsigned int hashkey = histogramHashFunction(myKey); volatile __shared__ bool hashFull; int index = (int)(hashkey >> (32 - hashSizelog2)); bool Iamdone = !Iwrite; bool IFlush = Iwrite; hashFull = true; while (hashFull) { // Mark here hash full, and if any thread has problems finding // free entry in hash, then that thread sets hashFull to nonzero if (threadIdx.x == 0) hashFull = false; // Do atomic-part int old = -2; int expect = -1; while (!Iamdone && !hashFull) { old = atomicCAS(&hash->keys[index], expect, -3); if (old == expect) // We won! { int key = old; if (key == -1 || key == myKey) { if (key == -1) { hash->vals[index] = res; } else { hash->vals[index] = sumfunObj(res, hash->vals[index]); IFlush = false; } hash->keys[index] = myKey; Iamdone = true; } else { hashFull = true; hash->keys[index] = key; expect = -1; } } else { if (old != myKey) { hashFull = true; expect = -1; } else { expect = old; } } } if (IFlush && Iamdone) { OUTPUTTYPE* myVal = &hash->vals[index]; int* key = &hash->keys[index]; // TODO: Workaround - get rid of if. Where do the extra flushes come from? 
if (*key >= 0) hash->myBlockOut[*key] = sumfunObj(*myVal, hash->myBlockOut[*key]); //hash->myBlockOut[myKey] = sumfunObj(*myVal, hash->myBlockOut[myKey]); *key = -1; } } } #else template <typename SUMFUNTYPE, typename OUTPUTTYPE> static inline __device__ void AddToHash(OUTPUTTYPE res, int myKey, struct myHash<OUTPUTTYPE> *hash, SUMFUNTYPE sumfunObj, int hashSizelog2, bool Iwrite, bool unique) { if (unique) { if (Iwrite) { hash->myBlockOut[myKey] = sumfunObj(res, hash->myBlockOut[myKey]); } return; } unsigned int hashkey = histogramHashFunction(myKey); volatile __shared__ int hashFull; int index = (int)(hashkey >> (32 - hashSizelog2)); bool Iamdone = false; bool IFlush = Iwrite; // TODO: syncthreads()... hashFull = -10; while (hashFull != 0) { volatile int* lock = &hash->locks[index]; bool write = Iwrite; #define TMP_LOCK_MAGIC 0xfffffffe *lock = TMP_LOCK_MAGIC; // Mark here hash full, and if any thread has problems finding // free entry in hash, then that thread sets hashFull to nonzero if (threadIdx.x == 0) hashFull = 0; // Do atomic-part while (1) { if (!Iamdone && write) *lock = threadIdx.x; if (*lock == TMP_LOCK_MAGIC) break; if (*lock == threadIdx.x) // We won! { int key = hash->keys[index]; if (key == -1) { hash->keys[index] = myKey; hash->vals[index] = res; Iamdone = true; } else if (key == myKey) { hash->vals[index] = sumfunObj(res, hash->vals[index]); Iamdone = true; IFlush = false; } else { hashFull = 1; } // Do arbitrary atomic op: write = false; *lock = TMP_LOCK_MAGIC; } } if (IFlush) { OUTPUTTYPE* myVal = &hash->vals[index]; int* key = &hash->keys[index]; // TODO: Workaround - get rid of if. Where do the extra flushes come from? if (*key >= 0) hash->myBlockOut[*key] = sumfunObj(*myVal, hash->myBlockOut[*key]); *key = -1; } } #undef TMP_LOCK_MAGIC } #endif template <histogram_type histotype, int nMultires, bool reduce, bool checkStrategy, bool laststep, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> static inline __device__ void histo_largenbin_step(INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, OUTPUTTYPE zero, INDEXT* myStart, INDEXT end, struct myHash<OUTPUTTYPE> *hash, OUTPUTTYPE* blockOut, int nOut, int stepNum, int stepsleft, int* nSameTot, bool* reduceOut, int hashSizelog2, OUTPUTTYPE* rOuts, int* rKeys) { if (!laststep) { if (checkStrategy) { int myKeys[nMultires]; int nSame = 0; OUTPUTTYPE res[nMultires]; xformObj(input, *myStart, &myKeys[0], &res[0], nMultires); // TODO: Unroll? addtoHash is a big function.. 
Hmm but, unrolling would enable registers probably bool Iwrite; #define ADD_ONE_RESULT(RESIDX, NSAME, CHECK) \ do { if (RESIDX < nMultires) { \ Iwrite = reduceToUnique<histotype, CHECK> \ (&res[RESIDX % nMultires], myKeys[RESIDX % nMultires], NSAME, sumfunObj, rKeys, rOuts); \ if ((threadIdx.x) < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1; \ AddToHash(res[RESIDX % nMultires], myKeys[RESIDX % nMultires], hash, sumfunObj, hashSizelog2, Iwrite, true); \ } } while (0) ADD_ONE_RESULT(0, &nSame, true); ADD_ONE_RESULT(1, NULL, false); ADD_ONE_RESULT(2, NULL, false); ADD_ONE_RESULT(3, NULL, false); #undef ADD_ONE_RESULT //#pragma unroll for (int resid = 4; resid < nMultires; resid++) { bool Iwrite = reduceToUnique<histotype, false>(&res[resid], myKeys[resid], NULL, sumfunObj, rKeys, rOuts); if ((threadIdx.x) < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1; AddToHash(res[resid], myKeys[resid], hash, sumfunObj, hashSizelog2, Iwrite, true); } *nSameTot += nSame; checkStrategyFun(reduceOut, nSame, *nSameTot, stepNum, 0); *myStart += LBLOCK_SIZE; } else { INDEXT startLim = *myStart + ((LBLOCK_SIZE << LARGE_NBIN_CHECK_INTERVAL_LOG2) - LBLOCK_SIZE); for (; *myStart < startLim; *myStart += LBLOCK_SIZE) { int myKeys[nMultires]; OUTPUTTYPE res[nMultires]; xformObj(input, *myStart, &myKeys[0], &res[0], nMultires); //#pragma unroll bool Iwrite = true; #define ADD_ONE_RESULT(RES) \ do { if (RES < nMultires) { \ if (reduce){ Iwrite = reduceToUnique<histotype, false>(&res[RES % nMultires], \ myKeys[RES % nMultires], NULL, sumfunObj, rKeys, rOuts); \ if (threadIdx.x < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1;} \ AddToHash(res[RES % nMultires], myKeys[RES % nMultires], hash, \ sumfunObj, hashSizelog2, Iwrite, reduce); \ } } while (0) ADD_ONE_RESULT(0); ADD_ONE_RESULT(1); ADD_ONE_RESULT(2); ADD_ONE_RESULT(3); #undef ADD_ONE_RESULT for (int resid = 4; resid < nMultires; resid++) { bool Iwrite = true; if (reduce){ Iwrite = reduceToUnique<histotype, false>(&res[resid], myKeys[resid], NULL, sumfunObj, rKeys, rOuts); if (threadIdx.x < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1; } AddToHash(res[resid], myKeys[resid], hash, sumfunObj, hashSizelog2, Iwrite, reduce); } } } } else // These are the last steps then { for (int substep = 0; substep < stepsleft; substep++) { int myKeys[nMultires]; OUTPUTTYPE res[nMultires]; bool Iwrite = false; if (*myStart < end) { Iwrite = true; xformObj(input, *myStart, &myKeys[0], &res[0], nMultires); } else { #pragma unroll for (int resid = 0; resid < nMultires; resid++) { res[resid] = zero; myKeys[resid] = 0; } } //#pragma unroll { bool Iwrite2 = Iwrite; #define ADD_ONE_RESULT(RES) \ do { if (RES < nMultires) { \ if (reduce){ Iwrite2 = reduceToUnique<histotype, false> \ (&res[RES % nMultires], myKeys[RES % nMultires], NULL, sumfunObj, rKeys, rOuts); \ if (threadIdx.x < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1; } \ AddToHash(res[RES % nMultires], myKeys[RES % nMultires], hash, sumfunObj, hashSizelog2, Iwrite2, reduce); \ } } while(0) ADD_ONE_RESULT(0); ADD_ONE_RESULT(1); ADD_ONE_RESULT(2); ADD_ONE_RESULT(3); #undef ADD_ONE_RESULT for (int resid = 4; resid < nMultires; resid++) { //bool Iwrite2 = true; if (reduce){ Iwrite2 = reduceToUnique<histotype, false>(&res[resid], myKeys[resid], NULL, sumfunObj, rKeys, rOuts); if (threadIdx.x < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1; } AddToHash(res[resid], myKeys[resid], hash, sumfunObj, hashSizelog2, Iwrite2, reduce); } } *myStart += LBLOCK_SIZE; } } } template <histogram_type histotype, int nMultires, 
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> __global__ void histo_kernel_largeNBins( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* blockOut, int nOut, int nSteps, int hashSizelog2) { extern __shared__ int keys[]; #if USE_ATOMICS_HASH OUTPUTTYPE* vals = (OUTPUTTYPE*)(&keys[1 << hashSizelog2]); if (hashSizelog2 < LBLOCK_SIZE_LOG2) vals = &keys[1 << LBLOCK_SIZE_LOG2]; #else int* locks = &keys[1 << hashSizelog2]; if (hashSizelog2 < LBLOCK_SIZE_LOG2) locks = &keys[1 << LBLOCK_SIZE_LOG2]; OUTPUTTYPE* vals = (OUTPUTTYPE*)(&locks[1 << hashSizelog2]); #endif /*int* rKeys = (int*)(&vals[1 << hashSizelog2]); OUTPUTTYPE* rOuts = (OUTPUTTYPE*)(&rKeys[LBLOCK_SIZE]);*/ int* rKeys = &keys[0]; OUTPUTTYPE* rOuts = vals; struct myHash<OUTPUTTYPE> hash; hash.keys = keys; #if !USE_ATOMICS_HASH hash.locks = locks; #endif hash.vals = vals; // Where do we put the results from our warp (block)? hash.myBlockOut = &blockOut[nOut * blockIdx.x]; INDEXT myStart = start + (INDEXT)(((blockIdx.x * nSteps) << LBLOCK_SIZE_LOG2) + threadIdx.x); // Assert that myStart is not out of bounds! int nFullSteps = nSteps >> LARGE_NBIN_CHECK_INTERVAL_LOG2; bool reduce = false; InitHash(&hash, zero, hashSizelog2); int nSameTot = 0; for (int fstep = 0; fstep < nFullSteps; fstep++) { int stepNum = fstep << LARGE_NBIN_CHECK_INTERVAL_LOG2; histo_largenbin_step<histotype, nMultires, true, true, false,INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, zero, &myStart, end, &hash, blockOut, nOut, stepNum, 0, &nSameTot, &reduce, hashSizelog2, rOuts, rKeys); if (reduce) { histo_largenbin_step<histotype, nMultires, true, false, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, zero, &myStart, end, &hash, blockOut, nOut, stepNum + 1, 0, &nSameTot, &reduce, hashSizelog2, rOuts, rKeys); } else { histo_largenbin_step<histotype, nMultires, false, false, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, zero, &myStart, end, &hash, blockOut, nOut, stepNum + 1, 0, &nSameTot, &reduce, hashSizelog2, rOuts, rKeys); } } // Last steps int nstepsleft = nSteps - (nFullSteps << LARGE_NBIN_CHECK_INTERVAL_LOG2); if (nstepsleft > 0) { int stepNum = nFullSteps << LARGE_NBIN_CHECK_INTERVAL_LOG2; if (reduce) histo_largenbin_step<histotype, nMultires, true, false, true, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, zero, &myStart, end, &hash, blockOut, nOut, stepNum, nstepsleft, &nSameTot, &reduce, hashSizelog2, rOuts, rKeys); else histo_largenbin_step<histotype, nMultires, false, false, true, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, zero, &myStart, end, &hash, blockOut, nOut, stepNum, nstepsleft, &nSameTot, &reduce, hashSizelog2, rOuts, rKeys); } // Flush values still in hash //FlushHash(&hash, sumfunObj, hashSizelog2); } #if USE_MEDIUM_PATH // template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> __global__ void histo_kernel_mediumNBins( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* blockOut, int nOut, int nSteps) { #if __CUDA_ARCH__ >= 120 OUTPUTTYPE* ourOut = &blockOut[nOut * (threadIdx.x % MED_THREAD_DEGEN) * blockIdx.x]; INDEXT myStart = start + (INDEXT)(((blockIdx.x * 
nSteps) << MEDIUM_BLOCK_SIZE_LOG2) + threadIdx.x); bool reduce = false; int nSameTot = 0; for (int step = 0; step < nSteps - 1; step++) { bool check = false; int myKey[nMultires]; OUTPUTTYPE myOut[nMultires]; xformObj(input, myStart, &myKey[0], &myOut[0],nMultires); // TODO: magic constant if ((step & 63) == 0) check = true; { int nSame; __shared__ int keys[MEDIUM_BLOCK_SIZE]; __shared__ OUTPUTTYPE rOut[MEDIUM_BLOCK_SIZE]; int warpIdx = threadIdx.x >> 5; int* wkeys = &keys[warpIdx << 5]; OUTPUTTYPE* wOut = &rOut[warpIdx << 5]; bool Iwrite; #define ADD_ONE_RESULT(RESID) \ do { if (RESID < nMultires) { \ if (reduce || check){ \ if (check) Iwrite = reduceToUnique<histotype, true> \ (&myOut[RESID % nMultires], myKey[RESID % nMultires], \ &nSame, sumfunObj, wkeys, wOut); \ else Iwrite = reduceToUnique<histotype, false> \ (&myOut[RESID % nMultires], myKey[RESID % nMultires], NULL, sumfunObj, \ wkeys, wOut); \ if (Iwrite) \ atomicAdd(&ourOut[myKey[RESID % nMultires]], myOut[RESID % nMultires]); \ if (check){ \ nSameTot += nSame; \ checkStrategyFun(&reduce, nSame, nSameTot, step, 0); \ check = false; \ } \ } else { \ if (histotype == histogram_atomic_inc) \ atomicAdd(&ourOut[myKey[RESID % nMultires]], 1); \ else if (histotype == histogram_atomic_add) \ atomicAdd(&ourOut[myKey[RESID % nMultires]], myOut[RESID % nMultires]); \ } } \ } while(0) ADD_ONE_RESULT(0); ADD_ONE_RESULT(1); ADD_ONE_RESULT(2); ADD_ONE_RESULT(3); //#pragma unroll for (int resid = 4; resid < nMultires; resid++) { ADD_ONE_RESULT(resid); } } myStart += MEDIUM_BLOCK_SIZE; } if (myStart < end) { int myKey[nMultires]; OUTPUTTYPE myOut[nMultires]; xformObj(input, myStart, &myKey[0], &myOut[0],nMultires); for (int resid = 0; resid < nMultires; resid++) { if (histotype == histogram_atomic_inc) { atomicAdd(&ourOut[myKey[resid]], 1); } else if (histotype == histogram_atomic_add) { atomicAdd(&ourOut[myKey[resid]], myOut[resid]); } } } #endif // __CUDA_ARCH__ } #endif // USE_MEDIUM_PATH static int determineHashSizeLog2(size_t outSize, int* nblocks, hipDeviceProp_t* props) { // TODO: Magic hat-constant 500 reserved for inputs, how to compute? int sharedTot = (props->sharedMemPerBlock - 500) /* / LBLOCK_WARPS*/; //int sharedTot = 32000; // How many blocks of 32 keys could we have? //int nb32Max = sharedTot / (32 * outSize); // But ideally we should run at least 4 active blocks per SM, // How can we balance this? Well - with very low ablock-values (a), // we perform bad, but after 4, adding more // will help less and less, whereas adding more to the hash always helps! 
#if USE_ATOMICS_HASH outSize += sizeof(int); #else outSize += sizeof(int); #endif int naMax = sharedTot / (32 * outSize); while (naMax > numActiveUpperLimit) naMax >>= 1; int nb32 = sharedTot / (32 * outSize * naMax); // Now we have "number of pieces", use it to compute some nice power-of-two hash-size int hashSize = nb32 * 32; unsigned int res = 0; if (hashSize >= 1<<16) { hashSize >>= 16; res += 16; } if (hashSize >= 1<< 8) { hashSize >>= 8; res += 8; } if (hashSize >= 1<< 4) { hashSize >>= 4; res += 4; } if (hashSize >= 1<< 2) { hashSize >>= 2; res += 2; } if (hashSize >= 1<< 1) { res += 1; } // Now res holds the log2 of hash size => n active blocksMEDIUM_BLOCK_SIZE_LOG2 = sharedTot / (outSize << res); *nblocks = (sharedTot / (outSize << res)) * props->multiProcessorCount; if (*nblocks > props->multiProcessorCount * 8) *nblocks = props->multiProcessorCount * 8; return res; } template <typename OUTPUTTYPE> __global__ void initKernel(OUTPUTTYPE* tmpOut, OUTPUTTYPE zeroVal, int tmpOutSize, int steps) { int idx = blockIdx.x * blockDim.x * steps + threadIdx.x; for (int step = 0; step < steps; step++) { if (idx < tmpOutSize) tmpOut[idx] = zeroVal; idx += blockDim.x; } } template <histogram_type histotype, typename OUTPUTTYPE> static int getLargeBinTmpbufsize(int nOut, hipDeviceProp_t* props, int cuda_arch) { int nblocks; int hashSizelog2 = determineHashSizeLog2(sizeof(OUTPUTTYPE), &nblocks, props); int arrLen = nblocks; #if USE_MEDIUM_PATH if (cuda_arch >= 120 && (histotype == histogram_atomic_inc || histotype == histogram_atomic_add)) arrLen *= MED_THREAD_DEGEN; #endif return (arrLen + 1) * nOut * sizeof(OUTPUTTYPE); } template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> static void callHistogramKernelLargeNBins( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut, hipDeviceProp_t* props, int cuda_arch, hipStream_t stream, int* getTmpBufSize, void* tmpBuffer, bool outInDev) { int nblocks; int hashSizelog2 = determineHashSizeLog2(sizeof(OUTPUTTYPE), &nblocks, props); INDEXT size = end - start; // Check if there is something to do actually... if (end <= start) { if (getTmpBufSize) getTmpBufSize = 0; return; } dim3 block = LBLOCK_SIZE; dim3 grid = nblocks; int arrLen = nblocks; #if USE_MEDIUM_PATH if (cuda_arch >= 120 && (histotype == histogram_atomic_inc || histotype == histogram_atomic_add)) arrLen *= MED_THREAD_DEGEN; #endif INDEXT nSteps = size / (INDEXT)( LBLOCK_SIZE * nblocks); OUTPUTTYPE* tmpOut; //int n = nblocks; if (getTmpBufSize) { *getTmpBufSize = (arrLen + 1) * nOut * sizeof(OUTPUTTYPE); return; } if (tmpBuffer){ tmpOut = (OUTPUTTYPE*)tmpBuffer; } else { size_t allocSize = (arrLen + 1) * nOut * sizeof(OUTPUTTYPE); hipMalloc((void**)&tmpOut, allocSize); } //printf("Using hash-based histogram: hashsize = %d, nblocksToT = %d\n", (1 << hashSizelog2), nblocks); #if USE_ATOMICS_HASH int extSharedNeeded = (1 << hashSizelog2) * (sizeof(OUTPUTTYPE) + sizeof(int)); #else int extSharedNeeded = (1 << hashSizelog2) * (sizeof(OUTPUTTYPE) + sizeof(int) * 2); #endif // The shared memory here is needed for the reduction code (ie. reduce to unique) // TODO: new hash-code could probably reuse the memory reserved for the hash-table, // it would just need to reinit the keys to -1 after use - think about it. 
if (cuda_arch >= 200 && histotype == histogram_atomic_inc) { if (hashSizelog2 < LBLOCK_SIZE_LOG2) extSharedNeeded += (sizeof(int) << (LBLOCK_SIZE_LOG2 - hashSizelog2)); } else { if (hashSizelog2 < LBLOCK_SIZE_LOG2) extSharedNeeded += ((sizeof(OUTPUTTYPE) + sizeof(int)) << (LBLOCK_SIZE_LOG2 - hashSizelog2)); } //printf("binsets = %d, steps = %d\n", (1 << nKeysetslog2), nsteps); { #define IBLOCK_SIZE_LOG2 7 #define IBLOCK_SIZE (1 << IBLOCK_SIZE_LOG2) int initPaddedSize = ((arrLen * nOut) + (IBLOCK_SIZE) - 1) & (~((IBLOCK_SIZE) - 1)); const dim3 initblock = IBLOCK_SIZE; dim3 initgrid = initPaddedSize >> ( IBLOCK_SIZE_LOG2 ); int nsteps = 1; while (initgrid.x > (1 << 14)) { initgrid.x >>= 1; nsteps <<= 1; if (nsteps * initgrid.x * IBLOCK_SIZE < arrLen * nOut) initgrid.x++; } hipLaunchKernelGGL(( initKernel), dim3(initgrid),dim3(initblock),0,stream, tmpOut, zero, arrLen * nOut, nsteps); } //int medExtShared = nOut; //const int shLimit = 0; //const int shLimit = 0;//16000 / 2; // Codepath below is a lot faster for random bins, a tad faster for real use-case // and a lot slower for degenerate key-distributions #if USE_MEDIUM_PATH if (cuda_arch >= 120 && (histotype == histogram_atomic_inc || histotype == histogram_atomic_add)) { const dim3 block = MEDIUM_BLOCK_SIZE; dim3 grid = nblocks; INDEXT nSteps = size / (INDEXT)( MEDIUM_BLOCK_SIZE * nblocks); INDEXT nFullSteps = 1; if (nSteps <= 0) { nFullSteps = 0; nblocks = (size >> MEDIUM_BLOCK_SIZE_LOG2); if ((nblocks << MEDIUM_BLOCK_SIZE_LOG2) < size) nblocks++; } if (nSteps > MAX_NLHSTEPS) { nFullSteps = size / ( MEDIUM_BLOCK_SIZE * nblocks * MAX_NLHSTEPS); nSteps = MAX_NLHSTEPS; } for (INDEXT step = 0; step < nFullSteps; step++) { hipLaunchKernelGGL(( histo_kernel_mediumNBins<histotype, nMultires>), dim3(grid), dim3(block), 0, stream, input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, nSteps); start += (MEDIUM_BLOCK_SIZE * (INDEXT)nblocks * nSteps); } size = end - start; nSteps = size / (INDEXT)( MEDIUM_BLOCK_SIZE * nblocks); if (nSteps > 0) { hipLaunchKernelGGL(( histo_kernel_mediumNBins<histotype, nMultires>), dim3(grid), dim3(block), 0, stream, input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, nSteps); start += (MEDIUM_BLOCK_SIZE * (INDEXT)nblocks * nSteps); size = end - start; } if (size > 0) { int ntblocks = size / ( MEDIUM_BLOCK_SIZE ); if (ntblocks * MEDIUM_BLOCK_SIZE < size) ntblocks++; grid.x = ntblocks; hipLaunchKernelGGL(( histo_kernel_mediumNBins<histotype, nMultires>), dim3(grid), dim3(block), 0, stream, input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, 1); } } else #endif // USE_MEDIUM_PATH { INDEXT nFullSteps = 1; if (nSteps <= 0) { nFullSteps = 0; nblocks = (size >> LBLOCK_SIZE_LOG2); if ((nblocks << LBLOCK_SIZE_LOG2) < size) nblocks++; } if (nSteps > MAX_NLHSTEPS) { nFullSteps = size / ( LBLOCK_SIZE * (INDEXT)nblocks * MAX_NLHSTEPS); nSteps = MAX_NLHSTEPS; } for (int step = 0; step < nFullSteps; step++) { hipLaunchKernelGGL(( histo_kernel_largeNBins<histotype, nMultires>), dim3(grid), dim3(block), extSharedNeeded, stream, input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, nSteps, hashSizelog2); start += (LBLOCK_SIZE * (INDEXT)nblocks * nSteps); } size = end - start; nSteps = size / ( LBLOCK_SIZE * (INDEXT)nblocks); if (nSteps > 0) { hipLaunchKernelGGL(( histo_kernel_largeNBins<histotype, nMultires>), dim3(grid), dim3(block), extSharedNeeded, stream, input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, nSteps, hashSizelog2); start += (LBLOCK_SIZE * (INDEXT)nblocks * nSteps); size = end - 
start; } if (size > 0) { int ntblocks = size / ( LBLOCK_SIZE ); if (ntblocks * LBLOCK_SIZE < size) ntblocks++; grid.x = ntblocks; hipLaunchKernelGGL(( histo_kernel_largeNBins<histotype, nMultires>), dim3(grid), dim3(block), extSharedNeeded, stream, input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, 1, hashSizelog2); } } #if H_ERROR_CHECKS hipError_t error = hipGetLastError(); if (error != hipSuccess) printf("Cudaerror = %s\n", hipGetErrorString( error )); #endif // OK - so now tmpOut contains our gold - we just need to dig it out now enum hipMemcpyKind fromOut = outInDev ? hipMemcpyDeviceToDevice : hipMemcpyHostToDevice; enum hipMemcpyKind toOut = outInDev ? hipMemcpyDeviceToDevice : hipMemcpyDeviceToHost; if (stream != 0) hipMemcpyAsync(&tmpOut[arrLen * nOut], out, sizeof(OUTPUTTYPE) * nOut, fromOut, stream); else hipMemcpy(&tmpOut[arrLen * nOut], out, sizeof(OUTPUTTYPE) * nOut, fromOut); grid.x = nOut; //grid.x = nOut >> LBLOCK_SIZE_LOG2; //if ((grid.x << LBLOCK_SIZE_LOG2) < nOut) grid.x++; block.x = GATHER_BLOCK_SIZE; hipLaunchKernelGGL(( gatherKernel), dim3(grid), dim3(block), 0, stream, sumfunObj, tmpOut, nOut, arrLen /** LBLOCK_WARPS*/, zero); // TODO: Async copy here also??? if (outInDev && stream != 0) hipMemcpyAsync(out, tmpOut, nOut*sizeof(OUTPUTTYPE), toOut, stream); else hipMemcpy(out, tmpOut, nOut*sizeof(OUTPUTTYPE), toOut); // CPU-code path for debugging here: /* { int resIdx; int i; OUTPUTTYPE* h_tmp = (OUTPUTTYPE*)malloc(nblocks * nOut * sizeof(OUTPUTTYPE)); //parallel_copy(h_tmp, MemType_HOST, tmpOut, MemType_DEV, n * nOut * sizeof(OUTPUTTYPE)); hipMemcpy(h_tmp, tmpOut, nblocks*nOut*sizeof(OUTPUTTYPE), hipMemcpyDeviceToHost); for (resIdx = 0; resIdx < nOut; resIdx++) { OUTPUTTYPE res = out[resIdx]; for (i = 0; i < nblocks; i++) { res = sumfunObj(res, h_tmp[i * nOut + resIdx]); } out[resIdx] = sumfunObj(res, out[resIdx]); } free(h_tmp); } */ if (!tmpBuffer) hipFree(tmpOut); } static int determineNKeySetsLog2(size_t size_out, int nOut, hipDeviceProp_t* props) { // 32 threads per block, one block shares one binset // Go for 2x occupancy = 64 active threads per block // Hence if we have NBinSets, then we need tot_size x nOut x NBinSets x 2 bytes of shared // On sm_20 we have 48 000 bytes and on sm_1x 16 000 // Hence nbinsets = SharedMem / (2 * tot_size * nOut) // For example sm_20, 16 int bins: // nbinsets = 48000 / 2 * 4 * 16 = 48000 / 2*64 = 48000 / 128 = 375... // More than enough, but is it enough active threadblocks?? 
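    // NOTE: the estimate below deliberately uses the conservative sm_1x figure of 16000 bytes
    // instead of the 48k mentioned above, so it is a lower bound on how many bin sets would
    // fit. Since multiple bin sets are disabled further down anyway, it only has to answer
    // "does at least one set fit".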
int nBytesShared = 16000; size_t sizetot = size_out + sizeof(int); int nBinSets = nBytesShared / (sizetot * 2 * nOut); // NOTE: Disabling for now - advantages seem nonexistent // if (nBinSets >= 32) return 5; // if (nBinSets >= 16) return 4; // if (nBinSets >= 8) return 3; // if (nBinSets >= 4) return 2; // if (nBinSets >= 2) return 1; if (nBinSets >= 1) return 0; return -1; } #if __CUDA_ARCH__ >= 200 template <int nMultires> static inline __device__ bool checkForReduction (int* myKeys, int* rkeys) { // Idea - if there is a large number of degenerate entries then we don't need to check them all for degeneracy // TODO: Implement the wonderful idea //return ((threadIdx.x >> 5) & 3) < 3; #if 1 bool myKeyDegenerate; //TAKE_WARP_MUTEX(0); rkeys[threadIdx.x & 31] = myKeys[0]; // Check two thirds myKeyDegenerate = (myKeys[0] == (rkeys[(threadIdx.x + 1) & 31])) /*|| (myKeys[0] == (rkeys[(threadIdx.x + 8) & 31]))*/; //GIVE_WARP_MUTEX(0); unsigned int degenMask = __ballot(myKeyDegenerate); // Estimate number of degenerate keys - if all are degenerate, the estimate is accurate int nDegen = __popc(degenMask); if (nDegen > HISTOGRAM_DEGEN_LIMIT) return true; else return false; #endif } #endif template <histogram_type histotype, int nBinSetslog2, int nMultires, bool laststeps, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> static inline __device__ void histogramKernel_stepImpl( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT end, OUTPUTTYPE zero, int nOut, INDEXT startidx, OUTPUTTYPE* bins, int* locks, OUTPUTTYPE* rvals, int* rkeys, int* doReduce, bool checkReduce, int* warpmutex) { int myKeys[nMultires]; OUTPUTTYPE vals[nMultires]; bool doWrite = true; if (laststeps){ if (startidx < end) { xformObj(input, startidx, &myKeys[0], &vals[0], nMultires); } else { doWrite = false; #pragma unroll for (int r = 0; r < nMultires; r++){ vals[r] = zero; myKeys[r] = -1; } } } else { xformObj(input, startidx, &myKeys[0], &vals[0], nMultires); } // See keyIndex-reasoning above int binSet = (threadIdx.x & ((1 << nBinSetslog2) - 1)); #if __CUDA_ARCH__ >= 200 /* if (laststeps){ *doReduce = false; } else*/ { if (checkReduce){ *doReduce = checkForReduction<nMultires>(myKeys, rkeys); if (histotype == histogram_generic || histotype == histogram_atomic_add){ __shared__ int tmp; tmp = 0; __syncthreads(); if (*doReduce && ((threadIdx.x & 31) == 0)) atomicAdd(&tmp, 1); __syncthreads(); if (tmp > HBLOCK_SIZE / 2) *doReduce = true; else *doReduce = false; } //if (laststeps) *doReduce = false; /* __syncthreads(); bool tmpred = checkForReduction<nMultires>(myKeys, rkeys); if ((threadIdx.x & 31) == 0) atomicExch(doReduce, (int)tmpred); __syncthreads();*/ } } #endif // TODO: Unroll this later - nvcc (at least older versions) can't unroll atomics (?) // TODO: How to avoid bank-conflicts? Any way to avoid? #if __CUDA_ARCH__ >= 200 #define ONE_HS_STEP(RESID) do { if ((RESID) < nMultires) { \ int keyIndex = doWrite == false ? 
0 : (myKeys[(RESID % nMultires)] << nBinSetslog2) + binSet; \ if (*doReduce){\ if (histotype == histogram_generic || histotype == histogram_atomic_add){\ bool Iwrite;\ TAKE_WARP_MUTEX(0);\ Iwrite = reduceToUnique<histotype, false>(&vals[(RESID % nMultires)], myKeys[(RESID % nMultires)], NULL, sumfunObj, rkeys, rvals);\ if (Iwrite && doWrite) bins[keyIndex] = sumfunObj(bins[keyIndex], vals[(RESID % nMultires)]);\ /*if (histotype == histogram_generic) myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite && doWrite, warpmutex);\ else wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite && doWrite, warpmutex);*/\ GIVE_WARP_MUTEX(0);\ } else { \ bool Iwrite = reduceToUnique<histotype, false>(&vals[(RESID % nMultires)], myKeys[(RESID % nMultires)], NULL, sumfunObj, rkeys, rvals);\ wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite && doWrite, warpmutex); \ }\ } else {\ if (histotype == histogram_generic)\ myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\ else if (histotype == histogram_atomic_add)\ wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, doWrite, warpmutex);\ else if (histotype == histogram_atomic_inc)\ wrapAtomicIncWarp<laststeps>(&bins[keyIndex], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\ else{\ myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\ }\ } } } while (0) #else #define ONE_HS_STEP(RESID) do { if ((RESID) < nMultires) { \ int keyIndex = doWrite == false ? 0 : (myKeys[(RESID % nMultires)] << nBinSetslog2) + binSet; \ if (histotype == histogram_generic)\ myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\ else if (histotype == histogram_atomic_add)\ wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, doWrite, warpmutex);\ else if (histotype == histogram_atomic_inc)\ wrapAtomicIncWarp<laststeps>(&bins[keyIndex], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\ else{\ myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\ }\ } } while (0) #endif ONE_HS_STEP(0); ONE_HS_STEP(1); ONE_HS_STEP(2); ONE_HS_STEP(3); //#pragma unroll for (int resid = 4; resid < nMultires; resid++){ ONE_HS_STEP(resid); } #undef ONE_HS_STEP } template <int nBinSetslog2, histogram_type histotype, int nMultires, bool lastSteps, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> __global__ void histogramKernel_sharedbins_new( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* blockOut, int nOut, int outStride, int nSteps) { extern __shared__ int cudahistogram_binstmp[]; OUTPUTTYPE* bins = (OUTPUTTYPE*)&(*cudahistogram_binstmp); int* locks = (int*)&bins[(nOut << nBinSetslog2)]; int* rkeys = NULL; OUTPUTTYPE* rvals = NULL; //__shared__ int warpmutex; //INIT_WARP_MUTEX2(warpmutex); #if __CUDA_ARCH__ >= 200 int warpId = threadIdx.x >> 5; if (histotype == histogram_generic) rkeys = &locks[(nOut << nBinSetslog2)]; else rkeys = locks; rvals = (OUTPUTTYPE*)&rkeys[32]; if (histotype == histogram_atomic_inc){ rkeys = &rkeys[warpId 
<< 5]; //rvals = &rvals[warpId << 5]; } #endif const int nBinSets = 1 << nBinSetslog2; // Reset all bins to zero... for (int j = 0; j < ((nOut << nBinSetslog2) >> HBLOCK_SIZE_LOG2) + 1; j++) { int bin = (j << HBLOCK_SIZE_LOG2) + threadIdx.x; if (bin < (nOut << nBinSetslog2)){ bins[bin] = zero; } } #if HBLOCK_SIZE > 32 __syncthreads(); #endif int outidx = blockIdx.x; INDEXT startidx = (INDEXT)((outidx * nSteps) * HBLOCK_SIZE + start + threadIdx.x); /*__shared__*/ int doReduce; // local var - TODO: Is this safe?? doReduce = 0; #define MED_UNROLL_LOG2 2 #define MED_UNROLL (1 << MED_UNROLL_LOG2) int step; for (step = 0; step < (nSteps >> MED_UNROLL_LOG2); step++) { //#pragma unroll //for (int substep = 0; substep < MED_UNROLL; substep++){ histogramKernel_stepImpl<histotype, nBinSetslog2, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, end, zero, nOut, startidx, bins, locks, rvals, rkeys, &doReduce, true, &warpmutex); startidx += HBLOCK_SIZE; histogramKernel_stepImpl<histotype, nBinSetslog2, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, end, zero, nOut, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex); startidx += HBLOCK_SIZE; histogramKernel_stepImpl<histotype, nBinSetslog2, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, end, zero, nOut, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex); startidx += HBLOCK_SIZE; histogramKernel_stepImpl<histotype, nBinSetslog2, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, end, zero, nOut, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex); startidx += HBLOCK_SIZE; //} } step = (nSteps >> MED_UNROLL_LOG2) << MED_UNROLL_LOG2; for (; step < nSteps ; step++) { histogramKernel_stepImpl<histotype, nBinSetslog2, nMultires, lastSteps, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, end, zero, nOut, startidx, bins, locks, rvals, rkeys, &doReduce, (step & 7) == 0, &warpmutex); startidx += HBLOCK_SIZE; } #undef MED_UNROLL #undef MED_UNROLL_LOG2 #if HBLOCK_SIZE > 32 __syncthreads(); #endif // Finally put together the bins for (int j = 0; j < (nOut >> HBLOCK_SIZE_LOG2) + 1; j++) { int key = (j << HBLOCK_SIZE_LOG2) + threadIdx.x; if (key < nOut) { OUTPUTTYPE res = blockOut[key * outStride + outidx]; //int tmpBin = bin; #pragma unroll for (int k = 0; k < nBinSets; k++) { //tmpBin += nOut; res = sumfunObj(res, bins[(key << nBinSetslog2) + k]); } //printf("tid:%02d, write out bin: %02d, \n", threadIdx.x, bin); blockOut[key * outStride + outidx] = res; } } } template <histogram_type histotype, typename OUTPUTTYPE> static int getMediumHistoTmpbufSize(int nOut, hipDeviceProp_t* props) { int nblocks = props->multiProcessorCount * 8; // NOTE: The other half is used by multireduce... return 2 * nblocks * nOut * sizeof(OUTPUTTYPE); } template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> static void callHistogramKernelImpl( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut, hipDeviceProp_t* props, hipStream_t stream, size_t* getTmpBufSize, void* tmpBuffer, bool outInDev, int cuda_arch) { INDEXT size = end - start; // Check if there is something to do actually... 
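    // Overall flow of this code path: zero one private copy of the output bins per block in
    // tmpOut (initKernel), run histogramKernel_sharedbins_new over the input in slices of at
    // most MAX_NHSTEPS steps per launch, and finally fold the per-block copies together with
    // callMultiReduce.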
if (end <= start) { if (getTmpBufSize) *getTmpBufSize = 0; return; } int nblocks = props->multiProcessorCount * 8; // Assert that our grid is not too large! //MY_ASSERT(n < 65536 && "Sorry - currently we can't do such a big problems with histogram-kernel..."); // One entry for each output for each thread-block: //OUTPUTTYPE* tmpOut = (OUTPUTTYPE*)parallel_alloc(MemType_DEV, n * nOut * sizeof(OUTPUTTYPE)); OUTPUTTYPE* tmpOut; if (getTmpBufSize) { // NOTE: The other half is used by multireduce... *getTmpBufSize = 2 * nblocks * nOut * sizeof(OUTPUTTYPE); return; } int nsteps = size / ( nblocks * HBLOCK_SIZE ); if (nsteps * nblocks * HBLOCK_SIZE < size) nsteps++; if (nsteps > MAX_NHSTEPS) nsteps = MAX_NHSTEPS; if (tmpBuffer) { char* tmpptr = (char*)tmpBuffer; tmpOut = (OUTPUTTYPE*)tmpBuffer; tmpBuffer = (void*)&tmpptr[nblocks * nOut * sizeof(OUTPUTTYPE)]; } else { hipMalloc((void**)&tmpOut, nblocks * nOut * sizeof(OUTPUTTYPE)); } /* For block size other that power of two: const dim3 grid = size / BLOCK_SIZE + ( size % BLOCK_SIZE == 0 ? 0 : 1 ); */ //MY_ASSERT(size > 0); //hipMemsetAsync(tmpOut, 0xFF, n * nOut * sizeof(OUTPUTTYPE), CURRENT_STREAM() ); //hipMemset(tmpOut, 0xFF, n * nOut * sizeof(OUTPUTTYPE) ); { #define IBLOCK_SIZE_LOG2 7 #define IBLOCK_SIZE (1 << IBLOCK_SIZE_LOG2) int initPaddedSize = ((nblocks * nOut) + (IBLOCK_SIZE) - 1) & (~((IBLOCK_SIZE) - 1)); const dim3 initblock = IBLOCK_SIZE; dim3 initgrid = initPaddedSize >> ( IBLOCK_SIZE_LOG2 ); int nsteps = 1; while (initgrid.x > (1 << 14)) { initgrid.x >>= 1; nsteps <<= 1; if (nsteps * initgrid.x * IBLOCK_SIZE < nblocks * nOut) initgrid.x++; } hipLaunchKernelGGL(( initKernel), dim3(initgrid),dim3(initblock),0,stream, tmpOut, zero, nblocks * nOut, nsteps); #undef IBLOCK_SIZE_LOG2 #undef IBLOCK_SIZE } int nKeysetslog2 = determineNKeySetsLog2(sizeof(OUTPUTTYPE), nOut, props); if (nKeysetslog2 < 0) nKeysetslog2 = 0; int extSharedNeeded = ((nOut << nKeysetslog2)) * (sizeof(OUTPUTTYPE)); // bins if (histotype == histogram_generic || cuda_arch < 130) extSharedNeeded += ((nOut << nKeysetslog2)) * (sizeof(int)); // locks if (cuda_arch >= 200) { // Reduction stuff: if (histotype == histogram_generic || histotype == histogram_atomic_add) { extSharedNeeded += ((sizeof(OUTPUTTYPE) + sizeof(int)) << 5); // reduction values } else { extSharedNeeded += (sizeof(int) << HBLOCK_SIZE_LOG2); // keys per warp of one thread } } /*int extSharedNeeded = ((nOut << nKeysetslog2)) * (sizeof(OUTPUTTYPE) + sizeof(int)) + (sizeof(OUTPUTTYPE) * HBLOCK_SIZE); if (nOut < HBLOCK_SIZE) extSharedNeeded += sizeof(int) * (HBLOCK_SIZE - nOut); if (cuda_arch < 130) extSharedNeeded += ((nOut << nKeysetslog2)) * (sizeof(int));*/ //printf("binsets = %d, steps = %d\n", (1 << nKeysetslog2), nsteps); int nOrigBlocks = nblocks; INDEXT myStart = start; while(myStart < end) { bool lastStep = false; if (myStart + nsteps * nblocks * HBLOCK_SIZE > end) { size = end - myStart; nsteps = (size) / (nblocks * HBLOCK_SIZE); if (nsteps < 1) { lastStep = true; nsteps = 1; nblocks = size / HBLOCK_SIZE; if (nblocks * HBLOCK_SIZE < size) nblocks++; } } dim3 grid = nblocks; dim3 block = HBLOCK_SIZE; switch (nKeysetslog2) { case 0: if (lastStep) hipLaunchKernelGGL(( histogramKernel_sharedbins_new<0, histotype, nMultires, true>), dim3(grid), dim3(block), extSharedNeeded, stream, input, xformObj, sumfunObj, myStart, end, zero, tmpOut, nOut, nOrigBlocks, nsteps); else hipLaunchKernelGGL(( histogramKernel_sharedbins_new<0, histotype, nMultires, false>), dim3(grid), dim3(block), extSharedNeeded, 
stream, input, xformObj, sumfunObj, myStart, end, zero, tmpOut, nOut, nOrigBlocks, nsteps); break; /* case 1: histogramKernel_sharedbins_new<1, histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>( input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, n, nsteps); break; case 2: histogramKernel_sharedbins_new<2, histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>( input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, n, nsteps); break; case 3: histogramKernel_sharedbins_new<3, histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>( input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, n, nsteps); break; case 4: histogramKernel_sharedbins_new<4, histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>( input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, n, nsteps); break; case 5: histogramKernel_sharedbins_new<5, histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>( input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, n, nsteps); break;*/ case -1: // TODO: Error? //assert(0); // "Sorry - not implemented yet" break; } myStart += nsteps * nblocks * HBLOCK_SIZE; } #if H_ERROR_CHECKS hipError_t error = hipGetLastError(); if (error != hipSuccess) printf("Cudaerror = %s\n", hipGetErrorString( error )); #endif // OK - so now tmpOut contains our gold - we just need to dig it out now callMultiReduce(nOrigBlocks, nOut, out, tmpOut, sumfunObj, zero, stream, tmpBuffer, outInDev); // Below same as host-code #if 0 { int resIdx; int i; OUTPUTTYPE* h_tmp = (OUTPUTTYPE*)malloc(n * nOut * sizeof(OUTPUTTYPE)); //parallel_copy(h_tmp, MemType_HOST, tmpOut, MemType_DEV, n * nOut * sizeof(OUTPUTTYPE)); hipMemcpy(h_tmp, tmpOut, n*nOut*sizeof(OUTPUTTYPE), hipMemcpyDeviceToHost); for (resIdx = 0; resIdx < nOut; resIdx++) { OUTPUTTYPE res = out[resIdx]; for (i = 0; i < n; i++) { res = sumfunObj(res, h_tmp[i + resIdx * n]); } out[resIdx] = res; } free(h_tmp); } #endif //parallel_free(tmpOut, MemType_DEV); if (!tmpBuffer) hipFree(tmpOut); } template <typename OUTTYPE> static bool binsFitIntoShared(int nOut, OUTTYPE zero, hipDeviceProp_t* props, int cuda_arch) { // Assume here we can only use 16kb of shared in total per SM // Also lets take minimal of 2 threads per functional unit active, in // order to be able to hide at least some latencies - for Fermi this means 32 * 2 = 64 // of active threads needed in total (Note: This is minimal and will hurt perf). // Also we run blocks of 32 threads and each block needs its own bin - therefore // we need in total 2 full bin-sets per SM plus 32 bins for the one for the working part // of the algorithm. // Due to these considerations we infer that we can fit it nicely in, if // (4 binsets x Nbins/binset + 32) x sizeof(OUTYPE) < 16kib - let's take here 16kb to have some room // for required parameters // Example: 64 doubles: 8bytes per number double => (4 * 64 + 32) * 8bytes = 288 * 8 bytes = 2304 bytes -> Easy // How many bins of doubles can we do with these limits? // ( 4 * x + 32) * 8bytes = 16000 bytes <=> 4x = 2000 - 32 => x = 2000/4 - 32/4 = 500 - 8 = 492 bins. // TODO: A possibly faster version of this would be to share one set of bins over as many warps as possible // for example, if we would use 512 threads = 16 warps, then this would be fine for hiding probably all major latencies // and we could get away with just one binset on SM: // ( x + 512 ) * 8bytes = 16000 bytes <=> x = 2000 - 512 = 1488 bins! 
With better latency-hiding // On the other hand this requires atomic operations on the shared memory, which could be somewhat slower on // arbitrary types, but all in all, this would seem to provide a better route. At least worth investigating... int shlimit = props->sharedMemPerBlock - 300; int limit = shlimit; // TODO: Pessimistic limit int need = (sizeof(zero) + sizeof(int)) * nOut; if (cuda_arch >= 200) need += HBLOCK_SIZE * sizeof(int) + 32 * sizeof(zero); if (need <= limit) return true; return false; } template <bool subHisto, histogram_type histotype, int nBinSetslog2, int nMultires, bool laststeps, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> static inline __device__ void histogramKernel_stepImplMulti( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT end, OUTPUTTYPE zero, int subsize, INDEXT startidx, OUTPUTTYPE* bins, int* locks, OUTPUTTYPE* rvals, int* rkeys, int* doReduce, bool checkReduce, int* warpmutex, int binOffset) { int myKeys[nMultires]; OUTPUTTYPE vals[nMultires]; bool doWrite = true; if (laststeps){ if (startidx < end) { xformObj(input, startidx, &myKeys[0], &vals[0], nMultires); } else { doWrite = false; #pragma unroll for (int r = 0; r < nMultires; r++){ vals[r] = zero; myKeys[r] = -1; } } } else { xformObj(input, startidx, &myKeys[0], &vals[0], nMultires); } #if __CUDA_ARCH__ >= 200 /* if (laststeps){ *doReduce = false; } else*/ { if (checkReduce){ *doReduce = checkForReduction<nMultires>(myKeys, rkeys); if (histotype == histogram_generic || histotype == histogram_atomic_add){ __shared__ int tmp; tmp = 0; __syncthreads(); if (*doReduce && ((threadIdx.x & 31) == 0)) atomicAdd(&tmp, 1); __syncthreads(); if (tmp > HMBLOCK_SIZE / 2) *doReduce = true; else *doReduce = false; } //if (laststeps) *doReduce = false; /* __syncthreads(); bool tmpred = checkForReduction<nMultires>(myKeys, rkeys); if ((threadIdx.x & 31) == 0) atomicExch(doReduce, (int)tmpred); __syncthreads();*/ } } #endif // TODO: Unroll this later - nvcc (at least older versions) can't unroll atomics (?) // TODO: How to avoid bank-conflicts? Any way to avoid? 
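    // In this multipass variant each sub-histogram block only owns the key range
    // [binOffset, binOffset + subsize). Keys that fall outside that window are redirected to
    // local bin 0 with Iwrite == false, so every lane still goes through the same warp-level
    // add helpers but the out-of-range lanes write nothing.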
#if __CUDA_ARCH__ >= 200 #define ONE_HS_STEP(RESID) do { if ((RESID) < nMultires) { \ int keyIndex = (myKeys[(RESID % nMultires)] - binOffset); \ bool Iwrite = keyIndex >= 0 && keyIndex < subsize && doWrite;\ if (!Iwrite) keyIndex = 0; \ if (*doReduce){\ if (histotype == histogram_generic || histotype == histogram_atomic_add){\ TAKE_WARP_MUTEX(0);\ bool Iwrite2 = reduceToUnique<histotype, false>(&vals[(RESID % nMultires)], myKeys[(RESID % nMultires)], NULL, sumfunObj, rkeys, rvals);\ if (Iwrite && Iwrite2) \ bins[keyIndex] = sumfunObj(bins[keyIndex], vals[(RESID % nMultires)]);\ /*if (histotype == histogram_generic) myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite && doWrite, warpmutex);\ else wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite && doWrite, warpmutex);*/\ GIVE_WARP_MUTEX(0);\ } else { \ bool Iwrite2 = reduceToUnique<histotype, false>(&vals[(RESID % nMultires)], myKeys[(RESID % nMultires)], NULL, sumfunObj, rkeys, rvals);\ wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite && Iwrite2, warpmutex); \ }\ } else {\ if (!Iwrite) keyIndex = 0;\ if (histotype == histogram_generic)\ myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\ else if (histotype == histogram_atomic_add)\ wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\ else if (histotype == histogram_atomic_inc)\ wrapAtomicIncWarp<laststeps>(&bins[keyIndex], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\ else{\ myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\ }\ } } } while (0) #else #define ONE_HS_STEP(RESID) do { if ((RESID) < nMultires) { \ int keyIndex = (myKeys[(RESID % nMultires)] - binOffset); \ bool Iwrite = keyIndex >= 0 && keyIndex < subsize && doWrite;\ if (!Iwrite) keyIndex = 0;\ if (histotype == histogram_generic)\ myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\ else if (histotype == histogram_atomic_add)\ wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\ else if (histotype == histogram_atomic_inc)\ wrapAtomicIncWarp<laststeps>(&bins[keyIndex], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\ else{\ myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\ }\ } } while (0) #endif ONE_HS_STEP(0); ONE_HS_STEP(1); ONE_HS_STEP(2); ONE_HS_STEP(3); //#pragma unroll for (int resid = 4; resid < nMultires; resid++){ ONE_HS_STEP(resid); } #undef ONE_HS_STEP } template <histogram_type histotype, int nMultires, bool lastSteps, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> __global__ void histogramKernel_multipass( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* blockOut, int nOut, int outStride, int nSteps, int subsize) { extern __shared__ int cudahistogram_binstmp[]; OUTPUTTYPE* bins = (OUTPUTTYPE*)&(*cudahistogram_binstmp); int* locks = (int*)&bins[subsize]; int* rkeys = NULL; OUTPUTTYPE* rvals = NULL; //__shared__ int warpmutex; //INIT_WARP_MUTEX2(warpmutex); #if __CUDA_ARCH__ >= 200 
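    // Shared memory layout in this kernel: bins[subsize] first, then locks[subsize]; the
    // warp-level reduction scratch (rkeys / rvals) set up below either follows the locks
    // (histogram_generic) or overlays them (the atomic paths), with the 32 reduction values
    // placed right after the first 32 keys.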
int warpId = threadIdx.x >> 5; if (histotype == histogram_generic) rkeys = &locks[subsize]; else rkeys = locks; rvals = (OUTPUTTYPE*)&rkeys[32]; if (histotype == histogram_atomic_inc){ rkeys = &rkeys[warpId << 5]; //rvals = &rvals[warpId << 5]; } #endif // Reset all bins to zero... for (int j = 0; j < (subsize >> HMBLOCK_SIZE_LOG2) + 1; j++) { int bin = (j << HMBLOCK_SIZE_LOG2) + threadIdx.x; if (bin < subsize){ bins[bin] = zero; } } #if HMBLOCK_SIZE > 32 __syncthreads(); #endif int outidx = blockIdx.y; int binOffset = blockIdx.x * subsize; INDEXT startidx = (INDEXT)((outidx * nSteps) * HMBLOCK_SIZE + start + threadIdx.x); int doReduce; // local var - TODO: Is this safe?? doReduce = 0; #define MED_UNROLL_LOG2 2 #define MED_UNROLL (1 << MED_UNROLL_LOG2) int step; for (step = 0; step < (nSteps >> MED_UNROLL_LOG2); step++) { histogramKernel_stepImplMulti<true, histotype, 0, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, end, zero, subsize, startidx, bins, locks, rvals, rkeys, &doReduce, true, &warpmutex, binOffset); startidx += HMBLOCK_SIZE; histogramKernel_stepImplMulti<true, histotype, 0, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, end, zero, subsize, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex, binOffset); startidx += HMBLOCK_SIZE; histogramKernel_stepImplMulti<true, histotype, 0, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, end, zero, subsize, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex, binOffset); startidx += HMBLOCK_SIZE; histogramKernel_stepImplMulti<true, histotype, 0, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, end, zero, subsize, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex, binOffset); startidx += HMBLOCK_SIZE; } step = (nSteps >> MED_UNROLL_LOG2) << MED_UNROLL_LOG2; for (; step < nSteps ; step++) { histogramKernel_stepImplMulti<true, histotype, 0, nMultires, lastSteps, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, end, zero, subsize, startidx, bins, locks, rvals, rkeys, &doReduce, (step & 7) == 0, &warpmutex, binOffset); startidx += HMBLOCK_SIZE; } #undef MED_UNROLL #undef MED_UNROLL_LOG2 #if HMBLOCK_SIZE > 32 __syncthreads(); #endif // Finally put together the bins for (int j = 0; j < (subsize >> HMBLOCK_SIZE_LOG2) + 1; j++) { int key = (j << HMBLOCK_SIZE_LOG2) + threadIdx.x; if (key < subsize) { OUTPUTTYPE res = blockOut[(key + binOffset) * outStride + outidx]; //int tmpBin = bin; res = sumfunObj(res, bins[key]); //printf("tid:%02d, write out bin: %02d, \n", threadIdx.x, bin); blockOut[(key + binOffset) * outStride + outidx] = res; } } } static int determineSubHistoSize(int nOut, size_t outsize, histogram_type histotype, int cuda_arch, hipDeviceProp_t* props) { int shlimit = props->sharedMemPerBlock - 300; int neededPerKey = outsize; if (histotype == histogram_generic || cuda_arch < 130) neededPerKey += (sizeof(int)); // locks int neededConst = 0; if (cuda_arch >= 200) { // Reduction stuff: if (histotype == histogram_generic || histotype == histogram_atomic_add) { neededConst += (outsize + sizeof(int)) << 5; // reduction values } else { neededConst += (sizeof(int) << HMBLOCK_SIZE_LOG2); // keys per warp of one thread } } int result = (shlimit - neededConst) / (2*neededPerKey); int res = 0; if (result >= 1<<16) { result >>= 16; res += 16; } if (result >= 1<< 8) { 
result >>= 8; res += 8; } if (result >= 1<< 4) { result >>= 4; res += 4; } if (result >= 1<< 2) { result >>= 2; res += 2; } if (result >= 1<< 1) { res += 1; } return (1 << res); } template <histogram_type histotype, typename OUTPUTTYPE> static int getMultipassBufSize(int nOut, hipDeviceProp_t* props, int cuda_arch) { int subsize = determineSubHistoSize(nOut, sizeof(OUTPUTTYPE), histotype, cuda_arch, props); int nDegenBlocks = nOut / subsize; if (subsize * nDegenBlocks < nOut) nDegenBlocks++; int nblocks = props->multiProcessorCount; if (nDegenBlocks < 8) nblocks = props->multiProcessorCount * 8 / nDegenBlocks; //int nblocks = props->multiProcessorCount * 8; // NOTE: The other half is used by multireduce... //printf("getMultipassBufSize(%d) = %d\n", nOut, 2 * nblocks * nOut * sizeof(OUTPUTTYPE)); return 2 * nblocks * nOut * sizeof(OUTPUTTYPE); } template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> static void callHistogramKernelMultiPass( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut, hipDeviceProp_t* props, hipStream_t stream, void* tmpBuffer, bool outInDev, int cuda_arch) { INDEXT size = end - start; if (end <= start) return; //int debugs = 0; int subsize = determineSubHistoSize(nOut, sizeof(OUTPUTTYPE), histotype, cuda_arch, props); int nDegenBlocks = nOut / subsize; if (subsize * nDegenBlocks < nOut) nDegenBlocks++; int nblocks = props->multiProcessorCount; if (nDegenBlocks < 8) nblocks = props->multiProcessorCount * 8 / nDegenBlocks; OUTPUTTYPE* tmpOut; int nsteps = size / ( nblocks * HMBLOCK_SIZE ); if (nsteps * nblocks * HMBLOCK_SIZE < size) nsteps++; if (nsteps > MAX_MULTISTEPS) nsteps = MAX_MULTISTEPS; //printf(" <debugstep = %d> ", debugs++); bool userBuffer = false; if (tmpBuffer) { char* tmpptr = (char*)tmpBuffer; tmpOut = (OUTPUTTYPE*)tmpBuffer; tmpBuffer = (void*)&tmpptr[nblocks * nOut * sizeof(OUTPUTTYPE)]; userBuffer = true; //printf("tmpBuffer = &tmpptr[%d]\n", nblocks * nOut * sizeof(OUTPUTTYPE)); } else { hipMalloc((void**)&tmpOut, 2 * nblocks * nOut * sizeof(OUTPUTTYPE)); //printf("tmpOut = malloc(%d)\n", 2 * nblocks * nOut * sizeof(OUTPUTTYPE)); //tmpBuffer = (void*)&tmpOut[nblocks * nOut * sizeof(OUTPUTTYPE)]; //printf("tmpBuffer = &tmpOut[%d]\n", nblocks * nOut * sizeof(OUTPUTTYPE)); } #define IBLOCK_SIZE_LOG2 7 #define IBLOCK_SIZE (1 << IBLOCK_SIZE_LOG2) int initPaddedSize = ((nblocks * nOut) + (IBLOCK_SIZE) - 1) & (~((IBLOCK_SIZE) - 1)); const dim3 initblock = IBLOCK_SIZE; dim3 initgrid = initPaddedSize >> ( IBLOCK_SIZE_LOG2 ); int nsteps2 = 1; while (initgrid.x > (1 << 14)) { initgrid.x >>= 1; nsteps2 <<= 1; if (nsteps2 * initgrid.x * IBLOCK_SIZE < nblocks * nOut) initgrid.x++; } hipLaunchKernelGGL(( initKernel), dim3(initgrid),dim3(initblock),0,stream, tmpOut, zero, nblocks * nOut, nsteps2); #undef IBLOCK_SIZE_LOG2 #undef IBLOCK_SIZE int extSharedNeeded = subsize * (sizeof(OUTPUTTYPE)); // bins if (histotype == histogram_generic || cuda_arch < 130) extSharedNeeded += subsize * (sizeof(int)); // locks if (cuda_arch >= 200) { // Reduction stuff: if (histotype == histogram_generic || histotype == histogram_atomic_add) { extSharedNeeded += ((sizeof(OUTPUTTYPE) + sizeof(int)) << 5); // reduction values } else { extSharedNeeded += (sizeof(int) << HMBLOCK_SIZE_LOG2); // keys per warp of one thread } } //printf(" <debugstep(init) = %d> ", debugs++); /*int extSharedNeeded = ((nOut << 
nKeysetslog2)) * (sizeof(OUTPUTTYPE) + sizeof(int)) + (sizeof(OUTPUTTYPE) * HMBLOCK_SIZE); if (nOut < HMBLOCK_SIZE) extSharedNeeded += sizeof(int) * (HMBLOCK_SIZE - nOut); if (cuda_arch < 130) extSharedNeeded += ((nOut << nKeysetslog2)) * (sizeof(int));*/ //printf("binsets = %d, steps = %d\n", (1 << nKeysetslog2), nsteps); int nOrigBlocks = nblocks; INDEXT myStart = start; while(myStart < end) { bool lastStep = false; if (myStart + nsteps * nblocks * HMBLOCK_SIZE > end) { size = end - myStart; nsteps = (size) / (nblocks * HMBLOCK_SIZE); if (nsteps < 1) { lastStep = true; nsteps = 1; nblocks = size / HMBLOCK_SIZE; if (nblocks * HMBLOCK_SIZE < size) nblocks++; } } dim3 grid; grid.y = nblocks; grid.x = nDegenBlocks; dim3 block = HMBLOCK_SIZE; //printf(" <debugstep(main) = %d> ", debugs++); if (lastStep) hipLaunchKernelGGL(( histogramKernel_multipass<histotype, nMultires, true>), dim3(grid), dim3(block), extSharedNeeded, stream, input, xformObj, sumfunObj, myStart, end, zero, tmpOut, nOut, nOrigBlocks, nsteps, subsize); else hipLaunchKernelGGL(( histogramKernel_multipass<histotype, nMultires, false>), dim3(grid), dim3(block), extSharedNeeded, stream, input, xformObj, sumfunObj, myStart, end, zero, tmpOut, nOut, nOrigBlocks, nsteps, subsize); myStart += nsteps * nblocks * HMBLOCK_SIZE; } #if H_ERROR_CHECKS hipError_t error = hipGetLastError(); if (error != hipSuccess) printf("Cudaerror = %s\n", hipGetErrorString( error )); #endif // OK - so now tmpOut contains our gold - we just need to dig it out now //printf(" <debugstep(out) = %d> ", debugs++); //printf("callMultiReduce(%d, %d,...)\n", nOrigBlocks, nOut); callMultiReduce(nOrigBlocks, nOut, out, tmpOut, sumfunObj, zero, stream, tmpBuffer, outInDev); //printf(" <debugstep(multireduce) = %d> ", debugs++); #if H_ERROR_CHECKS error = hipGetLastError(); if (error != hipSuccess) printf("Cudaerror(reduce) = %s\n", hipGetErrorString( error )); #endif // Below same as host-code #if 0 { int resIdx; int i; OUTPUTTYPE* h_tmp = (OUTPUTTYPE*)malloc(n * nOut * sizeof(OUTPUTTYPE)); //parallel_copy(h_tmp, MemType_HOST, tmpOut, MemType_DEV, n * nOut * sizeof(OUTPUTTYPE)); hipMemcpy(h_tmp, tmpOut, n*nOut*sizeof(OUTPUTTYPE), hipMemcpyDeviceToHost); for (resIdx = 0; resIdx < nOut; resIdx++) { OUTPUTTYPE res = out[resIdx]; for (i = 0; i < n; i++) { res = sumfunObj(res, h_tmp[i + resIdx * n]); } out[resIdx] = res; } free(h_tmp); } #endif //parallel_free(tmpOut, MemType_DEV); if (!userBuffer) hipFree(tmpOut); } template <bool lastSteps, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> static inline __device__ void histoKernel_smallBinStep( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT myStart, INDEXT end, OUTPUTTYPE* mySHBins) { int myKeys[nMultires]; if (lastSteps) { if (myStart < end) { OUTPUTTYPE myOut[nMultires]; xformObj(input, myStart, &myKeys[0], &myOut[0], nMultires); #pragma unroll for (int res = 0; res < nMultires; res++) { int index = (myKeys[res]) << SMALL_BLOCK_SIZE_LOG2; mySHBins[index] = sumfunObj(mySHBins[index], myOut[res]); } } } else { OUTPUTTYPE myOut[nMultires]; xformObj(input, myStart, &myKeys[0], &myOut[0], nMultires); #pragma unroll for (int res = 0; res < nMultires; res++) { int index = (myKeys[res]) << SMALL_BLOCK_SIZE_LOG2; mySHBins[index] = sumfunObj(mySHBins[index], myOut[res]); } } } template <bool lastSteps, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> __global__ 
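// Small-bin kernel: every thread keeps its own private copy of all nOut bins in shared
// memory, interleaved so that thread tid's copy of bin b lives at
// allbins[(b << SMALL_BLOCK_SIZE_LOG2) + tid]. Accumulation therefore needs no atomics;
// the per-thread copies are summed into blockOut at the end of the kernel.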
void histoKernel_smallBin( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* blockOut, int nOut, int maxblocks, int nSteps) { // Take care with extern - In order to have two instances of this template the // type of the extern variables cannot change // (ie. cannot use "extern __shared__ OUTPUTTYPE bins[]") extern __shared__ int cudahistogram_allbinstmp[]; OUTPUTTYPE* allbins = (OUTPUTTYPE*)&(*cudahistogram_allbinstmp); OUTPUTTYPE* mySHBins = &allbins[threadIdx.x]; OUTPUTTYPE* ourOut = &blockOut[nOut * blockIdx.x]; INDEXT myStart = start + (INDEXT)((blockIdx.x * nSteps) << SMALL_BLOCK_SIZE_LOG2) + (INDEXT)threadIdx.x; for (int bin = 0; bin < nOut /*- nLocVars*/; bin++) mySHBins[bin << SMALL_BLOCK_SIZE_LOG2] = zero; // Run loops - unroll 8 steps manually int doNSteps = (nSteps) >> 3; for (int step = 0; step < doNSteps; step++) { histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart, end, mySHBins); histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + SMALL_BLOCK_SIZE, end, mySHBins); histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 2*SMALL_BLOCK_SIZE, end, mySHBins); histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 3*SMALL_BLOCK_SIZE, end, mySHBins); histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 4*SMALL_BLOCK_SIZE, end, mySHBins); histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 5*SMALL_BLOCK_SIZE, end, mySHBins); histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 6*SMALL_BLOCK_SIZE, end, mySHBins); histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 7*SMALL_BLOCK_SIZE, end, mySHBins); myStart += 8*SMALL_BLOCK_SIZE; } int nStepsLeft = (nSteps) - (doNSteps << 3); for (int step = 0; step < nStepsLeft; step++) { histoKernel_smallBinStep<true, nMultires>(input, xformObj, sumfunObj, myStart, end, mySHBins); myStart += SMALL_BLOCK_SIZE; } // In the end combine results: #if SMALL_BLOCK_SIZE > 32 __syncthreads(); #endif // Do first shared stuff: int keyIndex = threadIdx.x; while (keyIndex < nOut) { OUTPUTTYPE* binResults = &allbins[keyIndex << SMALL_BLOCK_SIZE_LOG2]; OUTPUTTYPE result = ourOut[keyIndex]; for (int tidx = 0; tidx < SMALL_BLOCK_SIZE; tidx++){ result = sumfunObj(result, *binResults++); } ourOut[keyIndex] = result; keyIndex += SMALL_BLOCK_SIZE; } } static inline __device__ int resultToInt(int resultin){ return resultin; } static inline __device__ int resultToInt(long resultin){ return (int)resultin; } static inline __device__ int resultToInt(long long resultin){ return (int)resultin; } static inline __device__ int resultToInt(unsigned int resultin){ return (int)resultin; } static inline __device__ int resultToInt(unsigned long resultin){ return (int)resultin; } static inline __device__ int resultToInt(unsigned long long resultin){ return (int)resultin; } template<typename OUTPUTTYPE> static inline __device__ int resultToInt(OUTPUTTYPE resultin){ return 0; } static inline __device__ void intToResult(int resultin, int& resultOut){ resultOut = resultin; } static inline __device__ void intToResult(int resultin, long& resultOut){ resultOut = (long)resultin; } static inline __device__ void intToResult(int resultin, unsigned int& resultOut){ resultOut = (unsigned )resultin; } static inline __device__ void intToResult(int resultin, long long& resultOut){ 
resultOut = (long long)resultin; } static inline __device__ void intToResult(int resultin, unsigned long& resultOut){ resultOut = (unsigned long)resultin; } static inline __device__ void intToResult(int resultin, unsigned long long& resultOut){ resultOut = (unsigned long long)resultin; } template<typename OUTPUTTYPE> static inline __device__ void intToResult(int resultin, OUTPUTTYPE& resultout){ ; } template <bool lastSteps, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> static inline __device__ void histoKernel_smallBinByteOneStep( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT myStart, INDEXT end, volatile unsigned char* mySHBins, OUTPUTTYPE zero ) { if (lastSteps) { if (myStart < end) { OUTPUTTYPE myOut[nMultires]; int myKeys[nMultires]; xformObj(input, myStart, &myKeys[0], &myOut[0], nMultires); #pragma unroll for (int res = 0; res < nMultires; res++) { // index = tid * 4 + (key / 4) * blockSize * 4 + (key % 4) - mySHBins points to allbins[4 x tid] // Complex indexing cost: 2x bit-shift + bitwise and + addition = 4 ops... int index = (((myKeys[res]) >> 2) << (SMALL_BLOCK_SIZE_LOG2 + 2)) + (myKeys[res] & 0x3); mySHBins[index]++; } } } else /*if (myStart < end)*/ { OUTPUTTYPE myOut[nMultires]; int myKeys[nMultires]; xformObj(input, myStart, &myKeys[0], &myOut[0], nMultires); #pragma unroll for (int res = 0; res < nMultires; res++) { // index = tid * 4 + (key / 4) * blockSize * 4 + (key % 4) - mySHBins points to allbins[4 x tid] // Complex indexing cost: 2x bit-shift + bitwise and + addition = 4 ops... int key = myKeys[res]; int index = ((key >> 2) << (SMALL_BLOCK_SIZE_LOG2 + 2)) + (key & 0x3); mySHBins[index]++; } } } template <histogram_type histotype, bool lastSteps, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> __global__ void histoKernel_smallBinByte( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* blockOut, int nOut, int maxblocks, int nSteps) { // Ok - idea is as follows: When we have blocksize number of threads, thread tid's nth-bin is at: // index = tid * 4 + (bin / 4) * blocksize * 4 + (bin % 4) // Example: // With 32 threads bins #7, #8 and #9 will be at (7/4=1, 7%4=3, 8/4=2, 8%4=4, 9/4=2, 9%4=1): // Bin #7 Bin #8 Bin #9 ... Bin #63 // tid | index index index ... index // ============== ======== ======== ======== // 0 35 256 257 ... 1923 // 1 39 260 261 ... 1927 // 2 43 264 265 ... 1931 // ... // 31 255 380 381 ... 2047 // Therefore there are blocksize x nOut number of 1-byte bins // Outputs are gathered from time to time to 32-bit bins // // Example2: // With 32 threads 7 bins // Bin #0 Bin #1 Bin #2 Bin #3 Bin #4 Bin #5 Bin #6 // tid | index index index index index index index // ============== ======== ======== ======== ======== ======== ======== // 0 0 1 2 3 128 129 130 // 1 4 5 6 7 132 133 134 // 2 8 9 10 11 136 137 138 // ... // 30 120 121 122 123 248 249 250 // 31 124 125 126 127 252 253 254 // // Example3: // index = tid * 4 + (bin / 4) * blocksize * 4 + (bin % 4) // With 32 threads 3 bins // Bin #0 Bin #1 Bin #2 // tid | index index index // ============== ======== ======== // 0 0 1 2 // 1 4 5 6 // 2 8 9 10 // ... // 30 120 121 122 // 31 124 125 126 extern __shared__ unsigned char allbins2[]; volatile unsigned char* mySHBins = &allbins2[threadIdx.x << 2]; int padNOut = nOut + (((nOut & 0x3) != 0) ? 
(4 - (nOut & 0x3)) : 0); OUTPUTTYPE* ourOut = &blockOut[nOut * blockIdx.x]; #if __CUDA_ARCH__ >= 200 OUTPUTTYPE* resultbins = ourOut; #else OUTPUTTYPE* resultbins = (OUTPUTTYPE*)(&allbins2[padNOut << SMALL_BLOCK_SIZE_LOG2]); #endif INDEXT myStart = start + (INDEXT)(((blockIdx.x * nSteps) << SMALL_BLOCK_SIZE_LOG2) + threadIdx.x); // Run loops //int nFullLoops = nSteps >> 7; // Clear bins { int* tmpSHBins = &((int*)allbins2)[threadIdx.x]; // There are nOut x BLOCK_SIZE byte-sized bins so nOut x BLOCKISIZE/4 int-sized ones for (int bin = 0; bin < (padNOut >> 2) /*- nLocVars*/; bin++) tmpSHBins[bin << (SMALL_BLOCK_SIZE_LOG2)] = 0; // for (int tmpbin = (bin << 2); tmpbin < padNOut; tmpbin++) // mySHBins[tmpbin] = 0; #if __CUDA_ARCH__ < 200 int binid = threadIdx.x; while(binid < nOut) { resultbins[binid] = zero; binid += SMALL_BLOCK_SIZE; } #endif } #if SMALL_BLOCK_SIZE > 32 __syncthreads(); #endif const int looplim = (255 / nMultires) < 63 ? (255 / nMultires) : 63; for (int stepsRem = nSteps; stepsRem > 0; stepsRem -= looplim) { if (stepsRem > looplim) { #define MANUAL_UNROLL 1 #if MANUAL_UNROLL // Unroll manually // ("unexcpected control flow" construct with #pragma unroll) #define DO_STEP(NUM) do { if ((NUM) < looplim) { \ histoKernel_smallBinByteOneStep<lastSteps, nMultires>( \ input, xformObj, sumfunObj, myStart /*+ (NUM) * SMALL_BLOCK_SIZE*/, end,\ mySHBins, zero); myStart += SMALL_BLOCK_SIZE; \ } } while (0) #define DO_16_STEPS(N0) do { \ DO_STEP(N0 + 0); DO_STEP(N0 + 1); DO_STEP(N0 + 2); DO_STEP(N0 + 3); \ DO_STEP(N0 + 4); DO_STEP(N0 + 5); DO_STEP(N0 + 6); DO_STEP(N0 + 7); \ DO_STEP(N0 + 8); DO_STEP(N0 + 9); DO_STEP(N0 + 10); DO_STEP(N0 + 11); \ DO_STEP(N0 + 12); DO_STEP(N0 + 13); DO_STEP(N0 + 14); DO_STEP(N0 + 15); \ } while (0) DO_16_STEPS(0); DO_16_STEPS(16); DO_16_STEPS(32); DO_16_STEPS(48); #undef DO_16_STEPS #undef DO_STEP //myStart += looplim * SMALL_BLOCK_SIZE; #else for (int stepNum = 0; stepNum < looplim; stepNum++){ histoKernel_smallBinByteOneStep<lastSteps, nMultires>( input, xformObj, sumfunObj, myStart + stepNum * SMALL_BLOCK_SIZE, end, mySHBins, zero); } myStart += looplim * SMALL_BLOCK_SIZE; #endif // MANUAL_UNROLL #undef MANUAL_UNROLL } else { for (int stepNum = 0; stepNum < stepsRem; stepNum++){ histoKernel_smallBinByteOneStep<lastSteps, nMultires>( input, xformObj, sumfunObj, myStart + stepNum * SMALL_BLOCK_SIZE, end, mySHBins, zero); } myStart += looplim * SMALL_BLOCK_SIZE; } // Ok passes done - need to flush results together { # if SMALL_BLOCK_SIZE > 32 __syncthreads(); # endif int binid = threadIdx.x; while(binid < nOut) { // Start from own tid in order to avoid bank-conflicts: // index = tid * 4 + 4 * (bin / 4) * blocksize + (bin % 4) int index = (threadIdx.x << 2) + ((binid >> 2) << (SMALL_BLOCK_SIZE_LOG2 + 2)) + (binid & 0x3); //int res = (int)allbins2[index]; int res = resultToInt(resultbins[binid]); int ilimit = SMALL_BLOCK_SIZE - threadIdx.x; #pragma unroll for (int i=0; i < SMALL_BLOCK_SIZE; i++) { if (i == ilimit) index -= (SMALL_BLOCK_SIZE << 2); res += allbins2[index]; //allbins2[index] = 0; index += 4; } intToResult(res, resultbins[binid]); binid += SMALL_BLOCK_SIZE; } # if SMALL_BLOCK_SIZE > 32 __syncthreads(); # endif // zero the bins { int* tmpSHBins = &((int*)allbins2)[threadIdx.x]; // There are nOut x BLOCK_SIZE byte-sized bins so nOut x BLOCKISIZE/4 int-sized ones for (int bin = 0; bin < (padNOut >> 2) /*- nLocVars*/; bin++) tmpSHBins[bin << (SMALL_BLOCK_SIZE_LOG2)] = 0; } # if SMALL_BLOCK_SIZE > 32 __syncthreads(); # endif } } // In the end combine 
results: #if __CUDA_ARCH__ < 200 #if SMALL_BLOCK_SIZE > 32 __syncthreads(); #endif int keyIndex = threadIdx.x; while (keyIndex < nOut) { OUTPUTTYPE result = ourOut[keyIndex]; //result = result + resultbins[keyIndex]; result = sumfunObj(result, *(OUTPUTTYPE*)(&resultbins[keyIndex])); ourOut[keyIndex] = result; keyIndex += SMALL_BLOCK_SIZE; } #endif } template <histogram_type histotype, typename OUTPUTTYPE> static int getSmallBinBufSize(int nOut, hipDeviceProp_t* props) { int maxblocks = props->multiProcessorCount * 3; maxblocks *= 2; if (nOut < 200) maxblocks *= 4; maxblocks *= 4; return (maxblocks + 1) * nOut * sizeof(OUTPUTTYPE); } template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> static void callSmallBinHisto( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut, hipDeviceProp_t* props, int cuda_arch, hipStream_t stream, int* getTmpBufSize, void* tmpBuffer, bool outInDev) { INDEXT size = end - start; if (end <= start) { if (getTmpBufSize) *getTmpBufSize = 0; return; } int maxblocks = props->multiProcessorCount * 3; if (size > 2*1024*1024 || getTmpBufSize){ maxblocks *= 2; // High occupancy requires lots of blocks if (nOut < 200) maxblocks *= 4; } // TODO: Magic constants.. // With low bin-counts and large problems it seems beneficial to use // more blocks... if (nOut <= 128 || size > 2*4096*4096 || getTmpBufSize) maxblocks *= 4; //printf("maxblocks = %d\n", maxblocks); OUTPUTTYPE* tmpOut; if (getTmpBufSize) { *getTmpBufSize = (maxblocks + 1) * nOut * sizeof(OUTPUTTYPE); return; } if (tmpBuffer) tmpOut = (OUTPUTTYPE*)tmpBuffer; else hipMalloc((void**)&tmpOut, (maxblocks + 1) * nOut * sizeof(OUTPUTTYPE)); #if H_ERROR_CHECKS /*assert(getSmallBinBufSize<histotype, OUTPUTTYPE>(nOut, props) >= (maxblocks + 1) * nOut * sizeof(OUTPUTTYPE));*/ #endif // hipMemset(tmpOut, 0, sizeof(OUTPUTTYPE) * nOut * (maxblocks+1)); { #define IBLOCK_SIZE_LOG2 7 #define IBLOCK_SIZE (1 << IBLOCK_SIZE_LOG2) int initPaddedSize = ((maxblocks * nOut) + (IBLOCK_SIZE) - 1) & (~((IBLOCK_SIZE) - 1)); const dim3 initblock = IBLOCK_SIZE; dim3 initgrid = initPaddedSize >> ( IBLOCK_SIZE_LOG2 ); int nsteps = 1; while (initgrid.x > (1 << 14)) { initgrid.x >>= 1; nsteps <<= 1; if (nsteps * initgrid.x * IBLOCK_SIZE < maxblocks * nOut) initgrid.x++; } hipLaunchKernelGGL(( initKernel), dim3(initgrid), dim3(initblock), 0, stream, tmpOut, zero, maxblocks * nOut, nsteps); #undef IBLOCK_SIZE_LOG2 #undef IBLOCK_SIZE } int sharedNeeded; if (histotype == histogram_atomic_inc) { int padNOut = nOut + (((nOut & 0x3) != 0) ? 
(4 - (nOut & 0x3)) : 0); sharedNeeded = (padNOut << SMALL_BLOCK_SIZE_LOG2); if (cuda_arch < 200) sharedNeeded += (nOut << 2); } else { int typesize = sizeof(OUTPUTTYPE); sharedNeeded = (nOut * typesize) << SMALL_BLOCK_SIZE_LOG2; //printf("Small-bin, generic, Shared needed = %d\n", sharedNeeded); } // Determine number of local variables // SMALL_LOCALLIMIT is total local size available for one block: int nSteps = size / (maxblocks << SMALL_BLOCK_SIZE_LOG2); if (nSteps * maxblocks * SMALL_BLOCK_SIZE < size) nSteps++; if (nSteps > MAX_SMALL_STEPS) nSteps = MAX_SMALL_STEPS; int nFullSteps = size / (nSteps * maxblocks * SMALL_BLOCK_SIZE); dim3 grid = maxblocks; dim3 block = SMALL_BLOCK_SIZE; for (int i = 0; i < nFullSteps; i++) { if (histotype == histogram_atomic_inc) hipLaunchKernelGGL(( histoKernel_smallBinByte<histotype, false, nMultires>), dim3(grid), dim3(block), sharedNeeded, stream, input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps); else hipLaunchKernelGGL(( histoKernel_smallBin<false, nMultires>), dim3(grid), dim3(block), sharedNeeded, stream, input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps); start += nSteps * maxblocks * SMALL_BLOCK_SIZE; #if H_ERROR_CHECKS hipError_t error = hipGetLastError(); if (error != hipSuccess) printf("Cudaerror = %s\n", hipGetErrorString( error )); #endif } size = end - start; if (size > 0) { // Do what steps we still can do without checks nSteps = size / (maxblocks << SMALL_BLOCK_SIZE_LOG2); if (nSteps > 0) { if (histotype == histogram_atomic_inc) hipLaunchKernelGGL(( histoKernel_smallBinByte<histotype, false, nMultires>), dim3(grid), dim3(block), sharedNeeded, stream, input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps); else hipLaunchKernelGGL(( histoKernel_smallBin<false, nMultires>), dim3(grid), dim3(block), sharedNeeded, stream, input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps); start += nSteps * maxblocks * SMALL_BLOCK_SIZE; } } size = end - start; if (size > 0) { // Last step here: int nblocks = size >> SMALL_BLOCK_SIZE_LOG2; if (nblocks >= maxblocks) nblocks = maxblocks; else if ((nblocks << SMALL_BLOCK_SIZE_LOG2) < size) nblocks++; nSteps = size / (nblocks << SMALL_BLOCK_SIZE_LOG2); if (nSteps * nblocks * SMALL_BLOCK_SIZE < size) { nSteps++; nblocks = size / (nSteps << SMALL_BLOCK_SIZE_LOG2); if (((nSteps * nblocks) << SMALL_BLOCK_SIZE_LOG2) < size) nblocks++; } grid.x = nblocks; if (histotype == histogram_atomic_inc) hipLaunchKernelGGL(( histoKernel_smallBinByte<histotype, true, nMultires>), dim3(grid), dim3(block), sharedNeeded, stream, input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps); else hipLaunchKernelGGL(( histoKernel_smallBin<true, nMultires>), dim3(grid), dim3(block), sharedNeeded, stream, input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps); } #if H_ERROR_CHECKS hipError_t error = hipGetLastError(); if (error != hipSuccess) printf("Cudaerror = %s\n", hipGetErrorString( error )); #endif // Finally put together the result: enum hipMemcpyKind fromOut = outInDev ? hipMemcpyDeviceToDevice : hipMemcpyHostToDevice; enum hipMemcpyKind toOut = outInDev ? 
hipMemcpyDeviceToDevice : hipMemcpyDeviceToHost; if (stream != 0) hipMemcpyAsync(&tmpOut[maxblocks * nOut], out, sizeof(OUTPUTTYPE) * nOut, fromOut, stream); else hipMemcpy(&tmpOut[maxblocks * nOut], out, sizeof(OUTPUTTYPE) * nOut, fromOut); // Let's do so that one block handles one bin grid.x = nOut; //grid.x = nOut >> SMALL_BLOCK_SIZE_LOG2; //if ((grid.x << SMALL_BLOCK_SIZE_LOG2) < nOut) grid.x++; block.x = GATHER_BLOCK_SIZE; hipLaunchKernelGGL(( gatherKernel), dim3(grid), dim3(block), 0, stream, sumfunObj, tmpOut, nOut, maxblocks, zero); // TODO: Use async copy for the results as well? if (outInDev && stream != 0) hipMemcpyAsync(out, tmpOut, nOut*sizeof(OUTPUTTYPE), toOut, stream); else hipMemcpy(out, tmpOut, nOut*sizeof(OUTPUTTYPE), toOut); #if 0 { int resIdx; int i; OUTPUTTYPE* h_tmp = (OUTPUTTYPE*)malloc(maxblocks * nOut * sizeof(OUTPUTTYPE)); //parallel_copy(h_tmp, MemType_HOST, tmpOut, MemType_DEV, n * nOut * sizeof(OUTPUTTYPE)); hipMemcpy(h_tmp, tmpOut, maxblocks*nOut*sizeof(OUTPUTTYPE), hipMemcpyDeviceToHost); for (resIdx = 0; resIdx < nOut; resIdx++) { OUTPUTTYPE res = out[resIdx]; for (i = 0; i < maxblocks; i++) { res = sumfunObj(res, h_tmp[i * nOut + resIdx]); } out[resIdx] = sumfunObj(res, out[resIdx]); } free(h_tmp); } #endif if (!tmpBuffer) hipFree(tmpOut); } template <histogram_type histotype, typename OUTPUTTYPE> static inline bool smallBinLimit(int nOut, OUTPUTTYPE zero, hipDeviceProp_t* props, int cuda_arch) { int shlimit = props->sharedMemPerBlock - 300; int typeSize = sizeof(OUTPUTTYPE); if (histotype == histogram_atomic_inc) if ((((4 * nOut) << 5) + (cuda_arch < 200 ? nOut * 16 : 0)) < shlimit) return true; if (((4 * nOut * typeSize) << 5) < shlimit) return true; return false; } __global__ void detectCudaArchKernel(int* res) { int result; #if __CUDA_ARCH__ >= 210 result = 210; #elif __CUDA_ARCH__ >= 200 result = 200; #elif __CUDA_ARCH__ >= 130 result = 130; #elif __CUDA_ARCH__ >= 120 result = 120; #elif __CUDA_ARCH__ >= 110 result = 110; #else result = 100; #endif if (threadIdx.x == 0) *res = result; } static int DetectCudaArch(void) { // The only way to know from host-code, which device architecture our kernels have been generated // against, is to run a kernel that actually checks it.. :) dim3 grid = 1; //dim3 block = 32; // TODO: Allow static storage so that we can ask just once for the arch??? // NOTE: This function implies synchromization between CPU and GPU - so use static here... 
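    // The static below caches the answer, so the tiny detection kernel (and the blocking
    // device-to-host copy it implies) only runs on the first call; later calls simply return
    // the cached value.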
static int result = 0; //int result = 0; if (result == 0) { void* tmpBuf; hipMalloc(&tmpBuf, sizeof(int)); hipLaunchKernelGGL(( detectCudaArchKernel), dim3(grid), dim3(grid), 0, 0, (int*)tmpBuf); hipMemcpy(&result, tmpBuf, sizeof(int), hipMemcpyDeviceToHost); hipFree(tmpBuf); //printf("Detected CUDA_ARCH = %d\n", result); } return result; } static bool runMultiPass(int nOut, hipDeviceProp_t* props, int cuda_arch, size_t outsize, histogram_type histotype) { int subsize = determineSubHistoSize(nOut, outsize, histotype, cuda_arch, props); if (cuda_arch < 120){ if (subsize <= 0 || nOut > 2 * subsize) return false; return true; } else { if (subsize <= 0 || nOut > 16 * subsize) return false; return true; } } template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> hipError_t callHistogramKernel( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut, bool outInDev, hipStream_t stream, void* tmpBuffer, bool allowMultiPass) { int devId; hipDeviceProp_t props; hipError_t cudaErr = hipGetDevice( &devId ); if (cudaErr != 0) return cudaErr; //assert(!cudaErr); cudaErr = hipGetDeviceProperties( &props, devId ); if (cudaErr != 0) return cudaErr; int cuda_arch = DetectCudaArch(); enum hipFuncCache_t old; hipDeviceGetCacheConfig(&old); hipDeviceSetCacheConfig(hipFuncCachePreferShared); if (nOut <= 0) return hipSuccess; // 100 Mib printf-limit should be enough... // hipDeviceSetLimit(hipLimitPrintfFifoSize, 1024 * 1024 * 100); if (smallBinLimit<histotype>(nOut, zero, &props, cuda_arch)) { callSmallBinHisto<histotype, nMultires>(input, xformObj, sumfunObj, start, end, zero, out, nOut, &props, cuda_arch, stream, NULL, tmpBuffer, outInDev); } else if (binsFitIntoShared(nOut, zero, &props, cuda_arch)) { callHistogramKernelImpl<histotype, nMultires>(input, xformObj, sumfunObj, start, end, zero, out, nOut, &props, stream, NULL, tmpBuffer, outInDev, cuda_arch); } else if (allowMultiPass && runMultiPass(nOut, &props, cuda_arch, sizeof(OUTPUTTYPE), histotype)) { callHistogramKernelMultiPass<histotype, nMultires>(input, xformObj, sumfunObj, start, end, zero, out, nOut, &props, stream, tmpBuffer, outInDev, cuda_arch); } else { callHistogramKernelLargeNBins<histotype, nMultires>(input, xformObj, sumfunObj, start, end, zero, out, nOut, &props, cuda_arch, stream, NULL, tmpBuffer, outInDev); } hipDeviceSetCacheConfig(old); return hipSuccess; } template <typename nDimIndexFun, int nDim, typename USERINPUTTYPE, typename INDEXT, typename OUTPUTTYPE> class wrapHistoInput { public: nDimIndexFun userIndexFun; INDEXT starts[nDim]; //int ends[nDim]; INDEXT sizes[nDim]; __host__ __device__ void operator() (USERINPUTTYPE input, INDEXT i, int* result_index, OUTPUTTYPE* results, int nresults) const { int coords[nDim]; int tmpi = i; #pragma unroll for (int d=0; d < nDim; d++) { // Example of how this logic works - imagine a cube of (10,100,1000), and take index 123 456 // newI = 123 456 / 10 = 12 345, offset = 123 456 - 123 450 = 6 (this is our first coordinate!), // newI = 12 345 / 100 = 123, offset = 12 345 - 12 300 = 45 (this is our second coordinate!), // newI = 123 / 1000 = 0, offset = 123 - 0 = 123 (this is our last coordinate!) 
// Result = [123, 45, 6] INDEXT newI = tmpi / sizes[d]; INDEXT offset = tmpi - newI * sizes[d]; coords[d] = starts[d] + offset; tmpi = newI; } // Now just call wrapped functor with right coordinate values userIndexFun(input, coords, result_index, results, nresults); } }; template <histogram_type histotype, int nMultires, int nDim, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> hipError_t callHistogramKernelNDim( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT* starts, INDEXT* ends, OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut, bool outInDev, hipStream_t stream, void* tmpBuffer, bool allowMultiPass) { wrapHistoInput<TRANSFORMFUNTYPE, nDim, INPUTTYPE, INDEXT, OUTPUTTYPE> wrapInput; INDEXT start = 0; INDEXT size = 1; for (int d = 0; d < nDim; d++) { wrapInput.starts[d] = starts[d]; wrapInput.sizes[d] = ends[d] - starts[d]; // Example: starts = [3, 10, 23], sizes = [10, 100, 1000] // start = 3 * 1 = 3, size = 10 // start = 3 + 10 * 10 = 103, size = 10*100 = 1000 // start = 103 + 1000*23 = 23 103, size = 1000*1000 = 1 000 000 start += starts[d] * size; size *= wrapInput.sizes[d]; if (ends[d] <= starts[d]) return hipSuccess; } wrapInput.userIndexFun = xformObj; INDEXT end = start + size; return callHistogramKernel<histotype, nMultires> (input, wrapInput, sumfunObj, start, end, zero, out, nOut, outInDev, stream, tmpBuffer, allowMultiPass); } template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> hipError_t callHistogramKernel2Dim( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT x0, INDEXT x1, INDEXT y0, INDEXT y1, OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut, bool outInDev, hipStream_t stream, void* tmpBuffer, bool allowMultiPass) { INDEXT starts[2] = { x0, y0 }; INDEXT ends[2] = { x1, y1 }; return callHistogramKernelNDim<histotype, nMultires, 2> (input, xformObj, sumfunObj, starts, ends, zero, out, nOut, outInDev, stream, tmpBuffer, allowMultiPass); } struct histogram_defaultXform { __host__ __device__ void operator() (int* input, int i, int* result_index, int* results, int nresults) const { //int idata = input[i]; #pragma unroll for (int resIndex = 0; resIndex < nresults; resIndex++) { *result_index++ = *input++; *results++ = 1; } } }; template <typename OUTPUTTYPE> struct histogram_defaultSum { __host__ __device__ OUTPUTTYPE operator() (OUTPUTTYPE i1, OUTPUTTYPE i2) const { return i1 + i2; } }; template <typename INPUTTYPE, typename OUTPUTTYPE> struct histogram_dummyXform { __host__ __device__ void operator() (INPUTTYPE* input, int i, int* result_index, OUTPUTTYPE* results, int nresults) const { //int idata = input[i]; int index = i; (void)input; #pragma unroll for (int resIndex = 0; resIndex < nresults; resIndex++) { *result_index++ = index++; *results++ = 1;//*input++; } } }; template <typename OUTPUTTYPE> struct histogram_dummySum { __host__ __device__ OUTPUTTYPE operator() (OUTPUTTYPE i1, OUTPUTTYPE i2) const { return i1; } }; template <histogram_type histotype, typename OUTPUTTYPE> int getHistogramBufSize(OUTPUTTYPE zero, int nOut) { int result = 0; int devId; hipDeviceProp_t props; hipError_t cudaErr = hipGetDevice( &devId ); if (cudaErr != 0) return -1; //assert(!cudaErr); cudaErr = hipGetDeviceProperties( &props, devId ); if (cudaErr != 0) return -1; int cuda_arch = DetectCudaArch(); if (nOut <= 0) return 0; if (smallBinLimit<histotype>(nOut, zero, &props, cuda_arch)) { result = 
getSmallBinBufSize<histotype, OUTPUTTYPE>(nOut, &props); } else if (binsFitIntoShared(nOut, zero, &props, cuda_arch)) { result = getMediumHistoTmpbufSize<histotype, OUTPUTTYPE>(nOut, &props); } else if (runMultiPass(nOut, &props, cuda_arch, sizeof(OUTPUTTYPE), histotype)) { result = getMultipassBufSize<histotype, OUTPUTTYPE>(nOut, &props, cuda_arch); } else { result = getLargeBinTmpbufsize<histotype, OUTPUTTYPE>(nOut, &props, cuda_arch); } return result; } // undef everything #undef H_ERROR_CHECKS #undef HBLOCK_SIZE_LOG2 #undef HBLOCK_SIZE #undef HMBLOCK_SIZE_LOG2 #undef HMBLOCK_SIZE #undef LBLOCK_SIZE_LOG2 #undef LBLOCK_SIZE #undef GATHER_BLOCK_SIZE_LOG2 #undef GATHER_BLOCK_SIZE #undef LBLOCK_WARPS #undef RBLOCK_SIZE #undef RMAXSTEPS #undef NHSTEPSPERKEY #undef MAX_NHSTEPS #undef MAX_MULTISTEPS #undef MAX_NLHSTEPS #undef STRATEGY_CHECK_INTERVAL_LOG2 #undef STRATEGY_CHECK_INTERVAL #undef HASH_COLLISION_STEPS #undef USE_JENKINS_HASH #undef LARGE_NBIN_CHECK_INTERVAL_LOG2 #undef LARGE_NBIN_CHECK_INTERVAL #undef SMALL_BLOCK_SIZE_LOG2 #undef SMALL_BLOCK_SIZE #undef MAX_SMALL_STEPS #undef USE_ATOMICS_HASH #undef USE_BALLOT_HISTOGRAM #undef TAKE_WARP_MUTEX #undef GIVE_WARP_MUTEX #undef FREE_MUTEX_ID #if USE_MEDIUM_PATH #undef MEDIUM_BLOCK_SIZE_LOG2 #undef MEDIUM_BLOCK_SIZE #endif #undef USE_MEDIUM_PATH
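// ---------------------------------------------------------------------------
// Minimal usage sketch (illustrative only, not part of the original library):
// it shows how getHistogramBufSize() is intended to pair with the tmpBuffer
// argument of callHistogramKernel() so repeated histogram calls can reuse one
// scratch allocation instead of allocating inside the library on every call.
// The names below (countXform, sumFun, runHistogramSketch) are hypothetical.
struct countXform
{
    __host__ __device__
    void operator() (unsigned int* input, int i, int* res_idx, unsigned int* res, int nres) const
    {
        *res_idx = input[i];   // the input value is already the bin index
        *res = 1;              // histogram_atomic_inc: each sample contributes one count
    }
};
struct sumFun
{
    __host__ __device__
    unsigned int operator() (unsigned int a, unsigned int b) const { return a + b; }
};
static hipError_t runHistogramSketch(unsigned int* d_input, int nSamples,
                                     unsigned int* d_bins, int nBins,
                                     hipStream_t stream)
{
    // Query how much device scratch memory this bin count needs (0 means none).
    int bufSize = getHistogramBufSize<histogram_atomic_inc>(0U, nBins);
    void* tmpBuffer = NULL;
    if (bufSize > 0)
        hipMalloc(&tmpBuffer, bufSize);
    // d_bins must already hold the initial bin values (typically zeros) and
    // lives in device memory here, hence outInDev = true.
    hipError_t err = callHistogramKernel<histogram_atomic_inc, 1>(
        d_input, countXform(), sumFun(), 0, nSamples, 0U, d_bins, nBins,
        true, stream, tmpBuffer);
    if (tmpBuffer)
        hipFree(tmpBuffer);
    return err;
}
// ---------------------------------------------------------------------------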
5412ddabee4891115aeffaf82c6265107a09b417.cu
#include <stdint.h> #include <stdlib.h> #include <string.h> #include <cutil.h> #include "util.h" #include "ref_2dhisto.h" #define H_ERROR_CHECKS 0 #if H_ERROR_CHECKS #include <assert.h> #include <stdio.h> #endif #define HBLOCK_SIZE_LOG2 7 #define HBLOCK_SIZE (1 << HBLOCK_SIZE_LOG2) // = 32 #define HMBLOCK_SIZE_LOG2 8 #define HMBLOCK_SIZE (1 << HMBLOCK_SIZE_LOG2) // = 32 #define LBLOCK_SIZE_LOG2 5 #define LBLOCK_SIZE (1 << LBLOCK_SIZE_LOG2) // = 256 #define LBLOCK_WARPS (LBLOCK_SIZE >> 5) #define USE_MEDIUM_PATH 1 #if USE_MEDIUM_PATH // For now only MEDIUM_BLOCK_SIZE_LOG2 == LBLOCK_SIZE_LOG2 works # define MEDIUM_BLOCK_SIZE_LOG2 8 # define MEDIUM_BLOCK_SIZE (1 << MEDIUM_BLOCK_SIZE_LOG2) // 128 # define MBLOCK_WARPS (MEDIUM_BLOCK_SIZE >> 5) #define MED_THREAD_DEGEN 16 #endif #define RBLOCK_SIZE 64 #define RMAXSTEPS 80 #define NHSTEPSPERKEY 32 #define MAX_NHSTEPS 1024 #define MAX_MULTISTEPS 1024 #define MAX_NLHSTEPS 2048 #define GATHER_BLOCK_SIZE_LOG2 6 #define GATHER_BLOCK_SIZE (1 << GATHER_BLOCK_SIZE_LOG2) #define STRATEGY_CHECK_INTERVAL_LOG2 7 #define STRATEGY_CHECK_INTERVAL (1 << STRATEGY_CHECK_INTERVAL_LOG2) #define HISTOGRAM_DEGEN_LIMIT 20 #define HASH_COLLISION_STEPS 2 const int numActiveUpperLimit = 24; #define USE_JENKINS_HASH 0 #define LARGE_NBIN_CHECK_INTERVAL_LOG2 5 #define LARGE_NBIN_CHECK_INTERVAL (1 << LARGE_NBIN_CHECK_INTERVAL_LOG2) #define SMALL_BLOCK_SIZE_LOG2 6 #define SMALL_BLOCK_SIZE (1 << SMALL_BLOCK_SIZE_LOG2) #define MAX_SMALL_STEPS 2040 #if __CUDA_ARCH__ >= 120 #define USE_ATOMICS_HASH 0 #else #define USE_ATOMICS_HASH 0 #endif #if (__CUDA_ARCH__ >= 200) # define USE_BALLOT_HISTOGRAM 1 #else # define USE_BALLOT_HISTOGRAM 0 #endif #ifndef __device__ #define __device__ #endif #ifndef __host__ #define __host__ #endif #ifndef __shared__ #define __shared__ #endif static unsigned int* d_Data = NULL; static unsigned int* d_Histogram = NULL; enum histogram_type { histogram_generic, histogram_atomic_inc, histogram_atomic_add, }; template <histogram_type histotype, typename OUTPUTTYPE> static int getHistogramBufSize(OUTPUTTYPE zero, int nOut); template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> static cudaError_t callHistogramKernel( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut, bool outInDev = false, cudaStream_t stream = 0, void* tmpBuffer = NULL, bool allowMultiPass = true); template <histogram_type histotype, int nMultires, int nDim, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> static cudaError_t callHistogramKernelNDim( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT* starts, INDEXT* ends, OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut, bool outInDev = false, cudaStream_t stream = 0, void* tmpBuffer = NULL, bool allowMultiPass = true); template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> cudaError_t callHistogramKernel2Dim( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT x0, INDEXT x1, INDEXT y0, INDEXT y1, OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut, bool outInDev, cudaStream_t stream, void* tmpBuffer, bool allowMultiPass = true); struct test_xform { __host__ __device__ void operator() (unsigned int* input, int i, int* res_idx, unsigned int* res, int nres) const { *res_idx++ 
= input[i]; *res++ = 1; } }; // Sum-functor to be used for reduction - just a normal sum of two integers struct test_sumfun { __device__ __host__ unsigned int operator() (unsigned int res1, unsigned int res2) const{ return res1 + res2; } }; __global__ void computeHistogram(unsigned int *buffer, int size, unsigned int *histo ) { __shared__ unsigned int temp[1024]; temp[threadIdx.x + 0] = 0; temp[threadIdx.x + 256] = 0; temp[threadIdx.x + 512] = 0; temp[threadIdx.x + 768] = 0; __syncthreads(); int i = threadIdx.x + blockIdx.x * blockDim.x; int offset = blockDim.x * gridDim.x; while (i < size) { atomicAdd( &temp[buffer[i]], 1); i += offset; } __syncthreads(); atomicAdd( &(histo[threadIdx.x + 0]), temp[threadIdx.x + 0] ); atomicAdd( &(histo[threadIdx.x + 256]), temp[threadIdx.x + 256] ); atomicAdd( &(histo[threadIdx.x + 512]), temp[threadIdx.x + 512] ); atomicAdd( &(histo[threadIdx.x + 768]), temp[threadIdx.x + 768] ); } extern "C" void opt_init(unsigned int** h_Data, int width, int height) { cudaMalloc((void **)&d_Histogram, HISTO_HEIGHT * HISTO_WIDTH * sizeof(unsigned int)); cudaMemset( d_Histogram, 0,HISTO_HEIGHT * HISTO_WIDTH * sizeof( unsigned int )); unsigned int *data = new unsigned int[width*height]; for(int j = 0;j<height;++j) { memcpy(data+j*width, h_Data[j], sizeof(unsigned int)*width); } cudaMalloc((void **)&d_Data, width*height*sizeof(unsigned int)); cudaMemcpy(d_Data, data, width*height*sizeof(unsigned int), cudaMemcpyHostToDevice); delete []data; } extern "C" void opt_2dhisto(int size) { test_xform xform; test_sumfun sum; callHistogramKernel<histogram_atomic_inc, 1>(d_Data, xform, sum, 0, size, 0U, d_Histogram, HISTO_HEIGHT * HISTO_WIDTH, true); } extern "C" void opt_free() { cudaFree(d_Histogram); cudaFree(d_Data); } extern "C" void opt_copyFromDevice(unsigned char* output) { unsigned int* h_Histogram = new unsigned int[HISTO_HEIGHT * HISTO_WIDTH]; cudaMemcpy(h_Histogram, d_Histogram, HISTO_HEIGHT * HISTO_WIDTH * sizeof(unsigned int), cudaMemcpyDeviceToHost); for(int i = 0;i<HISTO_HEIGHT * HISTO_WIDTH;++i) { output[i] = h_Histogram[i]>255?255:h_Histogram[i]; } delete[] h_Histogram; } //#include <stdio.h> template <typename OUTPUTTYPE, typename SUMFUNTYPE> __global__ void multireduceKernel(OUTPUTTYPE* input, int n, int nOut, int nsteps, SUMFUNTYPE sumFun, OUTPUTTYPE zero, int stride, OUTPUTTYPE* initialValues) { int tid = threadIdx.x; int bidx = blockIdx.x; int bidy = blockIdx.y; OUTPUTTYPE myout = zero; int i; for (i = 0; i < nsteps; i++) { int subIndex = bidx * RBLOCK_SIZE + tid; int cidx = subIndex + i * RBLOCK_SIZE * gridDim.x; if (cidx < n) { // printf("t(%2d)b(%3d,%2d) r(%d)\n", tid, bidx, bidy, cidx + bidy * stride); myout = sumFun(myout, input[cidx + bidy * stride]); } } __shared__ OUTPUTTYPE tmp[RBLOCK_SIZE / 2]; for (int curLimit = RBLOCK_SIZE / 2; curLimit > 0; curLimit >>= 1) { // First write out the current result for threads above the limit if (tid >= curLimit && tid < (curLimit << 1)) tmp[tid - curLimit] = myout; // Otherwise wait for the write the complete and add that value to our result __syncthreads(); if (tid < curLimit) myout = sumFun(myout, tmp[tid]); // IMPORTANT: Wait before new loop for the read to complete __syncthreads(); } // Done! myout contains the result for our block for thread 0!! if (tid == 0) { // NOTE: If gridDim == 1 then we have finally reached the last iteration and // can write the result into the final result-value array // (ie. 
The same as initialvalue-array) if (gridDim.x == 1) { OUTPUTTYPE initVal = initialValues[bidy]; initialValues[bidy] = sumFun(initVal, myout); // And we are DONE! } else { // printf("t(%2d)b(%3d,%2d) w(%d)\n", tid, bidx, bidy, bidx + bidy * stride); initialValues[bidx + bidy * stride] = myout; } } } template <typename OUTPUTTYPE, typename SUMFUNTYPE> static void callMultiReduce( int arrLen, int nOut, OUTPUTTYPE* h_results, OUTPUTTYPE* input, SUMFUNTYPE sumFunObj, OUTPUTTYPE zero, cudaStream_t stream, void* tmpbuf, bool outInDev) { int n = arrLen; // Set-up yet another temp buffer: (TODO: Pool alloc somehow?) OUTPUTTYPE* resultTemp = NULL; // TODO: Why do we need such a large temporary array? // Shouldn't sizeof(OUTPUTTYPE) * nOut * xblocks be enough?? if (tmpbuf) { resultTemp = (OUTPUTTYPE*)tmpbuf; } else { cudaMalloc((void**)&resultTemp, sizeof(OUTPUTTYPE) * nOut * arrLen); #if H_ERROR_CHECKS //printf("resultTemp = %p\n", resultTemp); cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) printf("Cudaerror0 = %s\n", cudaGetErrorString( error )); #endif } OUTPUTTYPE* output = resultTemp; enum cudaMemcpyKind fromOut = outInDev ? cudaMemcpyDeviceToDevice : cudaMemcpyHostToDevice; enum cudaMemcpyKind toOut = outInDev ? cudaMemcpyDeviceToDevice : cudaMemcpyDeviceToHost; // Copy initial values: do { int steps = (n + (RBLOCK_SIZE - 1)) / RBLOCK_SIZE; if (steps > RMAXSTEPS) steps = RMAXSTEPS; int yblocks = nOut; int xblocks = (n + (steps * RBLOCK_SIZE - 1)) / (steps * RBLOCK_SIZE); const dim3 block = RBLOCK_SIZE; const dim3 grid(xblocks, yblocks, 1); if (xblocks == 1) // LAST ONE to start { //printf("cudaMemcpy(%p, %p, %d, %d);\n", output, h_results, sizeof(OUTPUTTYPE) * nOut, fromOut); if (stream != 0) cudaMemcpyAsync(output, h_results, sizeof(OUTPUTTYPE) * nOut, fromOut, stream); else cudaMemcpy(output, h_results, sizeof(OUTPUTTYPE) * nOut, fromOut); } #if H_ERROR_CHECKS cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) printf("Cudaerror1 = %s\n", cudaGetErrorString( error )); #endif // Then the actual kernel call multireduceKernel<<<grid, block, 0, stream>>>(input, n, nOut, steps, sumFunObj, zero, arrLen, output); #if H_ERROR_CHECKS error = cudaGetLastError(); if (error != cudaSuccess) printf("Cudaerror2 = %s\n", cudaGetErrorString( error )); #endif if (xblocks > 1) { // Swap pointers: OUTPUTTYPE* tmpptr = output; output = input; input = tmpptr; } n = xblocks; } while(n > 1); // Then copy back the results: //cudaMemcpyAsync(h_results, resultTemp, sizeof(OUTPUTTYPE) * nOut, cudaMemcpyDeviceToHost, CURRENT_STREAM()); // TODO: Support async copy here?? 
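    // toOut was chosen above: device-to-device when the caller's result array
    // already lives in device memory, device-to-host otherwise; the async path
    // is only taken when the destination is device memory.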
if (outInDev && stream != 0) cudaMemcpyAsync(h_results, output, sizeof(OUTPUTTYPE) * nOut, toOut, stream); else cudaMemcpy(h_results, output, sizeof(OUTPUTTYPE) * nOut, toOut); #if H_ERROR_CHECKS cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) printf("Cudaerror3 = %s\n", cudaGetErrorString( error )); #endif if (!tmpbuf) { cudaFree(resultTemp); } #if H_ERROR_CHECKS error = cudaGetLastError(); if (error != cudaSuccess) printf("Cudaerror4 = %s\n", cudaGetErrorString( error )); #endif } template <typename SUMFUNTYPE, typename OUTPUTTYPE> __global__ void gatherKernel(SUMFUNTYPE sumfunObj, OUTPUTTYPE* blockOut, int nOut, int nEntries, OUTPUTTYPE zero) { //int resIdx = threadIdx.x + blockDim.x * blockIdx.x; int resIdx = blockIdx.x; if (resIdx < nOut) { // Let's divide the nEntries first evenly on all threads and read 4 entries in a row int locEntries = (nEntries) >> (GATHER_BLOCK_SIZE_LOG2); // Note: Original array entry is stored in resIdx + nOut * nEntries! OUTPUTTYPE res = zero; if (threadIdx.x == 0) res = blockOut[resIdx + nOut * nEntries]; // Shift starting ptr: blockOut = &blockOut[resIdx]; int locIdx = threadIdx.x * locEntries; for (int i=0; i < locEntries/4; i++) { OUTPUTTYPE x1 = blockOut[nOut * (locIdx + (i << 2))]; OUTPUTTYPE x2 = blockOut[nOut * (locIdx + (i << 2) + 1)]; OUTPUTTYPE x3 = blockOut[nOut * (locIdx + (i << 2) + 2)]; OUTPUTTYPE x4 = blockOut[nOut * (locIdx + (i << 2) + 3)]; res = sumfunObj(res, x1); res = sumfunObj(res, x2); res = sumfunObj(res, x3); res = sumfunObj(res, x4); } // Then do the rest for (int j = (locEntries/4)*4; j < locEntries; j++) { OUTPUTTYPE x1 = blockOut[nOut * (locIdx + j)]; res = sumfunObj(res, x1); } // Still handle rest starting from index "locEntries * BLOCK_SIZE": locIdx = threadIdx.x + (locEntries << GATHER_BLOCK_SIZE_LOG2); if (locIdx < nEntries) res = sumfunObj(res, blockOut[nOut * locIdx]); // Ok - all that is left is to do the final parallel reduction between threads: { __shared__ OUTPUTTYPE data[GATHER_BLOCK_SIZE]; //volatile OUTPUTTYPE* data = (volatile OUTPUTTYPE*)&dataTmp[0]; // TODO Compiler complains with volatile from this - why? //error: no operator "=" matches these operands // operand types are: volatile myTestType_s = myTestType // Silly - does not happen with built-in types (nice...) 
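            // Standard shared-memory tree reduction over the GATHER_BLOCK_SIZE
            // partial sums: each stage halves the number of active threads and
            // __syncthreads() orders the writes against the following reads.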
data[threadIdx.x] = res; #if GATHER_BLOCK_SIZE == 512 __syncthreads(); if (threadIdx.x < 256) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 256]); #endif #if GATHER_BLOCK_SIZE >= 256 __syncthreads(); if (threadIdx.x < 128) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 128]); #endif #if GATHER_BLOCK_SIZE >= 128 __syncthreads(); if (threadIdx.x < 64) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 64]); __syncthreads(); #endif #if GATHER_BLOCK_SIZE >= 64 __syncthreads(); if (threadIdx.x < 32) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 32]); #endif __syncthreads(); if (threadIdx.x < 16) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 16]); __syncthreads(); if (threadIdx.x < 8) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 8]); __syncthreads(); if (threadIdx.x < 4) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 4]); __syncthreads(); if (threadIdx.x < 2) data[threadIdx.x] = sumfunObj(data[threadIdx.x], data[threadIdx.x + 2]); __syncthreads(); if (threadIdx.x < 1) *blockOut = sumfunObj(data[threadIdx.x], data[threadIdx.x + 1]); } } } #define FREE_MUTEX_ID 0xffeecafe #define TAKE_WARP_MUTEX(ID) do { \ int warpIdWAM = threadIdx.x >> 5; \ __shared__ volatile int lockVarWarpAtomicMutex;\ bool doneWAM = false;\ bool allDone = false; \ while(!allDone){ \ __syncthreads(); \ if (!doneWAM) lockVarWarpAtomicMutex = warpIdWAM; \ __syncthreads(); \ if (lockVarWarpAtomicMutex == FREE_MUTEX_ID) allDone = true; \ __syncthreads(); \ if (lockVarWarpAtomicMutex == warpIdWAM){ /* We Won */ // User code comes here #define GIVE_WARP_MUTEX(ID) doneWAM = true; \ lockVarWarpAtomicMutex = FREE_MUTEX_ID; \ } \ } \ __syncthreads(); \ } while(0) // NOTE: Init must be called from divergent-free code (or with exited warps) #define INIT_WARP_MUTEX2(MUTEX) do { MUTEX = FREE_MUTEX_ID; __syncthreads(); } while(0) #if 0 && __CUDA_ARCH__ >= 120 // TODO: NOT WORKING THIS CODEPATH - find out why #define TAKE_WARP_MUTEX2(MUTEX) do { \ int warpIdWAM = 1000000 + threadIdx.x / 32; \ bool doneWAM = false;\ while(!doneWAM){ \ int old = -2; \ if (threadIdx.x % 32 == 0) \ old = atomicCAS(&MUTEX, FREE_MUTEX_ID, warpIdWAM); \ if (__any(old == FREE_MUTEX_ID)){ /* We Won */ // User code comes here #define GIVE_WARP_MUTEX2(MUTEX) doneWAM = true; \ atomicExch(&MUTEX, FREE_MUTEX_ID); \ } \ } \ } while(0) #else #define TAKE_WARP_MUTEX2(MUTEX) do { \ int warpIdWAM = 1000000 + threadIdx.x / 32; \ bool doneWAM = false;\ bool allDone = false; \ while(!allDone){ \ __syncthreads(); \ if (!doneWAM) MUTEX = warpIdWAM; \ __syncthreads(); \ if (MUTEX == FREE_MUTEX_ID) allDone = true; \ if (MUTEX == warpIdWAM){ /* We Won */ // User code comes here #define GIVE_WARP_MUTEX2(MUTEX) doneWAM = true; \ MUTEX = FREE_MUTEX_ID; \ } \ } \ } while(0) #endif #if USE_BALLOT_HISTOGRAM template <typename OUTPUTTYPE> static inline __device__ OUTPUTTYPE mySillyPopCount(unsigned int mymask, OUTPUTTYPE zero) { return zero; } static inline __device__ int mySillyPopCount(unsigned int mymask, int zero) { return (int)__popc(mymask); } static inline __device__ unsigned int mySillyPopCount(unsigned int mymask, unsigned int zero) { return (unsigned int)__popc(mymask); } static inline __device__ long long mySillyPopCount(unsigned int mymask, long long zero) { return (long long)__popc(mymask); } static inline __device__ unsigned long long mySillyPopCount(unsigned int mymask, unsigned long long zero) { return (unsigned long 
long)__popc(mymask); } template <histogram_type histotype, bool checkNSame, typename SUMFUNTYPE, typename OUTPUTTYPE> static inline __device__ bool ballot_makeUnique( SUMFUNTYPE sumfunObj, int myKey, OUTPUTTYPE* myOut, OUTPUTTYPE* s_vals, int* s_keys, int* nSameKeys) { unsigned int mymask; /* #if HBLOCK_SIZE != 32 #error Please use threadblocks of 32 threads #endif*/ //startKey = s_keys[startIndex]; // First dig out for each thread who are the other threads that have the same key as us... //int i = 0; if (checkNSame) { unsigned int donemask = 0; int startIndex = 32 - 1; int startKey = s_keys[startIndex]; *nSameKeys = 0; while (~donemask != 0 /*&& i++ < 32*/) { unsigned int mask = __ballot(myKey == startKey); if (myKey == startKey) mymask = mask; donemask |= mask; { int nSame = __popc(mask); if (nSame > *nSameKeys) *nSameKeys = nSame; } startIndex = 31 - __clz(~donemask); //if (myKey == 0) printf("Startindex = %d, donemask = 0x%08x, mask = 0x%08x\n", startIndex, donemask, mask); if (startIndex >= 0) startKey = s_keys[startIndex]; } } else { unsigned int donemask = 0; int startIndex = 32 - 1; while (startIndex >= 0) { int startKey = s_keys[startIndex]; unsigned int mask = __ballot(myKey == startKey); if (myKey == startKey) mymask = mask; donemask |= mask; startIndex = 31 - __clz(~donemask); } } // Ok now mymask contains those threads - now we just reduce locally - all threads run at the same // time, but reducing threads lose always half of them with each iteration - it would help // to work with more than 32 entries, but the algorithm seems to get tricky there. { // Compute the left side of the mask and the right side. rmask first will contain our thread index, but // we zero it out immediately unsigned int lmask = (mymask >> (threadIdx.x & 31)) << (threadIdx.x & 31); int IamNth = __popc(lmask) - 1; bool Iwrite = IamNth == 0; if (histotype == histogram_atomic_inc) { // Fast-path for atomic inc *myOut = mySillyPopCount(mymask, *myOut); return Iwrite && (myKey >= 0); } else { unsigned int rmask = mymask & (~lmask); // Now compute which number is our thread in the subarray of those threads that have the same key // starting from the left (ie. index == 31). So for thread 31 this will be always zero. int nextIdx = 31 - __clz(rmask); s_vals[(threadIdx.x & 31)] = *myOut; //if (myKey == 0) printf("tid = %02d, IamNth = %02d, mask = 0x%08x, rmask = 0x%08x \n", threadIdx.x, IamNth, mymask, rmask); //bool done = __all(nextIdx < 0); // TODO: Unroll 5? while (!__all(nextIdx < 0)) { // Reduce towards those threads that have lower IamNth // Our thread reads the next one if our internal ID is even if ((IamNth & 0x1) == 0) { if (nextIdx >= 0){ // if (myKey == 0) printf("tid:%02d, add with %02d\n", threadIdx.x, nextIdx); *myOut = sumfunObj(*myOut, s_vals[nextIdx]); } // And writes to the shared memory if our internal ID is third on every 4-long subarray: if ((IamNth & 0x3) == 2) { // if (myKey == 0) printf("Tid %02d, store\n", threadIdx.x); s_vals[(threadIdx.x & 31)] = *myOut; } } // Now the beautiful part: Kill every other bit in the rmask bitfield. How, you ask? // Using ballot: Every bit we want to kill has IamNth odd, or conversely, we only // want to keep those bits that have IamNth even... 
rmask &= __ballot((IamNth & 0x1) == 0); nextIdx = 31 - __clz(rmask); // if (myKey == 0) printf("tid = %02d, next = %02d, key = %d\n", threadIdx.x, rmask, nextIdx, myKey); IamNth >>= 1; //printf("i = %d\n", i); } // And voila, we are done - write out the result: return Iwrite && (myKey >= 0); } } } #endif template <bool laststeps, typename SUMFUNTYPE, typename OUTPUTTYPE> static inline __device__ void myAtomicWarpAdd(OUTPUTTYPE* addr, OUTPUTTYPE val, volatile int* keyAddr, SUMFUNTYPE sumfunObj, bool Iwrite, int* warpmutex) { // Taken from http://forums.nvidia.com/index.php?showtopic=72925 // This is a tad slow, but allows arbitrary operation // For writes of 16 bytes or less AtomicCAS could be faster // (See CUDA programming guide) TAKE_WARP_MUTEX(0); //__shared__ int warpmutex; //INIT_WARP_MUTEX2(*warpmutex); //TAKE_WARP_MUTEX2(*warpmutex); bool write = Iwrite; #define MU_TEMP_MAGIC 0xffffaaaa *keyAddr = MU_TEMP_MAGIC; while (1) { // Vote whose turn is it - remember, one thread does succeed always!: if (write) *keyAddr = threadIdx.x; if (*keyAddr == MU_TEMP_MAGIC) break; if (*keyAddr == threadIdx.x) // We won! { // Do arbitrary atomic op: *addr = sumfunObj(*addr, val); write = false; *keyAddr = MU_TEMP_MAGIC; } } GIVE_WARP_MUTEX(0); //GIVE_WARP_MUTEX2(*warpmutex); #undef MU_TEMP_MAGIC } template <typename SUMFUNTYPE, typename OUTPUTTYPE> static inline __device__ void myAtomicAdd(OUTPUTTYPE* addr, OUTPUTTYPE val, volatile int* keyAddr, SUMFUNTYPE sumfunObj) { // Taken from http://forums.nvidia.com/index.php?showtopic=72925 // This is a tad slow, but allows arbitrary operation // For writes of 16 bytes or less AtomicCAS could be faster // (See CUDA programming guide) bool write = true; #define MU_TEMP_MAGIC 0xffffaaaa *keyAddr = MU_TEMP_MAGIC; while (1) { // Vote whose turn is it - remember, one thread does succeed always!: if (write ) *keyAddr = threadIdx.x; if (*keyAddr == MU_TEMP_MAGIC) break; if (*keyAddr == threadIdx.x) // We won! { // Do arbitrary atomic op: *addr = sumfunObj(*addr, val); write = false; *keyAddr = MU_TEMP_MAGIC; } } #undef MU_TEMP_MAGIC } /*static __inline__ __device__ unsigned long long int atomicAdd(unsigned long long int *address, unsigned long long int val) { return __ullAtomicAdd(address, val); }*/ template <typename OUTPUTTYPE> static inline __device__ void atomicAdd(OUTPUTTYPE* addr, OUTPUTTYPE val) { //*addr = val; } template <typename OUTPUTTYPE> static inline __device__ void atomicAdd(OUTPUTTYPE* addr, int val) { //*addr = val; } #if 0 template <typename OUTPUTTYPE> static inline __device__ void atomicAdd(OUTPUTTYPE* addr, float val) { //*addr = val; } #endif template <typename OUTPUTTYPE> static inline __device__ void atomicAdd(OUTPUTTYPE* addr, unsigned int val) { //*addr = val; } template <typename SUMFUNTYPE, typename OUTPUTTYPE> static inline __device__ void myAtomicAddStats(OUTPUTTYPE* addr, OUTPUTTYPE val, volatile int* keyAddr, SUMFUNTYPE sumfunObj, int* nSameOut, bool Iwrite) { // Taken from http://forums.nvidia.com/index.php?showtopic=72925 bool write = true; *keyAddr = 0xffffffff; while (Iwrite) { // Vote whose turn is it - remember, one thread does succeed always!: if (write ) *keyAddr = threadIdx.x; if (*keyAddr == 0xffffffff) break; if (*keyAddr == threadIdx.x) // We won! { // Do arbitrary atomic op: *addr = sumfunObj(*addr, val); write = false; *keyAddr = 0xffffffff; } else { *nSameOut = *nSameOut + 1; } } { // Then find max __shared__ int nSame[HBLOCK_SIZE]; nSame[threadIdx.x] = *nSameOut; #define TMPMAX(A,B) (A) > (B) ? 
(A) : (B) #define tidx threadIdx.x if (tidx < 16) nSame[tidx] = TMPMAX(nSame[tidx] , nSame[tidx + 16]); if (tidx < 8) nSame[tidx] = TMPMAX(nSame[tidx] , nSame[tidx + 8]); if (tidx < 4) nSame[tidx] = TMPMAX(nSame[tidx] , nSame[tidx + 4]); if (tidx < 2) nSame[tidx] = TMPMAX(nSame[tidx] , nSame[tidx + 2]); if (tidx < 1) nSame[tidx] = TMPMAX(nSame[tidx] , nSame[tidx + 1]); #undef TMPMAX #undef tidx // Broadcast to all threads *nSameOut = nSame[0]; } } // TODO: Make unique within one warp? template<histogram_type histotype, bool checkNSame, typename SUMFUNTYPE, typename OUTPUTTYPE> static inline __device__ bool reduceToUnique(OUTPUTTYPE* res, int myKey, int* nSame, SUMFUNTYPE sumfunObj, int* keys, OUTPUTTYPE* outputs) { keys[(threadIdx.x & 31)] = myKey; #if USE_BALLOT_HISTOGRAM return ballot_makeUnique<histotype, checkNSame>(sumfunObj, myKey, res, outputs, keys, nSame); #else { int i; bool writeResult = myKey >= 0; int myIdx = (threadIdx.x & 31) + 1; outputs[(threadIdx.x & 31)] = *res; // The assumption for sanity of this loop here is that all the data is in registers or shared memory and // hence this loop will not actually be __that__ slow.. Also it helps if the data is spread out (ie. there are // a lot of different indices here) for (i = 1; i < 32 && writeResult; i++) { if (myIdx >= 32) myIdx = 0; // Is my index the same as the index on the index-list? if (keys[myIdx] == myKey /*&& threadIdx.x != myIdx*/) { if (checkNSame) (*nSame)++; // If yes, then we can sum up the result using users sum-functor *res = sumfunObj(*res, outputs[myIdx]); // But if somebody else is summing up this index already, we don't need to (wasted effort done here) if (myIdx < threadIdx.x) writeResult = false; } myIdx++; } // Ok - we are done - now we can proceed in writing the result (if some other thread isn't doing it already) if (checkNSame) { // Manual reduce int tid = threadIdx.x; keys[tid] = *nSame; if (tid < 16) keys[tid] = keys[tid] > keys[tid + 16] ? keys[tid] : keys[tid+16]; if (tid < 8) keys[tid] = keys[tid] > keys[tid + 8] ? keys[tid] : keys[tid+8]; if (tid < 4) keys[tid] = keys[tid] > keys[tid + 4] ? keys[tid] : keys[tid+4]; if (tid < 2) keys[tid] = keys[tid] > keys[tid + 2] ? keys[tid] : keys[tid+2]; if (tid < 1) keys[tid] = keys[tid] > keys[tid + 1] ? keys[tid] : keys[tid+1]; *nSame = keys[0]; } return writeResult; } #endif } static inline __host__ __device__ void checkStrategyFun(bool *reduce, int nSame, int nSameTot, int step, int nBinSetslog2) { #if __CUDA_ARCH__ >= 200 #define STR_LIMIT 12 #else #define STR_LIMIT 24 #endif // TODO: Fix average case - a lot of things to tune here... 
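    // nSameTot accumulates duplicate-key counts over all steps so far, so the
    // first clause compares the running per-step average against STR_LIMIT,
    // while the second reacts to a single heavily degenerate step.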
if ((nSameTot > STR_LIMIT * step || nSame > STR_LIMIT)) *reduce = true; else *reduce = false; #undef STR_LIMIT } // Special case for floats (atomicAdd works only from __CUDA_ARCH__ 200 and up) template <typename SUMFUNTYPE> static inline __device__ void wrapAtomicAdd2(float* addr, float val, int* key, SUMFUNTYPE sumFunObj) { //*addr = val; #if __CUDA_ARCH__ >= 200 atomicAdd(addr, val); #else myAtomicAdd(addr, val, key, sumFunObj); #endif } template <typename SUMFUNTYPE,typename OUTPUTTYPE> static inline __device__ void wrapAtomicAdd2(OUTPUTTYPE* addr, OUTPUTTYPE val, int* key, SUMFUNTYPE sumFunObj) { atomicAdd(addr, val); } // Special case for floats (atomicAdd works only from __CUDA_ARCH__ 200 and up) template <bool laststeps, typename SUMFUNTYPE> static inline __device__ void wrapAtomicAdd2Warp(float* addr, float val, int* key, SUMFUNTYPE sumFunObj, bool Iwrite, int* warpmutex) { //*addr = val; #if __CUDA_ARCH__ >= 200 if (Iwrite) atomicAdd(addr, val); #else myAtomicWarpAdd<laststeps>(addr, val, key, sumFunObj, Iwrite, warpmutex); #endif } template <bool laststeps, typename SUMFUNTYPE,typename OUTPUTTYPE> static inline __device__ void wrapAtomicAdd2Warp(OUTPUTTYPE* addr, OUTPUTTYPE val, int* key, SUMFUNTYPE sumFunObj, bool Iwrite, int* warpmutex) { if (Iwrite) atomicAdd(addr, val); } template <typename OUTPUTTYPE, typename SUMFUNTYPE> static inline __device__ void wrapAtomicAdd(OUTPUTTYPE* addr, OUTPUTTYPE val, int* key, SUMFUNTYPE sumFunObj) { //*addr = val; #if __CUDA_ARCH__ >= 120 wrapAtomicAdd2(addr, val, key, sumFunObj); #else myAtomicAdd(addr, val, key, sumFunObj); #endif } template <typename OUTPUTTYPE, typename SUMFUNTYPE> static inline __device__ void wrapAtomicInc(OUTPUTTYPE* addr, int* key, SUMFUNTYPE sumFunObj) { //*addr = val; #if __CUDA_ARCH__ >= 120 wrapAtomicAdd2((int*)addr, 1, key, sumFunObj); #else //myAtomicAdd((int*)addr, 1, key, sumFunObj); #endif } template <typename SUMFUNTYPE> static inline __device__ void wrapAtomicInc(int* addr, int* key, SUMFUNTYPE sumFunObj) { //*addr = val; #if __CUDA_ARCH__ >= 120 wrapAtomicAdd2(addr, 1, key, sumFunObj); #else myAtomicAdd(addr, 1, key, sumFunObj); #endif } template <bool laststeps, typename OUTPUTTYPE, typename SUMFUNTYPE> static inline __device__ void wrapAtomicAddWarp(OUTPUTTYPE* addr, OUTPUTTYPE val, int* key, SUMFUNTYPE sumFunObj, bool Iwrite, int* warpmutex) { //*addr = val; #if __CUDA_ARCH__ >= 120 wrapAtomicAdd2Warp<laststeps>(addr, val, key, sumFunObj, Iwrite, warpmutex); #else myAtomicWarpAdd<laststeps>(addr, val, key, sumFunObj, Iwrite, warpmutex); #endif } template <bool laststeps, typename OUTPUTTYPE, typename SUMFUNTYPE> static inline __device__ void wrapAtomicIncWarp(OUTPUTTYPE* addr, int* key, SUMFUNTYPE sumFunObj, bool Iwrite, int* warpmutex) { //*addr = val; #if __CUDA_ARCH__ >= 120 wrapAtomicAdd2Warp<laststeps>((int*)addr, 1, key, sumFunObj, Iwrite, warpmutex); #else //myAtomicAdd((int*)addr, 1, key, sumFunObj); #endif } template <bool laststeps, typename SUMFUNTYPE> static inline __device__ void wrapAtomicIncWarp(int* addr, int* key, SUMFUNTYPE sumFunObj, bool Iwrite, int* warpmutex) { //*addr = val; #if __CUDA_ARCH__ >= 120 wrapAtomicAdd2Warp<laststeps>(addr, 1, key, sumFunObj, Iwrite, warpmutex); #else myAtomicWarpAdd<laststeps>(addr, 1, key, sumFunObj, Iwrite, warpmutex); #endif } // TODO: Consider the following: // First private hash for each warp - later, share hash-tables between warps // Try also: private hashes for some threads of one warp etc template <typename OUTPUTTYPE> struct myHash { int* 
keys; #if !USE_ATOMICS_HASH int* locks; #endif OUTPUTTYPE* vals; OUTPUTTYPE* myBlockOut; }; template <typename OUTPUTTYPE> static inline __device__ void InitHash(struct myHash<OUTPUTTYPE> *hash, OUTPUTTYPE zero, int hashSizelog2) { int nloops = (1 << hashSizelog2) >> LBLOCK_SIZE_LOG2; int* myEntry = &hash->keys[threadIdx.x]; for (int i = 0; i < nloops; i++) { *myEntry = -1; myEntry += LBLOCK_SIZE; } if ((nloops << LBLOCK_SIZE_LOG2) + threadIdx.x < (1 << hashSizelog2)) { *myEntry = -1; } // Done } #if 0 // OLD code template <typename SUMFUNTYPE, typename OUTPUTTYPE> static inline __device__ void FlushHash(struct myHash<OUTPUTTYPE> *hash, SUMFUNTYPE sumfunObj, int hashSizelog2) { int nloops = (1 << hashSizelog2) >> LBLOCK_SIZE_LOG2; OUTPUTTYPE* myVal = &hash->vals[threadIdx.x]; int* key = &hash->keys[threadIdx.x]; for (int i = 0; i < nloops; i++) { int keyIndex = *key; if (keyIndex >= 0) { hash->myBlockOut[keyIndex] = sumfunObj(*myVal, hash->myBlockOut[keyIndex]); *key = -1; } key += LBLOCK_SIZE; myVal += LBLOCK_SIZE; } if ((nloops << LBLOCK_SIZE_LOG2) + threadIdx.x < (1 << hashSizelog2)) { int keyIndex = *key; if (keyIndex >= 0){ hash->myBlockOut[keyIndex] = sumfunObj(*myVal, hash->myBlockOut[keyIndex]); *key = -1; } } } #endif // 0 // See: http://www.burtleburtle.net/bob/hash/doobs.html // Mix by Bob Jenkins #define HISTO_JENKINS_MIX(A, B, C) \ do { \ A -= B; A -= C; A ^= (C>>13); \ B -= C; B -= A; B ^= (A<<8); \ C -= A; C -= B; C ^= (B>>13); \ A -= B; A -= C; A ^= (C>>12); \ B -= C; B -= A; B ^= (A<<16); \ C -= A; C -= B; C ^= (B>>5); \ A -= B; A -= C; A ^= (C>>3); \ B -= C; B -= A; B ^= (A<<10); \ C -= A; C -= B; C ^= (B>>15); \ } while (0) static inline __device__ unsigned int histogramHashFunction(int key) { #if USE_JENKINS_HASH unsigned int a = (unsigned int)key; unsigned int c,b; // TODO: What are good constants? b = 0x9e3779b9; c = 0xf1232345; HISTO_JENKINS_MIX(a, b, c); return c; #else // Golden ratio hash return (0x9e3779b9u * (unsigned int)key); #endif } #if USE_ATOMICS_HASH template <typename SUMFUNTYPE, typename OUTPUTTYPE> static inline __device__ void AddToHash(OUTPUTTYPE res, int myKey, struct myHash<OUTPUTTYPE> *hash, SUMFUNTYPE sumfunObj, int hashSizelog2, bool Iwrite, bool unique) { if (unique) { if (Iwrite) { hash->myBlockOut[myKey] = sumfunObj(res, hash->myBlockOut[myKey]); } return; } unsigned int hashkey = histogramHashFunction(myKey); volatile __shared__ bool hashFull; int index = (int)(hashkey >> (32 - hashSizelog2)); bool Iamdone = !Iwrite; bool IFlush = Iwrite; hashFull = true; while (hashFull) { // Mark here hash full, and if any thread has problems finding // free entry in hash, then that thread sets hashFull to nonzero if (threadIdx.x == 0) hashFull = false; // Do atomic-part int old = -2; int expect = -1; while (!Iamdone && !hashFull) { old = atomicCAS(&hash->keys[index], expect, -3); if (old == expect) // We won! { int key = old; if (key == -1 || key == myKey) { if (key == -1) { hash->vals[index] = res; } else { hash->vals[index] = sumfunObj(res, hash->vals[index]); IFlush = false; } hash->keys[index] = myKey; Iamdone = true; } else { hashFull = true; hash->keys[index] = key; expect = -1; } } else { if (old != myKey) { hashFull = true; expect = -1; } else { expect = old; } } } if (IFlush && Iamdone) { OUTPUTTYPE* myVal = &hash->vals[index]; int* key = &hash->keys[index]; // TODO: Workaround - get rid of if. Where do the extra flushes come from? 
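            // Flush: the thread that first claimed this slot folds the
            // accumulated value into the block's private per-bin output and
            // releases the slot (key = -1) for later keys.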
if (*key >= 0) hash->myBlockOut[*key] = sumfunObj(*myVal, hash->myBlockOut[*key]); //hash->myBlockOut[myKey] = sumfunObj(*myVal, hash->myBlockOut[myKey]); *key = -1; } } } #else template <typename SUMFUNTYPE, typename OUTPUTTYPE> static inline __device__ void AddToHash(OUTPUTTYPE res, int myKey, struct myHash<OUTPUTTYPE> *hash, SUMFUNTYPE sumfunObj, int hashSizelog2, bool Iwrite, bool unique) { if (unique) { if (Iwrite) { hash->myBlockOut[myKey] = sumfunObj(res, hash->myBlockOut[myKey]); } return; } unsigned int hashkey = histogramHashFunction(myKey); volatile __shared__ int hashFull; int index = (int)(hashkey >> (32 - hashSizelog2)); bool Iamdone = false; bool IFlush = Iwrite; // TODO: syncthreads()... hashFull = -10; while (hashFull != 0) { volatile int* lock = &hash->locks[index]; bool write = Iwrite; #define TMP_LOCK_MAGIC 0xfffffffe *lock = TMP_LOCK_MAGIC; // Mark here hash full, and if any thread has problems finding // free entry in hash, then that thread sets hashFull to nonzero if (threadIdx.x == 0) hashFull = 0; // Do atomic-part while (1) { if (!Iamdone && write) *lock = threadIdx.x; if (*lock == TMP_LOCK_MAGIC) break; if (*lock == threadIdx.x) // We won! { int key = hash->keys[index]; if (key == -1) { hash->keys[index] = myKey; hash->vals[index] = res; Iamdone = true; } else if (key == myKey) { hash->vals[index] = sumfunObj(res, hash->vals[index]); Iamdone = true; IFlush = false; } else { hashFull = 1; } // Do arbitrary atomic op: write = false; *lock = TMP_LOCK_MAGIC; } } if (IFlush) { OUTPUTTYPE* myVal = &hash->vals[index]; int* key = &hash->keys[index]; // TODO: Workaround - get rid of if. Where do the extra flushes come from? if (*key >= 0) hash->myBlockOut[*key] = sumfunObj(*myVal, hash->myBlockOut[*key]); *key = -1; } } #undef TMP_LOCK_MAGIC } #endif template <histogram_type histotype, int nMultires, bool reduce, bool checkStrategy, bool laststep, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> static inline __device__ void histo_largenbin_step(INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, OUTPUTTYPE zero, INDEXT* myStart, INDEXT end, struct myHash<OUTPUTTYPE> *hash, OUTPUTTYPE* blockOut, int nOut, int stepNum, int stepsleft, int* nSameTot, bool* reduceOut, int hashSizelog2, OUTPUTTYPE* rOuts, int* rKeys) { if (!laststep) { if (checkStrategy) { int myKeys[nMultires]; int nSame = 0; OUTPUTTYPE res[nMultires]; xformObj(input, *myStart, &myKeys[0], &res[0], nMultires); // TODO: Unroll? addtoHash is a big function.. 
Hmm but, unrolling would enable registers probably bool Iwrite; #define ADD_ONE_RESULT(RESIDX, NSAME, CHECK) \ do { if (RESIDX < nMultires) { \ Iwrite = reduceToUnique<histotype, CHECK> \ (&res[RESIDX % nMultires], myKeys[RESIDX % nMultires], NSAME, sumfunObj, rKeys, rOuts); \ if ((threadIdx.x) < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1; \ AddToHash(res[RESIDX % nMultires], myKeys[RESIDX % nMultires], hash, sumfunObj, hashSizelog2, Iwrite, true); \ } } while (0) ADD_ONE_RESULT(0, &nSame, true); ADD_ONE_RESULT(1, NULL, false); ADD_ONE_RESULT(2, NULL, false); ADD_ONE_RESULT(3, NULL, false); #undef ADD_ONE_RESULT //#pragma unroll for (int resid = 4; resid < nMultires; resid++) { bool Iwrite = reduceToUnique<histotype, false>(&res[resid], myKeys[resid], NULL, sumfunObj, rKeys, rOuts); if ((threadIdx.x) < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1; AddToHash(res[resid], myKeys[resid], hash, sumfunObj, hashSizelog2, Iwrite, true); } *nSameTot += nSame; checkStrategyFun(reduceOut, nSame, *nSameTot, stepNum, 0); *myStart += LBLOCK_SIZE; } else { INDEXT startLim = *myStart + ((LBLOCK_SIZE << LARGE_NBIN_CHECK_INTERVAL_LOG2) - LBLOCK_SIZE); for (; *myStart < startLim; *myStart += LBLOCK_SIZE) { int myKeys[nMultires]; OUTPUTTYPE res[nMultires]; xformObj(input, *myStart, &myKeys[0], &res[0], nMultires); //#pragma unroll bool Iwrite = true; #define ADD_ONE_RESULT(RES) \ do { if (RES < nMultires) { \ if (reduce){ Iwrite = reduceToUnique<histotype, false>(&res[RES % nMultires], \ myKeys[RES % nMultires], NULL, sumfunObj, rKeys, rOuts); \ if (threadIdx.x < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1;} \ AddToHash(res[RES % nMultires], myKeys[RES % nMultires], hash, \ sumfunObj, hashSizelog2, Iwrite, reduce); \ } } while (0) ADD_ONE_RESULT(0); ADD_ONE_RESULT(1); ADD_ONE_RESULT(2); ADD_ONE_RESULT(3); #undef ADD_ONE_RESULT for (int resid = 4; resid < nMultires; resid++) { bool Iwrite = true; if (reduce){ Iwrite = reduceToUnique<histotype, false>(&res[resid], myKeys[resid], NULL, sumfunObj, rKeys, rOuts); if (threadIdx.x < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1; } AddToHash(res[resid], myKeys[resid], hash, sumfunObj, hashSizelog2, Iwrite, reduce); } } } } else // These are the last steps then { for (int substep = 0; substep < stepsleft; substep++) { int myKeys[nMultires]; OUTPUTTYPE res[nMultires]; bool Iwrite = false; if (*myStart < end) { Iwrite = true; xformObj(input, *myStart, &myKeys[0], &res[0], nMultires); } else { #pragma unroll for (int resid = 0; resid < nMultires; resid++) { res[resid] = zero; myKeys[resid] = 0; } } //#pragma unroll { bool Iwrite2 = Iwrite; #define ADD_ONE_RESULT(RES) \ do { if (RES < nMultires) { \ if (reduce){ Iwrite2 = reduceToUnique<histotype, false> \ (&res[RES % nMultires], myKeys[RES % nMultires], NULL, sumfunObj, rKeys, rOuts); \ if (threadIdx.x < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1; } \ AddToHash(res[RES % nMultires], myKeys[RES % nMultires], hash, sumfunObj, hashSizelog2, Iwrite2, reduce); \ } } while(0) ADD_ONE_RESULT(0); ADD_ONE_RESULT(1); ADD_ONE_RESULT(2); ADD_ONE_RESULT(3); #undef ADD_ONE_RESULT for (int resid = 4; resid < nMultires; resid++) { //bool Iwrite2 = true; if (reduce){ Iwrite2 = reduceToUnique<histotype, false>(&res[resid], myKeys[resid], NULL, sumfunObj, rKeys, rOuts); if (threadIdx.x < (1 << hashSizelog2)) hash->keys[threadIdx.x] = -1; } AddToHash(res[resid], myKeys[resid], hash, sumfunObj, hashSizelog2, Iwrite2, reduce); } } *myStart += LBLOCK_SIZE; } } } template <histogram_type histotype, int nMultires, 
typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> __global__ void histo_kernel_largeNBins( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* blockOut, int nOut, int nSteps, int hashSizelog2) { extern __shared__ int keys[]; #if USE_ATOMICS_HASH OUTPUTTYPE* vals = (OUTPUTTYPE*)(&keys[1 << hashSizelog2]); if (hashSizelog2 < LBLOCK_SIZE_LOG2) vals = &keys[1 << LBLOCK_SIZE_LOG2]; #else int* locks = &keys[1 << hashSizelog2]; if (hashSizelog2 < LBLOCK_SIZE_LOG2) locks = &keys[1 << LBLOCK_SIZE_LOG2]; OUTPUTTYPE* vals = (OUTPUTTYPE*)(&locks[1 << hashSizelog2]); #endif /*int* rKeys = (int*)(&vals[1 << hashSizelog2]); OUTPUTTYPE* rOuts = (OUTPUTTYPE*)(&rKeys[LBLOCK_SIZE]);*/ int* rKeys = &keys[0]; OUTPUTTYPE* rOuts = vals; struct myHash<OUTPUTTYPE> hash; hash.keys = keys; #if !USE_ATOMICS_HASH hash.locks = locks; #endif hash.vals = vals; // Where do we put the results from our warp (block)? hash.myBlockOut = &blockOut[nOut * blockIdx.x]; INDEXT myStart = start + (INDEXT)(((blockIdx.x * nSteps) << LBLOCK_SIZE_LOG2) + threadIdx.x); // Assert that myStart is not out of bounds! int nFullSteps = nSteps >> LARGE_NBIN_CHECK_INTERVAL_LOG2; bool reduce = false; InitHash(&hash, zero, hashSizelog2); int nSameTot = 0; for (int fstep = 0; fstep < nFullSteps; fstep++) { int stepNum = fstep << LARGE_NBIN_CHECK_INTERVAL_LOG2; histo_largenbin_step<histotype, nMultires, true, true, false,INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, zero, &myStart, end, &hash, blockOut, nOut, stepNum, 0, &nSameTot, &reduce, hashSizelog2, rOuts, rKeys); if (reduce) { histo_largenbin_step<histotype, nMultires, true, false, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, zero, &myStart, end, &hash, blockOut, nOut, stepNum + 1, 0, &nSameTot, &reduce, hashSizelog2, rOuts, rKeys); } else { histo_largenbin_step<histotype, nMultires, false, false, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, zero, &myStart, end, &hash, blockOut, nOut, stepNum + 1, 0, &nSameTot, &reduce, hashSizelog2, rOuts, rKeys); } } // Last steps int nstepsleft = nSteps - (nFullSteps << LARGE_NBIN_CHECK_INTERVAL_LOG2); if (nstepsleft > 0) { int stepNum = nFullSteps << LARGE_NBIN_CHECK_INTERVAL_LOG2; if (reduce) histo_largenbin_step<histotype, nMultires, true, false, true, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, zero, &myStart, end, &hash, blockOut, nOut, stepNum, nstepsleft, &nSameTot, &reduce, hashSizelog2, rOuts, rKeys); else histo_largenbin_step<histotype, nMultires, false, false, true, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, zero, &myStart, end, &hash, blockOut, nOut, stepNum, nstepsleft, &nSameTot, &reduce, hashSizelog2, rOuts, rKeys); } // Flush values still in hash //FlushHash(&hash, sumfunObj, hashSizelog2); } #if USE_MEDIUM_PATH // template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> __global__ void histo_kernel_mediumNBins( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* blockOut, int nOut, int nSteps) { #if __CUDA_ARCH__ >= 120 OUTPUTTYPE* ourOut = &blockOut[nOut * (threadIdx.x % MED_THREAD_DEGEN) * blockIdx.x]; INDEXT myStart = start + (INDEXT)(((blockIdx.x * 
nSteps) << MEDIUM_BLOCK_SIZE_LOG2) + threadIdx.x); bool reduce = false; int nSameTot = 0; for (int step = 0; step < nSteps - 1; step++) { bool check = false; int myKey[nMultires]; OUTPUTTYPE myOut[nMultires]; xformObj(input, myStart, &myKey[0], &myOut[0],nMultires); // TODO: magic constant if ((step & 63) == 0) check = true; { int nSame; __shared__ int keys[MEDIUM_BLOCK_SIZE]; __shared__ OUTPUTTYPE rOut[MEDIUM_BLOCK_SIZE]; int warpIdx = threadIdx.x >> 5; int* wkeys = &keys[warpIdx << 5]; OUTPUTTYPE* wOut = &rOut[warpIdx << 5]; bool Iwrite; #define ADD_ONE_RESULT(RESID) \ do { if (RESID < nMultires) { \ if (reduce || check){ \ if (check) Iwrite = reduceToUnique<histotype, true> \ (&myOut[RESID % nMultires], myKey[RESID % nMultires], \ &nSame, sumfunObj, wkeys, wOut); \ else Iwrite = reduceToUnique<histotype, false> \ (&myOut[RESID % nMultires], myKey[RESID % nMultires], NULL, sumfunObj, \ wkeys, wOut); \ if (Iwrite) \ atomicAdd(&ourOut[myKey[RESID % nMultires]], myOut[RESID % nMultires]); \ if (check){ \ nSameTot += nSame; \ checkStrategyFun(&reduce, nSame, nSameTot, step, 0); \ check = false; \ } \ } else { \ if (histotype == histogram_atomic_inc) \ atomicAdd(&ourOut[myKey[RESID % nMultires]], 1); \ else if (histotype == histogram_atomic_add) \ atomicAdd(&ourOut[myKey[RESID % nMultires]], myOut[RESID % nMultires]); \ } } \ } while(0) ADD_ONE_RESULT(0); ADD_ONE_RESULT(1); ADD_ONE_RESULT(2); ADD_ONE_RESULT(3); //#pragma unroll for (int resid = 4; resid < nMultires; resid++) { ADD_ONE_RESULT(resid); } } myStart += MEDIUM_BLOCK_SIZE; } if (myStart < end) { int myKey[nMultires]; OUTPUTTYPE myOut[nMultires]; xformObj(input, myStart, &myKey[0], &myOut[0],nMultires); for (int resid = 0; resid < nMultires; resid++) { if (histotype == histogram_atomic_inc) { atomicAdd(&ourOut[myKey[resid]], 1); } else if (histotype == histogram_atomic_add) { atomicAdd(&ourOut[myKey[resid]], myOut[resid]); } } } #endif // __CUDA_ARCH__ } #endif // USE_MEDIUM_PATH static int determineHashSizeLog2(size_t outSize, int* nblocks, cudaDeviceProp* props) { // TODO: Magic hat-constant 500 reserved for inputs, how to compute? int sharedTot = (props->sharedMemPerBlock - 500) /* / LBLOCK_WARPS*/; //int sharedTot = 32000; // How many blocks of 32 keys could we have? //int nb32Max = sharedTot / (32 * outSize); // But ideally we should run at least 4 active blocks per SM, // How can we balance this? Well - with very low ablock-values (a), // we perform bad, but after 4, adding more // will help less and less, whereas adding more to the hash always helps! 
#if USE_ATOMICS_HASH outSize += sizeof(int); #else outSize += sizeof(int); #endif int naMax = sharedTot / (32 * outSize); while (naMax > numActiveUpperLimit) naMax >>= 1; int nb32 = sharedTot / (32 * outSize * naMax); // Now we have "number of pieces", use it to compute some nice power-of-two hash-size int hashSize = nb32 * 32; unsigned int res = 0; if (hashSize >= 1<<16) { hashSize >>= 16; res += 16; } if (hashSize >= 1<< 8) { hashSize >>= 8; res += 8; } if (hashSize >= 1<< 4) { hashSize >>= 4; res += 4; } if (hashSize >= 1<< 2) { hashSize >>= 2; res += 2; } if (hashSize >= 1<< 1) { res += 1; } // Now res holds the log2 of hash size => n active blocksMEDIUM_BLOCK_SIZE_LOG2 = sharedTot / (outSize << res); *nblocks = (sharedTot / (outSize << res)) * props->multiProcessorCount; if (*nblocks > props->multiProcessorCount * 8) *nblocks = props->multiProcessorCount * 8; return res; } template <typename OUTPUTTYPE> __global__ void initKernel(OUTPUTTYPE* tmpOut, OUTPUTTYPE zeroVal, int tmpOutSize, int steps) { int idx = blockIdx.x * blockDim.x * steps + threadIdx.x; for (int step = 0; step < steps; step++) { if (idx < tmpOutSize) tmpOut[idx] = zeroVal; idx += blockDim.x; } } template <histogram_type histotype, typename OUTPUTTYPE> static int getLargeBinTmpbufsize(int nOut, cudaDeviceProp* props, int cuda_arch) { int nblocks; int hashSizelog2 = determineHashSizeLog2(sizeof(OUTPUTTYPE), &nblocks, props); int arrLen = nblocks; #if USE_MEDIUM_PATH if (cuda_arch >= 120 && (histotype == histogram_atomic_inc || histotype == histogram_atomic_add)) arrLen *= MED_THREAD_DEGEN; #endif return (arrLen + 1) * nOut * sizeof(OUTPUTTYPE); } template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> static void callHistogramKernelLargeNBins( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut, cudaDeviceProp* props, int cuda_arch, cudaStream_t stream, int* getTmpBufSize, void* tmpBuffer, bool outInDev) { int nblocks; int hashSizelog2 = determineHashSizeLog2(sizeof(OUTPUTTYPE), &nblocks, props); INDEXT size = end - start; // Check if there is something to do actually... if (end <= start) { if (getTmpBufSize) getTmpBufSize = 0; return; } dim3 block = LBLOCK_SIZE; dim3 grid = nblocks; int arrLen = nblocks; #if USE_MEDIUM_PATH if (cuda_arch >= 120 && (histotype == histogram_atomic_inc || histotype == histogram_atomic_add)) arrLen *= MED_THREAD_DEGEN; #endif INDEXT nSteps = size / (INDEXT)( LBLOCK_SIZE * nblocks); OUTPUTTYPE* tmpOut; //int n = nblocks; if (getTmpBufSize) { *getTmpBufSize = (arrLen + 1) * nOut * sizeof(OUTPUTTYPE); return; } if (tmpBuffer){ tmpOut = (OUTPUTTYPE*)tmpBuffer; } else { size_t allocSize = (arrLen + 1) * nOut * sizeof(OUTPUTTYPE); cudaMalloc((void**)&tmpOut, allocSize); } //printf("Using hash-based histogram: hashsize = %d, nblocksToT = %d\n", (1 << hashSizelog2), nblocks); #if USE_ATOMICS_HASH int extSharedNeeded = (1 << hashSizelog2) * (sizeof(OUTPUTTYPE) + sizeof(int)); #else int extSharedNeeded = (1 << hashSizelog2) * (sizeof(OUTPUTTYPE) + sizeof(int) * 2); #endif // The shared memory here is needed for the reduction code (ie. reduce to unique) // TODO: new hash-code could probably reuse the memory reserved for the hash-table, // it would just need to reinit the keys to -1 after use - think about it. 
if (cuda_arch >= 200 && histotype == histogram_atomic_inc) { if (hashSizelog2 < LBLOCK_SIZE_LOG2) extSharedNeeded += (sizeof(int) << (LBLOCK_SIZE_LOG2 - hashSizelog2)); } else { if (hashSizelog2 < LBLOCK_SIZE_LOG2) extSharedNeeded += ((sizeof(OUTPUTTYPE) + sizeof(int)) << (LBLOCK_SIZE_LOG2 - hashSizelog2)); } //printf("binsets = %d, steps = %d\n", (1 << nKeysetslog2), nsteps); { #define IBLOCK_SIZE_LOG2 7 #define IBLOCK_SIZE (1 << IBLOCK_SIZE_LOG2) int initPaddedSize = ((arrLen * nOut) + (IBLOCK_SIZE) - 1) & (~((IBLOCK_SIZE) - 1)); const dim3 initblock = IBLOCK_SIZE; dim3 initgrid = initPaddedSize >> ( IBLOCK_SIZE_LOG2 ); int nsteps = 1; while (initgrid.x > (1 << 14)) { initgrid.x >>= 1; nsteps <<= 1; if (nsteps * initgrid.x * IBLOCK_SIZE < arrLen * nOut) initgrid.x++; } initKernel<<<initgrid,initblock,0,stream>>>(tmpOut, zero, arrLen * nOut, nsteps); } //int medExtShared = nOut; //const int shLimit = 0; //const int shLimit = 0;//16000 / 2; // Codepath below is a lot faster for random bins, a tad faster for real use-case // and a lot slower for degenerate key-distributions #if USE_MEDIUM_PATH if (cuda_arch >= 120 && (histotype == histogram_atomic_inc || histotype == histogram_atomic_add)) { const dim3 block = MEDIUM_BLOCK_SIZE; dim3 grid = nblocks; INDEXT nSteps = size / (INDEXT)( MEDIUM_BLOCK_SIZE * nblocks); INDEXT nFullSteps = 1; if (nSteps <= 0) { nFullSteps = 0; nblocks = (size >> MEDIUM_BLOCK_SIZE_LOG2); if ((nblocks << MEDIUM_BLOCK_SIZE_LOG2) < size) nblocks++; } if (nSteps > MAX_NLHSTEPS) { nFullSteps = size / ( MEDIUM_BLOCK_SIZE * nblocks * MAX_NLHSTEPS); nSteps = MAX_NLHSTEPS; } for (INDEXT step = 0; step < nFullSteps; step++) { histo_kernel_mediumNBins<histotype, nMultires><<<grid, block, 0, stream>>>( input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, nSteps); start += (MEDIUM_BLOCK_SIZE * (INDEXT)nblocks * nSteps); } size = end - start; nSteps = size / (INDEXT)( MEDIUM_BLOCK_SIZE * nblocks); if (nSteps > 0) { histo_kernel_mediumNBins<histotype, nMultires><<<grid, block, 0, stream>>>( input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, nSteps); start += (MEDIUM_BLOCK_SIZE * (INDEXT)nblocks * nSteps); size = end - start; } if (size > 0) { int ntblocks = size / ( MEDIUM_BLOCK_SIZE ); if (ntblocks * MEDIUM_BLOCK_SIZE < size) ntblocks++; grid.x = ntblocks; histo_kernel_mediumNBins<histotype, nMultires><<<grid, block, 0, stream>>>( input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, 1); } } else #endif // USE_MEDIUM_PATH { INDEXT nFullSteps = 1; if (nSteps <= 0) { nFullSteps = 0; nblocks = (size >> LBLOCK_SIZE_LOG2); if ((nblocks << LBLOCK_SIZE_LOG2) < size) nblocks++; } if (nSteps > MAX_NLHSTEPS) { nFullSteps = size / ( LBLOCK_SIZE * (INDEXT)nblocks * MAX_NLHSTEPS); nSteps = MAX_NLHSTEPS; } for (int step = 0; step < nFullSteps; step++) { histo_kernel_largeNBins<histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>( input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, nSteps, hashSizelog2); start += (LBLOCK_SIZE * (INDEXT)nblocks * nSteps); } size = end - start; nSteps = size / ( LBLOCK_SIZE * (INDEXT)nblocks); if (nSteps > 0) { histo_kernel_largeNBins<histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>( input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, nSteps, hashSizelog2); start += (LBLOCK_SIZE * (INDEXT)nblocks * nSteps); size = end - start; } if (size > 0) { int ntblocks = size / ( LBLOCK_SIZE ); if (ntblocks * LBLOCK_SIZE < size) ntblocks++; grid.x = ntblocks; histo_kernel_largeNBins<histotype, nMultires><<<grid, 
block, extSharedNeeded, stream>>>( input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, 1, hashSizelog2); } } #if H_ERROR_CHECKS cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) printf("Cudaerror = %s\n", cudaGetErrorString( error )); #endif // OK - so now tmpOut contains our gold - we just need to dig it out now enum cudaMemcpyKind fromOut = outInDev ? cudaMemcpyDeviceToDevice : cudaMemcpyHostToDevice; enum cudaMemcpyKind toOut = outInDev ? cudaMemcpyDeviceToDevice : cudaMemcpyDeviceToHost; if (stream != 0) cudaMemcpyAsync(&tmpOut[arrLen * nOut], out, sizeof(OUTPUTTYPE) * nOut, fromOut, stream); else cudaMemcpy(&tmpOut[arrLen * nOut], out, sizeof(OUTPUTTYPE) * nOut, fromOut); grid.x = nOut; //grid.x = nOut >> LBLOCK_SIZE_LOG2; //if ((grid.x << LBLOCK_SIZE_LOG2) < nOut) grid.x++; block.x = GATHER_BLOCK_SIZE; gatherKernel<<<grid, block, 0, stream>>>(sumfunObj, tmpOut, nOut, arrLen /** LBLOCK_WARPS*/, zero); // TODO: Async copy here also??? if (outInDev && stream != 0) cudaMemcpyAsync(out, tmpOut, nOut*sizeof(OUTPUTTYPE), toOut, stream); else cudaMemcpy(out, tmpOut, nOut*sizeof(OUTPUTTYPE), toOut); // CPU-code path for debugging here: /* { int resIdx; int i; OUTPUTTYPE* h_tmp = (OUTPUTTYPE*)malloc(nblocks * nOut * sizeof(OUTPUTTYPE)); //parallel_copy(h_tmp, MemType_HOST, tmpOut, MemType_DEV, n * nOut * sizeof(OUTPUTTYPE)); cudaMemcpy(h_tmp, tmpOut, nblocks*nOut*sizeof(OUTPUTTYPE), cudaMemcpyDeviceToHost); for (resIdx = 0; resIdx < nOut; resIdx++) { OUTPUTTYPE res = out[resIdx]; for (i = 0; i < nblocks; i++) { res = sumfunObj(res, h_tmp[i * nOut + resIdx]); } out[resIdx] = sumfunObj(res, out[resIdx]); } free(h_tmp); } */ if (!tmpBuffer) cudaFree(tmpOut); } static int determineNKeySetsLog2(size_t size_out, int nOut, cudaDeviceProp* props) { // 32 threads per block, one block shares one binset // Go for 2x occupancy = 64 active threads per block // Hence if we have NBinSets, then we need tot_size x nOut x NBinSets x 2 bytes of shared // On sm_20 we have 48 000 bytes and on sm_1x 16 000 // Hence nbinsets = SharedMem / (2 * tot_size * nOut) // For example sm_20, 16 int bins: // nbinsets = 48000 / 2 * 4 * 16 = 48000 / 2*64 = 48000 / 128 = 375... // More than enough, but is it enough active threadblocks?? 
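// NOTE: In the current form only the single-binset case is enabled: the code below
// returns 0 (one binset) whenever at least one binset fits into the assumed 16 kB of
// shared memory and -1 when not even one fits; the multi-binset returns (1..5) are
// commented out because the measured advantage was negligible.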
int nBytesShared = 16000; size_t sizetot = size_out + sizeof(int); int nBinSets = nBytesShared / (sizetot * 2 * nOut); // NOTE: Disabling for now - advantages seem nonexistent // if (nBinSets >= 32) return 5; // if (nBinSets >= 16) return 4; // if (nBinSets >= 8) return 3; // if (nBinSets >= 4) return 2; // if (nBinSets >= 2) return 1; if (nBinSets >= 1) return 0; return -1; } #if __CUDA_ARCH__ >= 200 template <int nMultires> static inline __device__ bool checkForReduction (int* myKeys, int* rkeys) { // Idea - if there is a large number of degenerate entries then we don't need to check them all for degeneracy // TODO: Implement the wonderful idea //return ((threadIdx.x >> 5) & 3) < 3; #if 1 bool myKeyDegenerate; //TAKE_WARP_MUTEX(0); rkeys[threadIdx.x & 31] = myKeys[0]; // Check two thirds myKeyDegenerate = (myKeys[0] == (rkeys[(threadIdx.x + 1) & 31])) /*|| (myKeys[0] == (rkeys[(threadIdx.x + 8) & 31]))*/; //GIVE_WARP_MUTEX(0); unsigned int degenMask = __ballot(myKeyDegenerate); // Estimate number of degenerate keys - if all are degenerate, the estimate is accurate int nDegen = __popc(degenMask); if (nDegen > HISTOGRAM_DEGEN_LIMIT) return true; else return false; #endif } #endif template <histogram_type histotype, int nBinSetslog2, int nMultires, bool laststeps, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> static inline __device__ void histogramKernel_stepImpl( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT end, OUTPUTTYPE zero, int nOut, INDEXT startidx, OUTPUTTYPE* bins, int* locks, OUTPUTTYPE* rvals, int* rkeys, int* doReduce, bool checkReduce, int* warpmutex) { int myKeys[nMultires]; OUTPUTTYPE vals[nMultires]; bool doWrite = true; if (laststeps){ if (startidx < end) { xformObj(input, startidx, &myKeys[0], &vals[0], nMultires); } else { doWrite = false; #pragma unroll for (int r = 0; r < nMultires; r++){ vals[r] = zero; myKeys[r] = -1; } } } else { xformObj(input, startidx, &myKeys[0], &vals[0], nMultires); } // See keyIndex-reasoning above int binSet = (threadIdx.x & ((1 << nBinSetslog2) - 1)); #if __CUDA_ARCH__ >= 200 /* if (laststeps){ *doReduce = false; } else*/ { if (checkReduce){ *doReduce = checkForReduction<nMultires>(myKeys, rkeys); if (histotype == histogram_generic || histotype == histogram_atomic_add){ __shared__ int tmp; tmp = 0; __syncthreads(); if (*doReduce && ((threadIdx.x & 31) == 0)) atomicAdd(&tmp, 1); __syncthreads(); if (tmp > HBLOCK_SIZE / 2) *doReduce = true; else *doReduce = false; } //if (laststeps) *doReduce = false; /* __syncthreads(); bool tmpred = checkForReduction<nMultires>(myKeys, rkeys); if ((threadIdx.x & 31) == 0) atomicExch(doReduce, (int)tmpred); __syncthreads();*/ } } #endif // TODO: Unroll this later - nvcc (at least older versions) can't unroll atomics (?) // TODO: How to avoid bank-conflicts? Any way to avoid? #if __CUDA_ARCH__ >= 200 #define ONE_HS_STEP(RESID) do { if ((RESID) < nMultires) { \ int keyIndex = doWrite == false ? 
0 : (myKeys[(RESID % nMultires)] << nBinSetslog2) + binSet; \ if (*doReduce){\ if (histotype == histogram_generic || histotype == histogram_atomic_add){\ bool Iwrite;\ TAKE_WARP_MUTEX(0);\ Iwrite = reduceToUnique<histotype, false>(&vals[(RESID % nMultires)], myKeys[(RESID % nMultires)], NULL, sumfunObj, rkeys, rvals);\ if (Iwrite && doWrite) bins[keyIndex] = sumfunObj(bins[keyIndex], vals[(RESID % nMultires)]);\ /*if (histotype == histogram_generic) myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite && doWrite, warpmutex);\ else wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite && doWrite, warpmutex);*/\ GIVE_WARP_MUTEX(0);\ } else { \ bool Iwrite = reduceToUnique<histotype, false>(&vals[(RESID % nMultires)], myKeys[(RESID % nMultires)], NULL, sumfunObj, rkeys, rvals);\ wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite && doWrite, warpmutex); \ }\ } else {\ if (histotype == histogram_generic)\ myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\ else if (histotype == histogram_atomic_add)\ wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, doWrite, warpmutex);\ else if (histotype == histogram_atomic_inc)\ wrapAtomicIncWarp<laststeps>(&bins[keyIndex], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\ else{\ myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\ }\ } } } while (0) #else #define ONE_HS_STEP(RESID) do { if ((RESID) < nMultires) { \ int keyIndex = doWrite == false ? 0 : (myKeys[(RESID % nMultires)] << nBinSetslog2) + binSet; \ if (histotype == histogram_generic)\ myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\ else if (histotype == histogram_atomic_add)\ wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, doWrite, warpmutex);\ else if (histotype == histogram_atomic_inc)\ wrapAtomicIncWarp<laststeps>(&bins[keyIndex], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\ else{\ myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, doWrite, warpmutex);\ }\ } } while (0) #endif ONE_HS_STEP(0); ONE_HS_STEP(1); ONE_HS_STEP(2); ONE_HS_STEP(3); //#pragma unroll for (int resid = 4; resid < nMultires; resid++){ ONE_HS_STEP(resid); } #undef ONE_HS_STEP } template <int nBinSetslog2, histogram_type histotype, int nMultires, bool lastSteps, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> __global__ void histogramKernel_sharedbins_new( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* blockOut, int nOut, int outStride, int nSteps) { extern __shared__ int cudahistogram_binstmp[]; OUTPUTTYPE* bins = (OUTPUTTYPE*)&(*cudahistogram_binstmp); int* locks = (int*)&bins[(nOut << nBinSetslog2)]; int* rkeys = NULL; OUTPUTTYPE* rvals = NULL; //__shared__ int warpmutex; //INIT_WARP_MUTEX2(warpmutex); #if __CUDA_ARCH__ >= 200 int warpId = threadIdx.x >> 5; if (histotype == histogram_generic) rkeys = &locks[(nOut << nBinSetslog2)]; else rkeys = locks; rvals = (OUTPUTTYPE*)&rkeys[32]; if (histotype == histogram_atomic_inc){ rkeys = &rkeys[warpId 
<< 5]; //rvals = &rvals[warpId << 5]; } #endif const int nBinSets = 1 << nBinSetslog2; // Reset all bins to zero... for (int j = 0; j < ((nOut << nBinSetslog2) >> HBLOCK_SIZE_LOG2) + 1; j++) { int bin = (j << HBLOCK_SIZE_LOG2) + threadIdx.x; if (bin < (nOut << nBinSetslog2)){ bins[bin] = zero; } } #if HBLOCK_SIZE > 32 __syncthreads(); #endif int outidx = blockIdx.x; INDEXT startidx = (INDEXT)((outidx * nSteps) * HBLOCK_SIZE + start + threadIdx.x); /*__shared__*/ int doReduce; // local var - TODO: Is this safe?? doReduce = 0; #define MED_UNROLL_LOG2 2 #define MED_UNROLL (1 << MED_UNROLL_LOG2) int step; for (step = 0; step < (nSteps >> MED_UNROLL_LOG2); step++) { //#pragma unroll //for (int substep = 0; substep < MED_UNROLL; substep++){ histogramKernel_stepImpl<histotype, nBinSetslog2, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, end, zero, nOut, startidx, bins, locks, rvals, rkeys, &doReduce, true, &warpmutex); startidx += HBLOCK_SIZE; histogramKernel_stepImpl<histotype, nBinSetslog2, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, end, zero, nOut, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex); startidx += HBLOCK_SIZE; histogramKernel_stepImpl<histotype, nBinSetslog2, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, end, zero, nOut, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex); startidx += HBLOCK_SIZE; histogramKernel_stepImpl<histotype, nBinSetslog2, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, end, zero, nOut, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex); startidx += HBLOCK_SIZE; //} } step = (nSteps >> MED_UNROLL_LOG2) << MED_UNROLL_LOG2; for (; step < nSteps ; step++) { histogramKernel_stepImpl<histotype, nBinSetslog2, nMultires, lastSteps, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, end, zero, nOut, startidx, bins, locks, rvals, rkeys, &doReduce, (step & 7) == 0, &warpmutex); startidx += HBLOCK_SIZE; } #undef MED_UNROLL #undef MED_UNROLL_LOG2 #if HBLOCK_SIZE > 32 __syncthreads(); #endif // Finally put together the bins for (int j = 0; j < (nOut >> HBLOCK_SIZE_LOG2) + 1; j++) { int key = (j << HBLOCK_SIZE_LOG2) + threadIdx.x; if (key < nOut) { OUTPUTTYPE res = blockOut[key * outStride + outidx]; //int tmpBin = bin; #pragma unroll for (int k = 0; k < nBinSets; k++) { //tmpBin += nOut; res = sumfunObj(res, bins[(key << nBinSetslog2) + k]); } //printf("tid:%02d, write out bin: %02d, \n", threadIdx.x, bin); blockOut[key * outStride + outidx] = res; } } } template <histogram_type histotype, typename OUTPUTTYPE> static int getMediumHistoTmpbufSize(int nOut, cudaDeviceProp* props) { int nblocks = props->multiProcessorCount * 8; // NOTE: The other half is used by multireduce... return 2 * nblocks * nOut * sizeof(OUTPUTTYPE); } template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> static void callHistogramKernelImpl( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut, cudaDeviceProp* props, cudaStream_t stream, size_t* getTmpBufSize, void* tmpBuffer, bool outInDev, int cuda_arch) { INDEXT size = end - start; // Check if there is something to do actually... 
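    // Overall flow of this medium-size (shared-memory bins) path:
    //  1) initKernel zeroes a per-block partial-result array tmpOut with nblocks * nOut entries,
    //  2) histogramKernel_sharedbins_new accumulates into shared-memory bins and folds each
    //     block's bins into its own column of tmpOut, looping over the input range in chunks
    //     of nsteps * nblocks * HBLOCK_SIZE indices (a lastStep template variant handles the
    //     ragged tail without reading past 'end'),
    //  3) callMultiReduce combines the per-block partials with the caller's 'out' array.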
if (end <= start) { if (getTmpBufSize) *getTmpBufSize = 0; return; } int nblocks = props->multiProcessorCount * 8; // Assert that our grid is not too large! //MY_ASSERT(n < 65536 && "Sorry - currently we can't do such a big problems with histogram-kernel..."); // One entry for each output for each thread-block: //OUTPUTTYPE* tmpOut = (OUTPUTTYPE*)parallel_alloc(MemType_DEV, n * nOut * sizeof(OUTPUTTYPE)); OUTPUTTYPE* tmpOut; if (getTmpBufSize) { // NOTE: The other half is used by multireduce... *getTmpBufSize = 2 * nblocks * nOut * sizeof(OUTPUTTYPE); return; } int nsteps = size / ( nblocks * HBLOCK_SIZE ); if (nsteps * nblocks * HBLOCK_SIZE < size) nsteps++; if (nsteps > MAX_NHSTEPS) nsteps = MAX_NHSTEPS; if (tmpBuffer) { char* tmpptr = (char*)tmpBuffer; tmpOut = (OUTPUTTYPE*)tmpBuffer; tmpBuffer = (void*)&tmpptr[nblocks * nOut * sizeof(OUTPUTTYPE)]; } else { cudaMalloc((void**)&tmpOut, nblocks * nOut * sizeof(OUTPUTTYPE)); } /* For block size other that power of two: const dim3 grid = size / BLOCK_SIZE + ( size % BLOCK_SIZE == 0 ? 0 : 1 ); */ //MY_ASSERT(size > 0); //cudaMemsetAsync(tmpOut, 0xFF, n * nOut * sizeof(OUTPUTTYPE), CURRENT_STREAM() ); //cudaMemset(tmpOut, 0xFF, n * nOut * sizeof(OUTPUTTYPE) ); { #define IBLOCK_SIZE_LOG2 7 #define IBLOCK_SIZE (1 << IBLOCK_SIZE_LOG2) int initPaddedSize = ((nblocks * nOut) + (IBLOCK_SIZE) - 1) & (~((IBLOCK_SIZE) - 1)); const dim3 initblock = IBLOCK_SIZE; dim3 initgrid = initPaddedSize >> ( IBLOCK_SIZE_LOG2 ); int nsteps = 1; while (initgrid.x > (1 << 14)) { initgrid.x >>= 1; nsteps <<= 1; if (nsteps * initgrid.x * IBLOCK_SIZE < nblocks * nOut) initgrid.x++; } initKernel<<<initgrid,initblock,0,stream>>>(tmpOut, zero, nblocks * nOut, nsteps); #undef IBLOCK_SIZE_LOG2 #undef IBLOCK_SIZE } int nKeysetslog2 = determineNKeySetsLog2(sizeof(OUTPUTTYPE), nOut, props); if (nKeysetslog2 < 0) nKeysetslog2 = 0; int extSharedNeeded = ((nOut << nKeysetslog2)) * (sizeof(OUTPUTTYPE)); // bins if (histotype == histogram_generic || cuda_arch < 130) extSharedNeeded += ((nOut << nKeysetslog2)) * (sizeof(int)); // locks if (cuda_arch >= 200) { // Reduction stuff: if (histotype == histogram_generic || histotype == histogram_atomic_add) { extSharedNeeded += ((sizeof(OUTPUTTYPE) + sizeof(int)) << 5); // reduction values } else { extSharedNeeded += (sizeof(int) << HBLOCK_SIZE_LOG2); // keys per warp of one thread } } /*int extSharedNeeded = ((nOut << nKeysetslog2)) * (sizeof(OUTPUTTYPE) + sizeof(int)) + (sizeof(OUTPUTTYPE) * HBLOCK_SIZE); if (nOut < HBLOCK_SIZE) extSharedNeeded += sizeof(int) * (HBLOCK_SIZE - nOut); if (cuda_arch < 130) extSharedNeeded += ((nOut << nKeysetslog2)) * (sizeof(int));*/ //printf("binsets = %d, steps = %d\n", (1 << nKeysetslog2), nsteps); int nOrigBlocks = nblocks; INDEXT myStart = start; while(myStart < end) { bool lastStep = false; if (myStart + nsteps * nblocks * HBLOCK_SIZE > end) { size = end - myStart; nsteps = (size) / (nblocks * HBLOCK_SIZE); if (nsteps < 1) { lastStep = true; nsteps = 1; nblocks = size / HBLOCK_SIZE; if (nblocks * HBLOCK_SIZE < size) nblocks++; } } dim3 grid = nblocks; dim3 block = HBLOCK_SIZE; switch (nKeysetslog2) { case 0: if (lastStep) histogramKernel_sharedbins_new<0, histotype, nMultires, true><<<grid, block, extSharedNeeded, stream>>>( input, xformObj, sumfunObj, myStart, end, zero, tmpOut, nOut, nOrigBlocks, nsteps); else histogramKernel_sharedbins_new<0, histotype, nMultires, false><<<grid, block, extSharedNeeded, stream>>>( input, xformObj, sumfunObj, myStart, end, zero, tmpOut, nOut, nOrigBlocks, nsteps); 
break; /* case 1: histogramKernel_sharedbins_new<1, histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>( input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, n, nsteps); break; case 2: histogramKernel_sharedbins_new<2, histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>( input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, n, nsteps); break; case 3: histogramKernel_sharedbins_new<3, histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>( input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, n, nsteps); break; case 4: histogramKernel_sharedbins_new<4, histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>( input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, n, nsteps); break; case 5: histogramKernel_sharedbins_new<5, histotype, nMultires><<<grid, block, extSharedNeeded, stream>>>( input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, n, nsteps); break;*/ case -1: // TODO: Error? //assert(0); // "Sorry - not implemented yet" break; } myStart += nsteps * nblocks * HBLOCK_SIZE; } #if H_ERROR_CHECKS cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) printf("Cudaerror = %s\n", cudaGetErrorString( error )); #endif // OK - so now tmpOut contains our gold - we just need to dig it out now callMultiReduce(nOrigBlocks, nOut, out, tmpOut, sumfunObj, zero, stream, tmpBuffer, outInDev); // Below same as host-code #if 0 { int resIdx; int i; OUTPUTTYPE* h_tmp = (OUTPUTTYPE*)malloc(n * nOut * sizeof(OUTPUTTYPE)); //parallel_copy(h_tmp, MemType_HOST, tmpOut, MemType_DEV, n * nOut * sizeof(OUTPUTTYPE)); cudaMemcpy(h_tmp, tmpOut, n*nOut*sizeof(OUTPUTTYPE), cudaMemcpyDeviceToHost); for (resIdx = 0; resIdx < nOut; resIdx++) { OUTPUTTYPE res = out[resIdx]; for (i = 0; i < n; i++) { res = sumfunObj(res, h_tmp[i + resIdx * n]); } out[resIdx] = res; } free(h_tmp); } #endif //parallel_free(tmpOut, MemType_DEV); if (!tmpBuffer) cudaFree(tmpOut); } template <typename OUTTYPE> static bool binsFitIntoShared(int nOut, OUTTYPE zero, cudaDeviceProp* props, int cuda_arch) { // Assume here we can only use 16kb of shared in total per SM // Also lets take minimal of 2 threads per functional unit active, in // order to be able to hide at least some latencies - for Fermi this means 32 * 2 = 64 // of active threads needed in total (Note: This is minimal and will hurt perf). // Also we run blocks of 32 threads and each block needs its own bin - therefore // we need in total 2 full bin-sets per SM plus 32 bins for the one for the working part // of the algorithm. // Due to these considerations we infer that we can fit it nicely in, if // (4 binsets x Nbins/binset + 32) x sizeof(OUTYPE) < 16kib - let's take here 16kb to have some room // for required parameters // Example: 64 doubles: 8bytes per number double => (4 * 64 + 32) * 8bytes = 288 * 8 bytes = 2304 bytes -> Easy // How many bins of doubles can we do with these limits? // ( 4 * x + 32) * 8bytes = 16000 bytes <=> 4x = 2000 - 32 => x = 2000/4 - 32/4 = 500 - 8 = 492 bins. // TODO: A possibly faster version of this would be to share one set of bins over as many warps as possible // for example, if we would use 512 threads = 16 warps, then this would be fine for hiding probably all major latencies // and we could get away with just one binset on SM: // ( x + 512 ) * 8bytes = 16000 bytes <=> x = 2000 - 512 = 1488 bins! 
With better latency-hiding // On the other hand this requires atomic operations on the shared memory, which could be somewhat slower on // arbitrary types, but all in all, this would seem to provide a better route. At least worth investigating... int shlimit = props->sharedMemPerBlock - 300; int limit = shlimit; // TODO: Pessimistic limit int need = (sizeof(zero) + sizeof(int)) * nOut; if (cuda_arch >= 200) need += HBLOCK_SIZE * sizeof(int) + 32 * sizeof(zero); if (need <= limit) return true; return false; } template <bool subHisto, histogram_type histotype, int nBinSetslog2, int nMultires, bool laststeps, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> static inline __device__ void histogramKernel_stepImplMulti( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT end, OUTPUTTYPE zero, int subsize, INDEXT startidx, OUTPUTTYPE* bins, int* locks, OUTPUTTYPE* rvals, int* rkeys, int* doReduce, bool checkReduce, int* warpmutex, int binOffset) { int myKeys[nMultires]; OUTPUTTYPE vals[nMultires]; bool doWrite = true; if (laststeps){ if (startidx < end) { xformObj(input, startidx, &myKeys[0], &vals[0], nMultires); } else { doWrite = false; #pragma unroll for (int r = 0; r < nMultires; r++){ vals[r] = zero; myKeys[r] = -1; } } } else { xformObj(input, startidx, &myKeys[0], &vals[0], nMultires); } #if __CUDA_ARCH__ >= 200 /* if (laststeps){ *doReduce = false; } else*/ { if (checkReduce){ *doReduce = checkForReduction<nMultires>(myKeys, rkeys); if (histotype == histogram_generic || histotype == histogram_atomic_add){ __shared__ int tmp; tmp = 0; __syncthreads(); if (*doReduce && ((threadIdx.x & 31) == 0)) atomicAdd(&tmp, 1); __syncthreads(); if (tmp > HMBLOCK_SIZE / 2) *doReduce = true; else *doReduce = false; } //if (laststeps) *doReduce = false; /* __syncthreads(); bool tmpred = checkForReduction<nMultires>(myKeys, rkeys); if ((threadIdx.x & 31) == 0) atomicExch(doReduce, (int)tmpred); __syncthreads();*/ } } #endif // TODO: Unroll this later - nvcc (at least older versions) can't unroll atomics (?) // TODO: How to avoid bank-conflicts? Any way to avoid? 
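// NOTE on ONE_HS_STEP below (multipass variant): each blockIdx.x owns the contiguous
// key range [binOffset, binOffset + subsize). Keys outside that range are redirected
// to shared bin 0 with the write flag (Iwrite) cleared, so all lanes still execute the
// warp-wide atomic helpers uniformly but only in-range keys are committed. On sm_20+
// the *doReduce path first merges duplicate keys within the warp (reduceToUnique)
// before updating the shared bins.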
#if __CUDA_ARCH__ >= 200 #define ONE_HS_STEP(RESID) do { if ((RESID) < nMultires) { \ int keyIndex = (myKeys[(RESID % nMultires)] - binOffset); \ bool Iwrite = keyIndex >= 0 && keyIndex < subsize && doWrite;\ if (!Iwrite) keyIndex = 0; \ if (*doReduce){\ if (histotype == histogram_generic || histotype == histogram_atomic_add){\ TAKE_WARP_MUTEX(0);\ bool Iwrite2 = reduceToUnique<histotype, false>(&vals[(RESID % nMultires)], myKeys[(RESID % nMultires)], NULL, sumfunObj, rkeys, rvals);\ if (Iwrite && Iwrite2) \ bins[keyIndex] = sumfunObj(bins[keyIndex], vals[(RESID % nMultires)]);\ /*if (histotype == histogram_generic) myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite && doWrite, warpmutex);\ else wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite && doWrite, warpmutex);*/\ GIVE_WARP_MUTEX(0);\ } else { \ bool Iwrite2 = reduceToUnique<histotype, false>(&vals[(RESID % nMultires)], myKeys[(RESID % nMultires)], NULL, sumfunObj, rkeys, rvals);\ wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite && Iwrite2, warpmutex); \ }\ } else {\ if (!Iwrite) keyIndex = 0;\ if (histotype == histogram_generic)\ myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\ else if (histotype == histogram_atomic_add)\ wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\ else if (histotype == histogram_atomic_inc)\ wrapAtomicIncWarp<laststeps>(&bins[keyIndex], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\ else{\ myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\ }\ } } } while (0) #else #define ONE_HS_STEP(RESID) do { if ((RESID) < nMultires) { \ int keyIndex = (myKeys[(RESID % nMultires)] - binOffset); \ bool Iwrite = keyIndex >= 0 && keyIndex < subsize && doWrite;\ if (!Iwrite) keyIndex = 0;\ if (histotype == histogram_generic)\ myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\ else if (histotype == histogram_atomic_add)\ wrapAtomicAddWarp<laststeps>(&bins[keyIndex], *(&vals[(RESID % nMultires)]), &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\ else if (histotype == histogram_atomic_inc)\ wrapAtomicIncWarp<laststeps>(&bins[keyIndex], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\ else{\ myAtomicWarpAdd<laststeps>(&bins[keyIndex], vals[(RESID % nMultires)], &locks[keyIndex], sumfunObj, Iwrite, warpmutex);\ }\ } } while (0) #endif ONE_HS_STEP(0); ONE_HS_STEP(1); ONE_HS_STEP(2); ONE_HS_STEP(3); //#pragma unroll for (int resid = 4; resid < nMultires; resid++){ ONE_HS_STEP(resid); } #undef ONE_HS_STEP } template <histogram_type histotype, int nMultires, bool lastSteps, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> __global__ void histogramKernel_multipass( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* blockOut, int nOut, int outStride, int nSteps, int subsize) { extern __shared__ int cudahistogram_binstmp[]; OUTPUTTYPE* bins = (OUTPUTTYPE*)&(*cudahistogram_binstmp); int* locks = (int*)&bins[subsize]; int* rkeys = NULL; OUTPUTTYPE* rvals = NULL; //__shared__ int warpmutex; //INIT_WARP_MUTEX2(warpmutex); #if __CUDA_ARCH__ >= 200 
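    // sm_20+ only: carve the per-warp reduction scratch (32 keys in rkeys, values in rvals)
    // out of the same dynamic shared-memory allocation - after the bins and, for the
    // generic path, after the lock array as well. checkForReduction() and reduceToUnique()
    // use this scratch to detect and merge duplicate keys inside a warp; for
    // histogram_atomic_inc each warp additionally indexes its own 32-entry key slot.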
int warpId = threadIdx.x >> 5; if (histotype == histogram_generic) rkeys = &locks[subsize]; else rkeys = locks; rvals = (OUTPUTTYPE*)&rkeys[32]; if (histotype == histogram_atomic_inc){ rkeys = &rkeys[warpId << 5]; //rvals = &rvals[warpId << 5]; } #endif // Reset all bins to zero... for (int j = 0; j < (subsize >> HMBLOCK_SIZE_LOG2) + 1; j++) { int bin = (j << HMBLOCK_SIZE_LOG2) + threadIdx.x; if (bin < subsize){ bins[bin] = zero; } } #if HMBLOCK_SIZE > 32 __syncthreads(); #endif int outidx = blockIdx.y; int binOffset = blockIdx.x * subsize; INDEXT startidx = (INDEXT)((outidx * nSteps) * HMBLOCK_SIZE + start + threadIdx.x); int doReduce; // local var - TODO: Is this safe?? doReduce = 0; #define MED_UNROLL_LOG2 2 #define MED_UNROLL (1 << MED_UNROLL_LOG2) int step; for (step = 0; step < (nSteps >> MED_UNROLL_LOG2); step++) { histogramKernel_stepImplMulti<true, histotype, 0, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, end, zero, subsize, startidx, bins, locks, rvals, rkeys, &doReduce, true, &warpmutex, binOffset); startidx += HMBLOCK_SIZE; histogramKernel_stepImplMulti<true, histotype, 0, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, end, zero, subsize, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex, binOffset); startidx += HMBLOCK_SIZE; histogramKernel_stepImplMulti<true, histotype, 0, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, end, zero, subsize, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex, binOffset); startidx += HMBLOCK_SIZE; histogramKernel_stepImplMulti<true, histotype, 0, nMultires, false, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, end, zero, subsize, startidx, bins, locks, rvals, rkeys, &doReduce, false, &warpmutex, binOffset); startidx += HMBLOCK_SIZE; } step = (nSteps >> MED_UNROLL_LOG2) << MED_UNROLL_LOG2; for (; step < nSteps ; step++) { histogramKernel_stepImplMulti<true, histotype, 0, nMultires, lastSteps, INPUTTYPE, TRANSFORMFUNTYPE, SUMFUNTYPE, OUTPUTTYPE> (input, xformObj, sumfunObj, end, zero, subsize, startidx, bins, locks, rvals, rkeys, &doReduce, (step & 7) == 0, &warpmutex, binOffset); startidx += HMBLOCK_SIZE; } #undef MED_UNROLL #undef MED_UNROLL_LOG2 #if HMBLOCK_SIZE > 32 __syncthreads(); #endif // Finally put together the bins for (int j = 0; j < (subsize >> HMBLOCK_SIZE_LOG2) + 1; j++) { int key = (j << HMBLOCK_SIZE_LOG2) + threadIdx.x; if (key < subsize) { OUTPUTTYPE res = blockOut[(key + binOffset) * outStride + outidx]; //int tmpBin = bin; res = sumfunObj(res, bins[key]); //printf("tid:%02d, write out bin: %02d, \n", threadIdx.x, bin); blockOut[(key + binOffset) * outStride + outidx] = res; } } } static int determineSubHistoSize(int nOut, size_t outsize, histogram_type histotype, int cuda_arch, cudaDeviceProp* props) { int shlimit = props->sharedMemPerBlock - 300; int neededPerKey = outsize; if (histotype == histogram_generic || cuda_arch < 130) neededPerKey += (sizeof(int)); // locks int neededConst = 0; if (cuda_arch >= 200) { // Reduction stuff: if (histotype == histogram_generic || histotype == histogram_atomic_add) { neededConst += (outsize + sizeof(int)) << 5; // reduction values } else { neededConst += (sizeof(int) << HMBLOCK_SIZE_LOG2); // keys per warp of one thread } } int result = (shlimit - neededConst) / (2*neededPerKey); int res = 0; if (result >= 1<<16) { result >>= 16; res += 16; } if (result >= 1<< 8) { 
result >>= 8; res += 8; } if (result >= 1<< 4) { result >>= 4; res += 4; } if (result >= 1<< 2) { result >>= 2; res += 2; } if (result >= 1<< 1) { res += 1; } return (1 << res); } template <histogram_type histotype, typename OUTPUTTYPE> static int getMultipassBufSize(int nOut, cudaDeviceProp* props, int cuda_arch) { int subsize = determineSubHistoSize(nOut, sizeof(OUTPUTTYPE), histotype, cuda_arch, props); int nDegenBlocks = nOut / subsize; if (subsize * nDegenBlocks < nOut) nDegenBlocks++; int nblocks = props->multiProcessorCount; if (nDegenBlocks < 8) nblocks = props->multiProcessorCount * 8 / nDegenBlocks; //int nblocks = props->multiProcessorCount * 8; // NOTE: The other half is used by multireduce... //printf("getMultipassBufSize(%d) = %d\n", nOut, 2 * nblocks * nOut * sizeof(OUTPUTTYPE)); return 2 * nblocks * nOut * sizeof(OUTPUTTYPE); } template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> static void callHistogramKernelMultiPass( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut, cudaDeviceProp* props, cudaStream_t stream, void* tmpBuffer, bool outInDev, int cuda_arch) { INDEXT size = end - start; if (end <= start) return; //int debugs = 0; int subsize = determineSubHistoSize(nOut, sizeof(OUTPUTTYPE), histotype, cuda_arch, props); int nDegenBlocks = nOut / subsize; if (subsize * nDegenBlocks < nOut) nDegenBlocks++; int nblocks = props->multiProcessorCount; if (nDegenBlocks < 8) nblocks = props->multiProcessorCount * 8 / nDegenBlocks; OUTPUTTYPE* tmpOut; int nsteps = size / ( nblocks * HMBLOCK_SIZE ); if (nsteps * nblocks * HMBLOCK_SIZE < size) nsteps++; if (nsteps > MAX_MULTISTEPS) nsteps = MAX_MULTISTEPS; //printf(" <debugstep = %d> ", debugs++); bool userBuffer = false; if (tmpBuffer) { char* tmpptr = (char*)tmpBuffer; tmpOut = (OUTPUTTYPE*)tmpBuffer; tmpBuffer = (void*)&tmpptr[nblocks * nOut * sizeof(OUTPUTTYPE)]; userBuffer = true; //printf("tmpBuffer = &tmpptr[%d]\n", nblocks * nOut * sizeof(OUTPUTTYPE)); } else { cudaMalloc((void**)&tmpOut, 2 * nblocks * nOut * sizeof(OUTPUTTYPE)); //printf("tmpOut = malloc(%d)\n", 2 * nblocks * nOut * sizeof(OUTPUTTYPE)); //tmpBuffer = (void*)&tmpOut[nblocks * nOut * sizeof(OUTPUTTYPE)]; //printf("tmpBuffer = &tmpOut[%d]\n", nblocks * nOut * sizeof(OUTPUTTYPE)); } #define IBLOCK_SIZE_LOG2 7 #define IBLOCK_SIZE (1 << IBLOCK_SIZE_LOG2) int initPaddedSize = ((nblocks * nOut) + (IBLOCK_SIZE) - 1) & (~((IBLOCK_SIZE) - 1)); const dim3 initblock = IBLOCK_SIZE; dim3 initgrid = initPaddedSize >> ( IBLOCK_SIZE_LOG2 ); int nsteps2 = 1; while (initgrid.x > (1 << 14)) { initgrid.x >>= 1; nsteps2 <<= 1; if (nsteps2 * initgrid.x * IBLOCK_SIZE < nblocks * nOut) initgrid.x++; } initKernel<<<initgrid,initblock,0,stream>>>(tmpOut, zero, nblocks * nOut, nsteps2); #undef IBLOCK_SIZE_LOG2 #undef IBLOCK_SIZE int extSharedNeeded = subsize * (sizeof(OUTPUTTYPE)); // bins if (histotype == histogram_generic || cuda_arch < 130) extSharedNeeded += subsize * (sizeof(int)); // locks if (cuda_arch >= 200) { // Reduction stuff: if (histotype == histogram_generic || histotype == histogram_atomic_add) { extSharedNeeded += ((sizeof(OUTPUTTYPE) + sizeof(int)) << 5); // reduction values } else { extSharedNeeded += (sizeof(int) << HMBLOCK_SIZE_LOG2); // keys per warp of one thread } } //printf(" <debugstep(init) = %d> ", debugs++); /*int extSharedNeeded = ((nOut << nKeysetslog2)) * 
(sizeof(OUTPUTTYPE) + sizeof(int)) + (sizeof(OUTPUTTYPE) * HMBLOCK_SIZE); if (nOut < HMBLOCK_SIZE) extSharedNeeded += sizeof(int) * (HMBLOCK_SIZE - nOut); if (cuda_arch < 130) extSharedNeeded += ((nOut << nKeysetslog2)) * (sizeof(int));*/ //printf("binsets = %d, steps = %d\n", (1 << nKeysetslog2), nsteps); int nOrigBlocks = nblocks; INDEXT myStart = start; while(myStart < end) { bool lastStep = false; if (myStart + nsteps * nblocks * HMBLOCK_SIZE > end) { size = end - myStart; nsteps = (size) / (nblocks * HMBLOCK_SIZE); if (nsteps < 1) { lastStep = true; nsteps = 1; nblocks = size / HMBLOCK_SIZE; if (nblocks * HMBLOCK_SIZE < size) nblocks++; } } dim3 grid; grid.y = nblocks; grid.x = nDegenBlocks; dim3 block = HMBLOCK_SIZE; //printf(" <debugstep(main) = %d> ", debugs++); if (lastStep) histogramKernel_multipass<histotype, nMultires, true><<<grid, block, extSharedNeeded, stream>>>( input, xformObj, sumfunObj, myStart, end, zero, tmpOut, nOut, nOrigBlocks, nsteps, subsize); else histogramKernel_multipass<histotype, nMultires, false><<<grid, block, extSharedNeeded, stream>>>( input, xformObj, sumfunObj, myStart, end, zero, tmpOut, nOut, nOrigBlocks, nsteps, subsize); myStart += nsteps * nblocks * HMBLOCK_SIZE; } #if H_ERROR_CHECKS cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) printf("Cudaerror = %s\n", cudaGetErrorString( error )); #endif // OK - so now tmpOut contains our gold - we just need to dig it out now //printf(" <debugstep(out) = %d> ", debugs++); //printf("callMultiReduce(%d, %d,...)\n", nOrigBlocks, nOut); callMultiReduce(nOrigBlocks, nOut, out, tmpOut, sumfunObj, zero, stream, tmpBuffer, outInDev); //printf(" <debugstep(multireduce) = %d> ", debugs++); #if H_ERROR_CHECKS error = cudaGetLastError(); if (error != cudaSuccess) printf("Cudaerror(reduce) = %s\n", cudaGetErrorString( error )); #endif // Below same as host-code #if 0 { int resIdx; int i; OUTPUTTYPE* h_tmp = (OUTPUTTYPE*)malloc(n * nOut * sizeof(OUTPUTTYPE)); //parallel_copy(h_tmp, MemType_HOST, tmpOut, MemType_DEV, n * nOut * sizeof(OUTPUTTYPE)); cudaMemcpy(h_tmp, tmpOut, n*nOut*sizeof(OUTPUTTYPE), cudaMemcpyDeviceToHost); for (resIdx = 0; resIdx < nOut; resIdx++) { OUTPUTTYPE res = out[resIdx]; for (i = 0; i < n; i++) { res = sumfunObj(res, h_tmp[i + resIdx * n]); } out[resIdx] = res; } free(h_tmp); } #endif //parallel_free(tmpOut, MemType_DEV); if (!userBuffer) cudaFree(tmpOut); } template <bool lastSteps, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> static inline __device__ void histoKernel_smallBinStep( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT myStart, INDEXT end, OUTPUTTYPE* mySHBins) { int myKeys[nMultires]; if (lastSteps) { if (myStart < end) { OUTPUTTYPE myOut[nMultires]; xformObj(input, myStart, &myKeys[0], &myOut[0], nMultires); #pragma unroll for (int res = 0; res < nMultires; res++) { int index = (myKeys[res]) << SMALL_BLOCK_SIZE_LOG2; mySHBins[index] = sumfunObj(mySHBins[index], myOut[res]); } } } else { OUTPUTTYPE myOut[nMultires]; xformObj(input, myStart, &myKeys[0], &myOut[0], nMultires); #pragma unroll for (int res = 0; res < nMultires; res++) { int index = (myKeys[res]) << SMALL_BLOCK_SIZE_LOG2; mySHBins[index] = sumfunObj(mySHBins[index], myOut[res]); } } } template <bool lastSteps, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> __global__ void histoKernel_smallBin( INPUTTYPE input, TRANSFORMFUNTYPE 
xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* blockOut, int nOut, int maxblocks, int nSteps) { // Take care with extern - In order to have two instances of this template the // type of the extern variables cannot change // (ie. cannot use "extern __shared__ OUTPUTTYPE bins[]") extern __shared__ int cudahistogram_allbinstmp[]; OUTPUTTYPE* allbins = (OUTPUTTYPE*)&(*cudahistogram_allbinstmp); OUTPUTTYPE* mySHBins = &allbins[threadIdx.x]; OUTPUTTYPE* ourOut = &blockOut[nOut * blockIdx.x]; INDEXT myStart = start + (INDEXT)((blockIdx.x * nSteps) << SMALL_BLOCK_SIZE_LOG2) + (INDEXT)threadIdx.x; for (int bin = 0; bin < nOut /*- nLocVars*/; bin++) mySHBins[bin << SMALL_BLOCK_SIZE_LOG2] = zero; // Run loops - unroll 8 steps manually int doNSteps = (nSteps) >> 3; for (int step = 0; step < doNSteps; step++) { histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart, end, mySHBins); histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + SMALL_BLOCK_SIZE, end, mySHBins); histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 2*SMALL_BLOCK_SIZE, end, mySHBins); histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 3*SMALL_BLOCK_SIZE, end, mySHBins); histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 4*SMALL_BLOCK_SIZE, end, mySHBins); histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 5*SMALL_BLOCK_SIZE, end, mySHBins); histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 6*SMALL_BLOCK_SIZE, end, mySHBins); histoKernel_smallBinStep<lastSteps, nMultires>(input, xformObj, sumfunObj, myStart + 7*SMALL_BLOCK_SIZE, end, mySHBins); myStart += 8*SMALL_BLOCK_SIZE; } int nStepsLeft = (nSteps) - (doNSteps << 3); for (int step = 0; step < nStepsLeft; step++) { histoKernel_smallBinStep<true, nMultires>(input, xformObj, sumfunObj, myStart, end, mySHBins); myStart += SMALL_BLOCK_SIZE; } // In the end combine results: #if SMALL_BLOCK_SIZE > 32 __syncthreads(); #endif // Do first shared stuff: int keyIndex = threadIdx.x; while (keyIndex < nOut) { OUTPUTTYPE* binResults = &allbins[keyIndex << SMALL_BLOCK_SIZE_LOG2]; OUTPUTTYPE result = ourOut[keyIndex]; for (int tidx = 0; tidx < SMALL_BLOCK_SIZE; tidx++){ result = sumfunObj(result, *binResults++); } ourOut[keyIndex] = result; keyIndex += SMALL_BLOCK_SIZE; } } static inline __device__ int resultToInt(int resultin){ return resultin; } static inline __device__ int resultToInt(long resultin){ return (int)resultin; } static inline __device__ int resultToInt(long long resultin){ return (int)resultin; } static inline __device__ int resultToInt(unsigned int resultin){ return (int)resultin; } static inline __device__ int resultToInt(unsigned long resultin){ return (int)resultin; } static inline __device__ int resultToInt(unsigned long long resultin){ return (int)resultin; } template<typename OUTPUTTYPE> static inline __device__ int resultToInt(OUTPUTTYPE resultin){ return 0; } static inline __device__ void intToResult(int resultin, int& resultOut){ resultOut = resultin; } static inline __device__ void intToResult(int resultin, long& resultOut){ resultOut = (long)resultin; } static inline __device__ void intToResult(int resultin, unsigned int& resultOut){ resultOut = (unsigned )resultin; } static inline __device__ void intToResult(int resultin, long long& resultOut){ resultOut = (long long)resultin; } static inline __device__ 
void intToResult(int resultin, unsigned long& resultOut){ resultOut = (unsigned long)resultin; } static inline __device__ void intToResult(int resultin, unsigned long long& resultOut){ resultOut = (unsigned long long)resultin; } template<typename OUTPUTTYPE> static inline __device__ void intToResult(int resultin, OUTPUTTYPE& resultout){ ; } template <bool lastSteps, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> static inline __device__ void histoKernel_smallBinByteOneStep( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT myStart, INDEXT end, volatile unsigned char* mySHBins, OUTPUTTYPE zero ) { if (lastSteps) { if (myStart < end) { OUTPUTTYPE myOut[nMultires]; int myKeys[nMultires]; xformObj(input, myStart, &myKeys[0], &myOut[0], nMultires); #pragma unroll for (int res = 0; res < nMultires; res++) { // index = tid * 4 + (key / 4) * blockSize * 4 + (key % 4) - mySHBins points to allbins[4 x tid] // Complex indexing cost: 2x bit-shift + bitwise and + addition = 4 ops... int index = (((myKeys[res]) >> 2) << (SMALL_BLOCK_SIZE_LOG2 + 2)) + (myKeys[res] & 0x3); mySHBins[index]++; } } } else /*if (myStart < end)*/ { OUTPUTTYPE myOut[nMultires]; int myKeys[nMultires]; xformObj(input, myStart, &myKeys[0], &myOut[0], nMultires); #pragma unroll for (int res = 0; res < nMultires; res++) { // index = tid * 4 + (key / 4) * blockSize * 4 + (key % 4) - mySHBins points to allbins[4 x tid] // Complex indexing cost: 2x bit-shift + bitwise and + addition = 4 ops... int key = myKeys[res]; int index = ((key >> 2) << (SMALL_BLOCK_SIZE_LOG2 + 2)) + (key & 0x3); mySHBins[index]++; } } } template <histogram_type histotype, bool lastSteps, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> __global__ void histoKernel_smallBinByte( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* blockOut, int nOut, int maxblocks, int nSteps) { // Ok - idea is as follows: When we have blocksize number of threads, thread tid's nth-bin is at: // index = tid * 4 + (bin / 4) * blocksize * 4 + (bin % 4) // Example: // With 32 threads bins #7, #8 and #9 will be at (7/4=1, 7%4=3, 8/4=2, 8%4=4, 9/4=2, 9%4=1): // Bin #7 Bin #8 Bin #9 ... Bin #63 // tid | index index index ... index // ============== ======== ======== ======== // 0 35 256 257 ... 1923 // 1 39 260 261 ... 1927 // 2 43 264 265 ... 1931 // ... // 31 255 380 381 ... 2047 // Therefore there are blocksize x nOut number of 1-byte bins // Outputs are gathered from time to time to 32-bit bins // // Example2: // With 32 threads 7 bins // Bin #0 Bin #1 Bin #2 Bin #3 Bin #4 Bin #5 Bin #6 // tid | index index index index index index index // ============== ======== ======== ======== ======== ======== ======== // 0 0 1 2 3 128 129 130 // 1 4 5 6 7 132 133 134 // 2 8 9 10 11 136 137 138 // ... // 30 120 121 122 123 248 249 250 // 31 124 125 126 127 252 253 254 // // Example3: // index = tid * 4 + (bin / 4) * blocksize * 4 + (bin % 4) // With 32 threads 3 bins // Bin #0 Bin #1 Bin #2 // tid | index index index // ============== ======== ======== // 0 0 1 2 // 1 4 5 6 // 2 8 9 10 // ... // 30 120 121 122 // 31 124 125 126 extern __shared__ unsigned char allbins2[]; volatile unsigned char* mySHBins = &allbins2[threadIdx.x << 2]; int padNOut = nOut + (((nOut & 0x3) != 0) ? 
(4 - (nOut & 0x3)) : 0); OUTPUTTYPE* ourOut = &blockOut[nOut * blockIdx.x]; #if __CUDA_ARCH__ >= 200 OUTPUTTYPE* resultbins = ourOut; #else OUTPUTTYPE* resultbins = (OUTPUTTYPE*)(&allbins2[padNOut << SMALL_BLOCK_SIZE_LOG2]); #endif INDEXT myStart = start + (INDEXT)(((blockIdx.x * nSteps) << SMALL_BLOCK_SIZE_LOG2) + threadIdx.x); // Run loops //int nFullLoops = nSteps >> 7; // Clear bins { int* tmpSHBins = &((int*)allbins2)[threadIdx.x]; // There are nOut x BLOCK_SIZE byte-sized bins so nOut x BLOCKISIZE/4 int-sized ones for (int bin = 0; bin < (padNOut >> 2) /*- nLocVars*/; bin++) tmpSHBins[bin << (SMALL_BLOCK_SIZE_LOG2)] = 0; // for (int tmpbin = (bin << 2); tmpbin < padNOut; tmpbin++) // mySHBins[tmpbin] = 0; #if __CUDA_ARCH__ < 200 int binid = threadIdx.x; while(binid < nOut) { resultbins[binid] = zero; binid += SMALL_BLOCK_SIZE; } #endif } #if SMALL_BLOCK_SIZE > 32 __syncthreads(); #endif const int looplim = (255 / nMultires) < 63 ? (255 / nMultires) : 63; for (int stepsRem = nSteps; stepsRem > 0; stepsRem -= looplim) { if (stepsRem > looplim) { #define MANUAL_UNROLL 1 #if MANUAL_UNROLL // Unroll manually // ("unexcpected control flow" construct with #pragma unroll) #define DO_STEP(NUM) do { if ((NUM) < looplim) { \ histoKernel_smallBinByteOneStep<lastSteps, nMultires>( \ input, xformObj, sumfunObj, myStart /*+ (NUM) * SMALL_BLOCK_SIZE*/, end,\ mySHBins, zero); myStart += SMALL_BLOCK_SIZE; \ } } while (0) #define DO_16_STEPS(N0) do { \ DO_STEP(N0 + 0); DO_STEP(N0 + 1); DO_STEP(N0 + 2); DO_STEP(N0 + 3); \ DO_STEP(N0 + 4); DO_STEP(N0 + 5); DO_STEP(N0 + 6); DO_STEP(N0 + 7); \ DO_STEP(N0 + 8); DO_STEP(N0 + 9); DO_STEP(N0 + 10); DO_STEP(N0 + 11); \ DO_STEP(N0 + 12); DO_STEP(N0 + 13); DO_STEP(N0 + 14); DO_STEP(N0 + 15); \ } while (0) DO_16_STEPS(0); DO_16_STEPS(16); DO_16_STEPS(32); DO_16_STEPS(48); #undef DO_16_STEPS #undef DO_STEP //myStart += looplim * SMALL_BLOCK_SIZE; #else for (int stepNum = 0; stepNum < looplim; stepNum++){ histoKernel_smallBinByteOneStep<lastSteps, nMultires>( input, xformObj, sumfunObj, myStart + stepNum * SMALL_BLOCK_SIZE, end, mySHBins, zero); } myStart += looplim * SMALL_BLOCK_SIZE; #endif // MANUAL_UNROLL #undef MANUAL_UNROLL } else { for (int stepNum = 0; stepNum < stepsRem; stepNum++){ histoKernel_smallBinByteOneStep<lastSteps, nMultires>( input, xformObj, sumfunObj, myStart + stepNum * SMALL_BLOCK_SIZE, end, mySHBins, zero); } myStart += looplim * SMALL_BLOCK_SIZE; } // Ok passes done - need to flush results together { # if SMALL_BLOCK_SIZE > 32 __syncthreads(); # endif int binid = threadIdx.x; while(binid < nOut) { // Start from own tid in order to avoid bank-conflicts: // index = tid * 4 + 4 * (bin / 4) * blocksize + (bin % 4) int index = (threadIdx.x << 2) + ((binid >> 2) << (SMALL_BLOCK_SIZE_LOG2 + 2)) + (binid & 0x3); //int res = (int)allbins2[index]; int res = resultToInt(resultbins[binid]); int ilimit = SMALL_BLOCK_SIZE - threadIdx.x; #pragma unroll for (int i=0; i < SMALL_BLOCK_SIZE; i++) { if (i == ilimit) index -= (SMALL_BLOCK_SIZE << 2); res += allbins2[index]; //allbins2[index] = 0; index += 4; } intToResult(res, resultbins[binid]); binid += SMALL_BLOCK_SIZE; } # if SMALL_BLOCK_SIZE > 32 __syncthreads(); # endif // zero the bins { int* tmpSHBins = &((int*)allbins2)[threadIdx.x]; // There are nOut x BLOCK_SIZE byte-sized bins so nOut x BLOCKISIZE/4 int-sized ones for (int bin = 0; bin < (padNOut >> 2) /*- nLocVars*/; bin++) tmpSHBins[bin << (SMALL_BLOCK_SIZE_LOG2)] = 0; } # if SMALL_BLOCK_SIZE > 32 __syncthreads(); # endif } } // In the end combine 
results: #if __CUDA_ARCH__ < 200 #if SMALL_BLOCK_SIZE > 32 __syncthreads(); #endif int keyIndex = threadIdx.x; while (keyIndex < nOut) { OUTPUTTYPE result = ourOut[keyIndex]; //result = result + resultbins[keyIndex]; result = sumfunObj(result, *(OUTPUTTYPE*)(&resultbins[keyIndex])); ourOut[keyIndex] = result; keyIndex += SMALL_BLOCK_SIZE; } #endif } template <histogram_type histotype, typename OUTPUTTYPE> static int getSmallBinBufSize(int nOut, cudaDeviceProp* props) { int maxblocks = props->multiProcessorCount * 3; maxblocks *= 2; if (nOut < 200) maxblocks *= 4; maxblocks *= 4; return (maxblocks + 1) * nOut * sizeof(OUTPUTTYPE); } template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> static void callSmallBinHisto( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut, cudaDeviceProp* props, int cuda_arch, cudaStream_t stream, int* getTmpBufSize, void* tmpBuffer, bool outInDev) { INDEXT size = end - start; if (end <= start) { if (getTmpBufSize) *getTmpBufSize = 0; return; } int maxblocks = props->multiProcessorCount * 3; if (size > 2*1024*1024 || getTmpBufSize){ maxblocks *= 2; // High occupancy requires lots of blocks if (nOut < 200) maxblocks *= 4; } // TODO: Magic constants.. // With low bin-counts and large problems it seems beneficial to use // more blocks... if (nOut <= 128 || size > 2*4096*4096 || getTmpBufSize) maxblocks *= 4; //printf("maxblocks = %d\n", maxblocks); OUTPUTTYPE* tmpOut; if (getTmpBufSize) { *getTmpBufSize = (maxblocks + 1) * nOut * sizeof(OUTPUTTYPE); return; } if (tmpBuffer) tmpOut = (OUTPUTTYPE*)tmpBuffer; else cudaMalloc((void**)&tmpOut, (maxblocks + 1) * nOut * sizeof(OUTPUTTYPE)); #if H_ERROR_CHECKS /*assert(getSmallBinBufSize<histotype, OUTPUTTYPE>(nOut, props) >= (maxblocks + 1) * nOut * sizeof(OUTPUTTYPE));*/ #endif // cudaMemset(tmpOut, 0, sizeof(OUTPUTTYPE) * nOut * (maxblocks+1)); { #define IBLOCK_SIZE_LOG2 7 #define IBLOCK_SIZE (1 << IBLOCK_SIZE_LOG2) int initPaddedSize = ((maxblocks * nOut) + (IBLOCK_SIZE) - 1) & (~((IBLOCK_SIZE) - 1)); const dim3 initblock = IBLOCK_SIZE; dim3 initgrid = initPaddedSize >> ( IBLOCK_SIZE_LOG2 ); int nsteps = 1; while (initgrid.x > (1 << 14)) { initgrid.x >>= 1; nsteps <<= 1; if (nsteps * initgrid.x * IBLOCK_SIZE < maxblocks * nOut) initgrid.x++; } initKernel<<<initgrid, initblock, 0, stream>>>(tmpOut, zero, maxblocks * nOut, nsteps); #undef IBLOCK_SIZE_LOG2 #undef IBLOCK_SIZE } int sharedNeeded; if (histotype == histogram_atomic_inc) { int padNOut = nOut + (((nOut & 0x3) != 0) ? 
(4 - (nOut & 0x3)) : 0); sharedNeeded = (padNOut << SMALL_BLOCK_SIZE_LOG2); if (cuda_arch < 200) sharedNeeded += (nOut << 2); } else { int typesize = sizeof(OUTPUTTYPE); sharedNeeded = (nOut * typesize) << SMALL_BLOCK_SIZE_LOG2; //printf("Small-bin, generic, Shared needed = %d\n", sharedNeeded); } // Determine number of local variables // SMALL_LOCALLIMIT is total local size available for one block: int nSteps = size / (maxblocks << SMALL_BLOCK_SIZE_LOG2); if (nSteps * maxblocks * SMALL_BLOCK_SIZE < size) nSteps++; if (nSteps > MAX_SMALL_STEPS) nSteps = MAX_SMALL_STEPS; int nFullSteps = size / (nSteps * maxblocks * SMALL_BLOCK_SIZE); dim3 grid = maxblocks; dim3 block = SMALL_BLOCK_SIZE; for (int i = 0; i < nFullSteps; i++) { if (histotype == histogram_atomic_inc) histoKernel_smallBinByte<histotype, false, nMultires><<<grid, block, sharedNeeded, stream>>>( input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps); else histoKernel_smallBin<false, nMultires><<<grid, block, sharedNeeded, stream>>>( input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps); start += nSteps * maxblocks * SMALL_BLOCK_SIZE; #if H_ERROR_CHECKS cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) printf("Cudaerror = %s\n", cudaGetErrorString( error )); #endif } size = end - start; if (size > 0) { // Do what steps we still can do without checks nSteps = size / (maxblocks << SMALL_BLOCK_SIZE_LOG2); if (nSteps > 0) { if (histotype == histogram_atomic_inc) histoKernel_smallBinByte<histotype, false, nMultires><<<grid, block, sharedNeeded, stream>>>( input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps); else histoKernel_smallBin<false, nMultires><<<grid, block, sharedNeeded, stream>>>( input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps); start += nSteps * maxblocks * SMALL_BLOCK_SIZE; } } size = end - start; if (size > 0) { // Last step here: int nblocks = size >> SMALL_BLOCK_SIZE_LOG2; if (nblocks >= maxblocks) nblocks = maxblocks; else if ((nblocks << SMALL_BLOCK_SIZE_LOG2) < size) nblocks++; nSteps = size / (nblocks << SMALL_BLOCK_SIZE_LOG2); if (nSteps * nblocks * SMALL_BLOCK_SIZE < size) { nSteps++; nblocks = size / (nSteps << SMALL_BLOCK_SIZE_LOG2); if (((nSteps * nblocks) << SMALL_BLOCK_SIZE_LOG2) < size) nblocks++; } grid.x = nblocks; if (histotype == histogram_atomic_inc) histoKernel_smallBinByte<histotype, true, nMultires><<<grid, block, sharedNeeded, stream>>>( input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps); else histoKernel_smallBin<true, nMultires><<<grid, block, sharedNeeded, stream>>>( input, xformObj, sumfunObj, start, end, zero, tmpOut, nOut, maxblocks, nSteps); } #if H_ERROR_CHECKS cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) printf("Cudaerror = %s\n", cudaGetErrorString( error )); #endif // Finally put together the result: enum cudaMemcpyKind fromOut = outInDev ? cudaMemcpyDeviceToDevice : cudaMemcpyHostToDevice; enum cudaMemcpyKind toOut = outInDev ? 
cudaMemcpyDeviceToDevice : cudaMemcpyDeviceToHost; if (stream != 0) cudaMemcpyAsync(&tmpOut[maxblocks * nOut], out, sizeof(OUTPUTTYPE) * nOut, fromOut, stream); else cudaMemcpy(&tmpOut[maxblocks * nOut], out, sizeof(OUTPUTTYPE) * nOut, fromOut); // Let's do so that one block handles one bin grid.x = nOut; //grid.x = nOut >> SMALL_BLOCK_SIZE_LOG2; //if ((grid.x << SMALL_BLOCK_SIZE_LOG2) < nOut) grid.x++; block.x = GATHER_BLOCK_SIZE; gatherKernel<<<grid, block, 0, stream>>>(sumfunObj, tmpOut, nOut, maxblocks, zero); // TODO: Use async copy for the results as well? if (outInDev && stream != 0) cudaMemcpyAsync(out, tmpOut, nOut*sizeof(OUTPUTTYPE), toOut, stream); else cudaMemcpy(out, tmpOut, nOut*sizeof(OUTPUTTYPE), toOut); #if 0 { int resIdx; int i; OUTPUTTYPE* h_tmp = (OUTPUTTYPE*)malloc(maxblocks * nOut * sizeof(OUTPUTTYPE)); //parallel_copy(h_tmp, MemType_HOST, tmpOut, MemType_DEV, n * nOut * sizeof(OUTPUTTYPE)); cudaMemcpy(h_tmp, tmpOut, maxblocks*nOut*sizeof(OUTPUTTYPE), cudaMemcpyDeviceToHost); for (resIdx = 0; resIdx < nOut; resIdx++) { OUTPUTTYPE res = out[resIdx]; for (i = 0; i < maxblocks; i++) { res = sumfunObj(res, h_tmp[i * nOut + resIdx]); } out[resIdx] = sumfunObj(res, out[resIdx]); } free(h_tmp); } #endif if (!tmpBuffer) cudaFree(tmpOut); } template <histogram_type histotype, typename OUTPUTTYPE> static inline bool smallBinLimit(int nOut, OUTPUTTYPE zero, cudaDeviceProp* props, int cuda_arch) { int shlimit = props->sharedMemPerBlock - 300; int typeSize = sizeof(OUTPUTTYPE); if (histotype == histogram_atomic_inc) if ((((4 * nOut) << 5) + (cuda_arch < 200 ? nOut * 16 : 0)) < shlimit) return true; if (((4 * nOut * typeSize) << 5) < shlimit) return true; return false; } __global__ void detectCudaArchKernel(int* res) { int result; #if __CUDA_ARCH__ >= 210 result = 210; #elif __CUDA_ARCH__ >= 200 result = 200; #elif __CUDA_ARCH__ >= 130 result = 130; #elif __CUDA_ARCH__ >= 120 result = 120; #elif __CUDA_ARCH__ >= 110 result = 110; #else result = 100; #endif if (threadIdx.x == 0) *res = result; } static int DetectCudaArch(void) { // The only way to know from host-code, which device architecture our kernels have been generated // against, is to run a kernel that actually checks it.. :) dim3 grid = 1; //dim3 block = 32; // TODO: Allow static storage so that we can ask just once for the arch??? // NOTE: This function implies synchromization between CPU and GPU - so use static here... 
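    // The detected value is cached in a function-local static, so the tiny detection
    // kernel and the blocking cudaMemcpy below only run on the first call; the kernel
    // simply reports which __CUDA_ARCH__ the device code was compiled against.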
static int result = 0; //int result = 0; if (result == 0) { void* tmpBuf; cudaMalloc(&tmpBuf, sizeof(int)); detectCudaArchKernel<<<grid, grid>>>((int*)tmpBuf); cudaMemcpy(&result, tmpBuf, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(tmpBuf); //printf("Detected CUDA_ARCH = %d\n", result); } return result; } static bool runMultiPass(int nOut, cudaDeviceProp* props, int cuda_arch, size_t outsize, histogram_type histotype) { int subsize = determineSubHistoSize(nOut, outsize, histotype, cuda_arch, props); if (cuda_arch < 120){ if (subsize <= 0 || nOut > 2 * subsize) return false; return true; } else { if (subsize <= 0 || nOut > 16 * subsize) return false; return true; } } template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> cudaError_t callHistogramKernel( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT start, INDEXT end, OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut, bool outInDev, cudaStream_t stream, void* tmpBuffer, bool allowMultiPass) { int devId; cudaDeviceProp props; cudaError_t cudaErr = cudaGetDevice( &devId ); if (cudaErr != 0) return cudaErr; //assert(!cudaErr); cudaErr = cudaGetDeviceProperties( &props, devId ); if (cudaErr != 0) return cudaErr; int cuda_arch = DetectCudaArch(); enum cudaFuncCache old; cudaThreadGetCacheConfig(&old); cudaThreadSetCacheConfig(cudaFuncCachePreferShared); if (nOut <= 0) return cudaSuccess; // 100 Mib printf-limit should be enough... // cudaDeviceSetLimit(cudaLimitPrintfFifoSize, 1024 * 1024 * 100); if (smallBinLimit<histotype>(nOut, zero, &props, cuda_arch)) { callSmallBinHisto<histotype, nMultires>(input, xformObj, sumfunObj, start, end, zero, out, nOut, &props, cuda_arch, stream, NULL, tmpBuffer, outInDev); } else if (binsFitIntoShared(nOut, zero, &props, cuda_arch)) { callHistogramKernelImpl<histotype, nMultires>(input, xformObj, sumfunObj, start, end, zero, out, nOut, &props, stream, NULL, tmpBuffer, outInDev, cuda_arch); } else if (allowMultiPass && runMultiPass(nOut, &props, cuda_arch, sizeof(OUTPUTTYPE), histotype)) { callHistogramKernelMultiPass<histotype, nMultires>(input, xformObj, sumfunObj, start, end, zero, out, nOut, &props, stream, tmpBuffer, outInDev, cuda_arch); } else { callHistogramKernelLargeNBins<histotype, nMultires>(input, xformObj, sumfunObj, start, end, zero, out, nOut, &props, cuda_arch, stream, NULL, tmpBuffer, outInDev); } cudaThreadSetCacheConfig(old); return cudaSuccess; } template <typename nDimIndexFun, int nDim, typename USERINPUTTYPE, typename INDEXT, typename OUTPUTTYPE> class wrapHistoInput { public: nDimIndexFun userIndexFun; INDEXT starts[nDim]; //int ends[nDim]; INDEXT sizes[nDim]; __host__ __device__ void operator() (USERINPUTTYPE input, INDEXT i, int* result_index, OUTPUTTYPE* results, int nresults) const { int coords[nDim]; int tmpi = i; #pragma unroll for (int d=0; d < nDim; d++) { // Example of how this logic works - imagine a cube of (10,100,1000), and take index 123 456 // newI = 123 456 / 10 = 12 345, offset = 123 456 - 123 450 = 6 (this is our first coordinate!), // newI = 12 345 / 100 = 123, offset = 12 345 - 12 300 = 45 (this is our second coordinate!), // newI = 123 / 1000 = 0, offset = 123 - 0 = 123 (this is our last coordinate!) 
// Result = [123, 45, 6] INDEXT newI = tmpi / sizes[d]; INDEXT offset = tmpi - newI * sizes[d]; coords[d] = starts[d] + offset; tmpi = newI; } // Now just call wrapped functor with right coordinate values userIndexFun(input, coords, result_index, results, nresults); } }; template <histogram_type histotype, int nMultires, int nDim, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> cudaError_t callHistogramKernelNDim( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT* starts, INDEXT* ends, OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut, bool outInDev, cudaStream_t stream, void* tmpBuffer, bool allowMultiPass) { wrapHistoInput<TRANSFORMFUNTYPE, nDim, INPUTTYPE, INDEXT, OUTPUTTYPE> wrapInput; INDEXT start = 0; INDEXT size = 1; for (int d = 0; d < nDim; d++) { wrapInput.starts[d] = starts[d]; wrapInput.sizes[d] = ends[d] - starts[d]; // Example: starts = [3, 10, 23], sizes = [10, 100, 1000] // start = 3 * 1 = 3, size = 10 // start = 3 + 10 * 10 = 103, size = 10*100 = 1000 // start = 103 + 1000*23 = 23 103, size = 1000*1000 = 1 000 000 start += starts[d] * size; size *= wrapInput.sizes[d]; if (ends[d] <= starts[d]) return cudaSuccess; } wrapInput.userIndexFun = xformObj; INDEXT end = start + size; return callHistogramKernel<histotype, nMultires> (input, wrapInput, sumfunObj, start, end, zero, out, nOut, outInDev, stream, tmpBuffer, allowMultiPass); } template <histogram_type histotype, int nMultires, typename INPUTTYPE, typename TRANSFORMFUNTYPE, typename SUMFUNTYPE, typename OUTPUTTYPE, typename INDEXT> cudaError_t callHistogramKernel2Dim( INPUTTYPE input, TRANSFORMFUNTYPE xformObj, SUMFUNTYPE sumfunObj, INDEXT x0, INDEXT x1, INDEXT y0, INDEXT y1, OUTPUTTYPE zero, OUTPUTTYPE* out, int nOut, bool outInDev, cudaStream_t stream, void* tmpBuffer, bool allowMultiPass) { INDEXT starts[2] = { x0, y0 }; INDEXT ends[2] = { x1, y1 }; return callHistogramKernelNDim<histotype, nMultires, 2> (input, xformObj, sumfunObj, starts, ends, zero, out, nOut, outInDev, stream, tmpBuffer, allowMultiPass); } struct histogram_defaultXform { __host__ __device__ void operator() (int* input, int i, int* result_index, int* results, int nresults) const { //int idata = input[i]; #pragma unroll for (int resIndex = 0; resIndex < nresults; resIndex++) { *result_index++ = *input++; *results++ = 1; } } }; template <typename OUTPUTTYPE> struct histogram_defaultSum { __host__ __device__ OUTPUTTYPE operator() (OUTPUTTYPE i1, OUTPUTTYPE i2) const { return i1 + i2; } }; template <typename INPUTTYPE, typename OUTPUTTYPE> struct histogram_dummyXform { __host__ __device__ void operator() (INPUTTYPE* input, int i, int* result_index, OUTPUTTYPE* results, int nresults) const { //int idata = input[i]; int index = i; (void)input; #pragma unroll for (int resIndex = 0; resIndex < nresults; resIndex++) { *result_index++ = index++; *results++ = 1;//*input++; } } }; template <typename OUTPUTTYPE> struct histogram_dummySum { __host__ __device__ OUTPUTTYPE operator() (OUTPUTTYPE i1, OUTPUTTYPE i2) const { return i1; } }; template <histogram_type histotype, typename OUTPUTTYPE> int getHistogramBufSize(OUTPUTTYPE zero, int nOut) { int result = 0; int devId; cudaDeviceProp props; cudaError_t cudaErr = cudaGetDevice( &devId ); if (cudaErr != 0) return -1; //assert(!cudaErr); cudaErr = cudaGetDeviceProperties( &props, devId ); if (cudaErr != 0) return -1; int cuda_arch = DetectCudaArch(); if (nOut <= 0) return 0; if (smallBinLimit<histotype>(nOut, zero, &props, cuda_arch)) { 
result = getSmallBinBufSize<histotype, OUTPUTTYPE>(nOut, &props); } else if (binsFitIntoShared(nOut, zero, &props, cuda_arch)) { result = getMediumHistoTmpbufSize<histotype, OUTPUTTYPE>(nOut, &props); } else if (runMultiPass(nOut, &props, cuda_arch, sizeof(OUTPUTTYPE), histotype)) { result = getMultipassBufSize<histotype, OUTPUTTYPE>(nOut, &props, cuda_arch); } else { result = getLargeBinTmpbufsize<histotype, OUTPUTTYPE>(nOut, &props, cuda_arch); } return result; } // undef everything #undef H_ERROR_CHECKS #undef HBLOCK_SIZE_LOG2 #undef HBLOCK_SIZE #undef HMBLOCK_SIZE_LOG2 #undef HMBLOCK_SIZE #undef LBLOCK_SIZE_LOG2 #undef LBLOCK_SIZE #undef GATHER_BLOCK_SIZE_LOG2 #undef GATHER_BLOCK_SIZE #undef LBLOCK_WARPS #undef RBLOCK_SIZE #undef RMAXSTEPS #undef NHSTEPSPERKEY #undef MAX_NHSTEPS #undef MAX_MULTISTEPS #undef MAX_NLHSTEPS #undef STRATEGY_CHECK_INTERVAL_LOG2 #undef STRATEGY_CHECK_INTERVAL #undef HASH_COLLISION_STEPS #undef USE_JENKINS_HASH #undef LARGE_NBIN_CHECK_INTERVAL_LOG2 #undef LARGE_NBIN_CHECK_INTERVAL #undef SMALL_BLOCK_SIZE_LOG2 #undef SMALL_BLOCK_SIZE #undef MAX_SMALL_STEPS #undef USE_ATOMICS_HASH #undef USE_BALLOT_HISTOGRAM #undef TAKE_WARP_MUTEX #undef GIVE_WARP_MUTEX #undef FREE_MUTEX_ID #if USE_MEDIUM_PATH #undef MEDIUM_BLOCK_SIZE_LOG2 #undef MEDIUM_BLOCK_SIZE #endif #undef USE_MEDIUM_PATH
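The fused entry points above (callHistogramKernel, callHistogramKernelNDim / callHistogramKernel2Dim, and getHistogramBufSize) are driven by two user functors: a transform that turns an input index into bin indices plus values, and a binary reduction that combines values landing in the same bin. A minimal usage sketch follows; it assumes the header above is included, and the names BinXform, SumFun and runHistogramSketch, as well as the choice of histogram_atomic_inc with nMultires = 1 and the byte interpretation of the buffer-size helper, are illustrative rather than part of the library.

// Hypothetical usage sketch: bin device-resident integer keys in [0, nOut).
#include <cuda_runtime.h>

struct BinXform {
    // One (bin index, value) pair per input element (nMultires == 1):
    // the key itself selects the bin, and each hit contributes 1.
    __host__ __device__ void operator()(const int *input, int i,
                                        int *result_index, int *results,
                                        int nresults) const {
        *result_index = input[i];
        *results = 1;
    }
};

struct SumFun {
    __host__ __device__ int operator()(int a, int b) const { return a + b; }
};

// d_keys: device pointer to nKeys ints in [0, nOut); h_out: host array of nOut bins.
static int runHistogramSketch(const int *d_keys, int nKeys, int nOut, int *h_out)
{
    // Ask the library how much scratch space it wants (treated as a byte count here).
    int tmpBytes = getHistogramBufSize<histogram_atomic_inc>(0, nOut);
    if (tmpBytes < 0) return -1;                     // device query failed
    void *tmpBuffer = NULL;
    if (tmpBytes > 0) cudaMalloc(&tmpBuffer, tmpBytes);

    for (int i = 0; i < nOut; i++) h_out[i] = 0;     // results are accumulated into out

    cudaError_t err = callHistogramKernel<histogram_atomic_inc, 1>(
        d_keys, BinXform(), SumFun(),
        0, nKeys,                // index range [start, end)
        0, h_out, nOut,          // zero element, output bins, bin count
        false,                   // out lives in host memory
        (cudaStream_t)0,         // default stream
        tmpBuffer,
        true);                   // allow the multi-pass fallback

    if (tmpBuffer) cudaFree(tmpBuffer);
    return (err == cudaSuccess) ? 0 : -1;
}

Passing a preallocated tmpBuffer is optional in this sketch; the code paths above also accept NULL and fall back to allocating (and freeing) their own scratch space per call.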
9dd8f41f32fd8504f134341faa98c3e91d9abe6c.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * This is a simple test program to measure the memcopy bandwidth of the GPU. * It can measure device to device copy bandwidth, host to device copy bandwidth * for pageable and pinned memory, and device to host copy bandwidth for pageable * and pinned memory. * * Usage: * ./bandwidthTest [option]... */ // CUDA runtime #include <hip/hip_runtime.h> // includes #include <helper_functions.h> // helper for shared functions common to CUDA Samples #include <helper_cuda.h> // helper functions for CUDA error checking and initialization #include <hip/hip_runtime.h> #include <memory> #include <iostream> #include <cassert> static char *sSDKsample = "CUDA Bandwidth Test"; // defines, project #define MEMCOPY_ITERATIONS 10 #define DEFAULT_SIZE ( 32 * ( 1 << 20 ) ) //32 M #define DEFAULT_INCREMENT (1 << 22) //4 M #define CACHE_CLEAR_SIZE (1 << 24) //16 M //shmoo mode defines #define SHMOO_MEMSIZE_MAX (1 << 26) //64 M #define SHMOO_MEMSIZE_START (1 << 10) //1 KB #define SHMOO_INCREMENT_1KB (1 << 10) //1 KB #define SHMOO_INCREMENT_2KB (1 << 11) //2 KB #define SHMOO_INCREMENT_10KB (10 * (1 << 10)) //10KB #define SHMOO_INCREMENT_100KB (100 * (1 << 10)) //100 KB #define SHMOO_INCREMENT_1MB (1 << 20) //1 MB #define SHMOO_INCREMENT_2MB (1 << 21) //2 MB #define SHMOO_INCREMENT_4MB (1 << 22) //4 MB #define SHMOO_LIMIT_20KB (20 * (1 << 10)) //20 KB #define SHMOO_LIMIT_50KB (50 * (1 << 10)) //50 KB #define SHMOO_LIMIT_100KB (100 * (1 << 10)) //100 KB #define SHMOO_LIMIT_1MB (1 << 20) //1 MB #define SHMOO_LIMIT_16MB (1 << 24) //16 MB #define SHMOO_LIMIT_32MB (1 << 25) //32 MB //enums, project enum testMode { QUICK_MODE, RANGE_MODE, SHMOO_MODE }; enum memcpyKind { DEVICE_TO_HOST, HOST_TO_DEVICE, DEVICE_TO_DEVICE }; enum printMode { USER_READABLE, CSV }; enum memoryMode { PINNED, PAGEABLE }; const char *sMemoryCopyKind[] = { "Device to Host", "Host to Device", "Device to Device", NULL }; const char *sMemoryMode[] = { "PINNED", "PAGEABLE", NULL }; // if true, use CPU based timing for everything static bool bDontUseGPUTiming; int *pArgc = NULL; char **pArgv = NULL; //////////////////////////////////////////////////////////////////////////////// // declaration, forward int runTest(const int argc, const char **argv); void testBandwidth(unsigned int start, unsigned int end, unsigned int increment, testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); void testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); void testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc); float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc); float testDeviceToDeviceTransfer(unsigned int memSize); void 
printResultsReadable(unsigned int *memSizes, double *bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc); void printResultsCSV(unsigned int *memSizes, double *bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc); void printHelp(void); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { pArgc = &argc; pArgv = argv; // set logfile name and start logs printf("[%s] - Starting...\n", sSDKsample); int iRetVal = runTest(argc, (const char **)argv); if (iRetVal < 0) { checkCudaErrors(hipSetDevice(0)); // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits hipDeviceReset(); } // finish printf("%s\n", (iRetVal==0) ? "Result = PASS" : "Result = FAIL"); exit((iRetVal==0) ? EXIT_SUCCESS : EXIT_FAILURE); } /////////////////////////////////////////////////////////////////////////////// //Parse args, run the appropriate tests /////////////////////////////////////////////////////////////////////////////// int runTest(const int argc, const char **argv) { int start = DEFAULT_SIZE; int end = DEFAULT_SIZE; int startDevice = 0; int endDevice = 0; int increment = DEFAULT_INCREMENT; testMode mode = QUICK_MODE; bool htod = false; bool dtoh = false; bool dtod = false; bool wc = false; char *modeStr; char *device = NULL; printMode printmode = USER_READABLE; char *memModeStr = NULL; memoryMode memMode = PINNED; //process command line args if (checkCmdLineFlag(argc, argv, "help")) { printHelp(); return 0; } if (checkCmdLineFlag(argc, argv, "csv")) { printmode = CSV; } if (getCmdLineArgumentString(argc, argv, "memory", &memModeStr)) { if (strcmp(memModeStr, "pageable") == 0) { memMode = PAGEABLE; } else if (strcmp(memModeStr, "pinned") == 0) { memMode = PINNED; } else { printf("Invalid memory mode - valid modes are pageable or pinned\n"); printf("See --help for more information\n"); return -1000; } } else { //default - pinned memory memMode = PINNED; } if (getCmdLineArgumentString(argc, argv, "device", &device)) { int deviceCount; hipError_t error_id = hipGetDeviceCount(&deviceCount); if (error_id != hipSuccess) { printf("hipGetDeviceCount returned %d\n-> %s\n", (int)error_id, hipGetErrorString(error_id)); exit(EXIT_FAILURE); } if (deviceCount == 0) { printf("!!!!!No devices found!!!!!\n"); return -2000; } if (strcmp(device, "all") == 0) { printf("\n!!!!!Cumulative Bandwidth to be computed from all the devices !!!!!!\n\n"); startDevice = 0; endDevice = deviceCount-1; } else { startDevice = endDevice = atoi(device); if (startDevice >= deviceCount || startDevice < 0) { printf("\n!!!!!Invalid GPU number %d given hence default gpu %d will be used !!!!!\n", startDevice,0); startDevice = endDevice = 0; } } } printf("Running on...\n\n"); for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++) { hipDeviceProp_t deviceProp; hipError_t error_id = hipGetDeviceProperties(&deviceProp, currentDevice); if (error_id == hipSuccess) { printf(" Device %d: %s\n", currentDevice, deviceProp.name); if (deviceProp.computeMode == hipComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no 
threads can use ::hipSetDevice().\n"); checkCudaErrors(hipSetDevice(currentDevice)); // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits hipDeviceReset(); exit(EXIT_FAILURE); } } else { printf("hipGetDeviceProperties returned %d\n-> %s\n", (int)error_id, hipGetErrorString(error_id)); checkCudaErrors(hipSetDevice(currentDevice)); // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits hipDeviceReset(); exit(EXIT_FAILURE); } } if (getCmdLineArgumentString(argc, argv, "mode", &modeStr)) { //figure out the mode if (strcmp(modeStr, "quick") == 0) { printf(" Quick Mode\n\n"); mode = QUICK_MODE; } else if (strcmp(modeStr, "shmoo") == 0) { printf(" Shmoo Mode\n\n"); mode = SHMOO_MODE; } else if (strcmp(modeStr, "range") == 0) { printf(" Range Mode\n\n"); mode = RANGE_MODE; } else { printf("Invalid mode - valid modes are quick, range, or shmoo\n"); printf("See --help for more information\n"); return -3000; } } else { //default mode - quick printf(" Quick Mode\n\n"); mode = QUICK_MODE; } if (checkCmdLineFlag(argc, argv, "htod")) { htod = true; } if (checkCmdLineFlag(argc, argv, "dtoh")) { dtoh = true; } if (checkCmdLineFlag(argc, argv, "dtod")) { dtod = true; } #if CUDART_VERSION >= 2020 if (checkCmdLineFlag(argc, argv, "wc")) { wc = true; } #endif if (checkCmdLineFlag(argc, argv, "cputiming")) { bDontUseGPUTiming = true; } if (!htod && !dtoh && !dtod) { //default: All htod = true; dtoh = true; dtod = true; } if (RANGE_MODE == mode) { if (checkCmdLineFlag(argc, (const char **)argv, "start")) { start = getCmdLineArgumentInt(argc, argv, "start"); if (start <= 0) { printf("Illegal argument - start must be greater than zero\n"); return -4000; } } else { printf("Must specify a starting size in range mode\n"); printf("See --help for more information\n"); return -5000; } if (checkCmdLineFlag(argc, (const char **)argv, "end")) { end = getCmdLineArgumentInt(argc, argv, "end"); if (end <= 0) { printf("Illegal argument - end must be greater than zero\n"); return -6000; } if (start > end) { printf("Illegal argument - start is greater than end\n"); return -7000; } } else { printf("Must specify an end size in range mode.\n"); printf("See --help for more information\n"); return -8000; } if (checkCmdLineFlag(argc, argv, "increment")) { increment = getCmdLineArgumentInt(argc, argv, "increment"); if (increment <= 0) { printf("Illegal argument - increment must be greater than zero\n"); return -9000; } } else { printf("Must specify an increment in user mode\n"); printf("See --help for more information\n"); return -10000; } } if (htod) { testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment, mode, HOST_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc); } if (dtoh) { testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment, mode, DEVICE_TO_HOST, printmode, memMode, startDevice, endDevice, wc); } if (dtod) { testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment, mode, DEVICE_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc); } // Ensure 
that we reset all CUDA Devices in question for (int nDevice = startDevice; nDevice <= endDevice; nDevice++) { hipSetDevice(nDevice); // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits hipDeviceReset(); } return 0; } /////////////////////////////////////////////////////////////////////////////// // Run a bandwidth test /////////////////////////////////////////////////////////////////////////////// void testBandwidth(unsigned int start, unsigned int end, unsigned int increment, testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { switch (mode) { case QUICK_MODE: testBandwidthQuick(DEFAULT_SIZE, kind, printmode, memMode, startDevice, endDevice, wc); break; case RANGE_MODE: testBandwidthRange(start, end, increment, kind, printmode, memMode, startDevice, endDevice, wc); break; case SHMOO_MODE: testBandwidthShmoo(kind, printmode, memMode, startDevice, endDevice, wc); break; default: break; } } ////////////////////////////////////////////////////////////////////// // Run a quick mode bandwidth test ////////////////////////////////////////////////////////////////////// void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { testBandwidthRange(size, size, DEFAULT_INCREMENT, kind, printmode, memMode, startDevice, endDevice, wc); } /////////////////////////////////////////////////////////////////////// // Run a range mode bandwidth test ////////////////////////////////////////////////////////////////////// void testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { //count the number of copies we're going to run unsigned int count = 1 + ((end - start) / increment); unsigned int *memSizes = (unsigned int *)malloc(count * sizeof(unsigned int)); double *bandwidths = (double *) malloc(count * sizeof(double)); // Before calculating the cumulative bandwidth, initialize bandwidths array to NULL for (unsigned int i = 0; i < count; i++) { bandwidths[i] = 0.0; } // Use the device asked by the user for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++) { hipSetDevice(currentDevice); //run each of the copies for (unsigned int i = 0; i < count; i++) { memSizes[i] = start + i * increment; switch (kind) { case DEVICE_TO_HOST: bandwidths[i] += testDeviceToHostTransfer(memSizes[i], memMode, wc); break; case HOST_TO_DEVICE: bandwidths[i] += testHostToDeviceTransfer(memSizes[i], memMode, wc); break; case DEVICE_TO_DEVICE: bandwidths[i] += testDeviceToDeviceTransfer(memSizes[i]); break; } } } // Complete the bandwidth computation on all the devices //print results if (printmode == CSV) { printResultsCSV(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } else { printResultsReadable(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } //clean up free(memSizes); free(bandwidths); } ////////////////////////////////////////////////////////////////////////////// // Intense shmoo mode - covers a large range of values with varying increments 
////////////////////////////////////////////////////////////////////////////// void testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { //count the number of copies to make unsigned int count = 1 + (SHMOO_LIMIT_20KB / SHMOO_INCREMENT_1KB) + ((SHMOO_LIMIT_50KB - SHMOO_LIMIT_20KB) / SHMOO_INCREMENT_2KB) + ((SHMOO_LIMIT_100KB - SHMOO_LIMIT_50KB) / SHMOO_INCREMENT_10KB) + ((SHMOO_LIMIT_1MB - SHMOO_LIMIT_100KB) / SHMOO_INCREMENT_100KB) + ((SHMOO_LIMIT_16MB - SHMOO_LIMIT_1MB) / SHMOO_INCREMENT_1MB) + ((SHMOO_LIMIT_32MB - SHMOO_LIMIT_16MB) / SHMOO_INCREMENT_2MB) + ((SHMOO_MEMSIZE_MAX - SHMOO_LIMIT_32MB) / SHMOO_INCREMENT_4MB); unsigned int *memSizes = (unsigned int *)malloc(count * sizeof(unsigned int)); double *bandwidths = (double *) malloc(count * sizeof(double)); // Before calculating the cumulative bandwidth, initialize bandwidths array to NULL for (unsigned int i = 0; i < count; i++) { bandwidths[i] = 0.0; } // Use the device asked by the user for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++) { hipSetDevice(currentDevice); //Run the shmoo int iteration = 0; unsigned int memSize = 0; while (memSize <= SHMOO_MEMSIZE_MAX) { if (memSize < SHMOO_LIMIT_20KB) { memSize += SHMOO_INCREMENT_1KB; } else if (memSize < SHMOO_LIMIT_50KB) { memSize += SHMOO_INCREMENT_2KB; } else if (memSize < SHMOO_LIMIT_100KB) { memSize += SHMOO_INCREMENT_10KB; } else if (memSize < SHMOO_LIMIT_1MB) { memSize += SHMOO_INCREMENT_100KB; } else if (memSize < SHMOO_LIMIT_16MB) { memSize += SHMOO_INCREMENT_1MB; } else if (memSize < SHMOO_LIMIT_32MB) { memSize += SHMOO_INCREMENT_2MB; } else { memSize += SHMOO_INCREMENT_4MB; } memSizes[iteration] = memSize; switch (kind) { case DEVICE_TO_HOST: bandwidths[iteration] += testDeviceToHostTransfer(memSizes[iteration], memMode, wc); break; case HOST_TO_DEVICE: bandwidths[iteration] += testHostToDeviceTransfer(memSizes[iteration], memMode, wc); break; case DEVICE_TO_DEVICE: bandwidths[iteration] += testDeviceToDeviceTransfer(memSizes[iteration]); break; } iteration++; printf("."); } } // Complete the bandwidth computation on all the devices //print results printf("\n"); if (CSV == printmode) { printResultsCSV(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } else { printResultsReadable(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } //clean up free(memSizes); free(bandwidths); } /////////////////////////////////////////////////////////////////////////////// // test the bandwidth of a device to host memcopy of a specific size /////////////////////////////////////////////////////////////////////////////// float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc) { StopWatchInterface *timer = NULL; float elapsedTimeInMs = 0.0f; float bandwidthInMBs = 0.0f; unsigned char *h_idata = NULL; unsigned char *h_odata = NULL; hipEvent_t start, stop; sdkCreateTimer(&timer); checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); //allocate host memory if (PINNED == memMode) { //pinned memory mode - use special function to get OS-pinned memory #if CUDART_VERSION >= 2020 checkCudaErrors(hipHostMalloc((void **)&h_idata, memSize, (wc) ? hipHostMallocWriteCombined : 0)); checkCudaErrors(hipHostMalloc((void **)&h_odata, memSize, (wc) ? 
hipHostMallocWriteCombined : 0)); #else checkCudaErrors(hipHostMalloc((void **)&h_idata, memSize)); checkCudaErrors(hipHostMalloc((void **)&h_odata, memSize)); #endif } else { //pageable memory mode - use malloc h_idata = (unsigned char *)malloc(memSize); h_odata = (unsigned char *)malloc(memSize); if (h_idata == 0 || h_odata == 0) { fprintf(stderr, "Not enough memory avaialable on host to run test!\n"); exit(EXIT_FAILURE); } } //initialize the memory for (unsigned int i = 0; i < memSize/sizeof(unsigned char); i++) { h_idata[i] = (unsigned char)(i & 0xff); } // allocate device memory unsigned char *d_idata; checkCudaErrors(hipMalloc((void **) &d_idata, memSize)); //initialize the device memory checkCudaErrors(hipMemcpy(d_idata, h_idata, memSize, hipMemcpyHostToDevice)); //copy data from GPU to Host sdkStartTimer(&timer); checkCudaErrors(hipEventRecord(start, 0)); if (PINNED == memMode) { for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { checkCudaErrors(hipMemcpyAsync(h_odata, d_idata, memSize, hipMemcpyDeviceToHost, 0)); } } else { for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { checkCudaErrors(hipMemcpy(h_odata, d_idata, memSize, hipMemcpyDeviceToHost)); } } checkCudaErrors(hipEventRecord(stop, 0)); // make sure GPU has finished copying checkCudaErrors(hipDeviceSynchronize()); //get the the total elapsed time in ms sdkStopTimer(&timer); checkCudaErrors(hipEventElapsedTime(&elapsedTimeInMs, start, stop)); if (PINNED != memMode || bDontUseGPUTiming) { elapsedTimeInMs = sdkGetTimerValue(&timer); } //calculate bandwidth in MB/s bandwidthInMBs = ((float)(1<<10) * memSize * (float)MEMCOPY_ITERATIONS) / (elapsedTimeInMs * (float)(1 << 20)); //clean up memory checkCudaErrors(hipEventDestroy(stop)); checkCudaErrors(hipEventDestroy(start)); sdkDeleteTimer(&timer); if (PINNED == memMode) { checkCudaErrors(hipHostFree(h_idata)); checkCudaErrors(hipHostFree(h_odata)); } else { free(h_idata); free(h_odata); } checkCudaErrors(hipFree(d_idata)); return bandwidthInMBs; } /////////////////////////////////////////////////////////////////////////////// //! test the bandwidth of a host to device memcopy of a specific size /////////////////////////////////////////////////////////////////////////////// float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc) { StopWatchInterface *timer = NULL; float elapsedTimeInMs = 0.0f; float bandwidthInMBs = 0.0f; hipEvent_t start, stop; sdkCreateTimer(&timer); checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); //allocate host memory unsigned char *h_odata = NULL; if (PINNED == memMode) { #if CUDART_VERSION >= 2020 //pinned memory mode - use special function to get OS-pinned memory checkCudaErrors(hipHostMalloc((void **)&h_odata, memSize, (wc) ? 
hipHostMallocWriteCombined : 0)); #else //pinned memory mode - use special function to get OS-pinned memory checkCudaErrors(hipHostMalloc((void **)&h_odata, memSize)); #endif } else { //pageable memory mode - use malloc h_odata = (unsigned char *)malloc(memSize); if (h_odata == 0) { fprintf(stderr, "Not enough memory available on host to run test!\n"); exit(EXIT_FAILURE); } } unsigned char *h_cacheClear1 = (unsigned char *)malloc(CACHE_CLEAR_SIZE); unsigned char *h_cacheClear2 = (unsigned char *)malloc(CACHE_CLEAR_SIZE); if (h_cacheClear1 == 0 || h_cacheClear1 == 0) { fprintf(stderr, "Not enough memory available on host to run test!\n"); exit(EXIT_FAILURE); } //initialize the memory for (unsigned int i = 0; i < memSize/sizeof(unsigned char); i++) { h_odata[i] = (unsigned char)(i & 0xff); } for (unsigned int i = 0; i < CACHE_CLEAR_SIZE / sizeof(unsigned char); i++) { h_cacheClear1[i] = (unsigned char)(i & 0xff); h_cacheClear2[i] = (unsigned char)(0xff - (i & 0xff)); } //allocate device memory unsigned char *d_idata; checkCudaErrors(hipMalloc((void **) &d_idata, memSize)); sdkStartTimer(&timer); checkCudaErrors(hipEventRecord(start, 0)); //copy host memory to device memory if (PINNED == memMode) { for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { checkCudaErrors(hipMemcpyAsync(d_idata, h_odata, memSize, hipMemcpyHostToDevice, 0)); } } else { for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { checkCudaErrors(hipMemcpy(d_idata, h_odata, memSize, hipMemcpyHostToDevice)); } } checkCudaErrors(hipEventRecord(stop, 0)); checkCudaErrors(hipDeviceSynchronize()); //total elapsed time in ms sdkStopTimer(&timer); checkCudaErrors(hipEventElapsedTime(&elapsedTimeInMs, start, stop)); if (PINNED != memMode || bDontUseGPUTiming) { elapsedTimeInMs = sdkGetTimerValue(&timer); } sdkResetTimer(&timer); //calculate bandwidth in MB/s bandwidthInMBs = ((float)(1<<10) * memSize * (float)MEMCOPY_ITERATIONS) / (elapsedTimeInMs * (float)(1 << 20)); //clean up memory checkCudaErrors(hipEventDestroy(stop)); checkCudaErrors(hipEventDestroy(start)); sdkDeleteTimer(&timer); if (PINNED == memMode) { checkCudaErrors(hipHostFree(h_odata)); } else { free(h_odata); } free(h_cacheClear1); free(h_cacheClear2); checkCudaErrors(hipFree(d_idata)); return bandwidthInMBs; } /////////////////////////////////////////////////////////////////////////////// //! 
test the bandwidth of a device to device memcopy of a specific size /////////////////////////////////////////////////////////////////////////////// float testDeviceToDeviceTransfer(unsigned int memSize) { StopWatchInterface *timer = NULL; float elapsedTimeInMs = 0.0f; float bandwidthInMBs = 0.0f; hipEvent_t start, stop; sdkCreateTimer(&timer); checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); //allocate host memory unsigned char *h_idata = (unsigned char *)malloc(memSize); if (h_idata == 0) { fprintf(stderr, "Not enough memory avaialable on host to run test!\n"); exit(EXIT_FAILURE); } //initialize the host memory for (unsigned int i = 0; i < memSize/sizeof(unsigned char); i++) { h_idata[i] = (unsigned char)(i & 0xff); } //allocate device memory unsigned char *d_idata; checkCudaErrors(hipMalloc((void **) &d_idata, memSize)); unsigned char *d_odata; checkCudaErrors(hipMalloc((void **) &d_odata, memSize)); //initialize memory checkCudaErrors(hipMemcpy(d_idata, h_idata, memSize, hipMemcpyHostToDevice)); //run the memcopy sdkStartTimer(&timer); checkCudaErrors(hipEventRecord(start, 0)); for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { checkCudaErrors(hipMemcpy(d_odata, d_idata, memSize, hipMemcpyDeviceToDevice)); } checkCudaErrors(hipEventRecord(stop, 0)); //Since device to device memory copies are non-blocking, //hipDeviceSynchronize() is required in order to get //proper timing. checkCudaErrors(hipDeviceSynchronize()); //get the the total elapsed time in ms sdkStopTimer(&timer); checkCudaErrors(hipEventElapsedTime(&elapsedTimeInMs, start, stop)); if (bDontUseGPUTiming) { elapsedTimeInMs = sdkGetTimerValue(&timer); } //calculate bandwidth in MB/s bandwidthInMBs = 2.0f * ((float)(1<<10) * memSize * (float)MEMCOPY_ITERATIONS) / (elapsedTimeInMs * (float)(1 << 20)); //clean up memory sdkDeleteTimer(&timer); free(h_idata); checkCudaErrors(hipEventDestroy(stop)); checkCudaErrors(hipEventDestroy(start)); checkCudaErrors(hipFree(d_idata)); checkCudaErrors(hipFree(d_odata)); return bandwidthInMBs; } ///////////////////////////////////////////////////////// //print results in an easily read format //////////////////////////////////////////////////////// void printResultsReadable(unsigned int *memSizes, double *bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc) { printf(" %s Bandwidth, %i Device(s)\n", sMemoryCopyKind[kind], iNumDevs); printf(" %s Memory Transfers\n", sMemoryMode[memMode]); if (wc) { printf(" Write-Combined Memory Writes are Enabled"); } printf(" Transfer Size (Bytes)\tBandwidth(MB/s)\n"); unsigned int i; for (i = 0; i < (count - 1); i++) { printf(" %u\t\t\t%s%.1f\n", memSizes[i], (memSizes[i] < 10000)? "\t" : "", bandwidths[i]); } printf(" %u\t\t\t%s%.1f\n\n", memSizes[i], (memSizes[i] < 10000)? 
"\t" : "", bandwidths[i]); } /////////////////////////////////////////////////////////////////////////// //print results in a database format /////////////////////////////////////////////////////////////////////////// void printResultsCSV(unsigned int *memSizes, double *bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc) { std::string sConfig; // log config information if (kind == DEVICE_TO_DEVICE) { sConfig += "D2D"; } else { if (kind == DEVICE_TO_HOST) { sConfig += "D2H"; } else if (kind == HOST_TO_DEVICE) { sConfig += "H2D"; } if (memMode == PAGEABLE) { sConfig += "-Paged"; } else if (memMode == PINNED) { sConfig += "-Pinned"; if (wc) { sConfig += "-WriteCombined"; } } } unsigned int i; double dSeconds = 0.0; for (i = 0; i < count; i++) { dSeconds = (double)memSizes[i] / (bandwidths[i] * (double)(1<<20)); printf("bandwidthTest-%s, Bandwidth = %.1f MB/s, Time = %.5f s, Size = %u bytes, NumDevsUsed = %d\n", sConfig.c_str(), bandwidths[i], dSeconds, memSizes[i], iNumDevs); } } /////////////////////////////////////////////////////////////////////////// //Print help screen /////////////////////////////////////////////////////////////////////////// void printHelp(void) { printf("Usage: bandwidthTest [OPTION]...\n"); printf("Test the bandwidth for device to host, host to device, and device to device transfers\n"); printf("\n"); printf("Example: measure the bandwidth of device to host pinned memory copies in the range 1024 Bytes to 102400 Bytes in 1024 Byte increments\n"); printf("./bandwidthTest --memory=pinned --mode=range --start=1024 --end=102400 --increment=1024 --dtoh\n"); printf("\n"); printf("Options:\n"); printf("--help\tDisplay this help menu\n"); printf("--csv\tPrint results as a CSV\n"); printf("--device=[deviceno]\tSpecify the device device to be used\n"); printf(" all - compute cumulative bandwidth on all the devices\n"); printf(" 0,1,2,...,n - Specify any particular device to be used\n"); printf("--memory=[MEMMODE]\tSpecify which memory mode to use\n"); printf(" pageable - pageable memory\n"); printf(" pinned - non-pageable system memory\n"); printf("--mode=[MODE]\tSpecify the mode to use\n"); printf(" quick - performs a quick measurement\n"); printf(" range - measures a user-specified range of values\n"); printf(" shmoo - performs an intense shmoo of a large range of values\n"); printf("--htod\tMeasure host to device transfers\n"); printf("--dtoh\tMeasure device to host transfers\n"); printf("--dtod\tMeasure device to device transfers\n"); #if CUDART_VERSION >= 2020 printf("--wc\tAllocate pinned memory as write-combined\n"); #endif printf("--cputiming\tForce CPU-based timing always\n"); printf("Range mode options\n"); printf("--start=[SIZE]\tStarting transfer size in bytes\n"); printf("--end=[SIZE]\tEnding transfer size in bytes\n"); printf("--increment=[SIZE]\tIncrement size in bytes\n"); }
9dd8f41f32fd8504f134341faa98c3e91d9abe6c.cu
/* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * This is a simple test program to measure the memcopy bandwidth of the GPU. * It can measure device to device copy bandwidth, host to device copy bandwidth * for pageable and pinned memory, and device to host copy bandwidth for pageable * and pinned memory. * * Usage: * ./bandwidthTest [option]... */ // CUDA runtime #include <cuda_runtime.h> // includes #include <helper_functions.h> // helper for shared functions common to CUDA Samples #include <helper_cuda.h> // helper functions for CUDA error checking and initialization #include <cuda.h> #include <memory> #include <iostream> #include <cassert> static char *sSDKsample = "CUDA Bandwidth Test"; // defines, project #define MEMCOPY_ITERATIONS 10 #define DEFAULT_SIZE ( 32 * ( 1 << 20 ) ) //32 M #define DEFAULT_INCREMENT (1 << 22) //4 M #define CACHE_CLEAR_SIZE (1 << 24) //16 M //shmoo mode defines #define SHMOO_MEMSIZE_MAX (1 << 26) //64 M #define SHMOO_MEMSIZE_START (1 << 10) //1 KB #define SHMOO_INCREMENT_1KB (1 << 10) //1 KB #define SHMOO_INCREMENT_2KB (1 << 11) //2 KB #define SHMOO_INCREMENT_10KB (10 * (1 << 10)) //10KB #define SHMOO_INCREMENT_100KB (100 * (1 << 10)) //100 KB #define SHMOO_INCREMENT_1MB (1 << 20) //1 MB #define SHMOO_INCREMENT_2MB (1 << 21) //2 MB #define SHMOO_INCREMENT_4MB (1 << 22) //4 MB #define SHMOO_LIMIT_20KB (20 * (1 << 10)) //20 KB #define SHMOO_LIMIT_50KB (50 * (1 << 10)) //50 KB #define SHMOO_LIMIT_100KB (100 * (1 << 10)) //100 KB #define SHMOO_LIMIT_1MB (1 << 20) //1 MB #define SHMOO_LIMIT_16MB (1 << 24) //16 MB #define SHMOO_LIMIT_32MB (1 << 25) //32 MB //enums, project enum testMode { QUICK_MODE, RANGE_MODE, SHMOO_MODE }; enum memcpyKind { DEVICE_TO_HOST, HOST_TO_DEVICE, DEVICE_TO_DEVICE }; enum printMode { USER_READABLE, CSV }; enum memoryMode { PINNED, PAGEABLE }; const char *sMemoryCopyKind[] = { "Device to Host", "Host to Device", "Device to Device", NULL }; const char *sMemoryMode[] = { "PINNED", "PAGEABLE", NULL }; // if true, use CPU based timing for everything static bool bDontUseGPUTiming; int *pArgc = NULL; char **pArgv = NULL; //////////////////////////////////////////////////////////////////////////////// // declaration, forward int runTest(const int argc, const char **argv); void testBandwidth(unsigned int start, unsigned int end, unsigned int increment, testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); void testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); void testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc); float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc); float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc); float testDeviceToDeviceTransfer(unsigned int memSize); void printResultsReadable(unsigned int *memSizes, double *bandwidths, unsigned int 
count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc); void printResultsCSV(unsigned int *memSizes, double *bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc); void printHelp(void); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { pArgc = &argc; pArgv = argv; // set logfile name and start logs printf("[%s] - Starting...\n", sSDKsample); int iRetVal = runTest(argc, (const char **)argv); if (iRetVal < 0) { checkCudaErrors(cudaSetDevice(0)); // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits cudaDeviceReset(); } // finish printf("%s\n", (iRetVal==0) ? "Result = PASS" : "Result = FAIL"); exit((iRetVal==0) ? EXIT_SUCCESS : EXIT_FAILURE); } /////////////////////////////////////////////////////////////////////////////// //Parse args, run the appropriate tests /////////////////////////////////////////////////////////////////////////////// int runTest(const int argc, const char **argv) { int start = DEFAULT_SIZE; int end = DEFAULT_SIZE; int startDevice = 0; int endDevice = 0; int increment = DEFAULT_INCREMENT; testMode mode = QUICK_MODE; bool htod = false; bool dtoh = false; bool dtod = false; bool wc = false; char *modeStr; char *device = NULL; printMode printmode = USER_READABLE; char *memModeStr = NULL; memoryMode memMode = PINNED; //process command line args if (checkCmdLineFlag(argc, argv, "help")) { printHelp(); return 0; } if (checkCmdLineFlag(argc, argv, "csv")) { printmode = CSV; } if (getCmdLineArgumentString(argc, argv, "memory", &memModeStr)) { if (strcmp(memModeStr, "pageable") == 0) { memMode = PAGEABLE; } else if (strcmp(memModeStr, "pinned") == 0) { memMode = PINNED; } else { printf("Invalid memory mode - valid modes are pageable or pinned\n"); printf("See --help for more information\n"); return -1000; } } else { //default - pinned memory memMode = PINNED; } if (getCmdLineArgumentString(argc, argv, "device", &device)) { int deviceCount; cudaError_t error_id = cudaGetDeviceCount(&deviceCount); if (error_id != cudaSuccess) { printf("cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id)); exit(EXIT_FAILURE); } if (deviceCount == 0) { printf("!!!!!No devices found!!!!!\n"); return -2000; } if (strcmp(device, "all") == 0) { printf("\n!!!!!Cumulative Bandwidth to be computed from all the devices !!!!!!\n\n"); startDevice = 0; endDevice = deviceCount-1; } else { startDevice = endDevice = atoi(device); if (startDevice >= deviceCount || startDevice < 0) { printf("\n!!!!!Invalid GPU number %d given hence default gpu %d will be used !!!!!\n", startDevice,0); startDevice = endDevice = 0; } } } printf("Running on...\n\n"); for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++) { cudaDeviceProp deviceProp; cudaError_t error_id = cudaGetDeviceProperties(&deviceProp, currentDevice); if (error_id == cudaSuccess) { printf(" Device %d: %s\n", currentDevice, deviceProp.name); if (deviceProp.computeMode == cudaComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n"); 
checkCudaErrors(cudaSetDevice(currentDevice)); // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits cudaDeviceReset(); exit(EXIT_FAILURE); } } else { printf("cudaGetDeviceProperties returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id)); checkCudaErrors(cudaSetDevice(currentDevice)); // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits cudaDeviceReset(); exit(EXIT_FAILURE); } } if (getCmdLineArgumentString(argc, argv, "mode", &modeStr)) { //figure out the mode if (strcmp(modeStr, "quick") == 0) { printf(" Quick Mode\n\n"); mode = QUICK_MODE; } else if (strcmp(modeStr, "shmoo") == 0) { printf(" Shmoo Mode\n\n"); mode = SHMOO_MODE; } else if (strcmp(modeStr, "range") == 0) { printf(" Range Mode\n\n"); mode = RANGE_MODE; } else { printf("Invalid mode - valid modes are quick, range, or shmoo\n"); printf("See --help for more information\n"); return -3000; } } else { //default mode - quick printf(" Quick Mode\n\n"); mode = QUICK_MODE; } if (checkCmdLineFlag(argc, argv, "htod")) { htod = true; } if (checkCmdLineFlag(argc, argv, "dtoh")) { dtoh = true; } if (checkCmdLineFlag(argc, argv, "dtod")) { dtod = true; } #if CUDART_VERSION >= 2020 if (checkCmdLineFlag(argc, argv, "wc")) { wc = true; } #endif if (checkCmdLineFlag(argc, argv, "cputiming")) { bDontUseGPUTiming = true; } if (!htod && !dtoh && !dtod) { //default: All htod = true; dtoh = true; dtod = true; } if (RANGE_MODE == mode) { if (checkCmdLineFlag(argc, (const char **)argv, "start")) { start = getCmdLineArgumentInt(argc, argv, "start"); if (start <= 0) { printf("Illegal argument - start must be greater than zero\n"); return -4000; } } else { printf("Must specify a starting size in range mode\n"); printf("See --help for more information\n"); return -5000; } if (checkCmdLineFlag(argc, (const char **)argv, "end")) { end = getCmdLineArgumentInt(argc, argv, "end"); if (end <= 0) { printf("Illegal argument - end must be greater than zero\n"); return -6000; } if (start > end) { printf("Illegal argument - start is greater than end\n"); return -7000; } } else { printf("Must specify an end size in range mode.\n"); printf("See --help for more information\n"); return -8000; } if (checkCmdLineFlag(argc, argv, "increment")) { increment = getCmdLineArgumentInt(argc, argv, "increment"); if (increment <= 0) { printf("Illegal argument - increment must be greater than zero\n"); return -9000; } } else { printf("Must specify an increment in user mode\n"); printf("See --help for more information\n"); return -10000; } } if (htod) { testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment, mode, HOST_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc); } if (dtoh) { testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment, mode, DEVICE_TO_HOST, printmode, memMode, startDevice, endDevice, wc); } if (dtod) { testBandwidth((unsigned int)start, (unsigned int)end, (unsigned int)increment, mode, DEVICE_TO_DEVICE, printmode, memMode, startDevice, endDevice, wc); } // Ensure that we reset all CUDA 
Devices in question for (int nDevice = startDevice; nDevice <= endDevice; nDevice++) { cudaSetDevice(nDevice); // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits cudaDeviceReset(); } return 0; } /////////////////////////////////////////////////////////////////////////////// // Run a bandwidth test /////////////////////////////////////////////////////////////////////////////// void testBandwidth(unsigned int start, unsigned int end, unsigned int increment, testMode mode, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { switch (mode) { case QUICK_MODE: testBandwidthQuick(DEFAULT_SIZE, kind, printmode, memMode, startDevice, endDevice, wc); break; case RANGE_MODE: testBandwidthRange(start, end, increment, kind, printmode, memMode, startDevice, endDevice, wc); break; case SHMOO_MODE: testBandwidthShmoo(kind, printmode, memMode, startDevice, endDevice, wc); break; default: break; } } ////////////////////////////////////////////////////////////////////// // Run a quick mode bandwidth test ////////////////////////////////////////////////////////////////////// void testBandwidthQuick(unsigned int size, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { testBandwidthRange(size, size, DEFAULT_INCREMENT, kind, printmode, memMode, startDevice, endDevice, wc); } /////////////////////////////////////////////////////////////////////// // Run a range mode bandwidth test ////////////////////////////////////////////////////////////////////// void testBandwidthRange(unsigned int start, unsigned int end, unsigned int increment, memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { //count the number of copies we're going to run unsigned int count = 1 + ((end - start) / increment); unsigned int *memSizes = (unsigned int *)malloc(count * sizeof(unsigned int)); double *bandwidths = (double *) malloc(count * sizeof(double)); // Before calculating the cumulative bandwidth, initialize bandwidths array to NULL for (unsigned int i = 0; i < count; i++) { bandwidths[i] = 0.0; } // Use the device asked by the user for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++) { cudaSetDevice(currentDevice); //run each of the copies for (unsigned int i = 0; i < count; i++) { memSizes[i] = start + i * increment; switch (kind) { case DEVICE_TO_HOST: bandwidths[i] += testDeviceToHostTransfer(memSizes[i], memMode, wc); break; case HOST_TO_DEVICE: bandwidths[i] += testHostToDeviceTransfer(memSizes[i], memMode, wc); break; case DEVICE_TO_DEVICE: bandwidths[i] += testDeviceToDeviceTransfer(memSizes[i]); break; } } } // Complete the bandwidth computation on all the devices //print results if (printmode == CSV) { printResultsCSV(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } else { printResultsReadable(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } //clean up free(memSizes); free(bandwidths); } ////////////////////////////////////////////////////////////////////////////// // Intense shmoo mode - covers a large range of values with varying increments ////////////////////////////////////////////////////////////////////////////// void 
testBandwidthShmoo(memcpyKind kind, printMode printmode, memoryMode memMode, int startDevice, int endDevice, bool wc) { //count the number of copies to make unsigned int count = 1 + (SHMOO_LIMIT_20KB / SHMOO_INCREMENT_1KB) + ((SHMOO_LIMIT_50KB - SHMOO_LIMIT_20KB) / SHMOO_INCREMENT_2KB) + ((SHMOO_LIMIT_100KB - SHMOO_LIMIT_50KB) / SHMOO_INCREMENT_10KB) + ((SHMOO_LIMIT_1MB - SHMOO_LIMIT_100KB) / SHMOO_INCREMENT_100KB) + ((SHMOO_LIMIT_16MB - SHMOO_LIMIT_1MB) / SHMOO_INCREMENT_1MB) + ((SHMOO_LIMIT_32MB - SHMOO_LIMIT_16MB) / SHMOO_INCREMENT_2MB) + ((SHMOO_MEMSIZE_MAX - SHMOO_LIMIT_32MB) / SHMOO_INCREMENT_4MB); unsigned int *memSizes = (unsigned int *)malloc(count * sizeof(unsigned int)); double *bandwidths = (double *) malloc(count * sizeof(double)); // Before calculating the cumulative bandwidth, initialize bandwidths array to NULL for (unsigned int i = 0; i < count; i++) { bandwidths[i] = 0.0; } // Use the device asked by the user for (int currentDevice = startDevice; currentDevice <= endDevice; currentDevice++) { cudaSetDevice(currentDevice); //Run the shmoo int iteration = 0; unsigned int memSize = 0; while (memSize <= SHMOO_MEMSIZE_MAX) { if (memSize < SHMOO_LIMIT_20KB) { memSize += SHMOO_INCREMENT_1KB; } else if (memSize < SHMOO_LIMIT_50KB) { memSize += SHMOO_INCREMENT_2KB; } else if (memSize < SHMOO_LIMIT_100KB) { memSize += SHMOO_INCREMENT_10KB; } else if (memSize < SHMOO_LIMIT_1MB) { memSize += SHMOO_INCREMENT_100KB; } else if (memSize < SHMOO_LIMIT_16MB) { memSize += SHMOO_INCREMENT_1MB; } else if (memSize < SHMOO_LIMIT_32MB) { memSize += SHMOO_INCREMENT_2MB; } else { memSize += SHMOO_INCREMENT_4MB; } memSizes[iteration] = memSize; switch (kind) { case DEVICE_TO_HOST: bandwidths[iteration] += testDeviceToHostTransfer(memSizes[iteration], memMode, wc); break; case HOST_TO_DEVICE: bandwidths[iteration] += testHostToDeviceTransfer(memSizes[iteration], memMode, wc); break; case DEVICE_TO_DEVICE: bandwidths[iteration] += testDeviceToDeviceTransfer(memSizes[iteration]); break; } iteration++; printf("."); } } // Complete the bandwidth computation on all the devices //print results printf("\n"); if (CSV == printmode) { printResultsCSV(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } else { printResultsReadable(memSizes, bandwidths, count, kind, memMode, (1 + endDevice - startDevice), wc); } //clean up free(memSizes); free(bandwidths); } /////////////////////////////////////////////////////////////////////////////// // test the bandwidth of a device to host memcopy of a specific size /////////////////////////////////////////////////////////////////////////////// float testDeviceToHostTransfer(unsigned int memSize, memoryMode memMode, bool wc) { StopWatchInterface *timer = NULL; float elapsedTimeInMs = 0.0f; float bandwidthInMBs = 0.0f; unsigned char *h_idata = NULL; unsigned char *h_odata = NULL; cudaEvent_t start, stop; sdkCreateTimer(&timer); checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); //allocate host memory if (PINNED == memMode) { //pinned memory mode - use special function to get OS-pinned memory #if CUDART_VERSION >= 2020 checkCudaErrors(cudaHostAlloc((void **)&h_idata, memSize, (wc) ? cudaHostAllocWriteCombined : 0)); checkCudaErrors(cudaHostAlloc((void **)&h_odata, memSize, (wc) ? 
cudaHostAllocWriteCombined : 0)); #else checkCudaErrors(cudaMallocHost((void **)&h_idata, memSize)); checkCudaErrors(cudaMallocHost((void **)&h_odata, memSize)); #endif } else { //pageable memory mode - use malloc h_idata = (unsigned char *)malloc(memSize); h_odata = (unsigned char *)malloc(memSize); if (h_idata == 0 || h_odata == 0) { fprintf(stderr, "Not enough memory avaialable on host to run test!\n"); exit(EXIT_FAILURE); } } //initialize the memory for (unsigned int i = 0; i < memSize/sizeof(unsigned char); i++) { h_idata[i] = (unsigned char)(i & 0xff); } // allocate device memory unsigned char *d_idata; checkCudaErrors(cudaMalloc((void **) &d_idata, memSize)); //initialize the device memory checkCudaErrors(cudaMemcpy(d_idata, h_idata, memSize, cudaMemcpyHostToDevice)); //copy data from GPU to Host sdkStartTimer(&timer); checkCudaErrors(cudaEventRecord(start, 0)); if (PINNED == memMode) { for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { checkCudaErrors(cudaMemcpyAsync(h_odata, d_idata, memSize, cudaMemcpyDeviceToHost, 0)); } } else { for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { checkCudaErrors(cudaMemcpy(h_odata, d_idata, memSize, cudaMemcpyDeviceToHost)); } } checkCudaErrors(cudaEventRecord(stop, 0)); // make sure GPU has finished copying checkCudaErrors(cudaDeviceSynchronize()); //get the the total elapsed time in ms sdkStopTimer(&timer); checkCudaErrors(cudaEventElapsedTime(&elapsedTimeInMs, start, stop)); if (PINNED != memMode || bDontUseGPUTiming) { elapsedTimeInMs = sdkGetTimerValue(&timer); } //calculate bandwidth in MB/s bandwidthInMBs = ((float)(1<<10) * memSize * (float)MEMCOPY_ITERATIONS) / (elapsedTimeInMs * (float)(1 << 20)); //clean up memory checkCudaErrors(cudaEventDestroy(stop)); checkCudaErrors(cudaEventDestroy(start)); sdkDeleteTimer(&timer); if (PINNED == memMode) { checkCudaErrors(cudaFreeHost(h_idata)); checkCudaErrors(cudaFreeHost(h_odata)); } else { free(h_idata); free(h_odata); } checkCudaErrors(cudaFree(d_idata)); return bandwidthInMBs; } /////////////////////////////////////////////////////////////////////////////// //! test the bandwidth of a host to device memcopy of a specific size /////////////////////////////////////////////////////////////////////////////// float testHostToDeviceTransfer(unsigned int memSize, memoryMode memMode, bool wc) { StopWatchInterface *timer = NULL; float elapsedTimeInMs = 0.0f; float bandwidthInMBs = 0.0f; cudaEvent_t start, stop; sdkCreateTimer(&timer); checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); //allocate host memory unsigned char *h_odata = NULL; if (PINNED == memMode) { #if CUDART_VERSION >= 2020 //pinned memory mode - use special function to get OS-pinned memory checkCudaErrors(cudaHostAlloc((void **)&h_odata, memSize, (wc) ? 
cudaHostAllocWriteCombined : 0)); #else //pinned memory mode - use special function to get OS-pinned memory checkCudaErrors(cudaMallocHost((void **)&h_odata, memSize)); #endif } else { //pageable memory mode - use malloc h_odata = (unsigned char *)malloc(memSize); if (h_odata == 0) { fprintf(stderr, "Not enough memory available on host to run test!\n"); exit(EXIT_FAILURE); } } unsigned char *h_cacheClear1 = (unsigned char *)malloc(CACHE_CLEAR_SIZE); unsigned char *h_cacheClear2 = (unsigned char *)malloc(CACHE_CLEAR_SIZE); if (h_cacheClear1 == 0 || h_cacheClear1 == 0) { fprintf(stderr, "Not enough memory available on host to run test!\n"); exit(EXIT_FAILURE); } //initialize the memory for (unsigned int i = 0; i < memSize/sizeof(unsigned char); i++) { h_odata[i] = (unsigned char)(i & 0xff); } for (unsigned int i = 0; i < CACHE_CLEAR_SIZE / sizeof(unsigned char); i++) { h_cacheClear1[i] = (unsigned char)(i & 0xff); h_cacheClear2[i] = (unsigned char)(0xff - (i & 0xff)); } //allocate device memory unsigned char *d_idata; checkCudaErrors(cudaMalloc((void **) &d_idata, memSize)); sdkStartTimer(&timer); checkCudaErrors(cudaEventRecord(start, 0)); //copy host memory to device memory if (PINNED == memMode) { for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { checkCudaErrors(cudaMemcpyAsync(d_idata, h_odata, memSize, cudaMemcpyHostToDevice, 0)); } } else { for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { checkCudaErrors(cudaMemcpy(d_idata, h_odata, memSize, cudaMemcpyHostToDevice)); } } checkCudaErrors(cudaEventRecord(stop, 0)); checkCudaErrors(cudaDeviceSynchronize()); //total elapsed time in ms sdkStopTimer(&timer); checkCudaErrors(cudaEventElapsedTime(&elapsedTimeInMs, start, stop)); if (PINNED != memMode || bDontUseGPUTiming) { elapsedTimeInMs = sdkGetTimerValue(&timer); } sdkResetTimer(&timer); //calculate bandwidth in MB/s bandwidthInMBs = ((float)(1<<10) * memSize * (float)MEMCOPY_ITERATIONS) / (elapsedTimeInMs * (float)(1 << 20)); //clean up memory checkCudaErrors(cudaEventDestroy(stop)); checkCudaErrors(cudaEventDestroy(start)); sdkDeleteTimer(&timer); if (PINNED == memMode) { checkCudaErrors(cudaFreeHost(h_odata)); } else { free(h_odata); } free(h_cacheClear1); free(h_cacheClear2); checkCudaErrors(cudaFree(d_idata)); return bandwidthInMBs; } /////////////////////////////////////////////////////////////////////////////// //! 
test the bandwidth of a device to device memcopy of a specific size /////////////////////////////////////////////////////////////////////////////// float testDeviceToDeviceTransfer(unsigned int memSize) { StopWatchInterface *timer = NULL; float elapsedTimeInMs = 0.0f; float bandwidthInMBs = 0.0f; cudaEvent_t start, stop; sdkCreateTimer(&timer); checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); //allocate host memory unsigned char *h_idata = (unsigned char *)malloc(memSize); if (h_idata == 0) { fprintf(stderr, "Not enough memory avaialable on host to run test!\n"); exit(EXIT_FAILURE); } //initialize the host memory for (unsigned int i = 0; i < memSize/sizeof(unsigned char); i++) { h_idata[i] = (unsigned char)(i & 0xff); } //allocate device memory unsigned char *d_idata; checkCudaErrors(cudaMalloc((void **) &d_idata, memSize)); unsigned char *d_odata; checkCudaErrors(cudaMalloc((void **) &d_odata, memSize)); //initialize memory checkCudaErrors(cudaMemcpy(d_idata, h_idata, memSize, cudaMemcpyHostToDevice)); //run the memcopy sdkStartTimer(&timer); checkCudaErrors(cudaEventRecord(start, 0)); for (unsigned int i = 0; i < MEMCOPY_ITERATIONS; i++) { checkCudaErrors(cudaMemcpy(d_odata, d_idata, memSize, cudaMemcpyDeviceToDevice)); } checkCudaErrors(cudaEventRecord(stop, 0)); //Since device to device memory copies are non-blocking, //cudaDeviceSynchronize() is required in order to get //proper timing. checkCudaErrors(cudaDeviceSynchronize()); //get the the total elapsed time in ms sdkStopTimer(&timer); checkCudaErrors(cudaEventElapsedTime(&elapsedTimeInMs, start, stop)); if (bDontUseGPUTiming) { elapsedTimeInMs = sdkGetTimerValue(&timer); } //calculate bandwidth in MB/s bandwidthInMBs = 2.0f * ((float)(1<<10) * memSize * (float)MEMCOPY_ITERATIONS) / (elapsedTimeInMs * (float)(1 << 20)); //clean up memory sdkDeleteTimer(&timer); free(h_idata); checkCudaErrors(cudaEventDestroy(stop)); checkCudaErrors(cudaEventDestroy(start)); checkCudaErrors(cudaFree(d_idata)); checkCudaErrors(cudaFree(d_odata)); return bandwidthInMBs; } ///////////////////////////////////////////////////////// //print results in an easily read format //////////////////////////////////////////////////////// void printResultsReadable(unsigned int *memSizes, double *bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc) { printf(" %s Bandwidth, %i Device(s)\n", sMemoryCopyKind[kind], iNumDevs); printf(" %s Memory Transfers\n", sMemoryMode[memMode]); if (wc) { printf(" Write-Combined Memory Writes are Enabled"); } printf(" Transfer Size (Bytes)\tBandwidth(MB/s)\n"); unsigned int i; for (i = 0; i < (count - 1); i++) { printf(" %u\t\t\t%s%.1f\n", memSizes[i], (memSizes[i] < 10000)? "\t" : "", bandwidths[i]); } printf(" %u\t\t\t%s%.1f\n\n", memSizes[i], (memSizes[i] < 10000)? 
"\t" : "", bandwidths[i]); } /////////////////////////////////////////////////////////////////////////// //print results in a database format /////////////////////////////////////////////////////////////////////////// void printResultsCSV(unsigned int *memSizes, double *bandwidths, unsigned int count, memcpyKind kind, memoryMode memMode, int iNumDevs, bool wc) { std::string sConfig; // log config information if (kind == DEVICE_TO_DEVICE) { sConfig += "D2D"; } else { if (kind == DEVICE_TO_HOST) { sConfig += "D2H"; } else if (kind == HOST_TO_DEVICE) { sConfig += "H2D"; } if (memMode == PAGEABLE) { sConfig += "-Paged"; } else if (memMode == PINNED) { sConfig += "-Pinned"; if (wc) { sConfig += "-WriteCombined"; } } } unsigned int i; double dSeconds = 0.0; for (i = 0; i < count; i++) { dSeconds = (double)memSizes[i] / (bandwidths[i] * (double)(1<<20)); printf("bandwidthTest-%s, Bandwidth = %.1f MB/s, Time = %.5f s, Size = %u bytes, NumDevsUsed = %d\n", sConfig.c_str(), bandwidths[i], dSeconds, memSizes[i], iNumDevs); } } /////////////////////////////////////////////////////////////////////////// //Print help screen /////////////////////////////////////////////////////////////////////////// void printHelp(void) { printf("Usage: bandwidthTest [OPTION]...\n"); printf("Test the bandwidth for device to host, host to device, and device to device transfers\n"); printf("\n"); printf("Example: measure the bandwidth of device to host pinned memory copies in the range 1024 Bytes to 102400 Bytes in 1024 Byte increments\n"); printf("./bandwidthTest --memory=pinned --mode=range --start=1024 --end=102400 --increment=1024 --dtoh\n"); printf("\n"); printf("Options:\n"); printf("--help\tDisplay this help menu\n"); printf("--csv\tPrint results as a CSV\n"); printf("--device=[deviceno]\tSpecify the device device to be used\n"); printf(" all - compute cumulative bandwidth on all the devices\n"); printf(" 0,1,2,...,n - Specify any particular device to be used\n"); printf("--memory=[MEMMODE]\tSpecify which memory mode to use\n"); printf(" pageable - pageable memory\n"); printf(" pinned - non-pageable system memory\n"); printf("--mode=[MODE]\tSpecify the mode to use\n"); printf(" quick - performs a quick measurement\n"); printf(" range - measures a user-specified range of values\n"); printf(" shmoo - performs an intense shmoo of a large range of values\n"); printf("--htod\tMeasure host to device transfers\n"); printf("--dtoh\tMeasure device to host transfers\n"); printf("--dtod\tMeasure device to device transfers\n"); #if CUDART_VERSION >= 2020 printf("--wc\tAllocate pinned memory as write-combined\n"); #endif printf("--cputiming\tForce CPU-based timing always\n"); printf("Range mode options\n"); printf("--start=[SIZE]\tStarting transfer size in bytes\n"); printf("--end=[SIZE]\tEnding transfer size in bytes\n"); printf("--increment=[SIZE]\tIncrement size in bytes\n"); }
0c01926614cb1f8fe6515ac03a27e1e25f9469ec.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include "b2b_conv2d_fprop_implicit_gemm_s8ncxhwx_s8cxrskx_s8ncxhwx_tensor_op_s32_sm75.h" #include "b2b_conv2d_fprop_implicit_gemm_s8ncxhwx_s8cxrskx_s8ncxhwx_tensor_op_s32_sm80.h" #include "b2b_conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm75.h" #include "b2b_conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.h" int run() { hipDeviceProp_t props; hipError_t error = hipGetDeviceProperties(&props, 0); if (error != hipSuccess) { std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl; return -1; } if (!(props.major * 10 + props.minor >= 75)) { std::cerr << "Turing Tensor Ops must be run on a machine with compute capability at least 75." << std::endl; // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) std::cout << "Running on SM80" << std::endl; run_nonfused_conv2d_fprop_optimized_f16_sm80(); run_fused_conv2d_fprop_optimized_f16_sm80(); run_nonfused_conv2d_fprop_optimized_s8_sm80(); run_fused_conv2d_fprop_optimized_s8_sm80(); #elif defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) std::cout << "Running on SM75" << std::endl; run_nonfused_conv2d_fprop_optimized_f16_sm75(); run_fused_conv2d_fprop_optimized_f16_sm75(); run_nonfused_conv2d_fprop_optimized_s8_sm75(); run_fused_conv2d_fprop_optimized_s8_sm75(); #endif return 0; } int main() { bool notSupported = false; // Turing Tensor Core operations exposed with mma.sync are first available in CUDA 10.2. // // CUTLASS must be compiled with CUDA 10.2 Toolkit to run these examples. 
if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) { std::cerr << "Tensor Core operations used in this example must be compiled with CUDA 10.2 Toolkit or later." << std::endl; notSupported = true; } hipDeviceProp_t props; hipError_t error = hipGetDeviceProperties(&props, 0); if (error != hipSuccess) { std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl; return -1; } if (!(props.major * 10 + props.minor >= 75)) { std::cerr << "Tensor Ops used in this example must be run on a machine with compute capability at least 75." << std::endl; notSupported = true; } if (notSupported) { // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } return run(); }
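main() above also refuses to run unless the example was compiled with a new enough CUDA toolkit. Below is a minimal standalone sketch of that preprocessor gate; the wrapper name is hypothetical, and the fallback branch is an assumption for compilers that do not define the version macros.

// Hypothetical wrapper around the toolkit-version gate used in main() above.
// __CUDACC_VER_MAJOR__ / __CUDACC_VER_MINOR__ are defined by the CUDA compiler;
// if they are missing we conservatively report "too old".
static bool compiledWithCuda10_2OrLater()
{
#if defined(__CUDACC_VER_MAJOR__) && defined(__CUDACC_VER_MINOR__)
    return __CUDACC_VER_MAJOR__ > 10 ||
           (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2);
#else
    return false;
#endif
}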
0c01926614cb1f8fe6515ac03a27e1e25f9469ec.cu
/*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include "b2b_conv2d_fprop_implicit_gemm_s8ncxhwx_s8cxrskx_s8ncxhwx_tensor_op_s32_sm75.h" #include "b2b_conv2d_fprop_implicit_gemm_s8ncxhwx_s8cxrskx_s8ncxhwx_tensor_op_s32_sm80.h" #include "b2b_conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm75.h" #include "b2b_conv2d_fprop_implicit_gemm_f16nhwc_f16nhwc_f16nhwc_tensor_op_f16_sm80.h" int run() { cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (!(props.major * 10 + props.minor >= 75)) { std::cerr << "Turing Tensor Ops must be run on a machine with compute capability at least 75." << std::endl; // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } #if defined(CUTLASS_ARCH_MMA_SM80_SUPPORTED) std::cout << "Running on SM80" << std::endl; run_nonfused_conv2d_fprop_optimized_f16_sm80(); run_fused_conv2d_fprop_optimized_f16_sm80(); run_nonfused_conv2d_fprop_optimized_s8_sm80(); run_fused_conv2d_fprop_optimized_s8_sm80(); #elif defined(CUTLASS_ARCH_MMA_SM75_SUPPORTED) std::cout << "Running on SM75" << std::endl; run_nonfused_conv2d_fprop_optimized_f16_sm75(); run_fused_conv2d_fprop_optimized_f16_sm75(); run_nonfused_conv2d_fprop_optimized_s8_sm75(); run_fused_conv2d_fprop_optimized_s8_sm75(); #endif return 0; } int main() { bool notSupported = false; // Turing Tensor Core operations exposed with mma.sync are first available in CUDA 10.2. // // CUTLASS must be compiled with CUDA 10.2 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ > 10 || (__CUDACC_VER_MAJOR__ == 10 && __CUDACC_VER_MINOR__ >= 2))) { std::cerr << "Tensor Core operations used in this example must be compiled with CUDA 10.2 Toolkit or later." 
<< std::endl; notSupported = true; } cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (!(props.major * 10 + props.minor >= 75)) { std::cerr << "Tensor Ops used in this example must be run on a machine with compute capability at least 75." << std::endl; notSupported = true; } if (notSupported) { // Returning zero so this test passes on older Toolkits. Its actions are no-op. return 0; } return run(); }
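Comparing the .cu listing above with its .hip counterpart, the conversion is a one-for-one renaming of runtime entities plus the added hip_runtime include; control flow is untouched. The sketch below is illustrative only: it shows the HIP spellings with the original CUDA names in comments, and the function name is not from the example.

#include <hip/hip_runtime.h>
#include <iostream>

// Illustrative helper: the HIP side of the renamings visible in the pair above,
// with the corresponding CUDA identifiers noted per line.
int queryComputeCapability()
{
    hipDeviceProp_t props;                                   // cudaDeviceProp
    hipError_t error = hipGetDeviceProperties(&props, 0);    // cudaGetDeviceProperties
    if (error != hipSuccess) {                                // cudaSuccess
        std::cerr << hipGetErrorString(error) << std::endl;   // cudaGetErrorString
        return -1;
    }
    return props.major * 10 + props.minor;  // e.g. 75 for compute capability 7.5
}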
87b8819d1ee25722b379aacdcef8334d71d9aec0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <stdio.h> #include <stdlib.h> #include <windows.h> #include "cutil_inline.h" // CUDA FFT Libraries #include <hipfft.h> // OpenGL Graphics includes #include <GL/glew.h> #if defined(__APPLE__) || defined(MACOSX) #include <GLUT/glut.h> #else #include <GL/freeglut.h> #endif // FluidsGL CUDA kernel definitions #include "fluidsGL_kernels.cuh" // Texture reference for reading velocity field texture<float2, 2> texref; static hipArray *array = NULL; // Particle data extern GLuint vbo; // OpenGL vertex buffer object extern struct cudaGraphicsResource *cuda_vbo_resource; // handles OpenGL-CUDA exchange // Texture pitch extern size_t tPitch; extern hipfftHandle planr2c; extern hipfftHandle planc2r; extern cData *vxfield = NULL; extern cData *vyfield = NULL; __constant__ __device__ BYTE* d_kinectVideo; __constant__ __device__ BYTE* d_kinectDepth; void setupTexture(int x, int y) { // Wrap mode appears to be the new default texref.filterMode = hipFilterModeLinear; hipChannelFormatDesc desc = hipCreateChannelDesc<float2>(); hipMallocArray(&array, &desc, y, x); cutilCheckMsg("hipMalloc failed"); } void bindTexture(void) { hipBindTextureToArray(texref, array); cutilCheckMsg("hipBindTexture failed"); } void unbindTexture(void) { hipUnbindTexture(texref); } void updateTexture(cData *data, size_t wib, size_t h, size_t pitch) { hipMemcpy2DToArray(array, 0, 0, data, pitch, wib, h, hipMemcpyDeviceToDevice); cutilCheckMsg("hipMemcpy failed"); } void deleteTexture(void) { hipFreeArray(array); } #if 0 // Note that these kernels are designed to work with arbitrary // domain sizes, not just domains that are multiples of the tile // size. Therefore, we have extra code that checks to make sure // a given thread location falls within the domain boundaries in // both X and Y. Also, the domain is covered by looping over // multiple elements in the Y direction, while there is a one-to-one // mapping between threads in X and the tile size in X. // Nolan Goodnight 9/22/06 // This method adds constant force vectors to the velocity field // stored in 'v' according to v(x,t+1) = v(x,t) + dt * f. __global__ void addForces_k(cData *v, int dx, int dy, int spx, int spy, float fx, float fy, int r, size_t pitch) { int tx = threadIdx.x; int ty = threadIdx.y; cData *fj = (cData*)((char*)v + (ty + spy) * pitch) + tx + spx; cData vterm = *fj; tx -= r; ty -= r; float s = 1.f / (1.f + tx*tx*tx*tx + ty*ty*ty*ty); vterm.x += s * fx; vterm.y += s * fy; *fj = vterm; } // This method performs the velocity advection step, where we // trace velocity vectors back in time to update each grid cell. // That is, v(x,t+1) = v(p(x,-dt),t). Here we perform bilinear // interpolation in the velocity space. 
__global__ void advectVelocity_k(cData *v, float *vx, float *vy, int dx, int pdx, int dy, float dt, int lb) { int gtidx = blockIdx.x * blockDim.x + threadIdx.x; int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb; int p; cData vterm, ploc; float vxterm, vyterm; // gtidx is the domain location in x for this thread if (gtidx < dx) { for (p = 0; p < lb; p++) { // fi is the domain location in y for this thread int fi = gtidy + p; if (fi < dy) { int fj = fi * pdx + gtidx; vterm = tex2D(texref, (float)gtidx, (float)fi); ploc.x = (gtidx + 0.5f) - (dt * vterm.x * dx); ploc.y = (fi + 0.5f) - (dt * vterm.y * dy); vterm = tex2D(texref, ploc.x, ploc.y); vxterm = vterm.x; vyterm = vterm.y; vx[fj] = vxterm; vy[fj] = vyterm; } } } } // This method performs velocity diffusion and forces mass conservation // in the frequency domain. The inputs 'vx' and 'vy' are complex-valued // arrays holding the Fourier coefficients of the velocity field in // X and Y. Diffusion in this space takes a simple form described as: // v(k,t) = v(k,t) / (1 + visc * dt * k^2), where visc is the viscosity, // and k is the wavenumber. The projection step forces the Fourier // velocity vectors to be orthogonal to the vectors for each // wavenumber: v(k,t) = v(k,t) - ((k dot v(k,t) * k) / k^2. __global__ void diffuseProject_k(cData *vx, cData *vy, int dx, int dy, float dt, float visc, int lb) { int gtidx = blockIdx.x * blockDim.x + threadIdx.x; int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb; int p; cData xterm, yterm; // gtidx is the domain location in x for this thread if (gtidx < dx) { for (p = 0; p < lb; p++) { // fi is the domain location in y for this thread int fi = gtidy + p; if (fi < dy) { int fj = fi * dx + gtidx; xterm = vx[fj]; yterm = vy[fj]; // Compute the index of the wavenumber based on the // data order produced by a standard NN FFT. int iix = gtidx; int iiy = (fi>dy/2)?(fi-(dy)):fi; // Velocity diffusion float kk = (float)(iix * iix + iiy * iiy); // k^2 float diff = 1.f / (1.f + visc * dt * kk); xterm.x *= diff; xterm.y *= diff; yterm.x *= diff; yterm.y *= diff; // Velocity projection if (kk > 0.f) { float rkk = 1.f / kk; // Real portion of velocity projection float rkp = (iix * xterm.x + iiy * yterm.x); // Imaginary portion of velocity projection float ikp = (iix * xterm.y + iiy * yterm.y); xterm.x -= rkk * rkp * iix; xterm.y -= rkk * ikp * iix; yterm.x -= rkk * rkp * iiy; yterm.y -= rkk * ikp * iiy; } vx[fj] = xterm; vy[fj] = yterm; } } } } // This method updates the velocity field 'v' using the two complex // arrays from the previous step: 'vx' and 'vy'. Here we scale the // real components by 1/(dx*dy) to account for an unnormalized FFT. 
__global__ void updateVelocity_k(cData *v, float *vx, float *vy, int dx, int pdx, int dy, int lb, size_t pitch) { int gtidx = blockIdx.x * blockDim.x + threadIdx.x; int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb; int p; float vxterm, vyterm; cData nvterm; // gtidx is the domain location in x for this thread if (gtidx < dx) { for (p = 0; p < lb; p++) { // fi is the domain location in y for this thread int fi = gtidy + p; if (fi < dy) { int fjr = fi * pdx + gtidx; vxterm = vx[fjr]; vyterm = vy[fjr]; // Normalize the result of the inverse FFT float scale = 1.f / (dx * dy); nvterm.x = vxterm * scale; nvterm.y = vyterm * scale; cData *fj = (cData*)((char*)v + fi * pitch) + gtidx; *fj = nvterm; } } // If this thread is inside the domain in Y } // If this thread is inside the domain in X } // This method updates the particles by moving particle positions // according to the velocity field and time step. That is, for each // particle: p(t+1) = p(t) + dt * v(p(t)). __global__ void advectParticles_k(cData *part, cData *v, int dx, int dy, float dt, int lb, size_t pitch) { int gtidx = blockIdx.x * blockDim.x + threadIdx.x; int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb; int p; // gtidx is the domain location in x for this thread cData pterm, vterm; if (gtidx < dx) { for (p = 0; p < lb; p++) { // fi is the domain location in y for this thread int fi = gtidy + p; if (fi < dy) { int fj = fi * dx + gtidx; pterm = part[fj]; int xvi = ((int)(pterm.x * dx)); int yvi = ((int)(pterm.y * dy)); vterm = *((cData*)((char*)v + yvi * pitch) + xvi); pterm.x += dt * vterm.x; pterm.x = pterm.x - (int)pterm.x; pterm.x += 1.f; pterm.x = pterm.x - (int)pterm.x; pterm.y += dt * vterm.y; pterm.y = pterm.y - (int)pterm.y; pterm.y += 1.f; pterm.y = pterm.y - (int)pterm.y; part[fj] = pterm; } } // If this thread is inside the domain in Y } // If this thread is inside the domain in X } #endif // 0 // This method updates the particles by moving particle positions // according to the velocity field and time step. That is, for each // particle: p(t+1) = p(t) + dt * v(p(t)). 
__global__ void feelTheAttraction_k(cData *particules, float3 position, float timer, float param, BYTE* depth) { int index = blockDim.x*blockIdx.x + threadIdx.x; float3 originalPosition; originalPosition.y = (index / DIMX) ; originalPosition.x = (index - (originalPosition.y*DIMX)); originalPosition.z = 0.f; /* int depthIndex = originalPosition.y/(512/gKinectDepthHeight)*gKinectDepthWidth+0.3f*originalPosition.x/(512/gKinectDepthWidth); int di = depthIndex*gKinectDepth; */ float d = ((512*512)/(gKinectDepthWidth*gKinectDepthHeight*gKinectDepth)); int di = gKinectDepth*index*d; float depthPosition = 0.f; unsigned char p = 0; if( di < gKinectDepthWidth*gKinectDepthHeight*gKinectDepth) { unsigned char a = depth[di]; unsigned char b = depth[di+1]; p = a & 7; unsigned char A = (a<<3); depthPosition = (A*256.f+b)/1024.f; } //if( position.x != 0.f && position.y != 0.f ) if( p != 0 ) { float3 length; length.x = (position.x - particules[index].x+0.5f); length.y = (position.y - particules[index].y+0.5f); length.z = (position.z - particules[index].z+0.5f); float l = (depthPosition+sqrt(length.x*length.x + length.y*length.y + length.z*length.z))/param; //if( l > 0.7f*param ) { particules[index].x += l/length.x + 0.005f*cos(timer*32.f+l); particules[index].y += l/length.y + 0.005f*sin(timer*22.f+l); } /* else { if( l > 0.69f*param ) { particules[index].x += 0.5f*cos(timer+l); particules[index].y += 0.5f*sin(timer+l); } else { particules[index].x -= length.x/l; particules[index].y -= length.y/l; } } */ } else { particules[index].x += (originalPosition.x/DIMX - particules[index].x)/10.f; particules[index].y += (originalPosition.y/DIMY - particules[index].y)/10.f; //particules[index].z += (originalPosition.z/DIMX - particules[index].z)/10.f; } } // These are the external function calls necessary for launching fluid simuation extern "C" void feelTheAttraction( cData *particules, int dx, int dy, float3 position, float timer, float param ) { cData *p; cutilSafeCall(hipGraphicsMapResources(1, &cuda_vbo_resource, 0)); size_t num_bytes; cutilSafeCall(hipGraphicsResourceGetMappedPointer((void **)&p, &num_bytes, cuda_vbo_resource)); cutilCheckMsg("hipGraphicsResourceGetMappedPointer failed"); dim3 gridSize(512,1,1); dim3 blockSize(dx*dy/gridSize.x,1,1); hipLaunchKernelGGL(( feelTheAttraction_k), dim3(gridSize), dim3(blockSize), 0, 0, p,position,timer, param, d_kinectDepth); //cutilCheckMsg("feelTheAttraction_k failed."); cutilSafeCall(hipGraphicsUnmapResources(1, &cuda_vbo_resource, 0)); cutilCheckMsg("hipGraphicsUnmapResources failed"); } extern "C" void h2d_kinect( BYTE* kinectVideo, BYTE* kinectDepth ) { //cutilSafeCall(hipMemcpy( d_kinectVideo, kinectVideo, gKinectVideoWidth*gKinectVideoHeight*gKinectVideo*sizeof(BYTE), hipMemcpyHostToDevice )); cutilSafeCall(hipMemcpy( d_kinectDepth, kinectDepth, gKinectDepthWidth*gKinectDepthHeight*gKinectDepth*sizeof(BYTE), hipMemcpyHostToDevice )); } extern "C" void initialize_scene() { //cutilSafeCall(hipMalloc( (void**)&d_kinectVideo, gKinectVideoWidth*gKinectVideoHeight*gKinectVideo*sizeof(BYTE))); cutilSafeCall(hipMalloc( (void**)&d_kinectDepth, gKinectDepthWidth*gKinectDepthHeight*gKinectDepth*sizeof(BYTE))); } extern "C" void finalize_scene() { //cutilSafeCall(hipFree( d_kinectVideo )); cutilSafeCall(hipFree( d_kinectDepth )); } #if 0 // These are the external function calls necessary for launching fluid simuation extern "C" void addForces(cData *v, int dx, int dy, int spx, int spy, float fx, float fy, int r) { dim3 tids(2*r+1, 2*r+1); hipLaunchKernelGGL(( 
addForces_k), dim3(1), dim3(tids), 0, 0, v, dx, dy, spx, spy, fx, fy, r, tPitch); cutilCheckMsg("addForces_k failed."); } extern "C" void advectVelocity(cData *v, float *vx, float *vy, int dx, int pdx, int dy, float dt) { dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1)); dim3 tids(TIDSX, TIDSY); updateTexture(v, DIMX*sizeof(cData), DIMX, tPitch); hipLaunchKernelGGL(( advectVelocity_k), dim3(grid), dim3(tids), 0, 0, v, vx, vy, dx, pdx, dy, dt, TILEY/TIDSY); cutilCheckMsg("advectVelocity_k failed."); } extern "C" void diffuseProject(cData *vx, cData *vy, int dx, int dy, float dt, float visc) { // Forward FFT hipfftExecR2C(planr2c, (hipfftReal*)vx, (hipfftComplex*)vx); hipfftExecR2C(planr2c, (hipfftReal*)vy, (hipfftComplex*)vy); uint3 grid = make_uint3((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1), 1); uint3 tids = make_uint3(TIDSX, TIDSY, 1); hipLaunchKernelGGL(( diffuseProject_k), dim3(grid), dim3(tids), 0, 0, vx, vy, dx, dy, dt, visc, TILEY/TIDSY); cutilCheckMsg("diffuseProject_k failed."); // Inverse FFT hipfftExecC2R(planc2r, (hipfftComplex*)vx, (hipfftReal*)vx); hipfftExecC2R(planc2r, (hipfftComplex*)vy, (hipfftReal*)vy); } extern "C" void updateVelocity(cData *v, float *vx, float *vy, int dx, int pdx, int dy) { dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1)); dim3 tids(TIDSX, TIDSY); hipLaunchKernelGGL(( updateVelocity_k), dim3(grid), dim3(tids), 0, 0, v, vx, vy, dx, pdx, dy, TILEY/TIDSY, tPitch); cutilCheckMsg("updateVelocity_k failed."); } extern "C" void advectParticles(GLuint vbo, cData *v, int dx, int dy, float dt) { dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1)); dim3 tids(TIDSX, TIDSY); cData *p; cutilSafeCall(hipGraphicsMapResources(1, &cuda_vbo_resource, 0)); size_t num_bytes; cutilSafeCall(hipGraphicsResourceGetMappedPointer((void **)&p, &num_bytes, cuda_vbo_resource)); cutilCheckMsg("hipGraphicsResourceGetMappedPointer failed"); hipLaunchKernelGGL(( advectParticles_k), dim3(grid), dim3(tids), 0, 0, p, v, dx, dy, dt, TILEY/TIDSY, tPitch); cutilCheckMsg("advectParticles_k failed."); cutilSafeCall(hipGraphicsUnmapResources(1, &cuda_vbo_resource, 0)); cutilCheckMsg("hipGraphicsUnmapResources failed"); }
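advectParticles_k above keeps each particle coordinate inside the unit square with a branch-free fractional wrap after adding the displacement. Pulled out as a standalone device helper below; the helper name is mine, and it assumes the displaced coordinate stays within (-1, 2), which holds when the coordinate starts in [0, 1) and the displacement magnitude is below 1.

// Standalone version of the wrap used in advectParticles_k (name not from the sample).
__device__ __forceinline__ float wrapUnitInterval(float x, float d)
{
    x += d;
    x = x - (int)x;     // fractional part; can be negative if x dropped below 0
    x += 1.f;           // shift a negative fraction into (0, 2)
    return x - (int)x;  // final value back in [0, 1)
}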
87b8819d1ee25722b379aacdcef8334d71d9aec0.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include <stdio.h> #include <stdlib.h> #include <windows.h> #include "cutil_inline.h" // CUDA FFT Libraries #include <cufft.h> // OpenGL Graphics includes #include <GL/glew.h> #if defined(__APPLE__) || defined(MACOSX) #include <GLUT/glut.h> #else #include <GL/freeglut.h> #endif // FluidsGL CUDA kernel definitions #include "fluidsGL_kernels.cuh" // Texture reference for reading velocity field texture<float2, 2> texref; static cudaArray *array = NULL; // Particle data extern GLuint vbo; // OpenGL vertex buffer object extern struct cudaGraphicsResource *cuda_vbo_resource; // handles OpenGL-CUDA exchange // Texture pitch extern size_t tPitch; extern cufftHandle planr2c; extern cufftHandle planc2r; extern cData *vxfield = NULL; extern cData *vyfield = NULL; __constant__ __device__ BYTE* d_kinectVideo; __constant__ __device__ BYTE* d_kinectDepth; void setupTexture(int x, int y) { // Wrap mode appears to be the new default texref.filterMode = cudaFilterModeLinear; cudaChannelFormatDesc desc = cudaCreateChannelDesc<float2>(); cudaMallocArray(&array, &desc, y, x); cutilCheckMsg("cudaMalloc failed"); } void bindTexture(void) { cudaBindTextureToArray(texref, array); cutilCheckMsg("cudaBindTexture failed"); } void unbindTexture(void) { cudaUnbindTexture(texref); } void updateTexture(cData *data, size_t wib, size_t h, size_t pitch) { cudaMemcpy2DToArray(array, 0, 0, data, pitch, wib, h, cudaMemcpyDeviceToDevice); cutilCheckMsg("cudaMemcpy failed"); } void deleteTexture(void) { cudaFreeArray(array); } #if 0 // Note that these kernels are designed to work with arbitrary // domain sizes, not just domains that are multiples of the tile // size. Therefore, we have extra code that checks to make sure // a given thread location falls within the domain boundaries in // both X and Y. Also, the domain is covered by looping over // multiple elements in the Y direction, while there is a one-to-one // mapping between threads in X and the tile size in X. // Nolan Goodnight 9/22/06 // This method adds constant force vectors to the velocity field // stored in 'v' according to v(x,t+1) = v(x,t) + dt * f. __global__ void addForces_k(cData *v, int dx, int dy, int spx, int spy, float fx, float fy, int r, size_t pitch) { int tx = threadIdx.x; int ty = threadIdx.y; cData *fj = (cData*)((char*)v + (ty + spy) * pitch) + tx + spx; cData vterm = *fj; tx -= r; ty -= r; float s = 1.f / (1.f + tx*tx*tx*tx + ty*ty*ty*ty); vterm.x += s * fx; vterm.y += s * fy; *fj = vterm; } // This method performs the velocity advection step, where we // trace velocity vectors back in time to update each grid cell. // That is, v(x,t+1) = v(p(x,-dt),t). Here we perform bilinear // interpolation in the velocity space. 
__global__ void advectVelocity_k(cData *v, float *vx, float *vy, int dx, int pdx, int dy, float dt, int lb) { int gtidx = blockIdx.x * blockDim.x + threadIdx.x; int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb; int p; cData vterm, ploc; float vxterm, vyterm; // gtidx is the domain location in x for this thread if (gtidx < dx) { for (p = 0; p < lb; p++) { // fi is the domain location in y for this thread int fi = gtidy + p; if (fi < dy) { int fj = fi * pdx + gtidx; vterm = tex2D(texref, (float)gtidx, (float)fi); ploc.x = (gtidx + 0.5f) - (dt * vterm.x * dx); ploc.y = (fi + 0.5f) - (dt * vterm.y * dy); vterm = tex2D(texref, ploc.x, ploc.y); vxterm = vterm.x; vyterm = vterm.y; vx[fj] = vxterm; vy[fj] = vyterm; } } } } // This method performs velocity diffusion and forces mass conservation // in the frequency domain. The inputs 'vx' and 'vy' are complex-valued // arrays holding the Fourier coefficients of the velocity field in // X and Y. Diffusion in this space takes a simple form described as: // v(k,t) = v(k,t) / (1 + visc * dt * k^2), where visc is the viscosity, // and k is the wavenumber. The projection step forces the Fourier // velocity vectors to be orthogonal to the vectors for each // wavenumber: v(k,t) = v(k,t) - ((k dot v(k,t) * k) / k^2. __global__ void diffuseProject_k(cData *vx, cData *vy, int dx, int dy, float dt, float visc, int lb) { int gtidx = blockIdx.x * blockDim.x + threadIdx.x; int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb; int p; cData xterm, yterm; // gtidx is the domain location in x for this thread if (gtidx < dx) { for (p = 0; p < lb; p++) { // fi is the domain location in y for this thread int fi = gtidy + p; if (fi < dy) { int fj = fi * dx + gtidx; xterm = vx[fj]; yterm = vy[fj]; // Compute the index of the wavenumber based on the // data order produced by a standard NN FFT. int iix = gtidx; int iiy = (fi>dy/2)?(fi-(dy)):fi; // Velocity diffusion float kk = (float)(iix * iix + iiy * iiy); // k^2 float diff = 1.f / (1.f + visc * dt * kk); xterm.x *= diff; xterm.y *= diff; yterm.x *= diff; yterm.y *= diff; // Velocity projection if (kk > 0.f) { float rkk = 1.f / kk; // Real portion of velocity projection float rkp = (iix * xterm.x + iiy * yterm.x); // Imaginary portion of velocity projection float ikp = (iix * xterm.y + iiy * yterm.y); xterm.x -= rkk * rkp * iix; xterm.y -= rkk * ikp * iix; yterm.x -= rkk * rkp * iiy; yterm.y -= rkk * ikp * iiy; } vx[fj] = xterm; vy[fj] = yterm; } } } } // This method updates the velocity field 'v' using the two complex // arrays from the previous step: 'vx' and 'vy'. Here we scale the // real components by 1/(dx*dy) to account for an unnormalized FFT. 
__global__ void updateVelocity_k(cData *v, float *vx, float *vy, int dx, int pdx, int dy, int lb, size_t pitch) { int gtidx = blockIdx.x * blockDim.x + threadIdx.x; int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb; int p; float vxterm, vyterm; cData nvterm; // gtidx is the domain location in x for this thread if (gtidx < dx) { for (p = 0; p < lb; p++) { // fi is the domain location in y for this thread int fi = gtidy + p; if (fi < dy) { int fjr = fi * pdx + gtidx; vxterm = vx[fjr]; vyterm = vy[fjr]; // Normalize the result of the inverse FFT float scale = 1.f / (dx * dy); nvterm.x = vxterm * scale; nvterm.y = vyterm * scale; cData *fj = (cData*)((char*)v + fi * pitch) + gtidx; *fj = nvterm; } } // If this thread is inside the domain in Y } // If this thread is inside the domain in X } // This method updates the particles by moving particle positions // according to the velocity field and time step. That is, for each // particle: p(t+1) = p(t) + dt * v(p(t)). __global__ void advectParticles_k(cData *part, cData *v, int dx, int dy, float dt, int lb, size_t pitch) { int gtidx = blockIdx.x * blockDim.x + threadIdx.x; int gtidy = blockIdx.y * (lb * blockDim.y) + threadIdx.y * lb; int p; // gtidx is the domain location in x for this thread cData pterm, vterm; if (gtidx < dx) { for (p = 0; p < lb; p++) { // fi is the domain location in y for this thread int fi = gtidy + p; if (fi < dy) { int fj = fi * dx + gtidx; pterm = part[fj]; int xvi = ((int)(pterm.x * dx)); int yvi = ((int)(pterm.y * dy)); vterm = *((cData*)((char*)v + yvi * pitch) + xvi); pterm.x += dt * vterm.x; pterm.x = pterm.x - (int)pterm.x; pterm.x += 1.f; pterm.x = pterm.x - (int)pterm.x; pterm.y += dt * vterm.y; pterm.y = pterm.y - (int)pterm.y; pterm.y += 1.f; pterm.y = pterm.y - (int)pterm.y; part[fj] = pterm; } } // If this thread is inside the domain in Y } // If this thread is inside the domain in X } #endif // 0 // This method updates the particles by moving particle positions // according to the velocity field and time step. That is, for each // particle: p(t+1) = p(t) + dt * v(p(t)). 
__global__ void feelTheAttraction_k(cData *particules, float3 position, float timer, float param, BYTE* depth) { int index = blockDim.x*blockIdx.x + threadIdx.x; float3 originalPosition; originalPosition.y = (index / DIMX) ; originalPosition.x = (index - (originalPosition.y*DIMX)); originalPosition.z = 0.f; /* int depthIndex = originalPosition.y/(512/gKinectDepthHeight)*gKinectDepthWidth+0.3f*originalPosition.x/(512/gKinectDepthWidth); int di = depthIndex*gKinectDepth; */ float d = ((512*512)/(gKinectDepthWidth*gKinectDepthHeight*gKinectDepth)); int di = gKinectDepth*index*d; float depthPosition = 0.f; unsigned char p = 0; if( di < gKinectDepthWidth*gKinectDepthHeight*gKinectDepth) { unsigned char a = depth[di]; unsigned char b = depth[di+1]; p = a & 7; unsigned char A = (a<<3); depthPosition = (A*256.f+b)/1024.f; } //if( position.x != 0.f && position.y != 0.f ) if( p != 0 ) { float3 length; length.x = (position.x - particules[index].x+0.5f); length.y = (position.y - particules[index].y+0.5f); length.z = (position.z - particules[index].z+0.5f); float l = (depthPosition+sqrt(length.x*length.x + length.y*length.y + length.z*length.z))/param; //if( l > 0.7f*param ) { particules[index].x += l/length.x + 0.005f*cos(timer*32.f+l); particules[index].y += l/length.y + 0.005f*sin(timer*22.f+l); } /* else { if( l > 0.69f*param ) { particules[index].x += 0.5f*cos(timer+l); particules[index].y += 0.5f*sin(timer+l); } else { particules[index].x -= length.x/l; particules[index].y -= length.y/l; } } */ } else { particules[index].x += (originalPosition.x/DIMX - particules[index].x)/10.f; particules[index].y += (originalPosition.y/DIMY - particules[index].y)/10.f; //particules[index].z += (originalPosition.z/DIMX - particules[index].z)/10.f; } } // These are the external function calls necessary for launching fluid simuation extern "C" void feelTheAttraction( cData *particules, int dx, int dy, float3 position, float timer, float param ) { cData *p; cutilSafeCall(cudaGraphicsMapResources(1, &cuda_vbo_resource, 0)); size_t num_bytes; cutilSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&p, &num_bytes, cuda_vbo_resource)); cutilCheckMsg("cudaGraphicsResourceGetMappedPointer failed"); dim3 gridSize(512,1,1); dim3 blockSize(dx*dy/gridSize.x,1,1); feelTheAttraction_k<<<gridSize, blockSize>>>(p,position,timer, param, d_kinectDepth); //cutilCheckMsg("feelTheAttraction_k failed."); cutilSafeCall(cudaGraphicsUnmapResources(1, &cuda_vbo_resource, 0)); cutilCheckMsg("cudaGraphicsUnmapResources failed"); } extern "C" void h2d_kinect( BYTE* kinectVideo, BYTE* kinectDepth ) { //cutilSafeCall(cudaMemcpy( d_kinectVideo, kinectVideo, gKinectVideoWidth*gKinectVideoHeight*gKinectVideo*sizeof(BYTE), cudaMemcpyHostToDevice )); cutilSafeCall(cudaMemcpy( d_kinectDepth, kinectDepth, gKinectDepthWidth*gKinectDepthHeight*gKinectDepth*sizeof(BYTE), cudaMemcpyHostToDevice )); } extern "C" void initialize_scene() { //cutilSafeCall(cudaMalloc( (void**)&d_kinectVideo, gKinectVideoWidth*gKinectVideoHeight*gKinectVideo*sizeof(BYTE))); cutilSafeCall(cudaMalloc( (void**)&d_kinectDepth, gKinectDepthWidth*gKinectDepthHeight*gKinectDepth*sizeof(BYTE))); } extern "C" void finalize_scene() { //cutilSafeCall(cudaFree( d_kinectVideo )); cutilSafeCall(cudaFree( d_kinectDepth )); } #if 0 // These are the external function calls necessary for launching fluid simuation extern "C" void addForces(cData *v, int dx, int dy, int spx, int spy, float fx, float fy, int r) { dim3 tids(2*r+1, 2*r+1); addForces_k<<<1, tids>>>(v, dx, dy, spx, spy, fx, fy, 
r, tPitch); cutilCheckMsg("addForces_k failed."); } extern "C" void advectVelocity(cData *v, float *vx, float *vy, int dx, int pdx, int dy, float dt) { dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1)); dim3 tids(TIDSX, TIDSY); updateTexture(v, DIMX*sizeof(cData), DIMX, tPitch); advectVelocity_k<<<grid, tids>>>(v, vx, vy, dx, pdx, dy, dt, TILEY/TIDSY); cutilCheckMsg("advectVelocity_k failed."); } extern "C" void diffuseProject(cData *vx, cData *vy, int dx, int dy, float dt, float visc) { // Forward FFT cufftExecR2C(planr2c, (cufftReal*)vx, (cufftComplex*)vx); cufftExecR2C(planr2c, (cufftReal*)vy, (cufftComplex*)vy); uint3 grid = make_uint3((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1), 1); uint3 tids = make_uint3(TIDSX, TIDSY, 1); diffuseProject_k<<<grid, tids>>>(vx, vy, dx, dy, dt, visc, TILEY/TIDSY); cutilCheckMsg("diffuseProject_k failed."); // Inverse FFT cufftExecC2R(planc2r, (cufftComplex*)vx, (cufftReal*)vx); cufftExecC2R(planc2r, (cufftComplex*)vy, (cufftReal*)vy); } extern "C" void updateVelocity(cData *v, float *vx, float *vy, int dx, int pdx, int dy) { dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1)); dim3 tids(TIDSX, TIDSY); updateVelocity_k<<<grid, tids>>>(v, vx, vy, dx, pdx, dy, TILEY/TIDSY, tPitch); cutilCheckMsg("updateVelocity_k failed."); } extern "C" void advectParticles(GLuint vbo, cData *v, int dx, int dy, float dt) { dim3 grid((dx/TILEX)+(!(dx%TILEX)?0:1), (dy/TILEY)+(!(dy%TILEY)?0:1)); dim3 tids(TIDSX, TIDSY); cData *p; cutilSafeCall(cudaGraphicsMapResources(1, &cuda_vbo_resource, 0)); size_t num_bytes; cutilSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&p, &num_bytes, cuda_vbo_resource)); cutilCheckMsg("cudaGraphicsResourceGetMappedPointer failed"); advectParticles_k<<<grid, tids>>>(p, v, dx, dy, dt, TILEY/TIDSY, tPitch); cutilCheckMsg("advectParticles_k failed."); cutilSafeCall(cudaGraphicsUnmapResources(1, &cuda_vbo_resource, 0)); cutilCheckMsg("cudaGraphicsUnmapResources failed"); }
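The clearest textual difference between the .cu and .hip fluidsGL listings is the kernel-launch syntax: triple-chevron launches become hipLaunchKernelGGL calls that take the kernel, grid and block dimensions, dynamic shared-memory size, stream, and then the kernel arguments. A minimal illustrative pair follows; the kernel and launcher are examples of the pattern, not code from the sample.

#include <hip/hip_runtime.h>

__global__ void scale_k(float *data, float s, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= s;
}

void launchScale(float *d_data, float s, int n)
{
    dim3 grid((n + 255) / 256), block(256);
    // CUDA (.cu):  scale_k<<<grid, block>>>(d_data, s, n);
    // HIP (.hip):  kernel, grid, block, dynamic shared bytes, stream, then args
    hipLaunchKernelGGL(scale_k, grid, block, 0, 0, d_data, s, n);
}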
03f442170b6763b379d4e278d7011678fb00bb2d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "neural_q_pathtracer.cuh" __host__ NeuralQPathtracer::NeuralQPathtracer( unsigned int frames, unsigned int batch_size, SDLScreen& screen, Scene& scene, Camera& camera, int argc, char** argv ){ ////////////////////////////////////////////////////////////// /* Estimate Render Time */ ////////////////////////////////////////////////////////////// const float time_per_bounce = 2.f; std::cout << "Weakly Estimated Frame Render Time: " << time_per_bounce * (float)MAX_RAY_BOUNCES * (float)SAMPLES_PER_PIXEL << "s" << std::endl; ////////////////////////////////////////////////////////////// /* Assign attributes */ ////////////////////////////////////////////////////////////// this->epsilon = EPSILON_START; this->vertices_count = scene.vertices_count; this->ray_batch_size = batch_size; /* How many rays to be processed at once */ this->num_batches = (SCREEN_HEIGHT*SCREEN_WIDTH + (batch_size -1))/batch_size; /* How many batches in total */ printf("Batch Size: %d\n", batch_size); printf("Number of Batches: %d\n", num_batches); dim3 b_size(8,8); this->block_size = b_size; /* How many threads in a single block to process the screen*/ int blocks_x = (SCREEN_WIDTH + this->block_size.x - 1)/this->block_size.x; int blocks_y = (SCREEN_HEIGHT + this->block_size.y - 1)/this->block_size.y; dim3 n_bs(blocks_x, blocks_y); this->num_blocks = n_bs;/* How many blocks to process all pixels on the screen */ ////////////////////////////////////////////////////////////// /* Initialise the DQN */ ////////////////////////////////////////////////////////////// //TODO: Might have to specify the amount of memory the GPU can use // beforehand, otherwise it seems to assign over memory allocated later // on. It may continue to do this when calculating back&forwad prop auto dyparams = dynet::extract_dynet_params(argc, argv); dynet::initialize(dyparams); dynet::ParameterCollection model; dynet::AdamTrainer trainer(model); this->dqn = DQNetwork(); unsigned int input_dim = TRAIN_ON_POSITION ? 
3 : this->vertices_count; this->dqn.initialize(model, input_dim /* Input dims */, GRID_RESOLUTION*GRID_RESOLUTION /* Output dims */); ////////////////////////////////////////////////////////////// /* Load the previous DQN Model */ ////////////////////////////////////////////////////////////// std::string fname = "../Radiance_Map_Data/deep_q_learning_12_12.model"; if (LOAD_MODEL && file_exists(fname)){ dynet::TextFileLoader loader(fname); loader.populate(model); } ////////////////////////////////////////////////////////////// /* Intialise Pixel value buffers */ ////////////////////////////////////////////////////////////// vec3* host_buffer = new vec3[ SCREEN_HEIGHT * SCREEN_WIDTH ]; vec3* device_buffer; checkCudaErrors(hipMalloc(&device_buffer, SCREEN_HEIGHT * SCREEN_WIDTH * sizeof(vec3))); ////////////////////////////////////////////////////////////// /* Initialise Prev Host buffers */ ////////////////////////////////////////////////////////////// unsigned int* directions_host = new unsigned int[ SCREEN_HEIGHT * SCREEN_WIDTH ]; ////////////////////////////////////////////////////////////// /* Initialise ray arrays on CUDA device */ ////////////////////////////////////////////////////////////// float* ray_locations; /* Ray intersection location (State) */ float* prev_ray_locations; /* Location the ray was previously sampled from */ float* ray_normals; /* Intersection normal */ float* ray_directions; /* Direction to next shoot the ray */ unsigned int* ray_states; /* Ray can either be in the process of being traced, terminated, or post tracing for learning */ float* ray_rewards; /* Reward recieved from Q(s,a) */ float* ray_discounts; /* Discount factor for current rays path */ float* ray_throughputs; /* Throughput for calc pixel value */ unsigned int* ray_bounces; /* Total number of bounces for each ray before intersection*/ float* ray_vertices; /* Stores vertices in coordinate system centered at the current ray position */ checkCudaErrors(hipMalloc(&ray_locations, sizeof(float) * 3 * SCREEN_HEIGHT * SCREEN_WIDTH)); checkCudaErrors(hipMalloc(&prev_ray_locations, sizeof(float) * 3 * SCREEN_HEIGHT * SCREEN_WIDTH)); checkCudaErrors(hipMalloc(&ray_normals, sizeof(float) * 3 * SCREEN_HEIGHT * SCREEN_WIDTH)); checkCudaErrors(hipMalloc(&ray_directions, sizeof(float) * 3 * SCREEN_HEIGHT * SCREEN_WIDTH)); checkCudaErrors(hipMalloc(&ray_states, sizeof(unsigned int) * SCREEN_HEIGHT * SCREEN_WIDTH)); checkCudaErrors(hipMalloc(&ray_rewards, sizeof(float) * SCREEN_HEIGHT * SCREEN_WIDTH)); checkCudaErrors(hipMalloc(&ray_discounts, sizeof(float) * SCREEN_HEIGHT * SCREEN_WIDTH)); checkCudaErrors(hipMalloc(&ray_throughputs, sizeof(float) * 3 * SCREEN_HEIGHT * SCREEN_WIDTH)); checkCudaErrors(hipMalloc(&ray_bounces, sizeof(unsigned int) *SCREEN_HEIGHT *SCREEN_WIDTH)); checkCudaErrors(hipMalloc(&ray_vertices, sizeof(float) * this->vertices_count * SCREEN_HEIGHT * SCREEN_WIDTH)); Camera* device_camera; /* Camera on the CUDA device */ Surface* device_surfaces; AreaLight* device_light_planes; float* device_vertices; Scene* device_scene; /* Scene to render */ // Copy the camera checkCudaErrors(hipMalloc(&device_camera, sizeof(Camera))); checkCudaErrors(hipMemcpy(device_camera, &camera, sizeof(Camera), hipMemcpyHostToDevice)); // Copy surfaces into device memory space checkCudaErrors(hipMalloc(&device_surfaces, scene.surfaces_count * sizeof(Surface))); checkCudaErrors(hipMemcpy(device_surfaces, scene.surfaces, scene.surfaces_count * sizeof(Surface), hipMemcpyHostToDevice)); // Copy light planes into device memory 
space checkCudaErrors(hipMalloc(&device_light_planes, scene.area_light_count * sizeof(AreaLight))); checkCudaErrors(hipMemcpy(device_light_planes, scene.area_lights, scene.area_light_count * sizeof(AreaLight), hipMemcpyHostToDevice)); // Copy vertices into device memory space checkCudaErrors(hipMalloc(&device_vertices, scene.vertices_count * sizeof(float))); checkCudaErrors(hipMemcpy(device_vertices, scene.vertices, scene.vertices_count * sizeof(float), hipMemcpyHostToDevice)); // Copy the scene structure into the device and its corresponding pointers to Surfaces, Area Lights and Vertices checkCudaErrors(hipMalloc(&device_scene, sizeof(Scene))); checkCudaErrors(hipMemcpy(device_scene, &scene, sizeof(Scene), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(&(device_scene->surfaces), &device_surfaces, sizeof(Surface*), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(&(device_scene->area_lights), &device_light_planes, sizeof(AreaLight*), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(&(device_scene->vertices), &device_vertices, sizeof(float*), hipMemcpyHostToDevice)); ////////////////////////////////////////////////////////////// /* Intialise cuRand State */ ////////////////////////////////////////////////////////////// hiprandState_t * d_rand_state; checkCudaErrors(hipMalloc(&d_rand_state, (float)SCREEN_HEIGHT * (float)SCREEN_WIDTH * sizeof(hiprandState_t))); hipLaunchKernelGGL(( init_rand_state), dim3(this->num_blocks), dim3(this->block_size), 0, 0, d_rand_state, SCREEN_WIDTH, SCREEN_HEIGHT); ////////////////////////////////////////////////////////////// /* Reset the file to write to */ ////////////////////////////////////////////////////////////// if (SAVE_TRAINING_STATS){ if (file_exists("../Radiance_Map_Data/nn_training_stats.txt")) std::remove("../Radiance_Map_Data/nn_training_stats.txt"); } ////////////////////////////////////////////////////////////// /* Render the frames */ ////////////////////////////////////////////////////////////// for (int i = 0; i < frames; i++){ //Clear the pixel buffer memset(host_buffer, 0.f, sizeof(vec3)* SCREEN_HEIGHT * SCREEN_WIDTH); /* Compute frame time */ static int t = SDL_GetTicks(); int t2 = SDL_GetTicks(); float dt = float(t2-t); t = t2; printf("Render Time: %.3f ms.\n", dt); // Fill the pixel buffer each frame using Deep Q-Learning strategy this->render_frame( trainer, d_rand_state, device_camera, device_scene, device_buffer, device_vertices, directions_host, ray_locations, prev_ray_locations, ray_normals, ray_directions, ray_states, ray_rewards, ray_discounts, ray_throughputs, ray_bounces, ray_vertices ); // Copy the device buffer values to the host buffer checkCudaErrors(hipMemcpy(host_buffer, device_buffer, SCREEN_HEIGHT * SCREEN_WIDTH * sizeof(vec3), hipMemcpyDeviceToHost)); // Display the rendered frame for (int x = 0; x < SCREEN_WIDTH; x++){ for (int y = 0; y < SCREEN_HEIGHT; y++){ screen.PutPixelSDL(x, y, host_buffer[x*(int)SCREEN_HEIGHT + y]); } } screen.SDL_Renderframe(); ////////////////////////////////////////////////////////////// /* Save the DQN Model */ ////////////////////////////////////////////////////////////// std::cout << "Saving model..." << std::endl; if (SAVE_MODEL){ dynet::TextFileSaver saver(fname); saver.save(model); } std::cout << "Model saved." 
<< std::endl; } ////////////////////////////////////////////////////////////// /* Save the image */ ////////////////////////////////////////////////////////////// screen.SDL_SaveImage("../Images/render.bmp"); ////////////////////////////////////////////////////////////// /* Free memory used */ ////////////////////////////////////////////////////////////// delete [] host_buffer; delete [] directions_host; hipFree(device_buffer); hipFree(d_rand_state); hipFree(ray_locations); hipFree(ray_normals); hipFree(ray_directions); hipFree(ray_states); hipFree(ray_rewards); hipFree(ray_throughputs); hipFree(ray_bounces); hipFree(device_camera); hipFree(device_surfaces); hipFree(device_light_planes); hipFree(device_vertices); hipFree(device_scene); } __host__ void NeuralQPathtracer::render_frame( dynet::AdamTrainer trainer, hiprandState_t* d_rand_state, Camera* device_camera, Scene* device_scene, vec3* device_buffer, float* device_vertices, unsigned int* directions_host, float* ray_locations, /* Ray intersection location (State) */ float* prev_ray_locations, float* ray_normals, /* Intersection normal */ float* ray_directions, /* Direction to next shoot the ray */ unsigned int* ray_states, /* Has the ray intersected with a light/nothing */ float* ray_rewards, /* Reward recieved from Q(s,a) */ float* ray_discounts, /* Discount factor for current rays path */ float* ray_throughputs, /* Throughput for calc pixel value */ unsigned int* ray_bounces, /* Total number of bounces for each ray before intersection*/ float* ray_vertices ){ // Initialise buffer to hold total throughput vec3* total_throughputs; checkCudaErrors(hipMalloc(&total_throughputs, sizeof(vec3) * SCREEN_HEIGHT * SCREEN_WIDTH)); checkCudaErrors(hipMemset(total_throughputs, 0.f, sizeof(vec3) * SCREEN_HEIGHT * SCREEN_WIDTH)); // Sample through each pixel SAMPLES_PER_PIXEL times for (int i = 0; i < SAMPLES_PER_PIXEL; i++){ // Initialise ray variables hipLaunchKernelGGL(( initialise_ray), dim3(this->num_blocks), dim3(this->block_size), 0, 0, d_rand_state, device_camera, ray_locations, prev_ray_locations, ray_directions, ray_states, ray_rewards, ray_discounts, ray_throughputs, ray_bounces ); checkCudaErrors(hipDeviceSynchronize()); // Create bool to determine if all rays in the batch have collided with a light int rays_finished = 0; /* If not updated to false by trace_ray, end loop */ int* device_rays_finished; checkCudaErrors(hipMalloc(&device_rays_finished, sizeof(int))); checkCudaErrors(hipMemset(device_rays_finished, 1, sizeof(int))); // Trace rays path until all have intersected with a light unsigned int bounces = 0; float loss = 0.f; while(rays_finished == 0 && bounces < MAX_RAY_BOUNCES){ printf("Bounce: %d/%d\n", bounces, MAX_RAY_BOUNCES); std::chrono::time_point<std::chrono::high_resolution_clock> start; if (TIMING){ // TIMER START: Sampling ray directions start = std::chrono::high_resolution_clock::now(); } // Does not apply to shooting from camera if (bounces > 0){ // Convert vertices into coordinate system centred around each rays current location // Get vertices in coordinate system surrounding current location hipLaunchKernelGGL(( convert_vertices_to_point_coord_system), dim3(this->num_blocks), dim3(this->block_size), 0, 0, ray_vertices, ray_locations, device_vertices, this->vertices_count ); hipDeviceSynchronize(); // For each batch sample Q-values and apply eta-greedy policy for(int n = 0; n < this->num_batches; n++){ // Compute Batch-Size unsigned int current_batch_size = ::min(SCREEN_HEIGHT*SCREEN_WIDTH - (n*this->ray_batch_size), 
this->ray_batch_size); // Initialise the graph dynet::ComputationGraph graph; // Formulate the expression with the state and the scenes vertices dynet::Dim input_dim; std::vector<float> input_vals; if (TRAIN_ON_POSITION){ input_dim = dynet::Dim({(unsigned int)3},current_batch_size); input_vals = std::vector<float>(3*current_batch_size); checkCudaErrors(hipMemcpy(&(input_vals[0]), &(ray_locations[n*this->ray_batch_size*3]), sizeof(float) * 3 * current_batch_size, hipMemcpyDeviceToHost)); } else{ input_dim = dynet::Dim({(unsigned int)this->vertices_count},current_batch_size); input_vals = std::vector<float>(this->vertices_count*current_batch_size); checkCudaErrors(hipMemcpy(&(input_vals[0]), &(ray_vertices[n*this->ray_batch_size*this->vertices_count]), sizeof(float) * this->vertices_count * current_batch_size, hipMemcpyDeviceToHost)); } dynet::Expression states_batch = dynet::input(graph, input_dim, input_vals); // Get the Q-values dynet::Expression prediction = this->dqn.network_inference(graph, states_batch, false); std::vector<float> current_qs = dynet::as_vector(graph.forward(prediction)); // Copy Q-vals to GPU for find the argmax float* current_qs_device; checkCudaErrors(hipMalloc(&current_qs_device, sizeof(float) * current_qs.size())); checkCudaErrors(hipMemcpy(current_qs_device, &(current_qs[0]), sizeof(float) * current_qs.size() , hipMemcpyHostToDevice)); // Setup the deivce storage for the ray direction indices unsigned int* ray_direction_indices; checkCudaErrors(hipMalloc(&ray_direction_indices, sizeof(unsigned int) * current_batch_size)); // Get direction indices (Call once for every element in the batch) int threads = 32; int blocks = (current_batch_size + (threads-1))/threads; hipLaunchKernelGGL(( sample_batch_ray_directions_epsilon_greedy), dim3(blocks), dim3(threads), 0, 0, this->epsilon, d_rand_state, ray_direction_indices, current_qs_device, ray_directions, ray_locations, ray_normals, ray_throughputs, ray_states, (n*this->ray_batch_size) ); hipDeviceSynchronize(); // Copy over ray_direction_indices to the host checkCudaErrors(hipMemcpy(&(directions_host[ this->ray_batch_size * n ]), ray_direction_indices, sizeof(unsigned int) * current_batch_size, hipMemcpyDeviceToHost)); // Free memory hipFree(ray_direction_indices); hipFree(current_qs_device); } } if (TIMING){ // TIMER END: Sampling ray directions auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> elapsed = end - start; std::cout << "Sampling Ray Dir Time: " << elapsed.count() << "s" << std::endl; } if (TIMING){ // TIMER START: Tracing rays start = std::chrono::high_resolution_clock::now(); } // Trace the rays in their set directions hipLaunchKernelGGL(( trace_ray), dim3(this->num_blocks), dim3(this->block_size), 0, 0, device_scene, d_rand_state, device_rays_finished, ray_locations, prev_ray_locations, ray_normals, ray_directions, ray_states, ray_rewards, ray_discounts, ray_throughputs, ray_bounces, bounces ); hipDeviceSynchronize(); if (TIMING){ // TIMER END: Tracing rays auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> elapsed = end - start; std::cout << "Tracing Rays Time: " << elapsed.count() << "s" << std::endl; } if (TIMING){ // TIMER START: Train network start = std::chrono::high_resolution_clock::now(); } // Does not apply to shooting from camera //TODO: We are current backpropogating rays that have terminated continually, this is bad if(bounces > 0){ // Make a copy of the old converted vertices float* prev_ray_vertices; 
checkCudaErrors(hipMalloc(&prev_ray_vertices, sizeof(float) * SCREEN_HEIGHT * SCREEN_WIDTH * this->vertices_count)); checkCudaErrors(hipMemcpy(prev_ray_vertices, ray_vertices, sizeof(float) * SCREEN_HEIGHT * SCREEN_WIDTH * this->vertices_count, hipMemcpyDeviceToDevice)); // Convert vertices into coordinate system centred around each rays current location // Get vertices in coordinate system surrounding current location hipLaunchKernelGGL(( convert_vertices_to_point_coord_system), dim3(this->num_blocks), dim3(this->block_size), 0, 0, ray_vertices, ray_locations, device_vertices, this->vertices_count ); hipDeviceSynchronize(); // Run learning rule on the network with the results received and sample new direction for each ray in batches for(int n = 0; n < this->num_batches; n++){ dynet::ComputationGraph graph; // 1) Create the input expression to the neural network for S_t+1 unsigned int current_batch_size = ::min(SCREEN_HEIGHT*SCREEN_WIDTH - (n*this->ray_batch_size), this->ray_batch_size); dynet::Dim input_dim; std::vector<float> input_vals; if (TRAIN_ON_POSITION){ input_dim = dynet::Dim({(unsigned int)3},current_batch_size); input_vals = std::vector<float>(3*current_batch_size); checkCudaErrors(hipMemcpy(&(input_vals[0]), &(ray_locations[n*this->ray_batch_size*3]), sizeof(float) * 3 * current_batch_size, hipMemcpyDeviceToHost)); } else{ input_dim = dynet::Dim({(unsigned int)this->vertices_count},current_batch_size); input_vals = std::vector<float>(this->vertices_count*current_batch_size); checkCudaErrors(hipMemcpy(&(input_vals[0]), &(ray_vertices[n*this->ray_batch_size*this->vertices_count]), sizeof(float) * this->vertices_count * current_batch_size, hipMemcpyDeviceToHost)); } dynet::Expression input_batch = dynet::input(graph, input_dim, input_vals); // 2) Get max_a Q(S_{t+1}, a) dynet::Expression next_qs_expr = this->dqn.network_inference(graph, input_batch, false); std::vector<float> next_qs = dynet::as_vector(graph.forward(next_qs_expr)); // 3) Compute TD-Targets float* next_qs_device; checkCudaErrors(hipMalloc(&next_qs_device, sizeof(float) * current_batch_size * GRID_RESOLUTION * GRID_RESOLUTION)); checkCudaErrors(hipMemcpy(next_qs_device, &(next_qs[0]), sizeof(float) * current_batch_size * GRID_RESOLUTION * GRID_RESOLUTION, hipMemcpyHostToDevice)); float* td_targets_device; checkCudaErrors(hipMalloc(&td_targets_device, sizeof(float) * current_batch_size)); int threads = 32; int blocks = int((current_batch_size + (threads-1))/threads); hipLaunchKernelGGL(( compute_td_targets), dim3(blocks), dim3(threads), 0, 0, d_rand_state, next_qs_device, td_targets_device, ray_locations, ray_normals, ray_rewards, ray_discounts, ray_states, (n*this->ray_batch_size) ); hipDeviceSynchronize(); hipFree(next_qs_device); std::vector<float> td_targets(current_batch_size); checkCudaErrors(hipMemcpy(&(td_targets[0]), td_targets_device, sizeof(float) * current_batch_size, hipMemcpyDeviceToHost)); hipFree(td_targets_device); // 4) Reset computational graph and use target_value as a constant graph.clear(); dynet::Expression td_target = dynet::input(graph, dynet::Dim({1}, current_batch_size), td_targets); // // 5) Get current Q(s,a) value // Formulate the expression with the state and the scenes vertices std::vector<float> input_curr_state; if (TRAIN_ON_POSITION){ input_dim = dynet::Dim({(unsigned int)3},current_batch_size); input_curr_state = std::vector<float>(3*current_batch_size); checkCudaErrors(hipMemcpy(&(input_curr_state[0]), &(prev_ray_locations[n*this->ray_batch_size*3]), sizeof(float) * 3 * 
current_batch_size, hipMemcpyDeviceToHost)); } else{ input_dim = dynet::Dim({(unsigned int)this->vertices_count},current_batch_size); input_curr_state = std::vector<float>(this->vertices_count*current_batch_size); checkCudaErrors(hipMemcpy(&(input_curr_state[0]), &(prev_ray_vertices[n*this->ray_batch_size*this->vertices_count]), sizeof(float) * this->vertices_count * current_batch_size, hipMemcpyDeviceToHost)); } // std::vector<float> input_curr_state(this->vertices_count*current_batch_size); // checkCudaErrors(hipMemcpy(&(input_curr_state[0]), &(prev_ray_vertices[n*this->ray_batch_size*this->vertices_count]), sizeof(float) * this->vertices_count * current_batch_size, hipMemcpyDeviceToHost)); dynet::Expression states_batch = dynet::input(graph, input_dim, input_curr_state); dynet::Expression prediction_qs = this->dqn.network_inference(graph, states_batch, true); // Get the vector of action value indices we took std::vector<unsigned int> action_value_indices(current_batch_size); memcpy(&(action_value_indices[0]), &directions_host[this->ray_batch_size*n], sizeof(unsigned int) * current_batch_size); // Get the current Q values for the actions taken dynet::Expression current_qs = dynet::pick(prediction_qs, action_value_indices, (unsigned int) 0); // 6) Calculate the loss dynet::Expression loss_expr = dynet::pow((td_target - current_qs), dynet::input(graph, 2.f)); loss_expr = dynet::sum_batches(loss_expr); loss += dynet::as_scalar(graph.forward(loss_expr)); // 7) Train the network graph.backward(loss_expr); trainer.update(); } hipFree(prev_ray_vertices); } // Sample a new direction for rays which have reached the terminal state hipLaunchKernelGGL(( sample_random_scene_pos_for_terminated_rays), dim3(this->num_blocks), dim3(this->block_size), 0, 0, device_scene, d_rand_state, ray_normals, ray_locations, ray_states ); hipDeviceSynchronize(); if (TIMING){ // TIMER END: Train network auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> elapsed = end - start; std::cout << "Train Network Time: " << elapsed.count() << "s" << std::endl; } // Copy over value to check if all rays have intersected with a light checkCudaErrors(hipMemcpy(&rays_finished, device_rays_finished, sizeof(int), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemset(device_rays_finished, 1, sizeof(int))); // Increment the number of bounces bounces++; } // Update epsilon this->epsilon = ::max( this->epsilon - EPSILON_DECAY, EPSILON_MIN); printf("epsilon: %.3f\n", this->epsilon); printf("loss: %.3f\n",loss); // Calculate the average path length int* total_path_lengths_device; checkCudaErrors(hipMalloc(&total_path_lengths_device, sizeof(int))); checkCudaErrors(hipMemset(total_path_lengths_device, 0, sizeof(int))); hipLaunchKernelGGL(( sum_path_lengths), dim3(this->num_blocks), dim3(this->block_size), 0, 0, total_path_lengths_device, ray_bounces ); int total_path_lengths = 0; checkCudaErrors(hipMemcpy(&total_path_lengths, total_path_lengths_device, sizeof(int), hipMemcpyDeviceToHost)); float avg_path_length = total_path_lengths/((float)SCREEN_HEIGHT*(float)SCREEN_WIDTH); std::cout << "Avg Path Length: " << avg_path_length << std::endl; hipFree(total_path_lengths_device); // Calculate the number of zero-contribution light paths by throughputs below THROUGHPUT_THRESHOLD int* total_zero_contribution_light_paths; checkCudaErrors(hipMalloc(&total_zero_contribution_light_paths, sizeof(int))); checkCudaErrors(hipMemset(total_zero_contribution_light_paths, 0, sizeof(int))); hipLaunchKernelGGL(( 
sum_zero_contribution_light_paths), dim3(this->num_blocks), dim3(this->block_size), 0, 0, total_zero_contribution_light_paths, ray_throughputs ); int total_zclp = 0; checkCudaErrors(hipMemcpy(&total_zclp, total_zero_contribution_light_paths, sizeof(int), hipMemcpyDeviceToHost)); std::cout << "Total zero contribution light paths: " << total_zclp << std::endl; hipFree(total_zero_contribution_light_paths); // Save collected stats to the file if (SAVE_TRAINING_STATS){ std::ofstream stats_file; stats_file.open("../Radiance_Map_Data/nn_training_stats.txt", std::ios::app); stats_file << avg_path_length << " " << loss << " " << total_zclp << "\n"; stats_file.close(); } // Add computed throughput values to the running total hipLaunchKernelGGL(( update_total_throughput), dim3(this->num_blocks), dim3(this->block_size), 0, 0, ray_throughputs, total_throughputs ); hipDeviceSynchronize(); hipFree(device_rays_finished); } // Update the device_buffer with the throughput hipLaunchKernelGGL(( update_device_buffer), dim3(this->num_blocks), dim3(this->block_size), 0, 0, device_buffer, total_throughputs ); hipDeviceSynchronize(); hipFree(total_throughputs); } // Gets the initial direction to shoot a ray in __global__ void initialise_ray( hiprandState_t* d_rand_state, Camera* device_camera, float* ray_locations, float* prev_ray_locations, float* ray_directions, unsigned int* ray_states, float* ray_rewards, float* ray_discounts, float* ray_throughputs, unsigned int* ray_bounces ){ // Ray index int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int i = SCREEN_HEIGHT*x + y; // Randomly sample a ray within the pixel Ray r = Ray::sample_ray_through_pixel(d_rand_state, *device_camera, x, y); ray_locations[(i*3) ] = r.start.x; ray_locations[(i*3) + 1] = r.start.y; ray_locations[(i*3) + 2] = r.start.z; prev_ray_locations[(i*3) ] = r.start.x; prev_ray_locations[(i*3) + 1] = r.start.y; prev_ray_locations[(i*3) + 2] = r.start.z; ray_directions[(i*3) ] = r.direction.x; ray_directions[(i*3) + 1] = r.direction.y; ray_directions[(i*3) + 2] = r.direction.z; // Initialise ray_variables ray_rewards[i] = 0.f; ray_states[i] = 0; ray_throughputs[(i*3) ] = 1.f; ray_throughputs[(i*3) + 1] = 1.f; ray_throughputs[(i*3) + 2] = 1.f; ray_discounts[i] = 1.f; ray_bounces[i] = MAX_RAY_BOUNCES; } // Trace a ray for all ray locations given in the angles specified within the scene __global__ void trace_ray( Scene* scene, hiprandState_t* d_rand_state, int* rays_finished, float* ray_locations, float* prev_ray_locations, float* ray_normals, float* ray_directions, unsigned int* ray_states, float* ray_rewards, float* ray_discounts, float* ray_throughputs, unsigned int* ray_bounces, int bounces ){ // Ray index int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int i = SCREEN_HEIGHT*x + y; // For the current ray, get its next state by shooting a ray in the direction stored in ray_directions vec3 position = vec3(ray_locations[(i*3)], ray_locations[(i*3)+1], ray_locations[(i*3)+2]); vec3 dir = vec3(ray_directions[(i*3)], ray_directions[(i*3)+1], ray_directions[(i*3)+2]); // Create the ray and trace it Ray ray(vec4(position + (dir * 0.00001f), 1.f), vec4(dir, 1.f)); ray.closest_intersection(scene); // Update position, normal, and discount factor based on intersection switch(ray.intersection.intersection_type){ // TERMINAL STATE: R_(t+1) = Environment light power case NOTHING: ray_rewards[i] = 0.f; ray_discounts[i] = 0.f; if ( ray_states[i] == 0 ){ ray_throughputs[(i*3)] 
= ray_throughputs[(i*3)] * ENVIRONMENT_LIGHT; ray_throughputs[(i*3)+1] = ray_throughputs[(i*3)+1] * ENVIRONMENT_LIGHT; ray_throughputs[(i*3)+2] = ray_throughputs[(i*3)+2] * ENVIRONMENT_LIGHT; ray_bounces[i] = (unsigned int)bounces; } ray_states[i] = 1; break; // TERMINAL STATE: R_(t+1) = Area light power case AREA_LIGHT: float diffuse_light_power = scene->area_lights[ray.intersection.index].luminance; ray_rewards[i] = diffuse_light_power*200.f; ray_discounts[i] = 0.f; if ( ray_states[i] == 0 ){ vec3 diffuse_p = scene->area_lights[ray.intersection.index].diffuse_p; ray_throughputs[(i*3)] = ray_throughputs[(i*3)] * diffuse_p.x; ray_throughputs[(i*3)+1] = ray_throughputs[(i*3)+1] * diffuse_p.y; ray_throughputs[(i*3)+2] = ray_throughputs[(i*3)+2] * diffuse_p.z; ray_bounces[i] = (unsigned int)bounces; } ray_states[i] = 1; break; // NON-TERMINAL STATE: R_(t+1) + \gamma * max_a Q(S_t+1, a) // where R_(t+1) = 0 for diffuse surfaces case SURFACE: vec3 new_loc = vec3(ray.intersection.position); prev_ray_locations[(i*3) ] = ray_locations[(i*3) ]; prev_ray_locations[(i*3)+1] = ray_locations[(i*3)+1]; prev_ray_locations[(i*3)+2] = ray_locations[(i*3)+2]; ray_locations[(i*3) ] = new_loc.x; ray_locations[(i*3)+1] = new_loc.y; ray_locations[(i*3)+2] = new_loc.z; vec3 new_norm = ray.intersection.normal; ray_normals[(i*3) ] = new_norm.x; ray_normals[(i*3)+1] = new_norm.y; ray_normals[(i*3)+2] = new_norm.z; vec3 BRDF = scene->surfaces[ray.intersection.index].material.diffuse_c; // Get luminance of material float max_rgb = max(BRDF.x, BRDF.y); max_rgb = max(BRDF.z, max_rgb); float min_rgb = min(BRDF.x, BRDF.y); min_rgb = min(BRDF.z, min_rgb); float luminance = 0.5f * (max_rgb + min_rgb); // discount_factors holds cos_theta currently, update rgb throughput first if ( ray_states[i] == 0 ){ ray_throughputs[(i*3)] = ray_throughputs[(i*3)] * (BRDF.x / (float)M_PI); ray_throughputs[(i*3)+1] = ray_throughputs[(i*3)+1] * (BRDF.y / (float)M_PI); ray_throughputs[(i*3)+2] = ray_throughputs[(i*3)+2] * (BRDF.z / (float)M_PI); } // Now update discount_factors with luminance ray_rewards[i] = 0.f; ray_discounts[i] = luminance; // Still a ray being to bounce, so not finished if ( ray_states[i] == 0 ){ atomicExch(rays_finished, 0); } break; } }
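// Summary of the per-ray bookkeeping performed by trace_ray above:
//   ray_states[i] == 0 -> path still active, == 1 -> path terminated.
//   NOTHING    : terminal; reward 0, discount 0, throughput scaled by ENVIRONMENT_LIGHT.
//   AREA_LIGHT : terminal; reward = light luminance * 200, discount 0,
//                throughput scaled by the light's diffuse power.
//   SURFACE    : non-terminal; reward 0, discount = material luminance
//                (0.5 * (max + min) of the diffuse RGB), throughput scaled by
//                BRDF / pi, and any still-active ray clears *rays_finished so
//                the outer bounce loop continues.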
03f442170b6763b379d4e278d7011678fb00bb2d.cu
#include "neural_q_pathtracer.cuh" __host__ NeuralQPathtracer::NeuralQPathtracer( unsigned int frames, unsigned int batch_size, SDLScreen& screen, Scene& scene, Camera& camera, int argc, char** argv ){ ////////////////////////////////////////////////////////////// /* Estimate Render Time */ ////////////////////////////////////////////////////////////// const float time_per_bounce = 2.f; std::cout << "Weakly Estimated Frame Render Time: " << time_per_bounce * (float)MAX_RAY_BOUNCES * (float)SAMPLES_PER_PIXEL << "s" << std::endl; ////////////////////////////////////////////////////////////// /* Assign attributes */ ////////////////////////////////////////////////////////////// this->epsilon = EPSILON_START; this->vertices_count = scene.vertices_count; this->ray_batch_size = batch_size; /* How many rays to be processed at once */ this->num_batches = (SCREEN_HEIGHT*SCREEN_WIDTH + (batch_size -1))/batch_size; /* How many batches in total */ printf("Batch Size: %d\n", batch_size); printf("Number of Batches: %d\n", num_batches); dim3 b_size(8,8); this->block_size = b_size; /* How many threads in a single block to process the screen*/ int blocks_x = (SCREEN_WIDTH + this->block_size.x - 1)/this->block_size.x; int blocks_y = (SCREEN_HEIGHT + this->block_size.y - 1)/this->block_size.y; dim3 n_bs(blocks_x, blocks_y); this->num_blocks = n_bs;/* How many blocks to process all pixels on the screen */ ////////////////////////////////////////////////////////////// /* Initialise the DQN */ ////////////////////////////////////////////////////////////// //TODO: Might have to specify the amount of memory the GPU can use // beforehand, otherwise it seems to assign over memory allocated later // on. It may continue to do this when calculating back&forwad prop auto dyparams = dynet::extract_dynet_params(argc, argv); dynet::initialize(dyparams); dynet::ParameterCollection model; dynet::AdamTrainer trainer(model); this->dqn = DQNetwork(); unsigned int input_dim = TRAIN_ON_POSITION ? 
3 : this->vertices_count; this->dqn.initialize(model, input_dim /* Input dims */, GRID_RESOLUTION*GRID_RESOLUTION /* Output dims */); ////////////////////////////////////////////////////////////// /* Load the previous DQN Model */ ////////////////////////////////////////////////////////////// std::string fname = "../Radiance_Map_Data/deep_q_learning_12_12.model"; if (LOAD_MODEL && file_exists(fname)){ dynet::TextFileLoader loader(fname); loader.populate(model); } ////////////////////////////////////////////////////////////// /* Intialise Pixel value buffers */ ////////////////////////////////////////////////////////////// vec3* host_buffer = new vec3[ SCREEN_HEIGHT * SCREEN_WIDTH ]; vec3* device_buffer; checkCudaErrors(cudaMalloc(&device_buffer, SCREEN_HEIGHT * SCREEN_WIDTH * sizeof(vec3))); ////////////////////////////////////////////////////////////// /* Initialise Prev Host buffers */ ////////////////////////////////////////////////////////////// unsigned int* directions_host = new unsigned int[ SCREEN_HEIGHT * SCREEN_WIDTH ]; ////////////////////////////////////////////////////////////// /* Initialise ray arrays on CUDA device */ ////////////////////////////////////////////////////////////// float* ray_locations; /* Ray intersection location (State) */ float* prev_ray_locations; /* Location the ray was previously sampled from */ float* ray_normals; /* Intersection normal */ float* ray_directions; /* Direction to next shoot the ray */ unsigned int* ray_states; /* Ray can either be in the process of being traced, terminated, or post tracing for learning */ float* ray_rewards; /* Reward recieved from Q(s,a) */ float* ray_discounts; /* Discount factor for current rays path */ float* ray_throughputs; /* Throughput for calc pixel value */ unsigned int* ray_bounces; /* Total number of bounces for each ray before intersection*/ float* ray_vertices; /* Stores vertices in coordinate system centered at the current ray position */ checkCudaErrors(cudaMalloc(&ray_locations, sizeof(float) * 3 * SCREEN_HEIGHT * SCREEN_WIDTH)); checkCudaErrors(cudaMalloc(&prev_ray_locations, sizeof(float) * 3 * SCREEN_HEIGHT * SCREEN_WIDTH)); checkCudaErrors(cudaMalloc(&ray_normals, sizeof(float) * 3 * SCREEN_HEIGHT * SCREEN_WIDTH)); checkCudaErrors(cudaMalloc(&ray_directions, sizeof(float) * 3 * SCREEN_HEIGHT * SCREEN_WIDTH)); checkCudaErrors(cudaMalloc(&ray_states, sizeof(unsigned int) * SCREEN_HEIGHT * SCREEN_WIDTH)); checkCudaErrors(cudaMalloc(&ray_rewards, sizeof(float) * SCREEN_HEIGHT * SCREEN_WIDTH)); checkCudaErrors(cudaMalloc(&ray_discounts, sizeof(float) * SCREEN_HEIGHT * SCREEN_WIDTH)); checkCudaErrors(cudaMalloc(&ray_throughputs, sizeof(float) * 3 * SCREEN_HEIGHT * SCREEN_WIDTH)); checkCudaErrors(cudaMalloc(&ray_bounces, sizeof(unsigned int) *SCREEN_HEIGHT *SCREEN_WIDTH)); checkCudaErrors(cudaMalloc(&ray_vertices, sizeof(float) * this->vertices_count * SCREEN_HEIGHT * SCREEN_WIDTH)); Camera* device_camera; /* Camera on the CUDA device */ Surface* device_surfaces; AreaLight* device_light_planes; float* device_vertices; Scene* device_scene; /* Scene to render */ // Copy the camera checkCudaErrors(cudaMalloc(&device_camera, sizeof(Camera))); checkCudaErrors(cudaMemcpy(device_camera, &camera, sizeof(Camera), cudaMemcpyHostToDevice)); // Copy surfaces into device memory space checkCudaErrors(cudaMalloc(&device_surfaces, scene.surfaces_count * sizeof(Surface))); checkCudaErrors(cudaMemcpy(device_surfaces, scene.surfaces, scene.surfaces_count * sizeof(Surface), cudaMemcpyHostToDevice)); // Copy light planes into 
device memory space checkCudaErrors(cudaMalloc(&device_light_planes, scene.area_light_count * sizeof(AreaLight))); checkCudaErrors(cudaMemcpy(device_light_planes, scene.area_lights, scene.area_light_count * sizeof(AreaLight), cudaMemcpyHostToDevice)); // Copy vertices into device memory space checkCudaErrors(cudaMalloc(&device_vertices, scene.vertices_count * sizeof(float))); checkCudaErrors(cudaMemcpy(device_vertices, scene.vertices, scene.vertices_count * sizeof(float), cudaMemcpyHostToDevice)); // Copy the scene structure into the device and its corresponding pointers to Surfaces, Area Lights and Vertices checkCudaErrors(cudaMalloc(&device_scene, sizeof(Scene))); checkCudaErrors(cudaMemcpy(device_scene, &scene, sizeof(Scene), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(&(device_scene->surfaces), &device_surfaces, sizeof(Surface*), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(&(device_scene->area_lights), &device_light_planes, sizeof(AreaLight*), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(&(device_scene->vertices), &device_vertices, sizeof(float*), cudaMemcpyHostToDevice)); ////////////////////////////////////////////////////////////// /* Intialise cuRand State */ ////////////////////////////////////////////////////////////// curandState * d_rand_state; checkCudaErrors(cudaMalloc(&d_rand_state, (float)SCREEN_HEIGHT * (float)SCREEN_WIDTH * sizeof(curandState))); init_rand_state<<<this->num_blocks, this->block_size>>>(d_rand_state, SCREEN_WIDTH, SCREEN_HEIGHT); ////////////////////////////////////////////////////////////// /* Reset the file to write to */ ////////////////////////////////////////////////////////////// if (SAVE_TRAINING_STATS){ if (file_exists("../Radiance_Map_Data/nn_training_stats.txt")) std::remove("../Radiance_Map_Data/nn_training_stats.txt"); } ////////////////////////////////////////////////////////////// /* Render the frames */ ////////////////////////////////////////////////////////////// for (int i = 0; i < frames; i++){ //Clear the pixel buffer memset(host_buffer, 0.f, sizeof(vec3)* SCREEN_HEIGHT * SCREEN_WIDTH); /* Compute frame time */ static int t = SDL_GetTicks(); int t2 = SDL_GetTicks(); float dt = float(t2-t); t = t2; printf("Render Time: %.3f ms.\n", dt); // Fill the pixel buffer each frame using Deep Q-Learning strategy this->render_frame( trainer, d_rand_state, device_camera, device_scene, device_buffer, device_vertices, directions_host, ray_locations, prev_ray_locations, ray_normals, ray_directions, ray_states, ray_rewards, ray_discounts, ray_throughputs, ray_bounces, ray_vertices ); // Copy the device buffer values to the host buffer checkCudaErrors(cudaMemcpy(host_buffer, device_buffer, SCREEN_HEIGHT * SCREEN_WIDTH * sizeof(vec3), cudaMemcpyDeviceToHost)); // Display the rendered frame for (int x = 0; x < SCREEN_WIDTH; x++){ for (int y = 0; y < SCREEN_HEIGHT; y++){ screen.PutPixelSDL(x, y, host_buffer[x*(int)SCREEN_HEIGHT + y]); } } screen.SDL_Renderframe(); ////////////////////////////////////////////////////////////// /* Save the DQN Model */ ////////////////////////////////////////////////////////////// std::cout << "Saving model..." << std::endl; if (SAVE_MODEL){ dynet::TextFileSaver saver(fname); saver.save(model); } std::cout << "Model saved." 
<< std::endl; } ////////////////////////////////////////////////////////////// /* Save the image */ ////////////////////////////////////////////////////////////// screen.SDL_SaveImage("../Images/render.bmp"); ////////////////////////////////////////////////////////////// /* Free memory used */ ////////////////////////////////////////////////////////////// delete [] host_buffer; delete [] directions_host; cudaFree(device_buffer); cudaFree(d_rand_state); cudaFree(ray_locations); cudaFree(ray_normals); cudaFree(ray_directions); cudaFree(ray_states); cudaFree(ray_rewards); cudaFree(ray_throughputs); cudaFree(ray_bounces); cudaFree(device_camera); cudaFree(device_surfaces); cudaFree(device_light_planes); cudaFree(device_vertices); cudaFree(device_scene); } __host__ void NeuralQPathtracer::render_frame( dynet::AdamTrainer trainer, curandState* d_rand_state, Camera* device_camera, Scene* device_scene, vec3* device_buffer, float* device_vertices, unsigned int* directions_host, float* ray_locations, /* Ray intersection location (State) */ float* prev_ray_locations, float* ray_normals, /* Intersection normal */ float* ray_directions, /* Direction to next shoot the ray */ unsigned int* ray_states, /* Has the ray intersected with a light/nothing */ float* ray_rewards, /* Reward recieved from Q(s,a) */ float* ray_discounts, /* Discount factor for current rays path */ float* ray_throughputs, /* Throughput for calc pixel value */ unsigned int* ray_bounces, /* Total number of bounces for each ray before intersection*/ float* ray_vertices ){ // Initialise buffer to hold total throughput vec3* total_throughputs; checkCudaErrors(cudaMalloc(&total_throughputs, sizeof(vec3) * SCREEN_HEIGHT * SCREEN_WIDTH)); checkCudaErrors(cudaMemset(total_throughputs, 0.f, sizeof(vec3) * SCREEN_HEIGHT * SCREEN_WIDTH)); // Sample through each pixel SAMPLES_PER_PIXEL times for (int i = 0; i < SAMPLES_PER_PIXEL; i++){ // Initialise ray variables initialise_ray<<<this->num_blocks, this->block_size>>>( d_rand_state, device_camera, ray_locations, prev_ray_locations, ray_directions, ray_states, ray_rewards, ray_discounts, ray_throughputs, ray_bounces ); checkCudaErrors(cudaDeviceSynchronize()); // Create bool to determine if all rays in the batch have collided with a light int rays_finished = 0; /* If not updated to false by trace_ray, end loop */ int* device_rays_finished; checkCudaErrors(cudaMalloc(&device_rays_finished, sizeof(int))); checkCudaErrors(cudaMemset(device_rays_finished, 1, sizeof(int))); // Trace rays path until all have intersected with a light unsigned int bounces = 0; float loss = 0.f; while(rays_finished == 0 && bounces < MAX_RAY_BOUNCES){ printf("Bounce: %d/%d\n", bounces, MAX_RAY_BOUNCES); std::chrono::time_point<std::chrono::high_resolution_clock> start; if (TIMING){ // TIMER START: Sampling ray directions start = std::chrono::high_resolution_clock::now(); } // Does not apply to shooting from camera if (bounces > 0){ // Convert vertices into coordinate system centred around each rays current location // Get vertices in coordinate system surrounding current location convert_vertices_to_point_coord_system<<<this->num_blocks, this->block_size>>>( ray_vertices, ray_locations, device_vertices, this->vertices_count ); cudaDeviceSynchronize(); // For each batch sample Q-values and apply eta-greedy policy for(int n = 0; n < this->num_batches; n++){ // Compute Batch-Size unsigned int current_batch_size = std::min(SCREEN_HEIGHT*SCREEN_WIDTH - (n*this->ray_batch_size), this->ray_batch_size); // Initialise the graph 
dynet::ComputationGraph graph; // Formulate the expression with the state and the scenes vertices dynet::Dim input_dim; std::vector<float> input_vals; if (TRAIN_ON_POSITION){ input_dim = dynet::Dim({(unsigned int)3},current_batch_size); input_vals = std::vector<float>(3*current_batch_size); checkCudaErrors(cudaMemcpy(&(input_vals[0]), &(ray_locations[n*this->ray_batch_size*3]), sizeof(float) * 3 * current_batch_size, cudaMemcpyDeviceToHost)); } else{ input_dim = dynet::Dim({(unsigned int)this->vertices_count},current_batch_size); input_vals = std::vector<float>(this->vertices_count*current_batch_size); checkCudaErrors(cudaMemcpy(&(input_vals[0]), &(ray_vertices[n*this->ray_batch_size*this->vertices_count]), sizeof(float) * this->vertices_count * current_batch_size, cudaMemcpyDeviceToHost)); } dynet::Expression states_batch = dynet::input(graph, input_dim, input_vals); // Get the Q-values dynet::Expression prediction = this->dqn.network_inference(graph, states_batch, false); std::vector<float> current_qs = dynet::as_vector(graph.forward(prediction)); // Copy Q-vals to GPU for find the argmax float* current_qs_device; checkCudaErrors(cudaMalloc(&current_qs_device, sizeof(float) * current_qs.size())); checkCudaErrors(cudaMemcpy(current_qs_device, &(current_qs[0]), sizeof(float) * current_qs.size() , cudaMemcpyHostToDevice)); // Setup the deivce storage for the ray direction indices unsigned int* ray_direction_indices; checkCudaErrors(cudaMalloc(&ray_direction_indices, sizeof(unsigned int) * current_batch_size)); // Get direction indices (Call once for every element in the batch) int threads = 32; int blocks = (current_batch_size + (threads-1))/threads; sample_batch_ray_directions_epsilon_greedy<<<blocks, threads>>>( this->epsilon, d_rand_state, ray_direction_indices, current_qs_device, ray_directions, ray_locations, ray_normals, ray_throughputs, ray_states, (n*this->ray_batch_size) ); cudaDeviceSynchronize(); // Copy over ray_direction_indices to the host checkCudaErrors(cudaMemcpy(&(directions_host[ this->ray_batch_size * n ]), ray_direction_indices, sizeof(unsigned int) * current_batch_size, cudaMemcpyDeviceToHost)); // Free memory cudaFree(ray_direction_indices); cudaFree(current_qs_device); } } if (TIMING){ // TIMER END: Sampling ray directions auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> elapsed = end - start; std::cout << "Sampling Ray Dir Time: " << elapsed.count() << "s" << std::endl; } if (TIMING){ // TIMER START: Tracing rays start = std::chrono::high_resolution_clock::now(); } // Trace the rays in their set directions trace_ray<<<this->num_blocks, this->block_size>>>( device_scene, d_rand_state, device_rays_finished, ray_locations, prev_ray_locations, ray_normals, ray_directions, ray_states, ray_rewards, ray_discounts, ray_throughputs, ray_bounces, bounces ); cudaDeviceSynchronize(); if (TIMING){ // TIMER END: Tracing rays auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> elapsed = end - start; std::cout << "Tracing Rays Time: " << elapsed.count() << "s" << std::endl; } if (TIMING){ // TIMER START: Train network start = std::chrono::high_resolution_clock::now(); } // Does not apply to shooting from camera //TODO: We are current backpropogating rays that have terminated continually, this is bad if(bounces > 0){ // Make a copy of the old converted vertices float* prev_ray_vertices; checkCudaErrors(cudaMalloc(&prev_ray_vertices, sizeof(float) * SCREEN_HEIGHT * SCREEN_WIDTH * this->vertices_count)); 
checkCudaErrors(cudaMemcpy(prev_ray_vertices, ray_vertices, sizeof(float) * SCREEN_HEIGHT * SCREEN_WIDTH * this->vertices_count, cudaMemcpyDeviceToDevice)); // Convert vertices into coordinate system centred around each rays current location // Get vertices in coordinate system surrounding current location convert_vertices_to_point_coord_system<<<this->num_blocks, this->block_size>>>( ray_vertices, ray_locations, device_vertices, this->vertices_count ); cudaDeviceSynchronize(); // Run learning rule on the network with the results received and sample new direction for each ray in batches for(int n = 0; n < this->num_batches; n++){ dynet::ComputationGraph graph; // 1) Create the input expression to the neural network for S_t+1 unsigned int current_batch_size = std::min(SCREEN_HEIGHT*SCREEN_WIDTH - (n*this->ray_batch_size), this->ray_batch_size); dynet::Dim input_dim; std::vector<float> input_vals; if (TRAIN_ON_POSITION){ input_dim = dynet::Dim({(unsigned int)3},current_batch_size); input_vals = std::vector<float>(3*current_batch_size); checkCudaErrors(cudaMemcpy(&(input_vals[0]), &(ray_locations[n*this->ray_batch_size*3]), sizeof(float) * 3 * current_batch_size, cudaMemcpyDeviceToHost)); } else{ input_dim = dynet::Dim({(unsigned int)this->vertices_count},current_batch_size); input_vals = std::vector<float>(this->vertices_count*current_batch_size); checkCudaErrors(cudaMemcpy(&(input_vals[0]), &(ray_vertices[n*this->ray_batch_size*this->vertices_count]), sizeof(float) * this->vertices_count * current_batch_size, cudaMemcpyDeviceToHost)); } dynet::Expression input_batch = dynet::input(graph, input_dim, input_vals); // 2) Get max_a Q(S_{t+1}, a) dynet::Expression next_qs_expr = this->dqn.network_inference(graph, input_batch, false); std::vector<float> next_qs = dynet::as_vector(graph.forward(next_qs_expr)); // 3) Compute TD-Targets float* next_qs_device; checkCudaErrors(cudaMalloc(&next_qs_device, sizeof(float) * current_batch_size * GRID_RESOLUTION * GRID_RESOLUTION)); checkCudaErrors(cudaMemcpy(next_qs_device, &(next_qs[0]), sizeof(float) * current_batch_size * GRID_RESOLUTION * GRID_RESOLUTION, cudaMemcpyHostToDevice)); float* td_targets_device; checkCudaErrors(cudaMalloc(&td_targets_device, sizeof(float) * current_batch_size)); int threads = 32; int blocks = int((current_batch_size + (threads-1))/threads); compute_td_targets<<<blocks, threads>>>( d_rand_state, next_qs_device, td_targets_device, ray_locations, ray_normals, ray_rewards, ray_discounts, ray_states, (n*this->ray_batch_size) ); cudaDeviceSynchronize(); cudaFree(next_qs_device); std::vector<float> td_targets(current_batch_size); checkCudaErrors(cudaMemcpy(&(td_targets[0]), td_targets_device, sizeof(float) * current_batch_size, cudaMemcpyDeviceToHost)); cudaFree(td_targets_device); // 4) Reset computational graph and use target_value as a constant graph.clear(); dynet::Expression td_target = dynet::input(graph, dynet::Dim({1}, current_batch_size), td_targets); // // 5) Get current Q(s,a) value // Formulate the expression with the state and the scenes vertices std::vector<float> input_curr_state; if (TRAIN_ON_POSITION){ input_dim = dynet::Dim({(unsigned int)3},current_batch_size); input_curr_state = std::vector<float>(3*current_batch_size); checkCudaErrors(cudaMemcpy(&(input_curr_state[0]), &(prev_ray_locations[n*this->ray_batch_size*3]), sizeof(float) * 3 * current_batch_size, cudaMemcpyDeviceToHost)); } else{ input_dim = dynet::Dim({(unsigned int)this->vertices_count},current_batch_size); input_curr_state = 
std::vector<float>(this->vertices_count*current_batch_size); checkCudaErrors(cudaMemcpy(&(input_curr_state[0]), &(prev_ray_vertices[n*this->ray_batch_size*this->vertices_count]), sizeof(float) * this->vertices_count * current_batch_size, cudaMemcpyDeviceToHost)); } // std::vector<float> input_curr_state(this->vertices_count*current_batch_size); // checkCudaErrors(cudaMemcpy(&(input_curr_state[0]), &(prev_ray_vertices[n*this->ray_batch_size*this->vertices_count]), sizeof(float) * this->vertices_count * current_batch_size, cudaMemcpyDeviceToHost)); dynet::Expression states_batch = dynet::input(graph, input_dim, input_curr_state); dynet::Expression prediction_qs = this->dqn.network_inference(graph, states_batch, true); // Get the vector of action value indices we took std::vector<unsigned int> action_value_indices(current_batch_size); memcpy(&(action_value_indices[0]), &directions_host[this->ray_batch_size*n], sizeof(unsigned int) * current_batch_size); // Get the current Q values for the actions taken dynet::Expression current_qs = dynet::pick(prediction_qs, action_value_indices, (unsigned int) 0); // 6) Calculate the loss dynet::Expression loss_expr = dynet::pow((td_target - current_qs), dynet::input(graph, 2.f)); loss_expr = dynet::sum_batches(loss_expr); loss += dynet::as_scalar(graph.forward(loss_expr)); // 7) Train the network graph.backward(loss_expr); trainer.update(); } cudaFree(prev_ray_vertices); } // Sample a new direction for rays which have reached the terminal state sample_random_scene_pos_for_terminated_rays<<<this->num_blocks, this->block_size>>>( device_scene, d_rand_state, ray_normals, ray_locations, ray_states ); cudaDeviceSynchronize(); if (TIMING){ // TIMER END: Train network auto end = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> elapsed = end - start; std::cout << "Train Network Time: " << elapsed.count() << "s" << std::endl; } // Copy over value to check if all rays have intersected with a light checkCudaErrors(cudaMemcpy(&rays_finished, device_rays_finished, sizeof(int), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemset(device_rays_finished, 1, sizeof(int))); // Increment the number of bounces bounces++; } // Update epsilon this->epsilon = std::max( this->epsilon - EPSILON_DECAY, EPSILON_MIN); printf("epsilon: %.3f\n", this->epsilon); printf("loss: %.3f\n",loss); // Calculate the average path length int* total_path_lengths_device; checkCudaErrors(cudaMalloc(&total_path_lengths_device, sizeof(int))); checkCudaErrors(cudaMemset(total_path_lengths_device, 0, sizeof(int))); sum_path_lengths<<<this->num_blocks, this->block_size>>>( total_path_lengths_device, ray_bounces ); int total_path_lengths = 0; checkCudaErrors(cudaMemcpy(&total_path_lengths, total_path_lengths_device, sizeof(int), cudaMemcpyDeviceToHost)); float avg_path_length = total_path_lengths/((float)SCREEN_HEIGHT*(float)SCREEN_WIDTH); std::cout << "Avg Path Length: " << avg_path_length << std::endl; cudaFree(total_path_lengths_device); // Calculate the number of zero-contribution light paths by throughputs below THROUGHPUT_THRESHOLD int* total_zero_contribution_light_paths; checkCudaErrors(cudaMalloc(&total_zero_contribution_light_paths, sizeof(int))); checkCudaErrors(cudaMemset(total_zero_contribution_light_paths, 0, sizeof(int))); sum_zero_contribution_light_paths<<<this->num_blocks, this->block_size>>>( total_zero_contribution_light_paths, ray_throughputs ); int total_zclp = 0; checkCudaErrors(cudaMemcpy(&total_zclp, total_zero_contribution_light_paths, sizeof(int), 
cudaMemcpyDeviceToHost)); std::cout << "Total zero contribution light paths: " << total_zclp << std::endl; cudaFree(total_zero_contribution_light_paths); // Save collected stats to the file if (SAVE_TRAINING_STATS){ std::ofstream stats_file; stats_file.open("../Radiance_Map_Data/nn_training_stats.txt", std::ios::app); stats_file << avg_path_length << " " << loss << " " << total_zclp << "\n"; stats_file.close(); } // Add computed throughput values to the running total update_total_throughput<<<this->num_blocks, this->block_size>>>( ray_throughputs, total_throughputs ); cudaDeviceSynchronize(); cudaFree(device_rays_finished); } // Update the device_buffer with the throughput update_device_buffer<<<this->num_blocks, this->block_size>>>( device_buffer, total_throughputs ); cudaDeviceSynchronize(); cudaFree(total_throughputs); } // Gets the initial direction to shoot a ray in __global__ void initialise_ray( curandState* d_rand_state, Camera* device_camera, float* ray_locations, float* prev_ray_locations, float* ray_directions, unsigned int* ray_states, float* ray_rewards, float* ray_discounts, float* ray_throughputs, unsigned int* ray_bounces ){ // Ray index int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int i = SCREEN_HEIGHT*x + y; // Randomly sample a ray within the pixel Ray r = Ray::sample_ray_through_pixel(d_rand_state, *device_camera, x, y); ray_locations[(i*3) ] = r.start.x; ray_locations[(i*3) + 1] = r.start.y; ray_locations[(i*3) + 2] = r.start.z; prev_ray_locations[(i*3) ] = r.start.x; prev_ray_locations[(i*3) + 1] = r.start.y; prev_ray_locations[(i*3) + 2] = r.start.z; ray_directions[(i*3) ] = r.direction.x; ray_directions[(i*3) + 1] = r.direction.y; ray_directions[(i*3) + 2] = r.direction.z; // Initialise ray_variables ray_rewards[i] = 0.f; ray_states[i] = 0; ray_throughputs[(i*3) ] = 1.f; ray_throughputs[(i*3) + 1] = 1.f; ray_throughputs[(i*3) + 2] = 1.f; ray_discounts[i] = 1.f; ray_bounces[i] = MAX_RAY_BOUNCES; } // Trace a ray for all ray locations given in the angles specified within the scene __global__ void trace_ray( Scene* scene, curandState* d_rand_state, int* rays_finished, float* ray_locations, float* prev_ray_locations, float* ray_normals, float* ray_directions, unsigned int* ray_states, float* ray_rewards, float* ray_discounts, float* ray_throughputs, unsigned int* ray_bounces, int bounces ){ // Ray index int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int i = SCREEN_HEIGHT*x + y; // For the current ray, get its next state by shooting a ray in the direction stored in ray_directions vec3 position = vec3(ray_locations[(i*3)], ray_locations[(i*3)+1], ray_locations[(i*3)+2]); vec3 dir = vec3(ray_directions[(i*3)], ray_directions[(i*3)+1], ray_directions[(i*3)+2]); // Create the ray and trace it Ray ray(vec4(position + (dir * 0.00001f), 1.f), vec4(dir, 1.f)); ray.closest_intersection(scene); // Update position, normal, and discount factor based on intersection switch(ray.intersection.intersection_type){ // TERMINAL STATE: R_(t+1) = Environment light power case NOTHING: ray_rewards[i] = 0.f; ray_discounts[i] = 0.f; if ( ray_states[i] == 0 ){ ray_throughputs[(i*3)] = ray_throughputs[(i*3)] * ENVIRONMENT_LIGHT; ray_throughputs[(i*3)+1] = ray_throughputs[(i*3)+1] * ENVIRONMENT_LIGHT; ray_throughputs[(i*3)+2] = ray_throughputs[(i*3)+2] * ENVIRONMENT_LIGHT; ray_bounces[i] = (unsigned int)bounces; } ray_states[i] = 1; break; // TERMINAL STATE: R_(t+1) = Area light power case AREA_LIGHT: 
float diffuse_light_power = scene->area_lights[ray.intersection.index].luminance; ray_rewards[i] = diffuse_light_power*200.f; ray_discounts[i] = 0.f; if ( ray_states[i] == 0 ){ vec3 diffuse_p = scene->area_lights[ray.intersection.index].diffuse_p; ray_throughputs[(i*3)] = ray_throughputs[(i*3)] * diffuse_p.x; ray_throughputs[(i*3)+1] = ray_throughputs[(i*3)+1] * diffuse_p.y; ray_throughputs[(i*3)+2] = ray_throughputs[(i*3)+2] * diffuse_p.z; ray_bounces[i] = (unsigned int)bounces; } ray_states[i] = 1; break; // NON-TERMINAL STATE: R_(t+1) + \gamma * max_a Q(S_t+1, a) // where R_(t+1) = 0 for diffuse surfaces case SURFACE: vec3 new_loc = vec3(ray.intersection.position); prev_ray_locations[(i*3) ] = ray_locations[(i*3) ]; prev_ray_locations[(i*3)+1] = ray_locations[(i*3)+1]; prev_ray_locations[(i*3)+2] = ray_locations[(i*3)+2]; ray_locations[(i*3) ] = new_loc.x; ray_locations[(i*3)+1] = new_loc.y; ray_locations[(i*3)+2] = new_loc.z; vec3 new_norm = ray.intersection.normal; ray_normals[(i*3) ] = new_norm.x; ray_normals[(i*3)+1] = new_norm.y; ray_normals[(i*3)+2] = new_norm.z; vec3 BRDF = scene->surfaces[ray.intersection.index].material.diffuse_c; // Get luminance of material float max_rgb = max(BRDF.x, BRDF.y); max_rgb = max(BRDF.z, max_rgb); float min_rgb = min(BRDF.x, BRDF.y); min_rgb = min(BRDF.z, min_rgb); float luminance = 0.5f * (max_rgb + min_rgb); // discount_factors holds cos_theta currently, update rgb throughput first if ( ray_states[i] == 0 ){ ray_throughputs[(i*3)] = ray_throughputs[(i*3)] * (BRDF.x / (float)M_PI); ray_throughputs[(i*3)+1] = ray_throughputs[(i*3)+1] * (BRDF.y / (float)M_PI); ray_throughputs[(i*3)+2] = ray_throughputs[(i*3)+2] * (BRDF.z / (float)M_PI); } // Now update discount_factors with luminance ray_rewards[i] = 0.f; ray_discounts[i] = luminance; // Still a ray being to bounce, so not finished if ( ray_states[i] == 0 ){ atomicExch(rays_finished, 0); } break; } }
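// ---------------------------------------------------------------------------
// Illustrative sketch only, not part of the original source. The training loop
// in render_frame() forms its targets on the device with compute_td_targets
// (defined elsewhere) and then minimises the squared TD error
// (td_target - Q(s,a))^2 with dynet. The helper below shows the standard
// one-step Q-learning target that the surrounding comments describe
// (R_{t+1} + gamma * max_a Q(S_{t+1}, a)); the function name is hypothetical
// and the real kernel's exact per-ray behaviour (it also receives locations,
// normals and RNG state) is not reproduced here.
static float td_target_reference(float reward,         // R_{t+1} written by trace_ray
                                 float discount,       // gamma, zeroed on terminal hits
                                 const float* next_qs, // Q(S_{t+1}, a), one entry per action
                                 int num_actions) {
    // max_a Q(S_{t+1}, a)
    float max_q = next_qs[0];
    for (int a = 1; a < num_actions; a++) {
        if (next_qs[a] > max_q) max_q = next_qs[a];
    }
    // On terminal states trace_ray sets the discount to 0, so the target
    // collapses to the immediate reward.
    return reward + discount * max_q;
}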
0e5217d5eadadc2459296772013c79191b57416d.hip
// !!! This is a file automatically generated by hipify!!! #include "../gpu_inc/SGM.cuh" GPU_SGM::GPU_SGM() { hipSetDevice(0); checkCudaErrors(hipStreamCreate(&stream1)); checkCudaErrors(hipStreamCreate(&stream2)); checkCudaErrors(hipStreamCreate(&stream3)); checkCudaErrors(hipStreamCreate(&stream4)); checkCudaErrors(hipStreamCreate(&stream5)); checkCudaErrors(hipStreamCreate(&stream6)); checkCudaErrors(hipStreamCreate(&stream7)); checkCudaErrors(hipStreamCreate(&stream8)); checkCudaErrors(hipMalloc((void**)&d_img_l, IMG_H* IMG_W * sizeof(uchar))); checkCudaErrors(hipMalloc((void**)&d_img_r, IMG_H * IMG_W * sizeof(uchar))); checkCudaErrors(hipMalloc((void**)&d_disp, IMG_H * IMG_W * sizeof(uchar))); checkCudaErrors(hipMalloc((void**)&d_filtered_disp, IMG_H * IMG_W * sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_cost_table_l, IMG_H * IMG_W * sizeof(uint64_t))); checkCudaErrors(hipMalloc((void**)&d_cost_table_r, IMG_H * IMG_W * sizeof(uint64_t))); checkCudaErrors(hipMalloc((void**)&d_cost, IMG_H * IMG_W * MAX_DISP * sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_L1, IMG_H * IMG_W * MAX_DISP * sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_L2, IMG_H * IMG_W * MAX_DISP * sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_min_L1, IMG_H * IMG_W * sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_min_L2, IMG_H * IMG_W * sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_L3, IMG_H * IMG_W * MAX_DISP * sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_L4, IMG_H * IMG_W * MAX_DISP * sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_min_L3, IMG_H * IMG_W * sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_min_L4, IMG_H * IMG_W * sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_L5, IMG_H * IMG_W * MAX_DISP * sizeof(short))); checkCudaErrors(hipMalloc((void**)&d_L6, IMG_H * IMG_W * MAX_DISP * sizeof(short))); checkCudaErrors(hipMalloc((void**)&d_min_L5, IMG_H * IMG_W * sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_min_L6, IMG_H * IMG_W * sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_L7, IMG_H * IMG_W * MAX_DISP * sizeof(short))); checkCudaErrors(hipMalloc((void**)&d_L8, IMG_H * IMG_W * MAX_DISP * sizeof(short))); checkCudaErrors(hipMalloc((void**)&d_min_L7, IMG_H * IMG_W * sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_min_L8, IMG_H * IMG_W * sizeof(float))); P1 = 10; P2 = 100; checkCudaErrors(hipMalloc((void**)&d_label, IMG_H * IMG_W * sizeof(int))); checkCudaErrors(hipMalloc((void**)&d_area, IMG_H * IMG_W * sizeof(int))); disp.create(IMG_H, IMG_W, CV_8UC1); filtered_disp.create(IMG_H, IMG_W, CV_32FC1); colored_disp.create(IMG_H, IMG_W, CV_8UC3); disp_cnt = 0; } GPU_SGM::~GPU_SGM() { checkCudaErrors(hipFree(d_img_l)); checkCudaErrors(hipFree(d_img_r)); checkCudaErrors(hipFree(d_disp)); checkCudaErrors(hipFree(d_filtered_disp)); checkCudaErrors(hipFree(d_cost_table_l)); checkCudaErrors(hipFree(d_cost_table_r)); checkCudaErrors(hipFree(d_cost)); checkCudaErrors(hipFree(d_L1)); checkCudaErrors(hipFree(d_L2)); checkCudaErrors(hipFree(d_min_L1)); checkCudaErrors(hipFree(d_min_L2)); checkCudaErrors(hipFree(d_L3)); checkCudaErrors(hipFree(d_L4)); checkCudaErrors(hipFree(d_min_L3)); checkCudaErrors(hipFree(d_min_L4)); checkCudaErrors(hipFree(d_L5)); checkCudaErrors(hipFree(d_L6)); checkCudaErrors(hipFree(d_min_L5)); checkCudaErrors(hipFree(d_min_L6)); checkCudaErrors(hipFree(d_L7)); checkCudaErrors(hipFree(d_L8)); checkCudaErrors(hipFree(d_min_L7)); checkCudaErrors(hipFree(d_min_L8)); checkCudaErrors(hipFree(d_label)); 
checkCudaErrors(hipFree(d_area)); checkCudaErrors(hipStreamDestroy(stream1)); checkCudaErrors(hipStreamDestroy(stream2)); checkCudaErrors(hipStreamDestroy(stream3)); checkCudaErrors(hipStreamDestroy(stream4)); checkCudaErrors(hipStreamDestroy(stream5)); checkCudaErrors(hipStreamDestroy(stream6)); checkCudaErrors(hipStreamDestroy(stream7)); checkCudaErrors(hipStreamDestroy(stream8)); } void GPU_SGM::process(Mat &img_l, Mat &img_r) { this->img_l = img_l; this->img_r = img_r; hipSetDevice(0); hipMemcpyAsync(d_img_l, img_l.data, IMG_H* IMG_W * sizeof(uchar), hipMemcpyHostToDevice, stream1); hipMemcpyAsync(d_img_r, img_r.data, IMG_H* IMG_W * sizeof(uchar), hipMemcpyHostToDevice, stream2); hipStreamSynchronize(stream1); hipStreamSynchronize(stream2); double be = get_cur_ms(); dim3 grid, block; grid.x = (IMG_W - 1) / 32 + 1; grid.y = (IMG_H - 1) / 32 + 1; block.x = 32; block.y = 32; cu_build_cost_table << <grid, block, 0, stream1 >> > (d_img_l, d_img_r, d_cost_table_l, d_cost_table_r, IMG_W, IMG_H, CU_WIN_W, CU_WIN_H); cu_build_dsi_from_table << <grid, block, 0, stream1 >> > (d_cost_table_l, d_cost_table_r, d_cost, IMG_W, IMG_H, MAX_DISP); hipDeviceSynchronize(); printf("build cost takes %lf ms\n", get_cur_ms() - be); be = get_cur_ms(); grid.x = (IMG_W - 1) / 32 + 1; grid.y = (MAX_DISP - 1) / 32 + 1; cu_cost_horizontal_filter << <grid, block, 0, stream1 >> > (d_cost, IMG_W, IMG_H, MAX_DISP, CU_COST_WIN_W); cu_cost_vertical_filter << <grid, block, 0, stream1 >> > (d_cost, IMG_W, IMG_H, MAX_DISP, CU_COST_WIN_H); //cu_cost_horizontal_filter_new << <grid, block, 0, stream1 >> > (d_cost, d_L1, IMG_W, IMG_H, MAX_DISP, CU_COST_WIN_W); //cu_cost_vertical_filter_new << <grid, block, 0, stream2 >> > (d_cost, d_L2, IMG_W, IMG_H, MAX_DISP, CU_COST_WIN_H); //hipStreamSynchronize(stream1); //hipStreamSynchronize(stream2); //cu_cost_filter << <grid, block, 0, stream1 >> > (d_cost, d_L1, d_L2, IMG_W, IMG_H, MAX_DISP); hipDeviceSynchronize(); printf("cost filter takes %lf ms\n", get_cur_ms() - be); be = get_cur_ms(); dim3 dp_grid, dp_block; dp_grid.x = IMG_W; dp_grid.y = 1; dp_block.x = MAX_DISP; // for dp syncronize dp_block.y = 1; cu_dp_L1 << <dp_grid, dp_block, 0, stream1 >> > (d_cost, d_L1, d_min_L1, IMG_W, IMG_H, MAX_DISP, P1, P2); cu_dp_L2 << <dp_grid, dp_block, 0, stream2 >> > (d_cost, d_L2, d_min_L2, IMG_W, IMG_H, MAX_DISP, P1, P2); cu_dp_L3 << <dp_grid, dp_block, 0, stream3 >> > (d_cost, d_L3, d_min_L3, IMG_W, IMG_H, MAX_DISP, P1, P2); cu_dp_L4 << <dp_grid, dp_block, 0, stream4 >> > (d_cost, d_L4, d_min_L4, IMG_W, IMG_H, MAX_DISP, P1, P2); if (CU_USE_8_PATH) { //for (int i = 0; i < IMG_H; i++) //{ // cu_dp_L5 << <dp_grid, dp_block, 0, stream5 >> > (d_cost, d_L5, d_min_L5, i, IMG_W, IMG_H, MAX_DISP, P1, P2); // cu_dp_L6 << <dp_grid, dp_block, 0, stream6 >> > (d_cost, d_L6, d_min_L6, i, IMG_W, IMG_H, MAX_DISP, P1, P2); // cu_dp_L7 << <dp_grid, dp_block, 0, stream7 >> > (d_cost, d_L7, d_min_L7, IMG_H - 1 - i, IMG_W, IMG_H, MAX_DISP, P1, P2); // cu_dp_L8 << <dp_grid, dp_block, 0, stream8 >> > (d_cost, d_L8, d_min_L8, IMG_H - 1 - i, IMG_W, IMG_H, MAX_DISP, P1, P2); //} // use truncated dp to approximate the original method cu_dp_L5_truncated << <dp_grid, dp_block, 0, stream5 >> > (d_cost, d_L5, d_min_L5, IMG_W, IMG_H, MAX_DISP, P1, P2); cu_dp_L6_truncated << <dp_grid, dp_block, 0, stream6 >> > (d_cost, d_L6, d_min_L6, IMG_W, IMG_H, MAX_DISP, P1, P2); cu_dp_L7_truncated << <dp_grid, dp_block, 0, stream7 >> > (d_cost, d_L7, d_min_L7, IMG_W, IMG_H, MAX_DISP, P1, P2); cu_dp_L8_truncated << <dp_grid, dp_block, 0, 
stream8 >> > (d_cost, d_L8, d_min_L8, IMG_W, IMG_H, MAX_DISP, P1, P2); } hipDeviceSynchronize(); printf("dp takes %lf ms\n", get_cur_ms() - be); be = get_cur_ms(); grid.x = 512; grid.y = 512; aggregation << <grid, block, 0, stream1 >> > (d_cost, d_L1, d_L2, d_L3, d_L4, d_L5, d_L6, d_L7, d_L8, IMG_W, IMG_H, MAX_DISP); grid.x = (IMG_W - 1) / 32 + 1; grid.y = (IMG_H - 1) / 32 + 1; wta << <grid, block, 0, stream1 >> >(d_cost, d_disp, IMG_W, IMG_H, MAX_DISP, CU_UNIQUE_RATIO, INVALID_DISP); hipDeviceSynchronize(); printf("wta takes %lf ms\n", get_cur_ms() - be); be = get_cur_ms(); cu_subpixel << <grid, block, 0, stream1 >> > (d_cost, d_disp, d_filtered_disp, IMG_W, IMG_H, MAX_DISP, INVALID_DISP); cu_median_filter << <grid, block, 0, stream1 >> > (d_filtered_disp, IMG_W, IMG_H, MAX_DISP, CU_MEDIAN_FILTER_W, CU_MEDIAN_FILTER_H); cu_speckle_filter_init << <grid, block, 0, stream2 >> > (d_label, d_area, IMG_W, IMG_H); hipStreamSynchronize(stream1); hipStreamSynchronize(stream2); cu_speckle_filter_union_find << <grid, block, 0, stream1 >> > (d_filtered_disp, d_label, d_area, IMG_W, IMG_H, CU_SPECKLE_DIS); cu_speckle_filter_sum_up << <grid, block, 0, stream1 >> > (d_label, d_area, IMG_W, IMG_H); cu_speckle_filter_end << <grid, block, 0, stream1 >> > (d_filtered_disp, d_label, d_area, IMG_W, IMG_H, INVALID_DISP, CU_SPECKLE_SIZE); hipDeviceSynchronize(); printf("cuda post_filter takes %lf ms\n", get_cur_ms() - be); hipMemcpyAsync(filtered_disp.data, d_filtered_disp, IMG_H * IMG_W * sizeof(float), hipMemcpyDeviceToHost, stream1); //hipMemcpyAsync(disp.data, d_disp, IMG_H * IMG_W * sizeof(uchar), hipMemcpyDeviceToHost, stream2); } void GPU_SGM::show_disp() { // left border invalid for (int i = 0; i < filtered_disp.rows; i++) { float *ptr = filtered_disp.ptr<float>(i); for (int j = 0; j < MAX_DISP / SCALE; j++) { ptr[j] = INVALID_DISP; } } // convert to RGB for better observation colormap(); Mat debug_view, tmp; debug_view = debug_view.zeros(IMG_H * 2, IMG_W, CV_8UC3); tmp = debug_view(Rect(0, 0, IMG_W, IMG_H)); cvtColor(img_l, img_l, CV_GRAY2BGR); img_l.copyTo(tmp); tmp = debug_view(Rect(0, IMG_H - 1, IMG_W, IMG_H)); colored_disp.copyTo(tmp); namedWindow("disp_map", 1); imshow("disp_map", debug_view); imwrite(num2str(disp_cnt++) + "_disp.png", debug_view); waitKey(-1); //destroyWindow("disp_map"); } void GPU_SGM::colormap() { float disp_value = 0; for (int i = 0; i < filtered_disp.rows; i++) { for (int j = 0; j < filtered_disp.cols; j++) { disp_value = filtered_disp.at<float>(i, j); //disp_value = disp.at<uchar>(i, j); if (disp_value > MAX_DISP - 1) { colored_disp.at<Vec3b>(i, j)[0] = 0; colored_disp.at<Vec3b>(i, j)[1] = 0; colored_disp.at<Vec3b>(i, j)[2] = 0; } else { disp_value *= (256 / (MAX_DISP)); if (disp_value <= 51) { colored_disp.at<Vec3b>(i, j)[0] = 255; colored_disp.at<Vec3b>(i, j)[1] = disp_value * 5; colored_disp.at<Vec3b>(i, j)[2] = 0; } else if (disp_value <= 102) { disp_value -= 51; colored_disp.at<Vec3b>(i, j)[0] = 255 - disp_value * 5; colored_disp.at<Vec3b>(i, j)[1] = 255; colored_disp.at<Vec3b>(i, j)[2] = 0; } else if (disp_value <= 153) { disp_value -= 102; colored_disp.at<Vec3b>(i, j)[0] = 0; colored_disp.at<Vec3b>(i, j)[1] = 255; colored_disp.at<Vec3b>(i, j)[2] = disp_value * 5; } else if (disp_value <= 204) { disp_value -= 153; colored_disp.at<Vec3b>(i, j)[0] = 0; colored_disp.at<Vec3b>(i, j)[1] = 255 - uchar(128.0*disp_value / 51.0 + 0.5); colored_disp.at<Vec3b>(i, j)[2] = 255; } else { disp_value -= 204; colored_disp.at<Vec3b>(i, j)[0] = 0; colored_disp.at<Vec3b>(i, j)[1] = 127 - 
uchar(127.0*disp_value / 51.0 + 0.5); colored_disp.at<Vec3b>(i, j)[2] = 255; } } } } }
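// Stages of GPU_SGM::process() above:
//   1. cu_build_cost_table / cu_build_dsi_from_table : window-based matching
//      cost volume of size IMG_W x IMG_H x MAX_DISP.
//   2. cu_cost_horizontal_filter / cu_cost_vertical_filter : box filtering of
//      the cost volume.
//   3. cu_dp_L1..L4 (plus the truncated L5..L8 paths when CU_USE_8_PATH) :
//      per-path dynamic programming with penalties P1 = 10 and P2 = 100.
//   4. aggregation + wta : sum the path costs and pick the winner-take-all
//      disparity with a uniqueness check.
//   5. cu_subpixel, cu_median_filter and the speckle-filter kernels : refine
//      and clean the result before it is copied back into filtered_disp.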
0e5217d5eadadc2459296772013c79191b57416d.cu
#include "../gpu_inc/SGM.cuh" GPU_SGM::GPU_SGM() { cudaSetDevice(0); checkCudaErrors(cudaStreamCreate(&stream1)); checkCudaErrors(cudaStreamCreate(&stream2)); checkCudaErrors(cudaStreamCreate(&stream3)); checkCudaErrors(cudaStreamCreate(&stream4)); checkCudaErrors(cudaStreamCreate(&stream5)); checkCudaErrors(cudaStreamCreate(&stream6)); checkCudaErrors(cudaStreamCreate(&stream7)); checkCudaErrors(cudaStreamCreate(&stream8)); checkCudaErrors(cudaMalloc((void**)&d_img_l, IMG_H* IMG_W * sizeof(uchar))); checkCudaErrors(cudaMalloc((void**)&d_img_r, IMG_H * IMG_W * sizeof(uchar))); checkCudaErrors(cudaMalloc((void**)&d_disp, IMG_H * IMG_W * sizeof(uchar))); checkCudaErrors(cudaMalloc((void**)&d_filtered_disp, IMG_H * IMG_W * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_cost_table_l, IMG_H * IMG_W * sizeof(uint64_t))); checkCudaErrors(cudaMalloc((void**)&d_cost_table_r, IMG_H * IMG_W * sizeof(uint64_t))); checkCudaErrors(cudaMalloc((void**)&d_cost, IMG_H * IMG_W * MAX_DISP * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_L1, IMG_H * IMG_W * MAX_DISP * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_L2, IMG_H * IMG_W * MAX_DISP * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_min_L1, IMG_H * IMG_W * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_min_L2, IMG_H * IMG_W * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_L3, IMG_H * IMG_W * MAX_DISP * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_L4, IMG_H * IMG_W * MAX_DISP * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_min_L3, IMG_H * IMG_W * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_min_L4, IMG_H * IMG_W * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_L5, IMG_H * IMG_W * MAX_DISP * sizeof(short))); checkCudaErrors(cudaMalloc((void**)&d_L6, IMG_H * IMG_W * MAX_DISP * sizeof(short))); checkCudaErrors(cudaMalloc((void**)&d_min_L5, IMG_H * IMG_W * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_min_L6, IMG_H * IMG_W * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_L7, IMG_H * IMG_W * MAX_DISP * sizeof(short))); checkCudaErrors(cudaMalloc((void**)&d_L8, IMG_H * IMG_W * MAX_DISP * sizeof(short))); checkCudaErrors(cudaMalloc((void**)&d_min_L7, IMG_H * IMG_W * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_min_L8, IMG_H * IMG_W * sizeof(float))); P1 = 10; P2 = 100; checkCudaErrors(cudaMalloc((void**)&d_label, IMG_H * IMG_W * sizeof(int))); checkCudaErrors(cudaMalloc((void**)&d_area, IMG_H * IMG_W * sizeof(int))); disp.create(IMG_H, IMG_W, CV_8UC1); filtered_disp.create(IMG_H, IMG_W, CV_32FC1); colored_disp.create(IMG_H, IMG_W, CV_8UC3); disp_cnt = 0; } GPU_SGM::~GPU_SGM() { checkCudaErrors(cudaFree(d_img_l)); checkCudaErrors(cudaFree(d_img_r)); checkCudaErrors(cudaFree(d_disp)); checkCudaErrors(cudaFree(d_filtered_disp)); checkCudaErrors(cudaFree(d_cost_table_l)); checkCudaErrors(cudaFree(d_cost_table_r)); checkCudaErrors(cudaFree(d_cost)); checkCudaErrors(cudaFree(d_L1)); checkCudaErrors(cudaFree(d_L2)); checkCudaErrors(cudaFree(d_min_L1)); checkCudaErrors(cudaFree(d_min_L2)); checkCudaErrors(cudaFree(d_L3)); checkCudaErrors(cudaFree(d_L4)); checkCudaErrors(cudaFree(d_min_L3)); checkCudaErrors(cudaFree(d_min_L4)); checkCudaErrors(cudaFree(d_L5)); checkCudaErrors(cudaFree(d_L6)); checkCudaErrors(cudaFree(d_min_L5)); checkCudaErrors(cudaFree(d_min_L6)); checkCudaErrors(cudaFree(d_L7)); checkCudaErrors(cudaFree(d_L8)); checkCudaErrors(cudaFree(d_min_L7)); checkCudaErrors(cudaFree(d_min_L8)); checkCudaErrors(cudaFree(d_label)); 
checkCudaErrors(cudaFree(d_area)); checkCudaErrors(cudaStreamDestroy(stream1)); checkCudaErrors(cudaStreamDestroy(stream2)); checkCudaErrors(cudaStreamDestroy(stream3)); checkCudaErrors(cudaStreamDestroy(stream4)); checkCudaErrors(cudaStreamDestroy(stream5)); checkCudaErrors(cudaStreamDestroy(stream6)); checkCudaErrors(cudaStreamDestroy(stream7)); checkCudaErrors(cudaStreamDestroy(stream8)); } void GPU_SGM::process(Mat &img_l, Mat &img_r) { this->img_l = img_l; this->img_r = img_r; cudaSetDevice(0); cudaMemcpyAsync(d_img_l, img_l.data, IMG_H* IMG_W * sizeof(uchar), cudaMemcpyHostToDevice, stream1); cudaMemcpyAsync(d_img_r, img_r.data, IMG_H* IMG_W * sizeof(uchar), cudaMemcpyHostToDevice, stream2); cudaStreamSynchronize(stream1); cudaStreamSynchronize(stream2); double be = get_cur_ms(); dim3 grid, block; grid.x = (IMG_W - 1) / 32 + 1; grid.y = (IMG_H - 1) / 32 + 1; block.x = 32; block.y = 32; cu_build_cost_table << <grid, block, 0, stream1 >> > (d_img_l, d_img_r, d_cost_table_l, d_cost_table_r, IMG_W, IMG_H, CU_WIN_W, CU_WIN_H); cu_build_dsi_from_table << <grid, block, 0, stream1 >> > (d_cost_table_l, d_cost_table_r, d_cost, IMG_W, IMG_H, MAX_DISP); cudaDeviceSynchronize(); printf("build cost takes %lf ms\n", get_cur_ms() - be); be = get_cur_ms(); grid.x = (IMG_W - 1) / 32 + 1; grid.y = (MAX_DISP - 1) / 32 + 1; cu_cost_horizontal_filter << <grid, block, 0, stream1 >> > (d_cost, IMG_W, IMG_H, MAX_DISP, CU_COST_WIN_W); cu_cost_vertical_filter << <grid, block, 0, stream1 >> > (d_cost, IMG_W, IMG_H, MAX_DISP, CU_COST_WIN_H); //cu_cost_horizontal_filter_new << <grid, block, 0, stream1 >> > (d_cost, d_L1, IMG_W, IMG_H, MAX_DISP, CU_COST_WIN_W); //cu_cost_vertical_filter_new << <grid, block, 0, stream2 >> > (d_cost, d_L2, IMG_W, IMG_H, MAX_DISP, CU_COST_WIN_H); //cudaStreamSynchronize(stream1); //cudaStreamSynchronize(stream2); //cu_cost_filter << <grid, block, 0, stream1 >> > (d_cost, d_L1, d_L2, IMG_W, IMG_H, MAX_DISP); cudaDeviceSynchronize(); printf("cost filter takes %lf ms\n", get_cur_ms() - be); be = get_cur_ms(); dim3 dp_grid, dp_block; dp_grid.x = IMG_W; dp_grid.y = 1; dp_block.x = MAX_DISP; // for dp syncronize dp_block.y = 1; cu_dp_L1 << <dp_grid, dp_block, 0, stream1 >> > (d_cost, d_L1, d_min_L1, IMG_W, IMG_H, MAX_DISP, P1, P2); cu_dp_L2 << <dp_grid, dp_block, 0, stream2 >> > (d_cost, d_L2, d_min_L2, IMG_W, IMG_H, MAX_DISP, P1, P2); cu_dp_L3 << <dp_grid, dp_block, 0, stream3 >> > (d_cost, d_L3, d_min_L3, IMG_W, IMG_H, MAX_DISP, P1, P2); cu_dp_L4 << <dp_grid, dp_block, 0, stream4 >> > (d_cost, d_L4, d_min_L4, IMG_W, IMG_H, MAX_DISP, P1, P2); if (CU_USE_8_PATH) { //for (int i = 0; i < IMG_H; i++) //{ // cu_dp_L5 << <dp_grid, dp_block, 0, stream5 >> > (d_cost, d_L5, d_min_L5, i, IMG_W, IMG_H, MAX_DISP, P1, P2); // cu_dp_L6 << <dp_grid, dp_block, 0, stream6 >> > (d_cost, d_L6, d_min_L6, i, IMG_W, IMG_H, MAX_DISP, P1, P2); // cu_dp_L7 << <dp_grid, dp_block, 0, stream7 >> > (d_cost, d_L7, d_min_L7, IMG_H - 1 - i, IMG_W, IMG_H, MAX_DISP, P1, P2); // cu_dp_L8 << <dp_grid, dp_block, 0, stream8 >> > (d_cost, d_L8, d_min_L8, IMG_H - 1 - i, IMG_W, IMG_H, MAX_DISP, P1, P2); //} // use truncated dp to approximate the original method cu_dp_L5_truncated << <dp_grid, dp_block, 0, stream5 >> > (d_cost, d_L5, d_min_L5, IMG_W, IMG_H, MAX_DISP, P1, P2); cu_dp_L6_truncated << <dp_grid, dp_block, 0, stream6 >> > (d_cost, d_L6, d_min_L6, IMG_W, IMG_H, MAX_DISP, P1, P2); cu_dp_L7_truncated << <dp_grid, dp_block, 0, stream7 >> > (d_cost, d_L7, d_min_L7, IMG_W, IMG_H, MAX_DISP, P1, P2); cu_dp_L8_truncated << 
<dp_grid, dp_block, 0, stream8 >> > (d_cost, d_L8, d_min_L8, IMG_W, IMG_H, MAX_DISP, P1, P2); } cudaDeviceSynchronize(); printf("dp takes %lf ms\n", get_cur_ms() - be); be = get_cur_ms(); grid.x = 512; grid.y = 512; aggregation << <grid, block, 0, stream1 >> > (d_cost, d_L1, d_L2, d_L3, d_L4, d_L5, d_L6, d_L7, d_L8, IMG_W, IMG_H, MAX_DISP); grid.x = (IMG_W - 1) / 32 + 1; grid.y = (IMG_H - 1) / 32 + 1; wta << <grid, block, 0, stream1 >> >(d_cost, d_disp, IMG_W, IMG_H, MAX_DISP, CU_UNIQUE_RATIO, INVALID_DISP); cudaDeviceSynchronize(); printf("wta takes %lf ms\n", get_cur_ms() - be); be = get_cur_ms(); cu_subpixel << <grid, block, 0, stream1 >> > (d_cost, d_disp, d_filtered_disp, IMG_W, IMG_H, MAX_DISP, INVALID_DISP); cu_median_filter << <grid, block, 0, stream1 >> > (d_filtered_disp, IMG_W, IMG_H, MAX_DISP, CU_MEDIAN_FILTER_W, CU_MEDIAN_FILTER_H); cu_speckle_filter_init << <grid, block, 0, stream2 >> > (d_label, d_area, IMG_W, IMG_H); cudaStreamSynchronize(stream1); cudaStreamSynchronize(stream2); cu_speckle_filter_union_find << <grid, block, 0, stream1 >> > (d_filtered_disp, d_label, d_area, IMG_W, IMG_H, CU_SPECKLE_DIS); cu_speckle_filter_sum_up << <grid, block, 0, stream1 >> > (d_label, d_area, IMG_W, IMG_H); cu_speckle_filter_end << <grid, block, 0, stream1 >> > (d_filtered_disp, d_label, d_area, IMG_W, IMG_H, INVALID_DISP, CU_SPECKLE_SIZE); cudaDeviceSynchronize(); printf("cuda post_filter takes %lf ms\n", get_cur_ms() - be); cudaMemcpyAsync(filtered_disp.data, d_filtered_disp, IMG_H * IMG_W * sizeof(float), cudaMemcpyDeviceToHost, stream1); //cudaMemcpyAsync(disp.data, d_disp, IMG_H * IMG_W * sizeof(uchar), cudaMemcpyDeviceToHost, stream2); } void GPU_SGM::show_disp() { // left border invalid for (int i = 0; i < filtered_disp.rows; i++) { float *ptr = filtered_disp.ptr<float>(i); for (int j = 0; j < MAX_DISP / SCALE; j++) { ptr[j] = INVALID_DISP; } } // convert to RGB for better observation colormap(); Mat debug_view, tmp; debug_view = debug_view.zeros(IMG_H * 2, IMG_W, CV_8UC3); tmp = debug_view(Rect(0, 0, IMG_W, IMG_H)); cvtColor(img_l, img_l, CV_GRAY2BGR); img_l.copyTo(tmp); tmp = debug_view(Rect(0, IMG_H - 1, IMG_W, IMG_H)); colored_disp.copyTo(tmp); namedWindow("disp_map", 1); imshow("disp_map", debug_view); imwrite(num2str(disp_cnt++) + "_disp.png", debug_view); waitKey(-1); //destroyWindow("disp_map"); } void GPU_SGM::colormap() { float disp_value = 0; for (int i = 0; i < filtered_disp.rows; i++) { for (int j = 0; j < filtered_disp.cols; j++) { disp_value = filtered_disp.at<float>(i, j); //disp_value = disp.at<uchar>(i, j); if (disp_value > MAX_DISP - 1) { colored_disp.at<Vec3b>(i, j)[0] = 0; colored_disp.at<Vec3b>(i, j)[1] = 0; colored_disp.at<Vec3b>(i, j)[2] = 0; } else { disp_value *= (256 / (MAX_DISP)); if (disp_value <= 51) { colored_disp.at<Vec3b>(i, j)[0] = 255; colored_disp.at<Vec3b>(i, j)[1] = disp_value * 5; colored_disp.at<Vec3b>(i, j)[2] = 0; } else if (disp_value <= 102) { disp_value -= 51; colored_disp.at<Vec3b>(i, j)[0] = 255 - disp_value * 5; colored_disp.at<Vec3b>(i, j)[1] = 255; colored_disp.at<Vec3b>(i, j)[2] = 0; } else if (disp_value <= 153) { disp_value -= 102; colored_disp.at<Vec3b>(i, j)[0] = 0; colored_disp.at<Vec3b>(i, j)[1] = 255; colored_disp.at<Vec3b>(i, j)[2] = disp_value * 5; } else if (disp_value <= 204) { disp_value -= 153; colored_disp.at<Vec3b>(i, j)[0] = 0; colored_disp.at<Vec3b>(i, j)[1] = 255 - uchar(128.0*disp_value / 51.0 + 0.5); colored_disp.at<Vec3b>(i, j)[2] = 255; } else { disp_value -= 204; colored_disp.at<Vec3b>(i, j)[0] = 0; 
colored_disp.at<Vec3b>(i, j)[1] = 127 - uchar(127.0*disp_value / 51.0 + 0.5); colored_disp.at<Vec3b>(i, j)[2] = 255; } } } } }
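The wta launch in this pipeline collapses the aggregated cost volume into a disparity image. As a point of reference, the snippet below is a minimal standalone winner-take-all sketch, not the project's actual kernel: it omits the uniqueness-ratio test and the INVALID_DISP sentinel that the real wta applies, and the [disparity][row][column] layout of the cost volume is an assumption.

__global__ void wta_sketch(const float *cost, unsigned char *disp,
                           int img_w, int img_h, int max_disp)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= img_w || y >= img_h) return;

    int best_d = 0;
    float best_c = cost[y * img_w + x];               // cost at disparity 0
    for (int d = 1; d < max_disp; ++d) {
        float c = cost[(d * img_h + y) * img_w + x];  // assumed cost-volume layout
        if (c < best_c) { best_c = c; best_d = d; }
    }
    disp[y * img_w + x] = (unsigned char)best_d;      // winning disparity
}

A uniqueness test would additionally track the second-best cost and write INVALID_DISP when best and second-best are too close, which is what the CU_UNIQUE_RATIO parameter passed to wta controls.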
67d3a335cc31208972dda8a8e8327c993ef477f6.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. **/ #include <Eigen/Dense> #include "cupoch/geometry/geometry_utils.h" #include "cupoch/utility/console.h" namespace cupoch { namespace geometry { namespace { template <int Dim> struct transform_points_functor { transform_points_functor( const Eigen::Matrix<float, Dim + 1, Dim + 1> &transform) : transform_(transform){}; const Eigen::Matrix<float, Dim + 1, Dim + 1> transform_; __device__ void operator()(Eigen::Matrix<float, Dim, 1> &pt) { const Eigen::Matrix<float, Dim + 1, 1> new_pt = transform_ * (Eigen::Matrix<float, Dim + 1, 1>() << pt, 1.0).finished(); pt = new_pt.template head<Dim>() / new_pt(Dim); } }; struct transform_normals_functor { transform_normals_functor(const Eigen::Matrix4f &transform) : transform_(transform){}; const Eigen::Matrix4f transform_; __device__ void operator()(Eigen::Vector3f &nl) { const Eigen::Vector4f new_pt = transform_ * Eigen::Vector4f(nl(0), nl(1), nl(2), 0.0); nl = new_pt.head<3>(); } }; } // namespace template <int Dim> Eigen::Matrix<float, Dim, 1> ComputeMinBound( const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { return ComputeMinBound<Dim>(0, points); } template <int Dim> Eigen::Matrix<float, Dim, 1> ComputeMinBound( hipStream_t stream, const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { if (points.empty()) return Eigen::Matrix<float, Dim, 1>::Zero(); Eigen::Matrix<float, Dim, 1> init = points[0]; return thrust::reduce( utility::exec_policy(stream)->on(stream), points.begin(), points.end(), init, thrust::elementwise_minimum<Eigen::Matrix<float, Dim, 1>>()); } template <int Dim> Eigen::Matrix<float, Dim, 1> ComputeMaxBound( const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { return ComputeMaxBound<Dim>(0, points); } template <int Dim> Eigen::Matrix<float, Dim, 1> ComputeMaxBound( hipStream_t stream, const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { if (points.empty()) return Eigen::Matrix<float, Dim, 1>::Zero(); Eigen::Matrix<float, Dim, 1> init = points[0]; return thrust::reduce( utility::exec_policy(stream)->on(stream), points.begin(), points.end(), init, thrust::elementwise_maximum<Eigen::Matrix<float, Dim, 1>>()); } template <int Dim> Eigen::Matrix<float, Dim, 1> ComputeCenter( const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { Eigen::Matrix<float, Dim, 1> init = Eigen::Matrix<float, Dim, 1>::Zero(); if (points.empty()) return 
init; Eigen::Matrix<float, Dim, 1> sum = thrust::reduce(points.begin(), points.end(), init, thrust::plus<Eigen::Matrix<float, Dim, 1>>()); return sum / points.size(); } template Eigen::Matrix<float, 2, 1> ComputeMinBound<2>( const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points); template Eigen::Matrix<float, 2, 1> ComputeMinBound<2>( hipStream_t stream, const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points); template Eigen::Matrix<float, 3, 1> ComputeMinBound<3>( const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points); template Eigen::Matrix<float, 3, 1> ComputeMinBound<3>( hipStream_t stream, const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points); template Eigen::Matrix<float, 2, 1> ComputeMaxBound<2>( const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points); template Eigen::Matrix<float, 2, 1> ComputeMaxBound<2>( hipStream_t stream, const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points); template Eigen::Matrix<float, 3, 1> ComputeMaxBound<3>( const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points); template Eigen::Matrix<float, 3, 1> ComputeMaxBound<3>( hipStream_t stream, const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points); template Eigen::Matrix<float, 2, 1> ComputeCenter<2>( const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points); template Eigen::Matrix<float, 3, 1> ComputeCenter<3>( const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points); void ResizeAndPaintUniformColor(utility::device_vector<Eigen::Vector3f> &colors, const size_t size, const Eigen::Vector3f &color) { colors.resize(size); Eigen::Vector3f clipped_color = color; if (color.minCoeff() < 0 || color.maxCoeff() > 1) { utility::LogWarning( "invalid color in PaintUniformColor, clipping to [0, 1]"); clipped_color = clipped_color.array() .max(Eigen::Vector3f(0, 0, 0).array()) .matrix(); clipped_color = clipped_color.array() .min(Eigen::Vector3f(1, 1, 1).array()) .matrix(); } thrust::fill(colors.begin(), colors.end(), clipped_color); } template <int Dim> void TransformPoints( const Eigen::Matrix<float, Dim + 1, Dim + 1> &transformation, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { TransformPoints<Dim>(0, transformation, points); } template <int Dim> void TransformPoints( hipStream_t stream, const Eigen::Matrix<float, Dim + 1, Dim + 1> &transformation, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { transform_points_functor<Dim> func(transformation); thrust::for_each(utility::exec_policy(stream)->on(stream), points.begin(), points.end(), func); } template void TransformPoints<2>(const Eigen::Matrix3f &transformation, utility::device_vector<Eigen::Vector2f> &points); template void TransformPoints<2>(hipStream_t stream, const Eigen::Matrix3f &transformation, utility::device_vector<Eigen::Vector2f> &points); template void TransformPoints<3>(const Eigen::Matrix4f &transformation, utility::device_vector<Eigen::Vector3f> &points); template void TransformPoints<3>(hipStream_t stream, const Eigen::Matrix4f &transformation, utility::device_vector<Eigen::Vector3f> &points); void TransformNormals(const Eigen::Matrix4f &transformation, utility::device_vector<Eigen::Vector3f> &normals) { TransformNormals(0, transformation, normals); } void TransformNormals(hipStream_t stream, const Eigen::Matrix4f &transformation, utility::device_vector<Eigen::Vector3f> &normals) { transform_normals_functor func(transformation); thrust::for_each(utility::exec_policy(stream)->on(stream), normals.begin(), normals.end(), 
func); } template <int Dim> void TranslatePoints( const Eigen::Matrix<float, Dim, 1> &translation, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points, bool relative) { Eigen::Matrix<float, Dim, 1> transform = translation; if (!relative) { transform -= ComputeCenter<Dim>(points); } thrust::for_each(points.begin(), points.end(), [=] __device__(Eigen::Matrix<float, Dim, 1> & pt) { pt += transform; }); } template <int Dim> void ScalePoints(const float scale, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points, bool center) { Eigen::Matrix<float, Dim, 1> points_center = Eigen::Matrix<float, Dim, 1>::Zero(); if (center && !points.empty()) { points_center = ComputeCenter<Dim>(points); } thrust::for_each(points.begin(), points.end(), [=] __device__(Eigen::Matrix<float, Dim, 1> & pt) { pt = (pt - points_center) * scale + points_center; }); } template void TranslatePoints<2>(const Eigen::Vector2f &translation, utility::device_vector<Eigen::Vector2f> &points, bool relative); template void TranslatePoints<3>(const Eigen::Vector3f &translation, utility::device_vector<Eigen::Vector3f> &points, bool relative); template void ScalePoints<2>(const float scale, utility::device_vector<Eigen::Vector2f> &points, bool center); template void ScalePoints<3>(const float scale, utility::device_vector<Eigen::Vector3f> &points, bool center); template <int Dim> void RotatePoints(const Eigen::Matrix<float, Dim, Dim> &R, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points, bool center) { RotatePoints<Dim>(0, R, points, center); } template <int Dim> void RotatePoints(hipStream_t stream, const Eigen::Matrix<float, Dim, Dim> &R, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points, bool center) { Eigen::Matrix<float, Dim, 1> points_center = Eigen::Matrix<float, Dim, 1>::Zero(); if (center && !points.empty()) { points_center = ComputeCenter<Dim>(points); } thrust::for_each(utility::exec_policy(stream)->on(stream), points.begin(), points.end(), [=] __device__(Eigen::Matrix<float, Dim, 1> & pt) { pt = R * (pt - points_center) + points_center; }); } template void RotatePoints<2>(const Eigen::Matrix2f &R, utility::device_vector<Eigen::Vector2f> &points, bool center); template void RotatePoints<3>(const Eigen::Matrix3f &R, utility::device_vector<Eigen::Vector3f> &points, bool center); template void RotatePoints<2>(hipStream_t stream, const Eigen::Matrix2f &R, utility::device_vector<Eigen::Vector2f> &points, bool center); template void RotatePoints<3>(hipStream_t stream, const Eigen::Matrix3f &R, utility::device_vector<Eigen::Vector3f> &points, bool center); void RotateNormals(const Eigen::Matrix3f &R, utility::device_vector<Eigen::Vector3f> &normals) { RotateNormals(0, R, normals); } void RotateNormals(hipStream_t stream, const Eigen::Matrix3f &R, utility::device_vector<Eigen::Vector3f> &normals) { thrust::for_each(utility::exec_policy(stream)->on(stream), normals.begin(), normals.end(), [=] __device__(Eigen::Vector3f & normal) { normal = R * normal; }); } Eigen::Matrix3f GetRotationMatrixFromXYZ(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixX(rotation(0)) * cupoch::utility::RotationMatrixY(rotation(1)) * cupoch::utility::RotationMatrixZ(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromYZX(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixY(rotation(0)) * cupoch::utility::RotationMatrixZ(rotation(1)) * cupoch::utility::RotationMatrixX(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromZXY(const Eigen::Vector3f &rotation) { return 
cupoch::utility::RotationMatrixZ(rotation(0)) * cupoch::utility::RotationMatrixX(rotation(1)) * cupoch::utility::RotationMatrixY(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromXZY(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixX(rotation(0)) * cupoch::utility::RotationMatrixZ(rotation(1)) * cupoch::utility::RotationMatrixY(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromZYX(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixZ(rotation(0)) * cupoch::utility::RotationMatrixY(rotation(1)) * cupoch::utility::RotationMatrixX(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromYXZ(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixY(rotation(0)) * cupoch::utility::RotationMatrixX(rotation(1)) * cupoch::utility::RotationMatrixZ(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromAxisAngle( const Eigen::Vector3f &rotation) { const float phi = rotation.norm(); return Eigen::AngleAxisf(phi, rotation / phi).toRotationMatrix(); } Eigen::Matrix3f GetRotationMatrixFromQuaternion( const Eigen::Vector4f &rotation) { return Eigen::Quaternionf(rotation(0), rotation(1), rotation(2), rotation(3)) .normalized() .toRotationMatrix(); } } // namespace geometry } // namespace cupoch
67d3a335cc31208972dda8a8e8327c993ef477f6.cu
/** * Copyright (c) 2020 Neka-Nat * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. **/ #include <Eigen/Dense> #include "cupoch/geometry/geometry_utils.h" #include "cupoch/utility/console.h" namespace cupoch { namespace geometry { namespace { template <int Dim> struct transform_points_functor { transform_points_functor( const Eigen::Matrix<float, Dim + 1, Dim + 1> &transform) : transform_(transform){}; const Eigen::Matrix<float, Dim + 1, Dim + 1> transform_; __device__ void operator()(Eigen::Matrix<float, Dim, 1> &pt) { const Eigen::Matrix<float, Dim + 1, 1> new_pt = transform_ * (Eigen::Matrix<float, Dim + 1, 1>() << pt, 1.0).finished(); pt = new_pt.template head<Dim>() / new_pt(Dim); } }; struct transform_normals_functor { transform_normals_functor(const Eigen::Matrix4f &transform) : transform_(transform){}; const Eigen::Matrix4f transform_; __device__ void operator()(Eigen::Vector3f &nl) { const Eigen::Vector4f new_pt = transform_ * Eigen::Vector4f(nl(0), nl(1), nl(2), 0.0); nl = new_pt.head<3>(); } }; } // namespace template <int Dim> Eigen::Matrix<float, Dim, 1> ComputeMinBound( const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { return ComputeMinBound<Dim>(0, points); } template <int Dim> Eigen::Matrix<float, Dim, 1> ComputeMinBound( cudaStream_t stream, const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { if (points.empty()) return Eigen::Matrix<float, Dim, 1>::Zero(); Eigen::Matrix<float, Dim, 1> init = points[0]; return thrust::reduce( utility::exec_policy(stream)->on(stream), points.begin(), points.end(), init, thrust::elementwise_minimum<Eigen::Matrix<float, Dim, 1>>()); } template <int Dim> Eigen::Matrix<float, Dim, 1> ComputeMaxBound( const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { return ComputeMaxBound<Dim>(0, points); } template <int Dim> Eigen::Matrix<float, Dim, 1> ComputeMaxBound( cudaStream_t stream, const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { if (points.empty()) return Eigen::Matrix<float, Dim, 1>::Zero(); Eigen::Matrix<float, Dim, 1> init = points[0]; return thrust::reduce( utility::exec_policy(stream)->on(stream), points.begin(), points.end(), init, thrust::elementwise_maximum<Eigen::Matrix<float, Dim, 1>>()); } template <int Dim> Eigen::Matrix<float, Dim, 1> ComputeCenter( const utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { Eigen::Matrix<float, Dim, 1> init = Eigen::Matrix<float, Dim, 1>::Zero(); if (points.empty()) return init; Eigen::Matrix<float, Dim, 1> sum = 
thrust::reduce(points.begin(), points.end(), init, thrust::plus<Eigen::Matrix<float, Dim, 1>>()); return sum / points.size(); } template Eigen::Matrix<float, 2, 1> ComputeMinBound<2>( const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points); template Eigen::Matrix<float, 2, 1> ComputeMinBound<2>( cudaStream_t stream, const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points); template Eigen::Matrix<float, 3, 1> ComputeMinBound<3>( const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points); template Eigen::Matrix<float, 3, 1> ComputeMinBound<3>( cudaStream_t stream, const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points); template Eigen::Matrix<float, 2, 1> ComputeMaxBound<2>( const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points); template Eigen::Matrix<float, 2, 1> ComputeMaxBound<2>( cudaStream_t stream, const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points); template Eigen::Matrix<float, 3, 1> ComputeMaxBound<3>( const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points); template Eigen::Matrix<float, 3, 1> ComputeMaxBound<3>( cudaStream_t stream, const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points); template Eigen::Matrix<float, 2, 1> ComputeCenter<2>( const utility::device_vector<Eigen::Matrix<float, 2, 1>> &points); template Eigen::Matrix<float, 3, 1> ComputeCenter<3>( const utility::device_vector<Eigen::Matrix<float, 3, 1>> &points); void ResizeAndPaintUniformColor(utility::device_vector<Eigen::Vector3f> &colors, const size_t size, const Eigen::Vector3f &color) { colors.resize(size); Eigen::Vector3f clipped_color = color; if (color.minCoeff() < 0 || color.maxCoeff() > 1) { utility::LogWarning( "invalid color in PaintUniformColor, clipping to [0, 1]"); clipped_color = clipped_color.array() .max(Eigen::Vector3f(0, 0, 0).array()) .matrix(); clipped_color = clipped_color.array() .min(Eigen::Vector3f(1, 1, 1).array()) .matrix(); } thrust::fill(colors.begin(), colors.end(), clipped_color); } template <int Dim> void TransformPoints( const Eigen::Matrix<float, Dim + 1, Dim + 1> &transformation, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { TransformPoints<Dim>(0, transformation, points); } template <int Dim> void TransformPoints( cudaStream_t stream, const Eigen::Matrix<float, Dim + 1, Dim + 1> &transformation, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points) { transform_points_functor<Dim> func(transformation); thrust::for_each(utility::exec_policy(stream)->on(stream), points.begin(), points.end(), func); } template void TransformPoints<2>(const Eigen::Matrix3f &transformation, utility::device_vector<Eigen::Vector2f> &points); template void TransformPoints<2>(cudaStream_t stream, const Eigen::Matrix3f &transformation, utility::device_vector<Eigen::Vector2f> &points); template void TransformPoints<3>(const Eigen::Matrix4f &transformation, utility::device_vector<Eigen::Vector3f> &points); template void TransformPoints<3>(cudaStream_t stream, const Eigen::Matrix4f &transformation, utility::device_vector<Eigen::Vector3f> &points); void TransformNormals(const Eigen::Matrix4f &transformation, utility::device_vector<Eigen::Vector3f> &normals) { TransformNormals(0, transformation, normals); } void TransformNormals(cudaStream_t stream, const Eigen::Matrix4f &transformation, utility::device_vector<Eigen::Vector3f> &normals) { transform_normals_functor func(transformation); thrust::for_each(utility::exec_policy(stream)->on(stream), normals.begin(), normals.end(), func); } template <int Dim> void 
TranslatePoints( const Eigen::Matrix<float, Dim, 1> &translation, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points, bool relative) { Eigen::Matrix<float, Dim, 1> transform = translation; if (!relative) { transform -= ComputeCenter<Dim>(points); } thrust::for_each(points.begin(), points.end(), [=] __device__(Eigen::Matrix<float, Dim, 1> & pt) { pt += transform; }); } template <int Dim> void ScalePoints(const float scale, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points, bool center) { Eigen::Matrix<float, Dim, 1> points_center = Eigen::Matrix<float, Dim, 1>::Zero(); if (center && !points.empty()) { points_center = ComputeCenter<Dim>(points); } thrust::for_each(points.begin(), points.end(), [=] __device__(Eigen::Matrix<float, Dim, 1> & pt) { pt = (pt - points_center) * scale + points_center; }); } template void TranslatePoints<2>(const Eigen::Vector2f &translation, utility::device_vector<Eigen::Vector2f> &points, bool relative); template void TranslatePoints<3>(const Eigen::Vector3f &translation, utility::device_vector<Eigen::Vector3f> &points, bool relative); template void ScalePoints<2>(const float scale, utility::device_vector<Eigen::Vector2f> &points, bool center); template void ScalePoints<3>(const float scale, utility::device_vector<Eigen::Vector3f> &points, bool center); template <int Dim> void RotatePoints(const Eigen::Matrix<float, Dim, Dim> &R, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points, bool center) { RotatePoints<Dim>(0, R, points, center); } template <int Dim> void RotatePoints(cudaStream_t stream, const Eigen::Matrix<float, Dim, Dim> &R, utility::device_vector<Eigen::Matrix<float, Dim, 1>> &points, bool center) { Eigen::Matrix<float, Dim, 1> points_center = Eigen::Matrix<float, Dim, 1>::Zero(); if (center && !points.empty()) { points_center = ComputeCenter<Dim>(points); } thrust::for_each(utility::exec_policy(stream)->on(stream), points.begin(), points.end(), [=] __device__(Eigen::Matrix<float, Dim, 1> & pt) { pt = R * (pt - points_center) + points_center; }); } template void RotatePoints<2>(const Eigen::Matrix2f &R, utility::device_vector<Eigen::Vector2f> &points, bool center); template void RotatePoints<3>(const Eigen::Matrix3f &R, utility::device_vector<Eigen::Vector3f> &points, bool center); template void RotatePoints<2>(cudaStream_t stream, const Eigen::Matrix2f &R, utility::device_vector<Eigen::Vector2f> &points, bool center); template void RotatePoints<3>(cudaStream_t stream, const Eigen::Matrix3f &R, utility::device_vector<Eigen::Vector3f> &points, bool center); void RotateNormals(const Eigen::Matrix3f &R, utility::device_vector<Eigen::Vector3f> &normals) { RotateNormals(0, R, normals); } void RotateNormals(cudaStream_t stream, const Eigen::Matrix3f &R, utility::device_vector<Eigen::Vector3f> &normals) { thrust::for_each(utility::exec_policy(stream)->on(stream), normals.begin(), normals.end(), [=] __device__(Eigen::Vector3f & normal) { normal = R * normal; }); } Eigen::Matrix3f GetRotationMatrixFromXYZ(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixX(rotation(0)) * cupoch::utility::RotationMatrixY(rotation(1)) * cupoch::utility::RotationMatrixZ(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromYZX(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixY(rotation(0)) * cupoch::utility::RotationMatrixZ(rotation(1)) * cupoch::utility::RotationMatrixX(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromZXY(const Eigen::Vector3f &rotation) { return 
cupoch::utility::RotationMatrixZ(rotation(0)) * cupoch::utility::RotationMatrixX(rotation(1)) * cupoch::utility::RotationMatrixY(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromXZY(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixX(rotation(0)) * cupoch::utility::RotationMatrixZ(rotation(1)) * cupoch::utility::RotationMatrixY(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromZYX(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixZ(rotation(0)) * cupoch::utility::RotationMatrixY(rotation(1)) * cupoch::utility::RotationMatrixX(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromYXZ(const Eigen::Vector3f &rotation) { return cupoch::utility::RotationMatrixY(rotation(0)) * cupoch::utility::RotationMatrixX(rotation(1)) * cupoch::utility::RotationMatrixZ(rotation(2)); } Eigen::Matrix3f GetRotationMatrixFromAxisAngle( const Eigen::Vector3f &rotation) { const float phi = rotation.norm(); return Eigen::AngleAxisf(phi, rotation / phi).toRotationMatrix(); } Eigen::Matrix3f GetRotationMatrixFromQuaternion( const Eigen::Vector4f &rotation) { return Eigen::Quaternionf(rotation(0), rotation(1), rotation(2), rotation(3)) .normalized() .toRotationMatrix(); } } // namespace geometry } // namespace cupoch
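transform_points_functor above does its per-point work through Eigen on the device. For readers without Eigen, this is a minimal standalone sketch of the same operation, assuming a row-major 4x4 matrix m passed as a flat array (Eigen itself defaults to column-major storage, so treat this as illustration only): lift the point to homogeneous coordinates, multiply, then divide by w.

#include <cuda_runtime.h>

__device__ float3 transform_point(const float m[16], float3 p)
{
    // Homogeneous transform: [x y z w]^T = m * [p.x p.y p.z 1]^T
    float x = m[0]  * p.x + m[1]  * p.y + m[2]  * p.z + m[3];
    float y = m[4]  * p.x + m[5]  * p.y + m[6]  * p.z + m[7];
    float z = m[8]  * p.x + m[9]  * p.y + m[10] * p.z + m[11];
    float w = m[12] * p.x + m[13] * p.y + m[14] * p.z + m[15];
    return make_float3(x / w, y / w, z / w);          // perspective divide
}

Normals go through the same matrix with a fourth component of 0 and no divide, which is exactly what transform_normals_functor does.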
373e085231b68d5165bcfc3ab9190f18e0cee856.hip
// !!! This is a file automatically generated by hipify!!! #include "simmap.cuh" #include "call.cuh" SimMap::SimMap() { //no device memory allocated until init is called during runtime. width = 0; height = 0; maxLayers = 0; } SimMap::SimMap(int iheight = 1, int iwidth = 1, int imaxLayers = 1) { init(iheight,iwidth,imaxLayers); } void SimMap::init(int iwidth, int iheight, int imaxLayers) { //set major constants width = iwidth; height = iheight; maxLayers = imaxLayers; // selectionMask = SimLayer(width, height, "mask"); waterLevel = SimLayer(width, height, "water"); waterLast = SimLayer(width, height, "waterLast"); particles = SimLayer(width, height, "particles"); sedimentation = SimLayer(width, height, "sedimentation"); soilHydrataion = SimLayer(width, height, "soilHydrataion"); waterFlowVert = SimLayer(width, height, "waterFlowVert"); waterFlowHor = SimLayer(width, height, "waterFlowHor"); waterCellFlowVert = SimLayer(width, height, "waterCellFlowVert"); waterCellFlowHor = SimLayer(width, height, "waterCellFlowHor"); maxslope = SimLayer(width, height, "Max Slope"); sum = SimLayer(width, height, "sum"); selectionMask.blankData(1); waterLevel.blankData(0); waterLast.blankData(0); particles.blankData(0); sedimentation.blankData(0); soilHydrataion.blankData(0); waterFlowVert.blankData(0); waterFlowHor.blankData(0); waterCellFlowVert.blankData(0); waterCellFlowHor.blankData(0); maxslope.blankData(0); sum.blankData(0); } void SimMap::passLayerListPointers() { passPolygons(); free(h_layerDataList); free(h_layerMatIndexList); int sizer = layers.size() + 14; h_layerDataList = new float*[sizer]; h_layerMatIndexList = new int[layers.size()]; //get data pointers up to date. h_layerDataList[layers.size()] = waterLevel.getDataPtr(); h_layerDataList[layers.size()+1] = selectionMask.getDataPtr(); h_layerDataList[layers.size()+2] = sum.getDataPtr(); h_layerDataList[layers.size() + 3] = waterLast.getDataPtr(); h_layerDataList[layers.size() + 4] = particles.getDataPtr(); h_layerDataList[layers.size() + 5] = sedimentation.getDataPtr(); h_layerDataList[layers.size() + 6] = soilHydrataion.getDataPtr(); h_layerDataList[layers.size() + 7] = waterFlowVert.getDataPtr(); h_layerDataList[layers.size() + 8] = waterFlowHor.getDataPtr(); h_layerDataList[layers.size() + 9] = waterCellFlowVert.getDataPtr(); h_layerDataList[layers.size() + 10] = waterCellFlowHor.getDataPtr(); h_layerDataList[layers.size() + 11] = maxslope.getDataPtr(); h_layerDataList[layers.size() + 12] = polySelect.getDeviceDataPtr(); h_layerDataList[layers.size() + 13] = waterSprinkler.getDeviceDataPtr(); for (int i = layers.size()-1; i >= 0; --i) { h_layerDataList[i] = layers[i].getDataPtr(); h_layerMatIndexList[i] = layers[i].getMaterialIdx(); } //copy the pointers to device. size_t datasize = sizeof(float*)*(sizer); size_t datasizeMat = sizeof(int)*layers.size(); hipDeviceSynchronize(); hipError_t err = hipFree(d_layerDataList); err = hipMalloc(&d_layerDataList, datasize); err = hipMemcpy(d_layerDataList, h_layerDataList, datasize, hipMemcpyHostToDevice); // err = hipFree(d_layerMatIndexList); err = hipMalloc(&d_layerMatIndexList, datasizeMat); err = hipMemcpy(d_layerMatIndexList, h_layerMatIndexList, datasizeMat, hipMemcpyHostToDevice); } void SimMap::passPolygons() { polySelect.passData(); waterSprinkler.passData(); } void SimMap::passMaterialListPointers() { free(h_materialDataList); h_materialDataList = new float*[materials.size()]; //get data pointers up to date. 
for (int i = 0; i < materials.size(); ++i) { materials[i].passMaterialData(); h_materialDataList[i] = materials[i].getDataPtr(); } //copy the pointers to device. int datasize = sizeof(float*)*materials.size(); hipFree(d_materialDataList); hipMalloc(&d_materialDataList, datasize); hipMemcpy(d_materialDataList, h_materialDataList, datasize, hipMemcpyHostToDevice); } void SimMap::setLayerData(int idx, float* data) { int ret = layers[idx].setData(data); passLayerListPointers(); } void SimMap::addMaterial(int idx, std::string name, std::string texturePath, SimMaterial::ErosionParam params) { //fix indeces for (int i = 0; i < layers.size(); ++i) { if (layers[i].getMaterialIdx() >= idx) { //increase the higher or equal layers by 1. layers[i].setMaterialIdx(layers[i].getMaterialIdx() + 1); } } SimMaterial material(name, texturePath, params); materials.insert(materials.begin() + idx, material); //update the gpu records. passMaterialListPointers(); } void SimMap::addLayer(int idx, std::string name) { SimLayer layer(width, height, name); layers.insert(layers.begin()+idx, layer); } void SimMap::addLayer(int idx, std::string name, int materialIdx) { SimLayer layer(width, height, name, materialIdx); layers.insert(layers.begin() + idx, layer); } SimLayer* SimMap::getLayer(int idx) { return &layers[idx]; } SimMaterial* SimMap::getMaterial(int idx) { return &materials[idx]; } bool SimMap::containsMaterial(std::string name) { for (int i = 0; i < materials.size(); ++i) { if (name.compare(materials[i].name) == 0) { return true; } } return false; } void SimMap::removeLayer() { layers.pop_back(); passLayerListPointers(); } void SimMap::removeLayer(int idx) { layers[idx].cleanUp(); layers.erase(layers.begin() + idx); passLayerListPointers(); } void SimMap::removeMaterial(int idx) { for (int i = 0; i < layers.size(); ++i) { if (layers[i].getMaterialIdx() > idx) { //decrease the higher layers by 1. 
layers[i].setMaterialIdx(layers[i].getMaterialIdx()-1); } else if (layers[i].getMaterialIdx() == idx) { //reset all layers using this material to 0 layers[i].setMaterialIdx(0); } } materials[idx].cleanUp(); materials.erase(materials.begin() + idx); passMaterialListPointers(); } int SimMap::getLayerCount() { return (int)layers.size(); } int SimMap::getWidth() { return width; } int SimMap::getHeight() { return height; } SimPolygon* SimMap::getPoly() { return &polySelect; } SimPolygon* SimMap::getSprinkler() { return &waterSprinkler; } int SimMap::getMaterialCount() { return materials.size(); } float** SimMap::getDeviceLayerDataList() { return d_layerDataList; } int* SimMap::getDeviceLayerMaterialIndexList() { return d_layerMatIndexList; } float** SimMap::getDeviceMaterialDataList() { return d_materialDataList; } float * SimMap::getMaskPtr() { return waterLevel.getDataPtr(); } float * SimMap::getWaterPtr() { return selectionMask.getDataPtr(); } void SimMap::setMutex(HANDLE handle) { simMutex = handle; } HANDLE SimMap::getMutex() { return simMutex; } void SimMap::callbackMove(int idx, bool up) { if (up) { iter_swap(layers.begin() + idx, layers.begin() + idx + 1); } else { iter_swap(layers.begin() + idx, layers.begin() + idx - 1); } passLayerListPointers(); } void SimMap::setLayerDataFromFile(const char * path, int idx, bool r, bool g, bool b, float scale){ //generate data float * data = new float[width*height]; cimg_library::CImg<unsigned char> img(path); int offset = img.height()*img.width(); for (unsigned int i = 0; i < height; ++i) { for (unsigned int j = 0; j < width; ++j) { int clampi = i % img.height(); int clampj = j % img.width(); data[width * i + j] = 0; if (r) data[width * i + j] += img[ img.width() * clampi + clampj] * scale/255.0f; if (g) data[width * i + j] += img[ offset + img.width() * clampi + clampj] * scale / 255.0f; if (b) data[width * i + j] += img[ offset * 2 + img.width() * clampi + clampj] * scale / 255.0f; } } // this->setLayerData(idx, data); free(data); } void __stdcall SimMap::callbackAdd(void *clientData, int idx, int rgb, float scale) { if (layers.size() >= maxLayers) { return; } std::string bmp_file = "data/"; bmp_file = bmp_file+(char *)clientData; //generate data // std::string layerName = (char *)clientData; if (layerName.find(".") != std::string::npos) { layerName = layerName.substr(0, layerName.find_last_of('.')); } this->addLayer(idx, layerName); // setLayerDataFromFile(bmp_file.c_str(), idx, rgb == 0 || rgb == 1, rgb == 0 || rgb == 2, rgb == 0 || rgb > 2, scale); passLayerListPointers(); } void SimMap::cleanseExistingLayers() { for (int i = 0; i < layers.size(); ++i) { layers[i].cleanUp(); } layers.clear(); hipFree(d_layerDataList); } void SimMap::callbackAddBlank(int idx) { std::string layerName = "blank layer"; this->addLayer(idx, layerName, 0); layers[idx].blankData(0); passLayerListPointers(); } void __stdcall SimMap::callbackAddMaterial(void *clientData, int idx) { std::string bmp_file = "tex/"; bmp_file = bmp_file + (char *)clientData; // std::string matName = "Material "; matName += std::to_string((int)materials.size()); // SimMaterial::ErosionParam params = {1,15,1,1}; // this->addMaterial(materials.size(), matName, bmp_file, params); } void SimMap::callbackRemove(int idx) { removeLayer(idx); } void SimMap::callbackRemoveMaterial(int idx) { removeMaterial(idx); } SimMap::~SimMap() { for (int i = 0; i < layers.size(); ++i) { layers[i].cleanUp(); } hipFree(d_layerDataList); printf(">>> Freeing SimMap with up to %d layers.\n", maxLayers); delete[] 
h_layerDataList; layers.clear(); }
373e085231b68d5165bcfc3ab9190f18e0cee856.cu
#include "simmap.cuh" #include "call.cuh" SimMap::SimMap() { //no device memory allocated until init is called during runtime. width = 0; height = 0; maxLayers = 0; } SimMap::SimMap(int iheight = 1, int iwidth = 1, int imaxLayers = 1) { init(iheight,iwidth,imaxLayers); } void SimMap::init(int iwidth, int iheight, int imaxLayers) { //set major constants width = iwidth; height = iheight; maxLayers = imaxLayers; // selectionMask = SimLayer(width, height, "mask"); waterLevel = SimLayer(width, height, "water"); waterLast = SimLayer(width, height, "waterLast"); particles = SimLayer(width, height, "particles"); sedimentation = SimLayer(width, height, "sedimentation"); soilHydrataion = SimLayer(width, height, "soilHydrataion"); waterFlowVert = SimLayer(width, height, "waterFlowVert"); waterFlowHor = SimLayer(width, height, "waterFlowHor"); waterCellFlowVert = SimLayer(width, height, "waterCellFlowVert"); waterCellFlowHor = SimLayer(width, height, "waterCellFlowHor"); maxslope = SimLayer(width, height, "Max Slope"); sum = SimLayer(width, height, "sum"); selectionMask.blankData(1); waterLevel.blankData(0); waterLast.blankData(0); particles.blankData(0); sedimentation.blankData(0); soilHydrataion.blankData(0); waterFlowVert.blankData(0); waterFlowHor.blankData(0); waterCellFlowVert.blankData(0); waterCellFlowHor.blankData(0); maxslope.blankData(0); sum.blankData(0); } void SimMap::passLayerListPointers() { passPolygons(); free(h_layerDataList); free(h_layerMatIndexList); int sizer = layers.size() + 14; h_layerDataList = new float*[sizer]; h_layerMatIndexList = new int[layers.size()]; //get data pointers up to date. h_layerDataList[layers.size()] = waterLevel.getDataPtr(); h_layerDataList[layers.size()+1] = selectionMask.getDataPtr(); h_layerDataList[layers.size()+2] = sum.getDataPtr(); h_layerDataList[layers.size() + 3] = waterLast.getDataPtr(); h_layerDataList[layers.size() + 4] = particles.getDataPtr(); h_layerDataList[layers.size() + 5] = sedimentation.getDataPtr(); h_layerDataList[layers.size() + 6] = soilHydrataion.getDataPtr(); h_layerDataList[layers.size() + 7] = waterFlowVert.getDataPtr(); h_layerDataList[layers.size() + 8] = waterFlowHor.getDataPtr(); h_layerDataList[layers.size() + 9] = waterCellFlowVert.getDataPtr(); h_layerDataList[layers.size() + 10] = waterCellFlowHor.getDataPtr(); h_layerDataList[layers.size() + 11] = maxslope.getDataPtr(); h_layerDataList[layers.size() + 12] = polySelect.getDeviceDataPtr(); h_layerDataList[layers.size() + 13] = waterSprinkler.getDeviceDataPtr(); for (int i = layers.size()-1; i >= 0; --i) { h_layerDataList[i] = layers[i].getDataPtr(); h_layerMatIndexList[i] = layers[i].getMaterialIdx(); } //copy the pointers to device. size_t datasize = sizeof(float*)*(sizer); size_t datasizeMat = sizeof(int)*layers.size(); cudaDeviceSynchronize(); cudaError_t err = cudaFree(d_layerDataList); err = cudaMalloc(&d_layerDataList, datasize); err = cudaMemcpy(d_layerDataList, h_layerDataList, datasize, cudaMemcpyHostToDevice); // err = cudaFree(d_layerMatIndexList); err = cudaMalloc(&d_layerMatIndexList, datasizeMat); err = cudaMemcpy(d_layerMatIndexList, h_layerMatIndexList, datasizeMat, cudaMemcpyHostToDevice); } void SimMap::passPolygons() { polySelect.passData(); waterSprinkler.passData(); } void SimMap::passMaterialListPointers() { free(h_materialDataList); h_materialDataList = new float*[materials.size()]; //get data pointers up to date. 
for (int i = 0; i < materials.size(); ++i) { materials[i].passMaterialData(); h_materialDataList[i] = materials[i].getDataPtr(); } //copy the pointers to device. int datasize = sizeof(float*)*materials.size(); cudaFree(d_materialDataList); cudaMalloc(&d_materialDataList, datasize); cudaMemcpy(d_materialDataList, h_materialDataList, datasize, cudaMemcpyHostToDevice); } void SimMap::setLayerData(int idx, float* data) { int ret = layers[idx].setData(data); passLayerListPointers(); } void SimMap::addMaterial(int idx, std::string name, std::string texturePath, SimMaterial::ErosionParam params) { //fix indeces for (int i = 0; i < layers.size(); ++i) { if (layers[i].getMaterialIdx() >= idx) { //increase the higher or equal layers by 1. layers[i].setMaterialIdx(layers[i].getMaterialIdx() + 1); } } SimMaterial material(name, texturePath, params); materials.insert(materials.begin() + idx, material); //update the gpu records. passMaterialListPointers(); } void SimMap::addLayer(int idx, std::string name) { SimLayer layer(width, height, name); layers.insert(layers.begin()+idx, layer); } void SimMap::addLayer(int idx, std::string name, int materialIdx) { SimLayer layer(width, height, name, materialIdx); layers.insert(layers.begin() + idx, layer); } SimLayer* SimMap::getLayer(int idx) { return &layers[idx]; } SimMaterial* SimMap::getMaterial(int idx) { return &materials[idx]; } bool SimMap::containsMaterial(std::string name) { for (int i = 0; i < materials.size(); ++i) { if (name.compare(materials[i].name) == 0) { return true; } } return false; } void SimMap::removeLayer() { layers.pop_back(); passLayerListPointers(); } void SimMap::removeLayer(int idx) { layers[idx].cleanUp(); layers.erase(layers.begin() + idx); passLayerListPointers(); } void SimMap::removeMaterial(int idx) { for (int i = 0; i < layers.size(); ++i) { if (layers[i].getMaterialIdx() > idx) { //decrease the higher layers by 1. 
layers[i].setMaterialIdx(layers[i].getMaterialIdx()-1); } else if (layers[i].getMaterialIdx() == idx) { //reset all layers using this material to 0 layers[i].setMaterialIdx(0); } } materials[idx].cleanUp(); materials.erase(materials.begin() + idx); passMaterialListPointers(); } int SimMap::getLayerCount() { return (int)layers.size(); } int SimMap::getWidth() { return width; } int SimMap::getHeight() { return height; } SimPolygon* SimMap::getPoly() { return &polySelect; } SimPolygon* SimMap::getSprinkler() { return &waterSprinkler; } int SimMap::getMaterialCount() { return materials.size(); } float** SimMap::getDeviceLayerDataList() { return d_layerDataList; } int* SimMap::getDeviceLayerMaterialIndexList() { return d_layerMatIndexList; } float** SimMap::getDeviceMaterialDataList() { return d_materialDataList; } float * SimMap::getMaskPtr() { return waterLevel.getDataPtr(); } float * SimMap::getWaterPtr() { return selectionMask.getDataPtr(); } void SimMap::setMutex(HANDLE handle) { simMutex = handle; } HANDLE SimMap::getMutex() { return simMutex; } void SimMap::callbackMove(int idx, bool up) { if (up) { iter_swap(layers.begin() + idx, layers.begin() + idx + 1); } else { iter_swap(layers.begin() + idx, layers.begin() + idx - 1); } passLayerListPointers(); } void SimMap::setLayerDataFromFile(const char * path, int idx, bool r, bool g, bool b, float scale){ //generate data float * data = new float[width*height]; cimg_library::CImg<unsigned char> img(path); int offset = img.height()*img.width(); for (unsigned int i = 0; i < height; ++i) { for (unsigned int j = 0; j < width; ++j) { int clampi = i % img.height(); int clampj = j % img.width(); data[width * i + j] = 0; if (r) data[width * i + j] += img[ img.width() * clampi + clampj] * scale/255.0f; if (g) data[width * i + j] += img[ offset + img.width() * clampi + clampj] * scale / 255.0f; if (b) data[width * i + j] += img[ offset * 2 + img.width() * clampi + clampj] * scale / 255.0f; } } // this->setLayerData(idx, data); free(data); } void __stdcall SimMap::callbackAdd(void *clientData, int idx, int rgb, float scale) { if (layers.size() >= maxLayers) { return; } std::string bmp_file = "data/"; bmp_file = bmp_file+(char *)clientData; //generate data // std::string layerName = (char *)clientData; if (layerName.find(".") != std::string::npos) { layerName = layerName.substr(0, layerName.find_last_of('.')); } this->addLayer(idx, layerName); // setLayerDataFromFile(bmp_file.c_str(), idx, rgb == 0 || rgb == 1, rgb == 0 || rgb == 2, rgb == 0 || rgb > 2, scale); passLayerListPointers(); } void SimMap::cleanseExistingLayers() { for (int i = 0; i < layers.size(); ++i) { layers[i].cleanUp(); } layers.clear(); cudaFree(d_layerDataList); } void SimMap::callbackAddBlank(int idx) { std::string layerName = "blank layer"; this->addLayer(idx, layerName, 0); layers[idx].blankData(0); passLayerListPointers(); } void __stdcall SimMap::callbackAddMaterial(void *clientData, int idx) { std::string bmp_file = "tex/"; bmp_file = bmp_file + (char *)clientData; // std::string matName = "Material "; matName += std::to_string((int)materials.size()); // SimMaterial::ErosionParam params = {1,15,1,1}; // this->addMaterial(materials.size(), matName, bmp_file, params); } void SimMap::callbackRemove(int idx) { removeLayer(idx); } void SimMap::callbackRemoveMaterial(int idx) { removeMaterial(idx); } SimMap::~SimMap() { for (int i = 0; i < layers.size(); ++i) { layers[i].cleanUp(); } cudaFree(d_layerDataList); printf(">>> Freeing SimMap with up to %d layers.\n", maxLayers); delete[] 
h_layerDataList; layers.clear(); }
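passLayerListPointers() above is an instance of the pointer-table idiom: each layer owns its own device buffer, the host gathers those device pointers into an array, and that array is itself copied to the device so kernels can index layers with double indirection. A bare-bones sketch of the pattern, with illustrative names (upload_layer_table and touch_layers are not SimMap members):

#include <cuda_runtime.h>
#include <vector>

// d_layers holds *device* pointers, but the vector itself lives on the host.
float **upload_layer_table(const std::vector<float*> &d_layers)
{
    float **d_table = nullptr;
    size_t bytes = d_layers.size() * sizeof(float*);
    cudaMalloc(&d_table, bytes);
    cudaMemcpy(d_table, d_layers.data(), bytes, cudaMemcpyHostToDevice);
    return d_table;                     // usable as a float** inside kernels
}

__global__ void touch_layers(float **layerData, int numLayers, int cells)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= cells) return;
    for (int l = 0; l < numLayers; ++l)
        layerData[l][i] += 0.0f;        // table entry first, then the cell
}

The table must be re-uploaded whenever a layer buffer is added, removed or reallocated, which is why SimMap calls passLayerListPointers() after every such operation.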
mat_copy.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <chrono> #include "array2d.h" #include "cuda_helper.h" #include "mat_bench.h" #define value_t double #define index_t int __global__ void kernel(index_t Nx, index_t Ny, value_t *x, value_t *y) { // grid moves along last index first // int N_grid_j = (Ny + blockDim.y - 1) / blockDim.y; // int grid_i = blockIdx.x / N_grid_j; // int grid_j = blockIdx.x - grid_i * N_grid_j; // grid moves along first index first int N_grid_i = (Nx + blockDim.x - 1) / blockDim.x; int grid_j = blockIdx.x / N_grid_i; int grid_i = blockIdx.x - grid_j * N_grid_i; int i = grid_i * blockDim.x + threadIdx.x; int j = grid_j * blockDim.y + threadIdx.y; int gid = i * Ny + j; if (i < Nx && j < Ny) y[gid] = x[gid]; } struct mat_copy : public mat_bench<value_t, index_t> { void benchmark() { print_bench(); std::cout << "\nSimulation info: 2d mat copy\n"; value_t **x = create_array2d<value_t, index_t>(side_size, side_size); value_t **y = create_array2d<value_t, index_t>(side_size, side_size); #pragma omp parallel for for (index_t i = 0; i < side_size; i++) { for (index_t j = 0; j < side_size; j++) { x[i][j] = 1.0; y[i][j] = 0.0; } } value_t *d_x, *d_y; value_t *h_x = x[0], *h_y = y[0]; checkCudaErrors(hipMalloc(&d_x, total_size * sizeof(value_t))); checkCudaErrors(hipMalloc(&d_y, total_size * sizeof(value_t))); hipEvent_t start, stop; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); checkCudaErrors(hipMemcpy(d_x, h_x, total_size * sizeof(value_t), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_y, h_y, total_size * sizeof(value_t), hipMemcpyHostToDevice)); dim3 blockd3 = dim3(block0, block1, 1); dim3 grid = calc_grid2d2(blockd3, side_size, side_size); std::cout << " Block: " << blockd3.x << "(x) X " << blockd3.y << "(y)\n" << " Grid size: " << grid.x << "\n\n"; loops = 0; auto startcpu = std::chrono::high_resolution_clock::now(); checkCudaErrors(hipEventRecord(start)); while ((std::chrono::duration_cast<std::chrono::milliseconds>( std::chrono::high_resolution_clock::now() - startcpu) .count()) < 1000.0 * benchtime) { hipLaunchKernelGGL(( kernel), dim3(grid), dim3(blockd3), 0, 0, side_size, side_size, d_x, d_y); checkCudaErrorsAfterKernels; loops++; } checkCudaErrors(hipEventRecord(stop)); checkCudaErrors(hipEventSynchronize(stop)); float du = 0; checkCudaErrors(hipEventElapsedTime(&du, start, stop)); duration = 1.0e-3 * du; checkCudaErrors(hipMemcpy(h_y, d_y, total_size * sizeof(value_t), hipMemcpyDeviceToHost)); test_result(y, value_t(total_size)); print_performance(); delete[] x; delete[] y; checkCudaErrors(hipFree(d_x)); checkCudaErrors(hipFree(d_y)); } mat_copy(int narg, char **arg) : mat_bench<value_t, index_t>(narg, arg) { memory_transfer_per_loop = 2.0 * sizeof(value_t) * double(total_size) / (1024.0 * 1024.0 * 1024.0); } }; int main(int narg, char **arg) { check_cuda_device(); mat_copy test(narg, arg); test.benchmark(); }
mat_copy.cu
#include <chrono> #include "array2d.h" #include "cuda_helper.h" #include "mat_bench.h" #define value_t double #define index_t int __global__ void kernel(index_t Nx, index_t Ny, value_t *x, value_t *y) { // grid moves along last index first // int N_grid_j = (Ny + blockDim.y - 1) / blockDim.y; // int grid_i = blockIdx.x / N_grid_j; // int grid_j = blockIdx.x - grid_i * N_grid_j; // grid moves along first index first int N_grid_i = (Nx + blockDim.x - 1) / blockDim.x; int grid_j = blockIdx.x / N_grid_i; int grid_i = blockIdx.x - grid_j * N_grid_i; int i = grid_i * blockDim.x + threadIdx.x; int j = grid_j * blockDim.y + threadIdx.y; int gid = i * Ny + j; if (i < Nx && j < Ny) y[gid] = x[gid]; } struct mat_copy : public mat_bench<value_t, index_t> { void benchmark() { print_bench(); std::cout << "\nSimulation info: 2d mat copy\n"; value_t **x = create_array2d<value_t, index_t>(side_size, side_size); value_t **y = create_array2d<value_t, index_t>(side_size, side_size); #pragma omp parallel for for (index_t i = 0; i < side_size; i++) { for (index_t j = 0; j < side_size; j++) { x[i][j] = 1.0; y[i][j] = 0.0; } } value_t *d_x, *d_y; value_t *h_x = x[0], *h_y = y[0]; checkCudaErrors(cudaMalloc(&d_x, total_size * sizeof(value_t))); checkCudaErrors(cudaMalloc(&d_y, total_size * sizeof(value_t))); cudaEvent_t start, stop; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); checkCudaErrors(cudaMemcpy(d_x, h_x, total_size * sizeof(value_t), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_y, h_y, total_size * sizeof(value_t), cudaMemcpyHostToDevice)); dim3 blockd3 = dim3(block0, block1, 1); dim3 grid = calc_grid2d2(blockd3, side_size, side_size); std::cout << " Block: " << blockd3.x << "(x) X " << blockd3.y << "(y)\n" << " Grid size: " << grid.x << "\n\n"; loops = 0; auto startcpu = std::chrono::high_resolution_clock::now(); checkCudaErrors(cudaEventRecord(start)); while ((std::chrono::duration_cast<std::chrono::milliseconds>( std::chrono::high_resolution_clock::now() - startcpu) .count()) < 1000.0 * benchtime) { kernel<<<grid, blockd3>>>(side_size, side_size, d_x, d_y); checkCudaErrorsAfterKernels; loops++; } checkCudaErrors(cudaEventRecord(stop)); checkCudaErrors(cudaEventSynchronize(stop)); float du = 0; checkCudaErrors(cudaEventElapsedTime(&du, start, stop)); duration = 1.0e-3 * du; checkCudaErrors(cudaMemcpy(h_y, d_y, total_size * sizeof(value_t), cudaMemcpyDeviceToHost)); test_result(y, value_t(total_size)); print_performance(); delete[] x; delete[] y; checkCudaErrors(cudaFree(d_x)); checkCudaErrors(cudaFree(d_y)); } mat_copy(int narg, char **arg) : mat_bench<value_t, index_t>(narg, arg) { memory_transfer_per_loop = 2.0 * sizeof(value_t) * double(total_size) / (1024.0 * 1024.0 * 1024.0); } }; int main(int narg, char **arg) { check_cuda_device(); mat_copy test(narg, arg); test.benchmark(); }
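The kernel's decoding of blockIdx.x only works if the launch grid is flattened as the product of the per-axis block counts, with the first index varying fastest. calc_grid2d2 is defined elsewhere in the benchmark headers, so the helper below is only an assumed equivalent of what it must return:

#include <cuda_runtime.h>

dim3 flat_grid(dim3 block, int Nx, int Ny)
{
    int n_grid_i = (Nx + block.x - 1) / block.x;   // blocks along the first index
    int n_grid_j = (Ny + block.y - 1) / block.y;   // blocks along the second index
    return dim3(n_grid_i * n_grid_j, 1, 1);        // flattened into gridDim.x
}

// The kernel inverts this flattening as:
//   n_grid_i = (Nx + blockDim.x - 1) / blockDim.x;
//   grid_j   = blockIdx.x / n_grid_i;
//   grid_i   = blockIdx.x - grid_j * n_grid_i;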
43b57dafe7f267cf7dfafa06d62223f842b0b0ac.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "utils.h" #include <stdio.h> #include <math.h> /* ceil */ // Max Threads per block in GeForce 210 #define TxB 512 // Gamma correction kernel __global__ void gamma_correction_kernel(const uchar4* const rgbaImage, uchar4* const outputImage, int numRows, int numCols, float gamma) { // The mapping of the uchar4 components to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // Note: the alpha channel is ignored int i = blockIdx.x * blockDim.x + threadIdx.x; if( i < numRows*numCols){ uchar4 px = rgbaImage[i]; // thread pixel to process //printf( "Before: R: %i G: %i B %i \n",px.x,px.y,px.z ); unsigned char rcolor = round(pow((px.x / 255.0f), (1.0f / gamma) ) * 255.0f); outputImage[i].x = (rcolor > 255.0f) ? 255.0f : rcolor; unsigned char gcolor = round(pow((px.y / 255.0f), (1.0f / gamma) ) * 255.0f); outputImage[i].y = (gcolor > 255.0f) ? 255.0f : gcolor; unsigned char bcolor = round(pow((px.z / 255.0f), (1.0f / gamma) ) * 255.0f); outputImage[i].z = (bcolor > 255.0f) ? 255.0f : bcolor; //printf( "After: R: %i G: %i B %i \n",outputImage[i].x,outputImage[i].y,outputImage[i].z ); } } void gamma_correction(uchar4* const d_rgbaImage, uchar4* const d_outputImage, size_t numRows, size_t numCols, float gamma) { // Since the relative position of the pixels does not matter // in this algorithm, the strategy for assigning threads to // blocks and grids is simply to cover // all the pixels with threads along the X axis long long int total_px = numRows * numCols; // total pixels long int grids_n = ceil(total_px / (double)TxB); // number of blocks, rounded up (plain integer division would truncate) const dim3 blockSize(TxB, 1, 1); const dim3 gridSize(grids_n, 1, 1); hipLaunchKernelGGL(( gamma_correction_kernel), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_outputImage, numRows, numCols, gamma); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); }
43b57dafe7f267cf7dfafa06d62223f842b0b0ac.cu
#include "utils.h" #include <stdio.h> #include <math.h> /* ceil */ // Max Threads per block in GeForce 210 #define TxB 512 //Kernel corrección gamma __global__ void gamma_correction_kernel(const uchar4* const rgbaImage, uchar4* const outputImage, int numRows, int numCols, float gamma) { // El mapeo de los componentes uchar4 aRGBA es: // .x -> R ; .y -> G ; .z -> B ; .w -> A //Nota: Ignoramos el canal alfa int i = blockIdx.x * blockDim.x + threadIdx.x; if( i < numRows*numCols){ uchar4 px = rgbaImage[i]; // thread pixel to process //printf( "Antes: R: %i G: %i B %i \n",px.x,px.y,px.z ); unsigned char rcolor = round(pow((px.x / 255.0f), (1.0f / gamma) ) * 255.0f); outputImage[i].x = (rcolor > 255.0f) ? 255.0f : rcolor; unsigned char gcolor = round(pow((px.y / 255.0f), (1.0f / gamma) ) * 255.0f); outputImage[i].y = (gcolor > 255.0f) ? 255.0f : gcolor; unsigned char bcolor = round(pow((px.z / 255.0f), (1.0f / gamma) ) * 255.0f); outputImage[i].z = (bcolor > 255.0f) ? 255.0f : bcolor; //printf( "Despues: R: %i G: %i B %i \n",outputImage[i].x,outputImage[i].y,outputImage[i].z ); } } void gamma_correction(uchar4* const d_rgbaImage, uchar4* const d_outputImage, size_t numRows, size_t numCols, float gamma) { // Dado que no importa la posicion relativa de los pixels // en este algoritmo, la estrategia para asignar hilos a // bloques y rejillas sera sencillamente la de cubrir // a todos los pixeles con hebras en el eje X long long int total_px = numRows * numCols; // total pixels long int grids_n = ceil(total_px / TxB); // grids numer const dim3 blockSize(TxB, 1, 1); const dim3 gridSize(grids_n, 1, 1); gamma_correction_kernel<<<gridSize, blockSize>>>(d_rgbaImage, d_outputImage, numRows, numCols, gamma); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); }
4392c61c4293952988c90cea6246284b63338e59.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" #define BLOCK_WIDTH 16 #define BLOCK_HEIGHT 16 __global__ void filter(int *Input_Image, int *Output_Image, int Image_Width, int Image_Height) { int surround[9]; int iterator; const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; if( (x >= (Image_Width - 1)) || (y >= Image_Height - 1) || (x == 0) || (y == 0)) return; // --- Fill array private to the threads iterator = 0; for (int r = x - 1; r <= x + 1; r++) { for (int c = y - 1; c <= y + 1; c++) { surround[iterator] = Input_Image[c*Image_Width+r]; iterator++; } } // --- Partially sort the private array (selection sort) to find the median for (int i=0; i<5; ++i) { // --- Find the position of the minimum element int minval=i; for (int l=i+1; l<9; ++l) if (surround[l] < surround[minval]) minval=l; // --- Put found minimum element in its place int temp = surround[i]; surround[i]=surround[minval]; surround[minval]=temp; } // --- Pick the middle one Output_Image[(y*Image_Width)+x]=surround[4]; }
4392c61c4293952988c90cea6246284b63338e59.cu
extern "C" #define BLOCK_WIDTH 16 #define BLOCK_HEIGHT 16 __global__ void filter(int *Input_Image, int *Output_Image, int Image_Width, int Image_Height) { int surround[9]; int iterator; const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; if( (x >= (Image_Width - 1)) || (y >= Image_Height - 1) || (x == 0) || (y == 0)) return; // --- Fill array private to the threads iterator = 0; for (int r = x - 1; r <= x + 1; r++) { for (int c = y - 1; c <= y + 1; c++) { surround[iterator] = Input_Image[c*Image_Width+r]; iterator++; } } // --- Sort private array to find the median using Bubble Short for (int i=0; i<5; ++i) { // --- Find the position of the minimum element int minval=i; for (int l=i+1; l<9; ++l) if (surround[l] < surround[minval]) minval=l; // --- Put found minimum element in its place int temp = surround[i]; surround[i]=surround[minval]; surround[minval]=temp; } // --- Pick the middle one Output_Image[(y*Image_Width)+x]=surround[4]; }
b3b8b72d685991f971ab7fbd1d27b0c700c27028.hip
// !!! This is a file automatically generated by hipify!!! #include <gdf/gdf.h> #include <gdf/utils.h> #include <gdf/errorutils.h> #include "rmm.h" #include <hipcub/hipcub.hpp> struct RadixSortPlan{ const size_t num_items; // temporary storage void *storage; size_t storage_bytes; void *back_key, *back_val; size_t back_key_size, back_val_size; hipStream_t stream; int descending; unsigned begin_bit, end_bit; RadixSortPlan(size_t num_items, int descending, unsigned begin_bit, unsigned end_bit) : num_items(num_items), storage(nullptr), storage_bytes(0), back_key(nullptr), back_val(nullptr), back_key_size(0), back_val_size(0), stream(0), descending(descending), begin_bit(begin_bit), end_bit(end_bit) {} gdf_error setup(size_t sizeof_key, size_t sizeof_val) { back_key_size = num_items * sizeof_key; back_val_size = num_items * sizeof_val; RMM_TRY( rmmAlloc(&back_key, back_key_size, stream) ); // TODO: non-default stream RMM_TRY( rmmAlloc(&back_val, back_val_size, stream) ); return GDF_SUCCESS; } gdf_error teardown() { RMM_TRY( rmmFree(back_key, stream) ); RMM_TRY( rmmFree(back_val, stream) ); RMM_TRY( rmmFree(storage, stream) ); return GDF_SUCCESS; } }; template <typename Tk, typename Tv> struct RadixSort { static gdf_error sort( RadixSortPlan *plan, Tk *d_key_buf, Tv *d_value_buf) { unsigned num_items = plan->num_items; Tk *d_key_alt_buf = (Tk*)plan->back_key; Tv *d_value_alt_buf = (Tv*)plan->back_val; hipStream_t stream = plan->stream; int descending = plan->descending; unsigned begin_bit = plan->begin_bit; unsigned end_bit = plan->end_bit; cub::DoubleBuffer<Tk> d_keys(d_key_buf, d_key_alt_buf); if (d_value_buf) { // Sort KeyValue pairs cub::DoubleBuffer<Tv> d_values(d_value_buf, d_value_alt_buf); if (descending) { hipcub::DeviceRadixSort::SortPairsDescending(plan->storage, plan->storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, stream); } else { hipcub::DeviceRadixSort::SortPairs( plan->storage, plan->storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, stream ); } CUDA_CHECK_LAST(); if (plan->storage && d_value_buf != d_values.Current()){ hipMemcpyAsync(d_value_buf, d_value_alt_buf, num_items * sizeof(Tv), hipMemcpyDeviceToDevice, stream); CUDA_CHECK_LAST(); } } else { // Sort Keys only if (descending) { hipcub::DeviceRadixSort::SortKeysDescending( plan->storage, plan->storage_bytes, d_keys, num_items, begin_bit, end_bit, stream ); CUDA_CHECK_LAST() } else { hipcub::DeviceRadixSort::SortKeys( plan->storage, plan->storage_bytes, d_keys, num_items, begin_bit, end_bit, stream ); } CUDA_CHECK_LAST(); } if ( plan->storage ) { // We have operated and the result is not in front buffer if (d_key_buf != d_keys.Current()){ hipMemcpyAsync(d_key_buf, d_key_alt_buf, num_items * sizeof(Tk), hipMemcpyDeviceToDevice, stream); CUDA_CHECK_LAST(); } } else { // We have not operated. // Just checking for temporary storage requirement RMM_TRY( rmmAlloc(&plan->storage, plan->storage_bytes, plan->stream) ); // TODO: non-default stream CUDA_CHECK_LAST(); // Now that we have allocated, do real work. 
return sort(plan, d_key_buf, d_value_buf); } return GDF_SUCCESS; } }; gdf_radixsort_plan_type* cffi_wrap(RadixSortPlan* obj){ return reinterpret_cast<gdf_radixsort_plan_type*>(obj); } RadixSortPlan* cffi_unwrap(gdf_radixsort_plan_type* hdl){ return reinterpret_cast<RadixSortPlan*>(hdl); } gdf_radixsort_plan_type* gdf_radixsort_plan(size_t num_items, int descending, unsigned begin_bit, unsigned end_bit){ return cffi_wrap(new RadixSortPlan(num_items, descending, begin_bit, end_bit)); } gdf_error gdf_radixsort_plan_setup(gdf_radixsort_plan_type *hdl, size_t sizeof_key, size_t sizeof_val) { return cffi_unwrap(hdl)->setup(sizeof_key, sizeof_val); } gdf_error gdf_radixsort_plan_free(gdf_radixsort_plan_type *hdl) { auto plan = cffi_unwrap(hdl); gdf_error status = plan->teardown(); delete plan; return status; } #define WRAP(Fn, Tk, Tv) \ gdf_error gdf_radixsort_##Fn(gdf_radixsort_plan_type *hdl, \ gdf_column *keycol, \ gdf_column *valcol) \ { \ /* validity mask must be empty */ \ GDF_REQUIRE(!keycol->valid, GDF_VALIDITY_UNSUPPORTED); \ GDF_REQUIRE(!valcol->valid, GDF_VALIDITY_UNSUPPORTED); \ /* size of columns must match */ \ GDF_REQUIRE(keycol->size == valcol->size, GDF_COLUMN_SIZE_MISMATCH); \ RadixSortPlan *plan = cffi_unwrap(hdl); \ /* num_items must match */ \ GDF_REQUIRE(plan->num_items == keycol->size, GDF_COLUMN_SIZE_MISMATCH); \ /* back buffer size must match */ \ GDF_REQUIRE(sizeof(Tk) * plan->num_items == plan->back_key_size, \ GDF_COLUMN_SIZE_MISMATCH); \ GDF_REQUIRE(sizeof(Tv) * plan->num_items == plan->back_val_size, \ GDF_COLUMN_SIZE_MISMATCH); \ /* Do sort */ \ return RadixSort<Tk, Tv>::sort(plan, \ (Tk*)keycol->data, (Tv*)valcol->data); \ } WRAP(f32, float, int64_t) WRAP(f64, double, int64_t) WRAP(i8, int8_t, int64_t) WRAP(i32, int32_t, int64_t) WRAP(i64, int64_t, int64_t) gdf_error gdf_radixsort_generic(gdf_radixsort_plan_type *hdl, gdf_column *keycol, gdf_column *valcol) { GDF_REQUIRE(valcol->dtype == GDF_INT64, GDF_UNSUPPORTED_DTYPE); // dispatch table switch ( keycol->dtype ) { case GDF_INT8: return gdf_radixsort_i8(hdl, keycol, valcol); case GDF_INT32: return gdf_radixsort_i32(hdl, keycol, valcol); case GDF_INT64: return gdf_radixsort_i64(hdl, keycol, valcol); case GDF_FLOAT32: return gdf_radixsort_f32(hdl, keycol, valcol); case GDF_FLOAT64: return gdf_radixsort_f64(hdl, keycol, valcol); default: return GDF_UNSUPPORTED_DTYPE; } }
b3b8b72d685991f971ab7fbd1d27b0c700c27028.cu
#include <gdf/gdf.h> #include <gdf/utils.h> #include <gdf/errorutils.h> #include "rmm.h" #include <cub/device/device_radix_sort.cuh> struct RadixSortPlan{ const size_t num_items; // temporary storage void *storage; size_t storage_bytes; void *back_key, *back_val; size_t back_key_size, back_val_size; cudaStream_t stream; int descending; unsigned begin_bit, end_bit; RadixSortPlan(size_t num_items, int descending, unsigned begin_bit, unsigned end_bit) : num_items(num_items), storage(nullptr), storage_bytes(0), back_key(nullptr), back_val(nullptr), back_key_size(0), back_val_size(0), stream(0), descending(descending), begin_bit(begin_bit), end_bit(end_bit) {} gdf_error setup(size_t sizeof_key, size_t sizeof_val) { back_key_size = num_items * sizeof_key; back_val_size = num_items * sizeof_val; RMM_TRY( rmmAlloc(&back_key, back_key_size, stream) ); // TODO: non-default stream RMM_TRY( rmmAlloc(&back_val, back_val_size, stream) ); return GDF_SUCCESS; } gdf_error teardown() { RMM_TRY( rmmFree(back_key, stream) ); RMM_TRY( rmmFree(back_val, stream) ); RMM_TRY( rmmFree(storage, stream) ); return GDF_SUCCESS; } }; template <typename Tk, typename Tv> struct RadixSort { static gdf_error sort( RadixSortPlan *plan, Tk *d_key_buf, Tv *d_value_buf) { unsigned num_items = plan->num_items; Tk *d_key_alt_buf = (Tk*)plan->back_key; Tv *d_value_alt_buf = (Tv*)plan->back_val; cudaStream_t stream = plan->stream; int descending = plan->descending; unsigned begin_bit = plan->begin_bit; unsigned end_bit = plan->end_bit; cub::DoubleBuffer<Tk> d_keys(d_key_buf, d_key_alt_buf); if (d_value_buf) { // Sort KeyValue pairs cub::DoubleBuffer<Tv> d_values(d_value_buf, d_value_alt_buf); if (descending) { cub::DeviceRadixSort::SortPairsDescending(plan->storage, plan->storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, stream); } else { cub::DeviceRadixSort::SortPairs( plan->storage, plan->storage_bytes, d_keys, d_values, num_items, begin_bit, end_bit, stream ); } CUDA_CHECK_LAST(); if (plan->storage && d_value_buf != d_values.Current()){ cudaMemcpyAsync(d_value_buf, d_value_alt_buf, num_items * sizeof(Tv), cudaMemcpyDeviceToDevice, stream); CUDA_CHECK_LAST(); } } else { // Sort Keys only if (descending) { cub::DeviceRadixSort::SortKeysDescending( plan->storage, plan->storage_bytes, d_keys, num_items, begin_bit, end_bit, stream ); CUDA_CHECK_LAST() } else { cub::DeviceRadixSort::SortKeys( plan->storage, plan->storage_bytes, d_keys, num_items, begin_bit, end_bit, stream ); } CUDA_CHECK_LAST(); } if ( plan->storage ) { // We have operated and the result is not in front buffer if (d_key_buf != d_keys.Current()){ cudaMemcpyAsync(d_key_buf, d_key_alt_buf, num_items * sizeof(Tk), cudaMemcpyDeviceToDevice, stream); CUDA_CHECK_LAST(); } } else { // We have not operated. // Just checking for temporary storage requirement RMM_TRY( rmmAlloc(&plan->storage, plan->storage_bytes, plan->stream) ); // TODO: non-default stream CUDA_CHECK_LAST(); // Now that we have allocated, do real work. 
return sort(plan, d_key_buf, d_value_buf); } return GDF_SUCCESS; } }; gdf_radixsort_plan_type* cffi_wrap(RadixSortPlan* obj){ return reinterpret_cast<gdf_radixsort_plan_type*>(obj); } RadixSortPlan* cffi_unwrap(gdf_radixsort_plan_type* hdl){ return reinterpret_cast<RadixSortPlan*>(hdl); } gdf_radixsort_plan_type* gdf_radixsort_plan(size_t num_items, int descending, unsigned begin_bit, unsigned end_bit){ return cffi_wrap(new RadixSortPlan(num_items, descending, begin_bit, end_bit)); } gdf_error gdf_radixsort_plan_setup(gdf_radixsort_plan_type *hdl, size_t sizeof_key, size_t sizeof_val) { return cffi_unwrap(hdl)->setup(sizeof_key, sizeof_val); } gdf_error gdf_radixsort_plan_free(gdf_radixsort_plan_type *hdl) { auto plan = cffi_unwrap(hdl); gdf_error status = plan->teardown(); delete plan; return status; } #define WRAP(Fn, Tk, Tv) \ gdf_error gdf_radixsort_##Fn(gdf_radixsort_plan_type *hdl, \ gdf_column *keycol, \ gdf_column *valcol) \ { \ /* validity mask must be empty */ \ GDF_REQUIRE(!keycol->valid, GDF_VALIDITY_UNSUPPORTED); \ GDF_REQUIRE(!valcol->valid, GDF_VALIDITY_UNSUPPORTED); \ /* size of columns must match */ \ GDF_REQUIRE(keycol->size == valcol->size, GDF_COLUMN_SIZE_MISMATCH); \ RadixSortPlan *plan = cffi_unwrap(hdl); \ /* num_items must match */ \ GDF_REQUIRE(plan->num_items == keycol->size, GDF_COLUMN_SIZE_MISMATCH); \ /* back buffer size must match */ \ GDF_REQUIRE(sizeof(Tk) * plan->num_items == plan->back_key_size, \ GDF_COLUMN_SIZE_MISMATCH); \ GDF_REQUIRE(sizeof(Tv) * plan->num_items == plan->back_val_size, \ GDF_COLUMN_SIZE_MISMATCH); \ /* Do sort */ \ return RadixSort<Tk, Tv>::sort(plan, \ (Tk*)keycol->data, (Tv*)valcol->data); \ } WRAP(f32, float, int64_t) WRAP(f64, double, int64_t) WRAP(i8, int8_t, int64_t) WRAP(i32, int32_t, int64_t) WRAP(i64, int64_t, int64_t) gdf_error gdf_radixsort_generic(gdf_radixsort_plan_type *hdl, gdf_column *keycol, gdf_column *valcol) { GDF_REQUIRE(valcol->dtype == GDF_INT64, GDF_UNSUPPORTED_DTYPE); // dispatch table switch ( keycol->dtype ) { case GDF_INT8: return gdf_radixsort_i8(hdl, keycol, valcol); case GDF_INT32: return gdf_radixsort_i32(hdl, keycol, valcol); case GDF_INT64: return gdf_radixsort_i64(hdl, keycol, valcol); case GDF_FLOAT32: return gdf_radixsort_f32(hdl, keycol, valcol); case GDF_FLOAT64: return gdf_radixsort_f64(hdl, keycol, valcol); default: return GDF_UNSUPPORTED_DTYPE; } }
0e14df053312c164f1300094363fb7d8a0c3a502.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2021 by XGBoost Contributors */ #include <thrust/scan.h> #include <hipcub/hipcub.hpp> #include <cassert> #include <limits> #include <memory> #include <utility> #include <tuple> #include "rabit/rabit.h" #include "xgboost/span.h" #include "xgboost/data.h" #include "auc.h" #include "../common/device_helpers.cuh" #include "../common/ranking_utils.cuh" namespace xgboost { namespace metric { namespace { template <typename T> using Discard = thrust::discard_iterator<T>; struct GetWeightOp { common::Span<float const> weights; common::Span<size_t const> sorted_idx; __device__ float operator()(size_t i) const { return weights.empty() ? 1.0f : weights[sorted_idx[i]]; } }; } // namespace /** * A cache to GPU data to avoid reallocating memory. */ struct DeviceAUCCache { // Pair of FP/TP using Pair = thrust::pair<float, float>; // index sorted by prediction value dh::device_vector<size_t> sorted_idx; // track FP/TP for computation on trapesoid area dh::device_vector<Pair> fptp; // track FP_PREV/TP_PREV for computation on trapesoid area dh::device_vector<Pair> neg_pos; // index of unique prediction values. dh::device_vector<size_t> unique_idx; // p^T: transposed prediction matrix, used by MultiClassAUC dh::device_vector<float> predts_t; std::unique_ptr<dh::AllReducer> reducer; void Init(common::Span<float const> predts, bool is_multi, int32_t device) { if (sorted_idx.size() != predts.size()) { sorted_idx.resize(predts.size()); fptp.resize(sorted_idx.size()); unique_idx.resize(sorted_idx.size()); neg_pos.resize(sorted_idx.size()); if (is_multi) { predts_t.resize(sorted_idx.size()); } } if (is_multi && !reducer) { reducer.reset(new dh::AllReducer); reducer->Init(device); } } }; /** * The GPU implementation uses same calculation as CPU with a few more steps to distribute * work across threads: * * - Run scan to obtain TP/FP values, which are right coordinates of trapesoid. * - Find distinct prediction values and get the corresponding FP_PREV/TP_PREV value, * which are left coordinates of trapesoids. * - Reduce the scan array into 1 AUC value. 
*/ std::tuple<float, float, float> GPUBinaryAUC(common::Span<float const> predts, MetaInfo const &info, int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) { auto& cache = *p_cache; if (!cache) { cache.reset(new DeviceAUCCache); } cache->Init(predts, false, device); auto labels = info.labels_.ConstDeviceSpan(); auto weights = info.weights_.ConstDeviceSpan(); dh::safe_cuda(hipSetDevice(device)); CHECK(!labels.empty()); CHECK_EQ(labels.size(), predts.size()); /** * Create sorted index for each class */ auto d_sorted_idx = dh::ToSpan(cache->sorted_idx); dh::ArgSort<false>(predts, d_sorted_idx); /** * Linear scan */ auto get_weight = GetWeightOp{weights, d_sorted_idx}; using Pair = thrust::pair<float, float>; auto get_fp_tp = [=]__device__(size_t i) { size_t idx = d_sorted_idx[i]; float label = labels[idx]; float w = get_weight(i); float fp = (1.0 - label) * w; float tp = label * w; return thrust::make_pair(fp, tp); }; // NOLINT auto d_fptp = dh::ToSpan(cache->fptp); dh::LaunchN(d_sorted_idx.size(), [=] __device__(size_t i) { d_fptp[i] = get_fp_tp(i); }); dh::XGBDeviceAllocator<char> alloc; auto d_unique_idx = dh::ToSpan(cache->unique_idx); dh::Iota(d_unique_idx); auto uni_key = dh::MakeTransformIterator<float>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { return predts[d_sorted_idx[i]]; }); auto end_unique = thrust::unique_by_key_copy( thrust::hip::par(alloc), uni_key, uni_key + d_sorted_idx.size(), dh::tbegin(d_unique_idx), thrust::make_discard_iterator(), dh::tbegin(d_unique_idx)); d_unique_idx = d_unique_idx.subspan(0, end_unique.second - dh::tbegin(d_unique_idx)); dh::InclusiveScan( dh::tbegin(d_fptp), dh::tbegin(d_fptp), [=] __device__(Pair const &l, Pair const &r) { return thrust::make_pair(l.first + r.first, l.second + r.second); }, d_fptp.size()); auto d_neg_pos = dh::ToSpan(cache->neg_pos); // scatter unique negaive/positive values // shift to right by 1 with initial value being 0 dh::LaunchN(d_unique_idx.size(), [=] __device__(size_t i) { if (d_unique_idx[i] == 0) { // first unique index is 0 assert(i == 0); d_neg_pos[0] = {0, 0}; return; } d_neg_pos[d_unique_idx[i]] = d_fptp[d_unique_idx[i] - 1]; if (i == d_unique_idx.size() - 1) { // last one needs to be included, may override above assignment if the last // prediction value is distinct from previous one. d_neg_pos.back() = d_fptp[d_unique_idx[i] - 1]; return; } }); auto in = dh::MakeTransformIterator<float>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { float fp, tp; float fp_prev, tp_prev; if (i == 0) { // handle the last element thrust::tie(fp, tp) = d_fptp.back(); thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx.back()]; } else { thrust::tie(fp, tp) = d_fptp[d_unique_idx[i] - 1]; thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx[i - 1]]; } return TrapesoidArea(fp_prev, fp, tp_prev, tp); }); Pair last = cache->fptp.back(); float auc = thrust::reduce(thrust::hip::par(alloc), in, in + d_unique_idx.size()); return std::make_tuple(last.first, last.second, auc); } void Transpose(common::Span<float const> in, common::Span<float> out, size_t m, size_t n, int32_t device) { CHECK_EQ(in.size(), out.size()); CHECK_EQ(in.size(), m * n); dh::LaunchN(in.size(), [=] __device__(size_t i) { size_t col = i / m; size_t row = i % m; size_t idx = row * n + col; out[i] = in[idx]; }); } /** * Last index of a group in a CSR style of index pointer. 
*/ template <typename Idx> XGBOOST_DEVICE size_t LastOf(size_t group, common::Span<Idx> indptr) { return indptr[group + 1] - 1; } float ScaleClasses(common::Span<float> results, common::Span<float> local_area, common::Span<float> fp, common::Span<float> tp, common::Span<float> auc, std::shared_ptr<DeviceAUCCache> cache, size_t n_classes) { dh::XGBDeviceAllocator<char> alloc; if (rabit::IsDistributed()) { CHECK_EQ(dh::CudaGetPointerDevice(results.data()), dh::CurrentDevice()); cache->reducer->AllReduceSum(results.data(), results.data(), results.size()); } auto reduce_in = dh::MakeTransformIterator<thrust::pair<float, float>>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { if (local_area[i] > 0) { return thrust::make_pair(auc[i] / local_area[i] * tp[i], tp[i]); } return thrust::make_pair(std::numeric_limits<float>::quiet_NaN(), 0.0f); }); float tp_sum; float auc_sum; thrust::tie(auc_sum, tp_sum) = thrust::reduce( thrust::hip::par(alloc), reduce_in, reduce_in + n_classes, thrust::make_pair(0.0f, 0.0f), [=] __device__(auto const &l, auto const &r) { return thrust::make_pair(l.first + r.first, l.second + r.second); }); if (tp_sum != 0 && !std::isnan(auc_sum)) { auc_sum /= tp_sum; } else { return std::numeric_limits<float>::quiet_NaN(); } return auc_sum; } /** * MultiClass implementation is similar to binary classification, except we need to split * up each class in all kernels. */ float GPUMultiClassAUCOVR(common::Span<float const> predts, MetaInfo const &info, int32_t device, std::shared_ptr<DeviceAUCCache>* p_cache, size_t n_classes) { dh::safe_cuda(hipSetDevice(device)); auto& cache = *p_cache; if (!cache) { cache.reset(new DeviceAUCCache); } cache->Init(predts, true, device); auto labels = info.labels_.ConstDeviceSpan(); auto weights = info.weights_.ConstDeviceSpan(); size_t n_samples = labels.size(); if (n_samples == 0) { dh::TemporaryArray<float> resutls(n_classes * 4, 0.0f); auto d_results = dh::ToSpan(resutls); dh::LaunchN(n_classes * 4, [=] __device__(size_t i) { d_results[i] = 0.0f; }); auto local_area = d_results.subspan(0, n_classes); auto fp = d_results.subspan(n_classes, n_classes); auto tp = d_results.subspan(2 * n_classes, n_classes); auto auc = d_results.subspan(3 * n_classes, n_classes); return ScaleClasses(d_results, local_area, fp, tp, auc, cache, n_classes); } /** * Create sorted index for each class */ auto d_predts_t = dh::ToSpan(cache->predts_t); Transpose(predts, d_predts_t, n_samples, n_classes, device); dh::TemporaryArray<uint32_t> class_ptr(n_classes + 1, 0); auto d_class_ptr = dh::ToSpan(class_ptr); dh::LaunchN(n_classes + 1, [=] __device__(size_t i) { d_class_ptr[i] = i * n_samples; }); // no out-of-place sort for thrust, cub sort doesn't accept general iterator. So can't // use transform iterator in sorting. auto d_sorted_idx = dh::ToSpan(cache->sorted_idx); dh::SegmentedArgSort<false>(d_predts_t, d_class_ptr, d_sorted_idx); /** * Linear scan */ dh::caching_device_vector<float> d_auc(n_classes, 0); auto s_d_auc = dh::ToSpan(d_auc); auto get_weight = GetWeightOp{weights, d_sorted_idx}; using Pair = thrust::pair<float, float>; auto d_fptp = dh::ToSpan(cache->fptp); auto get_fp_tp = [=]__device__(size_t i) { size_t idx = d_sorted_idx[i]; size_t class_id = i / n_samples; // labels is a vector of size n_samples. float label = labels[idx % n_samples] == class_id; float w = weights.empty() ? 
1.0f : weights[d_sorted_idx[i] % n_samples]; float fp = (1.0 - label) * w; float tp = label * w; return thrust::make_pair(fp, tp); }; // NOLINT dh::LaunchN(d_sorted_idx.size(), [=] __device__(size_t i) { d_fptp[i] = get_fp_tp(i); }); /** * Handle duplicated predictions */ dh::XGBDeviceAllocator<char> alloc; auto d_unique_idx = dh::ToSpan(cache->unique_idx); dh::Iota(d_unique_idx); auto uni_key = dh::MakeTransformIterator<thrust::pair<uint32_t, float>>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { uint32_t class_id = i / n_samples; float predt = d_predts_t[d_sorted_idx[i]]; return thrust::make_pair(class_id, predt); }); // unique values are sparse, so we need a CSR style indptr dh::TemporaryArray<uint32_t> unique_class_ptr(class_ptr.size()); auto d_unique_class_ptr = dh::ToSpan(unique_class_ptr); auto n_uniques = dh::SegmentedUniqueByKey( thrust::hip::par(alloc), dh::tbegin(d_class_ptr), dh::tend(d_class_ptr), uni_key, uni_key + d_sorted_idx.size(), dh::tbegin(d_unique_idx), d_unique_class_ptr.data(), dh::tbegin(d_unique_idx), thrust::equal_to<thrust::pair<uint32_t, float>>{}); d_unique_idx = d_unique_idx.subspan(0, n_uniques); using Triple = thrust::tuple<uint32_t, float, float>; // expand to tuple to include class id auto fptp_it_in = dh::MakeTransformIterator<Triple>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { return thrust::make_tuple(i, d_fptp[i].first, d_fptp[i].second); }); // shrink down to pair auto fptp_it_out = thrust::make_transform_output_iterator( dh::TypedDiscard<Triple>{}, [d_fptp] __device__(Triple const &t) { d_fptp[thrust::get<0>(t)] = thrust::make_pair(thrust::get<1>(t), thrust::get<2>(t)); return t; }); dh::InclusiveScan( fptp_it_in, fptp_it_out, [=] __device__(Triple const &l, Triple const &r) { uint32_t l_cid = thrust::get<0>(l) / n_samples; uint32_t r_cid = thrust::get<0>(r) / n_samples; if (l_cid != r_cid) { return r; } return Triple(thrust::get<0>(r), thrust::get<1>(l) + thrust::get<1>(r), // fp thrust::get<2>(l) + thrust::get<2>(r)); // tp }, d_fptp.size()); // scatter unique FP_PREV/TP_PREV values auto d_neg_pos = dh::ToSpan(cache->neg_pos); // When dataset is not empty, each class must have at least 1 (unique) sample // prediction, so no need to handle special case. dh::LaunchN(d_unique_idx.size(), [=] __device__(size_t i) { if (d_unique_idx[i] % n_samples == 0) { // first unique index is 0 assert(d_unique_idx[i] % n_samples == 0); d_neg_pos[d_unique_idx[i]] = {0, 0}; // class_id * n_samples = i return; } uint32_t class_id = d_unique_idx[i] / n_samples; d_neg_pos[d_unique_idx[i]] = d_fptp[d_unique_idx[i] - 1]; if (i == LastOf(class_id, d_unique_class_ptr)) { // last one needs to be included. 
size_t last = d_unique_idx[LastOf(class_id, d_unique_class_ptr)]; d_neg_pos[LastOf(class_id, d_class_ptr)] = d_fptp[last - 1]; return; } }); /** * Reduce the result for each class */ auto key_in = dh::MakeTransformIterator<uint32_t>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { size_t class_id = d_unique_idx[i] / n_samples; return class_id; }); auto val_in = dh::MakeTransformIterator<float>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { size_t class_id = d_unique_idx[i] / n_samples; float fp, tp; float fp_prev, tp_prev; if (i == d_unique_class_ptr[class_id]) { // first item is ignored, we use this thread to calculate the last item thrust::tie(fp, tp) = d_fptp[class_id * n_samples + (n_samples - 1)]; thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx[LastOf(class_id, d_unique_class_ptr)]]; } else { thrust::tie(fp, tp) = d_fptp[d_unique_idx[i] - 1]; thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx[i - 1]]; } float auc = TrapesoidArea(fp_prev, fp, tp_prev, tp); return auc; }); thrust::reduce_by_key(thrust::hip::par(alloc), key_in, key_in + d_unique_idx.size(), val_in, thrust::make_discard_iterator(), d_auc.begin()); /** * Scale the classes with number of samples for each class. */ dh::TemporaryArray<float> resutls(n_classes * 4); auto d_results = dh::ToSpan(resutls); auto local_area = d_results.subspan(0, n_classes); auto fp = d_results.subspan(n_classes, n_classes); auto tp = d_results.subspan(2 * n_classes, n_classes); auto auc = d_results.subspan(3 * n_classes, n_classes); dh::LaunchN(n_classes, [=] __device__(size_t c) { auc[c] = s_d_auc[c]; auto last = d_fptp[n_samples * c + (n_samples - 1)]; fp[c] = last.first; tp[c] = last.second; local_area[c] = last.first * last.second; }); return ScaleClasses(d_results, local_area, fp, tp, auc, cache, n_classes); } namespace { struct RankScanItem { size_t idx; float predt; float w; bst_group_t group_id; }; } // anonymous namespace std::pair<float, uint32_t> GPURankingAUC(common::Span<float const> predts, MetaInfo const &info, int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) { auto& cache = *p_cache; if (!cache) { cache.reset(new DeviceAUCCache); } cache->Init(predts, false, device); dh::caching_device_vector<bst_group_t> group_ptr(info.group_ptr_); dh::XGBCachingDeviceAllocator<char> alloc; auto d_group_ptr = dh::ToSpan(group_ptr); /** * Validate the dataset */ auto check_it = dh::MakeTransformIterator<size_t>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { return d_group_ptr[i + 1] - d_group_ptr[i]; }); size_t n_valid = thrust::count_if( thrust::hip::par(alloc), check_it, check_it + group_ptr.size() - 1, [=] __device__(size_t len) { return len >= 3; }); if (n_valid < info.group_ptr_.size() - 1) { InvalidGroupAUC(); } if (n_valid == 0) { return std::make_pair(0.0f, 0); } /** * Sort the labels */ auto d_labels = info.labels_.ConstDeviceSpan(); auto d_sorted_idx = dh::ToSpan(cache->sorted_idx); dh::SegmentedArgSort<false>(d_labels, d_group_ptr, d_sorted_idx); auto d_weights = info.weights_.ConstDeviceSpan(); dh::caching_device_vector<size_t> threads_group_ptr(group_ptr.size(), 0); auto d_threads_group_ptr = dh::ToSpan(threads_group_ptr); // Use max to represent triangle auto n_threads = common::SegmentedTrapezoidThreads( d_group_ptr, d_threads_group_ptr, std::numeric_limits<size_t>::max()); // get the coordinate in nested summation auto get_i_j = [=]__device__(size_t idx, size_t query_group_idx) { auto data_group_begin = d_group_ptr[query_group_idx]; size_t n_samples = 
d_group_ptr[query_group_idx + 1] - data_group_begin; auto thread_group_begin = d_threads_group_ptr[query_group_idx]; auto idx_in_thread_group = idx - thread_group_begin; size_t i, j; common::UnravelTrapeziodIdx(idx_in_thread_group, n_samples, &i, &j); // we use global index among all groups for sorted idx, so i, j should also be global // index. i += data_group_begin; j += data_group_begin; return thrust::make_pair(i, j); }; // NOLINT auto in = dh::MakeTransformIterator<RankScanItem>( thrust::make_counting_iterator(0), [=] __device__(size_t idx) { bst_group_t query_group_idx = dh::SegmentId(d_threads_group_ptr, idx); auto data_group_begin = d_group_ptr[query_group_idx]; size_t n_samples = d_group_ptr[query_group_idx + 1] - data_group_begin; if (n_samples < 3) { // at least 3 documents are required. return RankScanItem{idx, 0, 0, query_group_idx}; } size_t i, j; thrust::tie(i, j) = get_i_j(idx, query_group_idx); float predt = predts[d_sorted_idx[i]] - predts[d_sorted_idx[j]]; float w = common::Sqr(d_weights.empty() ? 1.0f : d_weights[query_group_idx]); if (predt > 0) { predt = 1.0; } else if (predt == 0) { predt = 0.5; } else { predt = 0; } predt *= w; return RankScanItem{idx, predt, w, query_group_idx}; }); dh::TemporaryArray<float> d_auc(group_ptr.size() - 1); auto s_d_auc = dh::ToSpan(d_auc); auto out = thrust::make_transform_output_iterator( dh::TypedDiscard<RankScanItem>{}, [=] __device__(RankScanItem const &item) -> RankScanItem { auto group_id = item.group_id; assert(group_id < d_group_ptr.size()); auto data_group_begin = d_group_ptr[group_id]; size_t n_samples = d_group_ptr[group_id + 1] - data_group_begin; // last item of current group if (item.idx == LastOf(group_id, d_threads_group_ptr)) { if (item.w > 0) { s_d_auc[group_id] = item.predt / item.w; } else { s_d_auc[group_id] = 0; } } return {}; // discard }); dh::InclusiveScan( in, out, [] __device__(RankScanItem const &l, RankScanItem const &r) { if (l.group_id != r.group_id) { return r; } return RankScanItem{r.idx, l.predt + r.predt, l.w + r.w, l.group_id}; }, n_threads); /** * Scale the AUC with number of items in each group. */ float auc = thrust::reduce(thrust::hip::par(alloc), dh::tbegin(s_d_auc), dh::tend(s_d_auc), 0.0f); return std::make_pair(auc, n_valid); } } // namespace metric } // namespace xgboost
0e14df053312c164f1300094363fb7d8a0c3a502.cu
/*! * Copyright 2021 by XGBoost Contributors */ #include <thrust/scan.h> #include <cub/cub.cuh> #include <cassert> #include <limits> #include <memory> #include <utility> #include <tuple> #include "rabit/rabit.h" #include "xgboost/span.h" #include "xgboost/data.h" #include "auc.h" #include "../common/device_helpers.cuh" #include "../common/ranking_utils.cuh" namespace xgboost { namespace metric { namespace { template <typename T> using Discard = thrust::discard_iterator<T>; struct GetWeightOp { common::Span<float const> weights; common::Span<size_t const> sorted_idx; __device__ float operator()(size_t i) const { return weights.empty() ? 1.0f : weights[sorted_idx[i]]; } }; } // namespace /** * A cache to GPU data to avoid reallocating memory. */ struct DeviceAUCCache { // Pair of FP/TP using Pair = thrust::pair<float, float>; // index sorted by prediction value dh::device_vector<size_t> sorted_idx; // track FP/TP for computation on trapesoid area dh::device_vector<Pair> fptp; // track FP_PREV/TP_PREV for computation on trapesoid area dh::device_vector<Pair> neg_pos; // index of unique prediction values. dh::device_vector<size_t> unique_idx; // p^T: transposed prediction matrix, used by MultiClassAUC dh::device_vector<float> predts_t; std::unique_ptr<dh::AllReducer> reducer; void Init(common::Span<float const> predts, bool is_multi, int32_t device) { if (sorted_idx.size() != predts.size()) { sorted_idx.resize(predts.size()); fptp.resize(sorted_idx.size()); unique_idx.resize(sorted_idx.size()); neg_pos.resize(sorted_idx.size()); if (is_multi) { predts_t.resize(sorted_idx.size()); } } if (is_multi && !reducer) { reducer.reset(new dh::AllReducer); reducer->Init(device); } } }; /** * The GPU implementation uses same calculation as CPU with a few more steps to distribute * work across threads: * * - Run scan to obtain TP/FP values, which are right coordinates of trapesoid. * - Find distinct prediction values and get the corresponding FP_PREV/TP_PREV value, * which are left coordinates of trapesoids. * - Reduce the scan array into 1 AUC value. 
*/ std::tuple<float, float, float> GPUBinaryAUC(common::Span<float const> predts, MetaInfo const &info, int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) { auto& cache = *p_cache; if (!cache) { cache.reset(new DeviceAUCCache); } cache->Init(predts, false, device); auto labels = info.labels_.ConstDeviceSpan(); auto weights = info.weights_.ConstDeviceSpan(); dh::safe_cuda(cudaSetDevice(device)); CHECK(!labels.empty()); CHECK_EQ(labels.size(), predts.size()); /** * Create sorted index for each class */ auto d_sorted_idx = dh::ToSpan(cache->sorted_idx); dh::ArgSort<false>(predts, d_sorted_idx); /** * Linear scan */ auto get_weight = GetWeightOp{weights, d_sorted_idx}; using Pair = thrust::pair<float, float>; auto get_fp_tp = [=]__device__(size_t i) { size_t idx = d_sorted_idx[i]; float label = labels[idx]; float w = get_weight(i); float fp = (1.0 - label) * w; float tp = label * w; return thrust::make_pair(fp, tp); }; // NOLINT auto d_fptp = dh::ToSpan(cache->fptp); dh::LaunchN(d_sorted_idx.size(), [=] __device__(size_t i) { d_fptp[i] = get_fp_tp(i); }); dh::XGBDeviceAllocator<char> alloc; auto d_unique_idx = dh::ToSpan(cache->unique_idx); dh::Iota(d_unique_idx); auto uni_key = dh::MakeTransformIterator<float>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { return predts[d_sorted_idx[i]]; }); auto end_unique = thrust::unique_by_key_copy( thrust::cuda::par(alloc), uni_key, uni_key + d_sorted_idx.size(), dh::tbegin(d_unique_idx), thrust::make_discard_iterator(), dh::tbegin(d_unique_idx)); d_unique_idx = d_unique_idx.subspan(0, end_unique.second - dh::tbegin(d_unique_idx)); dh::InclusiveScan( dh::tbegin(d_fptp), dh::tbegin(d_fptp), [=] __device__(Pair const &l, Pair const &r) { return thrust::make_pair(l.first + r.first, l.second + r.second); }, d_fptp.size()); auto d_neg_pos = dh::ToSpan(cache->neg_pos); // scatter unique negaive/positive values // shift to right by 1 with initial value being 0 dh::LaunchN(d_unique_idx.size(), [=] __device__(size_t i) { if (d_unique_idx[i] == 0) { // first unique index is 0 assert(i == 0); d_neg_pos[0] = {0, 0}; return; } d_neg_pos[d_unique_idx[i]] = d_fptp[d_unique_idx[i] - 1]; if (i == d_unique_idx.size() - 1) { // last one needs to be included, may override above assignment if the last // prediction value is distinct from previous one. d_neg_pos.back() = d_fptp[d_unique_idx[i] - 1]; return; } }); auto in = dh::MakeTransformIterator<float>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { float fp, tp; float fp_prev, tp_prev; if (i == 0) { // handle the last element thrust::tie(fp, tp) = d_fptp.back(); thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx.back()]; } else { thrust::tie(fp, tp) = d_fptp[d_unique_idx[i] - 1]; thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx[i - 1]]; } return TrapesoidArea(fp_prev, fp, tp_prev, tp); }); Pair last = cache->fptp.back(); float auc = thrust::reduce(thrust::cuda::par(alloc), in, in + d_unique_idx.size()); return std::make_tuple(last.first, last.second, auc); } void Transpose(common::Span<float const> in, common::Span<float> out, size_t m, size_t n, int32_t device) { CHECK_EQ(in.size(), out.size()); CHECK_EQ(in.size(), m * n); dh::LaunchN(in.size(), [=] __device__(size_t i) { size_t col = i / m; size_t row = i % m; size_t idx = row * n + col; out[i] = in[idx]; }); } /** * Last index of a group in a CSR style of index pointer. 
*/ template <typename Idx> XGBOOST_DEVICE size_t LastOf(size_t group, common::Span<Idx> indptr) { return indptr[group + 1] - 1; } float ScaleClasses(common::Span<float> results, common::Span<float> local_area, common::Span<float> fp, common::Span<float> tp, common::Span<float> auc, std::shared_ptr<DeviceAUCCache> cache, size_t n_classes) { dh::XGBDeviceAllocator<char> alloc; if (rabit::IsDistributed()) { CHECK_EQ(dh::CudaGetPointerDevice(results.data()), dh::CurrentDevice()); cache->reducer->AllReduceSum(results.data(), results.data(), results.size()); } auto reduce_in = dh::MakeTransformIterator<thrust::pair<float, float>>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { if (local_area[i] > 0) { return thrust::make_pair(auc[i] / local_area[i] * tp[i], tp[i]); } return thrust::make_pair(std::numeric_limits<float>::quiet_NaN(), 0.0f); }); float tp_sum; float auc_sum; thrust::tie(auc_sum, tp_sum) = thrust::reduce( thrust::cuda::par(alloc), reduce_in, reduce_in + n_classes, thrust::make_pair(0.0f, 0.0f), [=] __device__(auto const &l, auto const &r) { return thrust::make_pair(l.first + r.first, l.second + r.second); }); if (tp_sum != 0 && !std::isnan(auc_sum)) { auc_sum /= tp_sum; } else { return std::numeric_limits<float>::quiet_NaN(); } return auc_sum; } /** * MultiClass implementation is similar to binary classification, except we need to split * up each class in all kernels. */ float GPUMultiClassAUCOVR(common::Span<float const> predts, MetaInfo const &info, int32_t device, std::shared_ptr<DeviceAUCCache>* p_cache, size_t n_classes) { dh::safe_cuda(cudaSetDevice(device)); auto& cache = *p_cache; if (!cache) { cache.reset(new DeviceAUCCache); } cache->Init(predts, true, device); auto labels = info.labels_.ConstDeviceSpan(); auto weights = info.weights_.ConstDeviceSpan(); size_t n_samples = labels.size(); if (n_samples == 0) { dh::TemporaryArray<float> resutls(n_classes * 4, 0.0f); auto d_results = dh::ToSpan(resutls); dh::LaunchN(n_classes * 4, [=] __device__(size_t i) { d_results[i] = 0.0f; }); auto local_area = d_results.subspan(0, n_classes); auto fp = d_results.subspan(n_classes, n_classes); auto tp = d_results.subspan(2 * n_classes, n_classes); auto auc = d_results.subspan(3 * n_classes, n_classes); return ScaleClasses(d_results, local_area, fp, tp, auc, cache, n_classes); } /** * Create sorted index for each class */ auto d_predts_t = dh::ToSpan(cache->predts_t); Transpose(predts, d_predts_t, n_samples, n_classes, device); dh::TemporaryArray<uint32_t> class_ptr(n_classes + 1, 0); auto d_class_ptr = dh::ToSpan(class_ptr); dh::LaunchN(n_classes + 1, [=] __device__(size_t i) { d_class_ptr[i] = i * n_samples; }); // no out-of-place sort for thrust, cub sort doesn't accept general iterator. So can't // use transform iterator in sorting. auto d_sorted_idx = dh::ToSpan(cache->sorted_idx); dh::SegmentedArgSort<false>(d_predts_t, d_class_ptr, d_sorted_idx); /** * Linear scan */ dh::caching_device_vector<float> d_auc(n_classes, 0); auto s_d_auc = dh::ToSpan(d_auc); auto get_weight = GetWeightOp{weights, d_sorted_idx}; using Pair = thrust::pair<float, float>; auto d_fptp = dh::ToSpan(cache->fptp); auto get_fp_tp = [=]__device__(size_t i) { size_t idx = d_sorted_idx[i]; size_t class_id = i / n_samples; // labels is a vector of size n_samples. float label = labels[idx % n_samples] == class_id; float w = weights.empty() ? 
1.0f : weights[d_sorted_idx[i] % n_samples]; float fp = (1.0 - label) * w; float tp = label * w; return thrust::make_pair(fp, tp); }; // NOLINT dh::LaunchN(d_sorted_idx.size(), [=] __device__(size_t i) { d_fptp[i] = get_fp_tp(i); }); /** * Handle duplicated predictions */ dh::XGBDeviceAllocator<char> alloc; auto d_unique_idx = dh::ToSpan(cache->unique_idx); dh::Iota(d_unique_idx); auto uni_key = dh::MakeTransformIterator<thrust::pair<uint32_t, float>>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { uint32_t class_id = i / n_samples; float predt = d_predts_t[d_sorted_idx[i]]; return thrust::make_pair(class_id, predt); }); // unique values are sparse, so we need a CSR style indptr dh::TemporaryArray<uint32_t> unique_class_ptr(class_ptr.size()); auto d_unique_class_ptr = dh::ToSpan(unique_class_ptr); auto n_uniques = dh::SegmentedUniqueByKey( thrust::cuda::par(alloc), dh::tbegin(d_class_ptr), dh::tend(d_class_ptr), uni_key, uni_key + d_sorted_idx.size(), dh::tbegin(d_unique_idx), d_unique_class_ptr.data(), dh::tbegin(d_unique_idx), thrust::equal_to<thrust::pair<uint32_t, float>>{}); d_unique_idx = d_unique_idx.subspan(0, n_uniques); using Triple = thrust::tuple<uint32_t, float, float>; // expand to tuple to include class id auto fptp_it_in = dh::MakeTransformIterator<Triple>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { return thrust::make_tuple(i, d_fptp[i].first, d_fptp[i].second); }); // shrink down to pair auto fptp_it_out = thrust::make_transform_output_iterator( dh::TypedDiscard<Triple>{}, [d_fptp] __device__(Triple const &t) { d_fptp[thrust::get<0>(t)] = thrust::make_pair(thrust::get<1>(t), thrust::get<2>(t)); return t; }); dh::InclusiveScan( fptp_it_in, fptp_it_out, [=] __device__(Triple const &l, Triple const &r) { uint32_t l_cid = thrust::get<0>(l) / n_samples; uint32_t r_cid = thrust::get<0>(r) / n_samples; if (l_cid != r_cid) { return r; } return Triple(thrust::get<0>(r), thrust::get<1>(l) + thrust::get<1>(r), // fp thrust::get<2>(l) + thrust::get<2>(r)); // tp }, d_fptp.size()); // scatter unique FP_PREV/TP_PREV values auto d_neg_pos = dh::ToSpan(cache->neg_pos); // When dataset is not empty, each class must have at least 1 (unique) sample // prediction, so no need to handle special case. dh::LaunchN(d_unique_idx.size(), [=] __device__(size_t i) { if (d_unique_idx[i] % n_samples == 0) { // first unique index is 0 assert(d_unique_idx[i] % n_samples == 0); d_neg_pos[d_unique_idx[i]] = {0, 0}; // class_id * n_samples = i return; } uint32_t class_id = d_unique_idx[i] / n_samples; d_neg_pos[d_unique_idx[i]] = d_fptp[d_unique_idx[i] - 1]; if (i == LastOf(class_id, d_unique_class_ptr)) { // last one needs to be included. 
size_t last = d_unique_idx[LastOf(class_id, d_unique_class_ptr)]; d_neg_pos[LastOf(class_id, d_class_ptr)] = d_fptp[last - 1]; return; } }); /** * Reduce the result for each class */ auto key_in = dh::MakeTransformIterator<uint32_t>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { size_t class_id = d_unique_idx[i] / n_samples; return class_id; }); auto val_in = dh::MakeTransformIterator<float>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { size_t class_id = d_unique_idx[i] / n_samples; float fp, tp; float fp_prev, tp_prev; if (i == d_unique_class_ptr[class_id]) { // first item is ignored, we use this thread to calculate the last item thrust::tie(fp, tp) = d_fptp[class_id * n_samples + (n_samples - 1)]; thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx[LastOf(class_id, d_unique_class_ptr)]]; } else { thrust::tie(fp, tp) = d_fptp[d_unique_idx[i] - 1]; thrust::tie(fp_prev, tp_prev) = d_neg_pos[d_unique_idx[i - 1]]; } float auc = TrapesoidArea(fp_prev, fp, tp_prev, tp); return auc; }); thrust::reduce_by_key(thrust::cuda::par(alloc), key_in, key_in + d_unique_idx.size(), val_in, thrust::make_discard_iterator(), d_auc.begin()); /** * Scale the classes with number of samples for each class. */ dh::TemporaryArray<float> resutls(n_classes * 4); auto d_results = dh::ToSpan(resutls); auto local_area = d_results.subspan(0, n_classes); auto fp = d_results.subspan(n_classes, n_classes); auto tp = d_results.subspan(2 * n_classes, n_classes); auto auc = d_results.subspan(3 * n_classes, n_classes); dh::LaunchN(n_classes, [=] __device__(size_t c) { auc[c] = s_d_auc[c]; auto last = d_fptp[n_samples * c + (n_samples - 1)]; fp[c] = last.first; tp[c] = last.second; local_area[c] = last.first * last.second; }); return ScaleClasses(d_results, local_area, fp, tp, auc, cache, n_classes); } namespace { struct RankScanItem { size_t idx; float predt; float w; bst_group_t group_id; }; } // anonymous namespace std::pair<float, uint32_t> GPURankingAUC(common::Span<float const> predts, MetaInfo const &info, int32_t device, std::shared_ptr<DeviceAUCCache> *p_cache) { auto& cache = *p_cache; if (!cache) { cache.reset(new DeviceAUCCache); } cache->Init(predts, false, device); dh::caching_device_vector<bst_group_t> group_ptr(info.group_ptr_); dh::XGBCachingDeviceAllocator<char> alloc; auto d_group_ptr = dh::ToSpan(group_ptr); /** * Validate the dataset */ auto check_it = dh::MakeTransformIterator<size_t>( thrust::make_counting_iterator(0), [=] __device__(size_t i) { return d_group_ptr[i + 1] - d_group_ptr[i]; }); size_t n_valid = thrust::count_if( thrust::cuda::par(alloc), check_it, check_it + group_ptr.size() - 1, [=] __device__(size_t len) { return len >= 3; }); if (n_valid < info.group_ptr_.size() - 1) { InvalidGroupAUC(); } if (n_valid == 0) { return std::make_pair(0.0f, 0); } /** * Sort the labels */ auto d_labels = info.labels_.ConstDeviceSpan(); auto d_sorted_idx = dh::ToSpan(cache->sorted_idx); dh::SegmentedArgSort<false>(d_labels, d_group_ptr, d_sorted_idx); auto d_weights = info.weights_.ConstDeviceSpan(); dh::caching_device_vector<size_t> threads_group_ptr(group_ptr.size(), 0); auto d_threads_group_ptr = dh::ToSpan(threads_group_ptr); // Use max to represent triangle auto n_threads = common::SegmentedTrapezoidThreads( d_group_ptr, d_threads_group_ptr, std::numeric_limits<size_t>::max()); // get the coordinate in nested summation auto get_i_j = [=]__device__(size_t idx, size_t query_group_idx) { auto data_group_begin = d_group_ptr[query_group_idx]; size_t n_samples = 
d_group_ptr[query_group_idx + 1] - data_group_begin; auto thread_group_begin = d_threads_group_ptr[query_group_idx]; auto idx_in_thread_group = idx - thread_group_begin; size_t i, j; common::UnravelTrapeziodIdx(idx_in_thread_group, n_samples, &i, &j); // we use global index among all groups for sorted idx, so i, j should also be global // index. i += data_group_begin; j += data_group_begin; return thrust::make_pair(i, j); }; // NOLINT auto in = dh::MakeTransformIterator<RankScanItem>( thrust::make_counting_iterator(0), [=] __device__(size_t idx) { bst_group_t query_group_idx = dh::SegmentId(d_threads_group_ptr, idx); auto data_group_begin = d_group_ptr[query_group_idx]; size_t n_samples = d_group_ptr[query_group_idx + 1] - data_group_begin; if (n_samples < 3) { // at least 3 documents are required. return RankScanItem{idx, 0, 0, query_group_idx}; } size_t i, j; thrust::tie(i, j) = get_i_j(idx, query_group_idx); float predt = predts[d_sorted_idx[i]] - predts[d_sorted_idx[j]]; float w = common::Sqr(d_weights.empty() ? 1.0f : d_weights[query_group_idx]); if (predt > 0) { predt = 1.0; } else if (predt == 0) { predt = 0.5; } else { predt = 0; } predt *= w; return RankScanItem{idx, predt, w, query_group_idx}; }); dh::TemporaryArray<float> d_auc(group_ptr.size() - 1); auto s_d_auc = dh::ToSpan(d_auc); auto out = thrust::make_transform_output_iterator( dh::TypedDiscard<RankScanItem>{}, [=] __device__(RankScanItem const &item) -> RankScanItem { auto group_id = item.group_id; assert(group_id < d_group_ptr.size()); auto data_group_begin = d_group_ptr[group_id]; size_t n_samples = d_group_ptr[group_id + 1] - data_group_begin; // last item of current group if (item.idx == LastOf(group_id, d_threads_group_ptr)) { if (item.w > 0) { s_d_auc[group_id] = item.predt / item.w; } else { s_d_auc[group_id] = 0; } } return {}; // discard }); dh::InclusiveScan( in, out, [] __device__(RankScanItem const &l, RankScanItem const &r) { if (l.group_id != r.group_id) { return r; } return RankScanItem{r.idx, l.predt + r.predt, l.w + r.w, l.group_id}; }, n_threads); /** * Scale the AUC with number of items in each group. */ float auc = thrust::reduce(thrust::cuda::par(alloc), dh::tbegin(s_d_auc), dh::tend(s_d_auc), 0.0f); return std::make_pair(auc, n_valid); } } // namespace metric } // namespace xgboost
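GPUBinaryAUC above sorts predictions in descending order, scans running FP/TP totals, and adds one trapezoid per distinct prediction value. For intuition (and as a host-side cross-check), here is a hypothetical CPU version of the same scan; the trapezoid formula 0.5 * (fp - fp_prev) * (tp + tp_prev) is an assumption about TrapesoidArea, whose definition is not included in this entry:

// Hypothetical CPU reference for the scan-based binary AUC computed above.
// Returns the same (total FP, total TP, unnormalised area) triple as GPUBinaryAUC.
#include <algorithm>
#include <cstddef>
#include <numeric>
#include <tuple>
#include <vector>

std::tuple<float, float, float> cpu_binary_auc(const std::vector<float>& predt,
                                               const std::vector<float>& label,
                                               const std::vector<float>& weight) {
    std::vector<size_t> idx(predt.size());
    std::iota(idx.begin(), idx.end(), 0);
    // Descending by prediction, matching dh::ArgSort<false>.
    std::sort(idx.begin(), idx.end(),
              [&](size_t l, size_t r) { return predt[l] > predt[r]; });

    float fp = 0, tp = 0, fp_prev = 0, tp_prev = 0, area = 0;
    for (size_t i = 0; i < idx.size(); ++i) {
        float w = weight.empty() ? 1.0f : weight[idx[i]];
        fp += (1.0f - label[idx[i]]) * w;
        tp += label[idx[i]] * w;
        // Close one trapezoid per run of equal prediction values (and at the end),
        // mirroring the unique-prediction handling in the GPU code.
        if (i + 1 == idx.size() || predt[idx[i + 1]] != predt[idx[i]]) {
            area += 0.5f * (fp - fp_prev) * (tp + tp_prev);
            fp_prev = fp;
            tp_prev = tp;
        }
    }
    return std::make_tuple(fp, tp, area);
}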
4a5c5d3d8eec805ee150c1e939fc54b18ce96cdd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ctime> #include <cstdio> #include <cmath> #include "../Interpreter.h" #include "../m_global/S_Int.h" #include "../m_global/S_Address.h" #include "../m_global/I_AsArray.h" #include "../m_cuda/M_Cuda.h" #include "../m_cuda/S_Dim3.h" #include "M_Gauss.h" // ============================================================================= // Kernels #define EPSILON 1E-6 __global__ void pivot(int num_rows, int num_cols, float *A, int pivot_row, int pivot_col) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; int row = idx / num_cols; int col = idx % num_cols; // If thread isn't in matrix, return if (row >= num_rows || col >= num_cols) return; int pivot_index = pivot_row * num_cols + pivot_col; float pivot_coeff = A[pivot_index]; // If pivot coeff is 0, don't do anything if (fabs(pivot_coeff) < EPSILON) return; // Normalize pivot row if (row == pivot_row) { A[idx] /= pivot_coeff; } // Synchronize so other threads can pick up the normalized coefficients __threadfence(); float pivot_row_cur_col_coeff = A[pivot_row*num_cols + col]; float cur_row_pivot_col_coeff = A[row*num_cols + pivot_col]; // Eliminate pivot if (row == pivot_row) return; else if (fabs(cur_row_pivot_col_coeff) < EPSILON) return; else A[idx] += -cur_row_pivot_col_coeff * pivot_row_cur_col_coeff; } // ============================================================================= // Words // ( floats num_rows num_cols -- addr ) class W_GpuMatrix : public Word { public: W_GpuMatrix(string name) : Word(name) {}; virtual void Execute(Interpreter *interp) { int num_cols = AsInt(interp->StackPop()); int num_rows = AsInt(interp->StackPop()); auto numbers = AsArray(interp->StackPop()); int num_elements = num_rows * num_cols; int num_bytes = num_elements * sizeof(float); // Allocate memory void* result; auto res = hipMallocManaged((void**)&result, num_bytes); checkCudaCall(res, __FILE__, __LINE__); // Set values float* dst = (float*)result; for (int i=0; i < numbers.size(); i++) { dst[i] = AsFloat(numbers[i]); } interp->StackPush(S_Address::New(result)); } }; // ( num_rows num_cols addr -- ) class W_PrintMatrix : public Word { public: W_PrintMatrix(string name) : Word(name) {}; virtual void Execute(Interpreter *interp) { float* A = AsFloatStar(interp->StackPop()); int num_cols = AsInt(interp->StackPop()); int num_rows = AsInt(interp->StackPop()); interp->Run("CUDA-DEVICE-SYNCHRONIZE"); for (int r=0; r < num_rows; r++) { for (int c=0; c < num_cols; c++) { int index = c + num_cols*r; printf("%6.2f ", A[index]); } printf("\n"); } } }; // ( grid block num_rows num_cols addr pivot_row pivot_col -- ) class W_Pivot : public Word { public: W_Pivot(string name) : Word(name) {}; virtual void Execute(Interpreter *interp) { int pivot_col = AsInt(interp->StackPop()); int pivot_row = AsInt(interp->StackPop()); auto A = AsFloatStar(interp->StackPop()); int num_cols = AsInt(interp->StackPop()); int num_rows = AsInt(interp->StackPop()); dim3 block = AsDim3(interp->StackPop()); dim3 grid = AsDim3(interp->StackPop()); hipLaunchKernelGGL(( pivot), dim3(grid), dim3(block), 0, 0, num_rows, num_cols, A, pivot_row, pivot_col); } }; // ============================================================================= // M_Gauss M_Gauss::M_Gauss() : Module("gauss") { AddWord(new W_GpuMatrix("GPU-MATRIX")); AddWord(new W_PrintMatrix("PRINT-MATRIX")); AddWord(new W_Pivot("PIVOT")); }
4a5c5d3d8eec805ee150c1e939fc54b18ce96cdd.cu
#include <ctime> #include <cstdio> #include <cmath> #include "../Interpreter.h" #include "../m_global/S_Int.h" #include "../m_global/S_Address.h" #include "../m_global/I_AsArray.h" #include "../m_cuda/M_Cuda.h" #include "../m_cuda/S_Dim3.h" #include "M_Gauss.h" // ============================================================================= // Kernels #define EPSILON 1E-6 __global__ void pivot(int num_rows, int num_cols, float *A, int pivot_row, int pivot_col) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; int row = idx / num_cols; int col = idx % num_cols; // If thread isn't in matrix, return if (row >= num_rows || col >= num_cols) return; int pivot_index = pivot_row * num_cols + pivot_col; float pivot_coeff = A[pivot_index]; // If pivot coeff is 0, don't do anything if (fabs(pivot_coeff) < EPSILON) return; // Normalize pivot row if (row == pivot_row) { A[idx] /= pivot_coeff; } // Synchronize so other threads can pick up the normalized coefficients __threadfence(); float pivot_row_cur_col_coeff = A[pivot_row*num_cols + col]; float cur_row_pivot_col_coeff = A[row*num_cols + pivot_col]; // Eliminate pivot if (row == pivot_row) return; else if (fabs(cur_row_pivot_col_coeff) < EPSILON) return; else A[idx] += -cur_row_pivot_col_coeff * pivot_row_cur_col_coeff; } // ============================================================================= // Words // ( floats num_rows num_cols -- addr ) class W_GpuMatrix : public Word { public: W_GpuMatrix(string name) : Word(name) {}; virtual void Execute(Interpreter *interp) { int num_cols = AsInt(interp->StackPop()); int num_rows = AsInt(interp->StackPop()); auto numbers = AsArray(interp->StackPop()); int num_elements = num_rows * num_cols; int num_bytes = num_elements * sizeof(float); // Allocate memory void* result; auto res = cudaMallocManaged((void**)&result, num_bytes); checkCudaCall(res, __FILE__, __LINE__); // Set values float* dst = (float*)result; for (int i=0; i < numbers.size(); i++) { dst[i] = AsFloat(numbers[i]); } interp->StackPush(S_Address::New(result)); } }; // ( num_rows num_cols addr -- ) class W_PrintMatrix : public Word { public: W_PrintMatrix(string name) : Word(name) {}; virtual void Execute(Interpreter *interp) { float* A = AsFloatStar(interp->StackPop()); int num_cols = AsInt(interp->StackPop()); int num_rows = AsInt(interp->StackPop()); interp->Run("CUDA-DEVICE-SYNCHRONIZE"); for (int r=0; r < num_rows; r++) { for (int c=0; c < num_cols; c++) { int index = c + num_cols*r; printf("%6.2f ", A[index]); } printf("\n"); } } }; // ( grid block num_rows num_cols addr pivot_row pivot_col -- ) class W_Pivot : public Word { public: W_Pivot(string name) : Word(name) {}; virtual void Execute(Interpreter *interp) { int pivot_col = AsInt(interp->StackPop()); int pivot_row = AsInt(interp->StackPop()); auto A = AsFloatStar(interp->StackPop()); int num_cols = AsInt(interp->StackPop()); int num_rows = AsInt(interp->StackPop()); dim3 block = AsDim3(interp->StackPop()); dim3 grid = AsDim3(interp->StackPop()); pivot<<<grid, block>>>(num_rows, num_cols, A, pivot_row, pivot_col); } }; // ============================================================================= // M_Gauss M_Gauss::M_Gauss() : Module("gauss") { AddWord(new W_GpuMatrix("GPU-MATRIX")); AddWord(new W_PrintMatrix("PRINT-MATRIX")); AddWord(new W_Pivot("PIVOT")); }
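The pivot kernel above processes a single (pivot_row, pivot_col) per launch, and each pivot depends on the previous elimination being complete, so the natural host driver is one launch per pivot, with the launch boundary acting as the grid-wide barrier. A minimal hypothetical sketch (no partial pivoting, matching the kernel's behaviour of skipping near-zero pivots):

// Hypothetical Gauss-Jordan driver that reuses the pivot kernel defined above.
// A is a num_rows x num_cols row-major matrix in managed memory, e.g. as
// allocated by the GPU-MATRIX word.
#include <cuda_runtime.h>

void gauss_jordan(float *A, int num_rows, int num_cols) {
    const int threads  = 256;
    const int elements = num_rows * num_cols;
    const int blocks   = (elements + threads - 1) / threads;

    // One launch per pivot: the kernel normalizes row k and eliminates column k,
    // and the next launch only starts once that work is complete.
    // If A[k][k] is (near) zero the kernel leaves the matrix unchanged for that pivot.
    for (int k = 0; k < num_rows && k < num_cols; ++k) {
        pivot<<<blocks, threads>>>(num_rows, num_cols, A, k, k);
    }
    cudaDeviceSynchronize();
}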
e363fae83e29dcd82d9e2035d41ced3b71c4048c.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. */ #include <cstdio> #include <cstdlib> #include <stdexcept> #include <hip/hip_runtime.h> #include <cusolverDn.h> #include "cusolver_utils.h" template <typename T> void trtri(hipsolverDnHandle_t handle, cusolver_int_t n, T *d_A, cusolver_int_t lda, hipblasFillMode_t uplo, hipblasDiagType_t diag, int *d_info) { void *d_work = nullptr; size_t d_lwork = 0; void *h_work = nullptr; size_t h_lwork = 0; try { printf("Quering required device and host workspace size...\n"); CUSOLVER_CHECK(cusolverDnXtrtri_bufferSize(handle, uplo, diag, n, traits<T>::cuda_data_type, reinterpret_cast<void *>(d_A), lda, &d_lwork, &h_lwork)); printf("Allocating required device workspace...\n"); CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_work), d_lwork)); printf("Allocating required host workspace...\n"); if (h_lwork) { h_work = malloc(h_lwork); if (h_work == nullptr) { throw std::bad_alloc(); } } printf("Computing the inverse of a %s triangular matrix...\n", (uplo == HIPBLAS_FILL_MODE_UPPER ? 
"upper" : "lower")); CUSOLVER_CHECK(cusolverDnXtrtri(handle, uplo, diag, n, traits<T>::cuda_data_type, d_A, lda, d_work, d_lwork, h_work, h_lwork, d_info)); } catch (const std::exception &e) { fprintf(stderr, "error: %s\n", e.what()); } CUDA_CHECK(hipFree(d_work)); free(h_work); } // calculate |I - A * A^-1| and compare with eps template <typename T> void residual_check(cusolver_int_t n, T *d_A, T *d_A_inv, cusolver_int_t lda, double eps) { // create identity matrix T *h_A_res = (T *)calloc(n * lda, sizeof(T)); for (cusolver_int_t i = 0; i < n; i++) { h_A_res[i * lda + i] = T(1); } T alpha = -1; T beta = 1; T *d_A_res; CUDA_CHECK(hipMalloc(&d_A_res, sizeof(T) * n * lda)); CUDA_CHECK(hipMemcpy(d_A_res, h_A_res, sizeof(T) * n * lda, hipMemcpyHostToDevice)); hipblasHandle_t handle; CUBLAS_CHECK(hipblasCreate(&handle)); CUBLAS_CHECK(hipblasGemmEx(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, n, n, reinterpret_cast<void *>(&alpha), reinterpret_cast<void *>(d_A), traits<T>::cuda_data_type, lda, reinterpret_cast<void *>(d_A_inv), traits<T>::cuda_data_type, lda, reinterpret_cast<void *>(&beta), reinterpret_cast<void *>(d_A_res), traits<T>::cuda_data_type, lda, traits<T>::cuda_data_type, HIPBLAS_GEMM_DEFAULT)); CUDA_CHECK(hipDeviceSynchronize()); CUDA_CHECK(hipMemcpy(h_A_res, d_A_res, sizeof(T) * n * lda, hipMemcpyDeviceToHost)); double A_res_norm = 0.0; for (cusolver_int_t i = 0; i < n; i++) { double sum = 0.0; for (cusolver_int_t j = 0; j < n; j++) { sum += traits<T>::abs(h_A_res[i + j * lda]); } A_res_norm = ::max(A_res_norm, sum); } printf("Check: %s\n", (A_res_norm > eps ? "FAILED" : "PASSED")); CUBLAS_CHECK(hipblasDestroy(handle)); free(h_A_res); CUDA_CHECK(hipFree(d_A_res)); } int main(int argc, char *argv[]) { using data_type = double; const double eps = 1.e-15; hipsolverDnHandle_t handle; hipStream_t stream; cusolver_int_t n = 1000; cusolver_int_t lda = n + 1; data_type *d_A = nullptr; data_type *d_A_inv = nullptr; data_type *h_A = nullptr; int *d_info; int h_info; const hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_UPPER; const hipblasDiagType_t diag = HIPBLAS_DIAG_NON_UNIT; printf("Generating random diagonal dominant matrix...\n"); generate_random_matrix<data_type>(n, lda, &h_A, &lda); make_diag_dominant_matrix<data_type>(n, lda, h_A, lda); // zero lower triangle for (cusolver_int_t j = 0; j < n; j++) { for (cusolver_int_t i = j + 1; i < n; i++) { h_A[j * lda + i] = 0; } } printf("Initializing required CUDA and cuSOLVER miscelaneous variables...\n"); CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); CUSOLVER_CHECK(hipsolverDnCreate(&handle)); CUSOLVER_CHECK(hipsolverDnSetStream(handle, stream)); printf("Allocating required device memory...\n"); CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_A), sizeof(data_type) * lda * n)); CUDA_CHECK(hipMalloc(reinterpret_cast<void **>(&d_A_inv), sizeof(data_type) * lda * n)); CUDA_CHECK(hipMalloc(&d_info, sizeof(int))); printf("Copying input data to the device...\n"); CUDA_CHECK(hipMemcpy(d_A, h_A, sizeof(data_type) * lda * n, hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(d_A_inv, d_A, sizeof(data_type) * lda * n, hipMemcpyDeviceToDevice)); CUDA_CHECK(hipMemset(d_info, 0, sizeof(int))); trtri(handle, n, d_A_inv, lda, uplo, diag, d_info); CUDA_CHECK(hipStreamSynchronize(stream)); printf("Copying information back to the host...\n"); CUDA_CHECK(hipMemcpy(&h_info, d_info, sizeof(int), hipMemcpyDeviceToHost)); printf("Checking returned information...\n"); if (h_info > 0) { fprintf(stderr, "warning: leading minor of order %d is not p.d.\n", 
h_info); } else if (h_info < 0) { fprintf(stderr, "error: %d-th argument had an illegal value\n", h_info); } printf("Verifying results...\n"); residual_check(n, d_A, d_A_inv, lda, eps); printf("Destroying CUDA and cuSOLVER miscelaneous variables...\n"); CUDA_CHECK(hipStreamDestroy(stream)); CUSOLVER_CHECK(hipsolverDnDestroy(handle)); printf("Freeing memory...\n"); free(h_A); CUDA_CHECK(hipFree(d_A)); CUDA_CHECK(hipFree(d_A_inv)); CUDA_CHECK(hipFree(d_info)); printf("Done...\n"); return EXIT_SUCCESS; }
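The trtri sample above stores A column-major with a padded leading dimension (lda = n + 1) and zeroes the strict lower triangle so that the upper fill mode describes the data. A small hypothetical helper, not part of the sample, spells out the indexing convention it relies on:

// Hypothetical helper mirroring the column-major indexing used by the sample:
// element (row i, column j) of an lda-by-n array lives at j * lda + i; the extra
// padding row implied by lda = n + 1 only affects the allocation size, not the math.
#include <cassert>
#include <cstddef>

inline size_t col_major_index(size_t i, size_t j, size_t lda) {
    assert(i < lda);  // rows are bounded by the leading dimension
    return j * lda + i;
}

// Example: zeroing the strict lower triangle, as done before the factorization above.
inline void zero_lower(double *A, size_t n, size_t lda) {
    for (size_t j = 0; j < n; ++j)
        for (size_t i = j + 1; i < n; ++i)
            A[col_major_index(i, j, lda)] = 0.0;
}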
e363fae83e29dcd82d9e2035d41ced3b71c4048c.cu
/* * Copyright 2020 NVIDIA Corporation. All rights reserved. * * NOTICE TO LICENSEE: * * This source code and/or documentation ("Licensed Deliverables") are * subject to NVIDIA intellectual property rights under U.S. and * international Copyright laws. * * These Licensed Deliverables contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a form of NVIDIA software license agreement by and * between NVIDIA and Licensee ("License Agreement") or electronically * accepted by Licensee. Notwithstanding any terms or conditions to * the contrary in the License Agreement, reproduction or disclosure * of the Licensed Deliverables to any third party without the express * written consent of NVIDIA is prohibited. * * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE * OF THESE LICENSED DELIVERABLES. * * U.S. Government End Users. These Licensed Deliverables are a * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT * 1995), consisting of "commercial computer software" and "commercial * computer software documentation" as such terms are used in 48 * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government * only as a commercial end item. Consistent with 48 C.F.R.12.212 and * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all * U.S. Government End Users acquire the Licensed Deliverables with * only those rights set forth herein. * * Any use of the Licensed Deliverables in individual and commercial * software must include, in the user documentation and internal * comments to the code, the above Disclaimer and U.S. Government End * Users Notice. */ #include <cstdio> #include <cstdlib> #include <stdexcept> #include <cuda_runtime.h> #include <cusolverDn.h> #include "cusolver_utils.h" template <typename T> void trtri(cusolverDnHandle_t handle, cusolver_int_t n, T *d_A, cusolver_int_t lda, cublasFillMode_t uplo, cublasDiagType_t diag, int *d_info) { void *d_work = nullptr; size_t d_lwork = 0; void *h_work = nullptr; size_t h_lwork = 0; try { printf("Quering required device and host workspace size...\n"); CUSOLVER_CHECK(cusolverDnXtrtri_bufferSize(handle, uplo, diag, n, traits<T>::cuda_data_type, reinterpret_cast<void *>(d_A), lda, &d_lwork, &h_lwork)); printf("Allocating required device workspace...\n"); CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_work), d_lwork)); printf("Allocating required host workspace...\n"); if (h_lwork) { h_work = malloc(h_lwork); if (h_work == nullptr) { throw std::bad_alloc(); } } printf("Computing the inverse of a %s triangular matrix...\n", (uplo == CUBLAS_FILL_MODE_UPPER ? 
"upper" : "lower")); CUSOLVER_CHECK(cusolverDnXtrtri(handle, uplo, diag, n, traits<T>::cuda_data_type, d_A, lda, d_work, d_lwork, h_work, h_lwork, d_info)); } catch (const std::exception &e) { fprintf(stderr, "error: %s\n", e.what()); } CUDA_CHECK(cudaFree(d_work)); free(h_work); } // calculate |I - A * A^-1| and compare with eps template <typename T> void residual_check(cusolver_int_t n, T *d_A, T *d_A_inv, cusolver_int_t lda, double eps) { // create identity matrix T *h_A_res = (T *)calloc(n * lda, sizeof(T)); for (cusolver_int_t i = 0; i < n; i++) { h_A_res[i * lda + i] = T(1); } T alpha = -1; T beta = 1; T *d_A_res; CUDA_CHECK(cudaMalloc(&d_A_res, sizeof(T) * n * lda)); CUDA_CHECK(cudaMemcpy(d_A_res, h_A_res, sizeof(T) * n * lda, cudaMemcpyHostToDevice)); cublasHandle_t handle; CUBLAS_CHECK(cublasCreate(&handle)); CUBLAS_CHECK(cublasGemmEx(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n, reinterpret_cast<void *>(&alpha), reinterpret_cast<void *>(d_A), traits<T>::cuda_data_type, lda, reinterpret_cast<void *>(d_A_inv), traits<T>::cuda_data_type, lda, reinterpret_cast<void *>(&beta), reinterpret_cast<void *>(d_A_res), traits<T>::cuda_data_type, lda, traits<T>::cuda_data_type, CUBLAS_GEMM_DEFAULT)); CUDA_CHECK(cudaDeviceSynchronize()); CUDA_CHECK(cudaMemcpy(h_A_res, d_A_res, sizeof(T) * n * lda, cudaMemcpyDeviceToHost)); double A_res_norm = 0.0; for (cusolver_int_t i = 0; i < n; i++) { double sum = 0.0; for (cusolver_int_t j = 0; j < n; j++) { sum += traits<T>::abs(h_A_res[i + j * lda]); } A_res_norm = std::max(A_res_norm, sum); } printf("Check: %s\n", (A_res_norm > eps ? "FAILED" : "PASSED")); CUBLAS_CHECK(cublasDestroy(handle)); free(h_A_res); CUDA_CHECK(cudaFree(d_A_res)); } int main(int argc, char *argv[]) { using data_type = double; const double eps = 1.e-15; cusolverDnHandle_t handle; cudaStream_t stream; cusolver_int_t n = 1000; cusolver_int_t lda = n + 1; data_type *d_A = nullptr; data_type *d_A_inv = nullptr; data_type *h_A = nullptr; int *d_info; int h_info; const cublasFillMode_t uplo = CUBLAS_FILL_MODE_UPPER; const cublasDiagType_t diag = CUBLAS_DIAG_NON_UNIT; printf("Generating random diagonal dominant matrix...\n"); generate_random_matrix<data_type>(n, lda, &h_A, &lda); make_diag_dominant_matrix<data_type>(n, lda, h_A, lda); // zero lower triangle for (cusolver_int_t j = 0; j < n; j++) { for (cusolver_int_t i = j + 1; i < n; i++) { h_A[j * lda + i] = 0; } } printf("Initializing required CUDA and cuSOLVER miscelaneous variables...\n"); CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); CUSOLVER_CHECK(cusolverDnCreate(&handle)); CUSOLVER_CHECK(cusolverDnSetStream(handle, stream)); printf("Allocating required device memory...\n"); CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_A), sizeof(data_type) * lda * n)); CUDA_CHECK(cudaMalloc(reinterpret_cast<void **>(&d_A_inv), sizeof(data_type) * lda * n)); CUDA_CHECK(cudaMalloc(&d_info, sizeof(int))); printf("Copying input data to the device...\n"); CUDA_CHECK(cudaMemcpy(d_A, h_A, sizeof(data_type) * lda * n, cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(d_A_inv, d_A, sizeof(data_type) * lda * n, cudaMemcpyDeviceToDevice)); CUDA_CHECK(cudaMemset(d_info, 0, sizeof(int))); trtri(handle, n, d_A_inv, lda, uplo, diag, d_info); CUDA_CHECK(cudaStreamSynchronize(stream)); printf("Copying information back to the host...\n"); CUDA_CHECK(cudaMemcpy(&h_info, d_info, sizeof(int), cudaMemcpyDeviceToHost)); printf("Checking returned information...\n"); if (h_info > 0) { fprintf(stderr, "warning: leading minor of order %d is not 
p.d.\n", h_info); } else if (h_info < 0) { fprintf(stderr, "error: %d-th argument had an illegal value\n", h_info); } printf("Verifying results...\n"); residual_check(n, d_A, d_A_inv, lda, eps); printf("Destroying CUDA and cuSOLVER miscelaneous variables...\n"); CUDA_CHECK(cudaStreamDestroy(stream)); CUSOLVER_CHECK(cusolverDnDestroy(handle)); printf("Freeing memory...\n"); free(h_A); CUDA_CHECK(cudaFree(d_A)); CUDA_CHECK(cudaFree(d_A_inv)); CUDA_CHECK(cudaFree(d_info)); printf("Done...\n"); return EXIT_SUCCESS; }
ac0869399106906ecc7759a173ec785667811969.hip
// !!! This is a file automatically generated by hipify!!! // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <iostream> #include <stdio.h> #include <hiprand/hiprand.h> #include <cmath> #include <hip/hip_runtime.h> #include <hipcub/hipcub.hpp> #include "cub/test/test_util.h" #include "crystal/crystal.cuh" #include "utils/gpu_utils.h" using namespace std; //--------------------------------------------------------------------- // Implements Projection Operator // There are two variants: dot-product and sigmoid //--------------------------------------------------------------------- bool g_verbose = false; // Whether to display input/output to console hipcub::CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory template<int BLOCK_THREADS, int ITEMS_PER_THREAD> __global__ void project(float* in1, float* in2, float* out, int num_items) { float items[ITEMS_PER_THREAD]; float items2[ITEMS_PER_THREAD]; float res[ITEMS_PER_THREAD]; int tile_offset = blockIdx.x * TILE_SIZE; int num_tiles = (num_items + TILE_SIZE - 1) / TILE_SIZE; int num_tile_items = TILE_SIZE; if (blockIdx.x == num_tiles - 1) { num_tile_items = num_items - tile_offset; } BlockLoad<float, BLOCK_THREADS, ITEMS_PER_THREAD>(in1 + tile_offset, items, num_tile_items); BlockLoad<float, BLOCK_THREADS, ITEMS_PER_THREAD>(in2 + tile_offset, items2, num_tile_items); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { if (threadIdx.x + (ITEM * BLOCK_THREADS) < num_tile_items) { res[ITEM] = 2*items[ITEM] + 3*items2[ITEM]; } } BlockStore<float, BLOCK_THREADS, ITEMS_PER_THREAD>(out + tile_offset, res, num_tile_items); } template<int BLOCK_THREADS, int ITEMS_PER_THREAD> __global__ void projectSigmoid(float* in1, float* in2, float* out, int num_items) { float items[ITEMS_PER_THREAD]; float items2[ITEMS_PER_THREAD]; float res[ITEMS_PER_THREAD]; int tile_offset = blockIdx.x * TILE_SIZE; int num_tiles = (num_items + TILE_SIZE - 1) / TILE_SIZE; int num_tile_items = TILE_SIZE; if (blockIdx.x == num_tiles - 1) { num_tile_items = num_items - tile_offset; } BlockLoad<float, BLOCK_THREADS, ITEMS_PER_THREAD>(in1 + tile_offset, items, num_tile_items); BlockLoad<float, BLOCK_THREADS, ITEMS_PER_THREAD>(in2 + tile_offset, items2, num_tile_items); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { if (threadIdx.x + (ITEM * BLOCK_THREADS) < num_tile_items) { res[ITEM] = 1.0f / (1.0f + expf(-2*items[ITEM] -3*items2[ITEM])); } } BlockStore<float, BLOCK_THREADS, ITEMS_PER_THREAD>(out + tile_offset, res, num_tile_items); } float projectGPU(float* in1, float* in2, float* out, int num_items) { SETUP_TIMING(); float time_proj; int tile_items = 128*4; int num_blocks = (num_items + tile_items - 1)/tile_items; hipLaunchKernelGGL(( TIME_FUNC((project<128,4>), dim3(num_blocks), dim3(128), 0, 0, in1, in2, out, num_items)), time_proj); return time_proj; } float projectSigmoidGPU(float* in1, float* in2, float* out, int num_items) { SETUP_TIMING(); float time_proj; int tile_items = 128*4; int num_blocks = (num_items + tile_items - 1)/tile_items; hipLaunchKernelGGL(( TIME_FUNC((projectSigmoid<128,4>), dim3(num_blocks), dim3(128), 0, 0, in1, in2, out, num_items)), time_proj); return time_proj; } /** * Main */ int main(int argc, char** argv) { int num_items = 1<<28; int num_trials = 1; // Initialize command line CommandLineArgs args(argc, argv); args.GetCmdLineArgument("n", num_items); args.GetCmdLineArgument("t", num_trials); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s " 
"[--n=<input items>] " "[--t=<num trials>] " "[--device=<device-id>] " "[--v] " "\n", argv[0]); exit(0); } // Initialize device CubDebugExit(args.DeviceInit()); // Allocate problem device arrays float *d_in1 = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in1, sizeof(float) * num_items)); float *d_in2 = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in2, sizeof(float) * num_items)); float *d_out = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(float) * num_items)); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hiprandGenerator_t generator; int seed = 0; hiprandCreateGenerator(&generator, HIPRAND_RNG_PSEUDO_DEFAULT); hiprandSetPseudoRandomGeneratorSeed(generator,seed); hiprandGenerateUniform(generator, d_in1, num_items); hiprandGenerateUniform(generator, d_in2, num_items); float time_proj_gpu; float time_proj_sigmoid_gpu; for (int t = 0; t < num_trials; t++) { time_proj_gpu = projectGPU(d_in1, d_in2, d_out, num_items); time_proj_sigmoid_gpu = projectSigmoidGPU(d_in1, d_in2, d_out, num_items); cout<< "{" << "\"time_proj_gpu\":" << time_proj_gpu << ",\"time_proj_sigmoid_gpu\":" << time_proj_sigmoid_gpu << "}" << endl; } // Cleanup if (d_in1) CubDebugExit(g_allocator.DeviceFree(d_in1)); if (d_in2) CubDebugExit(g_allocator.DeviceFree(d_in2)); if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); return 0; }
ac0869399106906ecc7759a173ec785667811969.cu
// Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <iostream> #include <stdio.h> #include <curand.h> #include <cmath> #include <cuda.h> #include <cub/util_allocator.cuh> #include "cub/test/test_util.h" #include "crystal/crystal.cuh" #include "utils/gpu_utils.h" using namespace std; //--------------------------------------------------------------------- // Implements Projection Operator // There are two variants: dot-product and sigmoid //--------------------------------------------------------------------- bool g_verbose = false; // Whether to display input/output to console cub::CachingDeviceAllocator g_allocator(true); // Caching allocator for device memory template<int BLOCK_THREADS, int ITEMS_PER_THREAD> __global__ void project(float* in1, float* in2, float* out, int num_items) { float items[ITEMS_PER_THREAD]; float items2[ITEMS_PER_THREAD]; float res[ITEMS_PER_THREAD]; int tile_offset = blockIdx.x * TILE_SIZE; int num_tiles = (num_items + TILE_SIZE - 1) / TILE_SIZE; int num_tile_items = TILE_SIZE; if (blockIdx.x == num_tiles - 1) { num_tile_items = num_items - tile_offset; } BlockLoad<float, BLOCK_THREADS, ITEMS_PER_THREAD>(in1 + tile_offset, items, num_tile_items); BlockLoad<float, BLOCK_THREADS, ITEMS_PER_THREAD>(in2 + tile_offset, items2, num_tile_items); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { if (threadIdx.x + (ITEM * BLOCK_THREADS) < num_tile_items) { res[ITEM] = 2*items[ITEM] + 3*items2[ITEM]; } } BlockStore<float, BLOCK_THREADS, ITEMS_PER_THREAD>(out + tile_offset, res, num_tile_items); } template<int BLOCK_THREADS, int ITEMS_PER_THREAD> __global__ void projectSigmoid(float* in1, float* in2, float* out, int num_items) { float items[ITEMS_PER_THREAD]; float items2[ITEMS_PER_THREAD]; float res[ITEMS_PER_THREAD]; int tile_offset = blockIdx.x * TILE_SIZE; int num_tiles = (num_items + TILE_SIZE - 1) / TILE_SIZE; int num_tile_items = TILE_SIZE; if (blockIdx.x == num_tiles - 1) { num_tile_items = num_items - tile_offset; } BlockLoad<float, BLOCK_THREADS, ITEMS_PER_THREAD>(in1 + tile_offset, items, num_tile_items); BlockLoad<float, BLOCK_THREADS, ITEMS_PER_THREAD>(in2 + tile_offset, items2, num_tile_items); #pragma unroll for (int ITEM = 0; ITEM < ITEMS_PER_THREAD; ITEM++) { if (threadIdx.x + (ITEM * BLOCK_THREADS) < num_tile_items) { res[ITEM] = 1.0f / (1.0f + expf(-2*items[ITEM] -3*items2[ITEM])); } } BlockStore<float, BLOCK_THREADS, ITEMS_PER_THREAD>(out + tile_offset, res, num_tile_items); } float projectGPU(float* in1, float* in2, float* out, int num_items) { SETUP_TIMING(); float time_proj; int tile_items = 128*4; int num_blocks = (num_items + tile_items - 1)/tile_items; TIME_FUNC((project<128,4><<<num_blocks, 128>>>(in1, in2, out, num_items)), time_proj); return time_proj; } float projectSigmoidGPU(float* in1, float* in2, float* out, int num_items) { SETUP_TIMING(); float time_proj; int tile_items = 128*4; int num_blocks = (num_items + tile_items - 1)/tile_items; TIME_FUNC((projectSigmoid<128,4><<<num_blocks, 128>>>(in1, in2, out, num_items)), time_proj); return time_proj; } /** * Main */ int main(int argc, char** argv) { int num_items = 1<<28; int num_trials = 1; // Initialize command line CommandLineArgs args(argc, argv); args.GetCmdLineArgument("n", num_items); args.GetCmdLineArgument("t", num_trials); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s " "[--n=<input items>] " "[--t=<num trials>] " "[--device=<device-id>] " "[--v] " "\n", argv[0]); exit(0); } // Initialize device 
CubDebugExit(args.DeviceInit()); // Allocate problem device arrays float *d_in1 = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in1, sizeof(float) * num_items)); float *d_in2 = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in2, sizeof(float) * num_items)); float *d_out = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(float) * num_items)); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); curandGenerator_t generator; int seed = 0; curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT); curandSetPseudoRandomGeneratorSeed(generator,seed); curandGenerateUniform(generator, d_in1, num_items); curandGenerateUniform(generator, d_in2, num_items); float time_proj_gpu; float time_proj_sigmoid_gpu; for (int t = 0; t < num_trials; t++) { time_proj_gpu = projectGPU(d_in1, d_in2, d_out, num_items); time_proj_sigmoid_gpu = projectSigmoidGPU(d_in1, d_in2, d_out, num_items); cout<< "{" << "\"time_proj_gpu\":" << time_proj_gpu << ",\"time_proj_sigmoid_gpu\":" << time_proj_sigmoid_gpu << "}" << endl; } // Cleanup if (d_in1) CubDebugExit(g_allocator.DeviceFree(d_in1)); if (d_in2) CubDebugExit(g_allocator.DeviceFree(d_in2)); if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); return 0; }
65e4c572a29e638fc2b02b48ab4270fc18175474.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "utils/utils.h" // ~TODO 3~ // Modify the kernel below such as each element of the // array will be now equal to 0 if it is an even number // or 1, if it is an odd number __global__ void kernel_parity_id(int *a, int N) { unsigned int i = threadIdx.x + blockDim.x * blockIdx.x; if(i < N) { a[i] = a[i] % 2; } } // ~TODO 4~ // Modify the kernel below such as each element will // be equal to the BLOCK ID this computation takes // place. __global__ void kernel_block_id(int *a, int N) { unsigned int i = threadIdx.x + blockDim.x * blockIdx.x; if(i < N) { a[i] = blockIdx.x; } } // ~TODO 5~ // Modify the kernel below such as each element will // be equal to the THREAD ID this computation takes // place. __global__ void kernel_thread_id(int *a, int N) { unsigned int i = threadIdx.x + blockDim.x * blockIdx.x; if(i < N) { a[i] = threadIdx.x; } } int main(void) { int nDevices; // Get the number of CUDA-capable GPU(s) hipGetDeviceCount(&nDevices); // ~TODO 1~ // For each device, show some details in the format below, // then set as active device the first one (assuming there // is at least CUDA-capable device). Pay attention to the // type of the fields in the hipDeviceProp_t structure. // // Device number: <i> // Device name: <name> // Total memory: <mem> // Memory Clock Rate (KHz): <mcr> // Memory Bus Width (bits): <mbw> // // Hint: look for hipGetDeviceProperties and hipSetDevice in // the Cuda Toolkit Documentation. for (int i = 0; i < nDevices; ++i) { hipDeviceProp_t prop; hipGetDeviceProperties(&prop, i); printf("Device number: %d\n", i); printf("\tDevice name: %s\n", prop.name); printf("\tTotal memory: %lu\n", prop.totalGlobalMem); printf("\tMemory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf("\tMemory bus width (bits): %d\n", prop.memoryBusWidth); } // ~TODO 2~ // With information from example_2.cu, allocate an array with // integers (where a[i] = i). Then, modify the three kernels // above and execute them using 4 blocks, each with 4 threads. // Hint: num_elements = block_size * block_no (see example_2) // // You can use the fill_array_int(int *a, int n) function (from utils) // to fill your array as many times you want. const int num_elements = (1 << 20); const size_t num_blocks = num_elements / 4; size_t block_size = 4; const int num_bytes = num_elements * sizeof(int); int *host_array = (int *)malloc(num_bytes); if(!host_array) { fprintf(stderr, "malloc error\n"); return -1; } int *device_array = NULL; hipMalloc((void **)&device_array, num_bytes); fill_array_int(host_array, num_elements); hipMemcpy(device_array, host_array, num_bytes, hipMemcpyHostToDevice); // ~TODO 3~ // Execute kernel_parity_id kernel and then copy from // the device to the host; call hipDeviceSynchronize() // after a kernel execution for safety purposes. 
// // Uncomment the line below to check your results /* kernel_parity_id<<<num_blocks, block_size>>>(device_array, num_elements); hipDeviceSynchronize(); hipMemcpy(host_array, device_array, num_bytes, hipMemcpyDeviceToHost); check_task_1(3, host_array); */ // ~TODO 4~ // Execute kernel_block_id kernel and then copy from // the device to the host; // // Uncomment the line below to check your results /* kernel_block_id<<<num_blocks, block_size>>>(device_array, num_elements); hipDeviceSynchronize(); hipMemcpy(host_array, device_array, num_bytes, hipMemcpyDeviceToHost); check_task_1(4, host_array); */ // ~TODO 5~ // Execute kernel_thread_id kernel and then copy from // the device to the host; // // Uncomment the line below to check your results hipLaunchKernelGGL(( kernel_thread_id), dim3(num_blocks), dim3(block_size), 0, 0, device_array, num_elements); hipDeviceSynchronize(); hipMemcpy(host_array, device_array, num_bytes, hipMemcpyDeviceToHost); check_task_1(5, host_array); // TODO 6: Free the memory free(host_array); hipFree(device_array); return 0; }
65e4c572a29e638fc2b02b48ab4270fc18175474.cu
#include <stdio.h> #include "utils/utils.h" // ~TODO 3~ // Modify the kernel below such as each element of the // array will be now equal to 0 if it is an even number // or 1, if it is an odd number __global__ void kernel_parity_id(int *a, int N) { unsigned int i = threadIdx.x + blockDim.x * blockIdx.x; if(i < N) { a[i] = a[i] % 2; } } // ~TODO 4~ // Modify the kernel below such as each element will // be equal to the BLOCK ID this computation takes // place. __global__ void kernel_block_id(int *a, int N) { unsigned int i = threadIdx.x + blockDim.x * blockIdx.x; if(i < N) { a[i] = blockIdx.x; } } // ~TODO 5~ // Modify the kernel below such as each element will // be equal to the THREAD ID this computation takes // place. __global__ void kernel_thread_id(int *a, int N) { unsigned int i = threadIdx.x + blockDim.x * blockIdx.x; if(i < N) { a[i] = threadIdx.x; } } int main(void) { int nDevices; // Get the number of CUDA-capable GPU(s) cudaGetDeviceCount(&nDevices); // ~TODO 1~ // For each device, show some details in the format below, // then set as active device the first one (assuming there // is at least CUDA-capable device). Pay attention to the // type of the fields in the cudaDeviceProp structure. // // Device number: <i> // Device name: <name> // Total memory: <mem> // Memory Clock Rate (KHz): <mcr> // Memory Bus Width (bits): <mbw> // // Hint: look for cudaGetDeviceProperties and cudaSetDevice in // the Cuda Toolkit Documentation. for (int i = 0; i < nDevices; ++i) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("Device number: %d\n", i); printf("\tDevice name: %s\n", prop.name); printf("\tTotal memory: %lu\n", prop.totalGlobalMem); printf("\tMemory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf("\tMemory bus width (bits): %d\n", prop.memoryBusWidth); } // ~TODO 2~ // With information from example_2.cu, allocate an array with // integers (where a[i] = i). Then, modify the three kernels // above and execute them using 4 blocks, each with 4 threads. // Hint: num_elements = block_size * block_no (see example_2) // // You can use the fill_array_int(int *a, int n) function (from utils) // to fill your array as many times you want. const int num_elements = (1 << 20); const size_t num_blocks = num_elements / 4; size_t block_size = 4; const int num_bytes = num_elements * sizeof(int); int *host_array = (int *)malloc(num_bytes); if(!host_array) { fprintf(stderr, "malloc error\n"); return -1; } int *device_array = NULL; cudaMalloc((void **)&device_array, num_bytes); fill_array_int(host_array, num_elements); cudaMemcpy(device_array, host_array, num_bytes, cudaMemcpyHostToDevice); // ~TODO 3~ // Execute kernel_parity_id kernel and then copy from // the device to the host; call cudaDeviceSynchronize() // after a kernel execution for safety purposes. 
// // Uncomment the line below to check your results /* kernel_parity_id<<<num_blocks, block_size>>>(device_array, num_elements); cudaDeviceSynchronize(); cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost); check_task_1(3, host_array); */ // ~TODO 4~ // Execute kernel_block_id kernel and then copy from // the device to the host; // // Uncomment the line below to check your results /* kernel_block_id<<<num_blocks, block_size>>>(device_array, num_elements); cudaDeviceSynchronize(); cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost); check_task_1(4, host_array); */ // ~TODO 5~ // Execute kernel_thread_id kernel and then copy from // the device to the host; // // Uncomment the line below to check your results kernel_thread_id<<<num_blocks, block_size>>>(device_array, num_elements); cudaDeviceSynchronize(); cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost); check_task_1(5, host_array); // TODO 6: Free the memory free(host_array); cudaFree(device_array); return 0; }
5cce0e51d5b08fc877a4013ee9ea8900b3433513.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Math.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/JitLoops.cuh> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/Math.cuh> #include <ATen/native/hip/jit_utils.h> #include <ATen/NumericUtils.h> #include <c10/core/Scalar.h> #include <c10/hip/HIPMathCompat.h> #include <c10/util/complex.h> namespace at::native { namespace { const char spherical_bessel_j0_name[] = "spherical_bessel_j0_forward"; void spherical_bessel_j0_kernel_cuda(TensorIteratorBase& iterator) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "spherical_bessel_j0_cuda", [&]() { jitted_gpu_kernel<spherical_bessel_j0_name, scalar_t, scalar_t, 1>(iterator, spherical_bessel_j0_string); }); #else AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "spherical_bessel_j0_cuda", [&]() { gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t { return spherical_bessel_j0_forward(a); }); }); #endif // AT_USE_JITERATOR() } } REGISTER_DISPATCH(special_spherical_bessel_j0_stub, &spherical_bessel_j0_kernel_cuda); } // namespace at::native
5cce0e51d5b08fc877a4013ee9ea8900b3433513.cu
#define TORCH_ASSERT_NO_OPERATORS #include <ATen/native/UnaryOps.h> #include <limits> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/native/Math.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/JitLoops.cuh> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/Math.cuh> #include <ATen/native/cuda/jit_utils.h> #include <ATen/NumericUtils.h> #include <c10/core/Scalar.h> #include <c10/cuda/CUDAMathCompat.h> #include <c10/util/complex.h> namespace at::native { namespace { const char spherical_bessel_j0_name[] = "spherical_bessel_j0_forward"; void spherical_bessel_j0_kernel_cuda(TensorIteratorBase& iterator) { #if AT_USE_JITERATOR() AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "spherical_bessel_j0_cuda", [&]() { jitted_gpu_kernel<spherical_bessel_j0_name, scalar_t, scalar_t, 1>(iterator, spherical_bessel_j0_string); }); #else AT_DISPATCH_FLOATING_TYPES(iterator.common_dtype(), "spherical_bessel_j0_cuda", [&]() { gpu_kernel(iterator, []GPU_LAMBDA(scalar_t a) -> scalar_t { return spherical_bessel_j0_forward(a); }); }); #endif // AT_USE_JITERATOR() } } REGISTER_DISPATCH(special_spherical_bessel_j0_stub, &spherical_bessel_j0_kernel_cuda); } // namespace at::native
c37a592041dba258ddacb19663837a35225d15c7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <accelerate_cuda.h> static TexWord32 arrIn0_2; static TexFloat arrIn0_1; static TexFloat arrIn0_0; extern "C" __global__ void generate(const Int64 shIn0_0, const Int64 shOut_0, Word32* __restrict__ arrOut_2, float* __restrict__ arrOut_1, float* __restrict__ arrOut_0) { const int shapeSize = shOut_0; const int gridSize = __umul24(blockDim.x, gridDim.x); int ix; for (ix = __umul24(blockDim.x, blockIdx.x) + threadIdx.x; ix < shapeSize; ix += gridSize) { const Int64 sh0 = ({ assert(ix >= 0 && ix < shOut_0); ix; }); const Word32 v0 = indexArray(arrIn0_2, sh0); const float v1 = indexArray(arrIn0_1, sh0); const float v2 = indexArray(arrIn0_0, sh0); arrOut_2[ix] = (Word32) sh0; arrOut_1[ix] = v1 / (float) v0; arrOut_0[ix] = v2 / (float) v0; } }
c37a592041dba258ddacb19663837a35225d15c7.cu
#include <accelerate_cuda.h> static TexWord32 arrIn0_2; static TexFloat arrIn0_1; static TexFloat arrIn0_0; extern "C" __global__ void generate(const Int64 shIn0_0, const Int64 shOut_0, Word32* __restrict__ arrOut_2, float* __restrict__ arrOut_1, float* __restrict__ arrOut_0) { const int shapeSize = shOut_0; const int gridSize = __umul24(blockDim.x, gridDim.x); int ix; for (ix = __umul24(blockDim.x, blockIdx.x) + threadIdx.x; ix < shapeSize; ix += gridSize) { const Int64 sh0 = ({ assert(ix >= 0 && ix < shOut_0); ix; }); const Word32 v0 = indexArray(arrIn0_2, sh0); const float v1 = indexArray(arrIn0_1, sh0); const float v2 = indexArray(arrIn0_0, sh0); arrOut_2[ix] = (Word32) sh0; arrOut_1[ix] = v1 / (float) v0; arrOut_0[ix] = v2 / (float) v0; } }
b469f9dc26b9b1249b3ef3670b292f2b81f83253.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/impl/L2Norm.cuh> #include <faiss/impl/FaissAssert.h> #include <faiss/gpu/utils/ConversionOperators.cuh> #include <faiss/gpu/utils/DeviceDefs.cuh> #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/gpu/utils/Float16.cuh> #include <faiss/gpu/utils/MathOperators.cuh> #include <faiss/gpu/utils/PtxUtils.cuh> #include <faiss/gpu/utils/StaticUtils.h> #include <faiss/gpu/utils/Reductions.cuh> namespace faiss { namespace gpu { // Input: (batch x dim) // Output: (batch norm) // Done under the presumption that the dimension size is not too large // (<10k or so), since there wouldn't be enough parallelism applying a // single block to the problem. Also that each vector is large enough // (>64), since a single block works on multiple rows' norms at the // same time. // T: the type we are doing the math in (e.g., float, half) // TVec: the potentially vectorized type we are loading in (e.g., // float4, half2) template <typename T, typename TVec, typename IndexType, int RowTileSize, bool NormLoop, bool NormSquared> __global__ void l2NormRowMajor(Tensor<TVec, 2, true, IndexType> input, Tensor<float, 1, true, IndexType> output) { extern __shared__ char smemByte[]; // #warps * RowTileSize elements float* smem = (float*) smemByte; IndexType numWarps = utils::divUp(blockDim.x, kWarpSize); IndexType laneId = getLaneId(); IndexType warpId = threadIdx.x / kWarpSize; bool lastRowTile = (blockIdx.x == (gridDim.x - 1)); IndexType rowStart = RowTileSize * blockIdx.x; // accumulate in f32 float rowNorm[RowTileSize]; if (lastRowTile) { // We are handling the very end of the input matrix rows for (IndexType row = 0; row < input.getSize(0) - rowStart; ++row) { if (NormLoop) { rowNorm[0] = 0; for (IndexType col = threadIdx.x; col < input.getSize(1); col += blockDim.x) { TVec val = input[rowStart + row][col]; val = Math<TVec>::mul(val, val); rowNorm[0] = rowNorm[0] + Math<TVec>::reduceAdd(val); } } else { TVec val = input[rowStart + row][threadIdx.x]; val = Math<TVec>::mul(val, val); rowNorm[0] = Math<TVec>::reduceAdd(val); } rowNorm[0] = warpReduceAllSum(rowNorm[0]); if (laneId == 0) { smem[row * numWarps + warpId] = rowNorm[0]; } } } else { // We are guaranteed that all RowTileSize rows are available in // [rowStart, rowStart + RowTileSize) if (NormLoop) { // A single block of threads is not big enough to span each // vector TVec tmp[RowTileSize]; #pragma unroll for (int row = 0; row < RowTileSize; ++row) { rowNorm[row] = 0; } for (IndexType col = threadIdx.x; col < input.getSize(1); col += blockDim.x) { #pragma unroll for (int row = 0; row < RowTileSize; ++row) { tmp[row] = input[rowStart + row][col]; } #pragma unroll for (int row = 0; row < RowTileSize; ++row) { tmp[row] = Math<TVec>::mul(tmp[row], tmp[row]); } #pragma unroll for (int row = 0; row < RowTileSize; ++row) { rowNorm[row] = rowNorm[row] + Math<TVec>::reduceAdd(tmp[row]); } } } else { TVec tmp[RowTileSize]; // A block of threads is the exact size of the vector #pragma unroll for (int row = 0; row < RowTileSize; ++row) { tmp[row] = input[rowStart + row][threadIdx.x]; } #pragma unroll for (int row = 0; row < RowTileSize; ++row) { tmp[row] = Math<TVec>::mul(tmp[row], tmp[row]); } #pragma unroll for (int row = 0; row < RowTileSize; ++row) { rowNorm[row] = 
Math<TVec>::reduceAdd(tmp[row]); } } // Sum up all parts in each warp #pragma unroll for (int row = 0; row < RowTileSize; ++row) { rowNorm[row] = warpReduceAllSum(rowNorm[row]); } if (laneId == 0) { #pragma unroll for (int row = 0; row < RowTileSize; ++row) { smem[row * numWarps + warpId] = rowNorm[row]; } } } __syncthreads(); // Sum across warps if (warpId == 0) { #pragma unroll for (int row = 0; row < RowTileSize; ++row) { rowNorm[row] = laneId < numWarps ? smem[row * numWarps + laneId] : 0; } #pragma unroll for (int row = 0; row < RowTileSize; ++row) { rowNorm[row] = warpReduceAllSum(rowNorm[row]); } // Write out answer if (laneId == 0) { #pragma unroll for (int row = 0; row < RowTileSize; ++row) { int outCol = rowStart + row; if (lastRowTile) { if (outCol < output.getSize(0)) { output[outCol] = NormSquared ? ConvertTo<float>::to(rowNorm[row]) : sqrtf(ConvertTo<float>::to(rowNorm[row])); } } else { output[outCol] = NormSquared ? ConvertTo<float>::to(rowNorm[row]) : sqrtf(ConvertTo<float>::to(rowNorm[row])); } } } } } // Input: (dim x batch) // Output: (batch norm) // Handles the case where `input` is column major. A single thread calculates // the norm of each vector instead of a block-wide reduction. template <typename T, typename IndexType, bool NormSquared> __global__ void l2NormColMajor(Tensor<T, 2, true, IndexType> input, Tensor<float, 1, true, IndexType> output) { // grid-stride loop to handle all batch elements for (IndexType batch = blockIdx.x * blockDim.x + threadIdx.x; batch < input.getSize(1); batch += gridDim.x * blockDim.x) { float sum = 0; // This is still a coalesced load from the memory for (IndexType dim = 0; dim < input.getSize(0); ++dim) { // Just do the math in float32, even if the input is float16 float v = ConvertTo<float>::to(input[dim][batch]); sum += v * v; } if (!NormSquared) { sum = sqrtf(sum); } output[batch] = ConvertTo<float>::to(sum); } } template <typename T, typename TVec, typename IndexType> void runL2Norm(Tensor<T, 2, true, IndexType>& input, bool inputRowMajor, Tensor<float, 1, true, IndexType>& output, bool normSquared, hipStream_t stream) { IndexType maxThreads = (IndexType) getMaxThreadsCurrentDevice(); constexpr int rowTileSize = 8; #define RUN_L2_ROW_MAJOR(TYPE_T, TYPE_TVEC, INPUT) \ do { \ if (normLoop) { \ if (normSquared) { \ hipLaunchKernelGGL(( l2NormRowMajor<TYPE_T, TYPE_TVEC, IndexType, rowTileSize, true, true>) \ , dim3(grid), dim3(block), smem, stream, INPUT, output); \ } else { \ hipLaunchKernelGGL(( l2NormRowMajor<TYPE_T, TYPE_TVEC, IndexType, rowTileSize, true, false>) \ , dim3(grid), dim3(block), smem, stream, INPUT, output); \ } \ } else { \ if (normSquared) { \ hipLaunchKernelGGL(( l2NormRowMajor<TYPE_T, TYPE_TVEC, IndexType, rowTileSize, false, true>) \ , dim3(grid), dim3(block), smem, stream, INPUT, output); \ } else { \ hipLaunchKernelGGL(( l2NormRowMajor<TYPE_T, TYPE_TVEC, IndexType, rowTileSize, false, false>) \ , dim3(grid), dim3(block), smem, stream, INPUT, output); \ } \ } \ } while (0) if (inputRowMajor) { // // Row-major kernel /// if (input.template canCastResize<TVec>()) { // Can load using the vectorized type auto inputV = input.template castResize<TVec>(); auto dim = inputV.getSize(1); bool normLoop = dim > maxThreads; auto numThreads = min(dim, maxThreads); auto grid = dim3(utils::divUp(inputV.getSize(0), rowTileSize)); auto block = dim3(numThreads); auto smem = sizeof(float) * rowTileSize * utils::divUp(numThreads, kWarpSize); RUN_L2_ROW_MAJOR(T, TVec, inputV); } else { // Can't load using the vectorized type auto 
dim = input.getSize(1); bool normLoop = dim > maxThreads; auto numThreads = min(dim, maxThreads); auto grid = dim3(utils::divUp(input.getSize(0), rowTileSize)); auto block = dim3(numThreads); auto smem = sizeof(float) * rowTileSize * utils::divUp(numThreads, kWarpSize); RUN_L2_ROW_MAJOR(T, T, input); } } else { // // Column-major kernel // // Just use a fixed-sized block, since the kernel threads are fully // independent auto block = 128; // Cap the grid size at 2^16 since there is a grid-stride loop to handle // processing everything auto grid = (int) ::min(utils::divUp(input.getSize(1), (IndexType) block), (IndexType) 65536); if (normSquared) { hipLaunchKernelGGL(( l2NormColMajor<T, IndexType, true>), dim3(grid), dim3(block), 0, stream, input, output); } else { hipLaunchKernelGGL(( l2NormColMajor<T, IndexType, false>), dim3(grid), dim3(block), 0, stream, input, output); } } #undef RUN_L2 CUDA_TEST_ERROR(); } void runL2Norm(Tensor<float, 2, true>& input, bool inputRowMajor, Tensor<float, 1, true>& output, bool normSquared, hipStream_t stream) { if (input.canUseIndexType<int>()) { runL2Norm<float, float4, int>( input, inputRowMajor, output, normSquared, stream); } else { auto inputCast = input.castIndexType<long>(); auto outputCast = output.castIndexType<long>(); runL2Norm<float, float4, long>( inputCast, inputRowMajor, outputCast, normSquared, stream); } } void runL2Norm(Tensor<half, 2, true>& input, bool inputRowMajor, Tensor<float, 1, true>& output, bool normSquared, hipStream_t stream) { if (input.canUseIndexType<int>()) { runL2Norm<half, half2, int>( input, inputRowMajor, output, normSquared, stream); } else { auto inputCast = input.castIndexType<long>(); auto outputCast = output.castIndexType<long>(); runL2Norm<half, half2, long>( inputCast, inputRowMajor, outputCast, normSquared, stream); } } } } // namespace
b469f9dc26b9b1249b3ef3670b292f2b81f83253.cu
/** * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <faiss/gpu/impl/L2Norm.cuh> #include <faiss/impl/FaissAssert.h> #include <faiss/gpu/utils/ConversionOperators.cuh> #include <faiss/gpu/utils/DeviceDefs.cuh> #include <faiss/gpu/utils/DeviceUtils.h> #include <faiss/gpu/utils/Float16.cuh> #include <faiss/gpu/utils/MathOperators.cuh> #include <faiss/gpu/utils/PtxUtils.cuh> #include <faiss/gpu/utils/StaticUtils.h> #include <faiss/gpu/utils/Reductions.cuh> namespace faiss { namespace gpu { // Input: (batch x dim) // Output: (batch norm) // Done under the presumption that the dimension size is not too large // (<10k or so), since there wouldn't be enough parallelism applying a // single block to the problem. Also that each vector is large enough // (>64), since a single block works on multiple rows' norms at the // same time. // T: the type we are doing the math in (e.g., float, half) // TVec: the potentially vectorized type we are loading in (e.g., // float4, half2) template <typename T, typename TVec, typename IndexType, int RowTileSize, bool NormLoop, bool NormSquared> __global__ void l2NormRowMajor(Tensor<TVec, 2, true, IndexType> input, Tensor<float, 1, true, IndexType> output) { extern __shared__ char smemByte[]; // #warps * RowTileSize elements float* smem = (float*) smemByte; IndexType numWarps = utils::divUp(blockDim.x, kWarpSize); IndexType laneId = getLaneId(); IndexType warpId = threadIdx.x / kWarpSize; bool lastRowTile = (blockIdx.x == (gridDim.x - 1)); IndexType rowStart = RowTileSize * blockIdx.x; // accumulate in f32 float rowNorm[RowTileSize]; if (lastRowTile) { // We are handling the very end of the input matrix rows for (IndexType row = 0; row < input.getSize(0) - rowStart; ++row) { if (NormLoop) { rowNorm[0] = 0; for (IndexType col = threadIdx.x; col < input.getSize(1); col += blockDim.x) { TVec val = input[rowStart + row][col]; val = Math<TVec>::mul(val, val); rowNorm[0] = rowNorm[0] + Math<TVec>::reduceAdd(val); } } else { TVec val = input[rowStart + row][threadIdx.x]; val = Math<TVec>::mul(val, val); rowNorm[0] = Math<TVec>::reduceAdd(val); } rowNorm[0] = warpReduceAllSum(rowNorm[0]); if (laneId == 0) { smem[row * numWarps + warpId] = rowNorm[0]; } } } else { // We are guaranteed that all RowTileSize rows are available in // [rowStart, rowStart + RowTileSize) if (NormLoop) { // A single block of threads is not big enough to span each // vector TVec tmp[RowTileSize]; #pragma unroll for (int row = 0; row < RowTileSize; ++row) { rowNorm[row] = 0; } for (IndexType col = threadIdx.x; col < input.getSize(1); col += blockDim.x) { #pragma unroll for (int row = 0; row < RowTileSize; ++row) { tmp[row] = input[rowStart + row][col]; } #pragma unroll for (int row = 0; row < RowTileSize; ++row) { tmp[row] = Math<TVec>::mul(tmp[row], tmp[row]); } #pragma unroll for (int row = 0; row < RowTileSize; ++row) { rowNorm[row] = rowNorm[row] + Math<TVec>::reduceAdd(tmp[row]); } } } else { TVec tmp[RowTileSize]; // A block of threads is the exact size of the vector #pragma unroll for (int row = 0; row < RowTileSize; ++row) { tmp[row] = input[rowStart + row][threadIdx.x]; } #pragma unroll for (int row = 0; row < RowTileSize; ++row) { tmp[row] = Math<TVec>::mul(tmp[row], tmp[row]); } #pragma unroll for (int row = 0; row < RowTileSize; ++row) { rowNorm[row] = Math<TVec>::reduceAdd(tmp[row]); } } // Sum up all parts in each warp #pragma unroll for (int row = 0; 
row < RowTileSize; ++row) { rowNorm[row] = warpReduceAllSum(rowNorm[row]); } if (laneId == 0) { #pragma unroll for (int row = 0; row < RowTileSize; ++row) { smem[row * numWarps + warpId] = rowNorm[row]; } } } __syncthreads(); // Sum across warps if (warpId == 0) { #pragma unroll for (int row = 0; row < RowTileSize; ++row) { rowNorm[row] = laneId < numWarps ? smem[row * numWarps + laneId] : 0; } #pragma unroll for (int row = 0; row < RowTileSize; ++row) { rowNorm[row] = warpReduceAllSum(rowNorm[row]); } // Write out answer if (laneId == 0) { #pragma unroll for (int row = 0; row < RowTileSize; ++row) { int outCol = rowStart + row; if (lastRowTile) { if (outCol < output.getSize(0)) { output[outCol] = NormSquared ? ConvertTo<float>::to(rowNorm[row]) : sqrtf(ConvertTo<float>::to(rowNorm[row])); } } else { output[outCol] = NormSquared ? ConvertTo<float>::to(rowNorm[row]) : sqrtf(ConvertTo<float>::to(rowNorm[row])); } } } } } // Input: (dim x batch) // Output: (batch norm) // Handles the case where `input` is column major. A single thread calculates // the norm of each vector instead of a block-wide reduction. template <typename T, typename IndexType, bool NormSquared> __global__ void l2NormColMajor(Tensor<T, 2, true, IndexType> input, Tensor<float, 1, true, IndexType> output) { // grid-stride loop to handle all batch elements for (IndexType batch = blockIdx.x * blockDim.x + threadIdx.x; batch < input.getSize(1); batch += gridDim.x * blockDim.x) { float sum = 0; // This is still a coalesced load from the memory for (IndexType dim = 0; dim < input.getSize(0); ++dim) { // Just do the math in float32, even if the input is float16 float v = ConvertTo<float>::to(input[dim][batch]); sum += v * v; } if (!NormSquared) { sum = sqrtf(sum); } output[batch] = ConvertTo<float>::to(sum); } } template <typename T, typename TVec, typename IndexType> void runL2Norm(Tensor<T, 2, true, IndexType>& input, bool inputRowMajor, Tensor<float, 1, true, IndexType>& output, bool normSquared, cudaStream_t stream) { IndexType maxThreads = (IndexType) getMaxThreadsCurrentDevice(); constexpr int rowTileSize = 8; #define RUN_L2_ROW_MAJOR(TYPE_T, TYPE_TVEC, INPUT) \ do { \ if (normLoop) { \ if (normSquared) { \ l2NormRowMajor<TYPE_T, TYPE_TVEC, IndexType, rowTileSize, true, true> \ <<<grid, block, smem, stream>>>(INPUT, output); \ } else { \ l2NormRowMajor<TYPE_T, TYPE_TVEC, IndexType, rowTileSize, true, false> \ <<<grid, block, smem, stream>>>(INPUT, output); \ } \ } else { \ if (normSquared) { \ l2NormRowMajor<TYPE_T, TYPE_TVEC, IndexType, rowTileSize, false, true> \ <<<grid, block, smem, stream>>>(INPUT, output); \ } else { \ l2NormRowMajor<TYPE_T, TYPE_TVEC, IndexType, rowTileSize, false, false> \ <<<grid, block, smem, stream>>>(INPUT, output); \ } \ } \ } while (0) if (inputRowMajor) { // // Row-major kernel /// if (input.template canCastResize<TVec>()) { // Can load using the vectorized type auto inputV = input.template castResize<TVec>(); auto dim = inputV.getSize(1); bool normLoop = dim > maxThreads; auto numThreads = min(dim, maxThreads); auto grid = dim3(utils::divUp(inputV.getSize(0), rowTileSize)); auto block = dim3(numThreads); auto smem = sizeof(float) * rowTileSize * utils::divUp(numThreads, kWarpSize); RUN_L2_ROW_MAJOR(T, TVec, inputV); } else { // Can't load using the vectorized type auto dim = input.getSize(1); bool normLoop = dim > maxThreads; auto numThreads = min(dim, maxThreads); auto grid = dim3(utils::divUp(input.getSize(0), rowTileSize)); auto block = dim3(numThreads); auto smem = sizeof(float) * 
rowTileSize * utils::divUp(numThreads, kWarpSize); RUN_L2_ROW_MAJOR(T, T, input); } } else { // // Column-major kernel // // Just use a fixed-sized block, since the kernel threads are fully // independent auto block = 128; // Cap the grid size at 2^16 since there is a grid-stride loop to handle // processing everything auto grid = (int) std::min(utils::divUp(input.getSize(1), (IndexType) block), (IndexType) 65536); if (normSquared) { l2NormColMajor<T, IndexType, true><<<grid, block, 0, stream>>>( input, output); } else { l2NormColMajor<T, IndexType, false><<<grid, block, 0, stream>>>( input, output); } } #undef RUN_L2 CUDA_TEST_ERROR(); } void runL2Norm(Tensor<float, 2, true>& input, bool inputRowMajor, Tensor<float, 1, true>& output, bool normSquared, cudaStream_t stream) { if (input.canUseIndexType<int>()) { runL2Norm<float, float4, int>( input, inputRowMajor, output, normSquared, stream); } else { auto inputCast = input.castIndexType<long>(); auto outputCast = output.castIndexType<long>(); runL2Norm<float, float4, long>( inputCast, inputRowMajor, outputCast, normSquared, stream); } } void runL2Norm(Tensor<half, 2, true>& input, bool inputRowMajor, Tensor<float, 1, true>& output, bool normSquared, cudaStream_t stream) { if (input.canUseIndexType<int>()) { runL2Norm<half, half2, int>( input, inputRowMajor, output, normSquared, stream); } else { auto inputCast = input.castIndexType<long>(); auto outputCast = output.castIndexType<long>(); runL2Norm<half, half2, long>( inputCast, inputRowMajor, outputCast, normSquared, stream); } } } } // namespace
197f4f32899a9c0ad27a49abdf523a52952584cb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #define T 256 #define n 1024 __global__ void reduceToSummation(int *originalData, int stride) { int threadId = (blockIdx.x * blockDim.x) + threadIdx.x; int idx = 2 * stride * threadId; if(idx < n) { originalData[idx] = originalData[idx] + originalData[idx + stride]; } } __global__ void reduceToMinimum(int *originalData, int stride) { int threadId = (blockIdx.x * blockDim.x) + threadIdx.x; int idx = 2 * stride * threadId; if(idx < n) { int min = originalData[idx]; if(originalData[idx + stride] < min) { min = originalData[idx + stride]; } originalData[idx] = min; } } __global__ void reduceToMaximum(int *originalData, int stride) { int threadId = (blockIdx.x * blockDim.x) + threadIdx.x; int idx = 2 * stride * threadId; if(idx < n) { int max = originalData[idx]; if(originalData[idx + stride] > max) { max = originalData[idx + stride]; } originalData[idx] = max; } } int main(int argc, char *argv[]) { int originalData[n]; int sum, min, max; int i; int *deviceOriginalData; int arrayByteSize = n * sizeof(int); printf("ORIGINAL: \n"); for(i = 0; i < n; i++) { originalData[i] = i; printf("%3d ", originalData[i]); } printf("\n\n"); // Allocates Once for all kernels hipMalloc((void**) &deviceOriginalData, arrayByteSize); // KERNEL 1: Find Average by Finding Summation hipMemcpy(deviceOriginalData, originalData, arrayByteSize, hipMemcpyHostToDevice); for(int s = 1; s < n; s *= 2) { hipLaunchKernelGGL(( reduceToSummation), dim3((n + T - 1) / T), dim3(T), 0, 0, deviceOriginalData, s); } hipMemcpy(&sum, deviceOriginalData, sizeof(int), hipMemcpyDeviceToHost); double realAverage = sum / (double) n; // KERNEL 2: Find Minimum hipMemcpy(deviceOriginalData, originalData, arrayByteSize, hipMemcpyHostToDevice); for(int s = 1; s < n; s *= 2) { hipLaunchKernelGGL(( reduceToMinimum), dim3((n + T - 1) / T), dim3(T), 0, 0, deviceOriginalData, s); } hipMemcpy(&min, deviceOriginalData, sizeof(int), hipMemcpyDeviceToHost); // KERNEL 3: Find Maximum hipMemcpy(deviceOriginalData, originalData, arrayByteSize, hipMemcpyHostToDevice); for(int s = 1; s < n; s *= 2) { hipLaunchKernelGGL(( reduceToMaximum), dim3((n + T - 1) / T), dim3(T), 0, 0, deviceOriginalData, s); } hipMemcpy(&max, deviceOriginalData, sizeof(int), hipMemcpyDeviceToHost); // Free the memory hipFree(deviceOriginalData); // Print the results printf("\nAverage is %.2f", realAverage); printf("\nThe Minimum Number is %d\n", min); printf("The Maximum Number is %d\n", max); return 0; }
197f4f32899a9c0ad27a49abdf523a52952584cb.cu
#include <stdio.h> #include <stdlib.h> #define T 256 #define n 1024 __global__ void reduceToSummation(int *originalData, int stride) { int threadId = (blockIdx.x * blockDim.x) + threadIdx.x; int idx = 2 * stride * threadId; if(idx < n) { originalData[idx] = originalData[idx] + originalData[idx + stride]; } } __global__ void reduceToMinimum(int *originalData, int stride) { int threadId = (blockIdx.x * blockDim.x) + threadIdx.x; int idx = 2 * stride * threadId; if(idx < n) { int min = originalData[idx]; if(originalData[idx + stride] < min) { min = originalData[idx + stride]; } originalData[idx] = min; } } __global__ void reduceToMaximum(int *originalData, int stride) { int threadId = (blockIdx.x * blockDim.x) + threadIdx.x; int idx = 2 * stride * threadId; if(idx < n) { int max = originalData[idx]; if(originalData[idx + stride] > max) { max = originalData[idx + stride]; } originalData[idx] = max; } } int main(int argc, char *argv[]) { int originalData[n]; int sum, min, max; int i; int *deviceOriginalData; int arrayByteSize = n * sizeof(int); printf("ORIGINAL: \n"); for(i = 0; i < n; i++) { originalData[i] = i; printf("%3d ", originalData[i]); } printf("\n\n"); // Allocates Once for all kernels cudaMalloc((void**) &deviceOriginalData, arrayByteSize); // KERNEL 1: Find Average by Finding Summation cudaMemcpy(deviceOriginalData, originalData, arrayByteSize, cudaMemcpyHostToDevice); for(int s = 1; s < n; s *= 2) { reduceToSummation<<<(n + T - 1) / T, T>>>(deviceOriginalData, s); } cudaMemcpy(&sum, deviceOriginalData, sizeof(int), cudaMemcpyDeviceToHost); double realAverage = sum / (double) n; // KERNEL 2: Find Minimum cudaMemcpy(deviceOriginalData, originalData, arrayByteSize, cudaMemcpyHostToDevice); for(int s = 1; s < n; s *= 2) { reduceToMinimum<<<(n + T - 1) / T, T>>>(deviceOriginalData, s); } cudaMemcpy(&min, deviceOriginalData, sizeof(int), cudaMemcpyDeviceToHost); // KERNEL 3: Find Maximum cudaMemcpy(deviceOriginalData, originalData, arrayByteSize, cudaMemcpyHostToDevice); for(int s = 1; s < n; s *= 2) { reduceToMaximum<<<(n + T - 1) / T, T>>>(deviceOriginalData, s); } cudaMemcpy(&max, deviceOriginalData, sizeof(int), cudaMemcpyDeviceToHost); // Free the memory cudaFree(deviceOriginalData); // Print the results printf("\nAverage is %.2f", realAverage); printf("\nThe Minimum Number is %d\n", min); printf("The Maximum Number is %d\n", max); return 0; }
d1d8dd80f150a89ca7122ac26bdc64dfb4f1b77a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<cuda.h> #include <sys/time.h> // Device code __device__ void kernel(int* d_A, int pitch,int height,int width) { for (int c = 0; c < height; ++c) { for (int r = 0; r < width; ++r) { int* row = (int*)((char*)d_A + r * pitch); row[c] = row[c]*row[c]; } } } __global__ void three(int * d_out, int pitch,int height, int width,int val1,int val2){ int idx = threadIdx.x; switch(idx%val1){ case 0: kernel(d_out,pitch,height,val2);break; case 1: kernel(d_out,pitch,height,val2);break; case 2: kernel(d_out,pitch,height,val2);break; case 3: kernel(d_out,pitch,height,val2);break; case 4: kernel(d_out,pitch,height,val2);break; case 5: kernel(d_out,pitch,height,val2);break; case 6: kernel(d_out,pitch,height,val2);break; case 7: kernel(d_out,pitch,height,val2);break; case 8: kernel(d_out,pitch,height,val2);break; case 9: kernel(d_out,pitch,height,val2);break; case 10: kernel(d_out,pitch,height,val2);break; case 11: kernel(d_out,pitch,height,val2);break; case 12: kernel(d_out,pitch,height,val2);break; case 13: kernel(d_out,pitch,height,val2);break; case 14: kernel(d_out,pitch,height,val2);break; case 15: kernel(d_out,pitch,height,val2);break; case 16: kernel(d_out,pitch,height,val2);break; case 17: kernel(d_out,pitch,height,val2);break; case 18: kernel(d_out,pitch,height,val2);break; case 19: kernel(d_out,pitch,height,val2);break; case 20: kernel(d_out,pitch,height,val2);break; case 21: kernel(d_out,pitch,height,val2);break; case 22: kernel(d_out,pitch,height,val2);break; case 23: kernel(d_out,pitch,height,val2);break; case 24: kernel(d_out,pitch,height,val2);break; case 25: kernel(d_out,pitch,height,val2);break; case 26: kernel(d_out,pitch,height,val2);break; case 27: kernel(d_out,pitch,height,val2);break; case 28: kernel(d_out,pitch,height,val2);break; case 29: kernel(d_out,pitch,height,val2);break; case 30: kernel(d_out,pitch,height,val2);break; case 31: kernel(d_out,pitch,height,val2);break; } __syncthreads(); } //Host Code int main() { int* d_A; size_t pitch; int *A; int height,width; height = width = 32; int rows = height; int cols = width; A = (int *)malloc(rows*cols*sizeof(int)); for (int i = 0; i < rows*cols; i++) A[i] = i; hipMallocPitch((void**)&d_A, &pitch, width * sizeof(int), height); hipMemcpy2D(d_A, pitch, A, sizeof(int)*cols, sizeof(int)*cols, rows, hipMemcpyHostToDevice); for(int v1=32;v1>=1;v1-=2){ for(int v2=32;v2>=1;v2-=2){ struct timeval tv1, tv2; gettimeofday(&tv1, NULL); for(int j=0;j<1000000;j++) hipLaunchKernelGGL(( three), dim3(1), dim3(1024), 0, 0, d_A, pitch,height,width,v1,v2); gettimeofday(&tv2, NULL); printf ("%d %d %f\n",v1,v2, (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec)); hipDeviceSynchronize(); }} // for(int i=0;i<rows*cols;i++) // printf("%d %d\n",A[i],d_A[i]); return 0; }
d1d8dd80f150a89ca7122ac26bdc64dfb4f1b77a.cu
#include<stdio.h> #include<cuda.h> #include <sys/time.h> // Device code __device__ void kernel(int* d_A, int pitch,int height,int width) { for (int c = 0; c < height; ++c) { for (int r = 0; r < width; ++r) { int* row = (int*)((char*)d_A + r * pitch); row[c] = row[c]*row[c]; } } } __global__ void three(int * d_out, int pitch,int height, int width,int val1,int val2){ int idx = threadIdx.x; switch(idx%val1){ case 0: kernel(d_out,pitch,height,val2);break; case 1: kernel(d_out,pitch,height,val2);break; case 2: kernel(d_out,pitch,height,val2);break; case 3: kernel(d_out,pitch,height,val2);break; case 4: kernel(d_out,pitch,height,val2);break; case 5: kernel(d_out,pitch,height,val2);break; case 6: kernel(d_out,pitch,height,val2);break; case 7: kernel(d_out,pitch,height,val2);break; case 8: kernel(d_out,pitch,height,val2);break; case 9: kernel(d_out,pitch,height,val2);break; case 10: kernel(d_out,pitch,height,val2);break; case 11: kernel(d_out,pitch,height,val2);break; case 12: kernel(d_out,pitch,height,val2);break; case 13: kernel(d_out,pitch,height,val2);break; case 14: kernel(d_out,pitch,height,val2);break; case 15: kernel(d_out,pitch,height,val2);break; case 16: kernel(d_out,pitch,height,val2);break; case 17: kernel(d_out,pitch,height,val2);break; case 18: kernel(d_out,pitch,height,val2);break; case 19: kernel(d_out,pitch,height,val2);break; case 20: kernel(d_out,pitch,height,val2);break; case 21: kernel(d_out,pitch,height,val2);break; case 22: kernel(d_out,pitch,height,val2);break; case 23: kernel(d_out,pitch,height,val2);break; case 24: kernel(d_out,pitch,height,val2);break; case 25: kernel(d_out,pitch,height,val2);break; case 26: kernel(d_out,pitch,height,val2);break; case 27: kernel(d_out,pitch,height,val2);break; case 28: kernel(d_out,pitch,height,val2);break; case 29: kernel(d_out,pitch,height,val2);break; case 30: kernel(d_out,pitch,height,val2);break; case 31: kernel(d_out,pitch,height,val2);break; } __syncthreads(); } //Host Code int main() { int* d_A; size_t pitch; int *A; int height,width; height = width = 32; int rows = height; int cols = width; A = (int *)malloc(rows*cols*sizeof(int)); for (int i = 0; i < rows*cols; i++) A[i] = i; cudaMallocPitch((void**)&d_A, &pitch, width * sizeof(int), height); cudaMemcpy2D(d_A, pitch, A, sizeof(int)*cols, sizeof(int)*cols, rows, cudaMemcpyHostToDevice); for(int v1=32;v1>=1;v1-=2){ for(int v2=32;v2>=1;v2-=2){ struct timeval tv1, tv2; gettimeofday(&tv1, NULL); for(int j=0;j<1000000;j++) three<<<1, 1024>>>(d_A, pitch,height,width,v1,v2); gettimeofday(&tv2, NULL); printf ("%d %d %f\n",v1,v2, (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 + (double) (tv2.tv_sec - tv1.tv_sec)); cudaDeviceSynchronize(); }} // for(int i=0;i<rows*cols;i++) // printf("%d %d\n",A[i],d_A[i]); return 0; }
2f0006181554f47541308b983d81fd41585314e8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <assert.h> #include <openacc.h> #define IPMACC_MAX1(A) (A) #define IPMACC_MAX2(A,B) (A>B?A:B) #define IPMACC_MAX3(A,B,C) (A>B?(A>C?A:(B>C?B:C)):(B>C?C:B)) #ifdef __cplusplus #include "openacc_container.h" #endif #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include "../../common/polybenchUtilFuncts.h" #define PERCENT_DIFF_ERROR_THRESHOLD 0.5 #define NX 8192 #define NY 8192 #define GPU_DEVICE 1 #ifndef M_PI #define M_PI 3.14159 #endif typedef float DATA_TYPE; void init_array(DATA_TYPE *x, DATA_TYPE *A) { int i, j; for (i = 0; i < NX; i++) { x [i] = i * M_PI; for (j = 0; j < NY; j++) { A [i * NY + j] = ((DATA_TYPE)i * (j)) / NX; } } } void compareResults(DATA_TYPE *z, DATA_TYPE *z_outputFromGpu) { int i, fail; fail = 0; for (i = 0; i < NY; i++) { if (percentDiff(z [i], z_outputFromGpu [i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void CPU__atax(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp) { int i, j; for (i = 0; i < NY; i++) { y [i] = 0; } for (i = 0; i < NX; i++) { tmp [i] = 0; for (j = 0; j < NY; j++) { tmp [i] = tmp [i] + A [i * NY + j] * x [j]; } for (j = 0; j < NY; j++) { y [j] = y [j] + A [i * NY + j] * tmp [i]; } } } __global__ void __generated_kernel_region_0(DATA_TYPE * y); __global__ void __generated_kernel_region_1(DATA_TYPE * tmp,DATA_TYPE * A,DATA_TYPE * x); __global__ void __generated_kernel_region_2(DATA_TYPE * A,DATA_TYPE * tmp,DATA_TYPE * y); void GPU__atax(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp) { int i, j; ipmacc_prompt((char*)"IPMACC: memory allocation y\n"); acc_present_or_create((void*)y,(8191+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin y\n"); acc_pcopyin((void*)y,(8191+0)*sizeof(DATA_TYPE )); { /* kernel call statement [0, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 0 > gridDim: %d\tblockDim: %d\n",(((abs((int)((NY))-(0+0)))/(1)))/256+(((((abs((int)((NY))-(0+0)))/(1)))%(256))==0?0:1),256);hipLaunchKernelGGL(( __generated_kernel_region_0), dim3((((abs((int)((NY))-(0+0)))/(1)))/256+(((((abs((int)((NY))-(0+0)))/(1)))%(256))==0?0:1)),dim3(256), 0, 0, (DATA_TYPE *)acc_deviceptr((void*)y)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { hipError_t err=hipDeviceSynchronize(); if(err!=hipSuccess){ printf("Kernel Launch Error! 
error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout y\n"); acc_copyout_and_keep((void*)y,(8191+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory allocation A\n"); acc_present_or_create((void*)A,(67108863+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory allocation x\n"); acc_present_or_create((void*)x,(8191+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory allocation tmp\n"); acc_present_or_create((void*)tmp,(8191+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin A\n"); acc_pcopyin((void*)A,(67108863+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin x\n"); acc_pcopyin((void*)x,(8191+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin tmp\n"); acc_pcopyin((void*)tmp,(8191+0)*sizeof(DATA_TYPE )); { /* kernel call statement [1, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 1 > gridDim: %d\tblockDim: %d\n",(((abs((int)((NX))-(0+0)))/(1)))/256+(((((abs((int)((NX))-(0+0)))/(1)))%(256))==0?0:1),256);hipLaunchKernelGGL(( __generated_kernel_region_1), dim3((((abs((int)((NX))-(0+0)))/(1)))/256+(((((abs((int)((NX))-(0+0)))/(1)))%(256))==0?0:1)),dim3(256), 0, 0, (DATA_TYPE *)acc_deviceptr((void*)tmp), (DATA_TYPE *)acc_deviceptr((void*)A), (DATA_TYPE *)acc_deviceptr((void*)x)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { hipError_t err=hipDeviceSynchronize(); if(err!=hipSuccess){ printf("Kernel Launch Error! error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout A\n"); acc_copyout_and_keep((void*)A,(67108863+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyout x\n"); acc_copyout_and_keep((void*)x,(8191+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyout tmp\n"); acc_copyout_and_keep((void*)tmp,(8191+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory allocation A\n"); acc_present_or_create((void*)A,(67108863+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory allocation y\n"); acc_present_or_create((void*)y,(8191+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory allocation tmp\n"); acc_present_or_create((void*)tmp,(8191+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin A\n"); acc_pcopyin((void*)A,(67108863+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin y\n"); acc_pcopyin((void*)y,(8191+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin tmp\n"); acc_pcopyin((void*)tmp,(8191+0)*sizeof(DATA_TYPE )); { /* kernel call statement [2, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 2 > gridDim: %d\tblockDim: %d\n",(((abs((int)((NY))-(0+0)))/(1)))/256+(((((abs((int)((NY))-(0+0)))/(1)))%(256))==0?0:1),256);hipLaunchKernelGGL(( __generated_kernel_region_2), dim3((((abs((int)((NY))-(0+0)))/(1)))/256+(((((abs((int)((NY))-(0+0)))/(1)))%(256))==0?0:1)),dim3(256), 0, 0, (DATA_TYPE *)acc_deviceptr((void*)A), (DATA_TYPE *)acc_deviceptr((void*)tmp), (DATA_TYPE *)acc_deviceptr((void*)y)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { hipError_t err=hipDeviceSynchronize(); if(err!=hipSuccess){ printf("Kernel Launch Error! 
error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout A\n"); acc_copyout_and_keep((void*)A,(67108863+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyout y\n"); acc_copyout_and_keep((void*)y,(8191+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyout tmp\n"); acc_copyout_and_keep((void*)tmp,(8191+0)*sizeof(DATA_TYPE )); } int main(int argc, char** argv) { double t_start, t_end; DATA_TYPE* A; DATA_TYPE* x; DATA_TYPE* y; DATA_TYPE* y_outputFromGpu; DATA_TYPE* tmp; A = (DATA_TYPE*)malloc(NX * NY * sizeof(DATA_TYPE)); x = (DATA_TYPE*)malloc(NY * sizeof(DATA_TYPE)); y = (DATA_TYPE*)malloc(NY * sizeof(DATA_TYPE)); y_outputFromGpu = (DATA_TYPE*)malloc(NY * sizeof(DATA_TYPE)); tmp = (DATA_TYPE*)malloc(NX * sizeof(DATA_TYPE)); fprintf(stdout, "<< Matrix Transpose and Vector Multiplication >>\n"); init_array(x, A); t_start = rtclock(); GPU__atax(A, x, y_outputFromGpu, tmp); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); t_start = rtclock(); CPU__atax(A, x, y, tmp); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(y, y_outputFromGpu); free(A); free(x); free(y); free(y_outputFromGpu); free(tmp); return 0; } __global__ void __generated_kernel_region_0(DATA_TYPE * y){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; { { { i=0+(__kernel_getuid_x); if( i < NY) { y [i] = 0; } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_1(DATA_TYPE * tmp,DATA_TYPE * A,DATA_TYPE * x){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < NX) { tmp [i] = 0; int j; for(j = 0; j < NY; j++) { tmp [i] = tmp [i] + A [i * NY + j] * x [j]; } } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_2(DATA_TYPE * A,DATA_TYPE * tmp,DATA_TYPE * y){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { j=0+(__kernel_getuid_x); if( j < NY) { for(i = 0; i < NX; i++) { { y [j] = y [j] + A [i * NY + j] * tmp [i]; } } } } } } //append writeback of scalar variables }
2f0006181554f47541308b983d81fd41585314e8.cu
#include <stdlib.h> #include <stdio.h> #include <assert.h> #include <openacc.h> #define IPMACC_MAX1(A) (A) #define IPMACC_MAX2(A,B) (A>B?A:B) #define IPMACC_MAX3(A,B,C) (A>B?(A>C?A:(B>C?B:C)):(B>C?C:B)) #ifdef __cplusplus #include "openacc_container.h" #endif #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include "../../common/polybenchUtilFuncts.h" #define PERCENT_DIFF_ERROR_THRESHOLD 0.5 #define NX 8192 #define NY 8192 #define GPU_DEVICE 1 #ifndef M_PI #define M_PI 3.14159 #endif typedef float DATA_TYPE; void init_array(DATA_TYPE *x, DATA_TYPE *A) { int i, j; for (i = 0; i < NX; i++) { x [i] = i * M_PI; for (j = 0; j < NY; j++) { A [i * NY + j] = ((DATA_TYPE)i * (j)) / NX; } } } void compareResults(DATA_TYPE *z, DATA_TYPE *z_outputFromGpu) { int i, fail; fail = 0; for (i = 0; i < NY; i++) { if (percentDiff(z [i], z_outputFromGpu [i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void CPU__atax(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp) { int i, j; for (i = 0; i < NY; i++) { y [i] = 0; } for (i = 0; i < NX; i++) { tmp [i] = 0; for (j = 0; j < NY; j++) { tmp [i] = tmp [i] + A [i * NY + j] * x [j]; } for (j = 0; j < NY; j++) { y [j] = y [j] + A [i * NY + j] * tmp [i]; } } } __global__ void __generated_kernel_region_0(DATA_TYPE * y); __global__ void __generated_kernel_region_1(DATA_TYPE * tmp,DATA_TYPE * A,DATA_TYPE * x); __global__ void __generated_kernel_region_2(DATA_TYPE * A,DATA_TYPE * tmp,DATA_TYPE * y); void GPU__atax(DATA_TYPE* A, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp) { int i, j; ipmacc_prompt((char*)"IPMACC: memory allocation y\n"); acc_present_or_create((void*)y,(8191+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin y\n"); acc_pcopyin((void*)y,(8191+0)*sizeof(DATA_TYPE )); { /* kernel call statement [0, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 0 > gridDim: %d\tblockDim: %d\n",(((abs((int)((NY))-(0+0)))/(1)))/256+(((((abs((int)((NY))-(0+0)))/(1)))%(256))==0?0:1),256); __generated_kernel_region_0<<<(((abs((int)((NY))-(0+0)))/(1)))/256+(((((abs((int)((NY))-(0+0)))/(1)))%(256))==0?0:1),256>>>( (DATA_TYPE *)acc_deviceptr((void*)y)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { cudaError err=cudaDeviceSynchronize(); if(err!=cudaSuccess){ printf("Kernel Launch Error! 
error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout y\n"); acc_copyout_and_keep((void*)y,(8191+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory allocation A\n"); acc_present_or_create((void*)A,(67108863+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory allocation x\n"); acc_present_or_create((void*)x,(8191+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory allocation tmp\n"); acc_present_or_create((void*)tmp,(8191+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin A\n"); acc_pcopyin((void*)A,(67108863+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin x\n"); acc_pcopyin((void*)x,(8191+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin tmp\n"); acc_pcopyin((void*)tmp,(8191+0)*sizeof(DATA_TYPE )); { /* kernel call statement [1, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 1 > gridDim: %d\tblockDim: %d\n",(((abs((int)((NX))-(0+0)))/(1)))/256+(((((abs((int)((NX))-(0+0)))/(1)))%(256))==0?0:1),256); __generated_kernel_region_1<<<(((abs((int)((NX))-(0+0)))/(1)))/256+(((((abs((int)((NX))-(0+0)))/(1)))%(256))==0?0:1),256>>>( (DATA_TYPE *)acc_deviceptr((void*)tmp), (DATA_TYPE *)acc_deviceptr((void*)A), (DATA_TYPE *)acc_deviceptr((void*)x)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { cudaError err=cudaDeviceSynchronize(); if(err!=cudaSuccess){ printf("Kernel Launch Error! error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout A\n"); acc_copyout_and_keep((void*)A,(67108863+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyout x\n"); acc_copyout_and_keep((void*)x,(8191+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyout tmp\n"); acc_copyout_and_keep((void*)tmp,(8191+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory allocation A\n"); acc_present_or_create((void*)A,(67108863+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory allocation y\n"); acc_present_or_create((void*)y,(8191+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory allocation tmp\n"); acc_present_or_create((void*)tmp,(8191+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin A\n"); acc_pcopyin((void*)A,(67108863+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin y\n"); acc_pcopyin((void*)y,(8191+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin tmp\n"); acc_pcopyin((void*)tmp,(8191+0)*sizeof(DATA_TYPE )); { /* kernel call statement [2, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 2 > gridDim: %d\tblockDim: %d\n",(((abs((int)((NY))-(0+0)))/(1)))/256+(((((abs((int)((NY))-(0+0)))/(1)))%(256))==0?0:1),256); __generated_kernel_region_2<<<(((abs((int)((NY))-(0+0)))/(1)))/256+(((((abs((int)((NY))-(0+0)))/(1)))%(256))==0?0:1),256>>>( (DATA_TYPE *)acc_deviceptr((void*)A), (DATA_TYPE *)acc_deviceptr((void*)tmp), (DATA_TYPE *)acc_deviceptr((void*)y)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { cudaError err=cudaDeviceSynchronize(); if(err!=cudaSuccess){ printf("Kernel Launch Error! 
error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout A\n"); acc_copyout_and_keep((void*)A,(67108863+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyout y\n"); acc_copyout_and_keep((void*)y,(8191+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyout tmp\n"); acc_copyout_and_keep((void*)tmp,(8191+0)*sizeof(DATA_TYPE )); } int main(int argc, char** argv) { double t_start, t_end; DATA_TYPE* A; DATA_TYPE* x; DATA_TYPE* y; DATA_TYPE* y_outputFromGpu; DATA_TYPE* tmp; A = (DATA_TYPE*)malloc(NX * NY * sizeof(DATA_TYPE)); x = (DATA_TYPE*)malloc(NY * sizeof(DATA_TYPE)); y = (DATA_TYPE*)malloc(NY * sizeof(DATA_TYPE)); y_outputFromGpu = (DATA_TYPE*)malloc(NY * sizeof(DATA_TYPE)); tmp = (DATA_TYPE*)malloc(NX * sizeof(DATA_TYPE)); fprintf(stdout, "<< Matrix Transpose and Vector Multiplication >>\n"); init_array(x, A); t_start = rtclock(); GPU__atax(A, x, y_outputFromGpu, tmp); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); t_start = rtclock(); CPU__atax(A, x, y, tmp); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(y, y_outputFromGpu); free(A); free(x); free(y); free(y_outputFromGpu); free(tmp); return 0; } __global__ void __generated_kernel_region_0(DATA_TYPE * y){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; { { { i=0+(__kernel_getuid_x); if( i < NY) { y [i] = 0; } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_1(DATA_TYPE * tmp,DATA_TYPE * A,DATA_TYPE * x){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < NX) { tmp [i] = 0; int j; for(j = 0; j < NY; j++) { tmp [i] = tmp [i] + A [i * NY + j] * x [j]; } } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_2(DATA_TYPE * A,DATA_TYPE * tmp,DATA_TYPE * y){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { j=0+(__kernel_getuid_x); if( j < NY) { for(i = 0; i < NX; i++) { { y [j] = y [j] + A [i * NY + j] * tmp [i]; } } } } } } //append writeback of scalar variables }
040f38ee9c0cf2bd6114260703f8d6f269eaf735.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #include <cassert> const int blocksize = 96; // kernel prototypes __global__ void k1(void); __global__ void k2(void); // array of values to fill __device__ int data[blocksize]; // kernel that fills the array in device memory __global__ void k2(void) { int idx = threadIdx.x; if (idx < blocksize) { data[idx] = idx; } } // kernel that calls the fill kernel __global__ void k1(void) { int idx = threadIdx.x; if (idx == 0) { hipLaunchKernelGGL(( k2), dim3(1), dim3(blocksize), 0, 0, ); hipDeviceSynchronize(); } __syncthreads(); printf("Thread %i has value %i\n", idx, data[idx]); } int main(void) { hipLaunchKernelGGL(( k1), dim3(1), dim3(blocksize), 0, 0, ); hipDeviceSynchronize(); return 0; }
040f38ee9c0cf2bd6114260703f8d6f269eaf735.cu
#include <cstdio> #include <cassert> const int blocksize = 96; // kernel prototypes __global__ void k1(void); __global__ void k2(void); // array of values to fill __device__ int data[blocksize]; // kernel that fills the array in device memory __global__ void k2(void) { int idx = threadIdx.x; if (idx < blocksize) { data[idx] = idx; } } // kernel that calls the fill kernel __global__ void k1(void) { int idx = threadIdx.x; if (idx == 0) { k2<<<1, blocksize>>>(); cudaDeviceSynchronize(); } __syncthreads(); printf("Thread %i has value %i\n", idx, data[idx]); } int main(void) { k1<<<1, blocksize>>>(); cudaDeviceSynchronize(); return 0; }
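The k1/k2 pair above exercises CUDA dynamic parallelism: k1 launches k2 from device code and waits on it with a device-side cudaDeviceSynchronize. Building that form needs relocatable device code and a compute-capability 3.5 or newer target (something like nvcc -arch=sm_35 -rdc=true file.cu -lcudadevrt), and device-side cudaDeviceSynchronize has been deprecated in recent CUDA releases; HIP has historically not offered device-side kernel launches at all, so the hipified twin may not build for AMD targets. A hedged sketch of the same effect without any nested launch, using two ordinary host-side launches on the default stream (names below are hypothetical):

#include <cstdio>

const int blocksize = 96;
__device__ int data[blocksize];

// fill the device array, then print it from a second kernel
__global__ void fill(void) { data[threadIdx.x] = threadIdx.x; }
__global__ void show(void) { printf("Thread %i has value %i\n", threadIdx.x, data[threadIdx.x]); }

int main(void)
{
    fill<<<1, blocksize>>>();  // replaces the in-kernel launch of k2
    show<<<1, blocksize>>>();  // same default stream, so it runs after fill completes
    cudaDeviceSynchronize();
    return 0;
}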
87253fcc659a913d45f3249a0dc4ad376f88a114.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/fluid/operators/optimizers/adamw_op.h" #include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { template <typename T, typename MT> __global__ void AdamWKernelREG(MT beta1, MT beta2, MT epsilon, MT coeff, MT lr_ratio, MT beta1_pow_, MT beta2_pow_, const MT* moment1, MT* moment1_out, const MT* moment2, MT* moment2_out, const MT* lr_, const T* grad, const T* param, T* param_out, const MT* master_param, MT* master_param_out, int ndim) { MT lr = *lr_ * lr_ratio; MT beta1_pow = beta1_pow_; MT beta2_pow = beta2_pow_; int id = blockIdx.x * blockDim.x + threadIdx.x; for (; id < ndim; id += gridDim.x * blockDim.x) { MT p = master_param ? master_param[id] : static_cast<MT>(param[id]); MT g = static_cast<MT>(grad[id]); MT mom1 = static_cast<MT>(moment1[id]); MT mom2 = static_cast<MT>(moment2[id]); p *= (static_cast<MT>(1.0) - lr * coeff); mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g; mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g; MT denom = (sqrt(mom2) / sqrt(static_cast<MT>(1.0) - beta2_pow)) + epsilon; p += (mom1 / denom) * (-(lr / (static_cast<MT>(1.0) - beta1_pow))); moment1_out[id] = mom1; moment2_out[id] = mom2; param_out[id] = static_cast<T>(p); if (master_param_out) { master_param_out[id] = p; } } } template <typename T, typename MT> __global__ void AdamWKernelMEM( MT beta1, MT beta2, MT epsilon, MT coeff, MT lr_ratio, const MT* beta1_pow_, const MT* beta2_pow_, const MT* moment1, MT* moment1_out, const MT* moment2, MT* moment2_out, const MT* lr_, const T* grad, const T* param, T* param_out, const MT* master_param, MT* master_param_out, int ndim) { MT lr = *lr_ * lr_ratio; MT beta1_pow = *beta1_pow_; MT beta2_pow = *beta2_pow_; int id = blockIdx.x * blockDim.x + threadIdx.x; for (; id < ndim; id += gridDim.x * blockDim.x) { MT p = master_param ? 
master_param[id] : static_cast<MT>(param[id]); MT g = static_cast<MT>(grad[id]); MT mom1 = static_cast<MT>(moment1[id]); MT mom2 = static_cast<MT>(moment2[id]); p *= (static_cast<MT>(1.0) - lr * coeff); mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g; mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g; MT denom = (sqrt(mom2) / sqrt(static_cast<MT>(1.0) - beta2_pow)) + epsilon; p += (mom1 / denom) * (-(lr / (static_cast<MT>(1.0) - beta1_pow))); moment1_out[id] = mom1; moment2_out[id] = mom2; param_out[id] = static_cast<T>(p); if (master_param_out) { master_param_out[id] = p; } } } template <typename T> __global__ void UpdateAdamWBetaPow(T beta1, T beta2, const T* beta1_pow_, const T* beta2_pow_, T* beta1_pow_out, T* beta2_pow_out) { *beta1_pow_out = beta1 * beta1_pow_[0]; *beta2_pow_out = beta2 * beta2_pow_[0]; } template <typename T, typename MT> __global__ void SparseAdamWCUDAKernelREG( MT beta1, MT beta2, MT epsilon, MT coeff, MT lr_ratio, const MT beta1_pow, const MT beta2_pow, const MT* mom1_, MT* mom1_out_, const MT* mom2_, MT* mom2_out_, const MT* lr_, const T* grad_, const T* param_, T* param_out_, const MT* master_param, MT* master_param_out, const int64_t* rows_, int64_t row_numel, int64_t row_count, bool lazy_mode, int ndim) { int id = blockIdx.x * blockDim.x + threadIdx.x; MT lr = *lr_ * lr_ratio; for (; id < ndim; id += blockDim.x * gridDim.x) { auto row_idx = math::BinarySearch<int64_t>(rows_, row_count, id / row_numel); if (lazy_mode && row_idx < 0) { return; } else { MT mom1 = static_cast<MT>(mom1_[id]); MT mom2 = static_cast<MT>(mom2_[id]); MT p = master_param ? master_param[id] : static_cast<MT>(param_[id]); MT g = row_idx >= 0 ? static_cast<MT>(grad_[row_idx * row_numel + id % row_numel]) : static_cast<MT>(0); p *= (static_cast<MT>(1.0) - lr * coeff); mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g; mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g; MT denom = (sqrt(mom2) / sqrt(static_cast<MT>(1.0) - beta2_pow)) + epsilon; p += (mom1 / denom) * (-(lr / (static_cast<MT>(1.0) - beta1_pow))); // Write back to global memory mom1_out_[id] = mom1; mom2_out_[id] = mom2; param_out_[id] = static_cast<T>(p); if (master_param_out) { master_param_out[id] = p; } } } } template <typename T> class AdamWOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const auto* param_var = ctx.InputVar("Param"); PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true, platform::errors::InvalidArgument( "The Var(%s)'s type should be LoDTensor, " "but the received is %s", ctx.InputNames("Param").front(), framework::ToTypeName(param_var->Type()))); using paddle::framework::LoDTensor; using MPDType = typename details::MPTypeTrait<T>::Type; int64_t min_row_size_to_use_multithread = ctx.Attr<int64_t>("min_row_size_to_use_multithread"); bool lazy_mode = ctx.Attr<bool>("lazy_mode"); bool use_global_beta_pow = ctx.Attr<bool>("use_global_beta_pow"); VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow; MPDType coeff = static_cast<MPDType>(ctx.Attr<float>("coeff")); MPDType lr_ratio = static_cast<MPDType>(ctx.Attr<float>("lr_ratio")); auto* param = ctx.Input<LoDTensor>("Param"); auto* grad_var = ctx.InputVar("Grad"); auto* mom1 = ctx.Input<LoDTensor>("Moment1"); auto* mom2 = ctx.Input<LoDTensor>("Moment2"); auto* lr = ctx.Input<LoDTensor>("LearningRate"); auto* beta1_pow = ctx.Input<LoDTensor>("Beta1Pow"); auto* beta2_pow = ctx.Input<LoDTensor>("Beta2Pow"); auto* param_out = 
ctx.Output<LoDTensor>("ParamOut"); auto* mom1_out = ctx.Output<LoDTensor>("Moment1Out"); auto* mom2_out = ctx.Output<LoDTensor>("Moment2Out"); auto* beta1_pow_out = ctx.Output<LoDTensor>("Beta1PowOut"); auto* beta2_pow_out = ctx.Output<LoDTensor>("Beta2PowOut"); bool skip_update = false; if (ctx.HasInput("SkipUpdate")) { auto* skip_update_tensor = ctx.Input<framework::Tensor>("SkipUpdate"); PADDLE_ENFORCE_EQ(skip_update_tensor->numel(), 1, platform::errors::InvalidArgument( "Input(SkipUpdate) size must be 1, but get %d", skip_update_tensor->numel())); std::vector<bool> skip_update_vec; TensorToVector(*skip_update_tensor, ctx.device_context(), &skip_update_vec); skip_update = skip_update_vec[0]; } // skip_update=true, just copy input to output, and TensorCopy will call // mutable_data if (skip_update) { VLOG(4) << "Adamw skip update"; framework::TensorCopy( *param, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), param_out); framework::TensorCopy( *mom1, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), mom1_out); framework::TensorCopy( *mom2, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), mom2_out); framework::TensorCopy( *beta1_pow, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), beta1_pow_out); framework::TensorCopy( *beta2_pow, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), beta2_pow_out); return; } // if with_decay = false, coeff = 0 bool with_decay = ctx.Attr<bool>("with_decay"); if (!with_decay) { coeff = static_cast<float>(0.0); } MPDType beta1 = static_cast<MPDType>(ctx.Attr<float>("beta1")); if (ctx.HasInput("Beta1Tensor")) { auto* beta1_tensor = ctx.Input<framework::Tensor>("Beta1Tensor"); PADDLE_ENFORCE_EQ(beta1_tensor->numel(), 1, platform::errors::InvalidArgument( "Input(Beta1Tensor) size must be 1, but get %d", beta1_tensor->numel())); beta1 = static_cast<MPDType>(GetAttrFromTensor(beta1_tensor)); } MPDType beta2 = static_cast<MPDType>(ctx.Attr<float>("beta2")); if (ctx.HasInput("Beta2Tensor")) { auto* beta2_tensor = ctx.Input<framework::Tensor>("Beta2Tensor"); PADDLE_ENFORCE_EQ(beta2_tensor->numel(), 1, platform::errors::InvalidArgument( "Input(Beta2Tensor) size must be 1, but get %d", beta2_tensor->numel())); beta2 = static_cast<MPDType>(GetAttrFromTensor(beta2_tensor)); } MPDType epsilon = static_cast<MPDType>(ctx.Attr<float>("epsilon")); if (ctx.HasInput("EpsilonTensor")) { auto* epsilon_tensor = ctx.Input<framework::Tensor>("EpsilonTensor"); PADDLE_ENFORCE_EQ(epsilon_tensor->numel(), 1, platform::errors::InvalidArgument( "Input(EpsilonTensor) size must be 1, but get %d", epsilon_tensor->numel())); epsilon = static_cast<MPDType>(GetAttrFromTensor(epsilon_tensor)); } VLOG(3) << "beta1_pow.numel() : " << beta1_pow->numel() << "beta2_pow.numel() : " << beta2_pow->numel(); VLOG(3) << "param.numel(): " << param->numel(); PADDLE_ENFORCE_EQ(beta1_pow_out->numel(), 1, platform::errors::InvalidArgument( "beta1 pow output size should be 1, but received " "value is:%d.", beta1_pow_out->numel())); PADDLE_ENFORCE_EQ(beta2_pow_out->numel(), 1, platform::errors::InvalidArgument( "beta2 pow output size should be 1, but received " "value is:%d.", beta2_pow_out->numel())); const bool multi_precision = ctx.Attr<bool>("multi_precision"); const LoDTensor* master_param = nullptr; LoDTensor* master_param_out = nullptr; if (multi_precision) { bool has_master = ctx.HasInput("MasterParam") && ctx.HasOutput("MasterParamOut"); PADDLE_ENFORCE_EQ(has_master, true, 
platform::errors::InvalidArgument( "The Input(MasterParam) and Output(MasterParamOut) " "should not be null when " "the attr `multi_precision` is true")); master_param = ctx.Input<LoDTensor>("MasterParam"); master_param_out = ctx.Output<LoDTensor>("MasterParamOut"); } const MPDType* master_in_data = multi_precision ? master_param->data<MPDType>() : nullptr; MPDType* master_out_data = multi_precision ? master_param_out->mutable_data<MPDType>(ctx.GetPlace()) : nullptr; auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); if (grad_var->IsType<framework::LoDTensor>()) { auto* grad = ctx.Input<LoDTensor>("Grad"); // update param and moment int threads = 512; int blocks = (param->numel() + threads - 1) / threads; if (beta1_pow->place() == platform::CPUPlace() && beta2_pow->place() == platform::CPUPlace()) { // Compute with betapow in REG hipLaunchKernelGGL(( AdamWKernelREG<T, MPDType>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(), beta1, beta2, epsilon, coeff, lr_ratio, *beta1_pow->data<MPDType>(), *beta2_pow->data<MPDType>(), mom1->data<MPDType>(), mom1_out->mutable_data<MPDType>(ctx.GetPlace()), mom2->data<MPDType>(), mom2_out->mutable_data<MPDType>(ctx.GetPlace()), lr->data<MPDType>(), grad->data<T>(), param->data<T>(), param_out->mutable_data<T>(ctx.GetPlace()), master_in_data, master_out_data, param->numel()); if (!use_global_beta_pow) { // Cpu update beta1_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] = beta1 * beta1_pow->data<MPDType>()[0]; beta2_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] = beta2 * beta2_pow->data<MPDType>()[0]; } } else { hipLaunchKernelGGL(( AdamWKernelMEM<T, MPDType>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(), beta1, beta2, epsilon, coeff, lr_ratio, beta1_pow->data<MPDType>(), beta2_pow->data<MPDType>(), mom1->data<MPDType>(), mom1_out->mutable_data<MPDType>(ctx.GetPlace()), mom2->data<MPDType>(), mom2_out->mutable_data<MPDType>(ctx.GetPlace()), lr->data<MPDType>(), grad->data<T>(), param->data<T>(), param_out->mutable_data<T>(ctx.GetPlace()), master_in_data, master_out_data, param->numel()); if (!use_global_beta_pow) { // Update with gpu hipLaunchKernelGGL(( UpdateAdamWBetaPow<MPDType>), dim3(1), dim3(32), 0, dev_ctx.stream(), beta1, beta2, beta1_pow->data<MPDType>(), beta2_pow->data<MPDType>(), beta1_pow_out->mutable_data<MPDType>(ctx.GetPlace()), beta2_pow_out->mutable_data<MPDType>(ctx.GetPlace())); } } } else if (grad_var->IsType<framework::SelectedRows>()) { auto* grad = ctx.Input<framework::SelectedRows>("Grad"); if (grad->rows().size() == 0) { VLOG(3) << "grad row size is 0!!"; return; } std::vector<int64_t> cpu_rows(grad->rows().begin(), grad->rows().end()); bool is_strict_sorted = true; for (size_t i = 1; i < cpu_rows.size(); ++i) { if (cpu_rows[i - 1] >= cpu_rows[i]) { is_strict_sorted = false; break; } } framework::SelectedRows tmp_grad_merge; const framework::SelectedRows* grad_merge_ptr; if (is_strict_sorted) { grad_merge_ptr = grad; } else { // merge duplicated rows if any. 
// The rows of grad_merge have been sorted inside MergeAdd functor scatter::MergeAdd<platform::CUDADeviceContext, T> merge_func; merge_func(ctx.template device_context<platform::CUDADeviceContext>(), *grad, &tmp_grad_merge, true); grad_merge_ptr = &tmp_grad_merge; } auto& grad_merge = *grad_merge_ptr; auto& grad_tensor = grad_merge.value(); const T* grad_data = grad_tensor.template data<T>(); const int64_t* rows = grad_merge.rows().Data(ctx.GetPlace()); auto row_numel = grad_tensor.numel() / grad_merge.rows().size(); if (beta1_pow->place() == platform::CPUPlace() && beta2_pow->place() == platform::CPUPlace()) { int threads = 512; int ndim = param->numel(); int blocks = (ndim + threads - 1) / threads; hipLaunchKernelGGL(( SparseAdamWCUDAKernelREG< T, MPDType>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(), beta1, beta2, epsilon, coeff, lr_ratio, *beta1_pow->data<MPDType>(), *beta2_pow->data<MPDType>(), mom1->data<MPDType>(), mom1_out->mutable_data<MPDType>(ctx.GetPlace()), mom2->data<MPDType>(), mom2_out->mutable_data<MPDType>(ctx.GetPlace()), lr->data<MPDType>(), grad_data, param->data<T>(), param_out->mutable_data<T>(ctx.GetPlace()), master_in_data, master_out_data, rows, row_numel, grad_merge.rows().size(), lazy_mode, ndim); if (!use_global_beta_pow) { // Update with cpu beta1_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] = beta1 * beta1_pow->data<MPDType>()[0]; beta2_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] = beta2 * beta2_pow->data<MPDType>()[0]; } } else { SparseAdamWFunctor<T, GPUAdamW, MPDType> functor( beta1, beta2, epsilon, coeff, lr_ratio, beta1_pow->data<MPDType>(), beta2_pow->data<MPDType>(), mom1->data<MPDType>(), mom1_out->mutable_data<MPDType>(ctx.GetPlace()), mom2->data<MPDType>(), mom2_out->mutable_data<MPDType>(ctx.GetPlace()), lr->data<MPDType>(), grad_data, param->data<T>(), param_out->mutable_data<T>(ctx.GetPlace()), master_in_data, master_out_data, rows, row_numel, grad_merge.rows().size(), lazy_mode); // FIXME(minqiyang): remove BinarySearch in GPU later platform::ForRange<platform::CUDADeviceContext> for_range( static_cast<const platform::CUDADeviceContext&>( ctx.device_context()), param->numel()); for_range(functor); if (!use_global_beta_pow) { // update beta1 and beta2 hipLaunchKernelGGL(( UpdateAdamWBetaPow<MPDType>), dim3(1), dim3(32), 0, dev_ctx.stream(), beta1, beta2, beta1_pow->data<MPDType>(), beta2_pow->data<MPDType>(), beta1_pow_out->mutable_data<MPDType>(ctx.GetPlace()), beta2_pow_out->mutable_data<MPDType>(ctx.GetPlace())); } } } else { PADDLE_THROW(platform::errors::InvalidArgument( "Variable type not supported by adamw_op")); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(adamw, ops::AdamWOpCUDAKernel<float>, ops::AdamWOpCUDAKernel<double>, ops::AdamWOpCUDAKernel<plat::float16>);
87253fcc659a913d45f3249a0dc4ad376f88a114.cu
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/fluid/operators/optimizers/adamw_op.h" #include "paddle/fluid/platform/float16.h" namespace paddle { namespace operators { template <typename T, typename MT> __global__ void AdamWKernelREG(MT beta1, MT beta2, MT epsilon, MT coeff, MT lr_ratio, MT beta1_pow_, MT beta2_pow_, const MT* moment1, MT* moment1_out, const MT* moment2, MT* moment2_out, const MT* lr_, const T* grad, const T* param, T* param_out, const MT* master_param, MT* master_param_out, int ndim) { MT lr = *lr_ * lr_ratio; MT beta1_pow = beta1_pow_; MT beta2_pow = beta2_pow_; int id = blockIdx.x * blockDim.x + threadIdx.x; for (; id < ndim; id += gridDim.x * blockDim.x) { MT p = master_param ? master_param[id] : static_cast<MT>(param[id]); MT g = static_cast<MT>(grad[id]); MT mom1 = static_cast<MT>(moment1[id]); MT mom2 = static_cast<MT>(moment2[id]); p *= (static_cast<MT>(1.0) - lr * coeff); mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g; mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g; MT denom = (sqrt(mom2) / sqrt(static_cast<MT>(1.0) - beta2_pow)) + epsilon; p += (mom1 / denom) * (-(lr / (static_cast<MT>(1.0) - beta1_pow))); moment1_out[id] = mom1; moment2_out[id] = mom2; param_out[id] = static_cast<T>(p); if (master_param_out) { master_param_out[id] = p; } } } template <typename T, typename MT> __global__ void AdamWKernelMEM( MT beta1, MT beta2, MT epsilon, MT coeff, MT lr_ratio, const MT* beta1_pow_, const MT* beta2_pow_, const MT* moment1, MT* moment1_out, const MT* moment2, MT* moment2_out, const MT* lr_, const T* grad, const T* param, T* param_out, const MT* master_param, MT* master_param_out, int ndim) { MT lr = *lr_ * lr_ratio; MT beta1_pow = *beta1_pow_; MT beta2_pow = *beta2_pow_; int id = blockIdx.x * blockDim.x + threadIdx.x; for (; id < ndim; id += gridDim.x * blockDim.x) { MT p = master_param ? 
master_param[id] : static_cast<MT>(param[id]); MT g = static_cast<MT>(grad[id]); MT mom1 = static_cast<MT>(moment1[id]); MT mom2 = static_cast<MT>(moment2[id]); p *= (static_cast<MT>(1.0) - lr * coeff); mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g; mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g; MT denom = (sqrt(mom2) / sqrt(static_cast<MT>(1.0) - beta2_pow)) + epsilon; p += (mom1 / denom) * (-(lr / (static_cast<MT>(1.0) - beta1_pow))); moment1_out[id] = mom1; moment2_out[id] = mom2; param_out[id] = static_cast<T>(p); if (master_param_out) { master_param_out[id] = p; } } } template <typename T> __global__ void UpdateAdamWBetaPow(T beta1, T beta2, const T* beta1_pow_, const T* beta2_pow_, T* beta1_pow_out, T* beta2_pow_out) { *beta1_pow_out = beta1 * beta1_pow_[0]; *beta2_pow_out = beta2 * beta2_pow_[0]; } template <typename T, typename MT> __global__ void SparseAdamWCUDAKernelREG( MT beta1, MT beta2, MT epsilon, MT coeff, MT lr_ratio, const MT beta1_pow, const MT beta2_pow, const MT* mom1_, MT* mom1_out_, const MT* mom2_, MT* mom2_out_, const MT* lr_, const T* grad_, const T* param_, T* param_out_, const MT* master_param, MT* master_param_out, const int64_t* rows_, int64_t row_numel, int64_t row_count, bool lazy_mode, int ndim) { int id = blockIdx.x * blockDim.x + threadIdx.x; MT lr = *lr_ * lr_ratio; for (; id < ndim; id += blockDim.x * gridDim.x) { auto row_idx = math::BinarySearch<int64_t>(rows_, row_count, id / row_numel); if (lazy_mode && row_idx < 0) { return; } else { MT mom1 = static_cast<MT>(mom1_[id]); MT mom2 = static_cast<MT>(mom2_[id]); MT p = master_param ? master_param[id] : static_cast<MT>(param_[id]); MT g = row_idx >= 0 ? static_cast<MT>(grad_[row_idx * row_numel + id % row_numel]) : static_cast<MT>(0); p *= (static_cast<MT>(1.0) - lr * coeff); mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g; mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g; MT denom = (sqrt(mom2) / sqrt(static_cast<MT>(1.0) - beta2_pow)) + epsilon; p += (mom1 / denom) * (-(lr / (static_cast<MT>(1.0) - beta1_pow))); // Write back to global memory mom1_out_[id] = mom1; mom2_out_[id] = mom2; param_out_[id] = static_cast<T>(p); if (master_param_out) { master_param_out[id] = p; } } } } template <typename T> class AdamWOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const auto* param_var = ctx.InputVar("Param"); PADDLE_ENFORCE_EQ(param_var->IsType<framework::LoDTensor>(), true, platform::errors::InvalidArgument( "The Var(%s)'s type should be LoDTensor, " "but the received is %s", ctx.InputNames("Param").front(), framework::ToTypeName(param_var->Type()))); using paddle::framework::LoDTensor; using MPDType = typename details::MPTypeTrait<T>::Type; int64_t min_row_size_to_use_multithread = ctx.Attr<int64_t>("min_row_size_to_use_multithread"); bool lazy_mode = ctx.Attr<bool>("lazy_mode"); bool use_global_beta_pow = ctx.Attr<bool>("use_global_beta_pow"); VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow; MPDType coeff = static_cast<MPDType>(ctx.Attr<float>("coeff")); MPDType lr_ratio = static_cast<MPDType>(ctx.Attr<float>("lr_ratio")); auto* param = ctx.Input<LoDTensor>("Param"); auto* grad_var = ctx.InputVar("Grad"); auto* mom1 = ctx.Input<LoDTensor>("Moment1"); auto* mom2 = ctx.Input<LoDTensor>("Moment2"); auto* lr = ctx.Input<LoDTensor>("LearningRate"); auto* beta1_pow = ctx.Input<LoDTensor>("Beta1Pow"); auto* beta2_pow = ctx.Input<LoDTensor>("Beta2Pow"); auto* param_out = 
ctx.Output<LoDTensor>("ParamOut"); auto* mom1_out = ctx.Output<LoDTensor>("Moment1Out"); auto* mom2_out = ctx.Output<LoDTensor>("Moment2Out"); auto* beta1_pow_out = ctx.Output<LoDTensor>("Beta1PowOut"); auto* beta2_pow_out = ctx.Output<LoDTensor>("Beta2PowOut"); bool skip_update = false; if (ctx.HasInput("SkipUpdate")) { auto* skip_update_tensor = ctx.Input<framework::Tensor>("SkipUpdate"); PADDLE_ENFORCE_EQ(skip_update_tensor->numel(), 1, platform::errors::InvalidArgument( "Input(SkipUpdate) size must be 1, but get %d", skip_update_tensor->numel())); std::vector<bool> skip_update_vec; TensorToVector(*skip_update_tensor, ctx.device_context(), &skip_update_vec); skip_update = skip_update_vec[0]; } // skip_update=true, just copy input to output, and TensorCopy will call // mutable_data if (skip_update) { VLOG(4) << "Adamw skip update"; framework::TensorCopy( *param, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), param_out); framework::TensorCopy( *mom1, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), mom1_out); framework::TensorCopy( *mom2, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), mom2_out); framework::TensorCopy( *beta1_pow, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), beta1_pow_out); framework::TensorCopy( *beta2_pow, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), beta2_pow_out); return; } // if with_decay = false, coeff = 0 bool with_decay = ctx.Attr<bool>("with_decay"); if (!with_decay) { coeff = static_cast<float>(0.0); } MPDType beta1 = static_cast<MPDType>(ctx.Attr<float>("beta1")); if (ctx.HasInput("Beta1Tensor")) { auto* beta1_tensor = ctx.Input<framework::Tensor>("Beta1Tensor"); PADDLE_ENFORCE_EQ(beta1_tensor->numel(), 1, platform::errors::InvalidArgument( "Input(Beta1Tensor) size must be 1, but get %d", beta1_tensor->numel())); beta1 = static_cast<MPDType>(GetAttrFromTensor(beta1_tensor)); } MPDType beta2 = static_cast<MPDType>(ctx.Attr<float>("beta2")); if (ctx.HasInput("Beta2Tensor")) { auto* beta2_tensor = ctx.Input<framework::Tensor>("Beta2Tensor"); PADDLE_ENFORCE_EQ(beta2_tensor->numel(), 1, platform::errors::InvalidArgument( "Input(Beta2Tensor) size must be 1, but get %d", beta2_tensor->numel())); beta2 = static_cast<MPDType>(GetAttrFromTensor(beta2_tensor)); } MPDType epsilon = static_cast<MPDType>(ctx.Attr<float>("epsilon")); if (ctx.HasInput("EpsilonTensor")) { auto* epsilon_tensor = ctx.Input<framework::Tensor>("EpsilonTensor"); PADDLE_ENFORCE_EQ(epsilon_tensor->numel(), 1, platform::errors::InvalidArgument( "Input(EpsilonTensor) size must be 1, but get %d", epsilon_tensor->numel())); epsilon = static_cast<MPDType>(GetAttrFromTensor(epsilon_tensor)); } VLOG(3) << "beta1_pow.numel() : " << beta1_pow->numel() << "beta2_pow.numel() : " << beta2_pow->numel(); VLOG(3) << "param.numel(): " << param->numel(); PADDLE_ENFORCE_EQ(beta1_pow_out->numel(), 1, platform::errors::InvalidArgument( "beta1 pow output size should be 1, but received " "value is:%d.", beta1_pow_out->numel())); PADDLE_ENFORCE_EQ(beta2_pow_out->numel(), 1, platform::errors::InvalidArgument( "beta2 pow output size should be 1, but received " "value is:%d.", beta2_pow_out->numel())); const bool multi_precision = ctx.Attr<bool>("multi_precision"); const LoDTensor* master_param = nullptr; LoDTensor* master_param_out = nullptr; if (multi_precision) { bool has_master = ctx.HasInput("MasterParam") && ctx.HasOutput("MasterParamOut"); PADDLE_ENFORCE_EQ(has_master, true, 
platform::errors::InvalidArgument( "The Input(MasterParam) and Output(MasterParamOut) " "should not be null when " "the attr `multi_precision` is true")); master_param = ctx.Input<LoDTensor>("MasterParam"); master_param_out = ctx.Output<LoDTensor>("MasterParamOut"); } const MPDType* master_in_data = multi_precision ? master_param->data<MPDType>() : nullptr; MPDType* master_out_data = multi_precision ? master_param_out->mutable_data<MPDType>(ctx.GetPlace()) : nullptr; auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); if (grad_var->IsType<framework::LoDTensor>()) { auto* grad = ctx.Input<LoDTensor>("Grad"); // update param and moment int threads = 512; int blocks = (param->numel() + threads - 1) / threads; if (beta1_pow->place() == platform::CPUPlace() && beta2_pow->place() == platform::CPUPlace()) { // Compute with betapow in REG AdamWKernelREG<T, MPDType><<<blocks, threads, 0, dev_ctx.stream()>>>( beta1, beta2, epsilon, coeff, lr_ratio, *beta1_pow->data<MPDType>(), *beta2_pow->data<MPDType>(), mom1->data<MPDType>(), mom1_out->mutable_data<MPDType>(ctx.GetPlace()), mom2->data<MPDType>(), mom2_out->mutable_data<MPDType>(ctx.GetPlace()), lr->data<MPDType>(), grad->data<T>(), param->data<T>(), param_out->mutable_data<T>(ctx.GetPlace()), master_in_data, master_out_data, param->numel()); if (!use_global_beta_pow) { // Cpu update beta1_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] = beta1 * beta1_pow->data<MPDType>()[0]; beta2_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] = beta2 * beta2_pow->data<MPDType>()[0]; } } else { AdamWKernelMEM<T, MPDType><<<blocks, threads, 0, dev_ctx.stream()>>>( beta1, beta2, epsilon, coeff, lr_ratio, beta1_pow->data<MPDType>(), beta2_pow->data<MPDType>(), mom1->data<MPDType>(), mom1_out->mutable_data<MPDType>(ctx.GetPlace()), mom2->data<MPDType>(), mom2_out->mutable_data<MPDType>(ctx.GetPlace()), lr->data<MPDType>(), grad->data<T>(), param->data<T>(), param_out->mutable_data<T>(ctx.GetPlace()), master_in_data, master_out_data, param->numel()); if (!use_global_beta_pow) { // Update with gpu UpdateAdamWBetaPow<MPDType><<<1, 32, 0, dev_ctx.stream()>>>( beta1, beta2, beta1_pow->data<MPDType>(), beta2_pow->data<MPDType>(), beta1_pow_out->mutable_data<MPDType>(ctx.GetPlace()), beta2_pow_out->mutable_data<MPDType>(ctx.GetPlace())); } } } else if (grad_var->IsType<framework::SelectedRows>()) { auto* grad = ctx.Input<framework::SelectedRows>("Grad"); if (grad->rows().size() == 0) { VLOG(3) << "grad row size is 0!!"; return; } std::vector<int64_t> cpu_rows(grad->rows().begin(), grad->rows().end()); bool is_strict_sorted = true; for (size_t i = 1; i < cpu_rows.size(); ++i) { if (cpu_rows[i - 1] >= cpu_rows[i]) { is_strict_sorted = false; break; } } framework::SelectedRows tmp_grad_merge; const framework::SelectedRows* grad_merge_ptr; if (is_strict_sorted) { grad_merge_ptr = grad; } else { // merge duplicated rows if any. 
// The rows of grad_merge have been sorted inside MergeAdd functor scatter::MergeAdd<platform::CUDADeviceContext, T> merge_func; merge_func(ctx.template device_context<platform::CUDADeviceContext>(), *grad, &tmp_grad_merge, true); grad_merge_ptr = &tmp_grad_merge; } auto& grad_merge = *grad_merge_ptr; auto& grad_tensor = grad_merge.value(); const T* grad_data = grad_tensor.template data<T>(); const int64_t* rows = grad_merge.rows().Data(ctx.GetPlace()); auto row_numel = grad_tensor.numel() / grad_merge.rows().size(); if (beta1_pow->place() == platform::CPUPlace() && beta2_pow->place() == platform::CPUPlace()) { int threads = 512; int ndim = param->numel(); int blocks = (ndim + threads - 1) / threads; SparseAdamWCUDAKernelREG< T, MPDType><<<blocks, threads, 0, dev_ctx.stream()>>>( beta1, beta2, epsilon, coeff, lr_ratio, *beta1_pow->data<MPDType>(), *beta2_pow->data<MPDType>(), mom1->data<MPDType>(), mom1_out->mutable_data<MPDType>(ctx.GetPlace()), mom2->data<MPDType>(), mom2_out->mutable_data<MPDType>(ctx.GetPlace()), lr->data<MPDType>(), grad_data, param->data<T>(), param_out->mutable_data<T>(ctx.GetPlace()), master_in_data, master_out_data, rows, row_numel, grad_merge.rows().size(), lazy_mode, ndim); if (!use_global_beta_pow) { // Update with cpu beta1_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] = beta1 * beta1_pow->data<MPDType>()[0]; beta2_pow_out->mutable_data<MPDType>(platform::CPUPlace())[0] = beta2 * beta2_pow->data<MPDType>()[0]; } } else { SparseAdamWFunctor<T, GPUAdamW, MPDType> functor( beta1, beta2, epsilon, coeff, lr_ratio, beta1_pow->data<MPDType>(), beta2_pow->data<MPDType>(), mom1->data<MPDType>(), mom1_out->mutable_data<MPDType>(ctx.GetPlace()), mom2->data<MPDType>(), mom2_out->mutable_data<MPDType>(ctx.GetPlace()), lr->data<MPDType>(), grad_data, param->data<T>(), param_out->mutable_data<T>(ctx.GetPlace()), master_in_data, master_out_data, rows, row_numel, grad_merge.rows().size(), lazy_mode); // FIXME(minqiyang): remove BinarySearch in GPU later platform::ForRange<platform::CUDADeviceContext> for_range( static_cast<const platform::CUDADeviceContext&>( ctx.device_context()), param->numel()); for_range(functor); if (!use_global_beta_pow) { // update beta1 and beta2 UpdateAdamWBetaPow<MPDType><<<1, 32, 0, dev_ctx.stream()>>>( beta1, beta2, beta1_pow->data<MPDType>(), beta2_pow->data<MPDType>(), beta1_pow_out->mutable_data<MPDType>(ctx.GetPlace()), beta2_pow_out->mutable_data<MPDType>(ctx.GetPlace())); } } } else { PADDLE_THROW(platform::errors::InvalidArgument( "Variable type not supported by adamw_op")); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL(adamw, ops::AdamWOpCUDAKernel<float>, ops::AdamWOpCUDAKernel<double>, ops::AdamWOpCUDAKernel<plat::float16>);
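Both AdamW kernels above apply the same per-element update: decoupled weight decay first scales the parameter by (1 - lr*coeff), then a bias-corrected Adam step follows. A scalar reference of that arithmetic, written as a plain host function purely for clarity (a sketch mirroring AdamWKernelREG, not part of the Paddle API):

#include <cmath>

// One AdamW step for a single parameter; beta1_pow/beta2_pow are beta1^t and beta2^t.
void adamw_step(float& p, float g, float& mom1, float& mom2,
                float lr, float coeff, float beta1, float beta2,
                float epsilon, float beta1_pow, float beta2_pow)
{
    p *= 1.0f - lr * coeff;                        // decoupled weight decay
    mom1 = beta1 * mom1 + (1.0f - beta1) * g;      // first moment
    mom2 = beta2 * mom2 + (1.0f - beta2) * g * g;  // second moment
    float denom = std::sqrt(mom2) / std::sqrt(1.0f - beta2_pow) + epsilon;
    p += (mom1 / denom) * (-(lr / (1.0f - beta1_pow)));
}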
0bcf95d33c935a5e3844e9b93b385e847292f9e7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void mandel(int *disp_width, int *disp_height, int *array, int *max_iter) { double scale_real, scale_imag; double x, y, u, v, u2, v2; int i,j,iter; scale_real = 3.5 / (double)*disp_width; scale_imag = 3.5 / (double)*disp_height; int tidx = blockIdx.x * blockDim.x + threadIdx.x; int tidy = blockIdx.y * blockDim.y + threadIdx.y; if (tidx >= *disp_width || tidy >= *disp_height) return; //for(i = 0; i < disp_width; i++) { x = ((double)tidx * scale_real) - 2.25; //x = ((double)i * scale_real) - 2.25; // for(j = 0; j < *disp_height; j++) { y = ((double)tidy * scale_imag) - 1.75; //y = ((double)j * scale_imag) - 1.75; u = 0.0; v = 0.0; u2 = 0.0; v2 = 0.0; iter = 0; while ( u2 + v2 < 4.0 && iter < *max_iter ) { v = 2 * v * u + y; u = u2 - v2 + x; u2 = u*u; v2 = v*v; iter = iter + 1; } // if we exceed max_iter, reset to zero iter = iter == *max_iter ? 0 : iter; // array[i*(*disp_height)+ j] = iter; array[tidx*(*disp_height) + tidy] = iter; //array[tidx*(*disp_height) + j] = iter; //} //} }
0bcf95d33c935a5e3844e9b93b385e847292f9e7.cu
#include <stdio.h> __global__ void mandel(int *disp_width, int *disp_height, int *array, int *max_iter) { double scale_real, scale_imag; double x, y, u, v, u2, v2; int i,j,iter; scale_real = 3.5 / (double)*disp_width; scale_imag = 3.5 / (double)*disp_height; int tidx = blockIdx.x * blockDim.x + threadIdx.x; int tidy = blockIdx.y * blockDim.y + threadIdx.y; if (tidx >= *disp_width || tidy >= *disp_height) return; //for(i = 0; i < disp_width; i++) { x = ((double)tidx * scale_real) - 2.25; //x = ((double)i * scale_real) - 2.25; // for(j = 0; j < *disp_height; j++) { y = ((double)tidy * scale_imag) - 1.75; //y = ((double)j * scale_imag) - 1.75; u = 0.0; v = 0.0; u2 = 0.0; v2 = 0.0; iter = 0; while ( u2 + v2 < 4.0 && iter < *max_iter ) { v = 2 * v * u + y; u = u2 - v2 + x; u2 = u*u; v2 = v*v; iter = iter + 1; } // if we exceed max_iter, reset to zero iter = iter == *max_iter ? 0 : iter; // array[i*(*disp_height)+ j] = iter; array[tidx*(*disp_height) + tidy] = iter; //array[tidx*(*disp_height) + j] = iter; //} //} }
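The mandel kernel in both files receives disp_width, disp_height and max_iter as device pointers, so a host driver has to copy those scalars to the GPU and choose a 2D grid that covers every pixel; neither file ships that driver. A minimal sketch of one, with hypothetical image dimensions, assuming it is compiled together with the kernel above:

#include <cuda_runtime.h>

__global__ void mandel(int *disp_width, int *disp_height, int *array, int *max_iter); // defined above

int main(void)
{
    int w = 1024, h = 1024, iters = 256;
    int *d_w, *d_h, *d_iters, *d_img;
    cudaMalloc(&d_w, sizeof(int));     cudaMemcpy(d_w, &w, sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc(&d_h, sizeof(int));     cudaMemcpy(d_h, &h, sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc(&d_iters, sizeof(int)); cudaMemcpy(d_iters, &iters, sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc(&d_img, w * h * sizeof(int));

    dim3 block(16, 16);
    dim3 grid((w + block.x - 1) / block.x, (h + block.y - 1) / block.y); // one thread per pixel
    mandel<<<grid, block>>>(d_w, d_h, d_img, d_iters);
    cudaDeviceSynchronize();

    // d_img now holds iteration counts laid out as array[x * h + y], matching the kernel.
    cudaFree(d_w); cudaFree(d_h); cudaFree(d_iters); cudaFree(d_img);
    return 0;
}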
bb05bfe93edd21022ae336e8291434f16123643e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" # include <stdio.h> # include <math.h> # include <sys/time.h> # define N 1000000 # define RADIUS 100 # define THREADS 32 __global__ void QuarterAreaOfCircle ( float *area , float *start, float *end){ //int i = blockDim.x*blockIdx.x+threadIdx.x; int i = 0; float threadStartX; float x, dx; float segmentArea = 0.0f; // x starting value of each block threadStartX = ((float)RADIUS/(float)blockDim.x); start[threadIdx.x] = (float)threadIdx.x * threadStartX; x = start[threadIdx.x]; end[threadIdx.x] = x; // increasing value of x dx = (float)RADIUS/(float)N; // calculate segment area for(i = 0; i < ((float)N/(float)blockDim.x); i++){ x += dx; segmentArea += sqrt(fabs((float)RADIUS*(float)RADIUS-x*x)) * dx; } end[threadIdx.x] = x; area[threadIdx.x] = segmentArea; } int main(int argc, char *argv[]) { float *reduceArea_d, reduceArea[THREADS], Area = 0; float *start_d, start[THREADS]; float *end_d, end[THREADS]; int i; dim3 dimBlock(THREADS); dim3 dimGrid(1); for( i = 0; i < dimBlock.x; i++){ reduceArea[i] = 0; } hipMalloc( (void**) &reduceArea_d, sizeof(float) * THREADS ); hipMalloc( (void**) &start_d, sizeof(float) * THREADS ); hipMalloc( (void**) &end_d, sizeof(float) * THREADS ); hipLaunchKernelGGL(( QuarterAreaOfCircle), dim3(dimGrid), dim3(dimBlock), 0, 0, reduceArea_d, start_d, end_d); hipMemcpy(reduceArea, reduceArea_d, sizeof(float)*dimBlock.x, hipMemcpyDeviceToHost); hipMemcpy(start, start_d, sizeof(float)*dimBlock.x, hipMemcpyDeviceToHost); hipMemcpy(end, end_d, sizeof(float)*dimBlock.x, hipMemcpyDeviceToHost); for(i = 0; i < dimBlock.x; i++){ Area += reduceArea[i]; printf("reduced area : %5.10f , grid : %d, area : %5.10f, start : %5.10f, end : %5.10f \n", reduceArea[i], i, Area, start[i], end[i]); } printf("area : %5.10f \n",Area*4); hipFree(reduceArea_d); hipFree(start_d); hipFree(end_d); }
bb05bfe93edd21022ae336e8291434f16123643e.cu
# include <stdio.h> # include <math.h> # include <sys/time.h> # define N 1000000 # define RADIUS 100 # define THREADS 32 __global__ void QuarterAreaOfCircle ( float *area , float *start, float *end){ //int i = blockDim.x*blockIdx.x+threadIdx.x; int i = 0; float threadStartX; float x, dx; float segmentArea = 0.0f; // x starting value of each block threadStartX = ((float)RADIUS/(float)blockDim.x); start[threadIdx.x] = (float)threadIdx.x * threadStartX; x = start[threadIdx.x]; end[threadIdx.x] = x; // increasing value of x dx = (float)RADIUS/(float)N; // calculate segment area for(i = 0; i < ((float)N/(float)blockDim.x); i++){ x += dx; segmentArea += sqrt(fabs((float)RADIUS*(float)RADIUS-x*x)) * dx; } end[threadIdx.x] = x; area[threadIdx.x] = segmentArea; } int main(int argc, char *argv[]) { float *reduceArea_d, reduceArea[THREADS], Area = 0; float *start_d, start[THREADS]; float *end_d, end[THREADS]; int i; dim3 dimBlock(THREADS); dim3 dimGrid(1); for( i = 0; i < dimBlock.x; i++){ reduceArea[i] = 0; } cudaMalloc( (void**) &reduceArea_d, sizeof(float) * THREADS ); cudaMalloc( (void**) &start_d, sizeof(float) * THREADS ); cudaMalloc( (void**) &end_d, sizeof(float) * THREADS ); QuarterAreaOfCircle<<<dimGrid, dimBlock>>>(reduceArea_d, start_d, end_d); cudaMemcpy(reduceArea, reduceArea_d, sizeof(float)*dimBlock.x, cudaMemcpyDeviceToHost); cudaMemcpy(start, start_d, sizeof(float)*dimBlock.x, cudaMemcpyDeviceToHost); cudaMemcpy(end, end_d, sizeof(float)*dimBlock.x, cudaMemcpyDeviceToHost); for(i = 0; i < dimBlock.x; i++){ Area += reduceArea[i]; printf("reduced area : %5.10f , grid : %d, area : %5.10f, start : %5.10f, end : %5.10f \n", reduceArea[i], i, Area, start[i], end[i]); } printf("area : %5.10f \n",Area*4); cudaFree(reduceArea_d); cudaFree(start_d); cudaFree(end_d); }
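Each QuarterAreaOfCircle thread above accumulates a Riemann sum of sqrt(RADIUS^2 - x^2) over its slice of [0, RADIUS], so the reduced total multiplied by 4 should approach pi * RADIUS^2. A single-threaded host check of the same sum, using the kernel's N and RADIUS (a sketch for sanity-checking the GPU result, not part of the original program):

#include <stdio.h>
#include <math.h>

int main(void)
{
    const int N = 1000000, RADIUS = 100;
    const double PI = 3.14159265358979323846;
    double dx = (double)RADIUS / N, x = 0.0, quarter = 0.0;
    for (int i = 0; i < N; i++) {
        x += dx;
        quarter += sqrt(fabs((double)RADIUS * RADIUS - x * x)) * dx; // same integrand as the kernel
    }
    printf("4 * quarter = %f   pi * R^2 = %f\n", 4.0 * quarter, PI * RADIUS * RADIUS);
    return 0;
}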
92ae2a91e03031f9961606e355aecf039cc9644a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ //////////////////////////////////////////////////////////////////////////////// // // NVIDIA CUDA implementation of Viola-Jones Object Detection Framework // // The algorithm and code are explained in the upcoming GPU Computing Gems // chapter in detail: // // Anton Obukhov, "Haar Classifiers for Object Detection with CUDA" // PDF URL placeholder // email: [email protected], [email protected] // // Credits for help with the code to: // Alexey Mendelenko, Cyril Crassin, and Mikhail Smirnov. 
// //////////////////////////////////////////////////////////////////////////////// #include <algorithm> #include <cstdio> #include "opencv2/cudev.hpp" #include "opencv2/core/persistence.hpp" #include "opencv2/opencv_modules.hpp" #ifdef HAVE_OPENCV_OBJDETECT # include "opencv2/objdetect.hpp" //# include "opencv2/objdetect/objdetect_c.h" #endif #include "opencv2/cudalegacy/NCV.hpp" #include "opencv2/cudalegacy/NPP_staging.hpp" #include "opencv2/cudalegacy/NCVHaarObjectDetection.hpp" #include "NCVRuntimeTemplates.hpp" #include "NCVAlg.hpp" //============================================================================== // // HaarClassifierCascade file // //============================================================================== const Ncv32u MAX_GRID_DIM = 65535; const Ncv32u NUM_THREADS_ANCHORSPARALLEL = 64; #define NUM_THREADS_CLASSIFIERPARALLEL_LOG2 6 #define NUM_THREADS_CLASSIFIERPARALLEL (1 << NUM_THREADS_CLASSIFIERPARALLEL_LOG2) /** \internal * Haar features solid array. */ texture<uint2, 1, hipReadModeElementType> texHaarFeatures; /** \internal * Haar classifiers flattened trees container. * Two parts: first contains root nodes, second - nodes that are referred by root nodes. * Drawback: breaks tree locality (might cause more cache misses * Advantage: No need to introduce additional 32-bit field to index root nodes offsets */ texture<uint4, 1, hipReadModeElementType> texHaarClassifierNodes; texture<Ncv32u, 1, hipReadModeElementType> texIImage; __device__ HaarStage64 getStage(Ncv32u iStage, HaarStage64 *d_Stages) { return d_Stages[iStage]; } template <NcvBool tbCacheTextureCascade> __device__ HaarClassifierNode128 getClassifierNode(Ncv32u iNode, HaarClassifierNode128 *d_ClassifierNodes) { HaarClassifierNode128 tmpNode; if (tbCacheTextureCascade) { tmpNode._ui4 = tex1Dfetch(texHaarClassifierNodes, iNode); } else { tmpNode = d_ClassifierNodes[iNode]; } return tmpNode; } template <NcvBool tbCacheTextureCascade> __device__ void getFeature(Ncv32u iFeature, HaarFeature64 *d_Features, Ncv32f *weight, Ncv32u *rectX, Ncv32u *rectY, Ncv32u *rectWidth, Ncv32u *rectHeight) { HaarFeature64 feature; if (tbCacheTextureCascade) { feature._ui2 = tex1Dfetch(texHaarFeatures, iFeature); } else { feature = d_Features[iFeature]; } feature.getRect(rectX, rectY, rectWidth, rectHeight); *weight = feature.getWeight(); } template <NcvBool tbCacheTextureIImg> __device__ Ncv32u getElemIImg(Ncv32u x, Ncv32u *d_IImg) { if (tbCacheTextureIImg) { return tex1Dfetch(texIImage, x); } else { return d_IImg[x]; } } __device__ Ncv32u d_outMaskPosition; __device__ void compactBlockWriteOutAnchorParallel(Ncv32u threadPassFlag, Ncv32u threadElem, Ncv32u *vectorOut) { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110 __shared__ Ncv32u shmem[NUM_THREADS_ANCHORSPARALLEL]; __shared__ Ncv32u numPassed; __shared__ Ncv32u outMaskOffset; Ncv32u incScan = cv::cudev::blockScanInclusive<NUM_THREADS_ANCHORSPARALLEL>(threadPassFlag, shmem, threadIdx.x); __syncthreads(); if (threadIdx.x == NUM_THREADS_ANCHORSPARALLEL-1) { numPassed = incScan; outMaskOffset = atomicAdd(&d_outMaskPosition, incScan); } if (threadPassFlag) { Ncv32u excScan = incScan - threadPassFlag; shmem[excScan] = threadElem; } __syncthreads(); if (threadIdx.x < numPassed) { vectorOut[outMaskOffset + threadIdx.x] = shmem[threadIdx.x]; } #endif } template <NcvBool tbInitMaskPositively, NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbReadPixelIndexFromVector, NcvBool tbDoAtomicCompaction> __global__ void applyHaarClassifierAnchorParallel(Ncv32u *d_IImg, Ncv32u 
IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea) { Ncv32u y_offs; Ncv32u x_offs; Ncv32u maskOffset; Ncv32u outMaskVal; NcvBool bInactiveThread = false; if (tbReadPixelIndexFromVector) { maskOffset = (MAX_GRID_DIM * blockIdx.y + blockIdx.x) * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x; if (maskOffset >= mask1Dlen) { if (tbDoAtomicCompaction) bInactiveThread = true; else return; } if (!tbDoAtomicCompaction || tbDoAtomicCompaction && !bInactiveThread) { outMaskVal = d_inMask[maskOffset]; y_offs = outMaskVal >> 16; x_offs = outMaskVal & 0xFFFF; } } else { y_offs = blockIdx.y; x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x; if (x_offs >= mask2Dstride) { if (tbDoAtomicCompaction) bInactiveThread = true; else return; } if (!tbDoAtomicCompaction || tbDoAtomicCompaction && !bInactiveThread) { maskOffset = y_offs * mask2Dstride + x_offs; if ((x_offs >= anchorsRoi.width) || (!tbInitMaskPositively && d_inMask != d_outMask && d_inMask[maskOffset] == OBJDET_MASK_ELEMENT_INVALID_32U)) { if (tbDoAtomicCompaction) { bInactiveThread = true; } else { d_outMask[maskOffset] = OBJDET_MASK_ELEMENT_INVALID_32U; return; } } outMaskVal = (y_offs << 16) | x_offs; } } NcvBool bPass = true; if (!tbDoAtomicCompaction || tbDoAtomicCompaction) { Ncv32f pixelStdDev = 0.0f; if (!bInactiveThread) pixelStdDev = d_weights[y_offs * weightsStride + x_offs]; for (Ncv32u iStage = startStageInc; iStage < endStageExc; iStage++) { Ncv32f curStageSum = 0.0f; HaarStage64 curStage = getStage(iStage, d_Stages); Ncv32u numRootNodesInStage = curStage.getNumClassifierRootNodes(); Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset(); Ncv32f stageThreshold = curStage.getStageThreshold(); while (numRootNodesInStage--) { NcvBool bMoreNodesToTraverse = true; Ncv32u iNode = curRootNodeOffset; if (bPass && !bInactiveThread) { while (bMoreNodesToTraverse) { HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes); HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc(); Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures(); Ncv32u iFeature = featuresDesc.getFeaturesOffset(); Ncv32f curNodeVal = 0.0f; for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++) { Ncv32f rectWeight; Ncv32u rectX, rectY, rectWidth, rectHeight; getFeature<tbCacheTextureCascade> (iFeature + iRect, d_Features, &rectWeight, &rectX, &rectY, &rectWidth, &rectHeight); Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX); Ncv32u iioffsTR = iioffsTL + rectWidth; Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride; Ncv32u iioffsBR = iioffsBL + rectWidth; Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) + getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg); #if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight); #else curNodeVal += (Ncv32f)rectSum * rectWeight; #endif } HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc(); HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc(); Ncv32f nodeThreshold = curNode.getThreshold(); HaarClassifierNodeDescriptor32 nextNodeDescriptor; NcvBool nextNodeIsLeaf; if (curNodeVal < 
scaleArea * pixelStdDev * nodeThreshold) { nextNodeDescriptor = nodeLeft; nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf(); } else { nextNodeDescriptor = nodeRight; nextNodeIsLeaf = featuresDesc.isRightNodeLeaf(); } if (nextNodeIsLeaf) { Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue(); curStageSum += tmpLeafValue; bMoreNodesToTraverse = false; } else { iNode = nextNodeDescriptor.getNextNodeOffset(); } } } __syncthreads(); curRootNodeOffset++; } if (curStageSum < stageThreshold) { bPass = false; outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U; } } } __syncthreads(); if (!tbDoAtomicCompaction) { if (!tbReadPixelIndexFromVector || (tbReadPixelIndexFromVector && (!bPass || d_inMask != d_outMask))) { d_outMask[maskOffset] = outMaskVal; } } else { compactBlockWriteOutAnchorParallel(bPass && !bInactiveThread, outMaskVal, d_outMask); } } template <NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbDoAtomicCompaction> __global__ void applyHaarClassifierClassifierParallel(Ncv32u *d_IImg, Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea) { Ncv32u maskOffset = MAX_GRID_DIM * blockIdx.y + blockIdx.x; if (maskOffset >= mask1Dlen) { return; } Ncv32u outMaskVal = d_inMask[maskOffset]; Ncv32u y_offs = outMaskVal >> 16; Ncv32u x_offs = outMaskVal & 0xFFFF; Ncv32f pixelStdDev = d_weights[y_offs * weightsStride + x_offs]; NcvBool bPass = true; for (Ncv32u iStage = startStageInc; iStage<endStageExc; iStage++) { //this variable is subject to reduction Ncv32f curStageSum = 0.0f; HaarStage64 curStage = getStage(iStage, d_Stages); Ncv32s numRootNodesInStage = curStage.getNumClassifierRootNodes(); Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset() + threadIdx.x; Ncv32f stageThreshold = curStage.getStageThreshold(); Ncv32u numRootChunks = (numRootNodesInStage + NUM_THREADS_CLASSIFIERPARALLEL - 1) >> NUM_THREADS_CLASSIFIERPARALLEL_LOG2; for (Ncv32u chunkId=0; chunkId<numRootChunks; chunkId++) { NcvBool bMoreNodesToTraverse = true; if (chunkId * NUM_THREADS_CLASSIFIERPARALLEL + threadIdx.x < numRootNodesInStage) { Ncv32u iNode = curRootNodeOffset; while (bMoreNodesToTraverse) { HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes); HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc(); Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures(); Ncv32u iFeature = featuresDesc.getFeaturesOffset(); Ncv32f curNodeVal = 0.0f; //TODO: fetch into shmem if size suffices. 
Shmem can be shared with reduce for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++) { Ncv32f rectWeight; Ncv32u rectX, rectY, rectWidth, rectHeight; getFeature<tbCacheTextureCascade> (iFeature + iRect, d_Features, &rectWeight, &rectX, &rectY, &rectWidth, &rectHeight); Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX); Ncv32u iioffsTR = iioffsTL + rectWidth; Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride; Ncv32u iioffsBR = iioffsBL + rectWidth; Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) + getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg); #if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight); #else curNodeVal += (Ncv32f)rectSum * rectWeight; #endif } HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc(); HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc(); Ncv32f nodeThreshold = curNode.getThreshold(); HaarClassifierNodeDescriptor32 nextNodeDescriptor; NcvBool nextNodeIsLeaf; if (curNodeVal < scaleArea * pixelStdDev * nodeThreshold) { nextNodeDescriptor = nodeLeft; nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf(); } else { nextNodeDescriptor = nodeRight; nextNodeIsLeaf = featuresDesc.isRightNodeLeaf(); } if (nextNodeIsLeaf) { Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue(); curStageSum += tmpLeafValue; bMoreNodesToTraverse = false; } else { iNode = nextNodeDescriptor.getNextNodeOffset(); } } } __syncthreads(); curRootNodeOffset += NUM_THREADS_CLASSIFIERPARALLEL; } Ncv32f finalStageSum = subReduce<Ncv32f, functorAddValues<Ncv32f>, NUM_THREADS_CLASSIFIERPARALLEL>(curStageSum); if (finalStageSum < stageThreshold) { bPass = false; outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U; break; } } if (!tbDoAtomicCompaction) { if (!bPass || d_inMask != d_outMask) { if (!threadIdx.x) { d_outMask[maskOffset] = outMaskVal; } } } else { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110 if (bPass && !threadIdx.x) { Ncv32u outMaskOffset = atomicAdd(&d_outMaskPosition, 1); d_outMask[outMaskOffset] = outMaskVal; } #endif } } template <NcvBool tbMaskByInmask, NcvBool tbDoAtomicCompaction> __global__ void initializeMaskVector(Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u step) { Ncv32u y_offs = blockIdx.y; Ncv32u x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x; Ncv32u outMaskOffset = y_offs * gridDim.x * blockDim.x + x_offs; Ncv32u y_offs_upsc = step * y_offs; Ncv32u x_offs_upsc = step * x_offs; Ncv32u inMaskOffset = y_offs_upsc * mask2Dstride + x_offs_upsc; Ncv32u outElem = OBJDET_MASK_ELEMENT_INVALID_32U; if (x_offs_upsc < anchorsRoi.width && (!tbMaskByInmask || d_inMask[inMaskOffset] != OBJDET_MASK_ELEMENT_INVALID_32U)) { outElem = (y_offs_upsc << 16) | x_offs_upsc; } if (!tbDoAtomicCompaction) { d_outMask[outMaskOffset] = outElem; } else { compactBlockWriteOutAnchorParallel(outElem != OBJDET_MASK_ELEMENT_INVALID_32U, outElem, d_outMask); } } struct applyHaarClassifierAnchorParallelFunctor { dim3 gridConf, blockConf; hipStream_t cuStream; //Kernel arguments are stored as members; Ncv32u *d_IImg; Ncv32u IImgStride; Ncv32f *d_weights; Ncv32u weightsStride; HaarFeature64 *d_Features; HaarClassifierNode128 *d_ClassifierNodes; HaarStage64 *d_Stages; Ncv32u *d_inMask; Ncv32u *d_outMask; Ncv32u mask1Dlen; Ncv32u mask2Dstride; NcvSize32u anchorsRoi; Ncv32u startStageInc; Ncv32u endStageExc; Ncv32f 
scaleArea; //Arguments are passed through the constructor applyHaarClassifierAnchorParallelFunctor(dim3 _gridConf, dim3 _blockConf, hipStream_t _cuStream, Ncv32u *_d_IImg, Ncv32u _IImgStride, Ncv32f *_d_weights, Ncv32u _weightsStride, HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages, Ncv32u *_d_inMask, Ncv32u *_d_outMask, Ncv32u _mask1Dlen, Ncv32u _mask2Dstride, NcvSize32u _anchorsRoi, Ncv32u _startStageInc, Ncv32u _endStageExc, Ncv32f _scaleArea) : gridConf(_gridConf), blockConf(_blockConf), cuStream(_cuStream), d_IImg(_d_IImg), IImgStride(_IImgStride), d_weights(_d_weights), weightsStride(_weightsStride), d_Features(_d_Features), d_ClassifierNodes(_d_ClassifierNodes), d_Stages(_d_Stages), d_inMask(_d_inMask), d_outMask(_d_outMask), mask1Dlen(_mask1Dlen), mask2Dstride(_mask2Dstride), anchorsRoi(_anchorsRoi), startStageInc(_startStageInc), endStageExc(_endStageExc), scaleArea(_scaleArea) {} template<class TList> void call(TList tl) { CV_UNUSED(tl); hipLaunchKernelGGL(( applyHaarClassifierAnchorParallel < Loki::TL::TypeAt<TList, 0>::Result::value, Loki::TL::TypeAt<TList, 1>::Result::value, Loki::TL::TypeAt<TList, 2>::Result::value, Loki::TL::TypeAt<TList, 3>::Result::value, Loki::TL::TypeAt<TList, 4>::Result::value >) , dim3(gridConf), dim3(blockConf), 0, cuStream, d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); } }; void applyHaarClassifierAnchorParallelDynTemplate(NcvBool tbInitMaskPositively, NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbReadPixelIndexFromVector, NcvBool tbDoAtomicCompaction, dim3 gridConf, dim3 blockConf, hipStream_t cuStream, Ncv32u *d_IImg, Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea) { applyHaarClassifierAnchorParallelFunctor functor(gridConf, blockConf, cuStream, d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); //Second parameter is the number of "dynamic" template parameters NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 5, applyHaarClassifierAnchorParallelFunctor> ::call( &functor, tbInitMaskPositively, tbCacheTextureIImg, tbCacheTextureCascade, tbReadPixelIndexFromVector, tbDoAtomicCompaction); } struct applyHaarClassifierClassifierParallelFunctor { dim3 gridConf, blockConf; hipStream_t cuStream; //Kernel arguments are stored as members; Ncv32u *d_IImg; Ncv32u IImgStride; Ncv32f *d_weights; Ncv32u weightsStride; HaarFeature64 *d_Features; HaarClassifierNode128 *d_ClassifierNodes; HaarStage64 *d_Stages; Ncv32u *d_inMask; Ncv32u *d_outMask; Ncv32u mask1Dlen; Ncv32u mask2Dstride; NcvSize32u anchorsRoi; Ncv32u startStageInc; Ncv32u endStageExc; Ncv32f scaleArea; //Arguments are passed through the constructor applyHaarClassifierClassifierParallelFunctor(dim3 _gridConf, dim3 _blockConf, hipStream_t _cuStream, Ncv32u *_d_IImg, Ncv32u _IImgStride, Ncv32f *_d_weights, Ncv32u _weightsStride, HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages, Ncv32u *_d_inMask, Ncv32u *_d_outMask, Ncv32u _mask1Dlen, Ncv32u _mask2Dstride, 
NcvSize32u _anchorsRoi, Ncv32u _startStageInc, Ncv32u _endStageExc, Ncv32f _scaleArea) : gridConf(_gridConf), blockConf(_blockConf), cuStream(_cuStream), d_IImg(_d_IImg), IImgStride(_IImgStride), d_weights(_d_weights), weightsStride(_weightsStride), d_Features(_d_Features), d_ClassifierNodes(_d_ClassifierNodes), d_Stages(_d_Stages), d_inMask(_d_inMask), d_outMask(_d_outMask), mask1Dlen(_mask1Dlen), mask2Dstride(_mask2Dstride), anchorsRoi(_anchorsRoi), startStageInc(_startStageInc), endStageExc(_endStageExc), scaleArea(_scaleArea) {} template<class TList> void call(TList tl) { CV_UNUSED(tl); hipLaunchKernelGGL(( applyHaarClassifierClassifierParallel < Loki::TL::TypeAt<TList, 0>::Result::value, Loki::TL::TypeAt<TList, 1>::Result::value, Loki::TL::TypeAt<TList, 2>::Result::value >) , dim3(gridConf), dim3(blockConf), 0, cuStream, d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); } }; void applyHaarClassifierClassifierParallelDynTemplate(NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbDoAtomicCompaction, dim3 gridConf, dim3 blockConf, hipStream_t cuStream, Ncv32u *d_IImg, Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea) { applyHaarClassifierClassifierParallelFunctor functor(gridConf, blockConf, cuStream, d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); //Second parameter is the number of "dynamic" template parameters NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 3, applyHaarClassifierClassifierParallelFunctor> ::call( &functor, tbCacheTextureIImg, tbCacheTextureCascade, tbDoAtomicCompaction); } struct initializeMaskVectorFunctor { dim3 gridConf, blockConf; hipStream_t cuStream; //Kernel arguments are stored as members; Ncv32u *d_inMask; Ncv32u *d_outMask; Ncv32u mask1Dlen; Ncv32u mask2Dstride; NcvSize32u anchorsRoi; Ncv32u step; //Arguments are passed through the constructor initializeMaskVectorFunctor(dim3 _gridConf, dim3 _blockConf, hipStream_t _cuStream, Ncv32u *_d_inMask, Ncv32u *_d_outMask, Ncv32u _mask1Dlen, Ncv32u _mask2Dstride, NcvSize32u _anchorsRoi, Ncv32u _step) : gridConf(_gridConf), blockConf(_blockConf), cuStream(_cuStream), d_inMask(_d_inMask), d_outMask(_d_outMask), mask1Dlen(_mask1Dlen), mask2Dstride(_mask2Dstride), anchorsRoi(_anchorsRoi), step(_step) {} template<class TList> void call(TList tl) { CV_UNUSED(tl); hipLaunchKernelGGL(( initializeMaskVector < Loki::TL::TypeAt<TList, 0>::Result::value, Loki::TL::TypeAt<TList, 1>::Result::value >) , dim3(gridConf), dim3(blockConf), 0, cuStream, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, step); } }; void initializeMaskVectorDynTemplate(NcvBool tbMaskByInmask, NcvBool tbDoAtomicCompaction, dim3 gridConf, dim3 blockConf, hipStream_t cuStream, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u step) { initializeMaskVectorFunctor functor(gridConf, blockConf, cuStream, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, step); //Second parameter is the number of "dynamic" template parameters 
NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 2, initializeMaskVectorFunctor> ::call( &functor, tbMaskByInmask, tbDoAtomicCompaction); } Ncv32u getStageNumWithNotLessThanNclassifiers(Ncv32u N, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages) { Ncv32u i = 0; for (; i<haar.NumStages; i++) { if (h_HaarStages.ptr()[i].getNumClassifierRootNodes() >= N) { break; } } return i; } NCVStatus ncvApplyHaarClassifierCascade_device(NCVMatrix<Ncv32u> &integral, NCVMatrix<Ncv32f> &d_weights, NCVMatrixAlloc<Ncv32u> &d_pixelMask, Ncv32u &numDetections, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarStage64> &d_HaarStages, NCVVector<HaarClassifierNode128> &d_HaarNodes, NCVVector<HaarFeature64> &d_HaarFeatures, NcvBool bMaskElements, NcvSize32u anchorsRoi, Ncv32u pixelStep, Ncv32f scaleArea, INCVMemAllocator &gpuAllocator, INCVMemAllocator &cpuAllocator, hipDeviceProp_t &devProp, hipStream_t cuStream) { ncvAssertReturn(integral.memType() == d_weights.memType()&& integral.memType() == d_pixelMask.memType() && integral.memType() == gpuAllocator.memType() && (integral.memType() == NCVMemoryTypeDevice || integral.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() && d_HaarStages.memType() == d_HaarFeatures.memType() && (d_HaarStages.memType() == NCVMemoryTypeDevice || d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED); ncvAssertReturn((integral.ptr() != NULL && d_weights.ptr() != NULL && d_pixelMask.ptr() != NULL && h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL && d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR); ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 && d_pixelMask.width() >= anchorsRoi.width && d_pixelMask.height() >= anchorsRoi.height && d_weights.width() >= anchorsRoi.width && d_weights.height() >= anchorsRoi.height && integral.width() >= anchorsRoi.width + haar.ClassifierSize.width && integral.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID); ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE); ncvAssertReturn(d_HaarStages.length() >= haar.NumStages && d_HaarNodes.length() >= haar.NumClassifierTotalNodes && d_HaarFeatures.length() >= haar.NumFeatures && d_HaarStages.length() == h_HaarStages.length() && haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID); ncvAssertReturn(haar.bNeedsTiltedII == false || gpuAllocator.isCounting(), NCV_NOIMPL_HAAR_TILTED_FEATURES); ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP); NCV_SET_SKIP_COND(gpuAllocator.isCounting()); #if defined _SELF_TEST_ NCVStatus ncvStat; NCVMatrixAlloc<Ncv32u> h_integralImage(cpuAllocator, integral.width, integral.height, integral.pitch); ncvAssertReturn(h_integralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32f> h_weights(cpuAllocator, d_weights.width, d_weights.height, d_weights.pitch); ncvAssertReturn(h_weights.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> h_pixelMask(cpuAllocator, d_pixelMask.width, d_pixelMask.height, d_pixelMask.pitch); ncvAssertReturn(h_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<HaarClassifierNode128> 
h_HaarNodes(cpuAllocator, d_HaarNodes.length); ncvAssertReturn(h_HaarNodes.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<HaarFeature64> h_HaarFeatures(cpuAllocator, d_HaarFeatures.length); ncvAssertReturn(h_HaarFeatures.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> h_pixelMask_d(cpuAllocator, d_pixelMask.width, d_pixelMask.height, d_pixelMask.pitch); ncvAssertReturn(h_pixelMask_d.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCV_SKIP_COND_BEGIN ncvStat = d_pixelMask.copySolid(h_pixelMask, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = integral.copySolid(h_integralImage, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = d_weights.copySolid(h_weights, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = d_HaarNodes.copySolid(h_HaarNodes, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = d_HaarFeatures.copySolid(h_HaarFeatures, 0); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(hipStreamSynchronize(0), NCV_CUDA_ERROR); for (Ncv32u i=0; i<(Ncv32u)anchorsRoi.height; i++) { for (Ncv32u j=0; j<d_pixelMask.stride(); j++) { if ((i%pixelStep==0) && (j%pixelStep==0) && (j<(Ncv32u)anchorsRoi.width)) { if (!bMaskElements || h_pixelMask.ptr[i*d_pixelMask.stride()+j] != OBJDET_MASK_ELEMENT_INVALID_32U) { h_pixelMask.ptr[i*d_pixelMask.stride()+j] = (i << 16) | j; } } else { h_pixelMask.ptr[i*d_pixelMask.stride()+j] = OBJDET_MASK_ELEMENT_INVALID_32U; } } } NCV_SKIP_COND_END #endif NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment(), anchorsRoi.height * d_pixelMask.stride()); ncvAssertReturn(d_vecPixelMask.isMemReused(), NCV_ALLOCATOR_BAD_REUSE); NCVVectorAlloc<Ncv32u> d_vecPixelMaskTmp(gpuAllocator, static_cast<Ncv32u>(d_vecPixelMask.length())); ncvAssertReturn(d_vecPixelMaskTmp.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<Ncv32u> hp_pool32u(cpuAllocator, 2); ncvAssertReturn(hp_pool32u.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); Ncv32u *hp_zero = &hp_pool32u.ptr()[0]; Ncv32u *hp_numDet = &hp_pool32u.ptr()[1]; NCV_SKIP_COND_BEGIN *hp_zero = 0; *hp_numDet = 0; NCV_SKIP_COND_END Ncv32f scaleAreaPixels = scaleArea * ((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) * (haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER)); NcvBool bTexCacheCascade = devProp.major < 2; NcvBool bTexCacheIImg = true; //this works better even on Fermi so far NcvBool bDoAtomicCompaction = devProp.major >= 2 || (devProp.major == 1 && devProp.minor >= 3); NCVVector<Ncv32u> *d_ptrNowData = &d_vecPixelMask; NCVVector<Ncv32u> *d_ptrNowTmp = &d_vecPixelMaskTmp; Ncv32u szNppCompactTmpBuf; nppsStCompactGetSize_32u(static_cast<Ncv32u>(d_vecPixelMask.length()), &szNppCompactTmpBuf, devProp); if (bDoAtomicCompaction) { szNppCompactTmpBuf = 0; } NCVVectorAlloc<Ncv8u> d_tmpBufCompact(gpuAllocator, szNppCompactTmpBuf); NCV_SKIP_COND_BEGIN if (bTexCacheIImg) { hipChannelFormatDesc cfdTexIImage; cfdTexIImage = hipCreateChannelDesc<Ncv32u>(); size_t alignmentOffset; ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, texIImage, integral.ptr(), cfdTexIImage, (anchorsRoi.height + haar.ClassifierSize.height) * integral.pitch()), NCV_CUDA_ERROR); ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR); } if (bTexCacheCascade) { hipChannelFormatDesc cfdTexHaarFeatures; hipChannelFormatDesc cfdTexHaarClassifierNodes; cfdTexHaarFeatures = hipCreateChannelDesc<uint2>(); cfdTexHaarClassifierNodes = hipCreateChannelDesc<uint4>(); size_t alignmentOffset; ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, texHaarFeatures, d_HaarFeatures.ptr(), cfdTexHaarFeatures,sizeof(HaarFeature64) * haar.NumFeatures), 
NCV_CUDA_ERROR); ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR); ncvAssertCUDAReturn(hipBindTexture(&alignmentOffset, texHaarClassifierNodes, d_HaarNodes.ptr(), cfdTexHaarClassifierNodes, sizeof(HaarClassifierNode128) * haar.NumClassifierTotalNodes), NCV_CUDA_ERROR); ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR); } Ncv32u stageStartAnchorParallel = 0; Ncv32u stageMiddleSwitch = getStageNumWithNotLessThanNclassifiers(NUM_THREADS_CLASSIFIERPARALLEL, haar, h_HaarStages); Ncv32u stageEndClassifierParallel = haar.NumStages; if (stageMiddleSwitch == 0) { stageMiddleSwitch = 1; } //create stages subdivision for pixel-parallel processing const Ncv32u compactEveryNstage = bDoAtomicCompaction ? 7 : 1; Ncv32u curStop = stageStartAnchorParallel; std::vector<Ncv32u> pixParallelStageStops; while (curStop < stageMiddleSwitch) { pixParallelStageStops.push_back(curStop); curStop += compactEveryNstage; } if (curStop > compactEveryNstage && curStop - stageMiddleSwitch > compactEveryNstage / 2) { pixParallelStageStops[pixParallelStageStops.size()-1] = (stageMiddleSwitch - (curStop - 2 * compactEveryNstage)) / 2; } pixParallelStageStops.push_back(stageMiddleSwitch); Ncv32u pixParallelStageStopsIndex = 0; if (pixelStep != 1 || bMaskElements) { if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 gridInit((((anchorsRoi.width + pixelStep - 1) / pixelStep + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL), (anchorsRoi.height + pixelStep - 1) / pixelStep); dim3 blockInit(NUM_THREADS_ANCHORSPARALLEL); if (gridInit.x == 0 || gridInit.y == 0) { numDetections = 0; return NCV_SUCCESS; } initializeMaskVectorDynTemplate(bMaskElements, bDoAtomicCompaction, gridInit, blockInit, cuStream, d_ptrNowData->ptr(), d_ptrNowTmp->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()), d_pixelMask.stride(), anchorsRoi, pixelStep); ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); swap(d_ptrNowData, d_ptrNowTmp); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowTmp->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()), d_ptrNowData->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturn(nppSt == NPPST_SUCCESS, NCV_NPP_ERROR); } numDetections = *hp_numDet; } else { // // 1. 
Run the first pixel-input pixel-parallel classifier for few stages // if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 grid1(((d_pixelMask.stride() + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL), anchorsRoi.height); dim3 block1(NUM_THREADS_ANCHORSPARALLEL); applyHaarClassifierAnchorParallelDynTemplate( true, //tbInitMaskPositively bTexCacheIImg, //tbCacheTextureIImg bTexCacheCascade, //tbCacheTextureCascade pixParallelStageStops[pixParallelStageStopsIndex] != 0,//tbReadPixelIndexFromVector bDoAtomicCompaction, //tbDoAtomicCompaction grid1, block1, cuStream, integral.ptr(), integral.stride(), d_weights.ptr(), d_weights.stride(), d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(), d_ptrNowData->ptr(), bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(), 0, d_pixelMask.stride(), anchorsRoi, pixParallelStageStops[pixParallelStageStopsIndex], pixParallelStageStops[pixParallelStageStopsIndex+1], scaleAreaPixels); ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()), d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturnNcvStat(nppSt); } swap(d_ptrNowData, d_ptrNowTmp); numDetections = *hp_numDet; pixParallelStageStopsIndex++; } // // 2. Run pixel-parallel stages // for (; pixParallelStageStopsIndex < pixParallelStageStops.size()-1; pixParallelStageStopsIndex++) { if (numDetections == 0) { break; } if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 grid2((numDetections + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL); if (numDetections > MAX_GRID_DIM) { grid2.x = MAX_GRID_DIM; grid2.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM; } dim3 block2(NUM_THREADS_ANCHORSPARALLEL); applyHaarClassifierAnchorParallelDynTemplate( false, //tbInitMaskPositively bTexCacheIImg, //tbCacheTextureIImg bTexCacheCascade, //tbCacheTextureCascade pixParallelStageStops[pixParallelStageStopsIndex] != 0 || pixelStep != 1 || bMaskElements,//tbReadPixelIndexFromVector bDoAtomicCompaction, //tbDoAtomicCompaction grid2, block2, cuStream, integral.ptr(), integral.stride(), d_weights.ptr(), d_weights.stride(), d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(), d_ptrNowData->ptr(), bDoAtomicCompaction ? 
d_ptrNowTmp->ptr() : d_ptrNowData->ptr(), numDetections, d_pixelMask.stride(), anchorsRoi, pixParallelStageStops[pixParallelStageStopsIndex], pixParallelStageStops[pixParallelStageStopsIndex+1], scaleAreaPixels); ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections, d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturnNcvStat(nppSt); } swap(d_ptrNowData, d_ptrNowTmp); numDetections = *hp_numDet; } // // 3. Run all left stages in one stage-parallel kernel // if (numDetections > 0 && stageMiddleSwitch < stageEndClassifierParallel) { if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, hipMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 grid3(numDetections); if (numDetections > MAX_GRID_DIM) { grid3.x = MAX_GRID_DIM; grid3.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM; } dim3 block3(NUM_THREADS_CLASSIFIERPARALLEL); applyHaarClassifierClassifierParallelDynTemplate( bTexCacheIImg, //tbCacheTextureIImg bTexCacheCascade, //tbCacheTextureCascade bDoAtomicCompaction, //tbDoAtomicCompaction grid3, block3, cuStream, integral.ptr(), integral.stride(), d_weights.ptr(), d_weights.stride(), d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(), d_ptrNowData->ptr(), bDoAtomicCompaction ? 
d_ptrNowTmp->ptr() : d_ptrNowData->ptr(), numDetections, d_pixelMask.stride(), anchorsRoi, stageMiddleSwitch, stageEndClassifierParallel, scaleAreaPixels); ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, hipMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections, d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturnNcvStat(nppSt); } swap(d_ptrNowData, d_ptrNowTmp); numDetections = *hp_numDet; } if (d_ptrNowData != &d_vecPixelMask) { d_vecPixelMaskTmp.copySolid(d_vecPixelMask, cuStream); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } #if defined _SELF_TEST_ ncvStat = d_pixelMask.copySolid(h_pixelMask_d, 0); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { std::sort(h_pixelMask_d.ptr, h_pixelMask_d.ptr + numDetections); } Ncv32u fpu_oldcw, fpu_cw; _controlfp_s(&fpu_cw, 0, 0); fpu_oldcw = fpu_cw; _controlfp_s(&fpu_cw, _PC_24, _MCW_PC); Ncv32u numDetGold; ncvStat = ncvApplyHaarClassifierCascade_host(h_integralImage, h_weights, h_pixelMask, numDetGold, haar, h_HaarStages, h_HaarNodes, h_HaarFeatures, bMaskElements, anchorsRoi, pixelStep, scaleArea); ncvAssertReturnNcvStat(ncvStat); _controlfp_s(&fpu_cw, fpu_oldcw, _MCW_PC); bool bPass = true; if (numDetGold != numDetections) { printf("NCVHaarClassifierCascade::applyHaarClassifierCascade numdetections don't match: cpu=%d, gpu=%d\n", numDetGold, numDetections); bPass = false; } else { for (Ncv32u i=0; i<::max(numDetGold, numDetections) && bPass; i++) { if (h_pixelMask.ptr[i] != h_pixelMask_d.ptr[i]) { printf("NCVHaarClassifierCascade::applyHaarClassifierCascade self test failed: i=%d, cpu=%d, gpu=%d\n", i, h_pixelMask.ptr[i], h_pixelMask_d.ptr[i]); bPass = false; } } } printf("NCVHaarClassifierCascade::applyHaarClassifierCascade %s\n", bPass?"PASSED":"FAILED"); #endif NCV_SKIP_COND_END return NCV_SUCCESS; } //============================================================================== // // HypothesesOperations file // //============================================================================== const Ncv32u NUM_GROW_THREADS = 128; __device__ __host__ NcvRect32u pixelToRect(Ncv32u pixel, Ncv32u width, Ncv32u height, Ncv32f scale) { NcvRect32u res; res.x = (Ncv32u)(scale * (pixel & 0xFFFF)); res.y = (Ncv32u)(scale * (pixel >> 16)); res.width = (Ncv32u)(scale * width); res.height = (Ncv32u)(scale * height); return res; } __global__ void growDetectionsKernel(Ncv32u *pixelMask, Ncv32u numElements, NcvRect32u *hypotheses, Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddr = blockId * NUM_GROW_THREADS + threadIdx.x; if (elemAddr >= numElements) { return; } hypotheses[elemAddr] = pixelToRect(pixelMask[elemAddr], rectWidth, rectHeight, curScale); } NCVStatus ncvGrowDetectionsVector_device(NCVVector<Ncv32u> &pixelMask, Ncv32u numPixelMaskDetections, NCVVector<NcvRect32u> &hypotheses, Ncv32u &totalDetections, Ncv32u totalMaxDetections, Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale, hipStream_t cuStream) { ncvAssertReturn(pixelMask.ptr() != NULL && 
hypotheses.ptr() != NULL, NCV_NULL_PTR); ncvAssertReturn(pixelMask.memType() == hypotheses.memType() && pixelMask.memType() == NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI); ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE); ncvAssertReturn(totalMaxDetections <= hypotheses.length() && numPixelMaskDetections <= pixelMask.length() && totalDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT); NCVStatus ncvStat = NCV_SUCCESS; Ncv32u numDetsToCopy = numPixelMaskDetections; if (numDetsToCopy == 0) { return ncvStat; } if (totalDetections + numPixelMaskDetections > totalMaxDetections) { ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW; numDetsToCopy = totalMaxDetections - totalDetections; } dim3 block(NUM_GROW_THREADS); dim3 grid((numDetsToCopy + NUM_GROW_THREADS - 1) / NUM_GROW_THREADS); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } hipLaunchKernelGGL(( growDetectionsKernel), dim3(grid), dim3(block), 0, cuStream, pixelMask.ptr(), numDetsToCopy, hypotheses.ptr() + totalDetections, rectWidth, rectHeight, curScale); ncvAssertCUDAReturn(hipGetLastError(), NCV_CUDA_ERROR); totalDetections += numDetsToCopy; return ncvStat; } //============================================================================== // // Pipeline file // //============================================================================== NCVStatus ncvDetectObjectsMultiScale_device(NCVMatrix<Ncv8u> &d_srcImg, NcvSize32u srcRoi, NCVVector<NcvRect32u> &d_dstRects, Ncv32u &dstNumRects, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarStage64> &d_HaarStages, NCVVector<HaarClassifierNode128> &d_HaarNodes, NCVVector<HaarFeature64> &d_HaarFeatures, NcvSize32u minObjSize, Ncv32u minNeighbors, //default 4 Ncv32f scaleStep, //default 1.2f Ncv32u pixelStep, //default 1 Ncv32u flags, //default NCVPipeObjDet_Default INCVMemAllocator &gpuAllocator, INCVMemAllocator &cpuAllocator, hipDeviceProp_t &devProp, hipStream_t cuStream) { ncvAssertReturn(d_srcImg.memType() == d_dstRects.memType() && d_srcImg.memType() == gpuAllocator.memType() && (d_srcImg.memType() == NCVMemoryTypeDevice || d_srcImg.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() && d_HaarStages.memType() == d_HaarFeatures.memType() && (d_HaarStages.memType() == NCVMemoryTypeDevice || d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED); ncvAssertReturn((d_srcImg.ptr() != NULL && d_dstRects.ptr() != NULL && h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL && d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR); ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0 && d_srcImg.width() >= srcRoi.width && d_srcImg.height() >= srcRoi.height && srcRoi.width >= minObjSize.width && srcRoi.height >= minObjSize.height && d_dstRects.length() >= 1, NCV_DIMENSIONS_INVALID); ncvAssertReturn(scaleStep > 1.0f, NCV_INVALID_SCALE); ncvAssertReturn(d_HaarStages.length() >= haar.NumStages && d_HaarNodes.length() >= haar.NumClassifierTotalNodes && d_HaarFeatures.length() >= haar.NumFeatures && d_HaarStages.length() == h_HaarStages.length() && haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, 
NCV_DIMENSIONS_INVALID); ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES); ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP); //TODO: set NPP active stream to cuStream NCVStatus ncvStat; NCV_SET_SKIP_COND(gpuAllocator.isCounting()); Ncv32u integralWidth = d_srcImg.width() + 1; Ncv32u integralHeight = d_srcImg.height() + 1; NCVMatrixAlloc<Ncv32u> integral(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(integral.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv64u> d_sqIntegralImage(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(d_sqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32f> d_rectStdDev(gpuAllocator, d_srcImg.width(), d_srcImg.height()); ncvAssertReturn(d_rectStdDev.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> d_pixelMask(gpuAllocator, d_srcImg.width(), d_srcImg.height()); ncvAssertReturn(d_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> d_scaledIntegralImage(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(d_scaledIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv64u> d_scaledSqIntegralImage(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(d_scaledSqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<NcvRect32u> d_hypothesesIntermediate(gpuAllocator, d_srcImg.width() * d_srcImg.height()); ncvAssertReturn(d_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<NcvRect32u> h_hypothesesIntermediate(cpuAllocator, d_srcImg.width() * d_srcImg.height()); ncvAssertReturn(h_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVStatus nppStat; Ncv32u szTmpBufIntegral, szTmpBufSqIntegral; nppStat = nppiStIntegralGetSize_8u32u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufIntegral, devProp); ncvAssertReturnNcvStat(nppStat); nppStat = nppiStSqrIntegralGetSize_8u64u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufSqIntegral, devProp); ncvAssertReturnNcvStat(nppStat); NCVVectorAlloc<Ncv8u> d_tmpIIbuf(gpuAllocator, ::max(szTmpBufIntegral, szTmpBufSqIntegral)); ncvAssertReturn(d_tmpIIbuf.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCV_SKIP_COND_BEGIN nppStat = nppiStIntegral_8u32u_C1R(d_srcImg.ptr(), d_srcImg.pitch(), integral.ptr(), integral.pitch(), NcvSize32u(d_srcImg.width(), d_srcImg.height()), d_tmpIIbuf.ptr(), szTmpBufIntegral, devProp); ncvAssertReturnNcvStat(nppStat); nppStat = nppiStSqrIntegral_8u64u_C1R(d_srcImg.ptr(), d_srcImg.pitch(), d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(), NcvSize32u(d_srcImg.width(), d_srcImg.height()), d_tmpIIbuf.ptr(), szTmpBufSqIntegral, devProp); ncvAssertReturnNcvStat(nppStat); NCV_SKIP_COND_END dstNumRects = 0; Ncv32u lastCheckedScale = 0; NcvBool bReverseTraverseScale = ((flags & NCVPipeObjDet_FindLargestObject) != 0); std::vector<Ncv32u> scalesVector; NcvBool bFoundLargestFace = false; for (Ncv32f scaleIter = 1.0f; ; scaleIter *= scaleStep) { Ncv32u scale = (Ncv32u)scaleIter; if (lastCheckedScale == scale) { continue; } lastCheckedScale = scale; if (haar.ClassifierSize.width * (Ncv32s)scale < minObjSize.width || haar.ClassifierSize.height * (Ncv32s)scale < minObjSize.height) { continue; } NcvSize32s srcRoi_, srcIIRo_i, scaledIIRoi, searchRoi; srcRoi_.width = d_srcImg.width(); srcRoi_.height = d_srcImg.height(); srcIIRo_i.width = srcRoi_.width + 1; srcIIRo_i.height = srcRoi_.height + 1; scaledIIRoi.width = srcIIRo_i.width / scale; 
scaledIIRoi.height = srcIIRo_i.height / scale; searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width; searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height; if (searchRoi.width <= 0 || searchRoi.height <= 0) { break; } scalesVector.push_back(scale); if (gpuAllocator.isCounting()) { break; } } if (bReverseTraverseScale) { std::reverse(scalesVector.begin(), scalesVector.end()); } //TODO: handle _fair_scale_ flag for (Ncv32u i=0; i<scalesVector.size(); i++) { Ncv32u scale = scalesVector[i]; NcvSize32u srcRoi_, scaledIIRoi, searchRoi; NcvSize32u srcIIRoi; srcRoi_.width = d_srcImg.width(); srcRoi_.height = d_srcImg.height(); srcIIRoi.width = srcRoi_.width + 1; srcIIRoi.height = srcRoi_.height + 1; scaledIIRoi.width = srcIIRoi.width / scale; scaledIIRoi.height = srcIIRoi.height / scale; searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width; searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height; NCV_SKIP_COND_BEGIN nppStat = nppiStDecimate_32u_C1R( integral.ptr(), integral.pitch(), d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(), srcIIRoi, scale, true); ncvAssertReturnNcvStat(nppStat); nppStat = nppiStDecimate_64u_C1R( d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(), d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(), srcIIRoi, scale, true); ncvAssertReturnNcvStat(nppStat); const NcvRect32u rect( HAAR_STDDEV_BORDER, HAAR_STDDEV_BORDER, haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER, haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER); nppStat = nppiStRectStdDev_32f_C1R( d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(), d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(), d_rectStdDev.ptr(), d_rectStdDev.pitch(), NcvSize32u(searchRoi.width, searchRoi.height), rect, (Ncv32f)scale*scale, true); ncvAssertReturnNcvStat(nppStat); NCV_SKIP_COND_END Ncv32u detectionsOnThisScale; ncvStat = ncvApplyHaarClassifierCascade_device( d_scaledIntegralImage, d_rectStdDev, d_pixelMask, detectionsOnThisScale, haar, h_HaarStages, d_HaarStages, d_HaarNodes, d_HaarFeatures, false, searchRoi, pixelStep, (Ncv32f)scale*scale, gpuAllocator, cpuAllocator, devProp, cuStream); ncvAssertReturnNcvStat(nppStat); NCV_SKIP_COND_BEGIN NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment()); ncvStat = ncvGrowDetectionsVector_device( d_vecPixelMask, detectionsOnThisScale, d_hypothesesIntermediate, dstNumRects, static_cast<Ncv32u>(d_hypothesesIntermediate.length()), haar.ClassifierSize.width, haar.ClassifierSize.height, (Ncv32f)scale, cuStream); ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat); if (flags & NCVPipeObjDet_FindLargestObject) { if (dstNumRects == 0) { continue; } if (dstNumRects != 0) { ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream, dstNumRects * sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } Ncv32u numStrongHypothesesNow = dstNumRects; ncvStat = ncvGroupRectangles_host( h_hypothesesIntermediate, numStrongHypothesesNow, minNeighbors, RECT_SIMILARITY_PROPORTION, NULL); ncvAssertReturnNcvStat(ncvStat); if (numStrongHypothesesNow > 0) { NcvRect32u maxRect = h_hypothesesIntermediate.ptr()[0]; for (Ncv32u j=1; j<numStrongHypothesesNow; j++) { if (maxRect.width < h_hypothesesIntermediate.ptr()[j].width) { maxRect = h_hypothesesIntermediate.ptr()[j]; } } h_hypothesesIntermediate.ptr()[0] = maxRect; dstNumRects = 1; ncvStat = 
h_hypothesesIntermediate.copySolid(d_dstRects, cuStream, sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); bFoundLargestFace = true; break; } } NCV_SKIP_COND_END if (gpuAllocator.isCounting()) { break; } } NCVStatus ncvRetCode = NCV_SUCCESS; NCV_SKIP_COND_BEGIN if (flags & NCVPipeObjDet_FindLargestObject) { if (!bFoundLargestFace) { dstNumRects = 0; } } else { //TODO: move hypotheses filtration to GPU pipeline (the only CPU-resident element of the pipeline left) if (dstNumRects != 0) { ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream, dstNumRects * sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); } ncvStat = ncvGroupRectangles_host( h_hypothesesIntermediate, dstNumRects, minNeighbors, RECT_SIMILARITY_PROPORTION, NULL); ncvAssertReturnNcvStat(ncvStat); if (dstNumRects > d_dstRects.length()) { ncvRetCode = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW; dstNumRects = static_cast<Ncv32u>(d_dstRects.length()); } if (dstNumRects != 0) { ncvStat = h_hypothesesIntermediate.copySolid(d_dstRects, cuStream, dstNumRects * sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); } } if (flags & NCVPipeObjDet_VisualizeInPlace) { ncvAssertCUDAReturn(hipStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvDrawRects_8u_device(d_srcImg.ptr(), d_srcImg.stride(), d_srcImg.width(), d_srcImg.height(), d_dstRects.ptr(), dstNumRects, 255, cuStream); } NCV_SKIP_COND_END return ncvRetCode; } //============================================================================== // // Purely Host code: classifier IO, mock-ups // //============================================================================== #ifdef _SELF_TEST_ #include <float.h> #endif NCVStatus ncvApplyHaarClassifierCascade_host(NCVMatrix<Ncv32u> &h_integralImage, NCVMatrix<Ncv32f> &h_weights, NCVMatrixAlloc<Ncv32u> &h_pixelMask, Ncv32u &numDetections, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarClassifierNode128> &h_HaarNodes, NCVVector<HaarFeature64> &h_HaarFeatures, NcvBool bMaskElements, NcvSize32u anchorsRoi, Ncv32u pixelStep, Ncv32f scaleArea) { ncvAssertReturn(h_integralImage.memType() == h_weights.memType() && h_integralImage.memType() == h_pixelMask.memType() && (h_integralImage.memType() == NCVMemoryTypeHostPageable || h_integralImage.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_HaarStages.memType() == h_HaarNodes.memType() && h_HaarStages.memType() == h_HaarFeatures.memType() && (h_HaarStages.memType() == NCVMemoryTypeHostPageable || h_HaarStages.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_integralImage.ptr() != NULL && h_weights.ptr() != NULL && h_pixelMask.ptr() != NULL && h_HaarStages.ptr() != NULL && h_HaarNodes.ptr() != NULL && h_HaarFeatures.ptr() != NULL, NCV_NULL_PTR); ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 && h_pixelMask.width() >= anchorsRoi.width && h_pixelMask.height() >= anchorsRoi.height && h_weights.width() >= anchorsRoi.width && h_weights.height() >= anchorsRoi.height && h_integralImage.width() >= anchorsRoi.width + haar.ClassifierSize.width && h_integralImage.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID); ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE); ncvAssertReturn(h_HaarStages.length() >= haar.NumStages && h_HaarNodes.length() >= haar.NumClassifierTotalNodes && 
h_HaarFeatures.length() >= haar.NumFeatures && h_HaarStages.length() == h_HaarStages.length() && haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID); ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES); ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP); Ncv32f scaleAreaPixels = scaleArea * ((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) * (haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER)); for (Ncv32u i=0; i<anchorsRoi.height; i++) { for (Ncv32u j=0; j<h_pixelMask.stride(); j++) { if (i % pixelStep != 0 || j % pixelStep != 0 || j >= anchorsRoi.width) { h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U; } else { for (Ncv32u iStage = 0; iStage < haar.NumStages; iStage++) { Ncv32f curStageSum = 0.0f; Ncv32u numRootNodesInStage = h_HaarStages.ptr()[iStage].getNumClassifierRootNodes(); Ncv32u curRootNodeOffset = h_HaarStages.ptr()[iStage].getStartClassifierRootNodeOffset(); if (iStage == 0) { if (bMaskElements && h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U) { break; } else { h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = ((i << 16) | j); } } else if (h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U) { break; } while (numRootNodesInStage--) { NcvBool bMoreNodesToTraverse = true; Ncv32u curNodeOffset = curRootNodeOffset; while (bMoreNodesToTraverse) { HaarClassifierNode128 curNode = h_HaarNodes.ptr()[curNodeOffset]; HaarFeatureDescriptor32 curFeatDesc = curNode.getFeatureDesc(); Ncv32u curNodeFeaturesNum = curFeatDesc.getNumFeatures(); Ncv32u curNodeFeaturesOffs = curFeatDesc.getFeaturesOffset(); Ncv32f curNodeVal = 0.f; for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++) { HaarFeature64 feature = h_HaarFeatures.ptr()[curNodeFeaturesOffs + iRect]; Ncv32u rectX, rectY, rectWidth, rectHeight; feature.getRect(&rectX, &rectY, &rectWidth, &rectHeight); Ncv32f rectWeight = feature.getWeight(); Ncv32u iioffsTL = (i + rectY) * h_integralImage.stride() + (j + rectX); Ncv32u iioffsTR = iioffsTL + rectWidth; Ncv32u iioffsBL = iioffsTL + rectHeight * h_integralImage.stride(); Ncv32u iioffsBR = iioffsBL + rectWidth; Ncv32u iivalTL = h_integralImage.ptr()[iioffsTL]; Ncv32u iivalTR = h_integralImage.ptr()[iioffsTR]; Ncv32u iivalBL = h_integralImage.ptr()[iioffsBL]; Ncv32u iivalBR = h_integralImage.ptr()[iioffsBR]; Ncv32u rectSum = iivalBR - iivalBL + iivalTL - iivalTR; curNodeVal += (Ncv32f)rectSum * rectWeight; } HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc(); HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc(); Ncv32f nodeThreshold = curNode.getThreshold(); HaarClassifierNodeDescriptor32 nextNodeDescriptor; NcvBool nextNodeIsLeaf; if (curNodeVal < scaleAreaPixels * h_weights.ptr()[i * h_weights.stride() + j] * nodeThreshold) { nextNodeDescriptor = nodeLeft; nextNodeIsLeaf = curFeatDesc.isLeftNodeLeaf(); } else { nextNodeDescriptor = nodeRight; nextNodeIsLeaf = curFeatDesc.isRightNodeLeaf(); } if (nextNodeIsLeaf) { Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValueHost(); curStageSum += tmpLeafValue; bMoreNodesToTraverse = false; } else { curNodeOffset = nextNodeDescriptor.getNextNodeOffset(); } } curRootNodeOffset++; } Ncv32f tmpStageThreshold = h_HaarStages.ptr()[iStage].getStageThreshold(); if (curStageSum < tmpStageThreshold) { //drop h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U; break; } } } } } std::sort(h_pixelMask.ptr(), 
h_pixelMask.ptr() + anchorsRoi.height * h_pixelMask.stride()); Ncv32u i = 0; for (; i<anchorsRoi.height * h_pixelMask.stride(); i++) { if (h_pixelMask.ptr()[i] == OBJDET_MASK_ELEMENT_INVALID_32U) { break; } } numDetections = i; return NCV_SUCCESS; } NCVStatus ncvGrowDetectionsVector_host(NCVVector<Ncv32u> &pixelMask, Ncv32u numPixelMaskDetections, NCVVector<NcvRect32u> &hypotheses, Ncv32u &totalDetections, Ncv32u totalMaxDetections, Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale) { ncvAssertReturn(pixelMask.ptr() != NULL && hypotheses.ptr() != NULL, NCV_NULL_PTR); ncvAssertReturn(pixelMask.memType() == hypotheses.memType() && pixelMask.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI); ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE); ncvAssertReturn(totalMaxDetections <= hypotheses.length() && numPixelMaskDetections <= pixelMask.length() && totalDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT); NCVStatus ncvStat = NCV_SUCCESS; Ncv32u numDetsToCopy = numPixelMaskDetections; if (numDetsToCopy == 0) { return ncvStat; } if (totalDetections + numPixelMaskDetections > totalMaxDetections) { ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW; numDetsToCopy = totalMaxDetections - totalDetections; } for (Ncv32u i=0; i<numDetsToCopy; i++) { hypotheses.ptr()[totalDetections + i] = pixelToRect(pixelMask.ptr()[i], rectWidth, rectHeight, curScale); } totalDetections += numDetsToCopy; return ncvStat; } #define RECT_X_IDX 0 #define RECT_Y_IDX 1 #define RECT_W_IDX 2 #define RECT_H_IDX 3 #define RECT_WEIGHT_IDX 4 #define CUDA_CC_SIZE_W 0 #define CUDA_CC_SIZE_H 1 static NCVStatus loadFromXML(const cv::String &filename, HaarClassifierCascadeDescriptor &haar, std::vector<HaarStage64> &haarStages, std::vector<HaarClassifierNode128> &haarClassifierNodes, std::vector<HaarFeature64> &haarFeatures) { const char *CUDA_CC_SIZE = "size"; const char *CUDA_CC_STAGES = "stages"; const char *CUDA_CC_STAGE_THRESHOLD = "stage_threshold"; const char *CUDA_CC_TREES = "trees"; const char *CUDA_CC_FEATURE = "feature"; const char *CUDA_CC_RECT = "rects"; const char *CUDA_CC_TILTED = "tilted"; const char *CUDA_CC_THRESHOLD = "threshold"; const char *CUDA_CC_LEFT_VAL = "left_val"; const char *CUDA_CC_RIGHT_VAL = "right_val"; const char *CUDA_CC_LEFT_NODE = "left_node"; const char *CUDA_CC_RIGHT_NODE = "right_node"; NCVStatus ncvStat; haar.NumStages = 0; haar.NumClassifierRootNodes = 0; haar.NumClassifierTotalNodes = 0; haar.NumFeatures = 0; haar.ClassifierSize.width = 0; haar.ClassifierSize.height = 0; haar.bHasStumpsOnly = true; haar.bNeedsTiltedII = false; Ncv32u curMaxTreeDepth = 0; std::vector<HaarClassifierNode128> h_TmpClassifierNotRootNodes; haarStages.resize(0); haarClassifierNodes.resize(0); haarFeatures.resize(0); cv::FileStorage fs(filename, cv::FileStorage::READ | cv::FileStorage::FORMAT_XML); if (!fs.isOpened()) return NCV_FILE_ERROR; const cv::FileNode &root = fs.getFirstTopLevelNode(); const cv::FileNode &fnSize = root[CUDA_CC_SIZE]; // collect the cascade classifier window size haar.ClassifierSize.width = (int)fnSize[CUDA_CC_SIZE_W]; haar.ClassifierSize.height = (int)fnSize[CUDA_CC_SIZE_H]; CV_Assert(haar.ClassifierSize.height > 0 && haar.ClassifierSize.width > 0); const cv::FileNode &fnStages = root[CUDA_CC_STAGES]; cv::FileNodeIterator it = fnStages.begin(), it_end = fnStages.end(); for (; it != it_end; ++it) // by stages { cv::FileNode fnStage = *it; HaarStage64 curStage; 
curStage.setStartClassifierRootNodeOffset(static_cast<Ncv32u>(haarClassifierNodes.size())); curStage.setStageThreshold((float)fnStage[CUDA_CC_STAGE_THRESHOLD]); // iterate over the trees const cv::FileNode &fnTrees = fnStage[CUDA_CC_TREES]; cv::FileNodeIterator it1 = fnTrees.begin(), it1_end = fnTrees.end(); for (; it1 != it1_end; ++it1) // by trees { cv::FileNode tree = *it1; Ncv32u nodeId = (size_t)0; HaarClassifierNode128 curNode; curNode.setThreshold((float)tree[0][CUDA_CC_THRESHOLD]); NcvBool bIsLeftNodeLeaf = false; NcvBool bIsRightNodeLeaf = false; HaarClassifierNodeDescriptor32 nodeLeft; cv::FileNode leftNode = tree[0][CUDA_CC_LEFT_NODE]; if (leftNode.fs == NULL) { Ncv32f leftVal = tree[0][CUDA_CC_LEFT_VAL]; ncvStat = nodeLeft.create(leftVal); ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat); bIsLeftNodeLeaf = true; } else { Ncv32u leftNodeOffset = (int)tree[0][CUDA_CC_LEFT_NODE]; nodeLeft.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + leftNodeOffset - 1)); haar.bHasStumpsOnly = false; } curNode.setLeftNodeDesc(nodeLeft); HaarClassifierNodeDescriptor32 nodeRight; cv::FileNode rightNode = tree[0][CUDA_CC_RIGHT_NODE]; if (rightNode.fs == NULL) { Ncv32f rightVal = tree[0][CUDA_CC_RIGHT_VAL]; ncvStat = nodeRight.create(rightVal); ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat); bIsRightNodeLeaf = true; } else { Ncv32u rightNodeOffset = (int)tree[0][CUDA_CC_RIGHT_NODE]; nodeRight.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + rightNodeOffset - 1)); haar.bHasStumpsOnly = false; } curNode.setRightNodeDesc(nodeRight); cv::FileNode fnFeature = tree[0][CUDA_CC_FEATURE]; Ncv32u tiltedVal = (int)fnFeature[CUDA_CC_TILTED]; haar.bNeedsTiltedII = (tiltedVal != 0); cv::FileNodeIterator it2 = fnFeature[CUDA_CC_RECT].begin(), it2_end = fnFeature[CUDA_CC_RECT].end(); Ncv32u featureId = 0; for (; it2 != it2_end; ++it2) // by feature { cv::FileNode rect = *it2; Ncv32u rectX = (int)rect[RECT_X_IDX]; Ncv32u rectY = (int)rect[RECT_Y_IDX]; Ncv32u rectWidth = (int)rect[RECT_W_IDX]; Ncv32u rectHeight = (int)rect[RECT_H_IDX]; Ncv32f rectWeight = (float)rect[RECT_WEIGHT_IDX]; if (rectWeight == 0) break; HaarFeature64 curFeature; ncvStat = curFeature.setRect(rectX, rectY, rectWidth, rectHeight, haar.ClassifierSize.width, haar.ClassifierSize.height); curFeature.setWeight(rectWeight); ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat); haarFeatures.push_back(curFeature); featureId++; } HaarFeatureDescriptor32 tmpFeatureDesc; ncvStat = tmpFeatureDesc.create(haar.bNeedsTiltedII, bIsLeftNodeLeaf, bIsRightNodeLeaf, featureId, static_cast<Ncv32u>(haarFeatures.size()) - featureId); ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat); curNode.setFeatureDesc(tmpFeatureDesc); if (!nodeId) { //root node haarClassifierNodes.push_back(curNode); curMaxTreeDepth = 1; } else { //other node h_TmpClassifierNotRootNodes.push_back(curNode); curMaxTreeDepth++; } nodeId++; } curStage.setNumClassifierRootNodes((Ncv32u)fnTrees.size()); haarStages.push_back(curStage); } //fill in cascade stats haar.NumStages = static_cast<Ncv32u>(haarStages.size()); haar.NumClassifierRootNodes = static_cast<Ncv32u>(haarClassifierNodes.size()); haar.NumClassifierTotalNodes = static_cast<Ncv32u>(haar.NumClassifierRootNodes + h_TmpClassifierNotRootNodes.size()); haar.NumFeatures = static_cast<Ncv32u>(haarFeatures.size()); //merge root and leaf nodes in one classifiers array Ncv32u offsetRoot = static_cast<Ncv32u>(haarClassifierNodes.size()); for (Ncv32u i=0; i<haarClassifierNodes.size(); i++) { HaarFeatureDescriptor32 featureDesc = 
haarClassifierNodes[i].getFeatureDesc(); HaarClassifierNodeDescriptor32 nodeLeft = haarClassifierNodes[i].getLeftNodeDesc(); if (!featureDesc.isLeftNodeLeaf()) { Ncv32u newOffset = nodeLeft.getNextNodeOffset() + offsetRoot; nodeLeft.create(newOffset); } haarClassifierNodes[i].setLeftNodeDesc(nodeLeft); HaarClassifierNodeDescriptor32 nodeRight = haarClassifierNodes[i].getRightNodeDesc(); if (!featureDesc.isRightNodeLeaf()) { Ncv32u newOffset = nodeRight.getNextNodeOffset() + offsetRoot; nodeRight.create(newOffset); } haarClassifierNodes[i].setRightNodeDesc(nodeRight); } for (Ncv32u i=0; i<h_TmpClassifierNotRootNodes.size(); i++) { HaarFeatureDescriptor32 featureDesc = h_TmpClassifierNotRootNodes[i].getFeatureDesc(); HaarClassifierNodeDescriptor32 nodeLeft = h_TmpClassifierNotRootNodes[i].getLeftNodeDesc(); if (!featureDesc.isLeftNodeLeaf()) { Ncv32u newOffset = nodeLeft.getNextNodeOffset() + offsetRoot; nodeLeft.create(newOffset); } h_TmpClassifierNotRootNodes[i].setLeftNodeDesc(nodeLeft); HaarClassifierNodeDescriptor32 nodeRight = h_TmpClassifierNotRootNodes[i].getRightNodeDesc(); if (!featureDesc.isRightNodeLeaf()) { Ncv32u newOffset = nodeRight.getNextNodeOffset() + offsetRoot; nodeRight.create(newOffset); } h_TmpClassifierNotRootNodes[i].setRightNodeDesc(nodeRight); haarClassifierNodes.push_back(h_TmpClassifierNotRootNodes[i]); } return NCV_SUCCESS; } #define NVBIN_HAAR_SIZERESERVED 16 #define NVBIN_HAAR_VERSION 0x1 static NCVStatus loadFromNVBIN(const cv::String &filename, HaarClassifierCascadeDescriptor &haar, std::vector<HaarStage64> &haarStages, std::vector<HaarClassifierNode128> &haarClassifierNodes, std::vector<HaarFeature64> &haarFeatures) { size_t readCount; FILE *fp = fopen(filename.c_str(), "rb"); ncvAssertReturn(fp != NULL, NCV_FILE_ERROR); Ncv32u fileVersion; readCount = fread(&fileVersion, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR); Ncv32u fsize; readCount = fread(&fsize, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); fseek(fp, 0, SEEK_END); Ncv32u fsizeActual = ftell(fp); ncvAssertReturn(fsize == fsizeActual, NCV_FILE_ERROR); std::vector<unsigned char> fdata; fdata.resize(fsize); Ncv32u dataOffset = 0; fseek(fp, 0, SEEK_SET); readCount = fread(&fdata[0], fsize, 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); fclose(fp); //data dataOffset = NVBIN_HAAR_SIZERESERVED; haar.NumStages = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.NumClassifierRootNodes = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.NumClassifierTotalNodes = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.NumFeatures = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.ClassifierSize = *(NcvSize32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(NcvSize32u); haar.bNeedsTiltedII = *(NcvBool *)(&fdata[0]+dataOffset); dataOffset += sizeof(NcvBool); haar.bHasStumpsOnly = *(NcvBool *)(&fdata[0]+dataOffset); dataOffset += sizeof(NcvBool); haarStages.resize(haar.NumStages); haarClassifierNodes.resize(haar.NumClassifierTotalNodes); haarFeatures.resize(haar.NumFeatures); Ncv32u szStages = haar.NumStages * sizeof(HaarStage64); Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128); Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64); memcpy(&haarStages[0], &fdata[0]+dataOffset, szStages); dataOffset += szStages; memcpy(&haarClassifierNodes[0], &fdata[0]+dataOffset, 
szClassifiers); dataOffset += szClassifiers; memcpy(&haarFeatures[0], &fdata[0]+dataOffset, szFeatures); dataOffset += szFeatures; return NCV_SUCCESS; } NCVStatus ncvHaarGetClassifierSize(const cv::String &filename, Ncv32u &numStages, Ncv32u &numNodes, Ncv32u &numFeatures) { size_t readCount; NCVStatus ncvStat; cv::String fext = filename.substr(filename.find_last_of(".") + 1); std::transform(fext.begin(), fext.end(), fext.begin(), ::tolower); if (fext == "nvbin") { FILE *fp = fopen(filename.c_str(), "rb"); ncvAssertReturn(fp != NULL, NCV_FILE_ERROR); Ncv32u fileVersion; readCount = fread(&fileVersion, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR); fseek(fp, NVBIN_HAAR_SIZERESERVED, SEEK_SET); Ncv32u tmp; readCount = fread(&numStages, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); readCount = fread(&tmp, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); readCount = fread(&numNodes, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); readCount = fread(&numFeatures, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); fclose(fp); } else if (fext == "xml") { HaarClassifierCascadeDescriptor haar; std::vector<HaarStage64> haarStages; std::vector<HaarClassifierNode128> haarNodes; std::vector<HaarFeature64> haarFeatures; ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures); ncvAssertReturnNcvStat(ncvStat); numStages = haar.NumStages; numNodes = haar.NumClassifierTotalNodes; numFeatures = haar.NumFeatures; } else { return NCV_HAAR_XML_LOADING_EXCEPTION; } return NCV_SUCCESS; } NCVStatus ncvHaarLoadFromFile_host(const cv::String &filename, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarClassifierNode128> &h_HaarNodes, NCVVector<HaarFeature64> &h_HaarFeatures) { ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned && h_HaarNodes.memType() == NCVMemoryTypeHostPinned && h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR); NCVStatus ncvStat; cv::String fext = filename.substr(filename.find_last_of(".") + 1); std::transform(fext.begin(), fext.end(), fext.begin(), ::tolower); std::vector<HaarStage64> haarStages; std::vector<HaarClassifierNode128> haarNodes; std::vector<HaarFeature64> haarFeatures; if (fext == "nvbin") { ncvStat = loadFromNVBIN(filename, haar, haarStages, haarNodes, haarFeatures); ncvAssertReturnNcvStat(ncvStat); } else if (fext == "xml") { ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures); ncvAssertReturnNcvStat(ncvStat); } else { return NCV_HAAR_XML_LOADING_EXCEPTION; } ncvAssertReturn(h_HaarStages.length() >= haarStages.size(), NCV_MEM_INSUFFICIENT_CAPACITY); ncvAssertReturn(h_HaarNodes.length() >= haarNodes.size(), NCV_MEM_INSUFFICIENT_CAPACITY); ncvAssertReturn(h_HaarFeatures.length() >= haarFeatures.size(), NCV_MEM_INSUFFICIENT_CAPACITY); memcpy(h_HaarStages.ptr(), &haarStages[0], haarStages.size()*sizeof(HaarStage64)); memcpy(h_HaarNodes.ptr(), &haarNodes[0], haarNodes.size()*sizeof(HaarClassifierNode128)); memcpy(h_HaarFeatures.ptr(), &haarFeatures[0], haarFeatures.size()*sizeof(HaarFeature64)); return NCV_SUCCESS; } NCVStatus ncvHaarStoreNVBIN_host(const cv::String &filename, HaarClassifierCascadeDescriptor haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarClassifierNode128> &h_HaarNodes, NCVVector<HaarFeature64> &h_HaarFeatures) { 
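// NVBIN layout produced here (and consumed by loadFromNVBIN above):
//   [0]   Ncv32u  NVBIN_HAAR_VERSION
//   [4]   Ncv32u  total file size (patched into the header after serialization)
//   ...   reserved space up to NVBIN_HAAR_SIZERESERVED (16) bytes
//   [16]  Ncv32u  NumStages, NumClassifierRootNodes, NumClassifierTotalNodes, NumFeatures
//         NcvSize32u ClassifierSize, NcvBool bNeedsTiltedII, NcvBool bHasStumpsOnly
//         HaarStage64[NumStages], HaarClassifierNode128[NumClassifierTotalNodes],
//         HaarFeature64[NumFeatures]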
ncvAssertReturn(h_HaarStages.length() >= haar.NumStages, NCV_INCONSISTENT_INPUT); ncvAssertReturn(h_HaarNodes.length() >= haar.NumClassifierTotalNodes, NCV_INCONSISTENT_INPUT); ncvAssertReturn(h_HaarFeatures.length() >= haar.NumFeatures, NCV_INCONSISTENT_INPUT); ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned && h_HaarNodes.memType() == NCVMemoryTypeHostPinned && h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR); Ncv32u szStages = haar.NumStages * sizeof(HaarStage64); Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128); Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64); Ncv32u dataOffset = 0; std::vector<unsigned char> fdata; fdata.resize(szStages+szClassifiers+szFeatures+1024, 0); //header *(Ncv32u *)(&fdata[0]+dataOffset) = NVBIN_HAAR_VERSION; //data dataOffset = NVBIN_HAAR_SIZERESERVED; *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumStages; dataOffset += sizeof(Ncv32u); *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierRootNodes; dataOffset += sizeof(Ncv32u); *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierTotalNodes; dataOffset += sizeof(Ncv32u); *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumFeatures; dataOffset += sizeof(Ncv32u); *(NcvSize32u *)(&fdata[0]+dataOffset) = haar.ClassifierSize; dataOffset += sizeof(NcvSize32u); *(NcvBool *)(&fdata[0]+dataOffset) = haar.bNeedsTiltedII; dataOffset += sizeof(NcvBool); *(NcvBool *)(&fdata[0]+dataOffset) = haar.bHasStumpsOnly; dataOffset += sizeof(NcvBool); memcpy(&fdata[0]+dataOffset, h_HaarStages.ptr(), szStages); dataOffset += szStages; memcpy(&fdata[0]+dataOffset, h_HaarNodes.ptr(), szClassifiers); dataOffset += szClassifiers; memcpy(&fdata[0]+dataOffset, h_HaarFeatures.ptr(), szFeatures); dataOffset += szFeatures; Ncv32u fsize = dataOffset; //TODO: CRC32 here //update header dataOffset = sizeof(Ncv32u); *(Ncv32u *)(&fdata[0]+dataOffset) = fsize; FILE *fp = fopen(filename.c_str(), "wb"); ncvAssertReturn(fp != NULL, NCV_FILE_ERROR); fwrite(&fdata[0], fsize, 1, fp); fclose(fp); return NCV_SUCCESS; }
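// A minimal usage sketch of the host-side cascade API above: loading an XML cascade and
// re-serializing it as NVBIN. Guarded out because it is illustrative only; it assumes the
// NCVMemNativeAllocator / NCVVectorAlloc helpers from NCV.hpp with a 128-byte alignment,
// uses a hypothetical function name, and abbreviates error handling to the ncvAssert* macros.
#if 0
static NCVStatus convertCascadeXMLToNVBIN(const cv::String &xmlPath, const cv::String &nvbinPath)
{
    Ncv32u numStages = 0, numNodes = 0, numFeatures = 0;
    ncvAssertReturnNcvStat(ncvHaarGetClassifierSize(xmlPath, numStages, numNodes, numFeatures));

    // Both ncvHaarLoadFromFile_host and ncvHaarStoreNVBIN_host require host-pinned storage.
    NCVMemNativeAllocator pinnedAlloc(NCVMemoryTypeHostPinned, 128);
    NCVVectorAlloc<HaarStage64> stages(pinnedAlloc, numStages);
    NCVVectorAlloc<HaarClassifierNode128> nodes(pinnedAlloc, numNodes);
    NCVVectorAlloc<HaarFeature64> features(pinnedAlloc, numFeatures);
    ncvAssertReturn(stages.isMemAllocated() && nodes.isMemAllocated() && features.isMemAllocated(),
                    NCV_ALLOCATOR_BAD_ALLOC);

    HaarClassifierCascadeDescriptor haar;
    ncvAssertReturnNcvStat(ncvHaarLoadFromFile_host(xmlPath, haar, stages, nodes, features));
    return ncvHaarStoreNVBIN_host(nvbinPath, haar, stages, nodes, features);
}
#endif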
92ae2a91e03031f9961606e355aecf039cc9644a.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. // //M*/ //////////////////////////////////////////////////////////////////////////////// // // NVIDIA CUDA implementation of Viola-Jones Object Detection Framework // // The algorithm and code are explained in the upcoming GPU Computing Gems // chapter in detail: // // Anton Obukhov, "Haar Classifiers for Object Detection with CUDA" // PDF URL placeholder // email: [email protected], [email protected] // // Credits for help with the code to: // Alexey Mendelenko, Cyril Crassin, and Mikhail Smirnov. 
// //////////////////////////////////////////////////////////////////////////////// #include <algorithm> #include <cstdio> #include "opencv2/cudev.hpp" #include "opencv2/core/persistence.hpp" #include "opencv2/opencv_modules.hpp" #ifdef HAVE_OPENCV_OBJDETECT # include "opencv2/objdetect.hpp" //# include "opencv2/objdetect/objdetect_c.h" #endif #include "opencv2/cudalegacy/NCV.hpp" #include "opencv2/cudalegacy/NPP_staging.hpp" #include "opencv2/cudalegacy/NCVHaarObjectDetection.hpp" #include "NCVRuntimeTemplates.hpp" #include "NCVAlg.hpp" //============================================================================== // // HaarClassifierCascade file // //============================================================================== const Ncv32u MAX_GRID_DIM = 65535; const Ncv32u NUM_THREADS_ANCHORSPARALLEL = 64; #define NUM_THREADS_CLASSIFIERPARALLEL_LOG2 6 #define NUM_THREADS_CLASSIFIERPARALLEL (1 << NUM_THREADS_CLASSIFIERPARALLEL_LOG2) /** \internal * Haar features solid array. */ texture<uint2, 1, cudaReadModeElementType> texHaarFeatures; /** \internal * Haar classifiers flattened trees container. * Two parts: first contains root nodes, second - nodes that are referred by root nodes. * Drawback: breaks tree locality (might cause more cache misses * Advantage: No need to introduce additional 32-bit field to index root nodes offsets */ texture<uint4, 1, cudaReadModeElementType> texHaarClassifierNodes; texture<Ncv32u, 1, cudaReadModeElementType> texIImage; __device__ HaarStage64 getStage(Ncv32u iStage, HaarStage64 *d_Stages) { return d_Stages[iStage]; } template <NcvBool tbCacheTextureCascade> __device__ HaarClassifierNode128 getClassifierNode(Ncv32u iNode, HaarClassifierNode128 *d_ClassifierNodes) { HaarClassifierNode128 tmpNode; if (tbCacheTextureCascade) { tmpNode._ui4 = tex1Dfetch(texHaarClassifierNodes, iNode); } else { tmpNode = d_ClassifierNodes[iNode]; } return tmpNode; } template <NcvBool tbCacheTextureCascade> __device__ void getFeature(Ncv32u iFeature, HaarFeature64 *d_Features, Ncv32f *weight, Ncv32u *rectX, Ncv32u *rectY, Ncv32u *rectWidth, Ncv32u *rectHeight) { HaarFeature64 feature; if (tbCacheTextureCascade) { feature._ui2 = tex1Dfetch(texHaarFeatures, iFeature); } else { feature = d_Features[iFeature]; } feature.getRect(rectX, rectY, rectWidth, rectHeight); *weight = feature.getWeight(); } template <NcvBool tbCacheTextureIImg> __device__ Ncv32u getElemIImg(Ncv32u x, Ncv32u *d_IImg) { if (tbCacheTextureIImg) { return tex1Dfetch(texIImage, x); } else { return d_IImg[x]; } } __device__ Ncv32u d_outMaskPosition; __device__ void compactBlockWriteOutAnchorParallel(Ncv32u threadPassFlag, Ncv32u threadElem, Ncv32u *vectorOut) { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110 __shared__ Ncv32u shmem[NUM_THREADS_ANCHORSPARALLEL]; __shared__ Ncv32u numPassed; __shared__ Ncv32u outMaskOffset; Ncv32u incScan = cv::cudev::blockScanInclusive<NUM_THREADS_ANCHORSPARALLEL>(threadPassFlag, shmem, threadIdx.x); __syncthreads(); if (threadIdx.x == NUM_THREADS_ANCHORSPARALLEL-1) { numPassed = incScan; outMaskOffset = atomicAdd(&d_outMaskPosition, incScan); } if (threadPassFlag) { Ncv32u excScan = incScan - threadPassFlag; shmem[excScan] = threadElem; } __syncthreads(); if (threadIdx.x < numPassed) { vectorOut[outMaskOffset + threadIdx.x] = shmem[threadIdx.x]; } #endif } template <NcvBool tbInitMaskPositively, NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbReadPixelIndexFromVector, NcvBool tbDoAtomicCompaction> __global__ void applyHaarClassifierAnchorParallel(Ncv32u *d_IImg, 
Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea) { Ncv32u y_offs; Ncv32u x_offs; Ncv32u maskOffset; Ncv32u outMaskVal; NcvBool bInactiveThread = false; if (tbReadPixelIndexFromVector) { maskOffset = (MAX_GRID_DIM * blockIdx.y + blockIdx.x) * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x; if (maskOffset >= mask1Dlen) { if (tbDoAtomicCompaction) bInactiveThread = true; else return; } if (!tbDoAtomicCompaction || tbDoAtomicCompaction && !bInactiveThread) { outMaskVal = d_inMask[maskOffset]; y_offs = outMaskVal >> 16; x_offs = outMaskVal & 0xFFFF; } } else { y_offs = blockIdx.y; x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x; if (x_offs >= mask2Dstride) { if (tbDoAtomicCompaction) bInactiveThread = true; else return; } if (!tbDoAtomicCompaction || tbDoAtomicCompaction && !bInactiveThread) { maskOffset = y_offs * mask2Dstride + x_offs; if ((x_offs >= anchorsRoi.width) || (!tbInitMaskPositively && d_inMask != d_outMask && d_inMask[maskOffset] == OBJDET_MASK_ELEMENT_INVALID_32U)) { if (tbDoAtomicCompaction) { bInactiveThread = true; } else { d_outMask[maskOffset] = OBJDET_MASK_ELEMENT_INVALID_32U; return; } } outMaskVal = (y_offs << 16) | x_offs; } } NcvBool bPass = true; if (!tbDoAtomicCompaction || tbDoAtomicCompaction) { Ncv32f pixelStdDev = 0.0f; if (!bInactiveThread) pixelStdDev = d_weights[y_offs * weightsStride + x_offs]; for (Ncv32u iStage = startStageInc; iStage < endStageExc; iStage++) { Ncv32f curStageSum = 0.0f; HaarStage64 curStage = getStage(iStage, d_Stages); Ncv32u numRootNodesInStage = curStage.getNumClassifierRootNodes(); Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset(); Ncv32f stageThreshold = curStage.getStageThreshold(); while (numRootNodesInStage--) { NcvBool bMoreNodesToTraverse = true; Ncv32u iNode = curRootNodeOffset; if (bPass && !bInactiveThread) { while (bMoreNodesToTraverse) { HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes); HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc(); Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures(); Ncv32u iFeature = featuresDesc.getFeaturesOffset(); Ncv32f curNodeVal = 0.0f; for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++) { Ncv32f rectWeight; Ncv32u rectX, rectY, rectWidth, rectHeight; getFeature<tbCacheTextureCascade> (iFeature + iRect, d_Features, &rectWeight, &rectX, &rectY, &rectWidth, &rectHeight); Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX); Ncv32u iioffsTR = iioffsTL + rectWidth; Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride; Ncv32u iioffsBR = iioffsBL + rectWidth; Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) + getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg); #if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight); #else curNodeVal += (Ncv32f)rectSum * rectWeight; #endif } HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc(); HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc(); Ncv32f nodeThreshold = curNode.getThreshold(); HaarClassifierNodeDescriptor32 nextNodeDescriptor; NcvBool nextNodeIsLeaf; if (curNodeVal < 
scaleArea * pixelStdDev * nodeThreshold) { nextNodeDescriptor = nodeLeft; nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf(); } else { nextNodeDescriptor = nodeRight; nextNodeIsLeaf = featuresDesc.isRightNodeLeaf(); } if (nextNodeIsLeaf) { Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue(); curStageSum += tmpLeafValue; bMoreNodesToTraverse = false; } else { iNode = nextNodeDescriptor.getNextNodeOffset(); } } } __syncthreads(); curRootNodeOffset++; } if (curStageSum < stageThreshold) { bPass = false; outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U; } } } __syncthreads(); if (!tbDoAtomicCompaction) { if (!tbReadPixelIndexFromVector || (tbReadPixelIndexFromVector && (!bPass || d_inMask != d_outMask))) { d_outMask[maskOffset] = outMaskVal; } } else { compactBlockWriteOutAnchorParallel(bPass && !bInactiveThread, outMaskVal, d_outMask); } } template <NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbDoAtomicCompaction> __global__ void applyHaarClassifierClassifierParallel(Ncv32u *d_IImg, Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea) { Ncv32u maskOffset = MAX_GRID_DIM * blockIdx.y + blockIdx.x; if (maskOffset >= mask1Dlen) { return; } Ncv32u outMaskVal = d_inMask[maskOffset]; Ncv32u y_offs = outMaskVal >> 16; Ncv32u x_offs = outMaskVal & 0xFFFF; Ncv32f pixelStdDev = d_weights[y_offs * weightsStride + x_offs]; NcvBool bPass = true; for (Ncv32u iStage = startStageInc; iStage<endStageExc; iStage++) { //this variable is subject to reduction Ncv32f curStageSum = 0.0f; HaarStage64 curStage = getStage(iStage, d_Stages); Ncv32s numRootNodesInStage = curStage.getNumClassifierRootNodes(); Ncv32u curRootNodeOffset = curStage.getStartClassifierRootNodeOffset() + threadIdx.x; Ncv32f stageThreshold = curStage.getStageThreshold(); Ncv32u numRootChunks = (numRootNodesInStage + NUM_THREADS_CLASSIFIERPARALLEL - 1) >> NUM_THREADS_CLASSIFIERPARALLEL_LOG2; for (Ncv32u chunkId=0; chunkId<numRootChunks; chunkId++) { NcvBool bMoreNodesToTraverse = true; if (chunkId * NUM_THREADS_CLASSIFIERPARALLEL + threadIdx.x < numRootNodesInStage) { Ncv32u iNode = curRootNodeOffset; while (bMoreNodesToTraverse) { HaarClassifierNode128 curNode = getClassifierNode<tbCacheTextureCascade>(iNode, d_ClassifierNodes); HaarFeatureDescriptor32 featuresDesc = curNode.getFeatureDesc(); Ncv32u curNodeFeaturesNum = featuresDesc.getNumFeatures(); Ncv32u iFeature = featuresDesc.getFeaturesOffset(); Ncv32f curNodeVal = 0.0f; //TODO: fetch into shmem if size suffices. 
Shmem can be shared with reduce for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++) { Ncv32f rectWeight; Ncv32u rectX, rectY, rectWidth, rectHeight; getFeature<tbCacheTextureCascade> (iFeature + iRect, d_Features, &rectWeight, &rectX, &rectY, &rectWidth, &rectHeight); Ncv32u iioffsTL = (y_offs + rectY) * IImgStride + (x_offs + rectX); Ncv32u iioffsTR = iioffsTL + rectWidth; Ncv32u iioffsBL = iioffsTL + rectHeight * IImgStride; Ncv32u iioffsBR = iioffsBL + rectWidth; Ncv32u rectSum = getElemIImg<tbCacheTextureIImg>(iioffsBR, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsBL, d_IImg) + getElemIImg<tbCacheTextureIImg>(iioffsTL, d_IImg) - getElemIImg<tbCacheTextureIImg>(iioffsTR, d_IImg); #if defined CPU_FP_COMPLIANCE || defined DISABLE_MAD_SELECTIVELY curNodeVal += __fmul_rn((Ncv32f)rectSum, rectWeight); #else curNodeVal += (Ncv32f)rectSum * rectWeight; #endif } HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc(); HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc(); Ncv32f nodeThreshold = curNode.getThreshold(); HaarClassifierNodeDescriptor32 nextNodeDescriptor; NcvBool nextNodeIsLeaf; if (curNodeVal < scaleArea * pixelStdDev * nodeThreshold) { nextNodeDescriptor = nodeLeft; nextNodeIsLeaf = featuresDesc.isLeftNodeLeaf(); } else { nextNodeDescriptor = nodeRight; nextNodeIsLeaf = featuresDesc.isRightNodeLeaf(); } if (nextNodeIsLeaf) { Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValue(); curStageSum += tmpLeafValue; bMoreNodesToTraverse = false; } else { iNode = nextNodeDescriptor.getNextNodeOffset(); } } } __syncthreads(); curRootNodeOffset += NUM_THREADS_CLASSIFIERPARALLEL; } Ncv32f finalStageSum = subReduce<Ncv32f, functorAddValues<Ncv32f>, NUM_THREADS_CLASSIFIERPARALLEL>(curStageSum); if (finalStageSum < stageThreshold) { bPass = false; outMaskVal = OBJDET_MASK_ELEMENT_INVALID_32U; break; } } if (!tbDoAtomicCompaction) { if (!bPass || d_inMask != d_outMask) { if (!threadIdx.x) { d_outMask[maskOffset] = outMaskVal; } } } else { #if __CUDA_ARCH__ && __CUDA_ARCH__ >= 110 if (bPass && !threadIdx.x) { Ncv32u outMaskOffset = atomicAdd(&d_outMaskPosition, 1); d_outMask[outMaskOffset] = outMaskVal; } #endif } } template <NcvBool tbMaskByInmask, NcvBool tbDoAtomicCompaction> __global__ void initializeMaskVector(Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u step) { Ncv32u y_offs = blockIdx.y; Ncv32u x_offs = blockIdx.x * NUM_THREADS_ANCHORSPARALLEL + threadIdx.x; Ncv32u outMaskOffset = y_offs * gridDim.x * blockDim.x + x_offs; Ncv32u y_offs_upsc = step * y_offs; Ncv32u x_offs_upsc = step * x_offs; Ncv32u inMaskOffset = y_offs_upsc * mask2Dstride + x_offs_upsc; Ncv32u outElem = OBJDET_MASK_ELEMENT_INVALID_32U; if (x_offs_upsc < anchorsRoi.width && (!tbMaskByInmask || d_inMask[inMaskOffset] != OBJDET_MASK_ELEMENT_INVALID_32U)) { outElem = (y_offs_upsc << 16) | x_offs_upsc; } if (!tbDoAtomicCompaction) { d_outMask[outMaskOffset] = outElem; } else { compactBlockWriteOutAnchorParallel(outElem != OBJDET_MASK_ELEMENT_INVALID_32U, outElem, d_outMask); } } struct applyHaarClassifierAnchorParallelFunctor { dim3 gridConf, blockConf; cudaStream_t cuStream; //Kernel arguments are stored as members; Ncv32u *d_IImg; Ncv32u IImgStride; Ncv32f *d_weights; Ncv32u weightsStride; HaarFeature64 *d_Features; HaarClassifierNode128 *d_ClassifierNodes; HaarStage64 *d_Stages; Ncv32u *d_inMask; Ncv32u *d_outMask; Ncv32u mask1Dlen; Ncv32u mask2Dstride; NcvSize32u anchorsRoi; Ncv32u startStageInc; Ncv32u endStageExc; Ncv32f 
scaleArea; //Arguments are passed through the constructor applyHaarClassifierAnchorParallelFunctor(dim3 _gridConf, dim3 _blockConf, cudaStream_t _cuStream, Ncv32u *_d_IImg, Ncv32u _IImgStride, Ncv32f *_d_weights, Ncv32u _weightsStride, HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages, Ncv32u *_d_inMask, Ncv32u *_d_outMask, Ncv32u _mask1Dlen, Ncv32u _mask2Dstride, NcvSize32u _anchorsRoi, Ncv32u _startStageInc, Ncv32u _endStageExc, Ncv32f _scaleArea) : gridConf(_gridConf), blockConf(_blockConf), cuStream(_cuStream), d_IImg(_d_IImg), IImgStride(_IImgStride), d_weights(_d_weights), weightsStride(_weightsStride), d_Features(_d_Features), d_ClassifierNodes(_d_ClassifierNodes), d_Stages(_d_Stages), d_inMask(_d_inMask), d_outMask(_d_outMask), mask1Dlen(_mask1Dlen), mask2Dstride(_mask2Dstride), anchorsRoi(_anchorsRoi), startStageInc(_startStageInc), endStageExc(_endStageExc), scaleArea(_scaleArea) {} template<class TList> void call(TList tl) { CV_UNUSED(tl); applyHaarClassifierAnchorParallel < Loki::TL::TypeAt<TList, 0>::Result::value, Loki::TL::TypeAt<TList, 1>::Result::value, Loki::TL::TypeAt<TList, 2>::Result::value, Loki::TL::TypeAt<TList, 3>::Result::value, Loki::TL::TypeAt<TList, 4>::Result::value > <<<gridConf, blockConf, 0, cuStream>>> (d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); } }; void applyHaarClassifierAnchorParallelDynTemplate(NcvBool tbInitMaskPositively, NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbReadPixelIndexFromVector, NcvBool tbDoAtomicCompaction, dim3 gridConf, dim3 blockConf, cudaStream_t cuStream, Ncv32u *d_IImg, Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea) { applyHaarClassifierAnchorParallelFunctor functor(gridConf, blockConf, cuStream, d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); //Second parameter is the number of "dynamic" template parameters NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 5, applyHaarClassifierAnchorParallelFunctor> ::call( &functor, tbInitMaskPositively, tbCacheTextureIImg, tbCacheTextureCascade, tbReadPixelIndexFromVector, tbDoAtomicCompaction); } struct applyHaarClassifierClassifierParallelFunctor { dim3 gridConf, blockConf; cudaStream_t cuStream; //Kernel arguments are stored as members; Ncv32u *d_IImg; Ncv32u IImgStride; Ncv32f *d_weights; Ncv32u weightsStride; HaarFeature64 *d_Features; HaarClassifierNode128 *d_ClassifierNodes; HaarStage64 *d_Stages; Ncv32u *d_inMask; Ncv32u *d_outMask; Ncv32u mask1Dlen; Ncv32u mask2Dstride; NcvSize32u anchorsRoi; Ncv32u startStageInc; Ncv32u endStageExc; Ncv32f scaleArea; //Arguments are passed through the constructor applyHaarClassifierClassifierParallelFunctor(dim3 _gridConf, dim3 _blockConf, cudaStream_t _cuStream, Ncv32u *_d_IImg, Ncv32u _IImgStride, Ncv32f *_d_weights, Ncv32u _weightsStride, HaarFeature64 *_d_Features, HaarClassifierNode128 *_d_ClassifierNodes, HaarStage64 *_d_Stages, Ncv32u *_d_inMask, Ncv32u *_d_outMask, Ncv32u _mask1Dlen, Ncv32u _mask2Dstride, NcvSize32u _anchorsRoi, Ncv32u 
_startStageInc, Ncv32u _endStageExc, Ncv32f _scaleArea) : gridConf(_gridConf), blockConf(_blockConf), cuStream(_cuStream), d_IImg(_d_IImg), IImgStride(_IImgStride), d_weights(_d_weights), weightsStride(_weightsStride), d_Features(_d_Features), d_ClassifierNodes(_d_ClassifierNodes), d_Stages(_d_Stages), d_inMask(_d_inMask), d_outMask(_d_outMask), mask1Dlen(_mask1Dlen), mask2Dstride(_mask2Dstride), anchorsRoi(_anchorsRoi), startStageInc(_startStageInc), endStageExc(_endStageExc), scaleArea(_scaleArea) {} template<class TList> void call(TList tl) { CV_UNUSED(tl); applyHaarClassifierClassifierParallel < Loki::TL::TypeAt<TList, 0>::Result::value, Loki::TL::TypeAt<TList, 1>::Result::value, Loki::TL::TypeAt<TList, 2>::Result::value > <<<gridConf, blockConf, 0, cuStream>>> (d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); } }; void applyHaarClassifierClassifierParallelDynTemplate(NcvBool tbCacheTextureIImg, NcvBool tbCacheTextureCascade, NcvBool tbDoAtomicCompaction, dim3 gridConf, dim3 blockConf, cudaStream_t cuStream, Ncv32u *d_IImg, Ncv32u IImgStride, Ncv32f *d_weights, Ncv32u weightsStride, HaarFeature64 *d_Features, HaarClassifierNode128 *d_ClassifierNodes, HaarStage64 *d_Stages, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u startStageInc, Ncv32u endStageExc, Ncv32f scaleArea) { applyHaarClassifierClassifierParallelFunctor functor(gridConf, blockConf, cuStream, d_IImg, IImgStride, d_weights, weightsStride, d_Features, d_ClassifierNodes, d_Stages, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, startStageInc, endStageExc, scaleArea); //Second parameter is the number of "dynamic" template parameters NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 3, applyHaarClassifierClassifierParallelFunctor> ::call( &functor, tbCacheTextureIImg, tbCacheTextureCascade, tbDoAtomicCompaction); } struct initializeMaskVectorFunctor { dim3 gridConf, blockConf; cudaStream_t cuStream; //Kernel arguments are stored as members; Ncv32u *d_inMask; Ncv32u *d_outMask; Ncv32u mask1Dlen; Ncv32u mask2Dstride; NcvSize32u anchorsRoi; Ncv32u step; //Arguments are passed through the constructor initializeMaskVectorFunctor(dim3 _gridConf, dim3 _blockConf, cudaStream_t _cuStream, Ncv32u *_d_inMask, Ncv32u *_d_outMask, Ncv32u _mask1Dlen, Ncv32u _mask2Dstride, NcvSize32u _anchorsRoi, Ncv32u _step) : gridConf(_gridConf), blockConf(_blockConf), cuStream(_cuStream), d_inMask(_d_inMask), d_outMask(_d_outMask), mask1Dlen(_mask1Dlen), mask2Dstride(_mask2Dstride), anchorsRoi(_anchorsRoi), step(_step) {} template<class TList> void call(TList tl) { CV_UNUSED(tl); initializeMaskVector < Loki::TL::TypeAt<TList, 0>::Result::value, Loki::TL::TypeAt<TList, 1>::Result::value > <<<gridConf, blockConf, 0, cuStream>>> (d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, step); } }; void initializeMaskVectorDynTemplate(NcvBool tbMaskByInmask, NcvBool tbDoAtomicCompaction, dim3 gridConf, dim3 blockConf, cudaStream_t cuStream, Ncv32u *d_inMask, Ncv32u *d_outMask, Ncv32u mask1Dlen, Ncv32u mask2Dstride, NcvSize32u anchorsRoi, Ncv32u step) { initializeMaskVectorFunctor functor(gridConf, blockConf, cuStream, d_inMask, d_outMask, mask1Dlen, mask2Dstride, anchorsRoi, step); //Second parameter is the number of "dynamic" template parameters NCVRuntimeTemplateBool::KernelCaller<Loki::NullType, 2, initializeMaskVectorFunctor> ::call( &functor, 
tbMaskByInmask, tbDoAtomicCompaction); } Ncv32u getStageNumWithNotLessThanNclassifiers(Ncv32u N, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages) { Ncv32u i = 0; for (; i<haar.NumStages; i++) { if (h_HaarStages.ptr()[i].getNumClassifierRootNodes() >= N) { break; } } return i; } NCVStatus ncvApplyHaarClassifierCascade_device(NCVMatrix<Ncv32u> &integral, NCVMatrix<Ncv32f> &d_weights, NCVMatrixAlloc<Ncv32u> &d_pixelMask, Ncv32u &numDetections, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarStage64> &d_HaarStages, NCVVector<HaarClassifierNode128> &d_HaarNodes, NCVVector<HaarFeature64> &d_HaarFeatures, NcvBool bMaskElements, NcvSize32u anchorsRoi, Ncv32u pixelStep, Ncv32f scaleArea, INCVMemAllocator &gpuAllocator, INCVMemAllocator &cpuAllocator, cudaDeviceProp &devProp, cudaStream_t cuStream) { ncvAssertReturn(integral.memType() == d_weights.memType()&& integral.memType() == d_pixelMask.memType() && integral.memType() == gpuAllocator.memType() && (integral.memType() == NCVMemoryTypeDevice || integral.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() && d_HaarStages.memType() == d_HaarFeatures.memType() && (d_HaarStages.memType() == NCVMemoryTypeDevice || d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED); ncvAssertReturn((integral.ptr() != NULL && d_weights.ptr() != NULL && d_pixelMask.ptr() != NULL && h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL && d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR); ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 && d_pixelMask.width() >= anchorsRoi.width && d_pixelMask.height() >= anchorsRoi.height && d_weights.width() >= anchorsRoi.width && d_weights.height() >= anchorsRoi.height && integral.width() >= anchorsRoi.width + haar.ClassifierSize.width && integral.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID); ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE); ncvAssertReturn(d_HaarStages.length() >= haar.NumStages && d_HaarNodes.length() >= haar.NumClassifierTotalNodes && d_HaarFeatures.length() >= haar.NumFeatures && d_HaarStages.length() == h_HaarStages.length() && haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID); ncvAssertReturn(haar.bNeedsTiltedII == false || gpuAllocator.isCounting(), NCV_NOIMPL_HAAR_TILTED_FEATURES); ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP); NCV_SET_SKIP_COND(gpuAllocator.isCounting()); #if defined _SELF_TEST_ NCVStatus ncvStat; NCVMatrixAlloc<Ncv32u> h_integralImage(cpuAllocator, integral.width, integral.height, integral.pitch); ncvAssertReturn(h_integralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32f> h_weights(cpuAllocator, d_weights.width, d_weights.height, d_weights.pitch); ncvAssertReturn(h_weights.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> h_pixelMask(cpuAllocator, d_pixelMask.width, d_pixelMask.height, d_pixelMask.pitch); ncvAssertReturn(h_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<HaarClassifierNode128> h_HaarNodes(cpuAllocator, d_HaarNodes.length); ncvAssertReturn(h_HaarNodes.isMemAllocated(), 
NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<HaarFeature64> h_HaarFeatures(cpuAllocator, d_HaarFeatures.length); ncvAssertReturn(h_HaarFeatures.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> h_pixelMask_d(cpuAllocator, d_pixelMask.width, d_pixelMask.height, d_pixelMask.pitch); ncvAssertReturn(h_pixelMask_d.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCV_SKIP_COND_BEGIN ncvStat = d_pixelMask.copySolid(h_pixelMask, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = integral.copySolid(h_integralImage, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = d_weights.copySolid(h_weights, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = d_HaarNodes.copySolid(h_HaarNodes, 0); ncvAssertReturnNcvStat(ncvStat); ncvStat = d_HaarFeatures.copySolid(h_HaarFeatures, 0); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(cudaStreamSynchronize(0), NCV_CUDA_ERROR); for (Ncv32u i=0; i<(Ncv32u)anchorsRoi.height; i++) { for (Ncv32u j=0; j<d_pixelMask.stride(); j++) { if ((i%pixelStep==0) && (j%pixelStep==0) && (j<(Ncv32u)anchorsRoi.width)) { if (!bMaskElements || h_pixelMask.ptr[i*d_pixelMask.stride()+j] != OBJDET_MASK_ELEMENT_INVALID_32U) { h_pixelMask.ptr[i*d_pixelMask.stride()+j] = (i << 16) | j; } } else { h_pixelMask.ptr[i*d_pixelMask.stride()+j] = OBJDET_MASK_ELEMENT_INVALID_32U; } } } NCV_SKIP_COND_END #endif NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment(), anchorsRoi.height * d_pixelMask.stride()); ncvAssertReturn(d_vecPixelMask.isMemReused(), NCV_ALLOCATOR_BAD_REUSE); NCVVectorAlloc<Ncv32u> d_vecPixelMaskTmp(gpuAllocator, static_cast<Ncv32u>(d_vecPixelMask.length())); ncvAssertReturn(d_vecPixelMaskTmp.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<Ncv32u> hp_pool32u(cpuAllocator, 2); ncvAssertReturn(hp_pool32u.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); Ncv32u *hp_zero = &hp_pool32u.ptr()[0]; Ncv32u *hp_numDet = &hp_pool32u.ptr()[1]; NCV_SKIP_COND_BEGIN *hp_zero = 0; *hp_numDet = 0; NCV_SKIP_COND_END Ncv32f scaleAreaPixels = scaleArea * ((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) * (haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER)); NcvBool bTexCacheCascade = devProp.major < 2; NcvBool bTexCacheIImg = true; //this works better even on Fermi so far NcvBool bDoAtomicCompaction = devProp.major >= 2 || (devProp.major == 1 && devProp.minor >= 3); NCVVector<Ncv32u> *d_ptrNowData = &d_vecPixelMask; NCVVector<Ncv32u> *d_ptrNowTmp = &d_vecPixelMaskTmp; Ncv32u szNppCompactTmpBuf; nppsStCompactGetSize_32u(static_cast<Ncv32u>(d_vecPixelMask.length()), &szNppCompactTmpBuf, devProp); if (bDoAtomicCompaction) { szNppCompactTmpBuf = 0; } NCVVectorAlloc<Ncv8u> d_tmpBufCompact(gpuAllocator, szNppCompactTmpBuf); NCV_SKIP_COND_BEGIN if (bTexCacheIImg) { cudaChannelFormatDesc cfdTexIImage; cfdTexIImage = cudaCreateChannelDesc<Ncv32u>(); size_t alignmentOffset; ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, texIImage, integral.ptr(), cfdTexIImage, (anchorsRoi.height + haar.ClassifierSize.height) * integral.pitch()), NCV_CUDA_ERROR); ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR); } if (bTexCacheCascade) { cudaChannelFormatDesc cfdTexHaarFeatures; cudaChannelFormatDesc cfdTexHaarClassifierNodes; cfdTexHaarFeatures = cudaCreateChannelDesc<uint2>(); cfdTexHaarClassifierNodes = cudaCreateChannelDesc<uint4>(); size_t alignmentOffset; ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, texHaarFeatures, d_HaarFeatures.ptr(), cfdTexHaarFeatures,sizeof(HaarFeature64) * haar.NumFeatures), NCV_CUDA_ERROR); ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR); 
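// texHaarClassifierNodes is bound below in the same way; both cascade textures are only
// consulted when bTexCacheCascade is set (compute capability 1.x), otherwise the kernels
// read nodes and features directly from global memory, while the integral image stays
// texture-cached unconditionally (bTexCacheIImg is forced true above).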
ncvAssertCUDAReturn(cudaBindTexture(&alignmentOffset, texHaarClassifierNodes, d_HaarNodes.ptr(), cfdTexHaarClassifierNodes, sizeof(HaarClassifierNode128) * haar.NumClassifierTotalNodes), NCV_CUDA_ERROR); ncvAssertReturn(alignmentOffset==0, NCV_TEXTURE_BIND_ERROR); } Ncv32u stageStartAnchorParallel = 0; Ncv32u stageMiddleSwitch = getStageNumWithNotLessThanNclassifiers(NUM_THREADS_CLASSIFIERPARALLEL, haar, h_HaarStages); Ncv32u stageEndClassifierParallel = haar.NumStages; if (stageMiddleSwitch == 0) { stageMiddleSwitch = 1; } //create stages subdivision for pixel-parallel processing const Ncv32u compactEveryNstage = bDoAtomicCompaction ? 7 : 1; Ncv32u curStop = stageStartAnchorParallel; std::vector<Ncv32u> pixParallelStageStops; while (curStop < stageMiddleSwitch) { pixParallelStageStops.push_back(curStop); curStop += compactEveryNstage; } if (curStop > compactEveryNstage && curStop - stageMiddleSwitch > compactEveryNstage / 2) { pixParallelStageStops[pixParallelStageStops.size()-1] = (stageMiddleSwitch - (curStop - 2 * compactEveryNstage)) / 2; } pixParallelStageStops.push_back(stageMiddleSwitch); Ncv32u pixParallelStageStopsIndex = 0; if (pixelStep != 1 || bMaskElements) { if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 gridInit((((anchorsRoi.width + pixelStep - 1) / pixelStep + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL), (anchorsRoi.height + pixelStep - 1) / pixelStep); dim3 blockInit(NUM_THREADS_ANCHORSPARALLEL); if (gridInit.x == 0 || gridInit.y == 0) { numDetections = 0; return NCV_SUCCESS; } initializeMaskVectorDynTemplate(bMaskElements, bDoAtomicCompaction, gridInit, blockInit, cuStream, d_ptrNowData->ptr(), d_ptrNowTmp->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()), d_pixelMask.stride(), anchorsRoi, pixelStep); ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); swap(d_ptrNowData, d_ptrNowTmp); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowTmp->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()), d_ptrNowData->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturn(nppSt == NPPST_SUCCESS, NCV_NPP_ERROR); } numDetections = *hp_numDet; } else { // // 1. 
Run the first pixel-input pixel-parallel classifier for few stages // if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 grid1(((d_pixelMask.stride() + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL), anchorsRoi.height); dim3 block1(NUM_THREADS_ANCHORSPARALLEL); applyHaarClassifierAnchorParallelDynTemplate( true, //tbInitMaskPositively bTexCacheIImg, //tbCacheTextureIImg bTexCacheCascade, //tbCacheTextureCascade pixParallelStageStops[pixParallelStageStopsIndex] != 0,//tbReadPixelIndexFromVector bDoAtomicCompaction, //tbDoAtomicCompaction grid1, block1, cuStream, integral.ptr(), integral.stride(), d_weights.ptr(), d_weights.stride(), d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(), d_ptrNowData->ptr(), bDoAtomicCompaction ? d_ptrNowTmp->ptr() : d_ptrNowData->ptr(), 0, d_pixelMask.stride(), anchorsRoi, pixParallelStageStops[pixParallelStageStopsIndex], pixParallelStageStops[pixParallelStageStopsIndex+1], scaleAreaPixels); ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), static_cast<Ncv32u>(d_vecPixelMask.length()), d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturnNcvStat(nppSt); } swap(d_ptrNowData, d_ptrNowTmp); numDetections = *hp_numDet; pixParallelStageStopsIndex++; } // // 2. Run pixel-parallel stages // for (; pixParallelStageStopsIndex < pixParallelStageStops.size()-1; pixParallelStageStopsIndex++) { if (numDetections == 0) { break; } if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 grid2((numDetections + NUM_THREADS_ANCHORSPARALLEL - 1) / NUM_THREADS_ANCHORSPARALLEL); if (numDetections > MAX_GRID_DIM) { grid2.x = MAX_GRID_DIM; grid2.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM; } dim3 block2(NUM_THREADS_ANCHORSPARALLEL); applyHaarClassifierAnchorParallelDynTemplate( false, //tbInitMaskPositively bTexCacheIImg, //tbCacheTextureIImg bTexCacheCascade, //tbCacheTextureCascade pixParallelStageStops[pixParallelStageStopsIndex] != 0 || pixelStep != 1 || bMaskElements,//tbReadPixelIndexFromVector bDoAtomicCompaction, //tbDoAtomicCompaction grid2, block2, cuStream, integral.ptr(), integral.stride(), d_weights.ptr(), d_weights.stride(), d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(), d_ptrNowData->ptr(), bDoAtomicCompaction ? 
d_ptrNowTmp->ptr() : d_ptrNowData->ptr(), numDetections, d_pixelMask.stride(), anchorsRoi, pixParallelStageStops[pixParallelStageStopsIndex], pixParallelStageStops[pixParallelStageStopsIndex+1], scaleAreaPixels); ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections, d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturnNcvStat(nppSt); } swap(d_ptrNowData, d_ptrNowTmp); numDetections = *hp_numDet; } // // 3. Run all left stages in one stage-parallel kernel // if (numDetections > 0 && stageMiddleSwitch < stageEndClassifierParallel) { if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaMemcpyToSymbolAsync(d_outMaskPosition, hp_zero, sizeof(Ncv32u), 0, cudaMemcpyHostToDevice, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } dim3 grid3(numDetections); if (numDetections > MAX_GRID_DIM) { grid3.x = MAX_GRID_DIM; grid3.y = (numDetections + MAX_GRID_DIM - 1) / MAX_GRID_DIM; } dim3 block3(NUM_THREADS_CLASSIFIERPARALLEL); applyHaarClassifierClassifierParallelDynTemplate( bTexCacheIImg, //tbCacheTextureIImg bTexCacheCascade, //tbCacheTextureCascade bDoAtomicCompaction, //tbDoAtomicCompaction grid3, block3, cuStream, integral.ptr(), integral.stride(), d_weights.ptr(), d_weights.stride(), d_HaarFeatures.ptr(), d_HaarNodes.ptr(), d_HaarStages.ptr(), d_ptrNowData->ptr(), bDoAtomicCompaction ? 
d_ptrNowTmp->ptr() : d_ptrNowData->ptr(), numDetections, d_pixelMask.stride(), anchorsRoi, stageMiddleSwitch, stageEndClassifierParallel, scaleAreaPixels); ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaMemcpyFromSymbolAsync(hp_numDet, d_outMaskPosition, sizeof(Ncv32u), 0, cudaMemcpyDeviceToHost, cuStream), NCV_CUDA_ERROR); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } else { NCVStatus nppSt; nppSt = nppsStCompact_32u(d_ptrNowData->ptr(), numDetections, d_ptrNowTmp->ptr(), hp_numDet, OBJDET_MASK_ELEMENT_INVALID_32U, d_tmpBufCompact.ptr(), szNppCompactTmpBuf, devProp); ncvAssertReturnNcvStat(nppSt); } swap(d_ptrNowData, d_ptrNowTmp); numDetections = *hp_numDet; } if (d_ptrNowData != &d_vecPixelMask) { d_vecPixelMaskTmp.copySolid(d_vecPixelMask, cuStream); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } #if defined _SELF_TEST_ ncvStat = d_pixelMask.copySolid(h_pixelMask_d, 0); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); if (bDoAtomicCompaction) { std::sort(h_pixelMask_d.ptr, h_pixelMask_d.ptr + numDetections); } Ncv32u fpu_oldcw, fpu_cw; _controlfp_s(&fpu_cw, 0, 0); fpu_oldcw = fpu_cw; _controlfp_s(&fpu_cw, _PC_24, _MCW_PC); Ncv32u numDetGold; ncvStat = ncvApplyHaarClassifierCascade_host(h_integralImage, h_weights, h_pixelMask, numDetGold, haar, h_HaarStages, h_HaarNodes, h_HaarFeatures, bMaskElements, anchorsRoi, pixelStep, scaleArea); ncvAssertReturnNcvStat(ncvStat); _controlfp_s(&fpu_cw, fpu_oldcw, _MCW_PC); bool bPass = true; if (numDetGold != numDetections) { printf("NCVHaarClassifierCascade::applyHaarClassifierCascade numdetections don't match: cpu=%d, gpu=%d\n", numDetGold, numDetections); bPass = false; } else { for (Ncv32u i=0; i<std::max(numDetGold, numDetections) && bPass; i++) { if (h_pixelMask.ptr[i] != h_pixelMask_d.ptr[i]) { printf("NCVHaarClassifierCascade::applyHaarClassifierCascade self test failed: i=%d, cpu=%d, gpu=%d\n", i, h_pixelMask.ptr[i], h_pixelMask_d.ptr[i]); bPass = false; } } } printf("NCVHaarClassifierCascade::applyHaarClassifierCascade %s\n", bPass?"PASSED":"FAILED"); #endif NCV_SKIP_COND_END return NCV_SUCCESS; } //============================================================================== // // HypothesesOperations file // //============================================================================== const Ncv32u NUM_GROW_THREADS = 128; __device__ __host__ NcvRect32u pixelToRect(Ncv32u pixel, Ncv32u width, Ncv32u height, Ncv32f scale) { NcvRect32u res; res.x = (Ncv32u)(scale * (pixel & 0xFFFF)); res.y = (Ncv32u)(scale * (pixel >> 16)); res.width = (Ncv32u)(scale * width); res.height = (Ncv32u)(scale * height); return res; } __global__ void growDetectionsKernel(Ncv32u *pixelMask, Ncv32u numElements, NcvRect32u *hypotheses, Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale) { Ncv32u blockId = blockIdx.y * 65535 + blockIdx.x; Ncv32u elemAddr = blockId * NUM_GROW_THREADS + threadIdx.x; if (elemAddr >= numElements) { return; } hypotheses[elemAddr] = pixelToRect(pixelMask[elemAddr], rectWidth, rectHeight, curScale); } NCVStatus ncvGrowDetectionsVector_device(NCVVector<Ncv32u> &pixelMask, Ncv32u numPixelMaskDetections, NCVVector<NcvRect32u> &hypotheses, Ncv32u &totalDetections, Ncv32u totalMaxDetections, Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale, cudaStream_t cuStream) { ncvAssertReturn(pixelMask.ptr() != NULL 
&& hypotheses.ptr() != NULL, NCV_NULL_PTR); ncvAssertReturn(pixelMask.memType() == hypotheses.memType() && pixelMask.memType() == NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI); ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE); ncvAssertReturn(totalMaxDetections <= hypotheses.length() && numPixelMaskDetections <= pixelMask.length() && totalDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT); NCVStatus ncvStat = NCV_SUCCESS; Ncv32u numDetsToCopy = numPixelMaskDetections; if (numDetsToCopy == 0) { return ncvStat; } if (totalDetections + numPixelMaskDetections > totalMaxDetections) { ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW; numDetsToCopy = totalMaxDetections - totalDetections; } dim3 block(NUM_GROW_THREADS); dim3 grid((numDetsToCopy + NUM_GROW_THREADS - 1) / NUM_GROW_THREADS); if (grid.x > 65535) { grid.y = (grid.x + 65534) / 65535; grid.x = 65535; } growDetectionsKernel<<<grid, block, 0, cuStream>>>(pixelMask.ptr(), numDetsToCopy, hypotheses.ptr() + totalDetections, rectWidth, rectHeight, curScale); ncvAssertCUDAReturn(cudaGetLastError(), NCV_CUDA_ERROR); totalDetections += numDetsToCopy; return ncvStat; } //============================================================================== // // Pipeline file // //============================================================================== NCVStatus ncvDetectObjectsMultiScale_device(NCVMatrix<Ncv8u> &d_srcImg, NcvSize32u srcRoi, NCVVector<NcvRect32u> &d_dstRects, Ncv32u &dstNumRects, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarStage64> &d_HaarStages, NCVVector<HaarClassifierNode128> &d_HaarNodes, NCVVector<HaarFeature64> &d_HaarFeatures, NcvSize32u minObjSize, Ncv32u minNeighbors, //default 4 Ncv32f scaleStep, //default 1.2f Ncv32u pixelStep, //default 1 Ncv32u flags, //default NCVPipeObjDet_Default INCVMemAllocator &gpuAllocator, INCVMemAllocator &cpuAllocator, cudaDeviceProp &devProp, cudaStream_t cuStream) { ncvAssertReturn(d_srcImg.memType() == d_dstRects.memType() && d_srcImg.memType() == gpuAllocator.memType() && (d_srcImg.memType() == NCVMemoryTypeDevice || d_srcImg.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(d_HaarStages.memType() == d_HaarNodes.memType() && d_HaarStages.memType() == d_HaarFeatures.memType() && (d_HaarStages.memType() == NCVMemoryTypeDevice || d_HaarStages.memType() == NCVMemoryTypeNone), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_HaarStages.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(gpuAllocator.isInitialized() && cpuAllocator.isInitialized(), NCV_ALLOCATOR_NOT_INITIALIZED); ncvAssertReturn((d_srcImg.ptr() != NULL && d_dstRects.ptr() != NULL && h_HaarStages.ptr() != NULL && d_HaarStages.ptr() != NULL && d_HaarNodes.ptr() != NULL && d_HaarFeatures.ptr() != NULL) || gpuAllocator.isCounting(), NCV_NULL_PTR); ncvAssertReturn(srcRoi.width > 0 && srcRoi.height > 0 && d_srcImg.width() >= srcRoi.width && d_srcImg.height() >= srcRoi.height && srcRoi.width >= minObjSize.width && srcRoi.height >= minObjSize.height && d_dstRects.length() >= 1, NCV_DIMENSIONS_INVALID); ncvAssertReturn(scaleStep > 1.0f, NCV_INVALID_SCALE); ncvAssertReturn(d_HaarStages.length() >= haar.NumStages && d_HaarNodes.length() >= haar.NumClassifierTotalNodes && d_HaarFeatures.length() >= haar.NumFeatures && d_HaarStages.length() == h_HaarStages.length() && haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID); 
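// Pipeline implemented below: the integral and squared-integral images are computed once at
// full resolution; for every admissible scale they are decimated, a per-anchor stddev map is
// produced with nppiStRectStdDev_32f_C1R, the cascade is applied via
// ncvApplyHaarClassifierCascade_device, and surviving anchors are expanded into rectangle
// hypotheses, which are finally grouped on the host (or reduced to the largest match when
// NCVPipeObjDet_FindLargestObject is requested).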
ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES); ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP); //TODO: set NPP active stream to cuStream NCVStatus ncvStat; NCV_SET_SKIP_COND(gpuAllocator.isCounting()); Ncv32u integralWidth = d_srcImg.width() + 1; Ncv32u integralHeight = d_srcImg.height() + 1; NCVMatrixAlloc<Ncv32u> integral(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(integral.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv64u> d_sqIntegralImage(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(d_sqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32f> d_rectStdDev(gpuAllocator, d_srcImg.width(), d_srcImg.height()); ncvAssertReturn(d_rectStdDev.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> d_pixelMask(gpuAllocator, d_srcImg.width(), d_srcImg.height()); ncvAssertReturn(d_pixelMask.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv32u> d_scaledIntegralImage(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(d_scaledIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVMatrixAlloc<Ncv64u> d_scaledSqIntegralImage(gpuAllocator, integralWidth, integralHeight); ncvAssertReturn(d_scaledSqIntegralImage.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<NcvRect32u> d_hypothesesIntermediate(gpuAllocator, d_srcImg.width() * d_srcImg.height()); ncvAssertReturn(d_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVVectorAlloc<NcvRect32u> h_hypothesesIntermediate(cpuAllocator, d_srcImg.width() * d_srcImg.height()); ncvAssertReturn(h_hypothesesIntermediate.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCVStatus nppStat; Ncv32u szTmpBufIntegral, szTmpBufSqIntegral; nppStat = nppiStIntegralGetSize_8u32u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufIntegral, devProp); ncvAssertReturnNcvStat(nppStat); nppStat = nppiStSqrIntegralGetSize_8u64u(NcvSize32u(d_srcImg.width(), d_srcImg.height()), &szTmpBufSqIntegral, devProp); ncvAssertReturnNcvStat(nppStat); NCVVectorAlloc<Ncv8u> d_tmpIIbuf(gpuAllocator, std::max(szTmpBufIntegral, szTmpBufSqIntegral)); ncvAssertReturn(d_tmpIIbuf.isMemAllocated(), NCV_ALLOCATOR_BAD_ALLOC); NCV_SKIP_COND_BEGIN nppStat = nppiStIntegral_8u32u_C1R(d_srcImg.ptr(), d_srcImg.pitch(), integral.ptr(), integral.pitch(), NcvSize32u(d_srcImg.width(), d_srcImg.height()), d_tmpIIbuf.ptr(), szTmpBufIntegral, devProp); ncvAssertReturnNcvStat(nppStat); nppStat = nppiStSqrIntegral_8u64u_C1R(d_srcImg.ptr(), d_srcImg.pitch(), d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(), NcvSize32u(d_srcImg.width(), d_srcImg.height()), d_tmpIIbuf.ptr(), szTmpBufSqIntegral, devProp); ncvAssertReturnNcvStat(nppStat); NCV_SKIP_COND_END dstNumRects = 0; Ncv32u lastCheckedScale = 0; NcvBool bReverseTraverseScale = ((flags & NCVPipeObjDet_FindLargestObject) != 0); std::vector<Ncv32u> scalesVector; NcvBool bFoundLargestFace = false; for (Ncv32f scaleIter = 1.0f; ; scaleIter *= scaleStep) { Ncv32u scale = (Ncv32u)scaleIter; if (lastCheckedScale == scale) { continue; } lastCheckedScale = scale; if (haar.ClassifierSize.width * (Ncv32s)scale < minObjSize.width || haar.ClassifierSize.height * (Ncv32s)scale < minObjSize.height) { continue; } NcvSize32s srcRoi_, srcIIRo_i, scaledIIRoi, searchRoi; srcRoi_.width = d_srcImg.width(); srcRoi_.height = d_srcImg.height(); srcIIRo_i.width = srcRoi_.width + 1; srcIIRo_i.height = srcRoi_.height + 1; scaledIIRoi.width = srcIIRo_i.width / scale; scaledIIRoi.height = 
srcIIRo_i.height / scale; searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width; searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height; if (searchRoi.width <= 0 || searchRoi.height <= 0) { break; } scalesVector.push_back(scale); if (gpuAllocator.isCounting()) { break; } } if (bReverseTraverseScale) { std::reverse(scalesVector.begin(), scalesVector.end()); } //TODO: handle _fair_scale_ flag for (Ncv32u i=0; i<scalesVector.size(); i++) { Ncv32u scale = scalesVector[i]; NcvSize32u srcRoi_, scaledIIRoi, searchRoi; NcvSize32u srcIIRoi; srcRoi_.width = d_srcImg.width(); srcRoi_.height = d_srcImg.height(); srcIIRoi.width = srcRoi_.width + 1; srcIIRoi.height = srcRoi_.height + 1; scaledIIRoi.width = srcIIRoi.width / scale; scaledIIRoi.height = srcIIRoi.height / scale; searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width; searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height; NCV_SKIP_COND_BEGIN nppStat = nppiStDecimate_32u_C1R( integral.ptr(), integral.pitch(), d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(), srcIIRoi, scale, true); ncvAssertReturnNcvStat(nppStat); nppStat = nppiStDecimate_64u_C1R( d_sqIntegralImage.ptr(), d_sqIntegralImage.pitch(), d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(), srcIIRoi, scale, true); ncvAssertReturnNcvStat(nppStat); const NcvRect32u rect( HAAR_STDDEV_BORDER, HAAR_STDDEV_BORDER, haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER, haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER); nppStat = nppiStRectStdDev_32f_C1R( d_scaledIntegralImage.ptr(), d_scaledIntegralImage.pitch(), d_scaledSqIntegralImage.ptr(), d_scaledSqIntegralImage.pitch(), d_rectStdDev.ptr(), d_rectStdDev.pitch(), NcvSize32u(searchRoi.width, searchRoi.height), rect, (Ncv32f)scale*scale, true); ncvAssertReturnNcvStat(nppStat); NCV_SKIP_COND_END Ncv32u detectionsOnThisScale; ncvStat = ncvApplyHaarClassifierCascade_device( d_scaledIntegralImage, d_rectStdDev, d_pixelMask, detectionsOnThisScale, haar, h_HaarStages, d_HaarStages, d_HaarNodes, d_HaarFeatures, false, searchRoi, pixelStep, (Ncv32f)scale*scale, gpuAllocator, cpuAllocator, devProp, cuStream); ncvAssertReturnNcvStat(nppStat); NCV_SKIP_COND_BEGIN NCVVectorReuse<Ncv32u> d_vecPixelMask(d_pixelMask.getSegment()); ncvStat = ncvGrowDetectionsVector_device( d_vecPixelMask, detectionsOnThisScale, d_hypothesesIntermediate, dstNumRects, static_cast<Ncv32u>(d_hypothesesIntermediate.length()), haar.ClassifierSize.width, haar.ClassifierSize.height, (Ncv32f)scale, cuStream); ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat); if (flags & NCVPipeObjDet_FindLargestObject) { if (dstNumRects == 0) { continue; } if (dstNumRects != 0) { ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream, dstNumRects * sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } Ncv32u numStrongHypothesesNow = dstNumRects; ncvStat = ncvGroupRectangles_host( h_hypothesesIntermediate, numStrongHypothesesNow, minNeighbors, RECT_SIMILARITY_PROPORTION, NULL); ncvAssertReturnNcvStat(ncvStat); if (numStrongHypothesesNow > 0) { NcvRect32u maxRect = h_hypothesesIntermediate.ptr()[0]; for (Ncv32u j=1; j<numStrongHypothesesNow; j++) { if (maxRect.width < h_hypothesesIntermediate.ptr()[j].width) { maxRect = h_hypothesesIntermediate.ptr()[j]; } } h_hypothesesIntermediate.ptr()[0] = maxRect; dstNumRects = 1; ncvStat = 
h_hypothesesIntermediate.copySolid(d_dstRects, cuStream, sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); bFoundLargestFace = true; break; } } NCV_SKIP_COND_END if (gpuAllocator.isCounting()) { break; } } NCVStatus ncvRetCode = NCV_SUCCESS; NCV_SKIP_COND_BEGIN if (flags & NCVPipeObjDet_FindLargestObject) { if (!bFoundLargestFace) { dstNumRects = 0; } } else { //TODO: move hypotheses filtration to GPU pipeline (the only CPU-resident element of the pipeline left) if (dstNumRects != 0) { ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvStat = d_hypothesesIntermediate.copySolid(h_hypothesesIntermediate, cuStream, dstNumRects * sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); } ncvStat = ncvGroupRectangles_host( h_hypothesesIntermediate, dstNumRects, minNeighbors, RECT_SIMILARITY_PROPORTION, NULL); ncvAssertReturnNcvStat(ncvStat); if (dstNumRects > d_dstRects.length()) { ncvRetCode = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW; dstNumRects = static_cast<Ncv32u>(d_dstRects.length()); } if (dstNumRects != 0) { ncvStat = h_hypothesesIntermediate.copySolid(d_dstRects, cuStream, dstNumRects * sizeof(NcvRect32u)); ncvAssertReturnNcvStat(ncvStat); } } if (flags & NCVPipeObjDet_VisualizeInPlace) { ncvAssertCUDAReturn(cudaStreamSynchronize(cuStream), NCV_CUDA_ERROR); ncvDrawRects_8u_device(d_srcImg.ptr(), d_srcImg.stride(), d_srcImg.width(), d_srcImg.height(), d_dstRects.ptr(), dstNumRects, 255, cuStream); } NCV_SKIP_COND_END return ncvRetCode; } //============================================================================== // // Purely Host code: classifier IO, mock-ups // //============================================================================== #ifdef _SELF_TEST_ #include <float.h> #endif NCVStatus ncvApplyHaarClassifierCascade_host(NCVMatrix<Ncv32u> &h_integralImage, NCVMatrix<Ncv32f> &h_weights, NCVMatrixAlloc<Ncv32u> &h_pixelMask, Ncv32u &numDetections, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarClassifierNode128> &h_HaarNodes, NCVVector<HaarFeature64> &h_HaarFeatures, NcvBool bMaskElements, NcvSize32u anchorsRoi, Ncv32u pixelStep, Ncv32f scaleArea) { ncvAssertReturn(h_integralImage.memType() == h_weights.memType() && h_integralImage.memType() == h_pixelMask.memType() && (h_integralImage.memType() == NCVMemoryTypeHostPageable || h_integralImage.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_HaarStages.memType() == h_HaarNodes.memType() && h_HaarStages.memType() == h_HaarFeatures.memType() && (h_HaarStages.memType() == NCVMemoryTypeHostPageable || h_HaarStages.memType() == NCVMemoryTypeHostPinned), NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(h_integralImage.ptr() != NULL && h_weights.ptr() != NULL && h_pixelMask.ptr() != NULL && h_HaarStages.ptr() != NULL && h_HaarNodes.ptr() != NULL && h_HaarFeatures.ptr() != NULL, NCV_NULL_PTR); ncvAssertReturn(anchorsRoi.width > 0 && anchorsRoi.height > 0 && h_pixelMask.width() >= anchorsRoi.width && h_pixelMask.height() >= anchorsRoi.height && h_weights.width() >= anchorsRoi.width && h_weights.height() >= anchorsRoi.height && h_integralImage.width() >= anchorsRoi.width + haar.ClassifierSize.width && h_integralImage.height() >= anchorsRoi.height + haar.ClassifierSize.height, NCV_DIMENSIONS_INVALID); ncvAssertReturn(scaleArea > 0, NCV_INVALID_SCALE); ncvAssertReturn(h_HaarStages.length() >= haar.NumStages && h_HaarNodes.length() >= haar.NumClassifierTotalNodes && 
h_HaarFeatures.length() >= haar.NumFeatures && h_HaarStages.length() == h_HaarStages.length() && haar.NumClassifierRootNodes <= haar.NumClassifierTotalNodes, NCV_DIMENSIONS_INVALID); ncvAssertReturn(haar.bNeedsTiltedII == false, NCV_NOIMPL_HAAR_TILTED_FEATURES); ncvAssertReturn(pixelStep == 1 || pixelStep == 2, NCV_HAAR_INVALID_PIXEL_STEP); Ncv32f scaleAreaPixels = scaleArea * ((haar.ClassifierSize.width - 2*HAAR_STDDEV_BORDER) * (haar.ClassifierSize.height - 2*HAAR_STDDEV_BORDER)); for (Ncv32u i=0; i<anchorsRoi.height; i++) { for (Ncv32u j=0; j<h_pixelMask.stride(); j++) { if (i % pixelStep != 0 || j % pixelStep != 0 || j >= anchorsRoi.width) { h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U; } else { for (Ncv32u iStage = 0; iStage < haar.NumStages; iStage++) { Ncv32f curStageSum = 0.0f; Ncv32u numRootNodesInStage = h_HaarStages.ptr()[iStage].getNumClassifierRootNodes(); Ncv32u curRootNodeOffset = h_HaarStages.ptr()[iStage].getStartClassifierRootNodeOffset(); if (iStage == 0) { if (bMaskElements && h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U) { break; } else { h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = ((i << 16) | j); } } else if (h_pixelMask.ptr()[i * h_pixelMask.stride() + j] == OBJDET_MASK_ELEMENT_INVALID_32U) { break; } while (numRootNodesInStage--) { NcvBool bMoreNodesToTraverse = true; Ncv32u curNodeOffset = curRootNodeOffset; while (bMoreNodesToTraverse) { HaarClassifierNode128 curNode = h_HaarNodes.ptr()[curNodeOffset]; HaarFeatureDescriptor32 curFeatDesc = curNode.getFeatureDesc(); Ncv32u curNodeFeaturesNum = curFeatDesc.getNumFeatures(); Ncv32u curNodeFeaturesOffs = curFeatDesc.getFeaturesOffset(); Ncv32f curNodeVal = 0.f; for (Ncv32u iRect=0; iRect<curNodeFeaturesNum; iRect++) { HaarFeature64 feature = h_HaarFeatures.ptr()[curNodeFeaturesOffs + iRect]; Ncv32u rectX, rectY, rectWidth, rectHeight; feature.getRect(&rectX, &rectY, &rectWidth, &rectHeight); Ncv32f rectWeight = feature.getWeight(); Ncv32u iioffsTL = (i + rectY) * h_integralImage.stride() + (j + rectX); Ncv32u iioffsTR = iioffsTL + rectWidth; Ncv32u iioffsBL = iioffsTL + rectHeight * h_integralImage.stride(); Ncv32u iioffsBR = iioffsBL + rectWidth; Ncv32u iivalTL = h_integralImage.ptr()[iioffsTL]; Ncv32u iivalTR = h_integralImage.ptr()[iioffsTR]; Ncv32u iivalBL = h_integralImage.ptr()[iioffsBL]; Ncv32u iivalBR = h_integralImage.ptr()[iioffsBR]; Ncv32u rectSum = iivalBR - iivalBL + iivalTL - iivalTR; curNodeVal += (Ncv32f)rectSum * rectWeight; } HaarClassifierNodeDescriptor32 nodeLeft = curNode.getLeftNodeDesc(); HaarClassifierNodeDescriptor32 nodeRight = curNode.getRightNodeDesc(); Ncv32f nodeThreshold = curNode.getThreshold(); HaarClassifierNodeDescriptor32 nextNodeDescriptor; NcvBool nextNodeIsLeaf; if (curNodeVal < scaleAreaPixels * h_weights.ptr()[i * h_weights.stride() + j] * nodeThreshold) { nextNodeDescriptor = nodeLeft; nextNodeIsLeaf = curFeatDesc.isLeftNodeLeaf(); } else { nextNodeDescriptor = nodeRight; nextNodeIsLeaf = curFeatDesc.isRightNodeLeaf(); } if (nextNodeIsLeaf) { Ncv32f tmpLeafValue = nextNodeDescriptor.getLeafValueHost(); curStageSum += tmpLeafValue; bMoreNodesToTraverse = false; } else { curNodeOffset = nextNodeDescriptor.getNextNodeOffset(); } } curRootNodeOffset++; } Ncv32f tmpStageThreshold = h_HaarStages.ptr()[iStage].getStageThreshold(); if (curStageSum < tmpStageThreshold) { //drop h_pixelMask.ptr()[i * h_pixelMask.stride() + j] = OBJDET_MASK_ELEMENT_INVALID_32U; break; } } } } } std::sort(h_pixelMask.ptr(), 
h_pixelMask.ptr() + anchorsRoi.height * h_pixelMask.stride()); Ncv32u i = 0; for (; i<anchorsRoi.height * h_pixelMask.stride(); i++) { if (h_pixelMask.ptr()[i] == OBJDET_MASK_ELEMENT_INVALID_32U) { break; } } numDetections = i; return NCV_SUCCESS; } NCVStatus ncvGrowDetectionsVector_host(NCVVector<Ncv32u> &pixelMask, Ncv32u numPixelMaskDetections, NCVVector<NcvRect32u> &hypotheses, Ncv32u &totalDetections, Ncv32u totalMaxDetections, Ncv32u rectWidth, Ncv32u rectHeight, Ncv32f curScale) { ncvAssertReturn(pixelMask.ptr() != NULL && hypotheses.ptr() != NULL, NCV_NULL_PTR); ncvAssertReturn(pixelMask.memType() == hypotheses.memType() && pixelMask.memType() != NCVMemoryTypeDevice, NCV_MEM_RESIDENCE_ERROR); ncvAssertReturn(rectWidth > 0 && rectHeight > 0 && curScale > 0, NCV_INVALID_ROI); ncvAssertReturn(curScale > 0, NCV_INVALID_SCALE); ncvAssertReturn(totalMaxDetections <= hypotheses.length() && numPixelMaskDetections <= pixelMask.length() && totalDetections <= totalMaxDetections, NCV_INCONSISTENT_INPUT); NCVStatus ncvStat = NCV_SUCCESS; Ncv32u numDetsToCopy = numPixelMaskDetections; if (numDetsToCopy == 0) { return ncvStat; } if (totalDetections + numPixelMaskDetections > totalMaxDetections) { ncvStat = NCV_WARNING_HAAR_DETECTIONS_VECTOR_OVERFLOW; numDetsToCopy = totalMaxDetections - totalDetections; } for (Ncv32u i=0; i<numDetsToCopy; i++) { hypotheses.ptr()[totalDetections + i] = pixelToRect(pixelMask.ptr()[i], rectWidth, rectHeight, curScale); } totalDetections += numDetsToCopy; return ncvStat; } #define RECT_X_IDX 0 #define RECT_Y_IDX 1 #define RECT_W_IDX 2 #define RECT_H_IDX 3 #define RECT_WEIGHT_IDX 4 #define CUDA_CC_SIZE_W 0 #define CUDA_CC_SIZE_H 1 static NCVStatus loadFromXML(const cv::String &filename, HaarClassifierCascadeDescriptor &haar, std::vector<HaarStage64> &haarStages, std::vector<HaarClassifierNode128> &haarClassifierNodes, std::vector<HaarFeature64> &haarFeatures) { const char *CUDA_CC_SIZE = "size"; const char *CUDA_CC_STAGES = "stages"; const char *CUDA_CC_STAGE_THRESHOLD = "stage_threshold"; const char *CUDA_CC_TREES = "trees"; const char *CUDA_CC_FEATURE = "feature"; const char *CUDA_CC_RECT = "rects"; const char *CUDA_CC_TILTED = "tilted"; const char *CUDA_CC_THRESHOLD = "threshold"; const char *CUDA_CC_LEFT_VAL = "left_val"; const char *CUDA_CC_RIGHT_VAL = "right_val"; const char *CUDA_CC_LEFT_NODE = "left_node"; const char *CUDA_CC_RIGHT_NODE = "right_node"; NCVStatus ncvStat; haar.NumStages = 0; haar.NumClassifierRootNodes = 0; haar.NumClassifierTotalNodes = 0; haar.NumFeatures = 0; haar.ClassifierSize.width = 0; haar.ClassifierSize.height = 0; haar.bHasStumpsOnly = true; haar.bNeedsTiltedII = false; Ncv32u curMaxTreeDepth = 0; std::vector<HaarClassifierNode128> h_TmpClassifierNotRootNodes; haarStages.resize(0); haarClassifierNodes.resize(0); haarFeatures.resize(0); cv::FileStorage fs(filename, cv::FileStorage::READ | cv::FileStorage::FORMAT_XML); if (!fs.isOpened()) return NCV_FILE_ERROR; const cv::FileNode &root = fs.getFirstTopLevelNode(); const cv::FileNode &fnSize = root[CUDA_CC_SIZE]; // collect the cascade classifier window size haar.ClassifierSize.width = (int)fnSize[CUDA_CC_SIZE_W]; haar.ClassifierSize.height = (int)fnSize[CUDA_CC_SIZE_H]; CV_Assert(haar.ClassifierSize.height > 0 && haar.ClassifierSize.width > 0); const cv::FileNode &fnStages = root[CUDA_CC_STAGES]; cv::FileNodeIterator it = fnStages.begin(), it_end = fnStages.end(); for (; it != it_end; ++it) // by stages { cv::FileNode fnStage = *it; HaarStage64 curStage; 
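            // Each stage records the offset of its first root classifier node
            // (the current size of haarClassifierNodes) and its stage threshold;
            // the stage's trees are parsed below, with root nodes appended to
            // haarClassifierNodes and non-root nodes collected separately in
            // h_TmpClassifierNotRootNodes to be merged in afterwards.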
curStage.setStartClassifierRootNodeOffset(static_cast<Ncv32u>(haarClassifierNodes.size())); curStage.setStageThreshold((float)fnStage[CUDA_CC_STAGE_THRESHOLD]); // iterate over the trees const cv::FileNode &fnTrees = fnStage[CUDA_CC_TREES]; cv::FileNodeIterator it1 = fnTrees.begin(), it1_end = fnTrees.end(); for (; it1 != it1_end; ++it1) // by trees { cv::FileNode tree = *it1; Ncv32u nodeId = (size_t)0; HaarClassifierNode128 curNode; curNode.setThreshold((float)tree[0][CUDA_CC_THRESHOLD]); NcvBool bIsLeftNodeLeaf = false; NcvBool bIsRightNodeLeaf = false; HaarClassifierNodeDescriptor32 nodeLeft; cv::FileNode leftNode = tree[0][CUDA_CC_LEFT_NODE]; if (leftNode.fs == NULL) { Ncv32f leftVal = tree[0][CUDA_CC_LEFT_VAL]; ncvStat = nodeLeft.create(leftVal); ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat); bIsLeftNodeLeaf = true; } else { Ncv32u leftNodeOffset = (int)tree[0][CUDA_CC_LEFT_NODE]; nodeLeft.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + leftNodeOffset - 1)); haar.bHasStumpsOnly = false; } curNode.setLeftNodeDesc(nodeLeft); HaarClassifierNodeDescriptor32 nodeRight; cv::FileNode rightNode = tree[0][CUDA_CC_RIGHT_NODE]; if (rightNode.fs == NULL) { Ncv32f rightVal = tree[0][CUDA_CC_RIGHT_VAL]; ncvStat = nodeRight.create(rightVal); ncvAssertReturn(ncvStat == NCV_SUCCESS, ncvStat); bIsRightNodeLeaf = true; } else { Ncv32u rightNodeOffset = (int)tree[0][CUDA_CC_RIGHT_NODE]; nodeRight.create((Ncv32u)(h_TmpClassifierNotRootNodes.size() + rightNodeOffset - 1)); haar.bHasStumpsOnly = false; } curNode.setRightNodeDesc(nodeRight); cv::FileNode fnFeature = tree[0][CUDA_CC_FEATURE]; Ncv32u tiltedVal = (int)fnFeature[CUDA_CC_TILTED]; haar.bNeedsTiltedII = (tiltedVal != 0); cv::FileNodeIterator it2 = fnFeature[CUDA_CC_RECT].begin(), it2_end = fnFeature[CUDA_CC_RECT].end(); Ncv32u featureId = 0; for (; it2 != it2_end; ++it2) // by feature { cv::FileNode rect = *it2; Ncv32u rectX = (int)rect[RECT_X_IDX]; Ncv32u rectY = (int)rect[RECT_Y_IDX]; Ncv32u rectWidth = (int)rect[RECT_W_IDX]; Ncv32u rectHeight = (int)rect[RECT_H_IDX]; Ncv32f rectWeight = (float)rect[RECT_WEIGHT_IDX]; if (rectWeight == 0) break; HaarFeature64 curFeature; ncvStat = curFeature.setRect(rectX, rectY, rectWidth, rectHeight, haar.ClassifierSize.width, haar.ClassifierSize.height); curFeature.setWeight(rectWeight); ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat); haarFeatures.push_back(curFeature); featureId++; } HaarFeatureDescriptor32 tmpFeatureDesc; ncvStat = tmpFeatureDesc.create(haar.bNeedsTiltedII, bIsLeftNodeLeaf, bIsRightNodeLeaf, featureId, static_cast<Ncv32u>(haarFeatures.size()) - featureId); ncvAssertReturn(NCV_SUCCESS == ncvStat, ncvStat); curNode.setFeatureDesc(tmpFeatureDesc); if (!nodeId) { //root node haarClassifierNodes.push_back(curNode); curMaxTreeDepth = 1; } else { //other node h_TmpClassifierNotRootNodes.push_back(curNode); curMaxTreeDepth++; } nodeId++; } curStage.setNumClassifierRootNodes((Ncv32u)fnTrees.size()); haarStages.push_back(curStage); } //fill in cascade stats haar.NumStages = static_cast<Ncv32u>(haarStages.size()); haar.NumClassifierRootNodes = static_cast<Ncv32u>(haarClassifierNodes.size()); haar.NumClassifierTotalNodes = static_cast<Ncv32u>(haar.NumClassifierRootNodes + h_TmpClassifierNotRootNodes.size()); haar.NumFeatures = static_cast<Ncv32u>(haarFeatures.size()); //merge root and leaf nodes in one classifiers array Ncv32u offsetRoot = static_cast<Ncv32u>(haarClassifierNodes.size()); for (Ncv32u i=0; i<haarClassifierNodes.size(); i++) { HaarFeatureDescriptor32 featureDesc = 
haarClassifierNodes[i].getFeatureDesc(); HaarClassifierNodeDescriptor32 nodeLeft = haarClassifierNodes[i].getLeftNodeDesc(); if (!featureDesc.isLeftNodeLeaf()) { Ncv32u newOffset = nodeLeft.getNextNodeOffset() + offsetRoot; nodeLeft.create(newOffset); } haarClassifierNodes[i].setLeftNodeDesc(nodeLeft); HaarClassifierNodeDescriptor32 nodeRight = haarClassifierNodes[i].getRightNodeDesc(); if (!featureDesc.isRightNodeLeaf()) { Ncv32u newOffset = nodeRight.getNextNodeOffset() + offsetRoot; nodeRight.create(newOffset); } haarClassifierNodes[i].setRightNodeDesc(nodeRight); } for (Ncv32u i=0; i<h_TmpClassifierNotRootNodes.size(); i++) { HaarFeatureDescriptor32 featureDesc = h_TmpClassifierNotRootNodes[i].getFeatureDesc(); HaarClassifierNodeDescriptor32 nodeLeft = h_TmpClassifierNotRootNodes[i].getLeftNodeDesc(); if (!featureDesc.isLeftNodeLeaf()) { Ncv32u newOffset = nodeLeft.getNextNodeOffset() + offsetRoot; nodeLeft.create(newOffset); } h_TmpClassifierNotRootNodes[i].setLeftNodeDesc(nodeLeft); HaarClassifierNodeDescriptor32 nodeRight = h_TmpClassifierNotRootNodes[i].getRightNodeDesc(); if (!featureDesc.isRightNodeLeaf()) { Ncv32u newOffset = nodeRight.getNextNodeOffset() + offsetRoot; nodeRight.create(newOffset); } h_TmpClassifierNotRootNodes[i].setRightNodeDesc(nodeRight); haarClassifierNodes.push_back(h_TmpClassifierNotRootNodes[i]); } return NCV_SUCCESS; } #define NVBIN_HAAR_SIZERESERVED 16 #define NVBIN_HAAR_VERSION 0x1 static NCVStatus loadFromNVBIN(const cv::String &filename, HaarClassifierCascadeDescriptor &haar, std::vector<HaarStage64> &haarStages, std::vector<HaarClassifierNode128> &haarClassifierNodes, std::vector<HaarFeature64> &haarFeatures) { size_t readCount; FILE *fp = fopen(filename.c_str(), "rb"); ncvAssertReturn(fp != NULL, NCV_FILE_ERROR); Ncv32u fileVersion; readCount = fread(&fileVersion, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR); Ncv32u fsize; readCount = fread(&fsize, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); fseek(fp, 0, SEEK_END); Ncv32u fsizeActual = ftell(fp); ncvAssertReturn(fsize == fsizeActual, NCV_FILE_ERROR); std::vector<unsigned char> fdata; fdata.resize(fsize); Ncv32u dataOffset = 0; fseek(fp, 0, SEEK_SET); readCount = fread(&fdata[0], fsize, 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); fclose(fp); //data dataOffset = NVBIN_HAAR_SIZERESERVED; haar.NumStages = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.NumClassifierRootNodes = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.NumClassifierTotalNodes = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.NumFeatures = *(Ncv32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(Ncv32u); haar.ClassifierSize = *(NcvSize32u *)(&fdata[0]+dataOffset); dataOffset += sizeof(NcvSize32u); haar.bNeedsTiltedII = *(NcvBool *)(&fdata[0]+dataOffset); dataOffset += sizeof(NcvBool); haar.bHasStumpsOnly = *(NcvBool *)(&fdata[0]+dataOffset); dataOffset += sizeof(NcvBool); haarStages.resize(haar.NumStages); haarClassifierNodes.resize(haar.NumClassifierTotalNodes); haarFeatures.resize(haar.NumFeatures); Ncv32u szStages = haar.NumStages * sizeof(HaarStage64); Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128); Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64); memcpy(&haarStages[0], &fdata[0]+dataOffset, szStages); dataOffset += szStages; memcpy(&haarClassifierNodes[0], &fdata[0]+dataOffset, 
szClassifiers); dataOffset += szClassifiers; memcpy(&haarFeatures[0], &fdata[0]+dataOffset, szFeatures); dataOffset += szFeatures; return NCV_SUCCESS; } NCVStatus ncvHaarGetClassifierSize(const cv::String &filename, Ncv32u &numStages, Ncv32u &numNodes, Ncv32u &numFeatures) { size_t readCount; NCVStatus ncvStat; cv::String fext = filename.substr(filename.find_last_of(".") + 1); std::transform(fext.begin(), fext.end(), fext.begin(), ::tolower); if (fext == "nvbin") { FILE *fp = fopen(filename.c_str(), "rb"); ncvAssertReturn(fp != NULL, NCV_FILE_ERROR); Ncv32u fileVersion; readCount = fread(&fileVersion, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); ncvAssertReturn(fileVersion == NVBIN_HAAR_VERSION, NCV_FILE_ERROR); fseek(fp, NVBIN_HAAR_SIZERESERVED, SEEK_SET); Ncv32u tmp; readCount = fread(&numStages, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); readCount = fread(&tmp, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); readCount = fread(&numNodes, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); readCount = fread(&numFeatures, sizeof(Ncv32u), 1, fp); ncvAssertReturn(1 == readCount, NCV_FILE_ERROR); fclose(fp); } else if (fext == "xml") { HaarClassifierCascadeDescriptor haar; std::vector<HaarStage64> haarStages; std::vector<HaarClassifierNode128> haarNodes; std::vector<HaarFeature64> haarFeatures; ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures); ncvAssertReturnNcvStat(ncvStat); numStages = haar.NumStages; numNodes = haar.NumClassifierTotalNodes; numFeatures = haar.NumFeatures; } else { return NCV_HAAR_XML_LOADING_EXCEPTION; } return NCV_SUCCESS; } NCVStatus ncvHaarLoadFromFile_host(const cv::String &filename, HaarClassifierCascadeDescriptor &haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarClassifierNode128> &h_HaarNodes, NCVVector<HaarFeature64> &h_HaarFeatures) { ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned && h_HaarNodes.memType() == NCVMemoryTypeHostPinned && h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR); NCVStatus ncvStat; cv::String fext = filename.substr(filename.find_last_of(".") + 1); std::transform(fext.begin(), fext.end(), fext.begin(), ::tolower); std::vector<HaarStage64> haarStages; std::vector<HaarClassifierNode128> haarNodes; std::vector<HaarFeature64> haarFeatures; if (fext == "nvbin") { ncvStat = loadFromNVBIN(filename, haar, haarStages, haarNodes, haarFeatures); ncvAssertReturnNcvStat(ncvStat); } else if (fext == "xml") { ncvStat = loadFromXML(filename, haar, haarStages, haarNodes, haarFeatures); ncvAssertReturnNcvStat(ncvStat); } else { return NCV_HAAR_XML_LOADING_EXCEPTION; } ncvAssertReturn(h_HaarStages.length() >= haarStages.size(), NCV_MEM_INSUFFICIENT_CAPACITY); ncvAssertReturn(h_HaarNodes.length() >= haarNodes.size(), NCV_MEM_INSUFFICIENT_CAPACITY); ncvAssertReturn(h_HaarFeatures.length() >= haarFeatures.size(), NCV_MEM_INSUFFICIENT_CAPACITY); memcpy(h_HaarStages.ptr(), &haarStages[0], haarStages.size()*sizeof(HaarStage64)); memcpy(h_HaarNodes.ptr(), &haarNodes[0], haarNodes.size()*sizeof(HaarClassifierNode128)); memcpy(h_HaarFeatures.ptr(), &haarFeatures[0], haarFeatures.size()*sizeof(HaarFeature64)); return NCV_SUCCESS; } NCVStatus ncvHaarStoreNVBIN_host(const cv::String &filename, HaarClassifierCascadeDescriptor haar, NCVVector<HaarStage64> &h_HaarStages, NCVVector<HaarClassifierNode128> &h_HaarNodes, NCVVector<HaarFeature64> &h_HaarFeatures) { 
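    // The NVBIN layout written below mirrors what loadFromNVBIN above expects:
    // a Ncv32u version tag, a Ncv32u total file size, padding up to
    // NVBIN_HAAR_SIZERESERVED bytes, then NumStages, NumClassifierRootNodes,
    // NumClassifierTotalNodes, NumFeatures, ClassifierSize, bNeedsTiltedII and
    // bHasStumpsOnly, followed by the raw HaarStage64, HaarClassifierNode128
    // and HaarFeature64 arrays.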
ncvAssertReturn(h_HaarStages.length() >= haar.NumStages, NCV_INCONSISTENT_INPUT); ncvAssertReturn(h_HaarNodes.length() >= haar.NumClassifierTotalNodes, NCV_INCONSISTENT_INPUT); ncvAssertReturn(h_HaarFeatures.length() >= haar.NumFeatures, NCV_INCONSISTENT_INPUT); ncvAssertReturn(h_HaarStages.memType() == NCVMemoryTypeHostPinned && h_HaarNodes.memType() == NCVMemoryTypeHostPinned && h_HaarFeatures.memType() == NCVMemoryTypeHostPinned, NCV_MEM_RESIDENCE_ERROR); Ncv32u szStages = haar.NumStages * sizeof(HaarStage64); Ncv32u szClassifiers = haar.NumClassifierTotalNodes * sizeof(HaarClassifierNode128); Ncv32u szFeatures = haar.NumFeatures * sizeof(HaarFeature64); Ncv32u dataOffset = 0; std::vector<unsigned char> fdata; fdata.resize(szStages+szClassifiers+szFeatures+1024, 0); //header *(Ncv32u *)(&fdata[0]+dataOffset) = NVBIN_HAAR_VERSION; //data dataOffset = NVBIN_HAAR_SIZERESERVED; *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumStages; dataOffset += sizeof(Ncv32u); *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierRootNodes; dataOffset += sizeof(Ncv32u); *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumClassifierTotalNodes; dataOffset += sizeof(Ncv32u); *(Ncv32u *)(&fdata[0]+dataOffset) = haar.NumFeatures; dataOffset += sizeof(Ncv32u); *(NcvSize32u *)(&fdata[0]+dataOffset) = haar.ClassifierSize; dataOffset += sizeof(NcvSize32u); *(NcvBool *)(&fdata[0]+dataOffset) = haar.bNeedsTiltedII; dataOffset += sizeof(NcvBool); *(NcvBool *)(&fdata[0]+dataOffset) = haar.bHasStumpsOnly; dataOffset += sizeof(NcvBool); memcpy(&fdata[0]+dataOffset, h_HaarStages.ptr(), szStages); dataOffset += szStages; memcpy(&fdata[0]+dataOffset, h_HaarNodes.ptr(), szClassifiers); dataOffset += szClassifiers; memcpy(&fdata[0]+dataOffset, h_HaarFeatures.ptr(), szFeatures); dataOffset += szFeatures; Ncv32u fsize = dataOffset; //TODO: CRC32 here //update header dataOffset = sizeof(Ncv32u); *(Ncv32u *)(&fdata[0]+dataOffset) = fsize; FILE *fp = fopen(filename.c_str(), "wb"); ncvAssertReturn(fp != NULL, NCV_FILE_ERROR); fwrite(&fdata[0], fsize, 1, fp); fclose(fp); return NCV_SUCCESS; }
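//==============================================================================
//
// Usage sketch (illustrative only; the cascade file name, allocators and
// device/host vectors are placeholders the caller must set up):
//
//   Ncv32u numStages, numNodes, numFeatures;
//   ncvHaarGetClassifierSize("cascade.xml", numStages, numNodes, numFeatures);
//   // size pinned host vectors from the counts above, then:
//   ncvHaarLoadFromFile_host("cascade.xml", haar, h_HaarStages, h_HaarNodes, h_HaarFeatures);
//   // copy stages/nodes/features to device vectors and run detection per frame:
//   ncvDetectObjectsMultiScale_device(d_srcImg, srcRoi, d_dstRects, dstNumRects,
//                                     haar, h_HaarStages, d_HaarStages, d_HaarNodes,
//                                     d_HaarFeatures, minObjSize, 4 /*minNeighbors*/,
//                                     1.2f /*scaleStep*/, 1 /*pixelStep*/,
//                                     NCVPipeObjDet_Default, gpuAllocator, cpuAllocator,
//                                     devProp, cuStream);
//
//==============================================================================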
e910718332f30a8279ecc5575a050b34fc859036.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <type_traits> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/NestedTensorImpl.h> #include <ATen/TensorAccessor.h> #include <c10/util/Logging.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/native/NonSymbolicBC.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/MemoryAccess.cuh> #include <ATen/native/hip/PersistentSoftmax.cuh> #include <ATen/native/hip/block_reduce.cuh> #include <c10/hip/HIPMathCompat.h> #include <ATen/native/transformers/attention.h> #include <ATen/native/nested/NestedTensorUtils.h> #include <ATen/native/nested/NestedTensorTransformerFunctions.h> #include <ATen/native/nested/NestedTensorUtils.h> #include <ATen/native/transformers/hip/sdp_utils.h> #ifdef USE_FLASH_ATTENTION #include <ATen/native/transformers/hip/flash_attn/fmha_api.h> #include <ATen/native/transformers/hip/mem_eff_attention/kernel_forward.h> #endif namespace at { namespace native { namespace { #define DISPATCH_BLOCKSIZE(VALUE_HEAD_DIM, FN) \ { \ if (VALUE_HEAD_DIM <= 64) { \ constexpr bool kIs64x64 = true; \ constexpr bool kSingleValueIteration = true; \ FN(); \ } else { \ constexpr bool kIs64x64 = false; \ if (VALUE_HEAD_DIM <= 128) { \ constexpr bool kSingleValueIteration = true; \ FN(); \ } else { \ constexpr bool kSingleValueIteration = false; \ FN(); \ } \ } \ } #define DISPATCH_KERNEL(QUERY, KEY, VALUE, FUNC) \ { \ hipDeviceProp_t* properties = \ at::cuda::getDeviceProperties(QUERY.device().index()); \ const int computeCapability = properties->major * 10 + properties->minor; \ DISPATCH_BLOCKSIZE( \ VALUE.size(-1), ([&]() { \ static constexpr int64_t kQueriesPerBlock = kIs64x64 ? 64 : 32; \ static constexpr int64_t kKeysPerBlock = kIs64x64 ? 64 : 128; \ DISPATCH_TYPES( \ QUERY, ([&]() { \ DISPATCH_ARCHTAG( \ computeCapability, ([&]() { \ using AlignedAK = AttentionKernel< \ scalar_t, \ ArchTag, \ true, \ kQueriesPerBlock, \ kKeysPerBlock, \ kSingleValueIteration>; \ /* Run a more efficient kernel (with `isAligned=True`) \ if memory is correctly aligned*/ \ bool isAligned = \ (QUERY.stride(2) % AlignedAK::kAlignmentQ == 0 && \ KEY.stride(2) % AlignedAK::kAlignmentK == 0 && \ VALUE.stride(2) % AlignedAK::kAlignmentV == 0); \ /* TODO: Should we warn or log somewhere when we use a \ less efficient kernel due to wrong alignment? */ \ DISPATCH_BOOL(isAligned, kIsAligned, ([&]() { \ using Kernel = AttentionKernel< \ scalar_t, \ ArchTag, \ kIsAligned, \ kQueriesPerBlock, \ kKeysPerBlock, \ kSingleValueIteration>; \ FUNC(); \ })) \ })) \ })); \ })); \ } static constexpr int TRANSFORM_BIAS_RESCALE_VEC = 4; template <typename scalar_t, typename accscalar_t, bool assume_aligned> __global__ void transform_bias_rescale_qkv_kernel( // [B, T, 3 * D] const PackedTensorAccessor64<scalar_t, 3, RestrictPtrTraits> qkv, // [3 * D] const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias, // [3, B, NH, T, DH] PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v, const scalar_t inv_sqrt_dim_per_head) { // warp per DH. // so launch B * NH * T warps. 
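  // In the launch below, blockIdx.x runs over B * T, so each block handles one
  // (batch, token) pair; each thread then strides over the D = NH * DH channels
  // of that token, adding the matching bias slice and scaling q by
  // inv_sqrt_dim_per_head. The assume_aligned path additionally requires
  // D % TRANSFORM_BIAS_RESCALE_VEC == 0 so the vectorized loads/stores stay in bounds.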
auto NH = q_k_v.size(2); auto T = q_k_v.size(3); auto DH = q_k_v.size(4); auto t = blockIdx.x % T; auto b = blockIdx.x / T; auto D = NH * DH; if (assume_aligned) { constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC; using LoadT = memory::aligned_vector<scalar_t, VEC>; for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) { auto d = d_v * VEC; auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q[VEC]; scalar_t qkv_bias_k[VEC]; scalar_t qkv_bias_v[VEC]; scalar_t qkv_q[VEC]; scalar_t qkv_k[VEC]; scalar_t qkv_v[VEC]; // Here we require D % VEC == 0 for these vectorized loads. *reinterpret_cast<LoadT*>(&qkv_bias_q) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_k) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_v) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]); *reinterpret_cast<LoadT*>(&qkv_q) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_k) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_v) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 2 * D]); #pragma unroll // TODO: specialize for float2half2/half2float2? for (auto ii = 0; ii < VEC; ++ii) { qkv_q[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[ii]) + static_cast<accscalar_t>(qkv_bias_q[ii])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[ii]) + static_cast<accscalar_t>(qkv_bias_k[ii]))); qkv_v[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[ii]) + static_cast<accscalar_t>(qkv_bias_v[ii]))); } // Here we require DH % VEC == 0 for these vectorized stores. *reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_q); *reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_k); *reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_v); } } else { // Same as above, but we can't vectorize memory access. for (int32_t d = threadIdx.x; d < D; d += blockDim.x) { auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q = qkv_bias[d + 0 * D]; scalar_t qkv_bias_k = qkv_bias[d + 1 * D]; scalar_t qkv_bias_v = qkv_bias[d + 2 * D]; scalar_t qkv_q = qkv[b][t][d + 0 * D]; scalar_t qkv_k = qkv[b][t][d + 1 * D]; scalar_t qkv_v = qkv[b][t][d + 2 * D]; qkv_q = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q) + static_cast<accscalar_t>(qkv_bias_q)) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k) + static_cast<accscalar_t>(qkv_bias_k))); qkv_v = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v) + static_cast<accscalar_t>(qkv_bias_v))); q_k_v[0][b][nh][t][dh] = qkv_q; q_k_v[1][b][nh][t][dh] = qkv_k; q_k_v[2][b][nh][t][dh] = qkv_v; } } } template <typename scalar_t, typename accscalar_t, bool assume_aligned = false> __global__ void transform_bias_rescale_qkv_add_padding_kernel( // [B, T, 3 * D], but it's a NestedTensor buffer const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv, // [3 * D] const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias, const int* offsets, const int* input_sizes, // [3, B, NH, T, DH] PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v, const scalar_t inv_sqrt_dim_per_head) { // warp per DH. // so launch B * NH * T warps. 
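  // Same layout as the kernel above, but the input is a NestedTensor buffer:
  // offsets[b] gives the start of batch b in the packed qkv buffer and
  // input_sizes bounds each sequence, so positions past a sequence's end are
  // written out as zeros (padding) in q_k_v.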
const auto NH = q_k_v.size(2); const auto T = q_k_v.size(3); const auto DH = q_k_v.size(4); const auto t = blockIdx.x % T; const auto b = blockIdx.x / T; const auto D = NH * DH; const auto _3D = 3 * D; const auto offset_for_batch = offsets[b]; const auto input_dim = 1; const auto* sizes_i = input_sizes + b * input_dim; if (assume_aligned) { constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC; using LoadT = memory::aligned_vector<scalar_t, VEC>; for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) { auto d = d_v * VEC; auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q[VEC]; scalar_t qkv_bias_k[VEC]; scalar_t qkv_bias_v[VEC]; scalar_t qkv_q[VEC]; scalar_t qkv_k[VEC]; scalar_t qkv_v[VEC]; const auto first_item_offset = t * _3D + d; const auto last_item_offset = first_item_offset + VEC - 1; const bool first_item_in_bounds = first_item_offset < sizes_i[0]; const bool entire_vec_in_bounds = last_item_offset < sizes_i[0]; // Here we require D % VEC == 0 for these vectorized loads. *reinterpret_cast<LoadT*>(&qkv_bias_q) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_k) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_v) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]); if (entire_vec_in_bounds) { const auto offset = offset_for_batch + first_item_offset; *reinterpret_cast<LoadT*>(&qkv_q) = *reinterpret_cast<const LoadT*>(&qkv[offset + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_k) = *reinterpret_cast<const LoadT*>(&qkv[offset + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_v) = *reinterpret_cast<const LoadT*>(&qkv[offset + 2 * D]); #pragma unroll // TODO: specialize for float2half2/half2float2? for (auto ii = 0; ii < VEC; ++ii) { qkv_q[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[ii]) + static_cast<accscalar_t>(qkv_bias_q[ii])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[ii]) + static_cast<accscalar_t>(qkv_bias_k[ii]))); qkv_v[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[ii]) + static_cast<accscalar_t>(qkv_bias_v[ii]))); } } else if (first_item_in_bounds) { const auto offset = offset_for_batch + first_item_offset; qkv_q[0] = qkv[offset + 0 * D]; qkv_k[0] = qkv[offset + 1 * D]; qkv_v[0] = qkv[offset + 2 * D]; qkv_q[0] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[0]) + static_cast<accscalar_t>(qkv_bias_q[0])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[0] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[0]) + static_cast<accscalar_t>(qkv_bias_k[0]))); qkv_v[0] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[0]) + static_cast<accscalar_t>(qkv_bias_v[0]))); #pragma unroll for (auto ii = 1; ii < VEC; ++ii) { const auto loop_offset = offset + ii; if (loop_offset < sizes_i[0]) { qkv_q[ii] = qkv[loop_offset + 0 * D]; qkv_k[ii] = qkv[loop_offset + 1 * D]; qkv_v[ii] = qkv[loop_offset + 2 * D]; qkv_q[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[ii]) + static_cast<accscalar_t>(qkv_bias_q[ii])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[ii]) + static_cast<accscalar_t>(qkv_bias_k[ii]))); qkv_v[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[ii]) + static_cast<accscalar_t>(qkv_bias_v[ii]))); } else { qkv_q[ii] = 0; qkv_k[ii] = 0; qkv_v[ii] = 0; } } } else { #pragma unroll for (auto ii = 0; ii < VEC; ++ii) { qkv_q[ii] = 0; qkv_k[ii] = 0; qkv_v[ii] = 0; } } // Here 
we require DH % VEC == 0 for these vectorized stores. *reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_q); *reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_k); *reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_v); } } else { for (int32_t d = threadIdx.x; d < D; d += blockDim.x) { auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q = qkv_bias[d + 0 * D]; scalar_t qkv_bias_k = qkv_bias[d + 1 * D]; scalar_t qkv_bias_v = qkv_bias[d + 2 * D]; const auto item_offset = t * _3D + d; const bool in_bounds = item_offset < sizes_i[0]; scalar_t qkv_q, qkv_k, qkv_v; if (in_bounds) { const auto qkv_offset = offset_for_batch + item_offset; qkv_q = qkv[qkv_offset + 0 * D]; qkv_k = qkv[qkv_offset + 1 * D]; qkv_v = qkv[qkv_offset + 2 * D]; qkv_q = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q) + static_cast<accscalar_t>(qkv_bias_q)) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k) + static_cast<accscalar_t>(qkv_bias_k))); qkv_v = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v) + static_cast<accscalar_t>(qkv_bias_v))); } else { qkv_q = 0; qkv_k = 0; qkv_v = 0; } q_k_v[0][b][nh][t][dh] = qkv_q; q_k_v[1][b][nh][t][dh] = qkv_k; q_k_v[2][b][nh][t][dh] = qkv_v; } } } Tensor collapse_dims_1_and_2(const Tensor& sizes) { auto sizes_dim1 = at::native::narrow_symint(sizes, 1, 0, 1); auto sizes_dim2 = at::native::narrow_symint(sizes, 1, 1, 1); return (sizes_dim1 * sizes_dim2).contiguous(); } } // namespace // compute q = (q + q_bias) / sqrt(dim_per_head), k = k + k_bias, v = v + v_bias __host__ std::tuple<Tensor, Tensor, Tensor> transform_bias_rescale_qkv_cuda( const Tensor& qkv, const Tensor& qkv_bias, const int64_t num_head) { auto B = qkv.is_nested() ? get_nested_tensor_impl(qkv)->get_nested_size_tensor().size(0) : qkv.size(0); // TODO: calculate this without the std::vector -- NestedTensor_to_mask wants // this too auto T = qkv.is_nested() ? NestedTensor_get_max_size(*get_nested_tensor_impl(qkv))[0] : qkv.size(1); if (qkv.is_nested()) { // Don't mess with non-nested case for now since it's not set up to fiddle // with mask size. // Round T up to next multiple of 8 so as to be able to utilize Tensor // cores. Otherwise, sometimes with padding, *no* row will have the maximum // sequence length and so we'll have a non-divisible-by-8 dimension even if // the model author chose a multiple of 8. 
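    // e.g. T = 13 gives 13 + (8 - 13 % 8) % 8 = 13 + 3 = 16, while T = 16 is
    // left unchanged since (8 - 0) % 8 == 0.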
T = T + (8 - (T % 8)) % 8; } auto _3D = qkv_bias.size(0); auto D = _3D / 3; TORCH_CHECK(D % num_head == 0); const auto dim_per_head = D / num_head; auto q_k_v = at::empty({3, B, num_head, T, dim_per_head}, qkv_bias.options()); #define CALL_KERNEL(assume_aligned) \ hipLaunchKernelGGL(( transform_bias_rescale_qkv_kernel<scalar_t, accscalar_t, assume_aligned>) \ , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ qkv.packed_accessor64<scalar_t, 3, RestrictPtrTraits>(), \ qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \ q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \ 1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head))) #define CALL_ADD_PADDING_KERNEL(assume_aligned) \ hipLaunchKernelGGL(( transform_bias_rescale_qkv_add_padding_kernel< \ scalar_t, \ accscalar_t, \ assume_aligned>) \ , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ nt_qkv_buffer \ .packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \ qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \ offsets_ptr, \ sizes_ptr, \ q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \ 1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head))) AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, qkv.scalar_type(), "transform_bias_rescale_qkv", [&] { using accscalar_t = acc_type<scalar_t, true>; auto threads = ::max( std::min<int32_t>(1024, D / TRANSFORM_BIAS_RESCALE_VEC), 1); auto blocks = B * T; const bool aligned = ((dim_per_head % TRANSFORM_BIAS_RESCALE_VEC) == 0) && ((reinterpret_cast<intptr_t>(qkv_bias.data_ptr()) % TRANSFORM_BIAS_RESCALE_VEC) == 0); if (aligned) { TORCH_INTERNAL_ASSERT_DEBUG_ONLY( D % TRANSFORM_BIAS_RESCALE_VEC == 0, "D = num_heads * dim_per_head, so we should have dim_per_head % " "TRANSFORM_BIAS_RESCALE_VEC == 0 => " "D % TRANSFORM_BIAS_RESCALE_VEC == 0"); } if (qkv.is_nested()) { auto* nt_qkv = get_nested_tensor_impl(qkv); const at::Tensor& nt_qkv_buffer = nt_qkv->get_buffer(); auto sizes = collapse_dims_1_and_2(nt_qkv->get_nested_size_tensor()); auto offsets = NestedTensor_batch_offsets_from_size_tensor(sizes, sizes.numel()); at::native::narrow_symint(offsets, 0, sizes.numel() + 1, sizes.numel()) .copy_(sizes.reshape({-1})); auto metadata = offsets.to(at::Device(kCUDA), at::kInt, true, true); const auto offsets_ptr = metadata.data_ptr<int>(); const auto sizes_ptr = offsets_ptr + sizes.numel() + 1; const auto input_dim = sizes.sizes()[1]; TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input_dim == 1); if (aligned && ((reinterpret_cast<intptr_t>(qkv.data_ptr()) % TRANSFORM_BIAS_RESCALE_VEC) == 0)) { CALL_ADD_PADDING_KERNEL(true); } else { CALL_ADD_PADDING_KERNEL(false); } } else if (aligned) { CALL_KERNEL(true); } else { CALL_KERNEL(false); } C10_HIP_KERNEL_LAUNCH_CHECK(); }); #undef CALL_ADD_PADDING_KERNEL #undef CALL_KERNEL auto q_k_v_s = at::native::split(q_k_v.view({3 * B, num_head, T, dim_per_head}), B, 0); return std::make_tuple(q_k_v_s[0], q_k_v_s[1], q_k_v_s[2]); } std::tuple<Tensor, Tensor> native_multi_head_attention_cuda( const Tensor& query, const Tensor& key, const Tensor& value, const int64_t embed_dim, const int64_t num_head, const Tensor& qkv_weight, const Tensor& qkv_bias, const Tensor& proj_weight, const Tensor& proj_bias, const c10::optional<Tensor>& mask, bool need_weights, bool average_attn_weights, const c10::optional<int64_t> mask_type) { // query shape: [B, T, D] // qkv_weight shape: [3 * D, D] TORCH_CHECK( !mask || !query.is_nested(), "NestedTensor with mask is not supported 
yet"); const auto D = embed_dim; TORCH_CHECK( query.dim() == 3, "expected 3-D `query`, got ", query.dim(), "-D tensor"); TORCH_CHECK( query.is_nested() || query.sizes()[2] == embed_dim, "passed-in embed_dim ", embed_dim, " didn't match last dim of query ", query.sizes()[2]); TORCH_CHECK( key.dim() == 3, "expected 3-D `key`, got ", key.dim(), "-D tensor"); TORCH_CHECK( value.dim() == 3, "expected 3-D `value`, got ", value.dim(), "-D tensor"); TORCH_CHECK( query.is_nested() || key.is_nested() || value.is_nested() || (query.sizes() == key.sizes() && key.sizes() == value.sizes()), "expected `query`/`key`/`value` shapes to match"); TORCH_CHECK( qkv_weight.dim() == 2, "expected 2-D `qkv_weight`, got ", qkv_weight.dim(), "-D tensor"); TORCH_CHECK( D * 3 == qkv_weight.sizes()[0], "expected `qkv_weight` first dim to be 3x embed_dim"); TORCH_CHECK( D == qkv_weight.sizes()[1], "expected `qkv_weight` second dim to be embed_Dim"); TORCH_CHECK( qkv_bias.dim() == 1, "expected 2-D `qkv_bias`, got ", qkv_bias.dim(), "-D tensor"); TORCH_CHECK( qkv_bias.sizes()[0] == 3 * D, "expected `qkv_bias` first dim and first dim of query to be equal"); TORCH_CHECK(D % num_head == 0, "`embed_dim` must divide evenly by `num_heads`"); #ifndef NDEBUG const auto B = query.is_nested() ? get_nested_tensor_impl(query)->get_nested_size_tensor().size(0) : query.sizes()[0]; auto T = query.is_nested() ? 0 : query.sizes()[1]; #endif const auto dim_per_head = D / num_head; if ((query.is_same(key) && key.is_same(value)) && dim_per_head % 8 == 0 && !need_weights) { // We have not done linear projection yet but the input for SDP // Is expected to be 4 dimensional. We "cheaply" create view tensors // That will then be used for checking hot path conditions with select_sd_backend auto q = query.view({query.size(0), -1, num_head, dim_per_head}).transpose(1, 2); auto k = key.view({key.size(0), -1, num_head, dim_per_head}).transpose(1, 2); auto v = value.view({value.size(0), -1, num_head, dim_per_head}).transpose(1, 2); sdp::sdp_params kernel_params{q, k, v, mask.has_value(), 0.0, false}; auto backend = select_sdp_backend(kernel_params); if (backend == sdp::SDPBackend::flash_attention || backend == sdp::SDPBackend::efficient_attention) { auto x = at::linear(query, qkv_weight, qkv_bias); auto chunks = x.chunk(3, -1); auto x_size_0 = x.size(0); chunks[0] = (chunks[0].view({x_size_0, -1, num_head, dim_per_head})) .transpose(1, 2); chunks[1] = (chunks[1].view({x_size_0, -1, num_head, dim_per_head})) .transpose(1, 2); chunks[2] = (chunks[2].view({x_size_0, -1, num_head, dim_per_head})) .transpose(1, 2); Tensor y, weights; std::tie(y, weights) = at::_scaled_dot_product_attention( chunks[0], chunks[1], chunks[2], mask, 0.0, false, false); auto past_sdp = y.transpose(1, 2).reshape({x_size_0, -1, embed_dim}); return std::make_tuple( at::linear(past_sdp, proj_weight, proj_bias), Tensor()); } // Returned math or error lets not use it } // shape: [B, T, 3 x D] auto qkv = qkv_projection(query, key, value, embed_dim, qkv_weight); if (!qkv.is_nested() && qkv.numel() == 0) { if (query.is_nested()) { return std::make_tuple(Tensor(), Tensor()); } return std::make_tuple(at::empty_like(query), Tensor()); } #ifndef NDEBUG if (!query.is_nested() || !qkv.is_nested()) { if (query.is_nested()) { T = qkv.size(1); } debug_assert_shape(__LINE__, qkv, {B, T, 3 * D}); } #endif #ifdef DEBUG_PRINT_EACH_STEP if (!qkv.is_nested()) { std::cerr << "qkv: " << qkv << std::endl; } #endif // shape: 3 x [B, num_head, T, dim_per_head] auto q_k_v = _transform_bias_rescale_qkv(qkv, 
qkv_bias, num_head); qkv = Tensor(); // Not used any more, allow free auto& q = std::get<0>(q_k_v); const auto& k = std::get<1>(q_k_v); const auto& v = std::get<2>(q_k_v); #ifndef NDEBUG debug_assert_shape(__LINE__, q, {B, num_head, T, dim_per_head}); debug_assert_shape(__LINE__, k, {B, num_head, T, dim_per_head}); debug_assert_shape(__LINE__, v, {B, num_head, T, dim_per_head}); #endif #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "q: " << q << std::endl; std::cerr << "k: " << k << std::endl; std::cerr << "v: " << v << std::endl; #endif // shape: [B, num_head, T, T] auto qkt = bmm_nt(q, k); // q & k are dead but cannot be freed because they were packed with v #ifndef NDEBUG debug_assert_shape(__LINE__, qkt, {B, num_head, T, T}); #endif #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "qkt: " << qkt << std::endl; #endif // shape: [B, num_head, T, T] // TODO: long-term, have a kernel that works with // NestedTensor directly if there is no mask passed qkt = masked_softmax(qkt, mask, query, mask_type); #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "qkt after softmax: " << qkt << std::endl; #endif // shape: [B, num_head, T, dim_per_head] // reuse storage for q; we're done with it auto attn_ctx = bmm_nn(q, qkt, v); // qkv is not dead; we just reused storage for q! if (!need_weights) { qkt = Tensor(); } #ifndef NDEBUG debug_assert_shape(__LINE__, attn_ctx, {B, num_head, T, dim_per_head}); #endif #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "attn_ctx: " << attn_ctx << std::endl; #endif // shape: [B, T, D] // Fuse transform_0213 inside auto proj = transform0213_gemm_nt_bias( attn_ctx, proj_weight, proj_bias, query); #ifndef NDEBUG debug_assert_shape(__LINE__, proj, {B, T, D}); #endif if (need_weights && average_attn_weights) { // weights are not needed for full transformer, so don't worry too // much about performance -- we implement this just to make use // cases that don't disable need_weights still get some speedup. 
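    // qkt has shape [B, num_head, T, T]; summing over dim 1 and dividing by
    // num_head yields the head-averaged attention weights returned to the caller.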
qkt = qkt.sum(1); qkt /= num_head; } return std::make_tuple(std::move(proj), std::move(qkt)); } std::tuple<Tensor, Tensor> _scaled_dot_product_flash_attention_cuda( const Tensor& query, const Tensor& key, const Tensor& value, double dropout_p, bool is_causal) { // Used for tracking usage statistics C10_LOG_API_USAGE_ONCE("torch.sdpa.flash_attention"); // Query (Batch x Num_heads x Q_seq_len x Dim_per_head) // Key (Batch x Num_heads x KV_seq_len x Dim_per_head) // Value (Batch x Num_heads x KV_seq_len x Dim_per_head) const int64_t batch_size = query.size(0); const int64_t num_heads = query.size(1); const int64_t max_seqlen_batch_q = query.size(2); const int64_t head_dim = query.size(3); const int64_t max_seqlen_batch_k = key.size(2); const int64_t max_seqlen_batch_v = value.size(2); TORCH_CHECK( max_seqlen_batch_k == max_seqlen_batch_v, "Key and Value must have the same sequence length"); // Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head) // Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head) // Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head) Tensor q_t = query.transpose(1, 2); Tensor k_t = key.transpose(1, 2); Tensor v_t = value.transpose(1, 2); Tensor cumulative_sequence_length_q = at::arange( 0, (batch_size + 1) * max_seqlen_batch_q, max_seqlen_batch_q, TensorOptions().device(at::kCUDA).dtype(at::kInt)); Tensor cumulative_sequence_length_k = at::arange( 0, (batch_size + 1) * max_seqlen_batch_k, max_seqlen_batch_k, TensorOptions().device(at::kCUDA).dtype(at::kInt)); int64_t Nnz_q{batch_size * max_seqlen_batch_q}; int64_t Nnz_kv{batch_size * max_seqlen_batch_k}; // For the standard MHA these will actually be views Tensor query_reshaped = q_t.reshape({Nnz_q, num_heads, head_dim}); Tensor key_reshaped = k_t.reshape({Nnz_kv, num_heads, head_dim}); Tensor value_reshaped = v_t.reshape({Nnz_kv, num_heads, head_dim}); Tensor attention, log_sumexp; std::tie(attention, log_sumexp) = at::_flash_attention_forward( query_reshaped, key_reshaped, value_reshaped, cumulative_sequence_length_q, cumulative_sequence_length_k, max_seqlen_batch_q, max_seqlen_batch_k, dropout_p, is_causal); // Reshape output to convert nnz to batch_size and seq_len attention = attention.view({batch_size, max_seqlen_batch_q, num_heads, head_dim}).transpose(1,2); return std::make_tuple(attention, log_sumexp); } std::tuple<Tensor, Tensor> _scaled_dot_product_efficient_attention_cuda( const Tensor& query, const Tensor& key, const Tensor& value, bool compute_log_sumexp, bool is_causal) { // Used for tracking usage statistics C10_LOG_API_USAGE_ONCE("torch.sdpa.mem_efficient_attention"); // Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head) // Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head) // Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head) Tensor q_t = query.transpose(1, 2); Tensor k_t = key.transpose(1, 2); Tensor v_t = value.transpose(1, 2); Tensor attention, log_sumexp; std::tie(attention, log_sumexp) = at::_efficient_attention_forward( q_t, k_t, v_t, c10::nullopt, c10::nullopt, c10::nullopt, compute_log_sumexp, is_causal); attention = attention.transpose(1,2); return std::make_tuple(std::move(attention), std::move(log_sumexp)); } int64_t _fused_sdp_choice_cuda(const Tensor& query_, const Tensor& key, const Tensor& value, const c10::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal){ sdp::sdp_params kernel_params{query_, key, value, attn_mask_.has_value(), dropout_p, is_causal}; auto backend = select_sdp_backend(kernel_params); if (backend == 
sdp::SDPBackend::error) { TORCH_CHECK( false, "No viable backend for scaled_dot_product_attention was found. ", "This is likely due to turning off both the math kernel and the fused kernels."); } return static_cast<int64_t>(backend); } bool _chunk_grad_outputs_efficient_attention( const Tensor& query, const Tensor& key, const Tensor& value, bool is_causal) { int64_t M = query.size(2); int64_t N = key.size(2); bool grad_kv_needs_init = is_causal && N > M; bool is_aliased = query.storage().is_alias_of(key.storage()) && query.storage().is_alias_of(value.storage()); bool equal_seq_len = query.size(2) == key.size(2); bool q_v_same_head_dim = query.size(3) == value.size(3); bool chunk_grad_outputs = (!grad_kv_needs_init && equal_seq_len && q_v_same_head_dim && is_aliased); return chunk_grad_outputs; } std::tuple<Tensor, Tensor> _flash_attention_forward( const Tensor& query, const Tensor& key, const Tensor& value, const Tensor& cumulative_sequence_length_q, const Tensor& cumulative_sequence_length_k, const int64_t max_seqlen_batch_q, const int64_t max_seqlen_batch_k, double dropout_p, bool is_causal) { #if defined(USE_FLASH_ATTENTION) /* num_splits determines how much to parallelize over the seqlen_q dimension num_splits=0 means it will be set by an internal heuristic. We're exposing num_splits mostly for benchmarking. We will hard code it to 0 for now */ constexpr int num_splits{0}; auto softmax_scale = ::pow(query.size(-1), -0.5); at::Tensor output = at::empty_like(query); Tensor logsumexp, softmax; logsumexp = fmha::mha_fwd( query, key, value, output, cumulative_sequence_length_q, cumulative_sequence_length_k, max_seqlen_batch_q, max_seqlen_batch_k, dropout_p, softmax_scale, false, /*zero_tensors = false for all calls here*/ is_causal, num_splits, c10::nullopt); return std::make_tuple(output, logsumexp); #endif TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.") return std::make_tuple(Tensor(), Tensor()); } std::tuple<at::Tensor, at::Tensor> _efficient_attention_forward( const at::Tensor& query, // [b, seqlen, num_heads, K] const at::Tensor& key, // [b, seqlen, num_heads, K] const at::Tensor& value, // [b, seqlen, num_heads, Kv] // (Mode 1MHK only) [b+1]: cu_seqlens_q[b] contains the // position of the first query token for batch $b const c10::optional<at::Tensor>& cu_seqlens_q, // (Mode 1MHK only) [b+1]: cu_seqlens_k[b] contains the // position of the first key token for batch $b const c10::optional<at::Tensor>& cu_seqlens_k, // (Mode 1MHK only) Maximum sequence length across batches const c10::optional<int64_t> max_seqlen_q_, bool compute_logsumexp, bool causal) { #if defined(USE_FLASH_ATTENTION) // TODO In theory it is possible to compile with _CUDA_ARCH < 5.0 and run on a // machine that is >= 5.0. 
In practice, this is not a problem but since // this would avoid runtime architecture checks, we should look into it TORCH_CHECK(query.dim() == 4); TORCH_CHECK(key.dim() == 4); TORCH_CHECK(value.dim() == 4); // Batch sizes TORCH_CHECK(query.size(0) == key.size(0)); TORCH_CHECK(query.size(0) == value.size(0)); // Sequence length TORCH_CHECK(key.size(1) == value.size(1)); // Num heads TORCH_CHECK(query.size(2) == key.size(2)); TORCH_CHECK(query.size(2) == value.size(2)); // Embedding per head TORCH_CHECK(query.size(3) == key.size(3)); int64_t max_seqlen_q = 0, max_seqlen_k=0; TORCH_CHECK(cu_seqlens_q.has_value() == cu_seqlens_k.has_value()); if (cu_seqlens_q.has_value()) { TORCH_CHECK(cu_seqlens_q->scalar_type() == at::ScalarType::Int); TORCH_CHECK(cu_seqlens_k->scalar_type() == at::ScalarType::Int); TORCH_CHECK(cu_seqlens_q->dim() == 1 && cu_seqlens_k->dim() == 1); CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_q)); CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_k)); TORCH_CHECK(cu_seqlens_q->size(0) == cu_seqlens_k->size(0)); TORCH_CHECK(query.size(0) == 1, "cu_seqlen only supports batch_size=1"); TORCH_CHECK(max_seqlen_q_.has_value()); max_seqlen_q = *max_seqlen_q_; max_seqlen_k = 0; // Will be set inside the kernel } else { max_seqlen_q = query.size(1); max_seqlen_k = key.size(1); } CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(query); CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(key); CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(value); at::hip::HIPGuardMasqueradingAsCUDA device_guard(query.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); int64_t B = query.size(0); int64_t M = query.size(1); int64_t N = key.size(1); int64_t num_heads = query.size(-2); int64_t K = query.size(-1); int64_t Kv = value.size(-1); at::Tensor res; at::Tensor logsumexp; auto launchKernel = [&](auto _k, int computeCapability) { using Kernel = decltype(_k); using scalar_t = typename Kernel::scalar_t; (void)_k; res = at::empty( {B, M, num_heads, Kv}, query.options().dtype( TypeTraits<typename Kernel::output_t>::atScalarType())); // NOTE: Should be aligned (by padding) in case M is // not a good number for loading during backward constexpr decltype(M) kAlignLSE = Kernel::kAlignLSE; logsumexp = at::empty( {B, num_heads, compute_logsumexp ? ceil_div(max_seqlen_q, kAlignLSE) * kAlignLSE : 0}, query.options().dtype(at::ScalarType::Float)); typename Kernel::Params p; p.query_ptr = (scalar_t*)query.data_ptr(); p.key_ptr = (scalar_t*)key.data_ptr(); p.value_ptr = (scalar_t*)value.data_ptr(); p.logsumexp_ptr = compute_logsumexp ? (typename Kernel::lse_scalar_t*)logsumexp.data_ptr() : nullptr; at::Tensor output_accum; if (Kernel::kNeedsOutputAccumulatorBuffer) { output_accum = at::empty( {B, M, num_heads, Kv}, query.options().dtype( TypeTraits<typename Kernel::output_accum_t>::atScalarType())); p.output_accum_ptr = (typename Kernel::output_accum_t*)output_accum.data_ptr(); } else { p.output_accum_ptr = nullptr; } p.output_ptr = (typename Kernel::output_t*)res.data_ptr(); if (cu_seqlens_q.has_value()) { p.cu_seqlens_q_ptr = (int32_t*)cu_seqlens_q->data_ptr(); p.cu_seqlens_k_ptr = (int32_t*)cu_seqlens_k->data_ptr(); } #define ASSIGN_CHECK_OVERFLOW(A, B) \ { \ A = B; \ TORCH_CHECK(B < std::numeric_limits<decltype(A)>::max(), #B " overflows"); \ } p.num_heads = num_heads; p.head_dim = query.size(3); p.head_dim_value = value.size(3); p.num_queries = max_seqlen_q; p.num_keys = max_seqlen_k; p.num_batches = cu_seqlens_q.has_value() ? 
cu_seqlens_q->size(0) - 1 : B; p.causal = causal; ASSIGN_CHECK_OVERFLOW(p.q_strideB, query.stride(0)); ASSIGN_CHECK_OVERFLOW(p.k_strideB, key.stride(0)); ASSIGN_CHECK_OVERFLOW(p.v_strideB, value.stride(0)); ASSIGN_CHECK_OVERFLOW(p.q_strideM, query.stride(1)); ASSIGN_CHECK_OVERFLOW(p.k_strideM, key.stride(1)); ASSIGN_CHECK_OVERFLOW(p.v_strideM, value.stride(1)); ASSIGN_CHECK_OVERFLOW(p.q_strideH, query.stride(2)); ASSIGN_CHECK_OVERFLOW(p.k_strideH, key.stride(2)); ASSIGN_CHECK_OVERFLOW(p.v_strideH, value.stride(2)); constexpr auto kernel_fn = attention_kernel_batched<Kernel>; size_t smem_bytes = sizeof(typename Kernel::SharedStorage); if (smem_bytes > 0xc000) { TORCH_INTERNAL_ASSERT( computeCapability >= 70, "This kernel requires too much shared memory on this machine!"); AT_CUDA_CHECK(hipFuncSetAttribute( kernel_fn, hipFuncAttributeMaxDynamicSharedMemorySize, smem_bytes)); } Kernel::check_supported(p); hipLaunchKernelGGL(( kernel_fn), dim3(p.getBlocksGrid()), dim3(p.getThreadsGrid()), smem_bytes, stream, p); }; // Dispatch to the right kernel DISPATCH_KERNEL(query, key, value, ([&]() { launchKernel(Kernel{}, computeCapability); })); AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(res, logsumexp); #endif TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.") return std::make_tuple(Tensor{}, Tensor{}); } Tensor triton_scaled_dot_attention(const Tensor& q, const Tensor& k, const Tensor& v, double dropout_p){ TORCH_CHECK(false, "This operator should be overridden in python before use"); return at::Tensor(); } REGISTER_CUDA_DISPATCH(_fused_sdp_choice_stub, &_fused_sdp_choice_cuda); } // namespace native } // namespace at
e910718332f30a8279ecc5575a050b34fc859036.cu
#include <type_traits> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/NestedTensorImpl.h> #include <ATen/TensorAccessor.h> #include <c10/util/Logging.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/native/NonSymbolicBC.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/MemoryAccess.cuh> #include <ATen/native/cuda/PersistentSoftmax.cuh> #include <ATen/native/cuda/block_reduce.cuh> #include <c10/cuda/CUDAMathCompat.h> #include <ATen/native/transformers/attention.h> #include <ATen/native/nested/NestedTensorUtils.h> #include <ATen/native/nested/NestedTensorTransformerFunctions.h> #include <ATen/native/nested/NestedTensorUtils.h> #include <ATen/native/transformers/cuda/sdp_utils.h> #ifdef USE_FLASH_ATTENTION #include <ATen/native/transformers/cuda/flash_attn/fmha_api.h> #include <ATen/native/transformers/cuda/mem_eff_attention/kernel_forward.h> #endif namespace at { namespace native { namespace { #define DISPATCH_BLOCKSIZE(VALUE_HEAD_DIM, FN) \ { \ if (VALUE_HEAD_DIM <= 64) { \ constexpr bool kIs64x64 = true; \ constexpr bool kSingleValueIteration = true; \ FN(); \ } else { \ constexpr bool kIs64x64 = false; \ if (VALUE_HEAD_DIM <= 128) { \ constexpr bool kSingleValueIteration = true; \ FN(); \ } else { \ constexpr bool kSingleValueIteration = false; \ FN(); \ } \ } \ } #define DISPATCH_KERNEL(QUERY, KEY, VALUE, FUNC) \ { \ cudaDeviceProp* properties = \ at::cuda::getDeviceProperties(QUERY.device().index()); \ const int computeCapability = properties->major * 10 + properties->minor; \ DISPATCH_BLOCKSIZE( \ VALUE.size(-1), ([&]() { \ static constexpr int64_t kQueriesPerBlock = kIs64x64 ? 64 : 32; \ static constexpr int64_t kKeysPerBlock = kIs64x64 ? 64 : 128; \ DISPATCH_TYPES( \ QUERY, ([&]() { \ DISPATCH_ARCHTAG( \ computeCapability, ([&]() { \ using AlignedAK = AttentionKernel< \ scalar_t, \ ArchTag, \ true, \ kQueriesPerBlock, \ kKeysPerBlock, \ kSingleValueIteration>; \ /* Run a more efficient kernel (with `isAligned=True`) \ if memory is correctly aligned*/ \ bool isAligned = \ (QUERY.stride(2) % AlignedAK::kAlignmentQ == 0 && \ KEY.stride(2) % AlignedAK::kAlignmentK == 0 && \ VALUE.stride(2) % AlignedAK::kAlignmentV == 0); \ /* TODO: Should we warn or log somewhere when we use a \ less efficient kernel due to wrong alignment? */ \ DISPATCH_BOOL(isAligned, kIsAligned, ([&]() { \ using Kernel = AttentionKernel< \ scalar_t, \ ArchTag, \ kIsAligned, \ kQueriesPerBlock, \ kKeysPerBlock, \ kSingleValueIteration>; \ FUNC(); \ })) \ })) \ })); \ })); \ } static constexpr int TRANSFORM_BIAS_RESCALE_VEC = 4; template <typename scalar_t, typename accscalar_t, bool assume_aligned> __global__ void transform_bias_rescale_qkv_kernel( // [B, T, 3 * D] const PackedTensorAccessor64<scalar_t, 3, RestrictPtrTraits> qkv, // [3 * D] const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias, // [3, B, NH, T, DH] PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v, const scalar_t inv_sqrt_dim_per_head) { // warp per DH. // so launch B * NH * T warps. 
auto NH = q_k_v.size(2); auto T = q_k_v.size(3); auto DH = q_k_v.size(4); auto t = blockIdx.x % T; auto b = blockIdx.x / T; auto D = NH * DH; if (assume_aligned) { constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC; using LoadT = memory::aligned_vector<scalar_t, VEC>; for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) { auto d = d_v * VEC; auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q[VEC]; scalar_t qkv_bias_k[VEC]; scalar_t qkv_bias_v[VEC]; scalar_t qkv_q[VEC]; scalar_t qkv_k[VEC]; scalar_t qkv_v[VEC]; // Here we require D % VEC == 0 for these vectorized loads. *reinterpret_cast<LoadT*>(&qkv_bias_q) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_k) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_v) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]); *reinterpret_cast<LoadT*>(&qkv_q) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_k) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_v) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 2 * D]); #pragma unroll // TODO: specialize for float2half2/half2float2? for (auto ii = 0; ii < VEC; ++ii) { qkv_q[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[ii]) + static_cast<accscalar_t>(qkv_bias_q[ii])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[ii]) + static_cast<accscalar_t>(qkv_bias_k[ii]))); qkv_v[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[ii]) + static_cast<accscalar_t>(qkv_bias_v[ii]))); } // Here we require DH % VEC == 0 for these vectorized stores. *reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_q); *reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_k); *reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_v); } } else { // Same as above, but we can't vectorize memory access. for (int32_t d = threadIdx.x; d < D; d += blockDim.x) { auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q = qkv_bias[d + 0 * D]; scalar_t qkv_bias_k = qkv_bias[d + 1 * D]; scalar_t qkv_bias_v = qkv_bias[d + 2 * D]; scalar_t qkv_q = qkv[b][t][d + 0 * D]; scalar_t qkv_k = qkv[b][t][d + 1 * D]; scalar_t qkv_v = qkv[b][t][d + 2 * D]; qkv_q = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q) + static_cast<accscalar_t>(qkv_bias_q)) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k) + static_cast<accscalar_t>(qkv_bias_k))); qkv_v = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v) + static_cast<accscalar_t>(qkv_bias_v))); q_k_v[0][b][nh][t][dh] = qkv_q; q_k_v[1][b][nh][t][dh] = qkv_k; q_k_v[2][b][nh][t][dh] = qkv_v; } } } template <typename scalar_t, typename accscalar_t, bool assume_aligned = false> __global__ void transform_bias_rescale_qkv_add_padding_kernel( // [B, T, 3 * D], but it's a NestedTensor buffer const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv, // [3 * D] const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias, const int* offsets, const int* input_sizes, // [3, B, NH, T, DH] PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v, const scalar_t inv_sqrt_dim_per_head) { // warp per DH. // so launch B * NH * T warps. 
const auto NH = q_k_v.size(2); const auto T = q_k_v.size(3); const auto DH = q_k_v.size(4); const auto t = blockIdx.x % T; const auto b = blockIdx.x / T; const auto D = NH * DH; const auto _3D = 3 * D; const auto offset_for_batch = offsets[b]; const auto input_dim = 1; const auto* sizes_i = input_sizes + b * input_dim; if (assume_aligned) { constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC; using LoadT = memory::aligned_vector<scalar_t, VEC>; for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) { auto d = d_v * VEC; auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q[VEC]; scalar_t qkv_bias_k[VEC]; scalar_t qkv_bias_v[VEC]; scalar_t qkv_q[VEC]; scalar_t qkv_k[VEC]; scalar_t qkv_v[VEC]; const auto first_item_offset = t * _3D + d; const auto last_item_offset = first_item_offset + VEC - 1; const bool first_item_in_bounds = first_item_offset < sizes_i[0]; const bool entire_vec_in_bounds = last_item_offset < sizes_i[0]; // Here we require D % VEC == 0 for these vectorized loads. *reinterpret_cast<LoadT*>(&qkv_bias_q) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_k) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_v) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]); if (entire_vec_in_bounds) { const auto offset = offset_for_batch + first_item_offset; *reinterpret_cast<LoadT*>(&qkv_q) = *reinterpret_cast<const LoadT*>(&qkv[offset + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_k) = *reinterpret_cast<const LoadT*>(&qkv[offset + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_v) = *reinterpret_cast<const LoadT*>(&qkv[offset + 2 * D]); #pragma unroll // TODO: specialize for float2half2/half2float2? for (auto ii = 0; ii < VEC; ++ii) { qkv_q[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[ii]) + static_cast<accscalar_t>(qkv_bias_q[ii])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[ii]) + static_cast<accscalar_t>(qkv_bias_k[ii]))); qkv_v[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[ii]) + static_cast<accscalar_t>(qkv_bias_v[ii]))); } } else if (first_item_in_bounds) { const auto offset = offset_for_batch + first_item_offset; qkv_q[0] = qkv[offset + 0 * D]; qkv_k[0] = qkv[offset + 1 * D]; qkv_v[0] = qkv[offset + 2 * D]; qkv_q[0] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[0]) + static_cast<accscalar_t>(qkv_bias_q[0])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[0] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[0]) + static_cast<accscalar_t>(qkv_bias_k[0]))); qkv_v[0] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[0]) + static_cast<accscalar_t>(qkv_bias_v[0]))); #pragma unroll for (auto ii = 1; ii < VEC; ++ii) { const auto loop_offset = offset + ii; if (loop_offset < sizes_i[0]) { qkv_q[ii] = qkv[loop_offset + 0 * D]; qkv_k[ii] = qkv[loop_offset + 1 * D]; qkv_v[ii] = qkv[loop_offset + 2 * D]; qkv_q[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[ii]) + static_cast<accscalar_t>(qkv_bias_q[ii])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[ii]) + static_cast<accscalar_t>(qkv_bias_k[ii]))); qkv_v[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[ii]) + static_cast<accscalar_t>(qkv_bias_v[ii]))); } else { qkv_q[ii] = 0; qkv_k[ii] = 0; qkv_v[ii] = 0; } } } else { #pragma unroll for (auto ii = 0; ii < VEC; ++ii) { qkv_q[ii] = 0; qkv_k[ii] = 0; qkv_v[ii] = 0; } } // Here 
we require DH % VEC == 0 for these vectorized stores. *reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_q); *reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_k); *reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_v); } } else { for (int32_t d = threadIdx.x; d < D; d += blockDim.x) { auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q = qkv_bias[d + 0 * D]; scalar_t qkv_bias_k = qkv_bias[d + 1 * D]; scalar_t qkv_bias_v = qkv_bias[d + 2 * D]; const auto item_offset = t * _3D + d; const bool in_bounds = item_offset < sizes_i[0]; scalar_t qkv_q, qkv_k, qkv_v; if (in_bounds) { const auto qkv_offset = offset_for_batch + item_offset; qkv_q = qkv[qkv_offset + 0 * D]; qkv_k = qkv[qkv_offset + 1 * D]; qkv_v = qkv[qkv_offset + 2 * D]; qkv_q = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q) + static_cast<accscalar_t>(qkv_bias_q)) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k) + static_cast<accscalar_t>(qkv_bias_k))); qkv_v = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v) + static_cast<accscalar_t>(qkv_bias_v))); } else { qkv_q = 0; qkv_k = 0; qkv_v = 0; } q_k_v[0][b][nh][t][dh] = qkv_q; q_k_v[1][b][nh][t][dh] = qkv_k; q_k_v[2][b][nh][t][dh] = qkv_v; } } } Tensor collapse_dims_1_and_2(const Tensor& sizes) { auto sizes_dim1 = at::native::narrow_symint(sizes, 1, 0, 1); auto sizes_dim2 = at::native::narrow_symint(sizes, 1, 1, 1); return (sizes_dim1 * sizes_dim2).contiguous(); } } // namespace // compute q = (q + q_bias) / sqrt(dim_per_head), k = k + k_bias, v = v + v_bias __host__ std::tuple<Tensor, Tensor, Tensor> transform_bias_rescale_qkv_cuda( const Tensor& qkv, const Tensor& qkv_bias, const int64_t num_head) { auto B = qkv.is_nested() ? get_nested_tensor_impl(qkv)->get_nested_size_tensor().size(0) : qkv.size(0); // TODO: calculate this without the std::vector -- NestedTensor_to_mask wants // this too auto T = qkv.is_nested() ? NestedTensor_get_max_size(*get_nested_tensor_impl(qkv))[0] : qkv.size(1); if (qkv.is_nested()) { // Don't mess with non-nested case for now since it's not set up to fiddle // with mask size. // Round T up to next multiple of 8 so as to be able to utilize Tensor // cores. Otherwise, sometimes with padding, *no* row will have the maximum // sequence length and so we'll have a non-divisible-by-8 dimension even if // the model author chose a multiple of 8. 
T = T + (8 - (T % 8)) % 8; } auto _3D = qkv_bias.size(0); auto D = _3D / 3; TORCH_CHECK(D % num_head == 0); const auto dim_per_head = D / num_head; auto q_k_v = at::empty({3, B, num_head, T, dim_per_head}, qkv_bias.options()); #define CALL_KERNEL(assume_aligned) \ transform_bias_rescale_qkv_kernel<scalar_t, accscalar_t, assume_aligned> \ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( \ qkv.packed_accessor64<scalar_t, 3, RestrictPtrTraits>(), \ qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \ q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \ 1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head))) #define CALL_ADD_PADDING_KERNEL(assume_aligned) \ transform_bias_rescale_qkv_add_padding_kernel< \ scalar_t, \ accscalar_t, \ assume_aligned> \ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( \ nt_qkv_buffer \ .packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \ qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \ offsets_ptr, \ sizes_ptr, \ q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \ 1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head))) AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, qkv.scalar_type(), "transform_bias_rescale_qkv", [&] { using accscalar_t = acc_type<scalar_t, true>; auto threads = std::max( std::min<int32_t>(1024, D / TRANSFORM_BIAS_RESCALE_VEC), 1); auto blocks = B * T; const bool aligned = ((dim_per_head % TRANSFORM_BIAS_RESCALE_VEC) == 0) && ((reinterpret_cast<intptr_t>(qkv_bias.data_ptr()) % TRANSFORM_BIAS_RESCALE_VEC) == 0); if (aligned) { TORCH_INTERNAL_ASSERT_DEBUG_ONLY( D % TRANSFORM_BIAS_RESCALE_VEC == 0, "D = num_heads * dim_per_head, so we should have dim_per_head % " "TRANSFORM_BIAS_RESCALE_VEC == 0 => " "D % TRANSFORM_BIAS_RESCALE_VEC == 0"); } if (qkv.is_nested()) { auto* nt_qkv = get_nested_tensor_impl(qkv); const at::Tensor& nt_qkv_buffer = nt_qkv->get_buffer(); auto sizes = collapse_dims_1_and_2(nt_qkv->get_nested_size_tensor()); auto offsets = NestedTensor_batch_offsets_from_size_tensor(sizes, sizes.numel()); at::native::narrow_symint(offsets, 0, sizes.numel() + 1, sizes.numel()) .copy_(sizes.reshape({-1})); auto metadata = offsets.to(at::Device(kCUDA), at::kInt, true, true); const auto offsets_ptr = metadata.data_ptr<int>(); const auto sizes_ptr = offsets_ptr + sizes.numel() + 1; const auto input_dim = sizes.sizes()[1]; TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input_dim == 1); if (aligned && ((reinterpret_cast<intptr_t>(qkv.data_ptr()) % TRANSFORM_BIAS_RESCALE_VEC) == 0)) { CALL_ADD_PADDING_KERNEL(true); } else { CALL_ADD_PADDING_KERNEL(false); } } else if (aligned) { CALL_KERNEL(true); } else { CALL_KERNEL(false); } C10_CUDA_KERNEL_LAUNCH_CHECK(); }); #undef CALL_ADD_PADDING_KERNEL #undef CALL_KERNEL auto q_k_v_s = at::native::split(q_k_v.view({3 * B, num_head, T, dim_per_head}), B, 0); return std::make_tuple(q_k_v_s[0], q_k_v_s[1], q_k_v_s[2]); } std::tuple<Tensor, Tensor> native_multi_head_attention_cuda( const Tensor& query, const Tensor& key, const Tensor& value, const int64_t embed_dim, const int64_t num_head, const Tensor& qkv_weight, const Tensor& qkv_bias, const Tensor& proj_weight, const Tensor& proj_bias, const c10::optional<Tensor>& mask, bool need_weights, bool average_attn_weights, const c10::optional<int64_t> mask_type) { // query shape: [B, T, D] // qkv_weight shape: [3 * D, D] TORCH_CHECK( !mask || !query.is_nested(), "NestedTensor with mask is not supported yet"); const auto D = embed_dim; TORCH_CHECK( query.dim() == 3, "expected 3-D `query`, got ", 
query.dim(), "-D tensor"); TORCH_CHECK( query.is_nested() || query.sizes()[2] == embed_dim, "passed-in embed_dim ", embed_dim, " didn't match last dim of query ", query.sizes()[2]); TORCH_CHECK( key.dim() == 3, "expected 3-D `key`, got ", key.dim(), "-D tensor"); TORCH_CHECK( value.dim() == 3, "expected 3-D `value`, got ", value.dim(), "-D tensor"); TORCH_CHECK( query.is_nested() || key.is_nested() || value.is_nested() || (query.sizes() == key.sizes() && key.sizes() == value.sizes()), "expected `query`/`key`/`value` shapes to match"); TORCH_CHECK( qkv_weight.dim() == 2, "expected 2-D `qkv_weight`, got ", qkv_weight.dim(), "-D tensor"); TORCH_CHECK( D * 3 == qkv_weight.sizes()[0], "expected `qkv_weight` first dim to be 3x embed_dim"); TORCH_CHECK( D == qkv_weight.sizes()[1], "expected `qkv_weight` second dim to be embed_Dim"); TORCH_CHECK( qkv_bias.dim() == 1, "expected 2-D `qkv_bias`, got ", qkv_bias.dim(), "-D tensor"); TORCH_CHECK( qkv_bias.sizes()[0] == 3 * D, "expected `qkv_bias` first dim and first dim of query to be equal"); TORCH_CHECK(D % num_head == 0, "`embed_dim` must divide evenly by `num_heads`"); #ifndef NDEBUG const auto B = query.is_nested() ? get_nested_tensor_impl(query)->get_nested_size_tensor().size(0) : query.sizes()[0]; auto T = query.is_nested() ? 0 : query.sizes()[1]; #endif const auto dim_per_head = D / num_head; if ((query.is_same(key) && key.is_same(value)) && dim_per_head % 8 == 0 && !need_weights) { // We have not done linear projection yet but the input for SDP // Is expected to be 4 dimensional. We "cheaply" create view tensors // That will then be used for checking hot path conditions with select_sd_backend auto q = query.view({query.size(0), -1, num_head, dim_per_head}).transpose(1, 2); auto k = key.view({key.size(0), -1, num_head, dim_per_head}).transpose(1, 2); auto v = value.view({value.size(0), -1, num_head, dim_per_head}).transpose(1, 2); sdp::sdp_params kernel_params{q, k, v, mask.has_value(), 0.0, false}; auto backend = select_sdp_backend(kernel_params); if (backend == sdp::SDPBackend::flash_attention || backend == sdp::SDPBackend::efficient_attention) { auto x = at::linear(query, qkv_weight, qkv_bias); auto chunks = x.chunk(3, -1); auto x_size_0 = x.size(0); chunks[0] = (chunks[0].view({x_size_0, -1, num_head, dim_per_head})) .transpose(1, 2); chunks[1] = (chunks[1].view({x_size_0, -1, num_head, dim_per_head})) .transpose(1, 2); chunks[2] = (chunks[2].view({x_size_0, -1, num_head, dim_per_head})) .transpose(1, 2); Tensor y, weights; std::tie(y, weights) = at::_scaled_dot_product_attention( chunks[0], chunks[1], chunks[2], mask, 0.0, false, false); auto past_sdp = y.transpose(1, 2).reshape({x_size_0, -1, embed_dim}); return std::make_tuple( at::linear(past_sdp, proj_weight, proj_bias), Tensor()); } // Returned math or error lets not use it } // shape: [B, T, 3 x D] auto qkv = qkv_projection(query, key, value, embed_dim, qkv_weight); if (!qkv.is_nested() && qkv.numel() == 0) { if (query.is_nested()) { return std::make_tuple(Tensor(), Tensor()); } return std::make_tuple(at::empty_like(query), Tensor()); } #ifndef NDEBUG if (!query.is_nested() || !qkv.is_nested()) { if (query.is_nested()) { T = qkv.size(1); } debug_assert_shape(__LINE__, qkv, {B, T, 3 * D}); } #endif #ifdef DEBUG_PRINT_EACH_STEP if (!qkv.is_nested()) { std::cerr << "qkv: " << qkv << std::endl; } #endif // shape: 3 x [B, num_head, T, dim_per_head] auto q_k_v = _transform_bias_rescale_qkv(qkv, qkv_bias, num_head); qkv = Tensor(); // Not used any more, allow free auto& q = 
std::get<0>(q_k_v); const auto& k = std::get<1>(q_k_v); const auto& v = std::get<2>(q_k_v); #ifndef NDEBUG debug_assert_shape(__LINE__, q, {B, num_head, T, dim_per_head}); debug_assert_shape(__LINE__, k, {B, num_head, T, dim_per_head}); debug_assert_shape(__LINE__, v, {B, num_head, T, dim_per_head}); #endif #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "q: " << q << std::endl; std::cerr << "k: " << k << std::endl; std::cerr << "v: " << v << std::endl; #endif // shape: [B, num_head, T, T] auto qkt = bmm_nt(q, k); // q & k are dead but cannot be freed because they were packed with v #ifndef NDEBUG debug_assert_shape(__LINE__, qkt, {B, num_head, T, T}); #endif #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "qkt: " << qkt << std::endl; #endif // shape: [B, num_head, T, T] // TODO: long-term, have a kernel that works with // NestedTensor directly if there is no mask passed qkt = masked_softmax(qkt, mask, query, mask_type); #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "qkt after softmax: " << qkt << std::endl; #endif // shape: [B, num_head, T, dim_per_head] // reuse storage for q; we're done with it auto attn_ctx = bmm_nn(q, qkt, v); // qkv is not dead; we just reused storage for q! if (!need_weights) { qkt = Tensor(); } #ifndef NDEBUG debug_assert_shape(__LINE__, attn_ctx, {B, num_head, T, dim_per_head}); #endif #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "attn_ctx: " << attn_ctx << std::endl; #endif // shape: [B, T, D] // Fuse transform_0213 inside auto proj = transform0213_gemm_nt_bias( attn_ctx, proj_weight, proj_bias, query); #ifndef NDEBUG debug_assert_shape(__LINE__, proj, {B, T, D}); #endif if (need_weights && average_attn_weights) { // weights are not needed for full transformer, so don't worry too // much about performance -- we implement this just to make use // cases that don't disable need_weights still get some speedup. 
qkt = qkt.sum(1); qkt /= num_head; } return std::make_tuple(std::move(proj), std::move(qkt)); } std::tuple<Tensor, Tensor> _scaled_dot_product_flash_attention_cuda( const Tensor& query, const Tensor& key, const Tensor& value, double dropout_p, bool is_causal) { // Used for tracking usage statistics C10_LOG_API_USAGE_ONCE("torch.sdpa.flash_attention"); // Query (Batch x Num_heads x Q_seq_len x Dim_per_head) // Key (Batch x Num_heads x KV_seq_len x Dim_per_head) // Value (Batch x Num_heads x KV_seq_len x Dim_per_head) const int64_t batch_size = query.size(0); const int64_t num_heads = query.size(1); const int64_t max_seqlen_batch_q = query.size(2); const int64_t head_dim = query.size(3); const int64_t max_seqlen_batch_k = key.size(2); const int64_t max_seqlen_batch_v = value.size(2); TORCH_CHECK( max_seqlen_batch_k == max_seqlen_batch_v, "Key and Value must have the same sequence length"); // Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head) // Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head) // Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head) Tensor q_t = query.transpose(1, 2); Tensor k_t = key.transpose(1, 2); Tensor v_t = value.transpose(1, 2); Tensor cumulative_sequence_length_q = at::arange( 0, (batch_size + 1) * max_seqlen_batch_q, max_seqlen_batch_q, TensorOptions().device(at::kCUDA).dtype(at::kInt)); Tensor cumulative_sequence_length_k = at::arange( 0, (batch_size + 1) * max_seqlen_batch_k, max_seqlen_batch_k, TensorOptions().device(at::kCUDA).dtype(at::kInt)); int64_t Nnz_q{batch_size * max_seqlen_batch_q}; int64_t Nnz_kv{batch_size * max_seqlen_batch_k}; // For the standard MHA these will actually be views Tensor query_reshaped = q_t.reshape({Nnz_q, num_heads, head_dim}); Tensor key_reshaped = k_t.reshape({Nnz_kv, num_heads, head_dim}); Tensor value_reshaped = v_t.reshape({Nnz_kv, num_heads, head_dim}); Tensor attention, log_sumexp; std::tie(attention, log_sumexp) = at::_flash_attention_forward( query_reshaped, key_reshaped, value_reshaped, cumulative_sequence_length_q, cumulative_sequence_length_k, max_seqlen_batch_q, max_seqlen_batch_k, dropout_p, is_causal); // Reshape output to convert nnz to batch_size and seq_len attention = attention.view({batch_size, max_seqlen_batch_q, num_heads, head_dim}).transpose(1,2); return std::make_tuple(attention, log_sumexp); } std::tuple<Tensor, Tensor> _scaled_dot_product_efficient_attention_cuda( const Tensor& query, const Tensor& key, const Tensor& value, bool compute_log_sumexp, bool is_causal) { // Used for tracking usage statistics C10_LOG_API_USAGE_ONCE("torch.sdpa.mem_efficient_attention"); // Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head) // Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head) // Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head) Tensor q_t = query.transpose(1, 2); Tensor k_t = key.transpose(1, 2); Tensor v_t = value.transpose(1, 2); Tensor attention, log_sumexp; std::tie(attention, log_sumexp) = at::_efficient_attention_forward( q_t, k_t, v_t, c10::nullopt, c10::nullopt, c10::nullopt, compute_log_sumexp, is_causal); attention = attention.transpose(1,2); return std::make_tuple(std::move(attention), std::move(log_sumexp)); } int64_t _fused_sdp_choice_cuda(const Tensor& query_, const Tensor& key, const Tensor& value, const c10::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal){ sdp::sdp_params kernel_params{query_, key, value, attn_mask_.has_value(), dropout_p, is_causal}; auto backend = select_sdp_backend(kernel_params); if (backend == 
sdp::SDPBackend::error) { TORCH_CHECK( false, "No viable backend for scaled_dot_product_attention was found. ", "This is likely due to turning off both the math kernel and the fused kernels."); } return static_cast<int64_t>(backend); } bool _chunk_grad_outputs_efficient_attention( const Tensor& query, const Tensor& key, const Tensor& value, bool is_causal) { int64_t M = query.size(2); int64_t N = key.size(2); bool grad_kv_needs_init = is_causal && N > M; bool is_aliased = query.storage().is_alias_of(key.storage()) && query.storage().is_alias_of(value.storage()); bool equal_seq_len = query.size(2) == key.size(2); bool q_v_same_head_dim = query.size(3) == value.size(3); bool chunk_grad_outputs = (!grad_kv_needs_init && equal_seq_len && q_v_same_head_dim && is_aliased); return chunk_grad_outputs; } std::tuple<Tensor, Tensor> _flash_attention_forward( const Tensor& query, const Tensor& key, const Tensor& value, const Tensor& cumulative_sequence_length_q, const Tensor& cumulative_sequence_length_k, const int64_t max_seqlen_batch_q, const int64_t max_seqlen_batch_k, double dropout_p, bool is_causal) { #if defined(USE_FLASH_ATTENTION) /* num_splits determines how much to parallelize over the seqlen_q dimension num_splits=0 means it will be set by an internal heuristic. We're exposing num_splits mostly for benchmarking. We will hard code it to 0 for now */ constexpr int num_splits{0}; auto softmax_scale = std::pow(query.size(-1), -0.5); at::Tensor output = at::empty_like(query); Tensor logsumexp, softmax; logsumexp = fmha::mha_fwd( query, key, value, output, cumulative_sequence_length_q, cumulative_sequence_length_k, max_seqlen_batch_q, max_seqlen_batch_k, dropout_p, softmax_scale, false, /*zero_tensors = false for all calls here*/ is_causal, num_splits, c10::nullopt); return std::make_tuple(output, logsumexp); #endif TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.") return std::make_tuple(Tensor(), Tensor()); } std::tuple<at::Tensor, at::Tensor> _efficient_attention_forward( const at::Tensor& query, // [b, seqlen, num_heads, K] const at::Tensor& key, // [b, seqlen, num_heads, K] const at::Tensor& value, // [b, seqlen, num_heads, Kv] // (Mode 1MHK only) [b+1]: cu_seqlens_q[b] contains the // position of the first query token for batch $b const c10::optional<at::Tensor>& cu_seqlens_q, // (Mode 1MHK only) [b+1]: cu_seqlens_k[b] contains the // position of the first key token for batch $b const c10::optional<at::Tensor>& cu_seqlens_k, // (Mode 1MHK only) Maximum sequence length across batches const c10::optional<int64_t> max_seqlen_q_, bool compute_logsumexp, bool causal) { #if defined(USE_FLASH_ATTENTION) // TODO In theory it is possible to compile with _CUDA_ARCH < 5.0 and run on a // machine that is >= 5.0. 
In practice, this is not a problem but since // this would avoid runtime architecture checks, we should look into it TORCH_CHECK(query.dim() == 4); TORCH_CHECK(key.dim() == 4); TORCH_CHECK(value.dim() == 4); // Batch sizes TORCH_CHECK(query.size(0) == key.size(0)); TORCH_CHECK(query.size(0) == value.size(0)); // Sequence length TORCH_CHECK(key.size(1) == value.size(1)); // Num heads TORCH_CHECK(query.size(2) == key.size(2)); TORCH_CHECK(query.size(2) == value.size(2)); // Embedding per head TORCH_CHECK(query.size(3) == key.size(3)); int64_t max_seqlen_q = 0, max_seqlen_k=0; TORCH_CHECK(cu_seqlens_q.has_value() == cu_seqlens_k.has_value()); if (cu_seqlens_q.has_value()) { TORCH_CHECK(cu_seqlens_q->scalar_type() == at::ScalarType::Int); TORCH_CHECK(cu_seqlens_k->scalar_type() == at::ScalarType::Int); TORCH_CHECK(cu_seqlens_q->dim() == 1 && cu_seqlens_k->dim() == 1); CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_q)); CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_k)); TORCH_CHECK(cu_seqlens_q->size(0) == cu_seqlens_k->size(0)); TORCH_CHECK(query.size(0) == 1, "cu_seqlen only supports batch_size=1"); TORCH_CHECK(max_seqlen_q_.has_value()); max_seqlen_q = *max_seqlen_q_; max_seqlen_k = 0; // Will be set inside the kernel } else { max_seqlen_q = query.size(1); max_seqlen_k = key.size(1); } CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(query); CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(key); CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(value); at::cuda::CUDAGuard device_guard(query.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); int64_t B = query.size(0); int64_t M = query.size(1); int64_t N = key.size(1); int64_t num_heads = query.size(-2); int64_t K = query.size(-1); int64_t Kv = value.size(-1); at::Tensor res; at::Tensor logsumexp; auto launchKernel = [&](auto _k, int computeCapability) { using Kernel = decltype(_k); using scalar_t = typename Kernel::scalar_t; (void)_k; res = at::empty( {B, M, num_heads, Kv}, query.options().dtype( TypeTraits<typename Kernel::output_t>::atScalarType())); // NOTE: Should be aligned (by padding) in case M is // not a good number for loading during backward constexpr decltype(M) kAlignLSE = Kernel::kAlignLSE; logsumexp = at::empty( {B, num_heads, compute_logsumexp ? ceil_div(max_seqlen_q, kAlignLSE) * kAlignLSE : 0}, query.options().dtype(at::ScalarType::Float)); typename Kernel::Params p; p.query_ptr = (scalar_t*)query.data_ptr(); p.key_ptr = (scalar_t*)key.data_ptr(); p.value_ptr = (scalar_t*)value.data_ptr(); p.logsumexp_ptr = compute_logsumexp ? (typename Kernel::lse_scalar_t*)logsumexp.data_ptr() : nullptr; at::Tensor output_accum; if (Kernel::kNeedsOutputAccumulatorBuffer) { output_accum = at::empty( {B, M, num_heads, Kv}, query.options().dtype( TypeTraits<typename Kernel::output_accum_t>::atScalarType())); p.output_accum_ptr = (typename Kernel::output_accum_t*)output_accum.data_ptr(); } else { p.output_accum_ptr = nullptr; } p.output_ptr = (typename Kernel::output_t*)res.data_ptr(); if (cu_seqlens_q.has_value()) { p.cu_seqlens_q_ptr = (int32_t*)cu_seqlens_q->data_ptr(); p.cu_seqlens_k_ptr = (int32_t*)cu_seqlens_k->data_ptr(); } #define ASSIGN_CHECK_OVERFLOW(A, B) \ { \ A = B; \ TORCH_CHECK(B < std::numeric_limits<decltype(A)>::max(), #B " overflows"); \ } p.num_heads = num_heads; p.head_dim = query.size(3); p.head_dim_value = value.size(3); p.num_queries = max_seqlen_q; p.num_keys = max_seqlen_k; p.num_batches = cu_seqlens_q.has_value() ? 
cu_seqlens_q->size(0) - 1 : B; p.causal = causal; ASSIGN_CHECK_OVERFLOW(p.q_strideB, query.stride(0)); ASSIGN_CHECK_OVERFLOW(p.k_strideB, key.stride(0)); ASSIGN_CHECK_OVERFLOW(p.v_strideB, value.stride(0)); ASSIGN_CHECK_OVERFLOW(p.q_strideM, query.stride(1)); ASSIGN_CHECK_OVERFLOW(p.k_strideM, key.stride(1)); ASSIGN_CHECK_OVERFLOW(p.v_strideM, value.stride(1)); ASSIGN_CHECK_OVERFLOW(p.q_strideH, query.stride(2)); ASSIGN_CHECK_OVERFLOW(p.k_strideH, key.stride(2)); ASSIGN_CHECK_OVERFLOW(p.v_strideH, value.stride(2)); constexpr auto kernel_fn = attention_kernel_batched<Kernel>; size_t smem_bytes = sizeof(typename Kernel::SharedStorage); if (smem_bytes > 0xc000) { TORCH_INTERNAL_ASSERT( computeCapability >= 70, "This kernel requires too much shared memory on this machine!"); AT_CUDA_CHECK(cudaFuncSetAttribute( kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_bytes)); } Kernel::check_supported(p); kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes, stream>>>(p); }; // Dispatch to the right kernel DISPATCH_KERNEL(query, key, value, ([&]() { launchKernel(Kernel{}, computeCapability); })); AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(res, logsumexp); #endif TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.") return std::make_tuple(Tensor{}, Tensor{}); } Tensor triton_scaled_dot_attention(const Tensor& q, const Tensor& k, const Tensor& v, double dropout_p){ TORCH_CHECK(false, "This operator should be overridden in python before use"); return at::Tensor(); } REGISTER_CUDA_DISPATCH(_fused_sdp_choice_stub, &_fused_sdp_choice_cuda); } // namespace native } // namespace at
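// A small sketch of the logsumexp padding used inside launchKernel above:
// max_seqlen_q is rounded up to a multiple of Kernel::kAlignLSE with
// ceil_div(q, a) * a before the buffer is allocated. kAlignLSE = 32 below is an
// assumed value for illustration only, not taken from the kernel headers.
#include <cstdio>
#include <cstdint>

static int64_t ceil_div(int64_t a, int64_t b) { return (a + b - 1) / b; }

int main() {
    const int64_t kAlignLSE = 32;              // assumed alignment, for illustration
    for (int64_t max_seqlen_q : {1, 31, 32, 33, 100}) {
        int64_t padded = ceil_div(max_seqlen_q, kAlignLSE) * kAlignLSE;
        std::printf("max_seqlen_q=%3lld -> logsumexp columns=%3lld\n",
                    (long long)max_seqlen_q, (long long)padded);
    }
    return 0;
}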
681c2d476517dced2fb33664b67f2c3cd8500158.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
    -- MAGMA (version 1.3.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       November 2012

       @generated d Wed Nov 14 22:53:47 2012

*/
#include "common_magma.h"

// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512

#define PRECISION_d

__global__
void magma_dlarfg_gpu_kernel( int n, double* dx0, double* dx,
                              double *dtau, double *dxnorm )
{
    const int i = threadIdx.x;
    const int j = i + BLOCK_SIZE * blockIdx.x;
    __shared__ double scale;
    __shared__ double xnorm;

    double dxi;

    if ( j < n-1)
        dxi = dx[j];

    if ( i == 0 ) {
        xnorm = *dxnorm;
        if ( xnorm == 0 ) {
            *dtau = MAGMA_D_ZERO;
        }
        else {

#if (defined(PRECISION_s) || defined(PRECISION_d))
            double alpha = *dx0;
            double beta  = xnorm; // sqrt( alpha*alpha + xnorm*xnorm );
            beta  = -copysign( beta, alpha );

            // todo: deal with badly scaled vectors (see lapack's larfg)
            *dtau = (beta - alpha) / beta;
            *dx0  = beta;

            scale = 1 / (alpha - beta);
#else
            double alpha = *dx0;
            double alphar = MAGMA_D_REAL(alpha), alphai = MAGMA_D_IMAG(alpha);
            double beta  = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
            beta  = -copysign( beta, alphar );

            // todo: deal with badly scaled vectors (see lapack's larfg)
            *dtau = MAGMA_D_MAKE((beta - alphar)/beta, -alphai/beta);
            *dx0  = MAGMA_D_MAKE(beta, 0.);

            alpha = MAGMA_D_MAKE( MAGMA_D_REAL(alpha) - beta, MAGMA_D_IMAG(alpha));
            scale = MAGMA_D_DIV( MAGMA_D_ONE, alpha);
#endif
        }
    }

    // scale x
    __syncthreads();
    if ( xnorm != 0 && j < n-1)
        dx[j] = MAGMA_D_MUL(dxi, scale);
}

/*
   Generates Householder elementary reflector H = I - tau v v^T to reduce
     H [ dx0 ] = [ beta ]
       [ dx  ]   [ 0    ]
   with beta = ±norm( [dx0, dx] ).
   Stores v over dx; first element of v is 1 and is not stored.
   Stores beta over dx0.
   Stores tau.
*/
extern "C" void
magma_dlarfg_gpu(int n, double *dx0, double *dx, double *dtau, double *dxnorm)
{
    dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
    dim3 threads( BLOCK_SIZE );

    hipLaunchKernelGGL(( magma_dlarfg_gpu_kernel), dim3(blocks), dim3(threads) , 0, 0, n, dx0, dx, dtau, dxnorm );
}
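// A minimal host-side sketch of the arithmetic the kernel above performs in its
// real double-precision branch, run on a tiny made-up vector. It reproduces
// beta = -sign(alpha)*norm(x), tau = (beta - alpha)/beta and the tail scaling
// by 1/(alpha - beta); it is an illustration, not MAGMA code.
#include <cmath>
#include <cstdio>

int main() {
    double x[3] = {3.0, 4.0, 0.0};                // x[0] plays the role of dx0
    double alpha = x[0];
    double xnorm = std::sqrt(x[0]*x[0] + x[1]*x[1] + x[2]*x[2]);  // = 5

    double beta  = -std::copysign(xnorm, alpha);  // = -5
    double tau   = (beta - alpha) / beta;         // = 1.6
    double scale = 1.0 / (alpha - beta);          // = 0.125
    std::printf("beta=%g tau=%g scale=%g\n", beta, tau, scale);

    // Householder vector v = [1, scale*x[1], scale*x[2]]; H = I - tau*v*v^T
    double v[3] = {1.0, scale * x[1], scale * x[2]};
    double vdotx = v[0]*x[0] + v[1]*x[1] + v[2]*x[2];
    for (int i = 0; i < 3; ++i)
        std::printf("(H x)[%d] = %g\n", i, x[i] - tau * v[i] * vdotx);
    // Expected: (H x)[0] = beta = -5 and zero for the remaining entries.
    return 0;
}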
681c2d476517dced2fb33664b67f2c3cd8500158.cu
/*
    -- MAGMA (version 1.3.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       November 2012

       @generated d Wed Nov 14 22:53:47 2012

*/
#include "common_magma.h"

// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512

#define PRECISION_d

__global__
void magma_dlarfg_gpu_kernel( int n, double* dx0, double* dx,
                              double *dtau, double *dxnorm )
{
    const int i = threadIdx.x;
    const int j = i + BLOCK_SIZE * blockIdx.x;
    __shared__ double scale;
    __shared__ double xnorm;

    double dxi;

    if ( j < n-1)
        dxi = dx[j];

    if ( i == 0 ) {
        xnorm = *dxnorm;
        if ( xnorm == 0 ) {
            *dtau = MAGMA_D_ZERO;
        }
        else {

#if (defined(PRECISION_s) || defined(PRECISION_d))
            double alpha = *dx0;
            double beta  = xnorm; // sqrt( alpha*alpha + xnorm*xnorm );
            beta  = -copysign( beta, alpha );

            // todo: deal with badly scaled vectors (see lapack's larfg)
            *dtau = (beta - alpha) / beta;
            *dx0  = beta;

            scale = 1 / (alpha - beta);
#else
            double alpha = *dx0;
            double alphar = MAGMA_D_REAL(alpha), alphai = MAGMA_D_IMAG(alpha);
            double beta  = xnorm; // sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
            beta  = -copysign( beta, alphar );

            // todo: deal with badly scaled vectors (see lapack's larfg)
            *dtau = MAGMA_D_MAKE((beta - alphar)/beta, -alphai/beta);
            *dx0  = MAGMA_D_MAKE(beta, 0.);

            alpha = MAGMA_D_MAKE( MAGMA_D_REAL(alpha) - beta, MAGMA_D_IMAG(alpha));
            scale = MAGMA_D_DIV( MAGMA_D_ONE, alpha);
#endif
        }
    }

    // scale x
    __syncthreads();
    if ( xnorm != 0 && j < n-1)
        dx[j] = MAGMA_D_MUL(dxi, scale);
}

/*
   Generates Householder elementary reflector H = I - tau v v^T to reduce
     H [ dx0 ] = [ beta ]
       [ dx  ]   [ 0    ]
   with beta = ±norm( [dx0, dx] ).
   Stores v over dx; first element of v is 1 and is not stored.
   Stores beta over dx0.
   Stores tau.
*/
extern "C" void
magma_dlarfg_gpu(int n, double *dx0, double *dx, double *dtau, double *dxnorm)
{
    dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
    dim3 threads( BLOCK_SIZE );

    magma_dlarfg_gpu_kernel<<< blocks, threads >>>( n, dx0, dx, dtau, dxnorm );
}
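// A small sketch of the launch configuration used by magma_dlarfg_gpu above:
// the grid is rounded up so n elements are covered by whole blocks of
// BLOCK_SIZE threads. toy_kernel is a placeholder, not MAGMA code; under HIP
// the same launch would be spelled
//   hipLaunchKernelGGL(toy_kernel, blocks, threads, 0, 0, n, d_x);
#include <cstdio>

__global__ void toy_kernel(int n, double* x) {
    int j = threadIdx.x + blockDim.x * blockIdx.x;
    if (j < n) x[j] *= 2.0;
}

int main() {
    const int kBlockSize = 512;
    const int n = 1000;                              // one thread per element
    double* d_x = nullptr;
    cudaMalloc(&d_x, n * sizeof(double));
    cudaMemset(d_x, 0, n * sizeof(double));

    dim3 blocks((n + kBlockSize - 1) / kBlockSize);  // (1000 + 511) / 512 = 2
    dim3 threads(kBlockSize);
    toy_kernel<<<blocks, threads>>>(n, d_x);
    cudaDeviceSynchronize();

    std::printf("launched %u block(s) of %u threads for n=%d\n",
                blocks.x, threads.x, n);
    cudaFree(d_x);
    return 0;
}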
edc935155443dd5c580385f14b3f5412554b9e89.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" { #include "lua.h" #include "lualib.h" #include "lauxlib.h" } #include "luaT.h" #include "THH.h" #include <stdio.h> #include <assert.h> #include "rocblas.h" #include <thrust/device_vector.h> #include <thrust/transform.h> #include <thrust/reduce.h> #define TB 128 hipblasHandle_t handle; /* operations */ struct opPlus { public: static const float base_value = 0.0; __device__ float operator()(float x, float y) { return x + y; } }; struct opMinus { public: static const float base_value = 0.0; __device__ float operator()(float x, float y) { return x - y; } }; struct opMult { public: static const float base_value = 1.0; __device__ float operator()(float x, float y) { return x * y; } }; struct opDiv { public: static const float base_value = 1.0; __device__ float operator()(float x, float y) { return x / y; } }; struct opMax { public: static const float base_value = -2e38; __device__ float operator()(float x, float y) { return fmaxf(x, y); } }; struct opSMul { float alpha; public: opSMul(float alpha_) : alpha(alpha_) {}; __device__ float operator()(float x) { return alpha * x; } }; struct opExp { public: __device__ float operator()(float x) { return exp(x); } }; struct opSigmoid { public: __device__ float operator()(float x) { return 1 / (1 + exp(-x)); } }; struct opSigmoidDeriv { public: __device__ float operator()(float x, float y) { return x * y * (1 - y); } }; struct opTanh { public: __device__ float operator()(float x) { return 2 / (1 + exp(-2 * x)) - 1; } }; struct opTanhDeriv { public: __device__ float operator()(float x, float y) { return x * (1 - y * y); } }; struct opCCE { public: __device__ float operator()(float input, float target) { return target > 0 ? target * log(input) : 0; } }; /* Is A in column major format? */ int is_cm(THCudaTensor *A) { return A->stride[0] == 1; } void checkCudaError(lua_State *L) { hipError_t status = hipPeekAtLastError(); if (status != hipSuccess) { luaL_error(L, hipGetErrorString(status)); } } int cublas_init(lua_State *L) { assert(hipblasCreate(&handle) == HIPBLAS_STATUS_SUCCESS); return 0; } int dot(lua_State *L) { THCudaTensor *A = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *B = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *C = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); int trans_A = luaL_optint(L, 4, 0); int trans_B = luaL_optint(L, 5, 0); float alpha = luaL_optnumber(L, 6, 1.0); float beta = luaL_optnumber(L, 7, 0.0); assert(trans_A == 0 || trans_A == 1); assert(trans_B == 0 || trans_B == 1); if (!(A->nDimension == 2 && B->nDimension == 2 && C->nDimension == 2)) { luaL_error(L, "Matrices expected"); } if (!(is_cm(A) && is_cm(B) && is_cm(C))) { luaL_error(L, "Matrices not in column major order"); } int a = A->size[trans_A]; int b = A->size[1 - trans_A]; int c = B->size[trans_B]; int d = B->size[1 - trans_B]; if (b != c || a != C->size[0] || d != C->size[1]) { luaL_error(L, "Size mismatch"); } assert(hipblasSgemm(handle, trans_A ? HIPBLAS_OP_T : HIPBLAS_OP_N, trans_B ? 
HIPBLAS_OP_T : HIPBLAS_OP_N, a, d, c, &alpha, THCudaTensor_data(A), A->size[0], THCudaTensor_data(B), B->size[0], &beta, THCudaTensor_data(C), C->size[0]) == HIPBLAS_STATUS_SUCCESS); //assert(hipDeviceSynchronize() == HIPBLAS_STATUS_SUCCESS); return 0; } template<class Op> int transform1(Op op, lua_State *L) { THCudaTensor *A = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *B = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); int lenA = THCudaTensor_nElement(A); int lenB = THCudaTensor_nElement(B); if (!is_cm(A) || !is_cm(B)) { luaL_error(L, "Matrices not in column major order"); } if (lenA != lenB) { luaL_error(L, "Size mismatch"); } thrust::device_ptr<float> pA(THCudaTensor_data(A)); thrust::device_ptr<float> pB(THCudaTensor_data(B)); thrust::transform(pA, pA + lenA, pB, op); return 0; } template<class Op> int transform2(Op op, lua_State *L) { THCudaTensor *A = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *B = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *C = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); int lenA = THCudaTensor_nElement(A); int lenB = THCudaTensor_nElement(B); int lenC = THCudaTensor_nElement(C); if (!is_cm(A) || !is_cm(B) || !is_cm(C)) { luaL_error(L, "Matrices not in column major order"); } if (lenA != lenB || lenA != lenC) { luaL_error(L, "Size mismatch"); } thrust::device_ptr<float> pA(THCudaTensor_data(A)); thrust::device_ptr<float> pB(THCudaTensor_data(B)); thrust::device_ptr<float> pC(THCudaTensor_data(C)); thrust::transform(pA, pA + lenA, pB, pC, op); return 0; } int sigmoid(lua_State *L) { return transform1(opSigmoid(), L); } int mult_by_sigmoid_deriv(lua_State *L) { return transform2(opSigmoidDeriv(), L); } int tanh(lua_State *L) { return transform1(opTanh(), L); } int mult_by_tanh_deriv(lua_State *L) { return transform2(opTanhDeriv(), L); } int cce(lua_State *L) { THCudaTensor *C = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); transform2(opCCE(), L); thrust::device_ptr<float> pC(THCudaTensor_data(C)); float sum = thrust::reduce(pC, pC + THCudaTensor_nElement(C)); lua_pushnumber(L, -sum); return 1; } int _exp(lua_State *L) { return transform1(opExp(), L); } int smul(lua_State *L) { float alpha = luaL_checknumber(L, 3); return transform1(opSMul(alpha), L); } /* What a crazy bug! 
* * * * * */ template <class Op, int axis> __global__ void kMatVect(Op op, float *A, float *x, float *B, int len, int size0) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < len) { if (axis == 0) B[i] = op(A[i], x[i / size0]); if (axis == 1) B[i] = op(A[i], x[i % size0]); } } template <class Op> int mat_vect(Op op, lua_State *L) { THCudaTensor *A = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *x = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *B = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); int axis = luaL_checkint(L, 4); if (!is_cm(A) || !is_cm(B)) { luaL_error(L, "Matrix not in column major order"); } if (THCudaTensor_nElement(A) != THCudaTensor_nElement(B)) { luaL_error(L, "Size mismatch"); } int len = THCudaTensor_nElement(A); if (axis == 0) { if (A->size[1] != THCudaTensor_nElement(x)) { luaL_error(L, "Size mismatch"); } hipLaunchKernelGGL(( kMatVect<Op, 0>), dim3((len - 1) / TB + 1), dim3(TB), 0, 0, op, THCudaTensor_data(A), THCudaTensor_data(x), THCudaTensor_data(B), len, A->size[0]); } else if (axis == 1) { if (A->size[0] != THCudaTensor_nElement(x)) { luaL_error(L, "Size mismatch"); } hipLaunchKernelGGL(( kMatVect<Op, 1>), dim3((len - 1) / TB + 1), dim3(TB), 0, 0, op, THCudaTensor_data(A), THCudaTensor_data(x), THCudaTensor_data(B), len, A->size[0]); } checkCudaError(L); return 0; } int add_mat_vect(lua_State *L) { return mat_vect(opPlus(), L); } int sub_mat_vect(lua_State *L) { return mat_vect(opMinus(), L); } int mult_mat_vect(lua_State *L) { return mat_vect(opMult(), L); } int div_mat_vect(lua_State *L) { return mat_vect(opDiv(), L); } __global__ void kAdd(float *A, float *B, float *C, float alpha, int len) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < len) C[i] = A[i] + alpha * B[i]; } /* C = A + alpha * B */ int add(lua_State *L) { THCudaTensor *A = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *B = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *C = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); float alpha = luaL_optnumber(L, 4, 1.0); if (!(is_cm(A) && is_cm(B) && is_cm(C))) { luaL_error(L, "Matrices not in column major order"); } if (!(A->size[0] == B->size[0] && A->size[1] == B->size[1] && A->size[0] == C->size[0] && A->size[1] == C->size[1])) { luaL_error(L, "Size mismatch"); } int len = THCudaTensor_nElement(A); hipLaunchKernelGGL(( kAdd), dim3((len - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(A), THCudaTensor_data(B), THCudaTensor_data(C), alpha, len); checkCudaError(L); return 0; } /* What a crazy bug! 
* * * * * */ template <class Op> __global__ void kReduce(Op op, float *A, float *x, int n, int axis) { extern __shared__ float sdata[]; int i = threadIdx.x; sdata[i] = op.base_value; if (i < n) { if (axis == 0) { sdata[i] = A[threadIdx.x + n * blockIdx.x]; } else if (axis == 1) { sdata[i] = A[gridDim.x * threadIdx.x + blockIdx.x]; } } __syncthreads(); for (int s = blockDim.x >> 1; s > 0; s >>= 1) { if (i < s) { sdata[i] = op(sdata[i], sdata[i + s]); } __syncthreads(); } if (i == 0) { x[blockIdx.x] = sdata[0]; } } template <class Op> int reduce(Op op, lua_State *L) { int reduce_dim, other_dim; THCudaTensor *A = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *x = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); int axis = luaL_checkint(L, 3); if (!is_cm(A)) { luaL_error(L, "Matrix not in column major order"); } assert(axis == 0 || axis == 1); if (axis == 0) { reduce_dim = A->size[0]; other_dim = A->size[1]; } else if (axis == 1) { reduce_dim = A->size[1]; other_dim = A->size[0]; } assert(reduce_dim <= 1024); if (other_dim != THCudaTensor_nElement(x)) { luaL_error(L, "Size mismatch"); } int threads = 1; while(threads < reduce_dim) { threads = threads << 1; } hipLaunchKernelGGL(( kReduce<Op>), dim3(other_dim), dim3(threads), threads * sizeof(float), 0, op, THCudaTensor_data(A), THCudaTensor_data(x), reduce_dim, axis); checkCudaError(L); return 0; } int sum(lua_State *L) { return reduce(opPlus(), L); } int _max(lua_State *L) { return reduce(opMax(), L); } static const struct luaL_Reg funcs[] = { {"add", add}, {"add_mat_vect", add_mat_vect}, {"cce", cce}, {"cublas_init", cublas_init}, {"div_mat_vect", div_mat_vect}, {"dot", dot}, {"exp", _exp}, {"smul", smul}, {"max", _max}, {"mult_by_sigmoid_deriv", mult_by_sigmoid_deriv}, {"mult_by_tanh_deriv", mult_by_tanh_deriv}, {"mult_mat_vect", mult_mat_vect}, {"sigmoid", sigmoid}, {"sub_mat_vect", sub_mat_vect}, {"sum", sum}, {"tanh", tanh}, {NULL, NULL} }; extern "C" int luaopen_libct(lua_State *L) { luaL_openlib(L, "ct", funcs, 0); return 1; }
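// A minimal sketch of how reduce() above sizes its block: the thread count is
// the smallest power of two >= reduce_dim, and kReduce's tree reduction then
// halves s from blockDim.x/2 down to 1. The table below is illustration only.
#include <cstdio>

int main() {
    for (int reduce_dim : {1, 5, 64, 100, 1024}) {
        int threads = 1;
        while (threads < reduce_dim) threads <<= 1;   // same loop as reduce()
        std::printf("reduce_dim=%4d -> threads=%4d, shared mem=%5zu bytes\n",
                    reduce_dim, threads, threads * sizeof(float));
    }
    return 0;
}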
edc935155443dd5c580385f14b3f5412554b9e89.cu
extern "C" { #include "lua.h" #include "lualib.h" #include "lauxlib.h" } #include "luaT.h" #include "THC.h" #include <stdio.h> #include <assert.h> #include "cublas_v2.h" #include <thrust/device_vector.h> #include <thrust/transform.h> #include <thrust/reduce.h> #define TB 128 cublasHandle_t handle; /* operations */ struct opPlus { public: static const float base_value = 0.0; __device__ float operator()(float x, float y) { return x + y; } }; struct opMinus { public: static const float base_value = 0.0; __device__ float operator()(float x, float y) { return x - y; } }; struct opMult { public: static const float base_value = 1.0; __device__ float operator()(float x, float y) { return x * y; } }; struct opDiv { public: static const float base_value = 1.0; __device__ float operator()(float x, float y) { return x / y; } }; struct opMax { public: static const float base_value = -2e38; __device__ float operator()(float x, float y) { return fmaxf(x, y); } }; struct opSMul { float alpha; public: opSMul(float alpha_) : alpha(alpha_) {}; __device__ float operator()(float x) { return alpha * x; } }; struct opExp { public: __device__ float operator()(float x) { return exp(x); } }; struct opSigmoid { public: __device__ float operator()(float x) { return 1 / (1 + exp(-x)); } }; struct opSigmoidDeriv { public: __device__ float operator()(float x, float y) { return x * y * (1 - y); } }; struct opTanh { public: __device__ float operator()(float x) { return 2 / (1 + exp(-2 * x)) - 1; } }; struct opTanhDeriv { public: __device__ float operator()(float x, float y) { return x * (1 - y * y); } }; struct opCCE { public: __device__ float operator()(float input, float target) { return target > 0 ? target * log(input) : 0; } }; /* Is A in column major format? */ int is_cm(THCudaTensor *A) { return A->stride[0] == 1; } void checkCudaError(lua_State *L) { cudaError_t status = cudaPeekAtLastError(); if (status != cudaSuccess) { luaL_error(L, cudaGetErrorString(status)); } } int cublas_init(lua_State *L) { assert(cublasCreate(&handle) == CUBLAS_STATUS_SUCCESS); return 0; } int dot(lua_State *L) { THCudaTensor *A = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *B = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *C = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); int trans_A = luaL_optint(L, 4, 0); int trans_B = luaL_optint(L, 5, 0); float alpha = luaL_optnumber(L, 6, 1.0); float beta = luaL_optnumber(L, 7, 0.0); assert(trans_A == 0 || trans_A == 1); assert(trans_B == 0 || trans_B == 1); if (!(A->nDimension == 2 && B->nDimension == 2 && C->nDimension == 2)) { luaL_error(L, "Matrices expected"); } if (!(is_cm(A) && is_cm(B) && is_cm(C))) { luaL_error(L, "Matrices not in column major order"); } int a = A->size[trans_A]; int b = A->size[1 - trans_A]; int c = B->size[trans_B]; int d = B->size[1 - trans_B]; if (b != c || a != C->size[0] || d != C->size[1]) { luaL_error(L, "Size mismatch"); } assert(cublasSgemm(handle, trans_A ? CUBLAS_OP_T : CUBLAS_OP_N, trans_B ? 
CUBLAS_OP_T : CUBLAS_OP_N, a, d, c, &alpha, THCudaTensor_data(A), A->size[0], THCudaTensor_data(B), B->size[0], &beta, THCudaTensor_data(C), C->size[0]) == CUBLAS_STATUS_SUCCESS); //assert(cudaDeviceSynchronize() == CUBLAS_STATUS_SUCCESS); return 0; } template<class Op> int transform1(Op op, lua_State *L) { THCudaTensor *A = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *B = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); int lenA = THCudaTensor_nElement(A); int lenB = THCudaTensor_nElement(B); if (!is_cm(A) || !is_cm(B)) { luaL_error(L, "Matrices not in column major order"); } if (lenA != lenB) { luaL_error(L, "Size mismatch"); } thrust::device_ptr<float> pA(THCudaTensor_data(A)); thrust::device_ptr<float> pB(THCudaTensor_data(B)); thrust::transform(pA, pA + lenA, pB, op); return 0; } template<class Op> int transform2(Op op, lua_State *L) { THCudaTensor *A = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *B = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *C = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); int lenA = THCudaTensor_nElement(A); int lenB = THCudaTensor_nElement(B); int lenC = THCudaTensor_nElement(C); if (!is_cm(A) || !is_cm(B) || !is_cm(C)) { luaL_error(L, "Matrices not in column major order"); } if (lenA != lenB || lenA != lenC) { luaL_error(L, "Size mismatch"); } thrust::device_ptr<float> pA(THCudaTensor_data(A)); thrust::device_ptr<float> pB(THCudaTensor_data(B)); thrust::device_ptr<float> pC(THCudaTensor_data(C)); thrust::transform(pA, pA + lenA, pB, pC, op); return 0; } int sigmoid(lua_State *L) { return transform1(opSigmoid(), L); } int mult_by_sigmoid_deriv(lua_State *L) { return transform2(opSigmoidDeriv(), L); } int tanh(lua_State *L) { return transform1(opTanh(), L); } int mult_by_tanh_deriv(lua_State *L) { return transform2(opTanhDeriv(), L); } int cce(lua_State *L) { THCudaTensor *C = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); transform2(opCCE(), L); thrust::device_ptr<float> pC(THCudaTensor_data(C)); float sum = thrust::reduce(pC, pC + THCudaTensor_nElement(C)); lua_pushnumber(L, -sum); return 1; } int _exp(lua_State *L) { return transform1(opExp(), L); } int smul(lua_State *L) { float alpha = luaL_checknumber(L, 3); return transform1(opSMul(alpha), L); } /* What a crazy bug! 
* * * * * */ template <class Op, int axis> __global__ void kMatVect(Op op, float *A, float *x, float *B, int len, int size0) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < len) { if (axis == 0) B[i] = op(A[i], x[i / size0]); if (axis == 1) B[i] = op(A[i], x[i % size0]); } } template <class Op> int mat_vect(Op op, lua_State *L) { THCudaTensor *A = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *x = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *B = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); int axis = luaL_checkint(L, 4); if (!is_cm(A) || !is_cm(B)) { luaL_error(L, "Matrix not in column major order"); } if (THCudaTensor_nElement(A) != THCudaTensor_nElement(B)) { luaL_error(L, "Size mismatch"); } int len = THCudaTensor_nElement(A); if (axis == 0) { if (A->size[1] != THCudaTensor_nElement(x)) { luaL_error(L, "Size mismatch"); } kMatVect<Op, 0><<<(len - 1) / TB + 1, TB>>>(op, THCudaTensor_data(A), THCudaTensor_data(x), THCudaTensor_data(B), len, A->size[0]); } else if (axis == 1) { if (A->size[0] != THCudaTensor_nElement(x)) { luaL_error(L, "Size mismatch"); } kMatVect<Op, 1><<<(len - 1) / TB + 1, TB>>>(op, THCudaTensor_data(A), THCudaTensor_data(x), THCudaTensor_data(B), len, A->size[0]); } checkCudaError(L); return 0; } int add_mat_vect(lua_State *L) { return mat_vect(opPlus(), L); } int sub_mat_vect(lua_State *L) { return mat_vect(opMinus(), L); } int mult_mat_vect(lua_State *L) { return mat_vect(opMult(), L); } int div_mat_vect(lua_State *L) { return mat_vect(opDiv(), L); } __global__ void kAdd(float *A, float *B, float *C, float alpha, int len) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < len) C[i] = A[i] + alpha * B[i]; } /* C = A + alpha * B */ int add(lua_State *L) { THCudaTensor *A = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *B = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *C = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); float alpha = luaL_optnumber(L, 4, 1.0); if (!(is_cm(A) && is_cm(B) && is_cm(C))) { luaL_error(L, "Matrices not in column major order"); } if (!(A->size[0] == B->size[0] && A->size[1] == B->size[1] && A->size[0] == C->size[0] && A->size[1] == C->size[1])) { luaL_error(L, "Size mismatch"); } int len = THCudaTensor_nElement(A); kAdd<<<(len - 1) / TB + 1, TB>>>(THCudaTensor_data(A), THCudaTensor_data(B), THCudaTensor_data(C), alpha, len); checkCudaError(L); return 0; } /* What a crazy bug! 
* * * * * */ template <class Op> __global__ void kReduce(Op op, float *A, float *x, int n, int axis) { extern __shared__ float sdata[]; int i = threadIdx.x; sdata[i] = op.base_value; if (i < n) { if (axis == 0) { sdata[i] = A[threadIdx.x + n * blockIdx.x]; } else if (axis == 1) { sdata[i] = A[gridDim.x * threadIdx.x + blockIdx.x]; } } __syncthreads(); for (int s = blockDim.x >> 1; s > 0; s >>= 1) { if (i < s) { sdata[i] = op(sdata[i], sdata[i + s]); } __syncthreads(); } if (i == 0) { x[blockIdx.x] = sdata[0]; } } template <class Op> int reduce(Op op, lua_State *L) { int reduce_dim, other_dim; THCudaTensor *A = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *x = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); int axis = luaL_checkint(L, 3); if (!is_cm(A)) { luaL_error(L, "Matrix not in column major order"); } assert(axis == 0 || axis == 1); if (axis == 0) { reduce_dim = A->size[0]; other_dim = A->size[1]; } else if (axis == 1) { reduce_dim = A->size[1]; other_dim = A->size[0]; } assert(reduce_dim <= 1024); if (other_dim != THCudaTensor_nElement(x)) { luaL_error(L, "Size mismatch"); } int threads = 1; while(threads < reduce_dim) { threads = threads << 1; } kReduce<Op><<<other_dim, threads, threads * sizeof(float)>>>(op, THCudaTensor_data(A), THCudaTensor_data(x), reduce_dim, axis); checkCudaError(L); return 0; } int sum(lua_State *L) { return reduce(opPlus(), L); } int _max(lua_State *L) { return reduce(opMax(), L); } static const struct luaL_Reg funcs[] = { {"add", add}, {"add_mat_vect", add_mat_vect}, {"cce", cce}, {"cublas_init", cublas_init}, {"div_mat_vect", div_mat_vect}, {"dot", dot}, {"exp", _exp}, {"smul", smul}, {"max", _max}, {"mult_by_sigmoid_deriv", mult_by_sigmoid_deriv}, {"mult_by_tanh_deriv", mult_by_tanh_deriv}, {"mult_mat_vect", mult_mat_vect}, {"sigmoid", sigmoid}, {"sub_mat_vect", sub_mat_vect}, {"sum", sum}, {"tanh", tanh}, {NULL, NULL} }; extern "C" int luaopen_libct(lua_State *L) { luaL_openlib(L, "ct", funcs, 0); return 1; }
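The ct module above routes every element-wise operation through the same pattern: a tiny functor handed to thrust::transform via the transform1/transform2 templates. Below is a minimal standalone sketch of that pattern with the Torch/Lua glue stripped away; sigmoid_op, sigmoid_deriv_op and the vector length are illustrative names and sizes, not part of the library.

// Minimal sketch of the functor + thrust::transform pattern used by the
// ct library above (standalone; no Torch/Lua glue, illustrative sizes).
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/sequence.h>
#include <cmath>
#include <cstdio>

// Unary op, same shape as opSigmoid above.
struct sigmoid_op {
  __host__ __device__ float operator()(float x) const {
    return 1.0f / (1.0f + expf(-x));
  }
};

// Binary op: grad * y * (1 - y), same shape as opSigmoidDeriv above.
struct sigmoid_deriv_op {
  __host__ __device__ float operator()(float g, float y) const {
    return g * y * (1.0f - y);
  }
};

int main() {
  const int n = 8;
  thrust::device_vector<float> x(n), y(n), grad(n, 1.0f), out(n);
  thrust::sequence(x.begin(), x.end(), -4.0f);   // x = -4, -3, ..., 3

  // Unary transform: y[i] = sigmoid(x[i])
  thrust::transform(x.begin(), x.end(), y.begin(), sigmoid_op());

  // Binary transform: out[i] = grad[i] * y[i] * (1 - y[i])
  thrust::transform(grad.begin(), grad.end(), y.begin(), out.begin(),
                    sigmoid_deriv_op());

  for (int i = 0; i < n; ++i)
    printf("x=%g  sigmoid=%g  deriv=%g\n",
           (float)x[i], (float)y[i], (float)out[i]);
  return 0;
}

Compiled with nvcc, this reproduces the forward/backward pair that the library exposes to Lua as ct.sigmoid and ct.mult_by_sigmoid_deriv.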
07faddab007fa5b6e54c566e2918f7dc9c3fb3bc.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <time.h>
#include <stdio.h>

#define BLOCK_SIZE 1024

__device__ float function(float x)
{
    return x * x;
}

__global__ void init(unsigned int seed, hiprandState_t* states)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    hiprand_init(seed, blockIdx.x, 0, &states[id]);
}

__global__ void mc(int* ret, hiprandState_t* states, float a, float b, float h, float (*f)(float))
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    float x = hiprand_uniform(&states[i]) * (b-a) + a;
    float y = hiprand_uniform(&states[i]) * (h-0) + 0;
    if (function(x) >= y)
        atomicAdd(ret, 1);
}

int main()
{
    int* h_a;
    int* d_a;
    float a = 1.0;
    float b = 10.0;
    float h = 150.0;
    unsigned int grid = 1000;
    hipError_t err;
    hiprandState_t* states;
    unsigned int n = BLOCK_SIZE * grid;

    h_a = (int*)malloc(sizeof(int));
    *h_a = 0;

    hipMalloc((void**)&d_a, sizeof(int));
    hipMalloc((void**)&states, n * sizeof(hiprandState_t));
    hipMemcpy(d_a, h_a, sizeof(int), hipMemcpyHostToDevice);

    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    dim3 dimGrid(grid, 1, 1);

    hipLaunchKernelGGL(( init), dim3(dimGrid), dim3(dimBlock), 0, 0, (unsigned int) time(NULL), states);
    err = hipDeviceSynchronize();
    if (err != hipSuccess) {
        fprintf(stderr, "init : %s\n", hipGetErrorString(err));
        exit(-1);
    }

    hipLaunchKernelGGL(( mc), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, states, a, b, h, &function);
    err = hipDeviceSynchronize();
    if (err != hipSuccess) {
        fprintf(stderr, "mc : %s\n", hipGetErrorString(err));
        exit(-1);
    }

    hipMemcpy(h_a, d_a, sizeof(int), hipMemcpyDeviceToHost);

    printf("return: %d\n", *h_a);
    float result = ((b - a) * h) * ((float)*h_a / (float)n);
    printf("answer: %f\n", result);

    hipFree(d_a);
}
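For reference, the estimator that the init/mc kernels above implement is the standard hit-or-miss form; a short derivation (not taken from the file) in LaTeX:

% Sample (x, y) uniformly in the box [a,b] x [0,h] and count the hits y <= f(x).
\[
  \hat{I} = (b-a)\,h\,\frac{k}{n}, \qquad
  \mathbb{E}\bigl[\hat{I}\bigr] = (b-a)\,h\,\Pr\bigl(y \le f(x)\bigr) = \int_a^b f(x)\,dx,
\]
% valid as long as 0 <= f(x) <= h on [a,b]. With f(x) = x^2, a = 1, b = 10 and
% h = 150 as hard-coded above, the exact value is (10^3 - 1^3)/3 = 333, which the
% printed "answer" should approach as n = BLOCK_SIZE * grid grows.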
07faddab007fa5b6e54c566e2918f7dc9c3fb3bc.cu
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <time.h>
#include <stdio.h>

#define BLOCK_SIZE 1024

__device__ float function(float x)
{
    return x * x;
}

__global__ void init(unsigned int seed, curandState_t* states)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init(seed, blockIdx.x, 0, &states[id]);
}

__global__ void mc(int* ret, curandState_t* states, float a, float b, float h, float (*f)(float))
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    float x = curand_uniform(&states[i]) * (b-a) + a;
    float y = curand_uniform(&states[i]) * (h-0) + 0;
    if (function(x) >= y)
        atomicAdd(ret, 1);
}

int main()
{
    int* h_a;
    int* d_a;
    float a = 1.0;
    float b = 10.0;
    float h = 150.0;
    unsigned int grid = 1000;
    cudaError_t err;
    curandState_t* states;
    unsigned int n = BLOCK_SIZE * grid;

    h_a = (int*)malloc(sizeof(int));
    *h_a = 0;

    cudaMalloc((void**)&d_a, sizeof(int));
    cudaMalloc((void**)&states, n * sizeof(curandState_t));
    cudaMemcpy(d_a, h_a, sizeof(int), cudaMemcpyHostToDevice);

    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    dim3 dimGrid(grid, 1, 1);

    init<<<dimGrid, dimBlock>>>((unsigned int) time(NULL), states);
    err = cudaThreadSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "init : %s\n", cudaGetErrorString(err));
        exit(-1);
    }

    mc<<<dimGrid, dimBlock>>>(d_a, states, a, b, h, &function);
    err = cudaThreadSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "mc : %s\n", cudaGetErrorString(err));
        exit(-1);
    }

    cudaMemcpy(h_a, d_a, sizeof(int), cudaMemcpyDeviceToHost);

    printf("return: %d\n", *h_a);
    float result = ((b - a) * h) * ((float)*h_a / (float)n);
    printf("answer: %f\n", result);

    cudaFree(d_a);
}
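One caveat that applies to both the HIP and CUDA versions above: the mc kernel accepts a float (*f)(float) argument, but the launch passes &function from host code. CUDA does not allow taking the address of a __device__ function in host code, so the value passed for f is not a valid device function pointer; the kernel only works because it calls function(x) directly and never touches f. Below is a minimal sketch of the usual workaround, a __device__ function-pointer variable fetched with cudaMemcpyFromSymbol; the names square, d_square_ptr and apply are illustrative, not from the file.

// Sketch: passing a usable device-side function pointer to a kernel.
#include <cuda_runtime.h>
#include <cstdio>

typedef float (*func_t)(float);

__device__ float square(float x) { return x * x; }

// Device-side variable holding the address of the __device__ function.
// Its value is a valid device address, unlike &square taken in host code.
__device__ func_t d_square_ptr = square;

__global__ void apply(const float* in, float* out, int n, func_t f) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = f(in[i]);
}

int main() {
  const int n = 4;
  float h_in[n] = {1.f, 2.f, 3.f, 4.f}, h_out[n];
  float *d_in, *d_out;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, n * sizeof(float));
  cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);

  // Fetch the device function pointer to the host, then pass it by value.
  func_t h_fptr;
  cudaMemcpyFromSymbol(&h_fptr, d_square_ptr, sizeof(func_t));

  apply<<<1, n>>>(d_in, d_out, n, h_fptr);
  cudaDeviceSynchronize();

  cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) printf("%g -> %g\n", h_in[i], h_out[i]);

  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}

HIP offers the analogous hipMemcpyFromSymbol for the same purpose.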
154eb182ad0fb176610ddca40c6145a804b86cc2.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <float.h> //para el calculo de tiempos #include <sys/resource.h> #include <time.h> #include <sys/time.h> #define DIM 20 //#define DIM 254 //#define DIM 64 /* T_x_BLOCK es la cantidad de hilos por Bloque. Si la BD tiene menos elementos que T_x_BLOCK, se ejecutan tantos hilos como elementos hayan */ #define T_x_BLOCK 64 #define ERROR -1 #define TOPK 32 //#define NE 95325 //Num de elementos //#define NE 499865 //#define NE 999987 //#define NE 1699798 //#define NE 200000 //#define NE 500000 //#define NE 1000000 //#define NE 1500000 //#define NE 2000000 //#define NE 8480 //Num de elementos #define NE 3999 #define TAM_WARP 32 //Num de threads maximo de un warp /* El valor Q es la cantidad de consultas lanzadas en un kernel. Q depende de la cantidad de memoria en la GPU */ //#define Q 3972 //NASA_80 //#define Q 2979 //NASA_200000 //#define Q 1324 //NASA_500000 //#define Q 662 //NASA_999996 #define Q 442 //NASA_1500000 // Cophir 500000 //#define Q 331 //NASA_2000000 // Cophir 1000000 // Cophir 1700000 struct _Elem { double dist; int ind; }; typedef struct _Elem Elem; __device__ void insertaH(Elem *heap, Elem *elem, int *n_elem, int pitch, int id); __device__ void extraeH(Elem *heap, int *n_elem, int pitch, int id, Elem *eresult); __device__ double topH(Elem *heap, int id); __device__ void popush(Elem *heap, Elem *elem, int *n_elem, int pitch, int id); __global__ void Batch_Heap_Reduction(double *DB_dev, int pitch_DB, Elem *heap, int pitch_H, double *QUERY_dev, int pitch_QUERY, Elem *arr_Dist, int pitch_Dist, int beginQ, double *res_final); __device__ double distancia_trans(double *p1, int pitch_p1, int col_1, double *q); void imprime_trans(double **MAT, int col); int leedato(double *dato, FILE *file); int leedato_cophir(double *dato, FILE *file); int leedato_trans(double **dato, FILE *file, int col); int leedato_trans_cophir(double **dato, FILE *file, int col); int N_QUERIES; //double vectores[DIM][NE]; main(int argc, char *argv[]) { int i, N_ELEM, dimension, tam_elem, j; FILE *pf; double **vectores; struct rusage r1, r2; double user_time, sys_time, real_time; struct timeval t1, t2; double *Elems, *QUERY_dev; double **consultas, *res_final, *res_final_H; int retorno, T_per_BLOCK, N_BLOQUES, contQ, cont; Elem *HEAPS_dev, *arr_res1, *arr_res1H, *arr_Dist; size_t pitch, pitch_H, pitch_Q, pitch_Dist; int *resT, *resTH; long long ED_total=0; double prom, prom_cont; // hipSetDevice(1); if (argc != 6) { printf("\nEjecutar como: a.out archivo_BD archivo_queries N_DB N_QUERIES DIM\n"); return 0; } if ((pf = fopen(argv[1], "r")) == NULL) { printf("\nNo se pudo abrir el archivo %s\n" ,argv[1]); return 0; } N_ELEM = atoi(argv[3]); N_QUERIES = atoi(argv[4]); dimension = atoi(argv[5]); if (dimension != DIM ) { printf("\nERROR :: dimension != DIM\n"); return 0; } printf("\nCant. 
Elementos=%d :: dimension=%d\n" , N_ELEM, dimension); fflush(stdout); if (N_ELEM != NE) { printf("\nERORR :: N_ELEM != NE\n"); return 0; } if (TOPK > N_DB){ printf("ERROR :: TOPK muy grande debe ser menor de numero de elementos de la base de datos\n"); } if (T_x_BLOCK > N_ELEM) T_per_BLOCK = N_ELEM; else T_per_BLOCK = T_x_BLOCK; if (hipSuccess != hipMalloc((void **)&res_final, sizeof(double)*Q*TOPK)) { printf("\nERROR 21 :: hipMalloc\n"); hipDeviceReset(); return 0; } res_final_H = (double *)malloc(sizeof(double)*Q*TOPK); for (i=0; i<Q*TOPK; i++) { res_final_H[i] = 0; } if (hipSuccess != hipMemset(res_final, 0, sizeof(double)*Q*TOPK)) { printf("\nERROR :: hipMemset\n"); hipDeviceReset(); return 0; } //HEAPS_dev[TOPK][Q*T_per_BLOCK] if (hipSuccess != hipMallocPitch((void **)&HEAPS_dev, &pitch_H, sizeof(Elem)*Q*T_per_BLOCK, (size_t)TOPK)) { printf("\nERROR 4 :: hipMallocPitch :: Heaps_dev col=%lld :: row=%d\n", (long long)(sizeof(Elem)*Q*T_per_BLOCK), TOPK); hipDeviceReset(); return 0; } Elem *linea_temp = (Elem *)malloc(sizeof(Elem)*Q*T_per_BLOCK); for (i=0 ; i < Q*T_per_BLOCK; i++) { linea_temp[i].ind = -1; linea_temp[i].dist = DBL_MAX; //DBL_MAX es el maximo valor para un double segun float.h } for (i=0 ; i < TOPK; i++) if (hipSuccess != hipMemcpy((Elem *)((char *)HEAPS_dev + (i*(int)pitch_H)), (Elem *)linea_temp, sizeof(Elem)*Q*T_per_BLOCK, hipMemcpyHostToDevice)) { printf("\nERROR :: hipMemcpy\n"); hipDeviceReset(); return 0; } //arr_Dist[Q][N_ELEM] if (hipSuccess != hipMallocPitch((void **)&arr_Dist, &pitch_Dist, N_ELEM*sizeof(Elem), (size_t)Q)) { printf("\nERROR 41 :: hipMallocPitch\n"); hipDeviceReset(); return 0; } vectores =(double **)malloc(sizeof(double *)*dimension); for (i=0; i<dimension; i++) vectores[i] = (double *)malloc(sizeof(double)*N_ELEM); for (i=0; i<N_ELEM; i++) { // printf("Leyendo vectores[%d] : ", i); for (j=0; j<dimension; j++) { fscanf(pf, "%lf", &vectores[j][i]); // printf("%lf ", vectores[i][j]); } // printf("\n"); fgetc(pf); } fclose(pf); //Elems[dimension][N_ELEM] if (hipSuccess != hipMallocPitch((void **)&Elems, (size_t *)&pitch, N_ELEM*sizeof(double), (size_t)dimension)) printf("\nERROR :: hipMallocPitch 4\n"); for (i=0; i < dimension; i++) { retorno = hipMemcpy((double *)((char *)Elems + (i*(int)pitch)), (double *)(vectores[i]), sizeof(double)*N_ELEM, hipMemcpyHostToDevice); if (retorno != hipSuccess) { switch(retorno) { case hipErrorInvalidPitchValue: printf("\nERROR 2 -> hipErrorInvalidPitchValue:\n"); break; case hipErrorInvalidDevicePointer: printf("\nERROR 2 -> hipErrorInvalidDevicePointer:\n"); break; case hipErrorInvalidMemcpyDirection: printf("\nERROR 2 -> hipErrorInvalidMemcpyDirection:\n"); break; case hipErrorInvalidValue: printf("\nERROR 2 -> hipErrorInvalidValue :: i=%d :: pitch=%d\n", i, pitch); break; default: printf("\nERROR 2 -> Checkear esto.\n"); break; } return 0; } } consultas =(double **)malloc(sizeof(double *)*N_QUERIES); for (i=0; i<N_QUERIES; i++) consultas[i] = (double *)malloc(sizeof(double)*dimension); //Leo las queries if ((pf = fopen(argv[2], "r")) == NULL) { printf("\nNo se pudo abrir el archivo %s\n" ,argv[2]); return 0; } /* fgets(linea, tam_lin-1, pf); fscanf(pf, "%d", &N_QUERIES); fscanf(pf, "%d", &dimension); fscanf(pf, "%d", &tam_elem); fgetc(pf); */ printf("\n\nArchivo de Queries:\nCant. 
Elementos=%d :: dimension=%d\n" , N_QUERIES, dimension); for (i=0; i<N_QUERIES; i++) { if (leedato(consultas[i], pf) == -1) { printf("\nError al leer Consultas\n"); hipDeviceReset(); return 0; } } fclose(pf); //QUERY_dev[N_QUERIES][dimension] if (hipSuccess != hipMallocPitch((void **)&QUERY_dev, (size_t *)&pitch_Q, dimension*sizeof(double), (size_t)N_QUERIES)) printf("\nERROR :: hipMallocPitch 1\n"); for (i=0; i < N_QUERIES; i++) { if (hipSuccess != hipMemcpy((char *)QUERY_dev + (i*(int)pitch_Q), consultas[i], sizeof(double)*dimension, hipMemcpyHostToDevice)) printf("\nERROR 3 :: hipMemcpy\n"); } N_BLOQUES = Q; contQ = 0; cont = 0; getrusage(RUSAGE_SELF, &r1); gettimeofday(&t1, 0); while(contQ < N_QUERIES) { contQ += Q; if (contQ > N_QUERIES) N_BLOQUES = N_QUERIES - (contQ-Q); printf("\nN_BLOQUES = %d :: T_per_BLOCK = %d\n", N_BLOQUES, T_per_BLOCK); hipLaunchKernelGGL(( Batch_Heap_Reduction), dim3(N_BLOQUES), dim3(T_per_BLOCK), 0, 0, Elems, (int)pitch, HEAPS_dev, (int)pitch_H, QUERY_dev, (int)pitch_Q, arr_Dist, (int)pitch_Dist, Q*cont, res_final); if (hipSuccess != hipMemcpy((double *)res_final_H, (double *)res_final, sizeof(double)*Q*TOPK, hipMemcpyDeviceToHost)) { printf("\nERROR 41 :: hipMemcpy :: iteraH\n"); hipDeviceReset(); return 0; } cont++; } gettimeofday(&t2, 0); getrusage(RUSAGE_SELF, &r2); for (i=0; i<N_BLOQUES; i++) { printf("\n\nResults array %d (smallest distances):", i); for (j=TOPK*i; j<(TOPK*i)+TOPK; j++) printf("\nquery = %d :: dist = %lf", i, res_final_H[j]); } printf("\n"); user_time = (r2.ru_utime.tv_sec - r1.ru_utime.tv_sec) + (r2.ru_utime.tv_usec - r1.ru_utime.tv_usec)/1000000.0; sys_time = (r2.ru_stime.tv_sec - r1.ru_stime.tv_sec) + (r2.ru_stime.tv_usec - r1.ru_stime.tv_usec)/1000000.0; real_time = (t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec)/1000000; prom = 0; prom_cont = 0; for (i=0; i<Q; i++) { if (res_final_H[i] != 0) { prom += res_final_H[i]; prom_cont += 1; } } printf("\nK = %d", TOPK); printf("\nTiempo CPU = %f", user_time + sys_time); printf("\nTiempo Real = %f", real_time); printf("\nprom = %lf\n", (double)(prom/(double)prom_cont)); fflush(stdout); hipFree(Elems); hipFree(QUERY_dev); hipFree(HEAPS_dev); hipFree(arr_Dist); hipDeviceReset(); return 0; } __device__ void insertaH(Elem *heap, Elem *elem, int *n_elem, int pitch, int id) { int i; Elem temp; ((Elem *)((char *)heap + (*n_elem)*pitch))[id].dist = elem->dist; ((Elem *)((char *)heap + (*n_elem)*pitch))[id].ind = elem->ind; (*n_elem)++; for (i = *n_elem; i>1 && ((Elem *)((char *)heap + (i-1)*pitch))[id].dist > ((Elem *)((char *)heap + ((i/2)-1)*pitch))[id].dist; i=i/2) { //Intercambiamos con el padre temp.dist = ((Elem *)((char *)heap + (i-1)*pitch))[id].dist; temp.ind = ((Elem *)((char *)heap + (i-1)*pitch))[id].ind; ((Elem *)((char *)heap + (i-1)*pitch))[id].dist = ((Elem *)((char *)heap + ((i/2)-1)*pitch))[id].dist; ((Elem *)((char *)heap + (i-1)*pitch))[id].ind = ((Elem *)((char *)heap + ((i/2)-1)*pitch))[id].ind; ((Elem *)((char *)heap + ((i/2)-1)*pitch))[id].dist = temp.dist; ((Elem *)((char *)heap + ((i/2)-1)*pitch))[id].ind = temp.ind; } return; } __device__ void extraeH(Elem *heap, int *n_elem, int pitch, int id, Elem *eresult) { int i, k; Elem temp; eresult->dist = ((Elem *)((char *)heap+0))[id].dist; //Se guarda el maximo eresult->ind = ((Elem *)((char *)heap+0))[id].ind; ((Elem *)((char *)heap+0))[id].dist = ((Elem *)((char *)heap + ((*n_elem)-1)*pitch))[id].dist;// Movemos el ultimo a la raiz y achicamos el heap ((Elem *)((char *)heap+0))[id].ind = ((Elem *)((char *)heap + 
((*n_elem)-1)*pitch))[id].ind; (*n_elem)--; i = 1; while(2*i <= *n_elem) // mientras tenga algun hijo { k = 2*i; //el hijo izquierdo if(k+1 <= *n_elem && ((Elem *)((char *)heap + ((k+1)-1)*pitch))[id].dist > ((Elem *)((char *)heap + (k-1)*pitch))[id].dist) k = k+1; //el hijo derecho es el mayor if(((Elem *)((char *)heap + (i-1)*pitch))[id].dist > ((Elem *)((char *)heap + (k-1)*pitch))[id].dist) break; //es mayor que ambos hijos temp.dist = ((Elem *)((char *)heap + (i-1)*pitch))[id].dist; temp.ind = ((Elem *)((char *)heap + (i-1)*pitch))[id].ind; ((Elem *)((char *)heap + (i-1)*pitch))[id].dist = ((Elem *)((char *)heap + (k-1)*pitch))[id].dist; ((Elem *)((char *)heap + (i-1)*pitch))[id].ind = ((Elem *)((char *)heap + (k-1)*pitch))[id].ind; ((Elem *)((char *)heap + (k-1)*pitch))[id].dist = temp.dist; ((Elem *)((char *)heap + (k-1)*pitch))[id].ind = temp.ind; i = k; //lo intercambiamos con el mayor hijo } return; // return max; } __device__ double topH(Elem *heap, int id) { return ((Elem *)((char *)heap + 0))[id].dist; } __device__ void popush(Elem *heap, Elem *elem, int *n_elem, int pitch, int id) { int i, k; Elem temp; ((Elem *)((char *)heap+0))[id].dist = elem->dist; ((Elem *)((char *)heap+0))[id].ind = elem->ind; i = 1; while(2*i <= *n_elem) // mientras tenga algun hijo { k = 2*i; //el hijo izquierdo if(k+1 <= *n_elem && ((Elem *)((char *)heap + ((k+1)-1)*pitch))[id].dist > ((Elem *)((char *)heap + (k-1)*pitch))[id].dist) k = k+1; //el hijo derecho es el mayor if(((Elem *)((char *)heap + (i-1)*pitch))[id].dist > ((Elem *)((char *)heap + (k-1)*pitch))[id].dist) break; //es mayor que ambos hijos temp.dist = ((Elem *)((char *)heap + (i-1)*pitch))[id].dist; temp.ind = ((Elem *)((char *)heap + (i-1)*pitch))[id].ind; ((Elem *)((char *)heap + (i-1)*pitch))[id].dist = ((Elem *)((char *)heap + (k-1)*pitch))[id].dist; ((Elem *)((char *)heap + (i-1)*pitch))[id].ind = ((Elem *)((char *)heap + (k-1)*pitch))[id].ind; ((Elem *)((char *)heap + (k-1)*pitch))[id].dist = temp.dist; ((Elem *)((char *)heap + (k-1)*pitch))[id].ind = temp.ind; i = k; //lo intercambiamos con el mayor hijo } return; } __global__ void Batch_Heap_Reduction(double *DB_dev, int pitch_DB, Elem *heap, int pitch_H, double *QUERY_dev, int pitch_QUERY, Elem *arr_Dist, int pitch_Dist, int beginQ, double *res_final) { int i, j, n_elem=0, n_elemWarp=0; int id; Elem eresult; __shared__ Elem matrizWarp[TOPK][TAM_WARP]; __shared__ Elem heapfin[TOPK][1]; __shared__ double query[DIM]; // int ED=0; id = threadIdx.x + (blockDim.x * blockIdx.x); //Se copia la Query a mem. compartida for (i=threadIdx.x; i < DIM; i += blockDim.x) query[i] = ((double *)((char *)QUERY_dev + ((blockIdx.x + beginQ) * (int)pitch_QUERY)))[i]; __syncthreads(); //Se obtiene el arreglo de distancias for (i=threadIdx.x; i < NE; i += blockDim.x) { // ED++; ((Elem *)((char *)arr_Dist + (blockIdx.x*pitch_Dist)))[i].dist = distancia_trans(DB_dev, pitch_DB, i, query); ((Elem *)((char *)arr_Dist + (blockIdx.x*pitch_Dist)))[i].ind = i; } for(i=threadIdx.x; i < NE; i += blockDim.x)//NE = Numero de elementos de la BD { if (n_elem >= TOPK) { if (topH(heap, id) > ((Elem *)((char *)arr_Dist + (blockIdx.x*pitch_Dist)))[i].dist) popush(heap, &(((Elem *)((char *)arr_Dist + (blockIdx.x*pitch_Dist)))[i]), &n_elem, pitch_H, id); //Extrae e inserta en una operacion } else insertaH(heap, &(((Elem *)((char *)arr_Dist + (blockIdx.x*pitch_Dist)))[i]), &n_elem, pitch_H, id); } __syncthreads(); //Un warp reduce el problema a una matriz de Kx32 distancias. 
PEro esta vez los heaps se almacenan en Memoria Shared if (threadIdx.x < TAM_WARP) { for(j=id; j < blockDim.x*(blockIdx.x+1); j += TAM_WARP) { n_elem = TOPK; for(i=0; i < TOPK; i++) { extraeH(heap, &n_elem, pitch_H, j, &eresult); if (n_elemWarp < TOPK) insertaH(&(matrizWarp[0][0]), &eresult, &n_elemWarp, sizeof(Elem)*TAM_WARP, threadIdx.x); else if (topH(&(matrizWarp[0][0]), threadIdx.x) > eresult.dist) popush(&(matrizWarp[0][0]), &eresult, &n_elemWarp, sizeof(Elem)*TAM_WARP, threadIdx.x); } } } __syncthreads(); //Un hilo encuentra los K-NN a partir de la matriz de TOPKxTAM_WARP if (threadIdx.x == 0) { n_elem = 0; for(j=0; j < TAM_WARP; j++) { for(i=0; i < TOPK; i++) if (n_elem < TOPK) insertaH((Elem *)heapfin, &(matrizWarp[i][j]), &n_elem, sizeof(Elem), 0); else if (topH((Elem *)heapfin, 0) > matrizWarp[i][j].dist) popush((Elem *)heapfin, &(matrizWarp[i][j]), &n_elem, sizeof(Elem), 0); } //Escribiendo algunos resultados //res_final[blockIdx.x] = topH((Elem *)heapfin, 0); for (i=TOPK*blockIdx.x; i < (TOPK*blockIdx.x)+TOPK; i++) { extraeH(&(heapfin[0][0]), &n_elem, sizeof(Elem), 0, &eresult); res_final[i] = eresult.dist; } } // atomicAdd(&(resT[blockIdx.x]), ED); return; } __device__ double distancia_trans(double *p1, int pitch_p1, int col_1, double *q) { int i=0; double suma=0; for (i=0; i < DIM; i++) suma += (((double *)((char *)p1 + (i*pitch_p1)))[col_1] - q[i]) * (((double *)((char *)p1 + (i*pitch_p1)))[col_1] - q[i]); return sqrtf(suma); } void imprime_trans(double **MAT, int col) { int i; for (i=0; i<DIM; i++) printf("%lf ", MAT[i][col]); return; } int leedato(double *dato, FILE *file) { int i=0; for (i=0;i<DIM;i++) if (fscanf(file,"%lf",&dato[i])<1) return -1; return 1; } int leedato_cophir(double *dato, FILE *file) { int i=0; int num_f; for (i=0;i<DIM;i++) { if (fscanf(file, "%d", &num_f) < 1) return ERROR; dato[i] = (double)num_f; if (i+1 < DIM) if (fgetc(file) != ',') { printf("\nERROR :: ',' no encontrada\n"); return ERROR; } } return 1; } int leedato_trans(double **dato, FILE *file, int col) { int i=0; for (i=0;i<DIM;i++) if (fscanf(file,"%lf",&(dato[i][col]))<1) return -1; return 1; } int leedato_trans_cophir(double **dato, FILE *file, int col) { int i=0; int num_f; for (i=0;i<DIM;i++) { if (fscanf(file, "%d", &num_f) < 1) return ERROR; dato[i][col] = (double)num_f; if (i+1 < DIM) if (fgetc(file) != ',') { printf("\nERROR :: ',' no encontrada\n"); return ERROR; } } return 1; }
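Every 2-D array in the program above (the per-thread heaps, the per-query distance array, the transposed database and the query matrix) is addressed through the ((char *)base + row * pitch) idiom that hipMallocPitch requires. Below is a minimal standalone sketch of that allocation and indexing pattern, written with the CUDA spellings (the HIP calls have the same signatures with a hip prefix); sizes and names are illustrative. As a side note, the guard if (TOPK > N_DB) in both versions references an N_DB that is never declared; from the surrounding checks it presumably means N_ELEM.

// Minimal sketch of the cudaMallocPitch / row-pitch indexing idiom used by
// the heaps and distance arrays above (illustrative sizes and names).
#include <cuda_runtime.h>
#include <cstdio>

__global__ void fill(double* data, size_t pitch, int rows, int cols) {
  int c = blockIdx.x * blockDim.x + threadIdx.x;
  if (c >= cols) return;
  for (int r = 0; r < rows; ++r) {
    // Row r starts pitch bytes after row r-1, hence the char* arithmetic.
    double* row = (double*)((char*)data + r * pitch);
    row[c] = 100.0 * r + c;
  }
}

int main() {
  const int rows = 4, cols = 10;
  double* d_mat;
  size_t pitch;   // bytes per padded row, >= cols * sizeof(double)
  cudaMallocPitch((void**)&d_mat, &pitch, cols * sizeof(double), rows);

  fill<<<1, 32>>>(d_mat, pitch, rows, cols);
  cudaDeviceSynchronize();

  // cudaMemcpy2D understands the pitch, so rows land contiguously on the host.
  double h_mat[rows][cols];
  cudaMemcpy2D(h_mat, cols * sizeof(double), d_mat, pitch,
               cols * sizeof(double), rows, cudaMemcpyDeviceToHost);

  for (int r = 0; r < rows; ++r) {
    for (int c = 0; c < cols; ++c) printf("%6.0f", h_mat[r][c]);
    printf("\n");
  }
  cudaFree(d_mat);
  return 0;
}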
154eb182ad0fb176610ddca40c6145a804b86cc2.cu
#include <stdio.h> #include <cuda.h> #include <float.h> //para el calculo de tiempos #include <sys/resource.h> #include <time.h> #include <sys/time.h> #define DIM 20 //#define DIM 254 //#define DIM 64 /* T_x_BLOCK es la cantidad de hilos por Bloque. Si la BD tiene menos elementos que T_x_BLOCK, se ejecutan tantos hilos como elementos hayan */ #define T_x_BLOCK 64 #define ERROR -1 #define TOPK 32 //#define NE 95325 //Num de elementos //#define NE 499865 //#define NE 999987 //#define NE 1699798 //#define NE 200000 //#define NE 500000 //#define NE 1000000 //#define NE 1500000 //#define NE 2000000 //#define NE 8480 //Num de elementos #define NE 3999 #define TAM_WARP 32 //Num de threads maximo de un warp /* El valor Q es la cantidad de consultas lanzadas en un kernel. Q depende de la cantidad de memoria en la GPU */ //#define Q 3972 //NASA_80 //#define Q 2979 //NASA_200000 //#define Q 1324 //NASA_500000 //#define Q 662 //NASA_999996 #define Q 442 //NASA_1500000 // Cophir 500000 //#define Q 331 //NASA_2000000 // Cophir 1000000 // Cophir 1700000 struct _Elem { double dist; int ind; }; typedef struct _Elem Elem; __device__ void insertaH(Elem *heap, Elem *elem, int *n_elem, int pitch, int id); __device__ void extraeH(Elem *heap, int *n_elem, int pitch, int id, Elem *eresult); __device__ double topH(Elem *heap, int id); __device__ void popush(Elem *heap, Elem *elem, int *n_elem, int pitch, int id); __global__ void Batch_Heap_Reduction(double *DB_dev, int pitch_DB, Elem *heap, int pitch_H, double *QUERY_dev, int pitch_QUERY, Elem *arr_Dist, int pitch_Dist, int beginQ, double *res_final); __device__ double distancia_trans(double *p1, int pitch_p1, int col_1, double *q); void imprime_trans(double **MAT, int col); int leedato(double *dato, FILE *file); int leedato_cophir(double *dato, FILE *file); int leedato_trans(double **dato, FILE *file, int col); int leedato_trans_cophir(double **dato, FILE *file, int col); int N_QUERIES; //double vectores[DIM][NE]; main(int argc, char *argv[]) { int i, N_ELEM, dimension, tam_elem, j; FILE *pf; double **vectores; struct rusage r1, r2; double user_time, sys_time, real_time; struct timeval t1, t2; double *Elems, *QUERY_dev; double **consultas, *res_final, *res_final_H; int retorno, T_per_BLOCK, N_BLOQUES, contQ, cont; Elem *HEAPS_dev, *arr_res1, *arr_res1H, *arr_Dist; size_t pitch, pitch_H, pitch_Q, pitch_Dist; int *resT, *resTH; long long ED_total=0; double prom, prom_cont; // cudaSetDevice(1); if (argc != 6) { printf("\nEjecutar como: a.out archivo_BD archivo_queries N_DB N_QUERIES DIM\n"); return 0; } if ((pf = fopen(argv[1], "r")) == NULL) { printf("\nNo se pudo abrir el archivo %s\n" ,argv[1]); return 0; } N_ELEM = atoi(argv[3]); N_QUERIES = atoi(argv[4]); dimension = atoi(argv[5]); if (dimension != DIM ) { printf("\nERROR :: dimension != DIM\n"); return 0; } printf("\nCant. 
Elementos=%d :: dimension=%d\n" , N_ELEM, dimension); fflush(stdout); if (N_ELEM != NE) { printf("\nERORR :: N_ELEM != NE\n"); return 0; } if (TOPK > N_DB){ printf("ERROR :: TOPK muy grande debe ser menor de numero de elementos de la base de datos\n"); } if (T_x_BLOCK > N_ELEM) T_per_BLOCK = N_ELEM; else T_per_BLOCK = T_x_BLOCK; if (cudaSuccess != cudaMalloc((void **)&res_final, sizeof(double)*Q*TOPK)) { printf("\nERROR 21 :: cudaMalloc\n"); cudaThreadExit(); return 0; } res_final_H = (double *)malloc(sizeof(double)*Q*TOPK); for (i=0; i<Q*TOPK; i++) { res_final_H[i] = 0; } if (cudaSuccess != cudaMemset(res_final, 0, sizeof(double)*Q*TOPK)) { printf("\nERROR :: cudaMemset\n"); cudaThreadExit(); return 0; } //HEAPS_dev[TOPK][Q*T_per_BLOCK] if (cudaSuccess != cudaMallocPitch((void **)&HEAPS_dev, &pitch_H, sizeof(Elem)*Q*T_per_BLOCK, (size_t)TOPK)) { printf("\nERROR 4 :: cudaMallocPitch :: Heaps_dev col=%lld :: row=%d\n", (long long)(sizeof(Elem)*Q*T_per_BLOCK), TOPK); cudaThreadExit(); return 0; } Elem *linea_temp = (Elem *)malloc(sizeof(Elem)*Q*T_per_BLOCK); for (i=0 ; i < Q*T_per_BLOCK; i++) { linea_temp[i].ind = -1; linea_temp[i].dist = DBL_MAX; //DBL_MAX es el maximo valor para un double segun float.h } for (i=0 ; i < TOPK; i++) if (cudaSuccess != cudaMemcpy((Elem *)((char *)HEAPS_dev + (i*(int)pitch_H)), (Elem *)linea_temp, sizeof(Elem)*Q*T_per_BLOCK, cudaMemcpyHostToDevice)) { printf("\nERROR :: cudaMemcpy\n"); cudaThreadExit(); return 0; } //arr_Dist[Q][N_ELEM] if (cudaSuccess != cudaMallocPitch((void **)&arr_Dist, &pitch_Dist, N_ELEM*sizeof(Elem), (size_t)Q)) { printf("\nERROR 41 :: cudaMallocPitch\n"); cudaThreadExit(); return 0; } vectores =(double **)malloc(sizeof(double *)*dimension); for (i=0; i<dimension; i++) vectores[i] = (double *)malloc(sizeof(double)*N_ELEM); for (i=0; i<N_ELEM; i++) { // printf("Leyendo vectores[%d] : ", i); for (j=0; j<dimension; j++) { fscanf(pf, "%lf", &vectores[j][i]); // printf("%lf ", vectores[i][j]); } // printf("\n"); fgetc(pf); } fclose(pf); //Elems[dimension][N_ELEM] if (cudaSuccess != cudaMallocPitch((void **)&Elems, (size_t *)&pitch, N_ELEM*sizeof(double), (size_t)dimension)) printf("\nERROR :: cudaMallocPitch 4\n"); for (i=0; i < dimension; i++) { retorno = cudaMemcpy((double *)((char *)Elems + (i*(int)pitch)), (double *)(vectores[i]), sizeof(double)*N_ELEM, cudaMemcpyHostToDevice); if (retorno != cudaSuccess) { switch(retorno) { case cudaErrorInvalidPitchValue: printf("\nERROR 2 -> cudaErrorInvalidPitchValue:\n"); break; case cudaErrorInvalidDevicePointer: printf("\nERROR 2 -> cudaErrorInvalidDevicePointer:\n"); break; case cudaErrorInvalidMemcpyDirection: printf("\nERROR 2 -> cudaErrorInvalidMemcpyDirection:\n"); break; case cudaErrorInvalidValue: printf("\nERROR 2 -> cudaErrorInvalidValue :: i=%d :: pitch=%d\n", i, pitch); break; default: printf("\nERROR 2 -> Checkear esto.\n"); break; } return 0; } } consultas =(double **)malloc(sizeof(double *)*N_QUERIES); for (i=0; i<N_QUERIES; i++) consultas[i] = (double *)malloc(sizeof(double)*dimension); //Leo las queries if ((pf = fopen(argv[2], "r")) == NULL) { printf("\nNo se pudo abrir el archivo %s\n" ,argv[2]); return 0; } /* fgets(linea, tam_lin-1, pf); fscanf(pf, "%d", &N_QUERIES); fscanf(pf, "%d", &dimension); fscanf(pf, "%d", &tam_elem); fgetc(pf); */ printf("\n\nArchivo de Queries:\nCant. 
Elementos=%d :: dimension=%d\n" , N_QUERIES, dimension); for (i=0; i<N_QUERIES; i++) { if (leedato(consultas[i], pf) == -1) { printf("\nError al leer Consultas\n"); cudaThreadExit(); return 0; } } fclose(pf); //QUERY_dev[N_QUERIES][dimension] if (cudaSuccess != cudaMallocPitch((void **)&QUERY_dev, (size_t *)&pitch_Q, dimension*sizeof(double), (size_t)N_QUERIES)) printf("\nERROR :: cudaMallocPitch 1\n"); for (i=0; i < N_QUERIES; i++) { if (cudaSuccess != cudaMemcpy((char *)QUERY_dev + (i*(int)pitch_Q), consultas[i], sizeof(double)*dimension, cudaMemcpyHostToDevice)) printf("\nERROR 3 :: cudaMemcpy\n"); } N_BLOQUES = Q; contQ = 0; cont = 0; getrusage(RUSAGE_SELF, &r1); gettimeofday(&t1, 0); while(contQ < N_QUERIES) { contQ += Q; if (contQ > N_QUERIES) N_BLOQUES = N_QUERIES - (contQ-Q); printf("\nN_BLOQUES = %d :: T_per_BLOCK = %d\n", N_BLOQUES, T_per_BLOCK); Batch_Heap_Reduction<<< N_BLOQUES, T_per_BLOCK>>> (Elems, (int)pitch, HEAPS_dev, (int)pitch_H, QUERY_dev, (int)pitch_Q, arr_Dist, (int)pitch_Dist, Q*cont, res_final); if (cudaSuccess != cudaMemcpy((double *)res_final_H, (double *)res_final, sizeof(double)*Q*TOPK, cudaMemcpyDeviceToHost)) { printf("\nERROR 41 :: cudaMemcpy :: iteraH\n"); cudaThreadExit(); return 0; } cont++; } gettimeofday(&t2, 0); getrusage(RUSAGE_SELF, &r2); for (i=0; i<N_BLOQUES; i++) { printf("\n\nResults array %d (smallest distances):", i); for (j=TOPK*i; j<(TOPK*i)+TOPK; j++) printf("\nquery = %d :: dist = %lf", i, res_final_H[j]); } printf("\n"); user_time = (r2.ru_utime.tv_sec - r1.ru_utime.tv_sec) + (r2.ru_utime.tv_usec - r1.ru_utime.tv_usec)/1000000.0; sys_time = (r2.ru_stime.tv_sec - r1.ru_stime.tv_sec) + (r2.ru_stime.tv_usec - r1.ru_stime.tv_usec)/1000000.0; real_time = (t2.tv_sec - t1.tv_sec) + (double)(t2.tv_usec - t1.tv_usec)/1000000; prom = 0; prom_cont = 0; for (i=0; i<Q; i++) { if (res_final_H[i] != 0) { prom += res_final_H[i]; prom_cont += 1; } } printf("\nK = %d", TOPK); printf("\nTiempo CPU = %f", user_time + sys_time); printf("\nTiempo Real = %f", real_time); printf("\nprom = %lf\n", (double)(prom/(double)prom_cont)); fflush(stdout); cudaFree(Elems); cudaFree(QUERY_dev); cudaFree(HEAPS_dev); cudaFree(arr_Dist); cudaThreadExit(); return 0; } __device__ void insertaH(Elem *heap, Elem *elem, int *n_elem, int pitch, int id) { int i; Elem temp; ((Elem *)((char *)heap + (*n_elem)*pitch))[id].dist = elem->dist; ((Elem *)((char *)heap + (*n_elem)*pitch))[id].ind = elem->ind; (*n_elem)++; for (i = *n_elem; i>1 && ((Elem *)((char *)heap + (i-1)*pitch))[id].dist > ((Elem *)((char *)heap + ((i/2)-1)*pitch))[id].dist; i=i/2) { //Intercambiamos con el padre temp.dist = ((Elem *)((char *)heap + (i-1)*pitch))[id].dist; temp.ind = ((Elem *)((char *)heap + (i-1)*pitch))[id].ind; ((Elem *)((char *)heap + (i-1)*pitch))[id].dist = ((Elem *)((char *)heap + ((i/2)-1)*pitch))[id].dist; ((Elem *)((char *)heap + (i-1)*pitch))[id].ind = ((Elem *)((char *)heap + ((i/2)-1)*pitch))[id].ind; ((Elem *)((char *)heap + ((i/2)-1)*pitch))[id].dist = temp.dist; ((Elem *)((char *)heap + ((i/2)-1)*pitch))[id].ind = temp.ind; } return; } __device__ void extraeH(Elem *heap, int *n_elem, int pitch, int id, Elem *eresult) { int i, k; Elem temp; eresult->dist = ((Elem *)((char *)heap+0))[id].dist; //Se guarda el maximo eresult->ind = ((Elem *)((char *)heap+0))[id].ind; ((Elem *)((char *)heap+0))[id].dist = ((Elem *)((char *)heap + ((*n_elem)-1)*pitch))[id].dist;// Movemos el ultimo a la raiz y achicamos el heap ((Elem *)((char *)heap+0))[id].ind = ((Elem *)((char *)heap + 
((*n_elem)-1)*pitch))[id].ind; (*n_elem)--; i = 1; while(2*i <= *n_elem) // mientras tenga algun hijo { k = 2*i; //el hijo izquierdo if(k+1 <= *n_elem && ((Elem *)((char *)heap + ((k+1)-1)*pitch))[id].dist > ((Elem *)((char *)heap + (k-1)*pitch))[id].dist) k = k+1; //el hijo derecho es el mayor if(((Elem *)((char *)heap + (i-1)*pitch))[id].dist > ((Elem *)((char *)heap + (k-1)*pitch))[id].dist) break; //es mayor que ambos hijos temp.dist = ((Elem *)((char *)heap + (i-1)*pitch))[id].dist; temp.ind = ((Elem *)((char *)heap + (i-1)*pitch))[id].ind; ((Elem *)((char *)heap + (i-1)*pitch))[id].dist = ((Elem *)((char *)heap + (k-1)*pitch))[id].dist; ((Elem *)((char *)heap + (i-1)*pitch))[id].ind = ((Elem *)((char *)heap + (k-1)*pitch))[id].ind; ((Elem *)((char *)heap + (k-1)*pitch))[id].dist = temp.dist; ((Elem *)((char *)heap + (k-1)*pitch))[id].ind = temp.ind; i = k; //lo intercambiamos con el mayor hijo } return; // return max; } __device__ double topH(Elem *heap, int id) { return ((Elem *)((char *)heap + 0))[id].dist; } __device__ void popush(Elem *heap, Elem *elem, int *n_elem, int pitch, int id) { int i, k; Elem temp; ((Elem *)((char *)heap+0))[id].dist = elem->dist; ((Elem *)((char *)heap+0))[id].ind = elem->ind; i = 1; while(2*i <= *n_elem) // mientras tenga algun hijo { k = 2*i; //el hijo izquierdo if(k+1 <= *n_elem && ((Elem *)((char *)heap + ((k+1)-1)*pitch))[id].dist > ((Elem *)((char *)heap + (k-1)*pitch))[id].dist) k = k+1; //el hijo derecho es el mayor if(((Elem *)((char *)heap + (i-1)*pitch))[id].dist > ((Elem *)((char *)heap + (k-1)*pitch))[id].dist) break; //es mayor que ambos hijos temp.dist = ((Elem *)((char *)heap + (i-1)*pitch))[id].dist; temp.ind = ((Elem *)((char *)heap + (i-1)*pitch))[id].ind; ((Elem *)((char *)heap + (i-1)*pitch))[id].dist = ((Elem *)((char *)heap + (k-1)*pitch))[id].dist; ((Elem *)((char *)heap + (i-1)*pitch))[id].ind = ((Elem *)((char *)heap + (k-1)*pitch))[id].ind; ((Elem *)((char *)heap + (k-1)*pitch))[id].dist = temp.dist; ((Elem *)((char *)heap + (k-1)*pitch))[id].ind = temp.ind; i = k; //lo intercambiamos con el mayor hijo } return; } __global__ void Batch_Heap_Reduction(double *DB_dev, int pitch_DB, Elem *heap, int pitch_H, double *QUERY_dev, int pitch_QUERY, Elem *arr_Dist, int pitch_Dist, int beginQ, double *res_final) { int i, j, n_elem=0, n_elemWarp=0; int id; Elem eresult; __shared__ Elem matrizWarp[TOPK][TAM_WARP]; __shared__ Elem heapfin[TOPK][1]; __shared__ double query[DIM]; // int ED=0; id = threadIdx.x + (blockDim.x * blockIdx.x); //Se copia la Query a mem. compartida for (i=threadIdx.x; i < DIM; i += blockDim.x) query[i] = ((double *)((char *)QUERY_dev + ((blockIdx.x + beginQ) * (int)pitch_QUERY)))[i]; __syncthreads(); //Se obtiene el arreglo de distancias for (i=threadIdx.x; i < NE; i += blockDim.x) { // ED++; ((Elem *)((char *)arr_Dist + (blockIdx.x*pitch_Dist)))[i].dist = distancia_trans(DB_dev, pitch_DB, i, query); ((Elem *)((char *)arr_Dist + (blockIdx.x*pitch_Dist)))[i].ind = i; } for(i=threadIdx.x; i < NE; i += blockDim.x)//NE = Numero de elementos de la BD { if (n_elem >= TOPK) { if (topH(heap, id) > ((Elem *)((char *)arr_Dist + (blockIdx.x*pitch_Dist)))[i].dist) popush(heap, &(((Elem *)((char *)arr_Dist + (blockIdx.x*pitch_Dist)))[i]), &n_elem, pitch_H, id); //Extrae e inserta en una operacion } else insertaH(heap, &(((Elem *)((char *)arr_Dist + (blockIdx.x*pitch_Dist)))[i]), &n_elem, pitch_H, id); } __syncthreads(); //Un warp reduce el problema a una matriz de Kx32 distancias. 
PEro esta vez los heaps se almacenan en Memoria Shared if (threadIdx.x < TAM_WARP) { for(j=id; j < blockDim.x*(blockIdx.x+1); j += TAM_WARP) { n_elem = TOPK; for(i=0; i < TOPK; i++) { extraeH(heap, &n_elem, pitch_H, j, &eresult); if (n_elemWarp < TOPK) insertaH(&(matrizWarp[0][0]), &eresult, &n_elemWarp, sizeof(Elem)*TAM_WARP, threadIdx.x); else if (topH(&(matrizWarp[0][0]), threadIdx.x) > eresult.dist) popush(&(matrizWarp[0][0]), &eresult, &n_elemWarp, sizeof(Elem)*TAM_WARP, threadIdx.x); } } } __syncthreads(); //Un hilo encuentra los K-NN a partir de la matriz de TOPKxTAM_WARP if (threadIdx.x == 0) { n_elem = 0; for(j=0; j < TAM_WARP; j++) { for(i=0; i < TOPK; i++) if (n_elem < TOPK) insertaH((Elem *)heapfin, &(matrizWarp[i][j]), &n_elem, sizeof(Elem), 0); else if (topH((Elem *)heapfin, 0) > matrizWarp[i][j].dist) popush((Elem *)heapfin, &(matrizWarp[i][j]), &n_elem, sizeof(Elem), 0); } //Escribiendo algunos resultados //res_final[blockIdx.x] = topH((Elem *)heapfin, 0); for (i=TOPK*blockIdx.x; i < (TOPK*blockIdx.x)+TOPK; i++) { extraeH(&(heapfin[0][0]), &n_elem, sizeof(Elem), 0, &eresult); res_final[i] = eresult.dist; } } // atomicAdd(&(resT[blockIdx.x]), ED); return; } __device__ double distancia_trans(double *p1, int pitch_p1, int col_1, double *q) { int i=0; double suma=0; for (i=0; i < DIM; i++) suma += (((double *)((char *)p1 + (i*pitch_p1)))[col_1] - q[i]) * (((double *)((char *)p1 + (i*pitch_p1)))[col_1] - q[i]); return sqrtf(suma); } void imprime_trans(double **MAT, int col) { int i; for (i=0; i<DIM; i++) printf("%lf ", MAT[i][col]); return; } int leedato(double *dato, FILE *file) { int i=0; for (i=0;i<DIM;i++) if (fscanf(file,"%lf",&dato[i])<1) return -1; return 1; } int leedato_cophir(double *dato, FILE *file) { int i=0; int num_f; for (i=0;i<DIM;i++) { if (fscanf(file, "%d", &num_f) < 1) return ERROR; dato[i] = (double)num_f; if (i+1 < DIM) if (fgetc(file) != ',') { printf("\nERROR :: ',' no encontrada\n"); return ERROR; } } return 1; } int leedato_trans(double **dato, FILE *file, int col) { int i=0; for (i=0;i<DIM;i++) if (fscanf(file,"%lf",&(dato[i][col]))<1) return -1; return 1; } int leedato_trans_cophir(double **dato, FILE *file, int col) { int i=0; int num_f; for (i=0;i<DIM;i++) { if (fscanf(file, "%d", &num_f) < 1) return ERROR; dato[i][col] = (double)num_f; if (i+1 < DIM) if (fgetc(file) != ',') { printf("\nERROR :: ',' no encontrada\n"); return ERROR; } } return 1; }
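The core of the k-NN search above is a per-thread bounded max-heap: insertaH fills it up to TOPK elements, and once it is full, popush overwrites the root whenever a smaller distance arrives, so the heap always holds the K smallest distances seen so far. The pitched-memory indexing makes that logic hard to read in place, so here is a compact single-threaded, 0-indexed reference of the same insert / replace-the-root idea; it is an illustration, not code extracted from the kernels.

// Reference for the TOPK selection done per thread above: keep a max-heap of
// the k smallest distances; once full, replace the root whenever a smaller
// distance shows up (the "popush" step). Illustrative, single-threaded.
#include <cstdio>
#include <cstdlib>

struct Elem { double dist; int ind; };

static void sift_down(Elem* heap, int n, int i) {
  for (;;) {
    int k = 2 * i + 1;                       // left child
    if (k >= n) break;
    if (k + 1 < n && heap[k + 1].dist > heap[k].dist) k = k + 1;  // larger child
    if (heap[i].dist >= heap[k].dist) break; // heap property holds
    Elem t = heap[i]; heap[i] = heap[k]; heap[k] = t;
    i = k;
  }
}

static void heap_insert(Elem* heap, int* n, Elem e) {
  int i = (*n)++;
  heap[i] = e;
  while (i > 0 && heap[i].dist > heap[(i - 1) / 2].dist) {  // sift up
    Elem t = heap[i]; heap[i] = heap[(i - 1) / 2]; heap[(i - 1) / 2] = t;
    i = (i - 1) / 2;
  }
}

static void heap_popush(Elem* heap, int n, Elem e) {
  heap[0] = e;            // overwrite the current maximum
  sift_down(heap, n, 0);
}

int main() {
  const int K = 4, N = 16;
  Elem heap[K];
  int n = 0;
  for (int i = 0; i < N; ++i) {
    Elem e = { (double)(rand() % 100), i };
    if (n < K)                      heap_insert(heap, &n, e);
    else if (e.dist < heap[0].dist) heap_popush(heap, n, e);
  }
  printf("k smallest distances (heap order, root first):");
  for (int i = 0; i < K; ++i) printf(" %.0f", heap[i].dist);
  printf("\n");
  return 0;
}

The device versions do essentially this, except that each heap lives in a pitched global-memory matrix (or the shared-memory matrizWarp) and is indexed by thread id, and the warp/single-thread stages then merge the per-thread heaps into the final TOPK result.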
7aea424e29ccd2b12c5c4dfab1da351b02dd797a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "GPUSolver.h" /** The number of azimuthal angles */ __constant__ int num_azim[1]; /** The number of energy groups */ __constant__ int num_groups[1]; /** The number of FSRs */ __constant__ int num_FSRs[1]; /** The number of polar angles */ __constant__ int num_polar[1]; /** Twice the number of polar angles */ __constant__ int two_times_num_polar[1]; /** The number of polar angles times energy groups */ __constant__ int polar_times_groups[1]; /** An array for the sines of the polar angle in the polar Quadrature set */ __constant__ FP_PRECISION sin_thetas[MAX_POLAR_ANGLES_GPU]; /** An array of the weights for the polar angles from the Quadrature set */ __constant__ FP_PRECISION polar_weights[MAX_POLAR_ANGLES_GPU*MAX_AZIM_ANGLES_GPU]; /** The total number of Tracks */ __constant__ int tot_num_tracks[1]; /** An GPUExpEvaluator object to compute exponentials */ __constant__ GPUExpEvaluator exp_evaluator; /** * @brief A struct used to check if a value on the GPU is equal to INF. * @details This is used as a predicate in Thrust routines. */ struct isinf_test { /** * @brief Checks if a double precision value is INF. * @param a the value to check * @return true if equal to INF, false otherwise */ __host__ __device__ bool operator()(double a) { return isinf(a); } /** * @brief Checks if a single precision value is INF. * @param a the value to check * @return true if equal to INF, false otherwise */ __host__ __device__ bool operator()(float a) { return isinf(a); } }; /** * @brief A struct used to check if a value on the GPU is equal to NaN. * @details This is used as a predicate in Thrust routines. */ struct isnan_test { /** * @brief Checks if a double precision value is NaN. * @param a the value to check * @return true if equal to NaN, false otherwise */ __host__ __device__ bool operator()(double a) { return isnan(a); } /** * @brief Checks if a single precision value is NaN. * @param a the value to check * @return true if equal to NaN, false otherwise */ __host__ __device__ bool operator()(float a) { return isnan(a); } }; /** * @brief A functor to multiply all elements in a Thrust vector by a constant. * @param constant the constant to multiply the vector */ template< typename T > struct multiplyByConstant { public: /* The constant to multiply by */ const T constant; /** * @brief Constructor for the functor. * @param constant to multiply each element in a Thrust vector */ multiplyByConstant(T constant) : constant(constant) {} /** * @brief Multiply an element in a Thrust vector. * @param VecElem the element to multiply */ __host__ __device__ void operator()(T& VecElem) const { VecElem = VecElem * constant; } }; /** * @class This provides a templated interface for a strided iterator over * a Thrust device_vector on a GPU. 
* @details This code is taken from the Thrust examples site on 1/20/2015: * https://github.com/thrust/thrust/blob/master/examples/strided_range.cu */ template <typename Iterator> class strided_range { public: typedef typename thrust::iterator_difference<Iterator>::type difference_type; struct stride_functor : public thrust::unary_function<difference_type,difference_type> { difference_type stride; stride_functor(difference_type stride) : stride(stride) { } __host__ __device__ difference_type operator()(const difference_type& i) const { return stride * i; } }; typedef typename thrust::counting_iterator<difference_type> CountingIterator; typedef typename thrust::transform_iterator<stride_functor, CountingIterator> TransformIterator; typedef typename thrust::permutation_iterator<Iterator,TransformIterator> PermutationIterator; typedef PermutationIterator iterator; /** * @brief The strided iterator constructor. */ strided_range(Iterator first, Iterator last, difference_type stride) : first(first), last(last), stride(stride) { } /** * @brief Get the first element in the iterator. * @return the first element in the iterator */ iterator begin(void) const { return PermutationIterator(first, TransformIterator(CountingIterator(0), stride_functor(stride))); } /** * @brief Get the last element in the iterator. * @return the last element in the iterator */ iterator end(void) const { return begin() + ((last - first) + (stride - 1)) / stride; } protected: /** The first element in the underlying device_vector as set by the constructor */ Iterator first; /** The last element in the underlying device_vector as set by the constructor */ Iterator last; /** The stride to use when iterating over the underlying device_vector */ difference_type stride; }; /** * @brief Compute the total fission source from all FSRs. * @param FSR_volumes an array of FSR volumes * @param FSR_materials an array of FSR Material indices * @param materials an array of dev_materials on the device * @param scalar_flux the scalar flux in each FSR and energy group * @param fission_sources array of fission sources in each FSR and energy group */ __global__ void computeFissionSourcesOnDevice(FP_PRECISION* FSR_volumes, int* FSR_materials, dev_material* materials, FP_PRECISION* scalar_flux, FP_PRECISION* fission_sources) { /* Use a shared memory buffer for each thread's fission source */ extern __shared__ FP_PRECISION shared_fission_source[]; int tid = threadIdx.x + blockIdx.x * blockDim.x; dev_material* curr_material; FP_PRECISION* nu_sigma_f; FP_PRECISION volume, source; /* Initialize fission source to zero */ shared_fission_source[threadIdx.x] = 0; /* Iterate over all FSRs */ while (tid < *num_FSRs) { curr_material = &materials[FSR_materials[tid]]; nu_sigma_f = curr_material->_nu_sigma_f; volume = FSR_volumes[tid]; /* Iterate over energy groups and update fission source for * this thread block */ for (int e=0; e < *num_groups; e++) { source = nu_sigma_f[e] * scalar_flux(tid,e) * volume; shared_fission_source[threadIdx.x] += source; } /* Increment thread id */ tid += blockDim.x * gridDim.x; } /* Copy this thread's fission source to global memory */ tid = threadIdx.x + blockIdx.x * blockDim.x; fission_sources[tid] = shared_fission_source[threadIdx.x]; } /** * @brief Computes the total source (fission, scattering, fixed) in each FSR. * @details This method computes the total source in each region based on * this iteration's current approximation to the scalar flux. 
* @param FSR_materials an array of FSR Material indices * @param materials an array of dev_material pointers * @param scalar_flux an array of FSR scalar fluxes * @param fixed_sources an array of fixed (user-defined) sources * @param reduced_sources an array of FSR sources / total xs * @param inverse_k_eff the inverse of keff */ __global__ void computeFSRSourcesOnDevice(int* FSR_materials, dev_material* materials, FP_PRECISION* scalar_flux, FP_PRECISION* fixed_sources, FP_PRECISION* reduced_sources, FP_PRECISION inverse_k_eff) { int tid = threadIdx.x + blockIdx.x * blockDim.x; FP_PRECISION fission_source; FP_PRECISION scatter_source; dev_material* curr_material; FP_PRECISION* sigma_t; FP_PRECISION* sigma_s; FP_PRECISION* fiss_mat; /* Iterate over all FSRs */ while (tid < *num_FSRs) { curr_material = &materials[FSR_materials[tid]]; sigma_t = curr_material->_sigma_t; sigma_s = curr_material->_sigma_s; fiss_mat = curr_material->_fiss_matrix; /* Compute scatter + fission source for group g */ for (int g=0; g < *num_groups; g++) { scatter_source = 0; fission_source = 0; for (int g_prime=0; g_prime < *num_groups; g_prime++) { scatter_source += sigma_s[g*(*num_groups)+g_prime] * scalar_flux(tid,g_prime); fission_source += fiss_mat[g*(*num_groups)+g_prime] * scalar_flux(tid,g_prime); } fission_source *= inverse_k_eff; /* Compute total (scatter+fission+fixed) reduced source */ reduced_sources(tid,g) = fixed_sources(tid,g); reduced_sources(tid,g) += scatter_source + fission_source; reduced_sources(tid,g) *= ONE_OVER_FOUR_PI; reduced_sources(tid,g) = __fdividef(reduced_sources(tid,g), sigma_t[g]); } /* Increment the thread id */ tid += blockDim.x * gridDim.x; } } /** * @brief Computes the total fission source in each FSR in each energy group * @details This method is a helper routine for the openmoc.krylov submodule. * This routine computes the total fission source in each FSR. If the * divide_sigma_t parameter is true then the fission source will be * divided by the total cross-section in each FSR. * @param FSR_materials an array of FSR Material indices * @param materials an array of dev_material pointers * @param divide_sigma_t a boolean indicating whether to divide by the total xs * @param scalar_flux an array of FSR scalar fluxes * @param reduced_sources an array of FSR fission sources */ __global__ void computeFSRFissionSourcesOnDevice(int* FSR_materials, dev_material* materials, bool divide_sigma_t, FP_PRECISION* scalar_flux, FP_PRECISION* reduced_sources) { int tid = threadIdx.x + blockIdx.x * blockDim.x; FP_PRECISION fission_source; dev_material* curr_material; FP_PRECISION* sigma_t; FP_PRECISION* fiss_mat; /* Iterate over all FSRs */ while (tid < *num_FSRs) { curr_material = &materials[FSR_materials[tid]]; sigma_t = curr_material->_sigma_t; fiss_mat = curr_material->_fiss_matrix; /* Compute fission source for group g */ for (int g=0; g < *num_groups; g++) { fission_source = 0; for (int g_prime=0; g_prime < *num_groups; g_prime++) fission_source += fiss_mat[g*(*num_groups)+g_prime] * scalar_flux(tid,g_prime); /* Set the reduced fission source for FSR tid in group g */ reduced_sources(tid,g) = fission_source; reduced_sources(tid,g) *= ONE_OVER_FOUR_PI; if (divide_sigma_t) reduced_sources(tid,g) = __fdividef(reduced_sources(tid,g), sigma_t[g]); } /* Increment the thread id */ tid += blockDim.x * gridDim.x; } } /** * @brief Computes the total scattering source in each FSR and energy group. * @details This method is a helper routine for the openmoc.krylov submodule. 
* This routine computes the total scatter source in each FSR. If the * divide_sigma_t parameter is true then the scatter source will be * divided by the total cross-section in each FSR. * @param FSR_materials an array of FSR Material indices * @param materials an array of dev_material pointers * @param divide_sigma_t a boolean indicating whether to divide by the total xs * @param scalar_flux an array of FSR scalar fluxes * @param reduced_sources an array of FSR scatter sources */ __global__ void computeFSRScatterSourcesOnDevice(int* FSR_materials, dev_material* materials, bool divide_sigma_t, FP_PRECISION* scalar_flux, FP_PRECISION* reduced_sources) { int tid = threadIdx.x + blockIdx.x * blockDim.x; FP_PRECISION scatter_source; dev_material* curr_material; FP_PRECISION* sigma_s; FP_PRECISION* sigma_t; /* Iterate over all FSRs */ while (tid < *num_FSRs) { curr_material = &materials[FSR_materials[tid]]; sigma_s = curr_material->_sigma_s; sigma_t = curr_material->_sigma_t; /* Compute total scattering source for this FSR in group g */ for (int g=0; g < *num_groups; g++) { scatter_source = 0; for (int g_prime=0; g_prime < *num_groups; g_prime++) scatter_source += sigma_s[g*(*num_groups)+g_prime] * scalar_flux(tid,g_prime); /* Set the reduced scatter source for FSR tid in group g */ reduced_sources(tid,g) = scatter_source; reduced_sources(tid,g) *= ONE_OVER_FOUR_PI; if (divide_sigma_t) reduced_sources(tid,g) = __fdividef(reduced_sources(tid,g), sigma_t[g]); } /* Increment the thread id */ tid += blockDim.x * gridDim.x; } } /** * @brief Perform an atomic addition in double precision to an array address. * @details This method is straight out of CUDA C Developers Guide (cc 2013). * @param address the array memory address * @param val the value to add to the array * @return the atomically added array value and input value */ __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } /** * @brief Computes the contribution to the FSR scalar flux from a Track * segment in a single energy group. * @details This method integrates the angular flux for a Track segment across * energy groups and polar angles, and tallies it into the FSR scalar * flux, and updates the Track's angular flux. 
* @param curr_segment a pointer to the Track segment of interest * @param azim_index a pointer to the azimuthal angle index for this segment * @param energy_group the energy group of interest * @param materials the array of dev_material pointers * @param track_flux a pointer to the Track's angular flux * @param reduced_sources the array of FSR sources / total xs * @param polar_weights the array of polar Quadrature weights * @param scalar_flux the array of FSR scalar fluxes */ __device__ void tallyScalarFlux(dev_segment* curr_segment, int azim_index, int energy_group, dev_material* materials, FP_PRECISION* track_flux, FP_PRECISION* reduced_sources, FP_PRECISION* polar_weights, FP_PRECISION* scalar_flux) { int fsr_id = curr_segment->_region_uid; FP_PRECISION length = curr_segment->_length; dev_material* curr_material = &materials[curr_segment->_material_index]; FP_PRECISION* sigma_t = curr_material->_sigma_t; /* The change in angular flux long this Track segment in this FSR */ FP_PRECISION delta_psi; FP_PRECISION exponential; /* Zero the FSR scalar flux contribution from this segment and energy group */ FP_PRECISION fsr_flux = 0.0; /* Loop over polar angles */ for (int p=0; p < *num_polar; p++) { exponential = exp_evaluator.computeExponential(sigma_t[energy_group] * length, p); delta_psi = (track_flux[p] - reduced_sources(fsr_id,energy_group)); delta_psi *= exponential; fsr_flux += delta_psi * polar_weights(azim_index,p); track_flux[p] -= delta_psi; } /* Atomically increment the scalar flux for this FSR */ atomicAdd(&scalar_flux(fsr_id,energy_group), fsr_flux); } /** * @brief Updates the boundary flux for a Track given boundary conditions. * @details For reflective and periodic boundary conditions, the outgoing * boundary flux for the Track is given to the corresponding reflecting * or periodic Track. For vacuum boundary conditions, the outgoing flux * is tallied as leakage. Note: Only one energy group is transferred * by this routine. * @param curr_track a pointer to the Track of interest * @param azim_index a pointer to the azimuthal angle index for this segment * @param track_flux an array of the outgoing Track flux * @param boundary_flux an array of all angular fluxes * @param polar_weights an array of polar Quadrature weights * @param energy_angle_index the energy group index * @param direction the Track direction (forward - true, reverse - false) */ __device__ void transferBoundaryFlux(dev_track* curr_track, int azim_index, FP_PRECISION* track_flux, FP_PRECISION* boundary_flux, FP_PRECISION* polar_weights, int energy_angle_index, bool direction) { int start = energy_angle_index; bool transfer_flux; int track_out_id; /* For the "forward" direction */ if (direction) { transfer_flux = curr_track->_transfer_flux_out; track_out_id = curr_track->_track_out; start += curr_track->_next_out * (*polar_times_groups); } /* For the "reverse" direction */ else { transfer_flux = curr_track->_transfer_flux_in; track_out_id = curr_track->_track_in; start += curr_track->_next_in * (*polar_times_groups); } FP_PRECISION* track_out_flux = &boundary_flux(track_out_id,start); /* Put Track's flux in the shared memory temporary flux array */ for (int p=0; p < *num_polar; p++) track_out_flux[p] = track_flux[p] * transfer_flux; } /** * @brief This method performs one transport sweep of one halfspace of all * azimuthal angles, tracks, segments, polar angles and energy groups. 
* @details The method integrates the flux along each track and updates the * boundary fluxes for the corresponding output Track, while updating * the scalar flux in each FSR. * @param scalar_flux an array of FSR scalar fluxes * @param boundary_flux an array of Track boundary fluxes * @param reduced_sources an array of FSR sources / total xs * @param materials an array of dev_material pointers * @param tracks an array of Tracks * @param tid_offset the Track offset for azimuthal angle halfspace * @param tid_max the upper bound on the Track IDs for this azimuthal * angle halfspace */ __global__ void transportSweepOnDevice(FP_PRECISION* scalar_flux, FP_PRECISION* boundary_flux, FP_PRECISION* reduced_sources, dev_material* materials, dev_track* tracks, int tid_offset, int tid_max) { /* Shared memory buffer for each thread's angular flux */ extern __shared__ FP_PRECISION temp_flux[]; FP_PRECISION* track_flux; int tid = tid_offset + threadIdx.x + blockIdx.x * blockDim.x; int track_id = tid / *num_groups; int track_flux_index = threadIdx.x * (*two_times_num_polar); int energy_group = tid % (*num_groups); int energy_angle_index = energy_group * (*num_polar); dev_track* curr_track; int azim_index; int num_segments; dev_segment* curr_segment; /* Iterate over Track with azimuthal angles in (0, pi/2) */ while (track_id < tid_max) { /* Initialize local registers with important data */ curr_track = &tracks[track_id]; azim_index = curr_track->_azim_angle_index; num_segments = curr_track->_num_segments; /* Retrieve pointer to thread's shared memory buffer for angular flux */ track_flux = &temp_flux[track_flux_index]; /* Put Track's flux in the shared memory temporary flux array */ for (int p=0; p < *num_polar; p++) { /* Forward flux along this Track */ track_flux[p] = boundary_flux(track_id,p+energy_angle_index); /* Reverse flux along this Track */ track_flux[(*num_polar) + p] = boundary_flux(track_id,p+energy_angle_index+(*polar_times_groups)); } /* Loop over each Track segment in forward direction */ for (int i=0; i < num_segments; i++) { curr_segment = &curr_track->_segments[i]; tallyScalarFlux(curr_segment, azim_index, energy_group, materials, track_flux, reduced_sources, polar_weights, scalar_flux); } /* Transfer boundary angular flux to outgoing Track */ transferBoundaryFlux(curr_track, azim_index, track_flux, boundary_flux, polar_weights, energy_angle_index, true); /* Loop over each Track segment in reverse direction */ track_flux = &temp_flux[track_flux_index + (*num_polar)]; for (int i=num_segments-1; i > -1; i--) { curr_segment = &curr_track->_segments[i]; tallyScalarFlux(curr_segment, azim_index, energy_group, materials, track_flux, reduced_sources, polar_weights, scalar_flux); } /* Transfer boundary angular flux to outgoing Track */ transferBoundaryFlux(curr_track, azim_index, track_flux, boundary_flux, polar_weights, energy_angle_index, false); /* Update the indices for this thread to the next Track, energy group */ tid += blockDim.x * gridDim.x; track_id = tid / *num_groups; energy_group = tid % (*num_groups); energy_angle_index = energy_group * (*num_polar); } } /** * @brief Add the source term contribution in the transport equation to * the FSR scalar flux on the GPU. 
 * @param scalar_flux an array of FSR scalar fluxes
 * @param reduced_sources an array of FSR sources / total xs
 * @param FSR_volumes an array of FSR volumes
 * @param FSR_materials an array of FSR material indices
 * @param materials an array of dev_material pointers
 */
__global__ void addSourceToScalarFluxOnDevice(FP_PRECISION* scalar_flux,
                                              FP_PRECISION* reduced_sources,
                                              FP_PRECISION* FSR_volumes,
                                              int* FSR_materials,
                                              dev_material* materials) {

  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  FP_PRECISION volume;

  dev_material* curr_material;
  FP_PRECISION* sigma_t;

  /* Iterate over all FSRs */
  while (tid < *num_FSRs) {

    curr_material = &materials[FSR_materials[tid]];
    volume = FSR_volumes[tid];
    sigma_t = curr_material->_sigma_t;

    /* Iterate over all energy groups */
    for (int i=0; i < *num_groups; i++) {
      scalar_flux(tid,i) *= 0.5;
      scalar_flux(tid,i) = __fdividef(scalar_flux(tid,i),
                                      (sigma_t[i] * volume));
      scalar_flux(tid,i) += FOUR_PI * reduced_sources(tid,i);
    }

    /* Increment thread id */
    tid += blockDim.x * gridDim.x;
  }
}


/**
 * @brief Compute the total volume-integrated fission source from
 *        all FSRs and energy groups.
 * @param FSR_volumes an array of the FSR volumes
 * @param FSR_materials an array of the FSR Material indices
 * @param materials an array of the dev_material pointers
 * @param scalar_flux an array of FSR scalar fluxes
 * @param fission an array of FSR nu-fission rates
 */
__global__ void computeFSRFissionRatesOnDevice(FP_PRECISION* FSR_volumes,
                                               int* FSR_materials,
                                               dev_material* materials,
                                               FP_PRECISION* scalar_flux,
                                               FP_PRECISION* fission) {

  int tid = threadIdx.x + blockIdx.x * blockDim.x;

  dev_material* curr_material;
  FP_PRECISION* nu_sigma_f;
  FP_PRECISION volume;

  FP_PRECISION fiss = 0.;

  /* Iterate over all FSRs */
  while (tid < *num_FSRs) {

    curr_material = &materials[FSR_materials[tid]];
    nu_sigma_f = curr_material->_nu_sigma_f;
    volume = FSR_volumes[tid];

    FP_PRECISION curr_fiss = 0.;

    /* Compute nu-fission rates for this thread block */
    for (int e=0; e < *num_groups; e++)
      curr_fiss += nu_sigma_f[e] * scalar_flux(tid,e);

    fiss += curr_fiss * volume;

    /* Increment thread id */
    tid += blockDim.x * gridDim.x;
  }

  /* Copy this thread's fission to global memory */
  tid = threadIdx.x + blockIdx.x * blockDim.x;
  fission[tid] = fiss;
}


/**
 * @brief Constructor initializes arrays for dev_tracks and dev_materials.
 * @details The constructor initializes the number of CUDA threads and thread
 *          blocks each to a default of 64.
 * @param track_generator an optional pointer to the TrackGenerator
 */
GPUSolver::GPUSolver(TrackGenerator* track_generator) :

  Solver(track_generator) {

  /* The default number of thread blocks and threads per thread block */
  _B = 64;
  _T = 64;

  _materials = NULL;
  _dev_tracks = NULL;
  _FSR_materials = NULL;

  if (track_generator != NULL)
    setTrackGenerator(track_generator);
}


/**
 * @brief Solver destructor frees all memory on the device, including arrays
 *        for the FSR scalar fluxes and sources and Track boundary fluxes.
 */
GPUSolver::~GPUSolver() {

  if (_FSR_volumes != NULL) {
    hipFree(_FSR_volumes);
    _FSR_volumes = NULL;
  }

  if (_FSR_materials != NULL) {
    hipFree(_FSR_materials);
    _FSR_materials = NULL;
  }

  if (_materials != NULL) {
    hipFree(_materials);
    _materials = NULL;
  }

  if (_dev_tracks != NULL) {
    hipFree(_dev_tracks);
    _dev_tracks = NULL;
  }

  /* Clear Thrust vectors' memory on the device */
  _boundary_flux.clear();
  _scalar_flux.clear();
  _old_scalar_flux.clear();
  _fixed_sources.clear();
  _reduced_sources.clear();
}


/**
 * @brief Returns the number of thread blocks to execute on the GPU.
* @return the number of thread blocks */ int GPUSolver::getNumThreadBlocks() { return _B; } /** * @brief Returns the number of threads per block to execute on the GPU. * @return the number of threads per block */ int GPUSolver::getNumThreadsPerBlock() { return _T; } /** * @brief Returns the source for some energy group for a flat source region * @details This is a helper routine used by the openmoc.process module. * @param fsr_id the ID for the FSR of interest * @param group the energy group of interest * @return the flat source region source */ FP_PRECISION GPUSolver::getFSRSource(int fsr_id, int group) { if (fsr_id >= _num_FSRs) log_printf(ERROR, "Unable to return a source for FSR ID = %d " "since the max FSR ID = %d", fsr_id, _num_FSRs-1); else if (fsr_id < 0) log_printf(ERROR, "Unable to return a source for FSR ID = %d " "since FSRs do not have negative IDs", fsr_id); else if (group-1 >= _num_groups) log_printf(ERROR, "Unable to return a source in group %d " "since there are only %d groups", group, _num_groups); else if (group <= 0) log_printf(ERROR, "Unable to return a source in group %d " "since groups must be greater or equal to 1", group); else if (_scalar_flux.size() == 0) log_printf(ERROR, "Unable to return a source " "since it has not yet been computed"); /* Get host material */ Material* host_material = _geometry->findFSRMaterial(fsr_id); /* Get cross sections and scalar flux */ FP_PRECISION* sigma_s = host_material->getSigmaS(); FP_PRECISION* fiss_mat = host_material->getFissionMatrix(); FP_PRECISION* fsr_scalar_fluxes = new FP_PRECISION[_num_groups]; FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); hipMemcpy((void*)fsr_scalar_fluxes, (void*)&scalar_flux[fsr_id*_num_groups], _num_groups * sizeof(FP_PRECISION), hipMemcpyDeviceToHost); FP_PRECISION fission_source = 0.0; FP_PRECISION scatter_source = 0.0; FP_PRECISION total_source; /* Compute total scattering and fission sources for this FSR */ for (int g=0; g < _num_groups; g++) { scatter_source += sigma_s[(group-1)*(_num_groups)+g] * fsr_scalar_fluxes[g]; fission_source += fiss_mat[(group-1)*(_num_groups)+g] * fsr_scalar_fluxes[g]; } fission_source /= _k_eff; /* Compute the total source */ total_source = fission_source + scatter_source; /* Add in fixed source (if specified by user) */ total_source += _fixed_sources(fsr_id,group-1); /* Normalize to solid angle for isotropic approximation */ total_source *= ONE_OVER_FOUR_PI; delete [] fsr_scalar_fluxes; return total_source; } /** * @brief Returns the scalar flux for some FSR and energy group. * @param fsr_id the ID for the FSR of interest * @param group the energy group of interest * @return the FSR scalar flux */ FP_PRECISION GPUSolver::getFlux(int fsr_id, int group) { if (fsr_id >= _num_FSRs) log_printf(ERROR, "Unable to return a scalar flux for FSR ID = %d " "since the max FSR ID = %d", fsr_id, _num_FSRs-1); else if (fsr_id < 0) log_printf(ERROR, "Unable to return a scalar flux for FSR ID = %d " "since FSRs do not have negative IDs", fsr_id); else if (group-1 >= _num_groups) log_printf(ERROR, "Unable to return a scalar flux in group %d " "since there are only %d groups", group, _num_groups); else if (group <= 0) log_printf(ERROR, "Unable to return a scalar flux in group %d " "since groups must be greater or equal to 1", group); if (_scalar_flux.size() == 0) log_printf(ERROR, "Unable to return a scalar flux " "since it has not yet been computed"); return _scalar_flux(fsr_id,group-1); } /** * @brief Fills an array with the scalar fluxes on the GPU. 
* @details This class method is a helper routine called by the OpenMOC * Python "openmoc.krylov" module for Krylov subspace methods. * Although this method appears to require two arguments, in * reality it only requires one due to SWIG and would be called * from within Python as follows: * * @code * num_fluxes = num_groups * num_FSRs * fluxes = solver.getFluxes(num_fluxes) * @endcode * * @param fluxes an array of FSR scalar fluxes in each energy group * @param num_fluxes the total number of FSR flux values */ void GPUSolver::getFluxes(FP_PRECISION* out_fluxes, int num_fluxes) { if (num_fluxes != _num_groups * _num_FSRs) log_printf(ERROR, "Unable to get FSR scalar fluxes since there are " "%d groups and %d FSRs which does not match the requested " "%d flux values", _num_groups, _num_FSRs, num_fluxes); else if (_scalar_flux.size() == 0) log_printf(ERROR, "Unable to get FSR scalar fluxes since they " "have not yet been allocated on the device"); FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); /* Copy the fluxes from the GPU to the input array */ hipMemcpy((void*)out_fluxes, (void*)scalar_flux, num_fluxes * sizeof(FP_PRECISION), hipMemcpyDeviceToHost); } /** * @brief Sets the number of thread blocks (>0) for CUDA kernels. * @param num_blocks the number of thread blocks */ void GPUSolver::setNumThreadBlocks(int num_blocks) { if (num_blocks < 0) log_printf(ERROR, "Unable to set the number of CUDA thread blocks " "to %d since it is a negative number", num_blocks); _B = num_blocks; } /** * @brief Sets the number of threads per block (>0) for CUDA kernels. * @param num_threads the number of threads per block */ void GPUSolver::setNumThreadsPerBlock(int num_threads) { if (num_threads < 0) log_printf(ERROR, "Unable to set the number of CUDA threads per block " "to %d since it is a negative number", num_threads); _T = num_threads; } /** * @brief Sets the Geometry for the Solver. * @details This is a private setter method for the Solver and is not * intended to be called by the user. * @param geometry a pointer to a Geometry object */ void GPUSolver::setGeometry(Geometry* geometry) { Solver::setGeometry(geometry); std::map<int, Material*> host_materials=_geometry->getAllMaterials(); std::map<int, Material*>::iterator iter; int material_index = 0; /* Iterate through all Materials and clone them as dev_material structs * on the device */ for (iter=host_materials.begin(); iter != host_materials.end(); ++iter) { _material_IDs_to_indices[iter->second->getId()] = material_index; material_index++; } } /** * @brief Sets the Solver's TrackGenerator with characteristic Tracks. * @details The TrackGenerator must already have generated Tracks and have * used ray tracing to segmentize them across the Geometry. This * should be initated in Python prior to assigning the TrackGenerator * to the Solver: * * @code * track_generator.generateTracks() * solver.setTrackGenerator(track_generator) * @endcode * * @param track_generator a pointer to a TrackGenerator object */ void GPUSolver::setTrackGenerator(TrackGenerator* track_generator) { Solver::setTrackGenerator(track_generator); initializeTracks(); } /** * @brief Set the flux array for use in transport sweep source calculations. * @detail This is a helper method for the checkpoint restart capabilities, * as well as the IRAMSolver in the openmoc.krylov submodule. 
This * routine may be used as follows from within Python: * * @code * num_FSRs = solver.getGeometry.getNumFSRs() * num_groups = solver.getGeometry.getNumEnergyGroups() * fluxes = numpy.random.rand(num_FSRs * num_groups, dtype=np.float) * solver.setFluxes(fluxes) * @endcode * * NOTE: This routine stores a pointer to the fluxes for the Solver * to use during transport sweeps and other calculations. Hence, the * flux array pointer is shared between NumPy and the Solver. * * @param in_fluxes an array with the fluxes to use * @param num_fluxes the number of flux values (# groups x # FSRs) */ void GPUSolver::setFluxes(FP_PRECISION* in_fluxes, int num_fluxes) { if (num_fluxes != _num_groups * _num_FSRs) log_printf(ERROR, "Unable to set an array with %d flux values for %d " " groups and %d FSRs", num_fluxes, _num_groups, _num_FSRs); /* Allocate array if flux arrays have not yet been initialized */ if (_scalar_flux.size() == 0) initializeFluxArrays(); FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); /* Copy the input fluxes onto the GPU */ hipMemcpy((void*)scalar_flux, (void*)in_fluxes, num_fluxes * sizeof(FP_PRECISION), hipMemcpyHostToDevice); _user_fluxes = true; } /** * @brief Creates a polar quadrature object for the GPUSolver on the GPU. */ void GPUSolver::initializePolarQuadrature() { log_printf(INFO, "Initializing polar quadrature on the GPU..."); Solver::initializePolarQuadrature(); if (_num_polar > MAX_POLAR_ANGLES_GPU) log_printf(ERROR, "Unable to initialize a polar quadrature with %d " "angles for the GPUSolver which is limited to %d polar " "angles. Update the MAX_POLAR_ANGLES_GPU macro in constants.h " "and recompile.", _num_polar, MAX_POLAR_ANGLES_GPU); /* Copy the number of polar angles to constant memory on the GPU */ hipMemcpyToSymbol(num_polar, (void*)&_num_polar, sizeof(int), 0, hipMemcpyHostToDevice); /* Copy twice the number of polar angles to constant memory on the GPU */ _two_times_num_polar = 2 * _num_polar; hipMemcpyToSymbol(two_times_num_polar, (void*)&_two_times_num_polar, sizeof(int), 0, hipMemcpyHostToDevice); /* Copy the number of polar angles times energy groups to constant memory * on the GPU */ hipMemcpyToSymbol(polar_times_groups, (void*)&_polar_times_groups, sizeof(int), 0, hipMemcpyHostToDevice); /* Copy the polar weights to constant memory on the GPU */ hipMemcpyToSymbol(polar_weights, (void*)_polar_weights, _num_polar * _num_azim * sizeof(FP_PRECISION), 0, hipMemcpyHostToDevice); /* Copy the sines of the polar angles which is needed if the user * requested the use of the exp intrinsic to evaluate exponentials */ hipMemcpyToSymbol(sin_thetas, (void*)_polar_quad->getSinThetas(), _num_polar * sizeof(FP_PRECISION), 0, hipMemcpyHostToDevice); } /** * @brief Initializes new GPUExpEvaluator object to compute exponentials. */ void GPUSolver::initializeExpEvaluator() { Solver::initializeExpEvaluator(); log_printf(INFO, "Initializing the exponential evaluator on the GPU..."); /* Allocate memory for a GPUExpEvaluator on the device */ GPUExpEvaluator* dev_exp_evaluator; hipMalloc((void**)&dev_exp_evaluator, sizeof(GPUExpEvaluator)); /* Clone ExpEvaluator from the host into GPUExpEvaluator on the device */ clone_exp_evaluator(_exp_evaluator, dev_exp_evaluator); /* Copy the GPUExpEvaluator into constant memory on the GPU */ hipMemcpyToSymbol(exp_evaluator, (void*)dev_exp_evaluator, sizeof(GPUExpEvaluator), 0, hipMemcpyDeviceToDevice); } /** * @brief Initializes the FSR volumes and dev_materials array on the GPU. 
* @details This method assigns each FSR a unique, monotonically increasing * ID, sets the Material for each FSR, and assigns a volume based on * the cumulative length of all of the segments inside the FSR. */ void GPUSolver::initializeFSRs() { log_printf(NORMAL, "Initializing FSRs on the GPU..."); /* Delete old FSRs array if it exists */ if (_FSR_volumes != NULL) { hipFree(_FSR_volumes); _FSR_volumes = NULL; } if (_FSR_materials != NULL) { hipFree(_FSR_materials); _FSR_materials = NULL; } Solver::initializeFSRs(); /* Allocate memory for all FSR volumes and dev_materials on the device */ try{ /* Store pointers to arrays of FSR data created on the host by the * the parent class Solver::initializeFSRs() routine */ FP_PRECISION* host_FSR_volumes = _FSR_volumes; int* host_FSR_materials = _FSR_materials; /* Allocate memory on device for FSR volumes and Material indices */ hipMalloc((void**)&_FSR_volumes, _num_FSRs * sizeof(FP_PRECISION)); hipMalloc((void**)&_FSR_materials, _num_FSRs * sizeof(int)); /* Create a temporary FSR to material indices array */ int* FSRs_to_material_indices = new int[_num_FSRs]; /* Populate FSR Material indices array */ for (int i = 0; i < _num_FSRs; i++) FSRs_to_material_indices[i] = _material_IDs_to_indices[_geometry-> findFSRMaterial(i)->getId()]; /* Copy the arrays of FSR data to the device */ hipMemcpy((void*)_FSR_volumes, (void*)host_FSR_volumes, _num_FSRs * sizeof(FP_PRECISION), hipMemcpyHostToDevice); hipMemcpy((void*)_FSR_materials, (void*)FSRs_to_material_indices, _num_FSRs * sizeof(int), hipMemcpyHostToDevice); /* Copy the number of FSRs into constant memory on the GPU */ hipMemcpyToSymbol(num_FSRs, (void*)&_num_FSRs, sizeof(int), 0, hipMemcpyHostToDevice); /* Free the array of FSRs data allocated by the Solver parent class */ free(host_FSR_volumes); free(host_FSR_materials); /* Free the temporary array of FSRs to material indices on the host */ free(FSRs_to_material_indices); } catch(std::exception &e) { log_printf(ERROR, "Could not allocate memory for FSRs on GPU"); } } /** * @brief Allocates all Materials data on the GPU. * @details This method loops over the materials in the host_materials map. * Since CUDA does not support std::map data types on the device, * the materials map must be converted to an array and a map created * that maps a material ID to an indice in the new materials array. In * initializeTracks, this map is used to convert the Material ID * associated with every segment to an index in the materials array. 
* @param mode the solution type (FORWARD or ADJOINT) */ void GPUSolver::initializeMaterials(solverMode mode) { Solver::initializeMaterials(mode); log_printf(INFO, "Initializing materials on the GPU..."); /* Copy the number of energy groups to constant memory on the GPU */ hipMemcpyToSymbol(num_groups, (void*)&_num_groups, sizeof(int), 0, hipMemcpyHostToDevice); /* Delete old materials array if it exists */ if (_materials != NULL) hipFree(_materials); /* Allocate memory for all dev_materials on the device */ try{ std::map<int, Material*> host_materials=_geometry->getAllMaterials(); std::map<int, Material*>::iterator iter; int material_index = 0; /* Iterate through all Materials and clone them as dev_material structs * on the device */ hipMalloc((void**)&_materials, _num_materials * sizeof(dev_material)); for (iter=host_materials.begin(); iter != host_materials.end(); ++iter) { clone_material(iter->second, &_materials[material_index]); material_index++; } } catch(std::exception &e) { log_printf(ERROR, "Could not allocate memory for Materials on GPU"); } } /** * @brief Allocates memory for all Tracks on the GPU */ void GPUSolver::initializeTracks() { log_printf(INFO, "Initializing tracks on the GPU..."); /* Delete old Tracks array if it exists */ if (_dev_tracks != NULL) hipFree(_dev_tracks); /* Allocate memory for all Tracks and Track offset indices on the device */ try{ /* Allocate array of dev_tracks */ hipMalloc((void**)&_dev_tracks, _tot_num_tracks * sizeof(dev_track)); /* Iterate through all Tracks and clone them as dev_tracks on the device */ int index; for (int i=0; i < _tot_num_tracks; i++) { clone_track(_tracks[i], &_dev_tracks[i], _material_IDs_to_indices); /* Get indices to next tracks along "forward" and "reverse" directions */ index = _tracks[i]->getTrackIn()->getUid(); hipMemcpy((void*)&_dev_tracks[i]._track_in, (void*)&index, sizeof(int), hipMemcpyHostToDevice); index = _tracks[i]->getTrackOut()->getUid(); hipMemcpy((void*)&_dev_tracks[i]._track_out, (void*)&index, sizeof(int), hipMemcpyHostToDevice); } /* Copy the total number of Tracks into constant memory on GPU */ hipMemcpyToSymbol(tot_num_tracks, (void*)&_tot_num_tracks, sizeof(int), 0, hipMemcpyHostToDevice); /* Copy the number of azimuthal angles into constant memory on GPU */ hipMemcpyToSymbol(num_azim, (void*)&_num_azim, sizeof(int), 0, hipMemcpyHostToDevice); } catch(std::exception &e) { log_printf(ERROR, "Could not allocate memory for Tracks on GPU"); } } /** * @brief Allocates memory for Track boundary angular and FSR scalar fluxes. * @details Deletes memory for old flux vectors if they were allocated for a * previous simulation. */ void GPUSolver::initializeFluxArrays() { log_printf(INFO, "Initializing flux vectors on the GPU..."); /* Clear Thrust vectors' memory if previously allocated */ _boundary_flux.clear(); _scalar_flux.clear(); _old_scalar_flux.clear(); /* Allocate memory for all flux arrays on the device */ try{ int size = 2 * _tot_num_tracks * _polar_times_groups; _boundary_flux.resize(size); size = _num_FSRs * _num_groups; _scalar_flux.resize(size); _old_scalar_flux.resize(size); } catch(std::exception &e) { log_printf(ERROR, "Could not allocate memory for fluxes on GPU"); } } /** * @brief Allocates memory for FSR source vectors on the GPU. * @details Deletes memory for old source vectors if they were allocated * for a previous simulation. 
*/ void GPUSolver::initializeSourceArrays() { log_printf(INFO, "Initializing source vectors on the GPU..."); /* Clear Thrust vectors' memory if previously allocated */ _reduced_sources.clear(); _fixed_sources.clear(); int size = _num_FSRs * _num_groups; /* Allocate memory for all source arrays on the device */ try{ _reduced_sources.resize(size); _fixed_sources.resize(size); } catch(std::exception &e) { log_printf(ERROR, "Could not allocate memory for sources on GPU"); } /* Initialize fixed sources to zero */ thrust::fill(_fixed_sources.begin(), _fixed_sources.end(), 0.0); /* Fill fixed sources with those assigned by Cell, Material or FSR */ initializeFixedSources(); } /** * @brief Populates array of fixed sources assigned by FSR. */ void GPUSolver::initializeFixedSources() { Solver::initializeFixedSources(); int fsr_id, group; std::pair<int, int> fsr_group_key; std::map< std::pair<int, int>, FP_PRECISION >::iterator fsr_iter; /* Populate fixed source array with any user-defined sources */ for (fsr_iter = _fix_src_FSR_map.begin(); fsr_iter != _fix_src_FSR_map.end(); ++fsr_iter) { /* Get the FSR with an assigned fixed source */ fsr_group_key = fsr_iter->first; fsr_id = fsr_group_key.first; group = fsr_group_key.second; if (group <= 0 || group > _num_groups) log_printf(ERROR,"Unable to use fixed source for group %d in " "a %d energy group problem", group, _num_groups); if (fsr_id < 0 || fsr_id >= _num_FSRs) log_printf(ERROR,"Unable to use fixed source for FSR %d with only " "%d FSRs in the geometry", fsr_id, _num_FSRs); _fixed_sources(fsr_id, group-1) = _fix_src_FSR_map[fsr_group_key]; } } /** * @brief Zero each Track's boundary fluxes for each energy group and polar * angle in the "forward" and "reverse" directions. */ void GPUSolver::zeroTrackFluxes() { thrust::fill(_boundary_flux.begin(), _boundary_flux.end(), 0.0); } /** * @brief Set the scalar flux for each FSR and energy group to some value. * @param value the value to assign to each FSR scalar flux */ void GPUSolver::flattenFSRFluxes(FP_PRECISION value) { thrust::fill(_scalar_flux.begin(), _scalar_flux.end(), value); } /** * @brief Stores the FSR scalar fluxes in the old scalar flux array. */ void GPUSolver::storeFSRFluxes() { thrust::copy(_scalar_flux.begin(), _scalar_flux.end(), _old_scalar_flux.begin()); } /** * @brief Normalizes all FSR scalar fluxes and Track boundary angular * fluxes to the total fission source (times \f$ \nu \f$). 
*/ void GPUSolver::normalizeFluxes() { /** Create Thrust vector of fission sources in each FSR */ thrust::device_vector<FP_PRECISION> fission_sources_vec; fission_sources_vec.resize(_B * _T); FP_PRECISION* fission_sources = thrust::raw_pointer_cast(&fission_sources_vec[0]); FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); int shared_mem = sizeof(FP_PRECISION) * _T; hipLaunchKernelGGL(( computeFissionSourcesOnDevice), dim3(_B), dim3(_T), shared_mem, 0, _FSR_volumes, _FSR_materials, _materials, scalar_flux, fission_sources); FP_PRECISION norm_factor = 1.0 / thrust::reduce(fission_sources_vec.begin(), fission_sources_vec.end()); /* Multiply all scalar and angular fluxes by the normalization constant */ thrust::transform(_scalar_flux.begin(), _scalar_flux.end(), thrust::constant_iterator<FP_PRECISION>(norm_factor), _scalar_flux.begin(), thrust::multiplies<FP_PRECISION>()); thrust::transform(_old_scalar_flux.begin(), _old_scalar_flux.end(), thrust::constant_iterator<FP_PRECISION>(norm_factor), _old_scalar_flux.begin(), thrust::multiplies<FP_PRECISION>()); thrust::transform(_boundary_flux.begin(), _boundary_flux.end(), thrust::constant_iterator<FP_PRECISION>(norm_factor), _boundary_flux.begin(), thrust::multiplies<FP_PRECISION>()); /* Clear Thrust vector of FSR fission sources */ fission_sources_vec.clear(); } /** * @brief Computes the total source (fission, scattering, fixed) in each FSR. * @details This method computes the total source in each FSR based on * this iteration's current approximation to the scalar flux. */ void GPUSolver::computeFSRSources() { FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); FP_PRECISION* fixed_sources = thrust::raw_pointer_cast(&_fixed_sources[0]); FP_PRECISION* reduced_sources = thrust::raw_pointer_cast(&_reduced_sources[0]); hipLaunchKernelGGL(( computeFSRSourcesOnDevice), dim3(_B), dim3(_T), 0, 0, _FSR_materials, _materials, scalar_flux, fixed_sources, reduced_sources, 1.0 / _k_eff); } /** * @brief Computes the fission source in each FSR. * @details This method computes the fission source in each FSR based on * this iteration's current approximation to the scalar flux. */ void GPUSolver::computeFSRFissionSources() { FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); FP_PRECISION* reduced_sources = thrust::raw_pointer_cast(&_reduced_sources[0]); hipLaunchKernelGGL(( computeFSRFissionSourcesOnDevice), dim3(_B), dim3(_T), 0, 0, _FSR_materials, _materials, true, scalar_flux, reduced_sources); } /** * @brief Computes the scatter source in each FSR. * @details This method computes the scatter source in each FSR based on * this iteration's current approximation to the scalar flux. */ void GPUSolver::computeFSRScatterSources() { FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); FP_PRECISION* reduced_sources = thrust::raw_pointer_cast(&_reduced_sources[0]); hipLaunchKernelGGL(( computeFSRScatterSourcesOnDevice), dim3(_B), dim3(_T), 0, 0, _FSR_materials, _materials, true, scalar_flux, reduced_sources); } /** * @brief This method performs one transport sweep of all azimuthal angles, * Tracks, Track segments, polar angles and energy groups. * @details The method integrates the flux along each Track and updates the * boundary fluxes for the corresponding output Track, while updating * the scalar flux in each flat source region. 
*/ void GPUSolver::transportSweep() { int shared_mem = _T * _two_times_num_polar * sizeof(FP_PRECISION); int tid_offset = 0; int tid_max = 0; log_printf(DEBUG, "Transport sweep on device with %d blocks and %d threads", _B, _T); /* Get device pointer to the Thrust vectors */ FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); FP_PRECISION* boundary_flux = thrust::raw_pointer_cast(&_boundary_flux[0]); FP_PRECISION* reduced_sources = thrust::raw_pointer_cast(&_reduced_sources[0]); /* Initialize flux in each FSR to zero */ flattenFSRFluxes(0.0); /* Loop over the parallel track groups and perform transport sweep on tracks * in that group */ for (int g=0; g < _num_parallel_track_groups; g++) { tid_offset = tid_max * _num_groups; tid_max += _track_generator->getNumTracksByParallelGroup(g); hipLaunchKernelGGL(( transportSweepOnDevice), dim3(_B), dim3(_T), shared_mem, 0, scalar_flux, boundary_flux, reduced_sources, _materials, _dev_tracks, tid_offset, tid_max); hipDeviceSynchronize(); } } /** * @brief Add the source term contribution in the transport equation to * the FSR scalar flux. */ void GPUSolver::addSourceToScalarFlux() { FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); FP_PRECISION* reduced_sources = thrust::raw_pointer_cast(&_reduced_sources[0]); hipLaunchKernelGGL(( addSourceToScalarFluxOnDevice), dim3(_B),dim3(_T), 0, 0, scalar_flux, reduced_sources, _FSR_volumes, _FSR_materials, _materials); } /** * @brief Compute \f$ k_{eff} \f$ from successive fission sources. * @details This method computes the current approximation to the * multiplication factor on this iteration as follows: * \f$ k_{eff} = \frac{\displaystyle\sum_{i \in I} * \displaystyle\sum_{g \in G} \nu \Sigma^F_g \Phi V_{i}} * {\displaystyle\sum_{i \in I} * \displaystyle\sum_{g \in G} (\Sigma^T_g \Phi V_{i} - * \Sigma^S_g \Phi V_{i} - L_{i,g})} \f$ */ void GPUSolver::computeKeff() { FP_PRECISION fission; thrust::device_vector<FP_PRECISION> fission_vec; fission_vec.resize(_B * _T); FP_PRECISION* fiss_ptr = thrust::raw_pointer_cast(&fission_vec[0]); FP_PRECISION* flux = thrust::raw_pointer_cast(&_scalar_flux[0]); /* Compute the total, fission and scattering reaction rates on device. * This kernel stores partial rates in a Thrust vector with as many * entries as CUDAthreads executed by the kernel */ hipLaunchKernelGGL(( computeFSRFissionRatesOnDevice), dim3(_B), dim3(_T), 0, 0, _FSR_volumes, _FSR_materials, _materials, flux, fiss_ptr); /* Compute the total fission source */ fission = thrust::reduce(fission_vec.begin(), fission_vec.end()); _k_eff *= fission; fission_vec.clear(); } /** * @brief Computes the residual between source/flux iterations. 
* @param res_type the type of residuals to compute * (SCALAR_FLUX, FISSION_SOURCE, TOTAL_SOURCE) * @return the average residual in each flat source region */ double GPUSolver::computeResidual(residualType res_type) { int norm; double residual; isinf_test inf_test; isnan_test nan_test; /* Allocate Thrust vector for residuals in each FSR */ thrust::device_vector<double> residuals(_num_FSRs); if (res_type == SCALAR_FLUX) { norm = _num_FSRs; /* Allocate Thrust vector for residuals */ thrust::device_vector<FP_PRECISION> fp_residuals(_num_FSRs * _num_groups); thrust::device_vector<FP_PRECISION> FSR_fp_residuals(_num_FSRs); /* Compute the relative flux change in each FSR and group */ thrust::transform(_scalar_flux.begin(), _scalar_flux.end(), _old_scalar_flux.begin(), fp_residuals.begin(), thrust::minus<FP_PRECISION>()); thrust::transform(fp_residuals.begin(), fp_residuals.end(), _old_scalar_flux.begin(), fp_residuals.begin(), thrust::divides<FP_PRECISION>()); /* Replace INF and NaN values (from divide by zero) with 0. */ thrust::replace_if(fp_residuals.begin(), fp_residuals.end(), inf_test, 0); thrust::replace_if(fp_residuals.begin(), fp_residuals.end(), nan_test, 0); /* Square the residuals */ thrust::transform(fp_residuals.begin(), fp_residuals.end(), fp_residuals.begin(), fp_residuals.begin(), thrust::multiplies<FP_PRECISION>()); typedef thrust::device_vector<FP_PRECISION>::iterator Iterator; /* Reduce flux residuals across energy groups within each FSR */ for (int e=0; e < _num_groups; e++) { strided_range<Iterator> strider(fp_residuals.begin() + e, fp_residuals.end(), _num_groups); thrust::transform(FSR_fp_residuals.begin(), FSR_fp_residuals.end(), strider.begin(), FSR_fp_residuals.begin(), thrust::plus<FP_PRECISION>()); } /* Copy the FP_PRECISION residual to the double precision residual */ thrust::copy(FSR_fp_residuals.begin(), FSR_fp_residuals.end(), residuals.begin()); /* Sum up the residuals */ residual = thrust::reduce(residuals.begin(), residuals.end()); /* Deallocate memory for Thrust vectors */ fp_residuals.clear(); FSR_fp_residuals.clear(); residuals.clear(); /* Normalize the residual */ residual = sqrt(residual / norm); return residual; } else if (res_type == FISSION_SOURCE) { if (_num_fissionable_FSRs == 0) log_printf(ERROR, "The Solver is unable to compute a " "FISSION_SOURCE residual without fissionable FSRs"); norm = _num_fissionable_FSRs; /* Allocate Thrust vectors for fission sources in each FSR, group */ thrust::device_vector<FP_PRECISION> new_fission_sources_vec(_num_FSRs * _num_groups); thrust::device_vector<FP_PRECISION> old_fission_sources_vec(_num_FSRs * _num_groups); /* Allocate Thrust vectors for energy-integrated fission sources in each FSR */ thrust::device_vector<FP_PRECISION> FSR_old_fiss_src(_num_FSRs); thrust::device_vector<FP_PRECISION> FSR_new_fiss_src(_num_FSRs); /* Cast Thrust vectors as array pointers */ FP_PRECISION* old_fission_sources = thrust::raw_pointer_cast(&old_fission_sources_vec[0]); FP_PRECISION* new_fission_sources = thrust::raw_pointer_cast(&new_fission_sources_vec[0]); FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); FP_PRECISION* old_scalar_flux = thrust::raw_pointer_cast(&_old_scalar_flux[0]); /* Compute the old and new nu-fission sources in each FSR, group */ hipLaunchKernelGGL(( computeFSRFissionSourcesOnDevice), dim3(_B), dim3(_T), 0, 0, _FSR_materials, _materials, false, old_scalar_flux, old_fission_sources); hipLaunchKernelGGL(( computeFSRFissionSourcesOnDevice), dim3(_B), dim3(_T), 0, 0, _FSR_materials, 
_materials, false, scalar_flux, new_fission_sources); typedef thrust::device_vector<FP_PRECISION>::iterator Iterator; /* Reduce nu-fission sources across energy groups within each FSR */ for (int e=0; e < _num_groups; e++) { strided_range<Iterator> old_strider(old_fission_sources_vec.begin() + e, old_fission_sources_vec.end(), _num_groups); strided_range<Iterator> new_strider(new_fission_sources_vec.begin() + e, new_fission_sources_vec.end(), _num_groups); thrust::transform(FSR_old_fiss_src.begin(), FSR_old_fiss_src.end(), old_strider.begin(), FSR_old_fiss_src.begin(), thrust::plus<FP_PRECISION>()); thrust::transform(FSR_new_fiss_src.begin(), FSR_new_fiss_src.end(), new_strider.begin(), FSR_new_fiss_src.begin(), thrust::plus<FP_PRECISION>()); } /* Compute the relative nu-fission source change in each FSR */ thrust::transform(FSR_new_fiss_src.begin(), FSR_new_fiss_src.end(), FSR_old_fiss_src.begin(), residuals.begin(), thrust::minus<FP_PRECISION>()); thrust::transform(residuals.begin(), residuals.end(), FSR_old_fiss_src.begin(), residuals.begin(), thrust::divides<FP_PRECISION>()); /* Deallocate memory for Thrust vectors */ old_fission_sources_vec.clear(); new_fission_sources_vec.clear(); FSR_old_fiss_src.clear(); FSR_new_fiss_src.clear(); } else if (res_type == TOTAL_SOURCE) { norm = _num_FSRs; /* Allocate Thrust vectors for fission/scatter sources in each FSR, group */ thrust::device_vector<FP_PRECISION> new_sources_vec(_num_FSRs * _num_groups); thrust::device_vector<FP_PRECISION> old_sources_vec(_num_FSRs * _num_groups); thrust::fill(new_sources_vec.begin(), new_sources_vec.end(), 0.0); thrust::fill(old_sources_vec.begin(), old_sources_vec.end(), 0.0); /* Allocate Thrust vectors for energy-integrated fission/scatter sources in each FSR */ thrust::device_vector<FP_PRECISION> FSR_old_src(_num_FSRs); thrust::device_vector<FP_PRECISION> FSR_new_src(_num_FSRs); thrust::fill(FSR_old_src.begin(), FSR_old_src.end(), 0.); thrust::fill(FSR_new_src.begin(), FSR_new_src.end(), 0.); /* Cast Thrust vectors as array pointers */ FP_PRECISION* old_sources = thrust::raw_pointer_cast(&old_sources_vec[0]); FP_PRECISION* new_sources = thrust::raw_pointer_cast(&new_sources_vec[0]); FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); FP_PRECISION* old_scalar_flux = thrust::raw_pointer_cast(&_old_scalar_flux[0]); /* Compute nu-fission source */ /* Compute the old and new nu-fission sources in each FSR, group */ hipLaunchKernelGGL(( computeFSRFissionSourcesOnDevice), dim3(_B), dim3(_T), 0, 0, _FSR_materials, _materials, false, old_scalar_flux, old_sources); hipLaunchKernelGGL(( computeFSRFissionSourcesOnDevice), dim3(_B), dim3(_T), 0, 0, _FSR_materials, _materials, false, scalar_flux, new_sources); typedef thrust::device_vector<FP_PRECISION>::iterator Iterator; /* Reduce nu-fission sources across energy groups within each FSR */ for (int e=0; e < _num_groups; e++) { strided_range<Iterator> old_strider(old_sources_vec.begin() + e, old_sources_vec.end(), _num_groups); strided_range<Iterator> new_strider(new_sources_vec.begin() + e, new_sources_vec.end(), _num_groups); thrust::transform(FSR_old_src.begin(), FSR_old_src.end(), old_strider.begin(), FSR_old_src.begin(), thrust::plus<FP_PRECISION>()); thrust::transform(FSR_new_src.begin(), FSR_new_src.end(), new_strider.begin(), FSR_new_src.begin(), thrust::plus<FP_PRECISION>()); } /* Multiply fission sources by inverse keff */ thrust::for_each(FSR_new_src.begin(), FSR_new_src.end(), multiplyByConstant<FP_PRECISION>(1. 
/ _k_eff)); thrust::for_each(FSR_old_src.begin(), FSR_old_src.end(), multiplyByConstant<FP_PRECISION>(1. / _k_eff)); /* Compute scatter source */ /* Reset sources Thrust vectors to zero */ thrust::fill(new_sources_vec.begin(), new_sources_vec.end(), 0.0); thrust::fill(old_sources_vec.begin(), old_sources_vec.end(), 0.0); /* Compute the old and new scattering sources in each FSR, group */ hipLaunchKernelGGL(( computeFSRScatterSourcesOnDevice), dim3(_B), dim3(_T), 0, 0, _FSR_materials, _materials, false, old_scalar_flux, old_sources); hipLaunchKernelGGL(( computeFSRScatterSourcesOnDevice), dim3(_B), dim3(_T), 0, 0, _FSR_materials, _materials, false, scalar_flux, new_sources); /* Reduce scatter sources across energy groups within each FSR */ for (int e=0; e < _num_groups; e++) { strided_range<Iterator> old_strider(old_sources_vec.begin() + e, old_sources_vec.end(), _num_groups); strided_range<Iterator> new_strider(new_sources_vec.begin() + e, new_sources_vec.end(), _num_groups); thrust::transform(FSR_old_src.begin(), FSR_old_src.end(), old_strider.begin(), FSR_old_src.begin(), thrust::plus<FP_PRECISION>()); thrust::transform(FSR_new_src.begin(), FSR_new_src.end(), new_strider.begin(), FSR_new_src.begin(), thrust::plus<FP_PRECISION>()); } /* Compute the relative total source change in each FSR */ thrust::transform(FSR_new_src.begin(), FSR_new_src.end(), FSR_old_src.begin(), residuals.begin(), thrust::minus<FP_PRECISION>()); thrust::transform(residuals.begin(), residuals.end(), FSR_old_src.begin(), residuals.begin(), thrust::divides<FP_PRECISION>()); /* Deallocate memory for Thrust vectors */ old_sources_vec.clear(); new_sources_vec.clear(); FSR_old_src.clear(); FSR_new_src.clear(); } /* Replace INF and NaN values (from divide by zero) with 0. */ thrust::replace_if(residuals.begin(), residuals.end(), inf_test, 0); thrust::replace_if(residuals.begin(), residuals.end(), nan_test, 0); /* Square the residuals */ thrust::transform(residuals.begin(), residuals.end(), residuals.begin(), residuals.begin(), thrust::multiplies<double>()); /* Sum up the residuals */ residual = thrust::reduce(residuals.begin(), residuals.end()); /* Deallocate memory for residuals vector */ residuals.clear(); /* Normalize the residual */ residual = sqrt(residual / norm); return residual; } /** * @brief Computes the volume-averaged, energy-integrated nu-fission rate in * each FSR and stores them in an array indexed by FSR ID. * @details This is a helper method for SWIG to allow users to retrieve * FSR nu-fission rates as a NumPy array. 
An example of how this method * can be called from Python is as follows: * * @code * num_FSRs = geometry.getNumFSRs() * fission_rates = solver.computeFSRFissionRates(num_FSRs) * @endcode * * @param fission_rates an array to store the nu-fission rates (implicitly * passed in as a NumPy array from Python) * @param num_FSRs the number of FSRs passed in from Python */ void GPUSolver::computeFSRFissionRates(double* fission_rates, int num_FSRs) { log_printf(INFO, "Computing FSR fission rates..."); /* Allocate memory for the FSR nu-fission rates on the device and host */ FP_PRECISION* dev_fission_rates; hipMalloc((void**)&dev_fission_rates, _num_FSRs * sizeof(FP_PRECISION)); FP_PRECISION* host_fission_rates = new FP_PRECISION[_num_FSRs]; FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); /* Compute the FSR nu-fission rates on the device */ hipLaunchKernelGGL(( computeFSRFissionRatesOnDevice), dim3(_B), dim3(_T), 0, 0, _FSR_volumes, _FSR_materials, _materials, scalar_flux, dev_fission_rates); /* Copy the nu-fission rate array from the device to the host */ hipMemcpy((void*)host_fission_rates, (void*)dev_fission_rates, _num_FSRs * sizeof(FP_PRECISION), hipMemcpyDeviceToHost); /* Populate the double precision NumPy array for the output */ for (int i=0; i < _num_FSRs; i++) fission_rates[i] = host_fission_rates[i]; /* Deallocate the memory assigned to store the fission rates on the device */ hipFree(dev_fission_rates); delete [] host_fission_rates; }
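

/* --- Illustrative sketch (editorial addition, not part of OpenMOC) ----------
 * Every segment that tallyScalarFlux() processes above reduces to the same
 * two-step update: attenuate the angular flux toward the reduced source and
 * tally the difference into the FSR scalar flux.  The standalone kernel below
 * shows that update with the exponential evaluated directly via expf() instead
 * of the tabulated GPUExpEvaluator, and with plain float instead of
 * FP_PRECISION.  All demo_* names are made up for illustration; this is a
 * minimal sketch under those assumptions, not the library's kernel.  It can be
 * dropped into a .cu file and compiled with nvcc as-is.
 */
__global__ void demo_segment_update(float* track_flux,      /* [num_polar] angular flux */
                                    float* fsr_scalar_flux, /* one FSR, one energy group */
                                    const float* sin_thetas,
                                    const float* polar_wgts,
                                    float sigma_t, float length,
                                    float reduced_source, int num_polar) {

  float fsr_flux = 0.0f;

  /* One thread is enough for the sketch; loop over polar angles serially */
  if (threadIdx.x == 0 && blockIdx.x == 0) {

    for (int p = 0; p < num_polar; p++) {

      /* Optical attenuation factor 1 - exp(-sigma_t * l / sin(theta_p)) */
      float exponential = 1.0f - expf(-sigma_t * length / sin_thetas[p]);

      /* Change in angular flux along this segment */
      float delta_psi = (track_flux[p] - reduced_source) * exponential;

      /* Tally into the FSR scalar flux with the polar weight ... */
      fsr_flux += delta_psi * polar_wgts[p];

      /* ... and attenuate the outgoing angular flux */
      track_flux[p] -= delta_psi;
    }

    /* The real kernel uses atomicAdd since many Tracks cross the same FSR */
    atomicAdd(fsr_scalar_flux, fsr_flux);
  }
}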
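

/* --- Illustrative sketch (editorial addition, not part of OpenMOC) ----------
 * transportSweepOnDevice() assigns one (Track, energy group) pair to each CUDA
 * thread, computed as tid / num_groups and tid % num_groups, and gives each
 * thread a private slice of dynamic shared memory holding 2 * num_polar
 * angular fluxes (forward half first, reverse half second).  The toy kernel
 * below reproduces only that bookkeeping so it can be checked in isolation;
 * the demo_* names and NUM_POLAR_DEMO constant are made up for illustration.
 */
#define NUM_POLAR_DEMO 3

__global__ void demo_sweep_bookkeeping(int* track_id_out, int* group_out,
                                       int tid_offset, int tid_max,
                                       int num_groups) {

  /* One slice of 2 * NUM_POLAR_DEMO entries per thread, sized at launch as
   * shared_mem = blockDim.x * 2 * NUM_POLAR_DEMO * sizeof(float) */
  extern __shared__ float temp_flux[];
  float* track_flux = &temp_flux[threadIdx.x * 2 * NUM_POLAR_DEMO];

  int tid = tid_offset + threadIdx.x + blockIdx.x * blockDim.x;
  int track_id = tid / num_groups;
  int energy_group = tid % num_groups;

  while (track_id < tid_max) {

    /* Forward angular flux lives in [0, num_polar), reverse flux in
     * [num_polar, 2*num_polar), mirroring the real sweep kernel */
    for (int p = 0; p < 2 * NUM_POLAR_DEMO; p++)
      track_flux[p] = 0.0f;

    /* Record which (Track, group) pair this thread would sweep; the output
     * arrays are assumed to hold tid_max * num_groups entries */
    track_id_out[tid] = track_id;
    group_out[tid] = energy_group;

    /* Grid-stride to the next (Track, group) pair */
    tid += blockDim.x * gridDim.x;
    track_id = tid / num_groups;
    energy_group = tid % num_groups;
  }
}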
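

/* --- Illustrative sketch (editorial addition, not part of OpenMOC) ----------
 * computeResidual() collapses a (num_FSRs x num_groups) vector into one value
 * per FSR by looping over energy groups with the strided_range iterator.  The
 * same per-FSR reduction can be written with thrust::reduce_by_key under the
 * file's FSR-major layout (index = fsr * num_groups + group).  This is an
 * equivalent alternative offered for comparison, not the routine OpenMOC
 * uses, and the demo_* names are made up for illustration.
 */
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>

/* Maps a flat (FSR, group) index to its FSR id */
struct demo_fsr_of_index {
  int num_groups;
  demo_fsr_of_index(int g) : num_groups(g) {}
  __host__ __device__ int operator()(int i) const { return i / num_groups; }
};

/* Sums num_groups consecutive entries for each FSR; per_group must hold
 * num_FSRs * num_groups values in FSR-major order */
void demo_reduce_over_groups(const thrust::device_vector<float>& per_group,
                             thrust::device_vector<float>& per_fsr,
                             int num_FSRs, int num_groups) {

  /* Key i -> FSR id, generated on the fly from a counting iterator */
  thrust::counting_iterator<int> first(0);
  auto keys = thrust::make_transform_iterator(first, demo_fsr_of_index(num_groups));

  thrust::device_vector<int> fsr_ids(num_FSRs);  /* reduced keys, discarded */
  per_fsr.resize(num_FSRs);

  thrust::reduce_by_key(keys, keys + num_FSRs * num_groups,
                        per_group.begin(),
                        fsr_ids.begin(), per_fsr.begin());
}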
7aea424e29ccd2b12c5c4dfab1da351b02dd797a.cu
#include "GPUSolver.h" /** The number of azimuthal angles */ __constant__ int num_azim[1]; /** The number of energy groups */ __constant__ int num_groups[1]; /** The number of FSRs */ __constant__ int num_FSRs[1]; /** The number of polar angles */ __constant__ int num_polar[1]; /** Twice the number of polar angles */ __constant__ int two_times_num_polar[1]; /** The number of polar angles times energy groups */ __constant__ int polar_times_groups[1]; /** An array for the sines of the polar angle in the polar Quadrature set */ __constant__ FP_PRECISION sin_thetas[MAX_POLAR_ANGLES_GPU]; /** An array of the weights for the polar angles from the Quadrature set */ __constant__ FP_PRECISION polar_weights[MAX_POLAR_ANGLES_GPU*MAX_AZIM_ANGLES_GPU]; /** The total number of Tracks */ __constant__ int tot_num_tracks[1]; /** An GPUExpEvaluator object to compute exponentials */ __constant__ GPUExpEvaluator exp_evaluator; /** * @brief A struct used to check if a value on the GPU is equal to INF. * @details This is used as a predicate in Thrust routines. */ struct isinf_test { /** * @brief Checks if a double precision value is INF. * @param a the value to check * @return true if equal to INF, false otherwise */ __host__ __device__ bool operator()(double a) { return isinf(a); } /** * @brief Checks if a single precision value is INF. * @param a the value to check * @return true if equal to INF, false otherwise */ __host__ __device__ bool operator()(float a) { return isinf(a); } }; /** * @brief A struct used to check if a value on the GPU is equal to NaN. * @details This is used as a predicate in Thrust routines. */ struct isnan_test { /** * @brief Checks if a double precision value is NaN. * @param a the value to check * @return true if equal to NaN, false otherwise */ __host__ __device__ bool operator()(double a) { return isnan(a); } /** * @brief Checks if a single precision value is NaN. * @param a the value to check * @return true if equal to NaN, false otherwise */ __host__ __device__ bool operator()(float a) { return isnan(a); } }; /** * @brief A functor to multiply all elements in a Thrust vector by a constant. * @param constant the constant to multiply the vector */ template< typename T > struct multiplyByConstant { public: /* The constant to multiply by */ const T constant; /** * @brief Constructor for the functor. * @param constant to multiply each element in a Thrust vector */ multiplyByConstant(T constant) : constant(constant) {} /** * @brief Multiply an element in a Thrust vector. * @param VecElem the element to multiply */ __host__ __device__ void operator()(T& VecElem) const { VecElem = VecElem * constant; } }; /** * @class This provides a templated interface for a strided iterator over * a Thrust device_vector on a GPU. 
* @details This code is taken from the Thrust examples site on 1/20/2015: * https://github.com/thrust/thrust/blob/master/examples/strided_range.cu */ template <typename Iterator> class strided_range { public: typedef typename thrust::iterator_difference<Iterator>::type difference_type; struct stride_functor : public thrust::unary_function<difference_type,difference_type> { difference_type stride; stride_functor(difference_type stride) : stride(stride) { } __host__ __device__ difference_type operator()(const difference_type& i) const { return stride * i; } }; typedef typename thrust::counting_iterator<difference_type> CountingIterator; typedef typename thrust::transform_iterator<stride_functor, CountingIterator> TransformIterator; typedef typename thrust::permutation_iterator<Iterator,TransformIterator> PermutationIterator; typedef PermutationIterator iterator; /** * @brief The strided iterator constructor. */ strided_range(Iterator first, Iterator last, difference_type stride) : first(first), last(last), stride(stride) { } /** * @brief Get the first element in the iterator. * @return the first element in the iterator */ iterator begin(void) const { return PermutationIterator(first, TransformIterator(CountingIterator(0), stride_functor(stride))); } /** * @brief Get the last element in the iterator. * @return the last element in the iterator */ iterator end(void) const { return begin() + ((last - first) + (stride - 1)) / stride; } protected: /** The first element in the underlying device_vector as set by the constructor */ Iterator first; /** The last element in the underlying device_vector as set by the constructor */ Iterator last; /** The stride to use when iterating over the underlying device_vector */ difference_type stride; }; /** * @brief Compute the total fission source from all FSRs. * @param FSR_volumes an array of FSR volumes * @param FSR_materials an array of FSR Material indices * @param materials an array of dev_materials on the device * @param scalar_flux the scalar flux in each FSR and energy group * @param fission_sources array of fission sources in each FSR and energy group */ __global__ void computeFissionSourcesOnDevice(FP_PRECISION* FSR_volumes, int* FSR_materials, dev_material* materials, FP_PRECISION* scalar_flux, FP_PRECISION* fission_sources) { /* Use a shared memory buffer for each thread's fission source */ extern __shared__ FP_PRECISION shared_fission_source[]; int tid = threadIdx.x + blockIdx.x * blockDim.x; dev_material* curr_material; FP_PRECISION* nu_sigma_f; FP_PRECISION volume, source; /* Initialize fission source to zero */ shared_fission_source[threadIdx.x] = 0; /* Iterate over all FSRs */ while (tid < *num_FSRs) { curr_material = &materials[FSR_materials[tid]]; nu_sigma_f = curr_material->_nu_sigma_f; volume = FSR_volumes[tid]; /* Iterate over energy groups and update fission source for * this thread block */ for (int e=0; e < *num_groups; e++) { source = nu_sigma_f[e] * scalar_flux(tid,e) * volume; shared_fission_source[threadIdx.x] += source; } /* Increment thread id */ tid += blockDim.x * gridDim.x; } /* Copy this thread's fission source to global memory */ tid = threadIdx.x + blockIdx.x * blockDim.x; fission_sources[tid] = shared_fission_source[threadIdx.x]; } /** * @brief Computes the total source (fission, scattering, fixed) in each FSR. * @details This method computes the total source in each region based on * this iteration's current approximation to the scalar flux. 
* @param FSR_materials an array of FSR Material indices * @param materials an array of dev_material pointers * @param scalar_flux an array of FSR scalar fluxes * @param fixed_sources an array of fixed (user-defined) sources * @param reduced_sources an array of FSR sources / total xs * @param inverse_k_eff the inverse of keff */ __global__ void computeFSRSourcesOnDevice(int* FSR_materials, dev_material* materials, FP_PRECISION* scalar_flux, FP_PRECISION* fixed_sources, FP_PRECISION* reduced_sources, FP_PRECISION inverse_k_eff) { int tid = threadIdx.x + blockIdx.x * blockDim.x; FP_PRECISION fission_source; FP_PRECISION scatter_source; dev_material* curr_material; FP_PRECISION* sigma_t; FP_PRECISION* sigma_s; FP_PRECISION* fiss_mat; /* Iterate over all FSRs */ while (tid < *num_FSRs) { curr_material = &materials[FSR_materials[tid]]; sigma_t = curr_material->_sigma_t; sigma_s = curr_material->_sigma_s; fiss_mat = curr_material->_fiss_matrix; /* Compute scatter + fission source for group g */ for (int g=0; g < *num_groups; g++) { scatter_source = 0; fission_source = 0; for (int g_prime=0; g_prime < *num_groups; g_prime++) { scatter_source += sigma_s[g*(*num_groups)+g_prime] * scalar_flux(tid,g_prime); fission_source += fiss_mat[g*(*num_groups)+g_prime] * scalar_flux(tid,g_prime); } fission_source *= inverse_k_eff; /* Compute total (scatter+fission+fixed) reduced source */ reduced_sources(tid,g) = fixed_sources(tid,g); reduced_sources(tid,g) += scatter_source + fission_source; reduced_sources(tid,g) *= ONE_OVER_FOUR_PI; reduced_sources(tid,g) = __fdividef(reduced_sources(tid,g), sigma_t[g]); } /* Increment the thread id */ tid += blockDim.x * gridDim.x; } } /** * @brief Computes the total fission source in each FSR in each energy group * @details This method is a helper routine for the openmoc.krylov submodule. * This routine computes the total fission source in each FSR. If the * divide_sigma_t parameter is true then the fission source will be * divided by the total cross-section in each FSR. * @param FSR_materials an array of FSR Material indices * @param materials an array of dev_material pointers * @param divide_sigma_t a boolean indicating whether to divide by the total xs * @param scalar_flux an array of FSR scalar fluxes * @param reduced_sources an array of FSR fission sources */ __global__ void computeFSRFissionSourcesOnDevice(int* FSR_materials, dev_material* materials, bool divide_sigma_t, FP_PRECISION* scalar_flux, FP_PRECISION* reduced_sources) { int tid = threadIdx.x + blockIdx.x * blockDim.x; FP_PRECISION fission_source; dev_material* curr_material; FP_PRECISION* sigma_t; FP_PRECISION* fiss_mat; /* Iterate over all FSRs */ while (tid < *num_FSRs) { curr_material = &materials[FSR_materials[tid]]; sigma_t = curr_material->_sigma_t; fiss_mat = curr_material->_fiss_matrix; /* Compute fission source for group g */ for (int g=0; g < *num_groups; g++) { fission_source = 0; for (int g_prime=0; g_prime < *num_groups; g_prime++) fission_source += fiss_mat[g*(*num_groups)+g_prime] * scalar_flux(tid,g_prime); /* Set the reduced fission source for FSR tid in group g */ reduced_sources(tid,g) = fission_source; reduced_sources(tid,g) *= ONE_OVER_FOUR_PI; if (divide_sigma_t) reduced_sources(tid,g) = __fdividef(reduced_sources(tid,g), sigma_t[g]); } /* Increment the thread id */ tid += blockDim.x * gridDim.x; } } /** * @brief Computes the total scattering source in each FSR and energy group. * @details This method is a helper routine for the openmoc.krylov submodule. 
* This routine computes the total scatter source in each FSR. If the * divide_sigma_t parameter is true then the scatter source will be * divided by the total cross-section in each FSR. * @param FSR_materials an array of FSR Material indices * @param materials an array of dev_material pointers * @param divide_sigma_t a boolean indicating whether to divide by the total xs * @param scalar_flux an array of FSR scalar fluxes * @param reduced_sources an array of FSR scatter sources */ __global__ void computeFSRScatterSourcesOnDevice(int* FSR_materials, dev_material* materials, bool divide_sigma_t, FP_PRECISION* scalar_flux, FP_PRECISION* reduced_sources) { int tid = threadIdx.x + blockIdx.x * blockDim.x; FP_PRECISION scatter_source; dev_material* curr_material; FP_PRECISION* sigma_s; FP_PRECISION* sigma_t; /* Iterate over all FSRs */ while (tid < *num_FSRs) { curr_material = &materials[FSR_materials[tid]]; sigma_s = curr_material->_sigma_s; sigma_t = curr_material->_sigma_t; /* Compute total scattering source for this FSR in group g */ for (int g=0; g < *num_groups; g++) { scatter_source = 0; for (int g_prime=0; g_prime < *num_groups; g_prime++) scatter_source += sigma_s[g*(*num_groups)+g_prime] * scalar_flux(tid,g_prime); /* Set the reduced scatter source for FSR tid in group g */ reduced_sources(tid,g) = scatter_source; reduced_sources(tid,g) *= ONE_OVER_FOUR_PI; if (divide_sigma_t) reduced_sources(tid,g) = __fdividef(reduced_sources(tid,g), sigma_t[g]); } /* Increment the thread id */ tid += blockDim.x * gridDim.x; } } /** * @brief Perform an atomic addition in double precision to an array address. * @details This method is straight out of CUDA C Developers Guide (cc 2013). * @param address the array memory address * @param val the value to add to the array * @return the atomically added array value and input value */ __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } /** * @brief Computes the contribution to the FSR scalar flux from a Track * segment in a single energy group. * @details This method integrates the angular flux for a Track segment across * energy groups and polar angles, and tallies it into the FSR scalar * flux, and updates the Track's angular flux. 
* @param curr_segment a pointer to the Track segment of interest * @param azim_index a pointer to the azimuthal angle index for this segment * @param energy_group the energy group of interest * @param materials the array of dev_material pointers * @param track_flux a pointer to the Track's angular flux * @param reduced_sources the array of FSR sources / total xs * @param polar_weights the array of polar Quadrature weights * @param scalar_flux the array of FSR scalar fluxes */ __device__ void tallyScalarFlux(dev_segment* curr_segment, int azim_index, int energy_group, dev_material* materials, FP_PRECISION* track_flux, FP_PRECISION* reduced_sources, FP_PRECISION* polar_weights, FP_PRECISION* scalar_flux) { int fsr_id = curr_segment->_region_uid; FP_PRECISION length = curr_segment->_length; dev_material* curr_material = &materials[curr_segment->_material_index]; FP_PRECISION* sigma_t = curr_material->_sigma_t; /* The change in angular flux long this Track segment in this FSR */ FP_PRECISION delta_psi; FP_PRECISION exponential; /* Zero the FSR scalar flux contribution from this segment and energy group */ FP_PRECISION fsr_flux = 0.0; /* Loop over polar angles */ for (int p=0; p < *num_polar; p++) { exponential = exp_evaluator.computeExponential(sigma_t[energy_group] * length, p); delta_psi = (track_flux[p] - reduced_sources(fsr_id,energy_group)); delta_psi *= exponential; fsr_flux += delta_psi * polar_weights(azim_index,p); track_flux[p] -= delta_psi; } /* Atomically increment the scalar flux for this FSR */ atomicAdd(&scalar_flux(fsr_id,energy_group), fsr_flux); } /** * @brief Updates the boundary flux for a Track given boundary conditions. * @details For reflective and periodic boundary conditions, the outgoing * boundary flux for the Track is given to the corresponding reflecting * or periodic Track. For vacuum boundary conditions, the outgoing flux * is tallied as leakage. Note: Only one energy group is transferred * by this routine. * @param curr_track a pointer to the Track of interest * @param azim_index a pointer to the azimuthal angle index for this segment * @param track_flux an array of the outgoing Track flux * @param boundary_flux an array of all angular fluxes * @param polar_weights an array of polar Quadrature weights * @param energy_angle_index the energy group index * @param direction the Track direction (forward - true, reverse - false) */ __device__ void transferBoundaryFlux(dev_track* curr_track, int azim_index, FP_PRECISION* track_flux, FP_PRECISION* boundary_flux, FP_PRECISION* polar_weights, int energy_angle_index, bool direction) { int start = energy_angle_index; bool transfer_flux; int track_out_id; /* For the "forward" direction */ if (direction) { transfer_flux = curr_track->_transfer_flux_out; track_out_id = curr_track->_track_out; start += curr_track->_next_out * (*polar_times_groups); } /* For the "reverse" direction */ else { transfer_flux = curr_track->_transfer_flux_in; track_out_id = curr_track->_track_in; start += curr_track->_next_in * (*polar_times_groups); } FP_PRECISION* track_out_flux = &boundary_flux(track_out_id,start); /* Put Track's flux in the shared memory temporary flux array */ for (int p=0; p < *num_polar; p++) track_out_flux[p] = track_flux[p] * transfer_flux; } /** * @brief This method performs one transport sweep of one halfspace of all * azimuthal angles, tracks, segments, polar angles and energy groups. 
* @details The method integrates the flux along each track and updates the * boundary fluxes for the corresponding output Track, while updating * the scalar flux in each FSR. * @param scalar_flux an array of FSR scalar fluxes * @param boundary_flux an array of Track boundary fluxes * @param reduced_sources an array of FSR sources / total xs * @param materials an array of dev_material pointers * @param tracks an array of Tracks * @param tid_offset the Track offset for azimuthal angle halfspace * @param tid_max the upper bound on the Track IDs for this azimuthal * angle halfspace */ __global__ void transportSweepOnDevice(FP_PRECISION* scalar_flux, FP_PRECISION* boundary_flux, FP_PRECISION* reduced_sources, dev_material* materials, dev_track* tracks, int tid_offset, int tid_max) { /* Shared memory buffer for each thread's angular flux */ extern __shared__ FP_PRECISION temp_flux[]; FP_PRECISION* track_flux; int tid = tid_offset + threadIdx.x + blockIdx.x * blockDim.x; int track_id = tid / *num_groups; int track_flux_index = threadIdx.x * (*two_times_num_polar); int energy_group = tid % (*num_groups); int energy_angle_index = energy_group * (*num_polar); dev_track* curr_track; int azim_index; int num_segments; dev_segment* curr_segment; /* Iterate over Track with azimuthal angles in (0, pi/2) */ while (track_id < tid_max) { /* Initialize local registers with important data */ curr_track = &tracks[track_id]; azim_index = curr_track->_azim_angle_index; num_segments = curr_track->_num_segments; /* Retrieve pointer to thread's shared memory buffer for angular flux */ track_flux = &temp_flux[track_flux_index]; /* Put Track's flux in the shared memory temporary flux array */ for (int p=0; p < *num_polar; p++) { /* Forward flux along this Track */ track_flux[p] = boundary_flux(track_id,p+energy_angle_index); /* Reverse flux along this Track */ track_flux[(*num_polar) + p] = boundary_flux(track_id,p+energy_angle_index+(*polar_times_groups)); } /* Loop over each Track segment in forward direction */ for (int i=0; i < num_segments; i++) { curr_segment = &curr_track->_segments[i]; tallyScalarFlux(curr_segment, azim_index, energy_group, materials, track_flux, reduced_sources, polar_weights, scalar_flux); } /* Transfer boundary angular flux to outgoing Track */ transferBoundaryFlux(curr_track, azim_index, track_flux, boundary_flux, polar_weights, energy_angle_index, true); /* Loop over each Track segment in reverse direction */ track_flux = &temp_flux[track_flux_index + (*num_polar)]; for (int i=num_segments-1; i > -1; i--) { curr_segment = &curr_track->_segments[i]; tallyScalarFlux(curr_segment, azim_index, energy_group, materials, track_flux, reduced_sources, polar_weights, scalar_flux); } /* Transfer boundary angular flux to outgoing Track */ transferBoundaryFlux(curr_track, azim_index, track_flux, boundary_flux, polar_weights, energy_angle_index, false); /* Update the indices for this thread to the next Track, energy group */ tid += blockDim.x * gridDim.x; track_id = tid / *num_groups; energy_group = tid % (*num_groups); energy_angle_index = energy_group * (*num_polar); } } /** * @brief Add the source term contribution in the transport equation to * the FSR scalar flux on the GPU. 
* @param scalar_flux an array of FSR scalar fluxes * @param reduced_sources an array of FSR sources / total xs * @param FSR_volumes an array of FSR volumes * @param FSR_materials an array of FSR material indices * @param materials an array of dev_material pointers */ __global__ void addSourceToScalarFluxOnDevice(FP_PRECISION* scalar_flux, FP_PRECISION* reduced_sources, FP_PRECISION* FSR_volumes, int* FSR_materials, dev_material* materials) { int tid = threadIdx.x + blockIdx.x * blockDim.x; FP_PRECISION volume; dev_material* curr_material; FP_PRECISION* sigma_t; /* Iterate over all FSRs */ while (tid < *num_FSRs) { curr_material = &materials[FSR_materials[tid]]; volume = FSR_volumes[tid]; sigma_t = curr_material->_sigma_t; /* Iterate over all energy groups */ for (int i=0; i < *num_groups; i++) { scalar_flux(tid,i) *= 0.5; scalar_flux(tid,i) = __fdividef(scalar_flux(tid,i), (sigma_t[i] * volume)); scalar_flux(tid,i) += FOUR_PI * reduced_sources(tid,i); } /* Increment thread id */ tid += blockDim.x * gridDim.x; } } /** * @brief Compute the total volume-intergrated fission source from * all FSRs and energy groups. * @param FSR_volumes an array of the FSR volumes * @param FSR_materials an array of the FSR Material indices * @param materials an array of the dev_material pointers * @param scalar_flux an array of FSR scalar fluxes * @param fission an array of FSR nu-fission rates */ __global__ void computeFSRFissionRatesOnDevice(FP_PRECISION* FSR_volumes, int* FSR_materials, dev_material* materials, FP_PRECISION* scalar_flux, FP_PRECISION* fission) { int tid = threadIdx.x + blockIdx.x * blockDim.x; dev_material* curr_material; FP_PRECISION* nu_sigma_f; FP_PRECISION volume; FP_PRECISION fiss = 0.; /* Iterate over all FSRs */ while (tid < *num_FSRs) { curr_material = &materials[FSR_materials[tid]]; nu_sigma_f = curr_material->_nu_sigma_f; volume = FSR_volumes[tid]; FP_PRECISION curr_fiss = 0.; /* Compute nu-fission rates rates for this thread block */ for (int e=0; e < *num_groups; e++) curr_fiss += nu_sigma_f[e] * scalar_flux(tid,e); fiss += curr_fiss * volume; /* Increment thread id */ tid += blockDim.x * gridDim.x; } /* Copy this thread's fission to global memory */ tid = threadIdx.x + blockIdx.x * blockDim.x; fission[tid] = fiss; } /** * @brief Constructor initializes arrays for dev_tracks and dev_materials.. * @details The constructor initalizes the number of CUDA threads and thread * blocks each to a default of 64. * @param track_generator an optional pointer to the TrackjGenerator */ GPUSolver::GPUSolver(TrackGenerator* track_generator) : Solver(track_generator) { /* The default number of thread blocks and threads per thread block */ _B = 64; _T = 64; _materials = NULL; _dev_tracks = NULL; _FSR_materials = NULL; if (track_generator != NULL) setTrackGenerator(track_generator); } /** * @brief Solver destructor frees all memory on the device, including arrays * for the FSR scalar fluxes and sources and Track boundary fluxes. */ GPUSolver::~GPUSolver() { if (_FSR_volumes != NULL) { cudaFree(_FSR_volumes); _FSR_volumes = NULL; } if (_FSR_materials != NULL) { cudaFree(_FSR_materials); _FSR_materials = NULL; } if (_materials != NULL) { cudaFree(_materials); _materials = NULL; } if (_dev_tracks != NULL) { cudaFree(_dev_tracks); _dev_tracks = NULL; } /* Clear Thrust vectors's memory on the device */ _boundary_flux.clear(); _scalar_flux.clear(); _old_scalar_flux.clear(); _fixed_sources.clear(); _reduced_sources.clear(); } /** * @brief Returns the number of thread blocks to execute on the GPU. 
* @return the number of thread blocks */ int GPUSolver::getNumThreadBlocks() { return _B; } /** * @brief Returns the number of threads per block to execute on the GPU. * @return the number of threads per block */ int GPUSolver::getNumThreadsPerBlock() { return _T; } /** * @brief Returns the source for some energy group for a flat source region * @details This is a helper routine used by the openmoc.process module. * @param fsr_id the ID for the FSR of interest * @param group the energy group of interest * @return the flat source region source */ FP_PRECISION GPUSolver::getFSRSource(int fsr_id, int group) { if (fsr_id >= _num_FSRs) log_printf(ERROR, "Unable to return a source for FSR ID = %d " "since the max FSR ID = %d", fsr_id, _num_FSRs-1); else if (fsr_id < 0) log_printf(ERROR, "Unable to return a source for FSR ID = %d " "since FSRs do not have negative IDs", fsr_id); else if (group-1 >= _num_groups) log_printf(ERROR, "Unable to return a source in group %d " "since there are only %d groups", group, _num_groups); else if (group <= 0) log_printf(ERROR, "Unable to return a source in group %d " "since groups must be greater or equal to 1", group); else if (_scalar_flux.size() == 0) log_printf(ERROR, "Unable to return a source " "since it has not yet been computed"); /* Get host material */ Material* host_material = _geometry->findFSRMaterial(fsr_id); /* Get cross sections and scalar flux */ FP_PRECISION* sigma_s = host_material->getSigmaS(); FP_PRECISION* fiss_mat = host_material->getFissionMatrix(); FP_PRECISION* fsr_scalar_fluxes = new FP_PRECISION[_num_groups]; FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); cudaMemcpy((void*)fsr_scalar_fluxes, (void*)&scalar_flux[fsr_id*_num_groups], _num_groups * sizeof(FP_PRECISION), cudaMemcpyDeviceToHost); FP_PRECISION fission_source = 0.0; FP_PRECISION scatter_source = 0.0; FP_PRECISION total_source; /* Compute total scattering and fission sources for this FSR */ for (int g=0; g < _num_groups; g++) { scatter_source += sigma_s[(group-1)*(_num_groups)+g] * fsr_scalar_fluxes[g]; fission_source += fiss_mat[(group-1)*(_num_groups)+g] * fsr_scalar_fluxes[g]; } fission_source /= _k_eff; /* Compute the total source */ total_source = fission_source + scatter_source; /* Add in fixed source (if specified by user) */ total_source += _fixed_sources(fsr_id,group-1); /* Normalize to solid angle for isotropic approximation */ total_source *= ONE_OVER_FOUR_PI; delete [] fsr_scalar_fluxes; return total_source; } /** * @brief Returns the scalar flux for some FSR and energy group. * @param fsr_id the ID for the FSR of interest * @param group the energy group of interest * @return the FSR scalar flux */ FP_PRECISION GPUSolver::getFlux(int fsr_id, int group) { if (fsr_id >= _num_FSRs) log_printf(ERROR, "Unable to return a scalar flux for FSR ID = %d " "since the max FSR ID = %d", fsr_id, _num_FSRs-1); else if (fsr_id < 0) log_printf(ERROR, "Unable to return a scalar flux for FSR ID = %d " "since FSRs do not have negative IDs", fsr_id); else if (group-1 >= _num_groups) log_printf(ERROR, "Unable to return a scalar flux in group %d " "since there are only %d groups", group, _num_groups); else if (group <= 0) log_printf(ERROR, "Unable to return a scalar flux in group %d " "since groups must be greater or equal to 1", group); if (_scalar_flux.size() == 0) log_printf(ERROR, "Unable to return a scalar flux " "since it has not yet been computed"); return _scalar_flux(fsr_id,group-1); } /** * @brief Fills an array with the scalar fluxes on the GPU. 
* @details This class method is a helper routine called by the OpenMOC * Python "openmoc.krylov" module for Krylov subspace methods. * Although this method appears to require two arguments, in * reality it only requires one due to SWIG and would be called * from within Python as follows: * * @code * num_fluxes = num_groups * num_FSRs * fluxes = solver.getFluxes(num_fluxes) * @endcode * * @param fluxes an array of FSR scalar fluxes in each energy group * @param num_fluxes the total number of FSR flux values */ void GPUSolver::getFluxes(FP_PRECISION* out_fluxes, int num_fluxes) { if (num_fluxes != _num_groups * _num_FSRs) log_printf(ERROR, "Unable to get FSR scalar fluxes since there are " "%d groups and %d FSRs which does not match the requested " "%d flux values", _num_groups, _num_FSRs, num_fluxes); else if (_scalar_flux.size() == 0) log_printf(ERROR, "Unable to get FSR scalar fluxes since they " "have not yet been allocated on the device"); FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); /* Copy the fluxes from the GPU to the input array */ cudaMemcpy((void*)out_fluxes, (void*)scalar_flux, num_fluxes * sizeof(FP_PRECISION), cudaMemcpyDeviceToHost); } /** * @brief Sets the number of thread blocks (>0) for CUDA kernels. * @param num_blocks the number of thread blocks */ void GPUSolver::setNumThreadBlocks(int num_blocks) { if (num_blocks < 0) log_printf(ERROR, "Unable to set the number of CUDA thread blocks " "to %d since it is a negative number", num_blocks); _B = num_blocks; } /** * @brief Sets the number of threads per block (>0) for CUDA kernels. * @param num_threads the number of threads per block */ void GPUSolver::setNumThreadsPerBlock(int num_threads) { if (num_threads < 0) log_printf(ERROR, "Unable to set the number of CUDA threads per block " "to %d since it is a negative number", num_threads); _T = num_threads; } /** * @brief Sets the Geometry for the Solver. * @details This is a private setter method for the Solver and is not * intended to be called by the user. * @param geometry a pointer to a Geometry object */ void GPUSolver::setGeometry(Geometry* geometry) { Solver::setGeometry(geometry); std::map<int, Material*> host_materials=_geometry->getAllMaterials(); std::map<int, Material*>::iterator iter; int material_index = 0; /* Iterate through all Materials and clone them as dev_material structs * on the device */ for (iter=host_materials.begin(); iter != host_materials.end(); ++iter) { _material_IDs_to_indices[iter->second->getId()] = material_index; material_index++; } } /** * @brief Sets the Solver's TrackGenerator with characteristic Tracks. * @details The TrackGenerator must already have generated Tracks and have * used ray tracing to segmentize them across the Geometry. This * should be initated in Python prior to assigning the TrackGenerator * to the Solver: * * @code * track_generator.generateTracks() * solver.setTrackGenerator(track_generator) * @endcode * * @param track_generator a pointer to a TrackGenerator object */ void GPUSolver::setTrackGenerator(TrackGenerator* track_generator) { Solver::setTrackGenerator(track_generator); initializeTracks(); } /** * @brief Set the flux array for use in transport sweep source calculations. * @detail This is a helper method for the checkpoint restart capabilities, * as well as the IRAMSolver in the openmoc.krylov submodule. 
This * routine may be used as follows from within Python: * * @code * num_FSRs = solver.getGeometry.getNumFSRs() * num_groups = solver.getGeometry.getNumEnergyGroups() * fluxes = numpy.random.rand(num_FSRs * num_groups, dtype=np.float) * solver.setFluxes(fluxes) * @endcode * * NOTE: This routine stores a pointer to the fluxes for the Solver * to use during transport sweeps and other calculations. Hence, the * flux array pointer is shared between NumPy and the Solver. * * @param in_fluxes an array with the fluxes to use * @param num_fluxes the number of flux values (# groups x # FSRs) */ void GPUSolver::setFluxes(FP_PRECISION* in_fluxes, int num_fluxes) { if (num_fluxes != _num_groups * _num_FSRs) log_printf(ERROR, "Unable to set an array with %d flux values for %d " " groups and %d FSRs", num_fluxes, _num_groups, _num_FSRs); /* Allocate array if flux arrays have not yet been initialized */ if (_scalar_flux.size() == 0) initializeFluxArrays(); FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); /* Copy the input fluxes onto the GPU */ cudaMemcpy((void*)scalar_flux, (void*)in_fluxes, num_fluxes * sizeof(FP_PRECISION), cudaMemcpyHostToDevice); _user_fluxes = true; } /** * @brief Creates a polar quadrature object for the GPUSolver on the GPU. */ void GPUSolver::initializePolarQuadrature() { log_printf(INFO, "Initializing polar quadrature on the GPU..."); Solver::initializePolarQuadrature(); if (_num_polar > MAX_POLAR_ANGLES_GPU) log_printf(ERROR, "Unable to initialize a polar quadrature with %d " "angles for the GPUSolver which is limited to %d polar " "angles. Update the MAX_POLAR_ANGLES_GPU macro in constants.h " "and recompile.", _num_polar, MAX_POLAR_ANGLES_GPU); /* Copy the number of polar angles to constant memory on the GPU */ cudaMemcpyToSymbol(num_polar, (void*)&_num_polar, sizeof(int), 0, cudaMemcpyHostToDevice); /* Copy twice the number of polar angles to constant memory on the GPU */ _two_times_num_polar = 2 * _num_polar; cudaMemcpyToSymbol(two_times_num_polar, (void*)&_two_times_num_polar, sizeof(int), 0, cudaMemcpyHostToDevice); /* Copy the number of polar angles times energy groups to constant memory * on the GPU */ cudaMemcpyToSymbol(polar_times_groups, (void*)&_polar_times_groups, sizeof(int), 0, cudaMemcpyHostToDevice); /* Copy the polar weights to constant memory on the GPU */ cudaMemcpyToSymbol(polar_weights, (void*)_polar_weights, _num_polar * _num_azim * sizeof(FP_PRECISION), 0, cudaMemcpyHostToDevice); /* Copy the sines of the polar angles which is needed if the user * requested the use of the exp intrinsic to evaluate exponentials */ cudaMemcpyToSymbol(sin_thetas, (void*)_polar_quad->getSinThetas(), _num_polar * sizeof(FP_PRECISION), 0, cudaMemcpyHostToDevice); } /** * @brief Initializes new GPUExpEvaluator object to compute exponentials. */ void GPUSolver::initializeExpEvaluator() { Solver::initializeExpEvaluator(); log_printf(INFO, "Initializing the exponential evaluator on the GPU..."); /* Allocate memory for a GPUExpEvaluator on the device */ GPUExpEvaluator* dev_exp_evaluator; cudaMalloc((void**)&dev_exp_evaluator, sizeof(GPUExpEvaluator)); /* Clone ExpEvaluator from the host into GPUExpEvaluator on the device */ clone_exp_evaluator(_exp_evaluator, dev_exp_evaluator); /* Copy the GPUExpEvaluator into constant memory on the GPU */ cudaMemcpyToSymbol(exp_evaluator, (void*)dev_exp_evaluator, sizeof(GPUExpEvaluator), 0, cudaMemcpyDeviceToDevice); } /** * @brief Initializes the FSR volumes and dev_materials array on the GPU. 
* @details This method assigns each FSR a unique, monotonically increasing * ID, sets the Material for each FSR, and assigns a volume based on * the cumulative length of all of the segments inside the FSR. */ void GPUSolver::initializeFSRs() { log_printf(NORMAL, "Initializing FSRs on the GPU..."); /* Delete old FSRs array if it exists */ if (_FSR_volumes != NULL) { cudaFree(_FSR_volumes); _FSR_volumes = NULL; } if (_FSR_materials != NULL) { cudaFree(_FSR_materials); _FSR_materials = NULL; } Solver::initializeFSRs(); /* Allocate memory for all FSR volumes and dev_materials on the device */ try{ /* Store pointers to arrays of FSR data created on the host by the * the parent class Solver::initializeFSRs() routine */ FP_PRECISION* host_FSR_volumes = _FSR_volumes; int* host_FSR_materials = _FSR_materials; /* Allocate memory on device for FSR volumes and Material indices */ cudaMalloc((void**)&_FSR_volumes, _num_FSRs * sizeof(FP_PRECISION)); cudaMalloc((void**)&_FSR_materials, _num_FSRs * sizeof(int)); /* Create a temporary FSR to material indices array */ int* FSRs_to_material_indices = new int[_num_FSRs]; /* Populate FSR Material indices array */ for (int i = 0; i < _num_FSRs; i++) FSRs_to_material_indices[i] = _material_IDs_to_indices[_geometry-> findFSRMaterial(i)->getId()]; /* Copy the arrays of FSR data to the device */ cudaMemcpy((void*)_FSR_volumes, (void*)host_FSR_volumes, _num_FSRs * sizeof(FP_PRECISION), cudaMemcpyHostToDevice); cudaMemcpy((void*)_FSR_materials, (void*)FSRs_to_material_indices, _num_FSRs * sizeof(int), cudaMemcpyHostToDevice); /* Copy the number of FSRs into constant memory on the GPU */ cudaMemcpyToSymbol(num_FSRs, (void*)&_num_FSRs, sizeof(int), 0, cudaMemcpyHostToDevice); /* Free the array of FSRs data allocated by the Solver parent class */ free(host_FSR_volumes); free(host_FSR_materials); /* Free the temporary array of FSRs to material indices on the host */ free(FSRs_to_material_indices); } catch(std::exception &e) { log_printf(ERROR, "Could not allocate memory for FSRs on GPU"); } } /** * @brief Allocates all Materials data on the GPU. * @details This method loops over the materials in the host_materials map. * Since CUDA does not support std::map data types on the device, * the materials map must be converted to an array and a map created * that maps a material ID to an indice in the new materials array. In * initializeTracks, this map is used to convert the Material ID * associated with every segment to an index in the materials array. 
* @param mode the solution type (FORWARD or ADJOINT) */ void GPUSolver::initializeMaterials(solverMode mode) { Solver::initializeMaterials(mode); log_printf(INFO, "Initializing materials on the GPU..."); /* Copy the number of energy groups to constant memory on the GPU */ cudaMemcpyToSymbol(num_groups, (void*)&_num_groups, sizeof(int), 0, cudaMemcpyHostToDevice); /* Delete old materials array if it exists */ if (_materials != NULL) cudaFree(_materials); /* Allocate memory for all dev_materials on the device */ try{ std::map<int, Material*> host_materials=_geometry->getAllMaterials(); std::map<int, Material*>::iterator iter; int material_index = 0; /* Iterate through all Materials and clone them as dev_material structs * on the device */ cudaMalloc((void**)&_materials, _num_materials * sizeof(dev_material)); for (iter=host_materials.begin(); iter != host_materials.end(); ++iter) { clone_material(iter->second, &_materials[material_index]); material_index++; } } catch(std::exception &e) { log_printf(ERROR, "Could not allocate memory for Materials on GPU"); } } /** * @brief Allocates memory for all Tracks on the GPU */ void GPUSolver::initializeTracks() { log_printf(INFO, "Initializing tracks on the GPU..."); /* Delete old Tracks array if it exists */ if (_dev_tracks != NULL) cudaFree(_dev_tracks); /* Allocate memory for all Tracks and Track offset indices on the device */ try{ /* Allocate array of dev_tracks */ cudaMalloc((void**)&_dev_tracks, _tot_num_tracks * sizeof(dev_track)); /* Iterate through all Tracks and clone them as dev_tracks on the device */ int index; for (int i=0; i < _tot_num_tracks; i++) { clone_track(_tracks[i], &_dev_tracks[i], _material_IDs_to_indices); /* Get indices to next tracks along "forward" and "reverse" directions */ index = _tracks[i]->getTrackIn()->getUid(); cudaMemcpy((void*)&_dev_tracks[i]._track_in, (void*)&index, sizeof(int), cudaMemcpyHostToDevice); index = _tracks[i]->getTrackOut()->getUid(); cudaMemcpy((void*)&_dev_tracks[i]._track_out, (void*)&index, sizeof(int), cudaMemcpyHostToDevice); } /* Copy the total number of Tracks into constant memory on GPU */ cudaMemcpyToSymbol(tot_num_tracks, (void*)&_tot_num_tracks, sizeof(int), 0, cudaMemcpyHostToDevice); /* Copy the number of azimuthal angles into constant memory on GPU */ cudaMemcpyToSymbol(num_azim, (void*)&_num_azim, sizeof(int), 0, cudaMemcpyHostToDevice); } catch(std::exception &e) { log_printf(ERROR, "Could not allocate memory for Tracks on GPU"); } } /** * @brief Allocates memory for Track boundary angular and FSR scalar fluxes. * @details Deletes memory for old flux vectors if they were allocated for a * previous simulation. */ void GPUSolver::initializeFluxArrays() { log_printf(INFO, "Initializing flux vectors on the GPU..."); /* Clear Thrust vectors' memory if previously allocated */ _boundary_flux.clear(); _scalar_flux.clear(); _old_scalar_flux.clear(); /* Allocate memory for all flux arrays on the device */ try{ int size = 2 * _tot_num_tracks * _polar_times_groups; _boundary_flux.resize(size); size = _num_FSRs * _num_groups; _scalar_flux.resize(size); _old_scalar_flux.resize(size); } catch(std::exception &e) { log_printf(ERROR, "Could not allocate memory for fluxes on GPU"); } } /** * @brief Allocates memory for FSR source vectors on the GPU. * @details Deletes memory for old source vectors if they were allocated * for a previous simulation. 
*/ void GPUSolver::initializeSourceArrays() { log_printf(INFO, "Initializing source vectors on the GPU..."); /* Clear Thrust vectors' memory if previously allocated */ _reduced_sources.clear(); _fixed_sources.clear(); int size = _num_FSRs * _num_groups; /* Allocate memory for all source arrays on the device */ try{ _reduced_sources.resize(size); _fixed_sources.resize(size); } catch(std::exception &e) { log_printf(ERROR, "Could not allocate memory for sources on GPU"); } /* Initialize fixed sources to zero */ thrust::fill(_fixed_sources.begin(), _fixed_sources.end(), 0.0); /* Fill fixed sources with those assigned by Cell, Material or FSR */ initializeFixedSources(); } /** * @brief Populates array of fixed sources assigned by FSR. */ void GPUSolver::initializeFixedSources() { Solver::initializeFixedSources(); int fsr_id, group; std::pair<int, int> fsr_group_key; std::map< std::pair<int, int>, FP_PRECISION >::iterator fsr_iter; /* Populate fixed source array with any user-defined sources */ for (fsr_iter = _fix_src_FSR_map.begin(); fsr_iter != _fix_src_FSR_map.end(); ++fsr_iter) { /* Get the FSR with an assigned fixed source */ fsr_group_key = fsr_iter->first; fsr_id = fsr_group_key.first; group = fsr_group_key.second; if (group <= 0 || group > _num_groups) log_printf(ERROR,"Unable to use fixed source for group %d in " "a %d energy group problem", group, _num_groups); if (fsr_id < 0 || fsr_id >= _num_FSRs) log_printf(ERROR,"Unable to use fixed source for FSR %d with only " "%d FSRs in the geometry", fsr_id, _num_FSRs); _fixed_sources(fsr_id, group-1) = _fix_src_FSR_map[fsr_group_key]; } } /** * @brief Zero each Track's boundary fluxes for each energy group and polar * angle in the "forward" and "reverse" directions. */ void GPUSolver::zeroTrackFluxes() { thrust::fill(_boundary_flux.begin(), _boundary_flux.end(), 0.0); } /** * @brief Set the scalar flux for each FSR and energy group to some value. * @param value the value to assign to each FSR scalar flux */ void GPUSolver::flattenFSRFluxes(FP_PRECISION value) { thrust::fill(_scalar_flux.begin(), _scalar_flux.end(), value); } /** * @brief Stores the FSR scalar fluxes in the old scalar flux array. */ void GPUSolver::storeFSRFluxes() { thrust::copy(_scalar_flux.begin(), _scalar_flux.end(), _old_scalar_flux.begin()); } /** * @brief Normalizes all FSR scalar fluxes and Track boundary angular * fluxes to the total fission source (times \f$ \nu \f$). 
*/ void GPUSolver::normalizeFluxes() { /** Create Thrust vector of fission sources in each FSR */ thrust::device_vector<FP_PRECISION> fission_sources_vec; fission_sources_vec.resize(_B * _T); FP_PRECISION* fission_sources = thrust::raw_pointer_cast(&fission_sources_vec[0]); FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); int shared_mem = sizeof(FP_PRECISION) * _T; computeFissionSourcesOnDevice<<<_B, _T, shared_mem>>>(_FSR_volumes, _FSR_materials, _materials, scalar_flux, fission_sources); FP_PRECISION norm_factor = 1.0 / thrust::reduce(fission_sources_vec.begin(), fission_sources_vec.end()); /* Multiply all scalar and angular fluxes by the normalization constant */ thrust::transform(_scalar_flux.begin(), _scalar_flux.end(), thrust::constant_iterator<FP_PRECISION>(norm_factor), _scalar_flux.begin(), thrust::multiplies<FP_PRECISION>()); thrust::transform(_old_scalar_flux.begin(), _old_scalar_flux.end(), thrust::constant_iterator<FP_PRECISION>(norm_factor), _old_scalar_flux.begin(), thrust::multiplies<FP_PRECISION>()); thrust::transform(_boundary_flux.begin(), _boundary_flux.end(), thrust::constant_iterator<FP_PRECISION>(norm_factor), _boundary_flux.begin(), thrust::multiplies<FP_PRECISION>()); /* Clear Thrust vector of FSR fission sources */ fission_sources_vec.clear(); } /** * @brief Computes the total source (fission, scattering, fixed) in each FSR. * @details This method computes the total source in each FSR based on * this iteration's current approximation to the scalar flux. */ void GPUSolver::computeFSRSources() { FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); FP_PRECISION* fixed_sources = thrust::raw_pointer_cast(&_fixed_sources[0]); FP_PRECISION* reduced_sources = thrust::raw_pointer_cast(&_reduced_sources[0]); computeFSRSourcesOnDevice<<<_B, _T>>>(_FSR_materials, _materials, scalar_flux, fixed_sources, reduced_sources, 1.0 / _k_eff); } /** * @brief Computes the fission source in each FSR. * @details This method computes the fission source in each FSR based on * this iteration's current approximation to the scalar flux. */ void GPUSolver::computeFSRFissionSources() { FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); FP_PRECISION* reduced_sources = thrust::raw_pointer_cast(&_reduced_sources[0]); computeFSRFissionSourcesOnDevice<<<_B, _T>>>(_FSR_materials, _materials, true, scalar_flux, reduced_sources); } /** * @brief Computes the scatter source in each FSR. * @details This method computes the scatter source in each FSR based on * this iteration's current approximation to the scalar flux. */ void GPUSolver::computeFSRScatterSources() { FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); FP_PRECISION* reduced_sources = thrust::raw_pointer_cast(&_reduced_sources[0]); computeFSRScatterSourcesOnDevice<<<_B, _T>>>(_FSR_materials, _materials, true, scalar_flux, reduced_sources); } /** * @brief This method performs one transport sweep of all azimuthal angles, * Tracks, Track segments, polar angles and energy groups. * @details The method integrates the flux along each Track and updates the * boundary fluxes for the corresponding output Track, while updating * the scalar flux in each flat source region. 
*/ void GPUSolver::transportSweep() { int shared_mem = _T * _two_times_num_polar * sizeof(FP_PRECISION); int tid_offset = 0; int tid_max = 0; log_printf(DEBUG, "Transport sweep on device with %d blocks and %d threads", _B, _T); /* Get device pointer to the Thrust vectors */ FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); FP_PRECISION* boundary_flux = thrust::raw_pointer_cast(&_boundary_flux[0]); FP_PRECISION* reduced_sources = thrust::raw_pointer_cast(&_reduced_sources[0]); /* Initialize flux in each FSR to zero */ flattenFSRFluxes(0.0); /* Loop over the parallel track groups and perform transport sweep on tracks * in that group */ for (int g=0; g < _num_parallel_track_groups; g++) { tid_offset = tid_max * _num_groups; tid_max += _track_generator->getNumTracksByParallelGroup(g); transportSweepOnDevice<<<_B, _T, shared_mem>>>(scalar_flux, boundary_flux, reduced_sources, _materials, _dev_tracks, tid_offset, tid_max); cudaDeviceSynchronize(); } } /** * @brief Add the source term contribution in the transport equation to * the FSR scalar flux. */ void GPUSolver::addSourceToScalarFlux() { FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); FP_PRECISION* reduced_sources = thrust::raw_pointer_cast(&_reduced_sources[0]); addSourceToScalarFluxOnDevice<<<_B,_T>>>(scalar_flux, reduced_sources, _FSR_volumes, _FSR_materials, _materials); } /** * @brief Compute \f$ k_{eff} \f$ from successive fission sources. * @details This method computes the current approximation to the * multiplication factor on this iteration as follows: * \f$ k_{eff} = \frac{\displaystyle\sum_{i \in I} * \displaystyle\sum_{g \in G} \nu \Sigma^F_g \Phi V_{i}} * {\displaystyle\sum_{i \in I} * \displaystyle\sum_{g \in G} (\Sigma^T_g \Phi V_{i} - * \Sigma^S_g \Phi V_{i} - L_{i,g})} \f$ */ void GPUSolver::computeKeff() { FP_PRECISION fission; thrust::device_vector<FP_PRECISION> fission_vec; fission_vec.resize(_B * _T); FP_PRECISION* fiss_ptr = thrust::raw_pointer_cast(&fission_vec[0]); FP_PRECISION* flux = thrust::raw_pointer_cast(&_scalar_flux[0]); /* Compute the total, fission and scattering reaction rates on device. * This kernel stores partial rates in a Thrust vector with as many * entries as CUDAthreads executed by the kernel */ computeFSRFissionRatesOnDevice<<<_B, _T>>>(_FSR_volumes, _FSR_materials, _materials, flux, fiss_ptr); /* Compute the total fission source */ fission = thrust::reduce(fission_vec.begin(), fission_vec.end()); _k_eff *= fission; fission_vec.clear(); } /** * @brief Computes the residual between source/flux iterations. 
* @param res_type the type of residuals to compute * (SCALAR_FLUX, FISSION_SOURCE, TOTAL_SOURCE) * @return the average residual in each flat source region */ double GPUSolver::computeResidual(residualType res_type) { int norm; double residual; isinf_test inf_test; isnan_test nan_test; /* Allocate Thrust vector for residuals in each FSR */ thrust::device_vector<double> residuals(_num_FSRs); if (res_type == SCALAR_FLUX) { norm = _num_FSRs; /* Allocate Thrust vector for residuals */ thrust::device_vector<FP_PRECISION> fp_residuals(_num_FSRs * _num_groups); thrust::device_vector<FP_PRECISION> FSR_fp_residuals(_num_FSRs); /* Compute the relative flux change in each FSR and group */ thrust::transform(_scalar_flux.begin(), _scalar_flux.end(), _old_scalar_flux.begin(), fp_residuals.begin(), thrust::minus<FP_PRECISION>()); thrust::transform(fp_residuals.begin(), fp_residuals.end(), _old_scalar_flux.begin(), fp_residuals.begin(), thrust::divides<FP_PRECISION>()); /* Replace INF and NaN values (from divide by zero) with 0. */ thrust::replace_if(fp_residuals.begin(), fp_residuals.end(), inf_test, 0); thrust::replace_if(fp_residuals.begin(), fp_residuals.end(), nan_test, 0); /* Square the residuals */ thrust::transform(fp_residuals.begin(), fp_residuals.end(), fp_residuals.begin(), fp_residuals.begin(), thrust::multiplies<FP_PRECISION>()); typedef thrust::device_vector<FP_PRECISION>::iterator Iterator; /* Reduce flux residuals across energy groups within each FSR */ for (int e=0; e < _num_groups; e++) { strided_range<Iterator> strider(fp_residuals.begin() + e, fp_residuals.end(), _num_groups); thrust::transform(FSR_fp_residuals.begin(), FSR_fp_residuals.end(), strider.begin(), FSR_fp_residuals.begin(), thrust::plus<FP_PRECISION>()); } /* Copy the FP_PRECISION residual to the double precision residual */ thrust::copy(FSR_fp_residuals.begin(), FSR_fp_residuals.end(), residuals.begin()); /* Sum up the residuals */ residual = thrust::reduce(residuals.begin(), residuals.end()); /* Deallocate memory for Thrust vectors */ fp_residuals.clear(); FSR_fp_residuals.clear(); residuals.clear(); /* Normalize the residual */ residual = sqrt(residual / norm); return residual; } else if (res_type == FISSION_SOURCE) { if (_num_fissionable_FSRs == 0) log_printf(ERROR, "The Solver is unable to compute a " "FISSION_SOURCE residual without fissionable FSRs"); norm = _num_fissionable_FSRs; /* Allocate Thrust vectors for fission sources in each FSR, group */ thrust::device_vector<FP_PRECISION> new_fission_sources_vec(_num_FSRs * _num_groups); thrust::device_vector<FP_PRECISION> old_fission_sources_vec(_num_FSRs * _num_groups); /* Allocate Thrust vectors for energy-integrated fission sources in each FSR */ thrust::device_vector<FP_PRECISION> FSR_old_fiss_src(_num_FSRs); thrust::device_vector<FP_PRECISION> FSR_new_fiss_src(_num_FSRs); /* Cast Thrust vectors as array pointers */ FP_PRECISION* old_fission_sources = thrust::raw_pointer_cast(&old_fission_sources_vec[0]); FP_PRECISION* new_fission_sources = thrust::raw_pointer_cast(&new_fission_sources_vec[0]); FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); FP_PRECISION* old_scalar_flux = thrust::raw_pointer_cast(&_old_scalar_flux[0]); /* Compute the old and new nu-fission sources in each FSR, group */ computeFSRFissionSourcesOnDevice<<<_B, _T>>>(_FSR_materials, _materials, false, old_scalar_flux, old_fission_sources); computeFSRFissionSourcesOnDevice<<<_B, _T>>>(_FSR_materials, _materials, false, scalar_flux, new_fission_sources); typedef 
thrust::device_vector<FP_PRECISION>::iterator Iterator; /* Reduce nu-fission sources across energy groups within each FSR */ for (int e=0; e < _num_groups; e++) { strided_range<Iterator> old_strider(old_fission_sources_vec.begin() + e, old_fission_sources_vec.end(), _num_groups); strided_range<Iterator> new_strider(new_fission_sources_vec.begin() + e, new_fission_sources_vec.end(), _num_groups); thrust::transform(FSR_old_fiss_src.begin(), FSR_old_fiss_src.end(), old_strider.begin(), FSR_old_fiss_src.begin(), thrust::plus<FP_PRECISION>()); thrust::transform(FSR_new_fiss_src.begin(), FSR_new_fiss_src.end(), new_strider.begin(), FSR_new_fiss_src.begin(), thrust::plus<FP_PRECISION>()); } /* Compute the relative nu-fission source change in each FSR */ thrust::transform(FSR_new_fiss_src.begin(), FSR_new_fiss_src.end(), FSR_old_fiss_src.begin(), residuals.begin(), thrust::minus<FP_PRECISION>()); thrust::transform(residuals.begin(), residuals.end(), FSR_old_fiss_src.begin(), residuals.begin(), thrust::divides<FP_PRECISION>()); /* Deallocate memory for Thrust vectors */ old_fission_sources_vec.clear(); new_fission_sources_vec.clear(); FSR_old_fiss_src.clear(); FSR_new_fiss_src.clear(); } else if (res_type == TOTAL_SOURCE) { norm = _num_FSRs; /* Allocate Thrust vectors for fission/scatter sources in each FSR, group */ thrust::device_vector<FP_PRECISION> new_sources_vec(_num_FSRs * _num_groups); thrust::device_vector<FP_PRECISION> old_sources_vec(_num_FSRs * _num_groups); thrust::fill(new_sources_vec.begin(), new_sources_vec.end(), 0.0); thrust::fill(old_sources_vec.begin(), old_sources_vec.end(), 0.0); /* Allocate Thrust vectors for energy-integrated fission/scatter sources in each FSR */ thrust::device_vector<FP_PRECISION> FSR_old_src(_num_FSRs); thrust::device_vector<FP_PRECISION> FSR_new_src(_num_FSRs); thrust::fill(FSR_old_src.begin(), FSR_old_src.end(), 0.); thrust::fill(FSR_new_src.begin(), FSR_new_src.end(), 0.); /* Cast Thrust vectors as array pointers */ FP_PRECISION* old_sources = thrust::raw_pointer_cast(&old_sources_vec[0]); FP_PRECISION* new_sources = thrust::raw_pointer_cast(&new_sources_vec[0]); FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); FP_PRECISION* old_scalar_flux = thrust::raw_pointer_cast(&_old_scalar_flux[0]); /* Compute nu-fission source */ /* Compute the old and new nu-fission sources in each FSR, group */ computeFSRFissionSourcesOnDevice<<<_B, _T>>>(_FSR_materials, _materials, false, old_scalar_flux, old_sources); computeFSRFissionSourcesOnDevice<<<_B, _T>>>(_FSR_materials, _materials, false, scalar_flux, new_sources); typedef thrust::device_vector<FP_PRECISION>::iterator Iterator; /* Reduce nu-fission sources across energy groups within each FSR */ for (int e=0; e < _num_groups; e++) { strided_range<Iterator> old_strider(old_sources_vec.begin() + e, old_sources_vec.end(), _num_groups); strided_range<Iterator> new_strider(new_sources_vec.begin() + e, new_sources_vec.end(), _num_groups); thrust::transform(FSR_old_src.begin(), FSR_old_src.end(), old_strider.begin(), FSR_old_src.begin(), thrust::plus<FP_PRECISION>()); thrust::transform(FSR_new_src.begin(), FSR_new_src.end(), new_strider.begin(), FSR_new_src.begin(), thrust::plus<FP_PRECISION>()); } /* Multiply fission sources by inverse keff */ thrust::for_each(FSR_new_src.begin(), FSR_new_src.end(), multiplyByConstant<FP_PRECISION>(1. / _k_eff)); thrust::for_each(FSR_old_src.begin(), FSR_old_src.end(), multiplyByConstant<FP_PRECISION>(1. 
/ _k_eff)); /* Compute scatter source */ /* Reset sources Thrust vectors to zero */ thrust::fill(new_sources_vec.begin(), new_sources_vec.end(), 0.0); thrust::fill(old_sources_vec.begin(), old_sources_vec.end(), 0.0); /* Compute the old and new scattering sources in each FSR, group */ computeFSRScatterSourcesOnDevice<<<_B, _T>>>(_FSR_materials, _materials, false, old_scalar_flux, old_sources); computeFSRScatterSourcesOnDevice<<<_B, _T>>>(_FSR_materials, _materials, false, scalar_flux, new_sources); /* Reduce scatter sources across energy groups within each FSR */ for (int e=0; e < _num_groups; e++) { strided_range<Iterator> old_strider(old_sources_vec.begin() + e, old_sources_vec.end(), _num_groups); strided_range<Iterator> new_strider(new_sources_vec.begin() + e, new_sources_vec.end(), _num_groups); thrust::transform(FSR_old_src.begin(), FSR_old_src.end(), old_strider.begin(), FSR_old_src.begin(), thrust::plus<FP_PRECISION>()); thrust::transform(FSR_new_src.begin(), FSR_new_src.end(), new_strider.begin(), FSR_new_src.begin(), thrust::plus<FP_PRECISION>()); } /* Compute the relative total source change in each FSR */ thrust::transform(FSR_new_src.begin(), FSR_new_src.end(), FSR_old_src.begin(), residuals.begin(), thrust::minus<FP_PRECISION>()); thrust::transform(residuals.begin(), residuals.end(), FSR_old_src.begin(), residuals.begin(), thrust::divides<FP_PRECISION>()); /* Deallocate memory for Thrust vectors */ old_sources_vec.clear(); new_sources_vec.clear(); FSR_old_src.clear(); FSR_new_src.clear(); } /* Replace INF and NaN values (from divide by zero) with 0. */ thrust::replace_if(residuals.begin(), residuals.end(), inf_test, 0); thrust::replace_if(residuals.begin(), residuals.end(), nan_test, 0); /* Square the residuals */ thrust::transform(residuals.begin(), residuals.end(), residuals.begin(), residuals.begin(), thrust::multiplies<double>()); /* Sum up the residuals */ residual = thrust::reduce(residuals.begin(), residuals.end()); /* Deallocate memory for residuals vector */ residuals.clear(); /* Normalize the residual */ residual = sqrt(residual / norm); return residual; } /** * @brief Computes the volume-averaged, energy-integrated nu-fission rate in * each FSR and stores them in an array indexed by FSR ID. * @details This is a helper method for SWIG to allow users to retrieve * FSR nu-fission rates as a NumPy array. 
An example of how this method * can be called from Python is as follows: * * @code * num_FSRs = geometry.getNumFSRs() * fission_rates = solver.computeFSRFissionRates(num_FSRs) * @endcode * * @param fission_rates an array to store the nu-fission rates (implicitly * passed in as a NumPy array from Python) * @param num_FSRs the number of FSRs passed in from Python */ void GPUSolver::computeFSRFissionRates(double* fission_rates, int num_FSRs) { log_printf(INFO, "Computing FSR fission rates..."); /* Allocate memory for the FSR nu-fission rates on the device and host */ FP_PRECISION* dev_fission_rates; cudaMalloc((void**)&dev_fission_rates, _num_FSRs * sizeof(FP_PRECISION)); FP_PRECISION* host_fission_rates = new FP_PRECISION[_num_FSRs]; FP_PRECISION* scalar_flux = thrust::raw_pointer_cast(&_scalar_flux[0]); /* Compute the FSR nu-fission rates on the device */ computeFSRFissionRatesOnDevice<<<_B, _T>>>(_FSR_volumes, _FSR_materials, _materials, scalar_flux, dev_fission_rates); /* Copy the nu-fission rate array from the device to the host */ cudaMemcpy((void*)host_fission_rates, (void*)dev_fission_rates, _num_FSRs * sizeof(FP_PRECISION), cudaMemcpyDeviceToHost); /* Populate the double precision NumPy array for the output */ for (int i=0; i < _num_FSRs; i++) fission_rates[i] = host_fission_rates[i]; /* Deallocate the memory assigned to store the fission rates on the device */ cudaFree(dev_fission_rates); delete [] host_fission_rates; }
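The computeResidual() path above forms the relative change of each FSR quantity, squares it, replaces INF/NaN with zero, sums, and normalizes as sqrt(residual / norm). A minimal Thrust sketch of that RMS reduction follows; the functor and function names are illustrative assumptions, not OpenMOC API.

// Sketch only: fuse the "relative change squared" transform with the sum
// using thrust::inner_product, then normalize on the host.
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <thrust/functional.h>
#include <cmath>

struct rel_diff_sq
{
  __host__ __device__ double operator()(double new_val, double old_val) const
  {
    if (old_val == 0.0)
      return 0.0;                       // mirrors the INF/NaN replacement with 0
    double r = (new_val - old_val) / old_val;
    return r * r;
  }
};

double rmsRelativeChange(const thrust::device_vector<double>& new_flux,
                         const thrust::device_vector<double>& old_flux)
{
  double sum = thrust::inner_product(new_flux.begin(), new_flux.end(),
                                     old_flux.begin(), 0.0,
                                     thrust::plus<double>(), rel_diff_sq());
  return std::sqrt(sum / new_flux.size());
}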
b1eafc7b7fed7a75449dbca6227540dc2bde5d29.hip
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018-2020, Michael P. Howard
// Copyright (c) 2021, Auburn University
// This file is part of the azplugins project, released under the Modified BSD License.

#include "PairPotentials.cuh"

namespace azplugins
{
namespace gpu
{
//! Kernel driver for LJ 9-6 pair potential
template hipError_t compute_pair_potential<azplugins::detail::PairEvaluatorLJ96>
    (const pair_args_t& pair_args,
     const typename azplugins::detail::PairEvaluatorLJ96::param_type *d_params);
} // end namespace gpu
} // end namespace azplugins
b1eafc7b7fed7a75449dbca6227540dc2bde5d29.cu
// Copyright (c) 2018-2020, Michael P. Howard
// Copyright (c) 2021, Auburn University
// This file is part of the azplugins project, released under the Modified BSD License.

#include "PairPotentials.cuh"

namespace azplugins
{
namespace gpu
{
//! Kernel driver for LJ 9-6 pair potential
template cudaError_t compute_pair_potential<azplugins::detail::PairEvaluatorLJ96>
    (const pair_args_t& pair_args,
     const typename azplugins::detail::PairEvaluatorLJ96::param_type *d_params);
} // end namespace gpu
} // end namespace azplugins
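Both files in this pair contain nothing but an explicit template instantiation of a kernel driver, so the templated definition in PairPotentials.cuh is compiled once per evaluator type inside a .cu/.hip translation unit. The sketch below shows that general pattern with a hypothetical evaluator and driver; none of these names are azplugins API.

#include <cuda_runtime.h>

struct MyEvaluator
{
  struct param_type { float epsilon; float sigma; };
};

template<class Evaluator>
__global__ void pair_kernel(const typename Evaluator::param_type* params,
                            float* energy, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n)
    energy[i] = params[0].epsilon;   // placeholder for the real pair evaluation
}

template<class Evaluator>
cudaError_t compute_pair_potential(const typename Evaluator::param_type* d_params,
                                   float* d_energy, int n)
{
  pair_kernel<Evaluator><<<(n + 127) / 128, 128>>>(d_params, d_energy, n);
  return cudaGetLastError();
}

// The explicit instantiation is what the file pair above boils down to:
// it forces device-code generation for one concrete evaluator in this file.
template cudaError_t compute_pair_potential<MyEvaluator>(
    const MyEvaluator::param_type* d_params, float* d_energy, int n);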
696cbe0be6f4e01c13e8fd4ece538de3fe189a80.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/collective/c_softmax_with_cross_entropy_op.h" #include "paddle/fluid/operators/math/cross_entropy.h" #include "paddle/fluid/operators/math/softmax_impl.h" #include "paddle/fluid/platform/collective_helper.h" #include "paddle/fluid/platform/device/gpu/nccl_helper.h" #include "paddle/fluid/string/string_helper.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static inline int NumBlocks(const int N) { return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <typename T, typename IndexT> __global__ void MaskLabelByIndex(T* predicted_logits, const T* logit, const IndexT* label, const int start_index, const int end_index, const int64_t N, const int64_t D, const int nranks) { CUDA_KERNEL_LOOP(i, N) { auto real_label = label[i]; PADDLE_ENFORCE((real_label < D * nranks) && (real_label >= 0), "The index is out of bounds, " "please check whether the value of label and " "input meet the class number. It should " "be less than [%d], but received [%d]", D * nranks, real_label); if (real_label >= start_index && real_label < end_index) { predicted_logits[i] = logit[i * D + real_label - start_index]; } } } template <typename T, typename IndexT> __global__ void MaskLabelByIndexGrad(T* logits_grad, const T* loss_grad, const IndexT* labels, const int start_index, const int end_index, const int64_t N, const int64_t D) { CUDA_KERNEL_LOOP(i, N * D) { auto row = i / D; auto col = i % D; if ((col + start_index) == labels[row]) { logits_grad[i] = (logits_grad[i] - static_cast<T>(1.0)) * loss_grad[row]; } else { logits_grad[i] *= loss_grad[row]; } } } template <typename T> class CSoftmaxWithCrossEntropyOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const Tensor* logits = ctx.Input<Tensor>("Logits"); const Tensor* labels = ctx.Input<Tensor>("Label"); Tensor* softmax = ctx.Output<Tensor>("Softmax"); Tensor* loss = ctx.Output<Tensor>("Loss"); const int rid = ctx.Attr<int>("ring_id"); const int nranks = ctx.Attr<int>("nranks"); const int rank = ctx.Attr<int>("rank"); const auto& place = ctx.GetPlace(); const auto& comm = platform::NCCLCommContext::Instance().Get(rid, place); auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); // use global calculate stream const auto stream = static_cast<platform::CUDADeviceContext*>( platform::DeviceContextPool::Instance().Get(place)) ->stream(); // allocate memory on device. 
softmax->mutable_data<T>(place); loss->mutable_data<T>(place); const auto& logits_dims = logits->dims(); const auto& labels_dims = labels->dims(); const int axis = logits_dims.size() - 1; const int N = SizeToAxis(axis, logits_dims); const int D = SizeFromAxis(axis, logits_dims); Tensor logits_2d, softmax_2d, loss_2d; logits_2d.ShareDataWith(*logits).Resize({N, D}); softmax_2d.ShareDataWith(*softmax).Resize({N, D}); loss_2d.ShareDataWith(*loss).Resize({N, 1}); auto eigen_logits = math::EigenMatrix<T>::From(logits_2d); auto eigen_softmax = math::EigenMatrix<T>::From(softmax_2d); // step 1, obtain logit_max Tensor logits_max; logits_max = ctx.AllocateTmpTensor<T, platform::CUDADeviceContext>({N, 1}, dev_ctx); void* logits_max_buff = logits_max.mutable_data<T>(place); auto eigen_logits_max = math::EigenMatrix<T>::From(logits_max); Eigen::DSizes<int, 1> along_axis(1); eigen_logits_max.device(*dev_ctx.eigen_device()) = eigen_logits.maximum(along_axis); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( logits_max_buff, logits_max_buff, logits_max.numel(), platform::ToNCCLDataType(logits_max.type()), ncclMax, comm->comm(), stream)); // step 2, obtain logit - logit_max Eigen::DSizes<int, 2> batch_by_one(N, 1); Eigen::DSizes<int, 2> one_by_class(1, D); eigen_softmax.device(*dev_ctx.eigen_device()) = (eigen_logits - eigen_logits_max.reshape(batch_by_one).broadcast(one_by_class)) .unaryExpr(math::ValueClip<T>()); // step 3, obtain predict target Tensor predicted_logits; predicted_logits = ctx.AllocateTmpTensor<T, platform::CUDADeviceContext>({N, 1}, dev_ctx); predicted_logits.mutable_data<T>(place); auto t = framework::EigenVector<T>::Flatten(predicted_logits); t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0)); const int start_index = rank * D; const int end_index = start_index + D; int blocks = NumBlocks(N); int threads = kNumCUDAThreads; const auto& label_type = labels->type(); if (label_type == framework::proto::VarType::INT32) { hipLaunchKernelGGL(( MaskLabelByIndex<T, int32_t>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(), predicted_logits.data<T>(), softmax_2d.data<T>(), labels->data<int32_t>(), start_index, end_index, N, D, nranks); } else if (label_type == framework::proto::VarType::INT64) { hipLaunchKernelGGL(( MaskLabelByIndex<T, int64_t>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(), predicted_logits.data<T>(), softmax_2d.data<T>(), labels->data<int64_t>(), start_index, end_index, N, D, nranks); } void* predict_logits_buff = predicted_logits.mutable_data<T>(place); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( predict_logits_buff, predict_logits_buff, predicted_logits.numel(), platform::ToNCCLDataType(predicted_logits.type()), ncclSum, comm->comm(), stream)); // step 4, obtain exp(logit) eigen_softmax.device(*dev_ctx.eigen_device()) = eigen_softmax.exp(); // step 5, obtain sum_exp_logits Tensor sum_exp_logits; sum_exp_logits = ctx.AllocateTmpTensor<T, platform::CUDADeviceContext>({N, 1}, dev_ctx); void* sum_exp_logits_buff = sum_exp_logits.mutable_data<T>(place); auto eigen_sum_exp_logits = math::EigenMatrix<T>::From(sum_exp_logits); eigen_sum_exp_logits.device(*dev_ctx.eigen_device()) = eigen_softmax.sum(along_axis); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( sum_exp_logits_buff, sum_exp_logits_buff, sum_exp_logits.numel(), platform::ToNCCLDataType(sum_exp_logits.type()), ncclSum, comm->comm(), stream)); auto eigen_loss = math::EigenMatrix<T>::From(loss_2d); auto eigen_predicted_logits = 
math::EigenMatrix<T>::From(predicted_logits); eigen_loss.device(*dev_ctx.eigen_device()) = (eigen_sum_exp_logits.log().unaryExpr(math::TolerableValue<T>()) - eigen_predicted_logits) .unaryExpr(math::TolerableValue<T>()); eigen_softmax.device(*dev_ctx.eigen_device()) = (eigen_softmax * eigen_sum_exp_logits.inverse().broadcast(one_by_class)); } }; template <typename T> class CSoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { const Tensor* labels = context.Input<Tensor>("Label"); const Tensor* loss_grad = context.Input<Tensor>(framework::GradVarName("Loss")); Tensor* logit_grad = context.Output<Tensor>(framework::GradVarName("Logits")); const Tensor* softmax = context.Input<Tensor>("Softmax"); const int rank = context.Attr<int>("rank"); auto& dev_ctx = context.template device_context<platform::CUDADeviceContext>(); if (logit_grad != softmax) { framework::TensorCopy(*softmax, context.GetPlace(), context.device_context(), logit_grad); } const auto sofrmax_dims = softmax->dims(); const int axis = sofrmax_dims.size() - 1; const int N = SizeToAxis(axis, sofrmax_dims); const int D = SizeFromAxis(axis, sofrmax_dims); Tensor logit_grad_2d; logit_grad_2d.ShareDataWith(*logit_grad).Resize({N, D}); int blocks = NumBlocks(N * D); int threads = kNumCUDAThreads; const auto& label_type = labels->type(); const int start_index = rank * D; const int end_index = start_index + D; if (label_type == framework::proto::VarType::INT32) { hipLaunchKernelGGL(( MaskLabelByIndexGrad<T, int32_t>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(), logit_grad_2d.data<T>(), loss_grad->data<T>(), labels->data<int32_t>(), start_index, end_index, N, D); } else if (label_type == framework::proto::VarType::INT64) { hipLaunchKernelGGL(( MaskLabelByIndexGrad<T, int64_t>), dim3(blocks), dim3(threads), 0, dev_ctx.stream(), logit_grad_2d.data<T>(), loss_grad->data<T>(), labels->data<int64_t>(), start_index, end_index, N, D); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( c_softmax_with_cross_entropy, ops::CSoftmaxWithCrossEntropyOpCUDAKernel<float>, ops::CSoftmaxWithCrossEntropyOpCUDAKernel<double>, ops::CSoftmaxWithCrossEntropyOpCUDAKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL( c_softmax_with_cross_entropy_grad, ops::CSoftmaxWithCrossEntropyGradCUDAKernel<float>, ops::CSoftmaxWithCrossEntropyGradCUDAKernel<paddle::platform::float16>, ops::CSoftmaxWithCrossEntropyGradCUDAKernel<double>);
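As a sanity check of the math in the kernel above, here is a single-process sketch (no NCCL, hypothetical names) of the numerically stable cross entropy it computes per sample: shift by the max logit, exponentiate, and take log of the exponential sum minus the shifted logit of the true class. In the distributed kernel each of those three quantities is additionally reduced across the class-parallel ranks with ncclAllReduce.

#include <algorithm>
#include <cmath>
#include <vector>

float stableSoftmaxCrossEntropy(const std::vector<float>& logits, int label)
{
  // step 1: max logit for numerical stability
  float max_logit = *std::max_element(logits.begin(), logits.end());

  // steps 2 and 5: exp(logit - max) and its sum
  float sum_exp = 0.f;
  for (float v : logits)
    sum_exp += std::exp(v - max_logit);

  // step 3: the "predicted" (true-class) shifted logit
  float predicted = logits[label] - max_logit;

  // loss = log(sum_j exp(logit_j - max)) - (logit_label - max)
  return std::log(sum_exp) - predicted;
}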
696cbe0be6f4e01c13e8fd4ece538de3fe189a80.cu
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/collective/c_softmax_with_cross_entropy_op.h" #include "paddle/fluid/operators/math/cross_entropy.h" #include "paddle/fluid/operators/math/softmax_impl.h" #include "paddle/fluid/platform/collective_helper.h" #include "paddle/fluid/platform/device/gpu/nccl_helper.h" #include "paddle/fluid/string/string_helper.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <typename T, typename IndexT> __global__ void MaskLabelByIndex(T* predicted_logits, const T* logit, const IndexT* label, const int start_index, const int end_index, const int64_t N, const int64_t D, const int nranks) { CUDA_KERNEL_LOOP(i, N) { auto real_label = label[i]; PADDLE_ENFORCE((real_label < D * nranks) && (real_label >= 0), "The index is out of bounds, " "please check whether the value of label and " "input meet the class number. It should " "be less than [%d], but received [%d]", D * nranks, real_label); if (real_label >= start_index && real_label < end_index) { predicted_logits[i] = logit[i * D + real_label - start_index]; } } } template <typename T, typename IndexT> __global__ void MaskLabelByIndexGrad(T* logits_grad, const T* loss_grad, const IndexT* labels, const int start_index, const int end_index, const int64_t N, const int64_t D) { CUDA_KERNEL_LOOP(i, N * D) { auto row = i / D; auto col = i % D; if ((col + start_index) == labels[row]) { logits_grad[i] = (logits_grad[i] - static_cast<T>(1.0)) * loss_grad[row]; } else { logits_grad[i] *= loss_grad[row]; } } } template <typename T> class CSoftmaxWithCrossEntropyOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const Tensor* logits = ctx.Input<Tensor>("Logits"); const Tensor* labels = ctx.Input<Tensor>("Label"); Tensor* softmax = ctx.Output<Tensor>("Softmax"); Tensor* loss = ctx.Output<Tensor>("Loss"); const int rid = ctx.Attr<int>("ring_id"); const int nranks = ctx.Attr<int>("nranks"); const int rank = ctx.Attr<int>("rank"); const auto& place = ctx.GetPlace(); const auto& comm = platform::NCCLCommContext::Instance().Get(rid, place); auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); // use global calculate stream const auto stream = static_cast<platform::CUDADeviceContext*>( platform::DeviceContextPool::Instance().Get(place)) ->stream(); // allocate memory on device. 
softmax->mutable_data<T>(place); loss->mutable_data<T>(place); const auto& logits_dims = logits->dims(); const auto& labels_dims = labels->dims(); const int axis = logits_dims.size() - 1; const int N = SizeToAxis(axis, logits_dims); const int D = SizeFromAxis(axis, logits_dims); Tensor logits_2d, softmax_2d, loss_2d; logits_2d.ShareDataWith(*logits).Resize({N, D}); softmax_2d.ShareDataWith(*softmax).Resize({N, D}); loss_2d.ShareDataWith(*loss).Resize({N, 1}); auto eigen_logits = math::EigenMatrix<T>::From(logits_2d); auto eigen_softmax = math::EigenMatrix<T>::From(softmax_2d); // step 1, obtain logit_max Tensor logits_max; logits_max = ctx.AllocateTmpTensor<T, platform::CUDADeviceContext>({N, 1}, dev_ctx); void* logits_max_buff = logits_max.mutable_data<T>(place); auto eigen_logits_max = math::EigenMatrix<T>::From(logits_max); Eigen::DSizes<int, 1> along_axis(1); eigen_logits_max.device(*dev_ctx.eigen_device()) = eigen_logits.maximum(along_axis); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( logits_max_buff, logits_max_buff, logits_max.numel(), platform::ToNCCLDataType(logits_max.type()), ncclMax, comm->comm(), stream)); // step 2, obtain logit - logit_max Eigen::DSizes<int, 2> batch_by_one(N, 1); Eigen::DSizes<int, 2> one_by_class(1, D); eigen_softmax.device(*dev_ctx.eigen_device()) = (eigen_logits - eigen_logits_max.reshape(batch_by_one).broadcast(one_by_class)) .unaryExpr(math::ValueClip<T>()); // step 3, obtain predict target Tensor predicted_logits; predicted_logits = ctx.AllocateTmpTensor<T, platform::CUDADeviceContext>({N, 1}, dev_ctx); predicted_logits.mutable_data<T>(place); auto t = framework::EigenVector<T>::Flatten(predicted_logits); t.device(*dev_ctx.eigen_device()) = t.constant(static_cast<T>(0)); const int start_index = rank * D; const int end_index = start_index + D; int blocks = NumBlocks(N); int threads = kNumCUDAThreads; const auto& label_type = labels->type(); if (label_type == framework::proto::VarType::INT32) { MaskLabelByIndex<T, int32_t><<<blocks, threads, 0, dev_ctx.stream()>>>( predicted_logits.data<T>(), softmax_2d.data<T>(), labels->data<int32_t>(), start_index, end_index, N, D, nranks); } else if (label_type == framework::proto::VarType::INT64) { MaskLabelByIndex<T, int64_t><<<blocks, threads, 0, dev_ctx.stream()>>>( predicted_logits.data<T>(), softmax_2d.data<T>(), labels->data<int64_t>(), start_index, end_index, N, D, nranks); } void* predict_logits_buff = predicted_logits.mutable_data<T>(place); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( predict_logits_buff, predict_logits_buff, predicted_logits.numel(), platform::ToNCCLDataType(predicted_logits.type()), ncclSum, comm->comm(), stream)); // step 4, obtain exp(logit) eigen_softmax.device(*dev_ctx.eigen_device()) = eigen_softmax.exp(); // step 5, obtain sum_exp_logits Tensor sum_exp_logits; sum_exp_logits = ctx.AllocateTmpTensor<T, platform::CUDADeviceContext>({N, 1}, dev_ctx); void* sum_exp_logits_buff = sum_exp_logits.mutable_data<T>(place); auto eigen_sum_exp_logits = math::EigenMatrix<T>::From(sum_exp_logits); eigen_sum_exp_logits.device(*dev_ctx.eigen_device()) = eigen_softmax.sum(along_axis); PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::ncclAllReduce( sum_exp_logits_buff, sum_exp_logits_buff, sum_exp_logits.numel(), platform::ToNCCLDataType(sum_exp_logits.type()), ncclSum, comm->comm(), stream)); auto eigen_loss = math::EigenMatrix<T>::From(loss_2d); auto eigen_predicted_logits = math::EigenMatrix<T>::From(predicted_logits); eigen_loss.device(*dev_ctx.eigen_device()) = 
(eigen_sum_exp_logits.log().unaryExpr(math::TolerableValue<T>()) - eigen_predicted_logits) .unaryExpr(math::TolerableValue<T>()); eigen_softmax.device(*dev_ctx.eigen_device()) = (eigen_softmax * eigen_sum_exp_logits.inverse().broadcast(one_by_class)); } }; template <typename T> class CSoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { const Tensor* labels = context.Input<Tensor>("Label"); const Tensor* loss_grad = context.Input<Tensor>(framework::GradVarName("Loss")); Tensor* logit_grad = context.Output<Tensor>(framework::GradVarName("Logits")); const Tensor* softmax = context.Input<Tensor>("Softmax"); const int rank = context.Attr<int>("rank"); auto& dev_ctx = context.template device_context<platform::CUDADeviceContext>(); if (logit_grad != softmax) { framework::TensorCopy(*softmax, context.GetPlace(), context.device_context(), logit_grad); } const auto sofrmax_dims = softmax->dims(); const int axis = sofrmax_dims.size() - 1; const int N = SizeToAxis(axis, sofrmax_dims); const int D = SizeFromAxis(axis, sofrmax_dims); Tensor logit_grad_2d; logit_grad_2d.ShareDataWith(*logit_grad).Resize({N, D}); int blocks = NumBlocks(N * D); int threads = kNumCUDAThreads; const auto& label_type = labels->type(); const int start_index = rank * D; const int end_index = start_index + D; if (label_type == framework::proto::VarType::INT32) { MaskLabelByIndexGrad<T, int32_t><<<blocks, threads, 0, dev_ctx.stream()>>>( logit_grad_2d.data<T>(), loss_grad->data<T>(), labels->data<int32_t>(), start_index, end_index, N, D); } else if (label_type == framework::proto::VarType::INT64) { MaskLabelByIndexGrad<T, int64_t><<<blocks, threads, 0, dev_ctx.stream()>>>( logit_grad_2d.data<T>(), loss_grad->data<T>(), labels->data<int64_t>(), start_index, end_index, N, D); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( c_softmax_with_cross_entropy, ops::CSoftmaxWithCrossEntropyOpCUDAKernel<float>, ops::CSoftmaxWithCrossEntropyOpCUDAKernel<double>, ops::CSoftmaxWithCrossEntropyOpCUDAKernel<plat::float16>); REGISTER_OP_CUDA_KERNEL( c_softmax_with_cross_entropy_grad, ops::CSoftmaxWithCrossEntropyGradCUDAKernel<float>, ops::CSoftmaxWithCrossEntropyGradCUDAKernel<paddle::platform::float16>, ops::CSoftmaxWithCrossEntropyGradCUDAKernel<double>);
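The substantive differences between the .hip and .cu files above are API renames and the kernel-launch syntax. A minimal sketch of that launch translation is shown below with a placeholder kernel; the HIP form appears in the comment.

#include <cuda_runtime.h>

__global__ void scale(float* x, float a, int n)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n)
    x[i] *= a;
}

void launch_scale(float* d_x, float a, int n, cudaStream_t stream)
{
  int threads = 256;
  int blocks  = (n + threads - 1) / threads;

  // CUDA form, as in the .cu file:
  scale<<<blocks, threads, 0, stream>>>(d_x, a, n);

  // hipify rewrites the same launch for the .hip file as:
  //   hipLaunchKernelGGL(scale, dim3(blocks), dim3(threads), 0, stream, d_x, a, n);
}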
b60a475ed2a71693e490fd1c0547a1fe9f92f2a9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * mpUtils * main.cpp * * @author: Hendrik Schwanekamp * @mail: [email protected] * * mpUtils = my personal Utillities * A utility library for my personal c++ projects * * Copyright 2016 Hendrik Schwanekamp * */ /* * This is testing features of the matrix class... to be replaced by actual unit tests in the future... */ #include <mpUtils/mpGraphics.h> #include <mpUtils/mpUtils.h> #include <mpUtils/mpCuda.h> #define FULL_MASK 0xffffffff using namespace mpu; struct ManagedData : mpu::Managed { int i; }; __global__ void init(mpu::VectorReference<int> data, ManagedData* res, int N) { if(threadIdx.x == 0 && blockIdx.x == 0) res->i = 25; for( int idx : gridStrideRange(data.size())) { data[idx] += 2; } } int main() { Log myLog( LogLvl::ALL, ConsoleSink()); myLog.printHeader("cudaTest", "0.9.1"); int N = 32000; mpu::DeviceVector<int> data(N,5); ManagedData *res = new ManagedData; hipLaunchKernelGGL(( init), dim3(numBlocks(N,512)),dim3(512), 0, 0, data.getVectorReference(), res,N); // mpu::SimpleStopwatch sw; assert_cuda(hipDeviceSynchronize()); // sw.pause(); // int resCPU = 0; // hipMemcpy(&resCPU,res,sizeof(int),hipMemcpyDeviceToHost); // mpu::PinnedVector<int> hostData = data; // int i = 10; // const int& ref = i; // const mpu::DeviceVector<int>& ref = data; DeviceVector<int> copy(30,10); copy[25] += 25; ManagedVector<int> managed(data); ManagedVector<int> managed2; managed2 = managed; myLog.print(LogLvl::INFO) << "result: " << res->i << " value[10]=" << data[10] << " copy[25]=" << copy[25] << " managed[13]= " << managed2[13]; DeviceVector<float> a(128); DeviceVector<float> b(a); DeviceVector<float> c(16000); DeviceVector<float> d(32000); DeviceVector<float> e(64000); c=d; e=a; d=c; swap(a,e); return 0; }
b60a475ed2a71693e490fd1c0547a1fe9f92f2a9.cu
/* * mpUtils * main.cpp * * @author: Hendrik Schwanekamp * @mail: [email protected] * * mpUtils = my personal Utillities * A utility library for my personal c++ projects * * Copyright 2016 Hendrik Schwanekamp * */ /* * This is testing features of the matrix class... to be replaced by actual unit tests in the future... */ #include <mpUtils/mpGraphics.h> #include <mpUtils/mpUtils.h> #include <mpUtils/mpCuda.h> #define FULL_MASK 0xffffffff using namespace mpu; struct ManagedData : mpu::Managed { int i; }; __global__ void init(mpu::VectorReference<int> data, ManagedData* res, int N) { if(threadIdx.x == 0 && blockIdx.x == 0) res->i = 25; for( int idx : gridStrideRange(data.size())) { data[idx] += 2; } } int main() { Log myLog( LogLvl::ALL, ConsoleSink()); myLog.printHeader("cudaTest", "0.9.1"); int N = 32000; mpu::DeviceVector<int> data(N,5); ManagedData *res = new ManagedData; init<<<numBlocks(N,512),512>>>(data.getVectorReference(), res,N); // mpu::SimpleStopwatch sw; assert_cuda(cudaDeviceSynchronize()); // sw.pause(); // int resCPU = 0; // cudaMemcpy(&resCPU,res,sizeof(int),cudaMemcpyDeviceToHost); // mpu::PinnedVector<int> hostData = data; // int i = 10; // const int& ref = i; // const mpu::DeviceVector<int>& ref = data; DeviceVector<int> copy(30,10); copy[25] += 25; ManagedVector<int> managed(data); ManagedVector<int> managed2; managed2 = managed; myLog.print(LogLvl::INFO) << "result: " << res->i << " value[10]=" << data[10] << " copy[25]=" << copy[25] << " managed[13]= " << managed2[13]; DeviceVector<float> a(128); DeviceVector<float> b(a); DeviceVector<float> c(16000); DeviceVector<float> d(32000); DeviceVector<float> e(64000); c=d; e=a; d=c; swap(a,e); return 0; }
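The init() kernel in the pair above iterates with mpu::gridStrideRange; a plain-CUDA sketch of the grid-stride loop that helper presumably expands to (kernel name and launch shape here are illustrative, not from mpUtils):

// Every thread starts at its global index and strides by the total thread count,
// so any grid size covers the whole array.
__global__ void addTwo(int* data, int n)
{
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += gridDim.x * blockDim.x)
        data[idx] += 2;   // same update the init() kernel applies
}

// Example launch for n elements:
//   addTwo<<<(n + 511) / 512, 512>>>(devPtr, n);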
2840a62401792e3d5388c0e177916892c316104a.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include <iostream> #include <random> #include <time.h> #include <math.h> #include <fstream> #include <omp.h> #include <cstdlib> #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> using namespace std; typedef vector<vector<float>> matrix; #define BLOCK_SIZE 16 class Convolution { public: Convolution(); ~Convolution(); }; vector<matrix> load_matrix(char filename[], int rows) { fstream file; float array[200][200]; vector<matrix> dataset; file.open(filename, ios::in | ios::binary); if (!file) { cout << "file not open" << endl; } for (int i = 0; i < rows; ++i) { matrix m; file.read((char*)array, 200 * 200 * sizeof(float)); for (int j = 0;j < 200;j++) { m.push_back(vector<float>(array[j], array[j] + 200)); } dataset.push_back(m); } /*for (int i = 0; i < 200; ++i) { for(int j=0;j<200;j++) cout<<array[i][j]<<" "; cout<<endl; }*/ cout << endl << endl << "loaded file" << endl; file.close(); return dataset; } void print_matrix(matrix a, int beg_row = 0, int beg_col = 0, int row = 0, int col = 0)// displays a float matrix { if (row == 0 && col == 0) { row = a.size(); col = a[0].size(); } for (int i = 0; i < row; ++i) { for (int j = 0;j < col;j++) { cout << a[beg_row + i][beg_col + j] << " "; } cout << endl; } } // creates <num_filters> number of filters and stores in filter_bank // <filter_shape> -> dimension of each filter //<filter_bank>-> array of filters void init_filters(int num_filters, int filter_shape[], vector<matrix >& filter_bank) { default_random_engine generator; normal_distribution<float> distribution(0.0, 0.1); for (int i = 0; i < num_filters; ++i) { for (int j = 0; j < filter_shape[0]; ++j) { for (int k = 0; k < filter_shape[1]; ++k) { float number = distribution(generator); filter_bank[i][j][k] = number; } } } } __global__ void cuda_matrix_multiply(matrix a, int a_beg_row, int a_beg_col, matrix b, int b_beg_row, int b_beg_col, int row, int col) { ; } //multiplies corresponding elements of 2 matrices' submatrix such that sub_mat(a)[<row>][<col>] X sub_mat(b)[<row>][<col>] //<a> -> first matrix //<b> -> second matrix matrix matrix_multiply(matrix a, int a_beg_row, int a_beg_col, matrix b, int b_beg_row, int b_beg_col, int row, int col) { matrix product(row, vector<float>(col, 0)); //parallel for (int i = 0; i < row; ++i) { for (int j = 0; j < col; ++j) { product[i][j] = a[a_beg_row + i][a_beg_col + j] * b[b_beg_row + i][b_beg_col + j]; //cout<<product[i][j]<<" "; } //cout<<endl; } return product; } //sum of the matrix float matrix_sum(matrix a) { //parallel float sum = 0; for (int i = 0; i < a.size(); ++i) { for (int j = 0; j < a[0].size(); ++j) { sum += a[i][j]; } } return sum; } //gets an input as an input and filters it using <filter> //<img> is the input image //<filter> is the applied filter matrix convolve(matrix img, int img_shape[], matrix filter, int filter_shape[], int stride) { matrix filtered_img; for (int i = 0; i <= img_shape[0] - filter_shape[0]; i += stride) { vector<float> v; for (int j = 0; j <= img_shape[1] - filter_shape[1]; j += stride) { float masked_values = matrix_sum(matrix_multiply(img, i, j, filter, 0, 0, filter_shape[0], filter_shape[1])); v.push_back(masked_values); } filtered_img.push_back(v); } return filtered_img; } //applies multiple filters from the <filter_bank> on the input <img> vector<matrix > apply_filter(matrix img, int img_shape[], vector<matrix > filter_bank, int filter_shape[]) { vector<matrix > convolved_layer; 
//parallel for (int i = 0; i < filter_bank.size(); ++i) { convolved_layer.push_back(convolve(img, img_shape, filter_bank[i], filter_shape, 1)); } return convolved_layer; } // max value in matrix <a> float matrix_max(matrix a, int beg_row, int beg_col, int row, int col) { float l = -INFINITY; for (int i = 0; i < row; ++i) { for (int j = 0; j < col; ++j) { if (a[beg_row + i][beg_col + j] > l) { l = a[beg_row + i][beg_col + j]; } } } return l; } //applies pooling function on the <img> // <stride> -> step size of pooling frame // <pool_dime> -> size of the frame matrix apply_maxPool(matrix img, int img_shape[], int stride, int pool_dim) { matrix pooled_img; for (int i = 0; i <= img_shape[0] - pool_dim; i += stride) { vector<float> v; for (int j = 0; j <= img_shape[1] - pool_dim; j += stride) { //cout<<i<<' '<<j<<endl; float max_val = matrix_max(img, i, j, pool_dim, pool_dim); v.push_back(max_val); } pooled_img.push_back(v); } return pooled_img; } vector<matrix > apply_maxPool_to_filters(vector<matrix > prev_layer, int img_shape[]) { vector<matrix > pooled_layer; //parallel for (int i = 0; i < prev_layer.size(); ++i) { matrix v = apply_maxPool(prev_layer[i], img_shape, 2, 2); pooled_layer.push_back(v); } return pooled_layer; } float reLU(float x) { return x > 0 ? x : 0; } void apply_activation(matrix& inp) { //parallel for (int i = 0; i < inp.size(); ++i) { for (int j = 0; j < inp[0].size(); ++j) { inp[i][j] = reLU(inp[i][j]); } } } //takes an input <img> applies all the filters then takes the output of that and applies the activation function followed by max pooling vector<matrix > feed_through_layer(matrix img, int img_shape[], vector<matrix > filter_bank, int filter_shape[]) //void feed_through_layer(matrix img, int img_shape[], vector<matrix > filter_bank, int filter_shape[]) { double beg = 0, end = 0; beg = omp_get_wtime(); vector<matrix > temp = apply_filter(img, img_shape, filter_bank, filter_shape); end = omp_get_wtime(); cout << "Time to apply_filter: " << end - beg << endl; beg = omp_get_wtime(); for (int i = 0;i < temp.size();i++) apply_activation(temp[i]); end = omp_get_wtime(); cout << "Time to apply activation: " << end - beg << endl; beg = omp_get_wtime(); int filtered_img_shape[] = { temp[0].size(),temp[0][0].size() }; temp = apply_maxPool_to_filters(temp, filtered_img_shape); end = omp_get_wtime(); cout << "Time to perform max pooling: " << end - beg << endl; return temp; } int main(int argc, char const* argv[]) { int num_filters = 4; int filter_shape[] = { 3,3 }; omp_set_num_threads(8); vector<matrix> filter_bank(num_filters, vector<vector<float>>(filter_shape[0], vector<float>(filter_shape[1]))); double beg = omp_get_wtime(); init_filters(num_filters, filter_shape, filter_bank); double end = omp_get_wtime(); cout << "Time to initialize filters: " << end - beg << endl; beg = omp_get_wtime(); char filename[] = "imgs.dat"; vector <matrix> imgs = load_matrix(filename, 100); int img_shape[] = { 200,200 }; end = omp_get_wtime(); cout << "Time to load dataset: " << end - beg << endl << "Loaded file: " << filename << endl; beg = omp_get_wtime(); for (int i = 0; i < 100; ++i) { double init = omp_get_wtime(); vector<matrix > final_layer = feed_through_layer(imgs[i], img_shape, filter_bank, filter_shape); double final = omp_get_wtime(); cout << "Time to perform convolution on image " << i << ": " << final - init << endl; } end = omp_get_wtime(); cout << endl << "time for all images: " << end - beg << endl; return 0; }
2840a62401792e3d5388c0e177916892c316104a.cu
#include <vector> #include <iostream> #include <random> #include <time.h> #include <math.h> #include <fstream> #include <omp.h> #include <cstdlib> #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <cuda_runtime_api.h> using namespace std; typedef vector<vector<float>> matrix; #define BLOCK_SIZE 16 class Convolution { public: Convolution(); ~Convolution(); }; vector<matrix> load_matrix(char filename[], int rows) { fstream file; float array[200][200]; vector<matrix> dataset; file.open(filename, ios::in | ios::binary); if (!file) { cout << "file not open" << endl; } for (int i = 0; i < rows; ++i) { matrix m; file.read((char*)array, 200 * 200 * sizeof(float)); for (int j = 0;j < 200;j++) { m.push_back(vector<float>(array[j], array[j] + 200)); } dataset.push_back(m); } /*for (int i = 0; i < 200; ++i) { for(int j=0;j<200;j++) cout<<array[i][j]<<" "; cout<<endl; }*/ cout << endl << endl << "loaded file" << endl; file.close(); return dataset; } void print_matrix(matrix a, int beg_row = 0, int beg_col = 0, int row = 0, int col = 0)// displays a float matrix { if (row == 0 && col == 0) { row = a.size(); col = a[0].size(); } for (int i = 0; i < row; ++i) { for (int j = 0;j < col;j++) { cout << a[beg_row + i][beg_col + j] << " "; } cout << endl; } } // creates <num_filters> number of filters and stores in filter_bank // <filter_shape> -> dimension of each filter //<filter_bank>-> array of filters void init_filters(int num_filters, int filter_shape[], vector<matrix >& filter_bank) { default_random_engine generator; normal_distribution<float> distribution(0.0, 0.1); for (int i = 0; i < num_filters; ++i) { for (int j = 0; j < filter_shape[0]; ++j) { for (int k = 0; k < filter_shape[1]; ++k) { float number = distribution(generator); filter_bank[i][j][k] = number; } } } } __global__ void cuda_matrix_multiply(matrix a, int a_beg_row, int a_beg_col, matrix b, int b_beg_row, int b_beg_col, int row, int col) { ; } //multiplies corresponding elements of 2 matrices' submatrix such that sub_mat(a)[<row>][<col>] X sub_mat(b)[<row>][<col>] //<a> -> first matrix //<b> -> second matrix matrix matrix_multiply(matrix a, int a_beg_row, int a_beg_col, matrix b, int b_beg_row, int b_beg_col, int row, int col) { matrix product(row, vector<float>(col, 0)); //parallel for (int i = 0; i < row; ++i) { for (int j = 0; j < col; ++j) { product[i][j] = a[a_beg_row + i][a_beg_col + j] * b[b_beg_row + i][b_beg_col + j]; //cout<<product[i][j]<<" "; } //cout<<endl; } return product; } //sum of the matrix float matrix_sum(matrix a) { //parallel float sum = 0; for (int i = 0; i < a.size(); ++i) { for (int j = 0; j < a[0].size(); ++j) { sum += a[i][j]; } } return sum; } //gets an input as an input and filters it using <filter> //<img> is the input image //<filter> is the applied filter matrix convolve(matrix img, int img_shape[], matrix filter, int filter_shape[], int stride) { matrix filtered_img; for (int i = 0; i <= img_shape[0] - filter_shape[0]; i += stride) { vector<float> v; for (int j = 0; j <= img_shape[1] - filter_shape[1]; j += stride) { float masked_values = matrix_sum(matrix_multiply(img, i, j, filter, 0, 0, filter_shape[0], filter_shape[1])); v.push_back(masked_values); } filtered_img.push_back(v); } return filtered_img; } //applies multiple filters from the <filter_bank> on the input <img> vector<matrix > apply_filter(matrix img, int img_shape[], vector<matrix > filter_bank, int filter_shape[]) { vector<matrix > convolved_layer; //parallel for (int i = 0; i < filter_bank.size(); ++i) { 
convolved_layer.push_back(convolve(img, img_shape, filter_bank[i], filter_shape, 1)); } return convolved_layer; } // max value in matrix <a> float matrix_max(matrix a, int beg_row, int beg_col, int row, int col) { float l = -INFINITY; for (int i = 0; i < row; ++i) { for (int j = 0; j < col; ++j) { if (a[beg_row + i][beg_col + j] > l) { l = a[beg_row + i][beg_col + j]; } } } return l; } //applies pooling function on the <img> // <stride> -> step size of pooling frame // <pool_dime> -> size of the frame matrix apply_maxPool(matrix img, int img_shape[], int stride, int pool_dim) { matrix pooled_img; for (int i = 0; i <= img_shape[0] - pool_dim; i += stride) { vector<float> v; for (int j = 0; j <= img_shape[1] - pool_dim; j += stride) { //cout<<i<<' '<<j<<endl; float max_val = matrix_max(img, i, j, pool_dim, pool_dim); v.push_back(max_val); } pooled_img.push_back(v); } return pooled_img; } vector<matrix > apply_maxPool_to_filters(vector<matrix > prev_layer, int img_shape[]) { vector<matrix > pooled_layer; //parallel for (int i = 0; i < prev_layer.size(); ++i) { matrix v = apply_maxPool(prev_layer[i], img_shape, 2, 2); pooled_layer.push_back(v); } return pooled_layer; } float reLU(float x) { return x > 0 ? x : 0; } void apply_activation(matrix& inp) { //parallel for (int i = 0; i < inp.size(); ++i) { for (int j = 0; j < inp[0].size(); ++j) { inp[i][j] = reLU(inp[i][j]); } } } //takes an input <img> applies all the filters then takes the output of that and applies the activation function followed by max pooling vector<matrix > feed_through_layer(matrix img, int img_shape[], vector<matrix > filter_bank, int filter_shape[]) //void feed_through_layer(matrix img, int img_shape[], vector<matrix > filter_bank, int filter_shape[]) { double beg = 0, end = 0; beg = omp_get_wtime(); vector<matrix > temp = apply_filter(img, img_shape, filter_bank, filter_shape); end = omp_get_wtime(); cout << "Time to apply_filter: " << end - beg << endl; beg = omp_get_wtime(); for (int i = 0;i < temp.size();i++) apply_activation(temp[i]); end = omp_get_wtime(); cout << "Time to apply activation: " << end - beg << endl; beg = omp_get_wtime(); int filtered_img_shape[] = { temp[0].size(),temp[0][0].size() }; temp = apply_maxPool_to_filters(temp, filtered_img_shape); end = omp_get_wtime(); cout << "Time to perform max pooling: " << end - beg << endl; return temp; } int main(int argc, char const* argv[]) { int num_filters = 4; int filter_shape[] = { 3,3 }; omp_set_num_threads(8); vector<matrix> filter_bank(num_filters, vector<vector<float>>(filter_shape[0], vector<float>(filter_shape[1]))); double beg = omp_get_wtime(); init_filters(num_filters, filter_shape, filter_bank); double end = omp_get_wtime(); cout << "Time to initialize filters: " << end - beg << endl; beg = omp_get_wtime(); char filename[] = "imgs.dat"; vector <matrix> imgs = load_matrix(filename, 100); int img_shape[] = { 200,200 }; end = omp_get_wtime(); cout << "Time to load dataset: " << end - beg << endl << "Loaded file: " << filename << endl; beg = omp_get_wtime(); for (int i = 0; i < 100; ++i) { double init = omp_get_wtime(); vector<matrix > final_layer = feed_through_layer(imgs[i], img_shape, filter_bank, filter_shape); double final = omp_get_wtime(); cout << "Time to perform convolution on image " << i << ": " << final - init << endl; } end = omp_get_wtime(); cout << endl << "time for all images: " << end - beg << endl; return 0; }
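In the convolution pair above, the stubbed cuda_matrix_multiply kernel takes the std::vector-based `matrix` type by value, which cannot be dereferenced in device code; a device version needs flat buffers. A hedged sketch (names and layout are illustrative, not the author's design) of the masked element-wise product that convolve() computes per window, written for row-major float arrays:

// One thread per element of the filter window; `partial` holds the products so that a
// separate reduction (e.g. thrust::reduce over `partial`) reproduces matrix_sum().
__global__ void mask_multiply(const float* img, int img_cols,
                              const float* filter, int f_rows, int f_cols,
                              int beg_row, int beg_col, float* partial)
{
    int i = blockIdx.y * blockDim.y + threadIdx.y;   // row inside the filter window
    int j = blockIdx.x * blockDim.x + threadIdx.x;   // col inside the filter window
    if (i < f_rows && j < f_cols) {
        partial[i * f_cols + j] =
            img[(beg_row + i) * img_cols + (beg_col + j)] * filter[i * f_cols + j];
    }
}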
d37a11bfe992779e908b2f7a4b3256efff10a458.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************************************** * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* This example demonstrates how to use the TileIterator in CUTLASS to load data from addressable memory, and store it back into addressable memory. TileIterator is a core concept in CUTLASS that enables efficient loading and storing of data from and to addressable memory. The TileIterator accepts a TileTraits type, which defines the shape of a tile and the distribution of accesses by individual entities, either threads or others. In this example, a LoadTileIterator is used to load elements from a tile in global memory, stored in column-major layout, into a fragment, and a corresponding StoreTileIterator is used to store the elements back into global memory (in the same column-major layout). https://devblogs.nvidia.com/cutlass-linear-algebra-cuda/ This example uses CUTLASS utilities to ease the matrix operations. */ // Standard Library includes #include <iostream> #include <sstream> #include <vector> // CUTLASS includes #include "cutlass/tile_iterator.h" #include "cutlass/tile_traits_standard.h" // // CUTLASS utility includes // // Defines operator<<() to write TensorView objects to std::ostream #include "tools/util/tensor_view_io.h" // Defines cutlass::HostMatrix<> #include "tools/util/host_matrix.h" // Defines cutlass::reference::device::TensorInitialize() #include "tools/util/reference/device/tensor_elementwise.h" // Defines cutlass::reference::host::TensorEquals() #include "tools/util/reference/host/tensor_elementwise.h" /////////////////////////////////////////////////////////////////////////////////////////////////// // // This function defines load and store tile iterators to load and store a M-by-K tile, in // column-major layout, from and back into global memory. 
// /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Traits> __global__ void cutlass_tile_iterator_load_store_global( float const *input, float *output, int M, int K) { // Define a tile load iterator typedef cutlass::TileLoadIterator< Traits, // the Traits type, defines shape/distribution of accesses float, // elements are of type float cutlass::IteratorAdvance::kH, // post-increment accesses advance in strided (as opposed to // contiguous dimension cutlass::MemorySpace::kGlobal // iterator loads from global memory > TileLoadIterator; // Defines a tile store iterator typedef cutlass::TileStoreIterator< Traits, // the Traits type, defines shape/distribution of accesses float, // elements are of type float cutlass::IteratorAdvance::kH, // post-increment accesses advance in strided (as opposed to // contiguous) dimension cutlass::MemorySpace::kGlobal // iterator stores into global memory > TileStoreIterator; // Defines a predicate vector for managing statically sized vector of boolean predicates typedef typename TileLoadIterator::PredicateVector PredicateVector; // The parameters specified to the iterators. These include the pointer to the source of // addressable memory, and the strides and increments for each of the tile's dimensions typename TileLoadIterator::Params load_params; typename TileStoreIterator::Params store_params; // Initializing the parameters for both of the iterators. The TileLoadIterator accesses the // input matrix and TileStoreIterator accesses the output matrix. The strides are set // identically since the data is being stored in the same way as it is loaded (column-major // mapping). load_params.initialize(input, M*K, M, 1); store_params.initialize(output, M*K, M, 1); // Constructing the tile load and store iterators, and the predicates vector TileLoadIterator load_iterator(load_params); TileStoreIterator store_iterator(store_params); PredicateVector predicates; // Initializing the predicates with bounds set to <1, K, M>. This protects out-of-bounds loads. load_iterator.initialize_predicates(predicates.begin(), cutlass::make_Coord(1, K, M)); // The fragment in which the elements are loaded into and stored from. typename TileLoadIterator::Fragment fragment; // Loading a tile into a fragment and advancing to the next tile's position load_iterator.load_post_increment(fragment, predicates.begin()); // Storing a tile from fragment and advancing to the next tile's position store_iterator.store_post_increment(fragment); } /////////////////////////////////////////////////////////////////////////////////////////////////// // Launches cutlass_tile_iterator_load_store_global kernel hipError_t test_cutlass_tile_iterator() { hipError_t result = hipSuccess; // Creating a M-by-K (128-by-8) tile for this example. static int const M = 128; static int const K = 8; // The kernel is launched with 128 threads per thread block. static int const kThreadsPerThreadBlock = 128; // Define the tile type typedef cutlass::Shape<1, 8, 128> Tile; // CUTLASS provides a standard TileTraits type, which chooses the 'best' shape to enable warp // raking along the contiguous dimension if possible. typedef cutlass::TileTraitsStandard<Tile, kThreadsPerThreadBlock> Traits; // M-by-K input matrix of float cutlass::HostMatrix<float> input(cutlass::MatrixCoord(M, K)); // M-by-K output matrix of float cutlass::HostMatrix<float> output(cutlass::MatrixCoord(M, K)); // // Initialize input matrix with linear combination. 
// cutlass::Distribution dist; // Linear distribution in column-major format. dist.set_linear(1, 1, M); // Arbitrary RNG seed value. Hard-coded for deterministic results. int seed = 2080; cutlass::reference::device::TensorInitialize( input.device_view(), // concept: TensorView seed, dist); // Initialize output matrix to all zeroes. output.fill(0); // Launch kernel to load and store tiles from/to global memory. hipLaunchKernelGGL(( cutlass_tile_iterator_load_store_global<Traits>), dim3(dim3(1, 1, 1)), dim3(dim3(kThreadsPerThreadBlock, 1)) , 0, 0, input.device_data(), output.device_data(), M, K); result = hipDeviceSynchronize(); if (result != hipSuccess) { return result; } // Copy results to host output.sync_host(); // Verify results for(int i = 0; i < M; ++i) { for(int j = 0; j < K; ++j) { if(output.at(cutlass::make_Coord(i, j)) != float(M*j+i+1)){ std::cout << "FAILED: (" << i << ", " << j << ") -- expected: " << (M*j+i+1) << ", actual: " << output.at(cutlass::make_Coord(i, j)) << std::endl; result = hipErrorUnknown; break; } } } return result; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Entry point to tile_iterator example. // // usage: // // 04_tile_iterator // int main(int argc, const char *arg[]) { // Properties of CUDA device hipDeviceProp_t device_properties; // Assumne the device id is 0. int device_id = 0; hipError_t result = hipGetDeviceProperties(&device_properties, device_id); if (result != hipSuccess) { std::cerr << "Failed to get device properties: " << hipGetErrorString(result) << std::endl; return -1; } // // Run the CUTLASS tile iterator test. // result = test_cutlass_tile_iterator(); if (result == hipSuccess) { std::cout << "Passed." << std::endl; } // Exit. return result == hipSuccess ? 0 : -1; } ///////////////////////////////////////////////////////////////////////////////////////////////////
d37a11bfe992779e908b2f7a4b3256efff10a458.cu
/*************************************************************************************************** * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* This example demonstrates how to use the TileIterator in CUTLASS to load data from addressable memory, and store it back into addressable memory. TileIterator is a core concept in CUTLASS that enables efficient loading and storing of data from and to addressable memory. The TileIterator accepts a TileTraits type, which defines the shape of a tile and the distribution of accesses by individual entities, either threads or others. In this example, a LoadTileIterator is used to load elements from a tile in global memory, stored in column-major layout, into a fragment, and a corresponding StoreTileIterator is used to store the elements back into global memory (in the same column-major layout). https://devblogs.nvidia.com/cutlass-linear-algebra-cuda/ This example uses CUTLASS utilities to ease the matrix operations. */ // Standard Library includes #include <iostream> #include <sstream> #include <vector> // CUTLASS includes #include "cutlass/tile_iterator.h" #include "cutlass/tile_traits_standard.h" // // CUTLASS utility includes // // Defines operator<<() to write TensorView objects to std::ostream #include "tools/util/tensor_view_io.h" // Defines cutlass::HostMatrix<> #include "tools/util/host_matrix.h" // Defines cutlass::reference::device::TensorInitialize() #include "tools/util/reference/device/tensor_elementwise.h" // Defines cutlass::reference::host::TensorEquals() #include "tools/util/reference/host/tensor_elementwise.h" /////////////////////////////////////////////////////////////////////////////////////////////////// // // This function defines load and store tile iterators to load and store a M-by-K tile, in // column-major layout, from and back into global memory. 
// /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Traits> __global__ void cutlass_tile_iterator_load_store_global( float const *input, float *output, int M, int K) { // Define a tile load iterator typedef cutlass::TileLoadIterator< Traits, // the Traits type, defines shape/distribution of accesses float, // elements are of type float cutlass::IteratorAdvance::kH, // post-increment accesses advance in strided (as opposed to // contiguous dimension cutlass::MemorySpace::kGlobal // iterator loads from global memory > TileLoadIterator; // Defines a tile store iterator typedef cutlass::TileStoreIterator< Traits, // the Traits type, defines shape/distribution of accesses float, // elements are of type float cutlass::IteratorAdvance::kH, // post-increment accesses advance in strided (as opposed to // contiguous) dimension cutlass::MemorySpace::kGlobal // iterator stores into global memory > TileStoreIterator; // Defines a predicate vector for managing statically sized vector of boolean predicates typedef typename TileLoadIterator::PredicateVector PredicateVector; // The parameters specified to the iterators. These include the pointer to the source of // addressable memory, and the strides and increments for each of the tile's dimensions typename TileLoadIterator::Params load_params; typename TileStoreIterator::Params store_params; // Initializing the parameters for both of the iterators. The TileLoadIterator accesses the // input matrix and TileStoreIterator accesses the output matrix. The strides are set // identically since the data is being stored in the same way as it is loaded (column-major // mapping). load_params.initialize(input, M*K, M, 1); store_params.initialize(output, M*K, M, 1); // Constructing the tile load and store iterators, and the predicates vector TileLoadIterator load_iterator(load_params); TileStoreIterator store_iterator(store_params); PredicateVector predicates; // Initializing the predicates with bounds set to <1, K, M>. This protects out-of-bounds loads. load_iterator.initialize_predicates(predicates.begin(), cutlass::make_Coord(1, K, M)); // The fragment in which the elements are loaded into and stored from. typename TileLoadIterator::Fragment fragment; // Loading a tile into a fragment and advancing to the next tile's position load_iterator.load_post_increment(fragment, predicates.begin()); // Storing a tile from fragment and advancing to the next tile's position store_iterator.store_post_increment(fragment); } /////////////////////////////////////////////////////////////////////////////////////////////////// // Launches cutlass_tile_iterator_load_store_global kernel cudaError_t test_cutlass_tile_iterator() { cudaError_t result = cudaSuccess; // Creating a M-by-K (128-by-8) tile for this example. static int const M = 128; static int const K = 8; // The kernel is launched with 128 threads per thread block. static int const kThreadsPerThreadBlock = 128; // Define the tile type typedef cutlass::Shape<1, 8, 128> Tile; // CUTLASS provides a standard TileTraits type, which chooses the 'best' shape to enable warp // raking along the contiguous dimension if possible. typedef cutlass::TileTraitsStandard<Tile, kThreadsPerThreadBlock> Traits; // M-by-K input matrix of float cutlass::HostMatrix<float> input(cutlass::MatrixCoord(M, K)); // M-by-K output matrix of float cutlass::HostMatrix<float> output(cutlass::MatrixCoord(M, K)); // // Initialize input matrix with linear combination. 
// cutlass::Distribution dist; // Linear distribution in column-major format. dist.set_linear(1, 1, M); // Arbitrary RNG seed value. Hard-coded for deterministic results. int seed = 2080; cutlass::reference::device::TensorInitialize( input.device_view(), // concept: TensorView seed, dist); // Initialize output matrix to all zeroes. output.fill(0); // Launch kernel to load and store tiles from/to global memory. cutlass_tile_iterator_load_store_global<Traits><<< dim3(1, 1, 1), dim3(kThreadsPerThreadBlock, 1) >>>(input.device_data(), output.device_data(), M, K); result = cudaDeviceSynchronize(); if (result != cudaSuccess) { return result; } // Copy results to host output.sync_host(); // Verify results for(int i = 0; i < M; ++i) { for(int j = 0; j < K; ++j) { if(output.at(cutlass::make_Coord(i, j)) != float(M*j+i+1)){ std::cout << "FAILED: (" << i << ", " << j << ") -- expected: " << (M*j+i+1) << ", actual: " << output.at(cutlass::make_Coord(i, j)) << std::endl; result = cudaErrorUnknown; break; } } } return result; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Entry point to tile_iterator example. // // usage: // // 04_tile_iterator // int main(int argc, const char *arg[]) { // Properties of CUDA device cudaDeviceProp device_properties; // Assumne the device id is 0. int device_id = 0; cudaError_t result = cudaGetDeviceProperties(&device_properties, device_id); if (result != cudaSuccess) { std::cerr << "Failed to get device properties: " << cudaGetErrorString(result) << std::endl; return -1; } // // Run the CUTLASS tile iterator test. // result = test_cutlass_tile_iterator(); if (result == cudaSuccess) { std::cout << "Passed." << std::endl; } // Exit. return result == cudaSuccess ? 0 : -1; } ///////////////////////////////////////////////////////////////////////////////////////////////////
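Every pair in this corpus illustrates the same launch-syntax rewrite hipify performs on the triple-chevron form seen in the .cu file above. A standalone sketch (hypothetical kernel and helper) showing the CUDA launch and, in the comment, the hipLaunchKernelGGL form it becomes, up to the tool's extra dim3() wrapping visible in the .hip files:

#include <cuda_runtime.h>

__global__ void scale(float* x, int n, float a)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

void launch_scale(float* d_x, int n, float a, cudaStream_t stream)
{
    dim3 grid((n + 255) / 256), block(256);
    // hipify rewrites the launch below to
    //   hipLaunchKernelGGL(scale, grid, block, 0, stream, d_x, n, a);
    // i.e. grid, block, dynamic shared memory bytes and stream become explicit arguments.
    scale<<<grid, block, 0, stream>>>(d_x, n, a);
}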
73767174422d932955b7b8ea9f4322d31b3db140.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "test_utils.h" #include <gtest/gtest.h> #include <raft/label/classlabels.cuh> #include <raft/linalg/reduce.cuh> #include <raft/random/rng.cuh> #include <raft/spatial/knn/knn.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> #include <selection/knn.cuh> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/extrema.h> #include <iostream> #include <vector> namespace MLCommon { namespace Selection { struct KNNRegressionInputs { int rows; int cols; int n_labels; float cluster_std; int k; }; void generate_data( float* out_samples, float* out_labels, int n_rows, int n_cols, hipStream_t stream) { raft::random::Rng r(0ULL, raft::random::GenPC); r.uniform(out_samples, n_rows * n_cols, 0.0f, 1.0f, stream); raft::linalg::unaryOp<float>( out_samples, out_samples, n_rows, [=] __device__(float input) { return 2 * input - 1; }, stream); raft::linalg::reduce( out_labels, out_samples, n_cols, n_rows, 0.0f, true, true, stream, false, [=] __device__(float in, int n) { return in * in; }, raft::Sum<float>(), [=] __device__(float in) { return sqrt(in); }); thrust::device_ptr<float> d_ptr = thrust::device_pointer_cast(out_labels); float max = *(thrust::max_element(thrust::hip::par.on(stream), d_ptr, d_ptr + n_rows)); raft::linalg::unaryOp<float>( out_labels, out_labels, n_rows, [=] __device__(float input) { return input / max; }, stream); } class KNNRegressionTest : public ::testing::TestWithParam<KNNRegressionInputs> { public: KNNRegressionTest() : params(::testing::TestWithParam<KNNRegressionInputs>::GetParam()), stream(handle.get_stream()), train_samples(params.rows * params.cols, stream), train_labels(params.rows, stream), pred_labels(params.rows, stream), knn_indices(params.rows * params.k, stream), knn_dists(params.rows * params.k, stream) { } protected: void basicTest() { generate_data(train_samples.data(), train_labels.data(), params.rows, params.cols, stream); std::vector<float*> ptrs(1); std::vector<int> sizes(1); ptrs[0] = train_samples.data(); sizes[0] = params.rows; raft::spatial::knn::brute_force_knn(handle, ptrs, sizes, params.cols, train_samples.data(), params.rows, knn_indices.data(), knn_dists.data(), params.k); std::vector<float*> y; y.push_back(train_labels.data()); knn_regress( handle, pred_labels.data(), knn_indices.data(), y, params.rows, params.rows, params.k); handle.sync_stream(stream); } void SetUp() override { basicTest(); } protected: raft::handle_t handle; hipStream_t stream; KNNRegressionInputs params; rmm::device_uvector<float> train_samples; rmm::device_uvector<float> train_labels; rmm::device_uvector<float> pred_labels; rmm::device_uvector<int64_t> knn_indices; rmm::device_uvector<float> knn_dists; }; typedef KNNRegressionTest KNNRegressionTestF; TEST_P(KNNRegressionTestF, Fit) { ASSERT_TRUE(devArrMatch( train_labels.data(), 
pred_labels.data(), params.rows, MLCommon::CompareApprox<float>(0.3))); } const std::vector<KNNRegressionInputs> inputsf = {{100, 10, 2, 0.01f, 2}, {1000, 10, 5, 0.01f, 2}, {10000, 10, 5, 0.01f, 2}, {100, 10, 2, 0.01f, 10}, {1000, 10, 5, 0.01f, 10}, {10000, 10, 5, 0.01f, 10}, {100, 10, 2, 0.01f, 15}, {1000, 10, 5, 0.01f, 15}, {10000, 10, 5, 0.01f, 15}}; INSTANTIATE_TEST_CASE_P(KNNRegressionTest, KNNRegressionTestF, ::testing::ValuesIn(inputsf)); }; // end namespace Selection }; // namespace MLCommon
73767174422d932955b7b8ea9f4322d31b3db140.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "test_utils.h" #include <gtest/gtest.h> #include <raft/label/classlabels.cuh> #include <raft/linalg/reduce.cuh> #include <raft/random/rng.cuh> #include <raft/spatial/knn/knn.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> #include <selection/knn.cuh> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/extrema.h> #include <iostream> #include <vector> namespace MLCommon { namespace Selection { struct KNNRegressionInputs { int rows; int cols; int n_labels; float cluster_std; int k; }; void generate_data( float* out_samples, float* out_labels, int n_rows, int n_cols, cudaStream_t stream) { raft::random::Rng r(0ULL, raft::random::GenPC); r.uniform(out_samples, n_rows * n_cols, 0.0f, 1.0f, stream); raft::linalg::unaryOp<float>( out_samples, out_samples, n_rows, [=] __device__(float input) { return 2 * input - 1; }, stream); raft::linalg::reduce( out_labels, out_samples, n_cols, n_rows, 0.0f, true, true, stream, false, [=] __device__(float in, int n) { return in * in; }, raft::Sum<float>(), [=] __device__(float in) { return sqrt(in); }); thrust::device_ptr<float> d_ptr = thrust::device_pointer_cast(out_labels); float max = *(thrust::max_element(thrust::cuda::par.on(stream), d_ptr, d_ptr + n_rows)); raft::linalg::unaryOp<float>( out_labels, out_labels, n_rows, [=] __device__(float input) { return input / max; }, stream); } class KNNRegressionTest : public ::testing::TestWithParam<KNNRegressionInputs> { public: KNNRegressionTest() : params(::testing::TestWithParam<KNNRegressionInputs>::GetParam()), stream(handle.get_stream()), train_samples(params.rows * params.cols, stream), train_labels(params.rows, stream), pred_labels(params.rows, stream), knn_indices(params.rows * params.k, stream), knn_dists(params.rows * params.k, stream) { } protected: void basicTest() { generate_data(train_samples.data(), train_labels.data(), params.rows, params.cols, stream); std::vector<float*> ptrs(1); std::vector<int> sizes(1); ptrs[0] = train_samples.data(); sizes[0] = params.rows; raft::spatial::knn::brute_force_knn(handle, ptrs, sizes, params.cols, train_samples.data(), params.rows, knn_indices.data(), knn_dists.data(), params.k); std::vector<float*> y; y.push_back(train_labels.data()); knn_regress( handle, pred_labels.data(), knn_indices.data(), y, params.rows, params.rows, params.k); handle.sync_stream(stream); } void SetUp() override { basicTest(); } protected: raft::handle_t handle; cudaStream_t stream; KNNRegressionInputs params; rmm::device_uvector<float> train_samples; rmm::device_uvector<float> train_labels; rmm::device_uvector<float> pred_labels; rmm::device_uvector<int64_t> knn_indices; rmm::device_uvector<float> knn_dists; }; typedef KNNRegressionTest KNNRegressionTestF; TEST_P(KNNRegressionTestF, Fit) { ASSERT_TRUE(devArrMatch( train_labels.data(), pred_labels.data(), params.rows, 
MLCommon::CompareApprox<float>(0.3))); } const std::vector<KNNRegressionInputs> inputsf = {{100, 10, 2, 0.01f, 2}, {1000, 10, 5, 0.01f, 2}, {10000, 10, 5, 0.01f, 2}, {100, 10, 2, 0.01f, 10}, {1000, 10, 5, 0.01f, 10}, {10000, 10, 5, 0.01f, 10}, {100, 10, 2, 0.01f, 15}, {1000, 10, 5, 0.01f, 15}, {10000, 10, 5, 0.01f, 15}}; INSTANTIATE_TEST_CASE_P(KNNRegressionTest, KNNRegressionTestF, ::testing::ValuesIn(inputsf)); }; // end namespace Selection }; // namespace MLCommon
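In the test pair above the device-side changes hipify makes are the Thrust execution policy (thrust::cuda::par.on(stream) becomes thrust::hip::par.on(stream)) and the stream type. A minimal standalone sketch, not taken from the test, of running a Thrust algorithm on a chosen stream with that policy (helper name is the editor's):

#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/extrema.h>

// Returns the maximum of a device vector, executing the algorithm on `stream`.
float device_max(const thrust::device_vector<float>& v, cudaStream_t stream)
{
    // Dereferencing the returned device iterator copies the single result back to the host.
    return *thrust::max_element(thrust::cuda::par.on(stream), v.begin(), v.end());
}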
0e54b4b09dcc39adf019a353050f470ee37e67b2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" float PerformParallelSort_WithoutSharedMemory(int n, int A[], int N, int I, int T, float* t) { float totalTime_p; float totalSortingTime; float totalCopyingTime; hipEvent_t* s_evt = new hipEvent_t[n]; hipEvent_t* e_evt = new hipEvent_t[n]; hipEvent_t startEvent_p, endEvent_p; hipEvent_t startCopy, endCopy; hipEvent_t startSort, endSort; hipError_t errorCode; // Creating events for measuring performance hipEventCreate(&startEvent_p); hipEventCreate(&endEvent_p); hipEventCreate(&startCopy); hipEventCreate(&endCopy); hipEventCreate(&startSort); hipEventCreate(&endSort); for (int i = 0; i < n; i++) { hipEventCreate(&s_evt[i]); hipEventCreate(&e_evt[i]); } errorCode = hipMemcpy(dev_a, A, sizeof(int)*N, hipMemcpyHostToDevice); ASSERT(errorCode); hipEventRecord(startEvent_p); int* C = new int[N]; // NumberOfElementsPerBlock = Shared memory size / 2 // Sort<<<NumberOfBlocks, NumberOfThreadsPerBlock, SizeOfSharedMemory>>> // (Source,Destination,NumberOfElementsPerBlock) DeviceProperties deviceProps; hipEventRecord(startSort); for (int i = 0; i < I; i++) { int n = 0; for (int d = 1; d <= (N/2); d *= 2) { hipEventRecord(s_evt[n]); int nThreads = min(T, N / (2*d)); int numberOfBlocks = N / (2*d*nThreads); dim3 nBlocks = GetNBlocks(numberOfBlocks, deviceProps.MaxNumberOfBlocks); hipLaunchKernelGGL(( Sort_NonSharedMemory), dim3(nBlocks),dim3(nThreads), 0, 0, dev_a, dev_c, d); hipDeviceSynchronize(); errorCode = hipGetLastError(); ASSERT(errorCode); // Swap contents of dev_c & dev_a SwapDeviceArrays(); hipEventRecord(e_evt[n]); hipEventSynchronize(e_evt[n]); hipEventElapsedTime(&t[n], s_evt[n], e_evt[n]); n++; } } hipEventRecord(endSort); hipEventSynchronize(endSort); hipEventElapsedTime(&totalSortingTime, startSort, endSort); hipEventRecord(startCopy); errorCode = hipMemcpy(C, dev_a, sizeof(int)*N, hipMemcpyDeviceToHost); hipEventRecord(endCopy); hipEventSynchronize(endCopy); hipEventElapsedTime(&totalCopyingTime, startCopy, endCopy); hipEventRecord(endEvent_p); hipEventSynchronize(endEvent_p); hipEventElapsedTime(&totalTime_p, startEvent_p, endEvent_p); //cout << totalCopyingTime << " [msec] for copying results to host." << endl; delete[] C; delete[] s_evt; delete[] e_evt; return totalTime_p / I; }
0e54b4b09dcc39adf019a353050f470ee37e67b2.cu
float PerformParallelSort_WithoutSharedMemory(int n, int A[], int N, int I, int T, float* t) { float totalTime_p; float totalSortingTime; float totalCopyingTime; cudaEvent_t* s_evt = new cudaEvent_t[n]; cudaEvent_t* e_evt = new cudaEvent_t[n]; cudaEvent_t startEvent_p, endEvent_p; cudaEvent_t startCopy, endCopy; cudaEvent_t startSort, endSort; cudaError_t errorCode; // Creating events for measuring performance cudaEventCreate(&startEvent_p); cudaEventCreate(&endEvent_p); cudaEventCreate(&startCopy); cudaEventCreate(&endCopy); cudaEventCreate(&startSort); cudaEventCreate(&endSort); for (int i = 0; i < n; i++) { cudaEventCreate(&s_evt[i]); cudaEventCreate(&e_evt[i]); } errorCode = cudaMemcpy(dev_a, A, sizeof(int)*N, cudaMemcpyHostToDevice); ASSERT(errorCode); cudaEventRecord(startEvent_p); int* C = new int[N]; // NumberOfElementsPerBlock = Shared memory size / 2 // Sort<<<NumberOfBlocks, NumberOfThreadsPerBlock, SizeOfSharedMemory>>> // (Source,Destination,NumberOfElementsPerBlock) DeviceProperties deviceProps; cudaEventRecord(startSort); for (int i = 0; i < I; i++) { int n = 0; for (int d = 1; d <= (N/2); d *= 2) { cudaEventRecord(s_evt[n]); int nThreads = min(T, N / (2*d)); int numberOfBlocks = N / (2*d*nThreads); dim3 nBlocks = GetNBlocks(numberOfBlocks, deviceProps.MaxNumberOfBlocks); Sort_NonSharedMemory<<<nBlocks,nThreads>>>(dev_a, dev_c, d); cudaThreadSynchronize(); errorCode = cudaGetLastError(); ASSERT(errorCode); // Swap contents of dev_c & dev_a SwapDeviceArrays(); cudaEventRecord(e_evt[n]); cudaEventSynchronize(e_evt[n]); cudaEventElapsedTime(&t[n], s_evt[n], e_evt[n]); n++; } } cudaEventRecord(endSort); cudaEventSynchronize(endSort); cudaEventElapsedTime(&totalSortingTime, startSort, endSort); cudaEventRecord(startCopy); errorCode = cudaMemcpy(C, dev_a, sizeof(int)*N, cudaMemcpyDeviceToHost); cudaEventRecord(endCopy); cudaEventSynchronize(endCopy); cudaEventElapsedTime(&totalCopyingTime, startCopy, endCopy); cudaEventRecord(endEvent_p); cudaEventSynchronize(endEvent_p); cudaEventElapsedTime(&totalTime_p, startEvent_p, endEvent_p); //cout << totalCopyingTime << " [msec] for copying results to host." << endl; delete[] C; delete[] s_evt; delete[] e_evt; return totalTime_p / I; }
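Besides mapping the deprecated cudaThreadSynchronize() to hipDeviceSynchronize(), the pair above centres on CUDA event timing. A minimal self-contained sketch of that pattern (the helper name and launch functor are illustrative, not from the file):

#include <cuda_runtime.h>

// Times `iters` invocations of an already-configured launch functor and returns the
// average elapsed GPU milliseconds between the two recorded events.
template <typename Launch>
float time_kernel_ms(Launch launch, int iters)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    for (int i = 0; i < iters; ++i) launch();   // enqueue the measured work
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);                 // block until the stop event has completed

    float ms = 0.f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms / iters;
}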
50e7d62e4cd0aa954345260793783cace75fd9d1.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vec_fmin.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; double *result = NULL; hipMalloc(&result, XSIZE*YSIZE); double *x = NULL; hipMalloc(&x, XSIZE*YSIZE); double *y = NULL; hipMalloc(&y, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vec_fmin), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vec_fmin), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vec_fmin), dim3(gridBlock),dim3(threadBlock), 0, 0, n,result,x,y); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
50e7d62e4cd0aa954345260793783cace75fd9d1.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vec_fmin.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int n = XSIZE*YSIZE; double *result = NULL; cudaMalloc(&result, XSIZE*YSIZE); double *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); double *y = NULL; cudaMalloc(&y, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vec_fmin<<<gridBlock,threadBlock>>>(n,result,x,y); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vec_fmin<<<gridBlock,threadBlock>>>(n,result,x,y); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vec_fmin<<<gridBlock,threadBlock>>>(n,result,x,y); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
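The benchmark above times 1000 launches with std::chrono but reads the end timestamp without synchronizing the device, so for short kernels the interval may under-count execution time and mostly measure launch enqueueing. A sketch of the same measurement with an explicit synchronize (helper name is illustrative, not from the file):

#include <chrono>
#include <cuda_runtime.h>

// Average host-measured microseconds per iteration, including device execution time.
template <typename Launch>
float time_avg_usecs(Launch launch, int iters)
{
    using namespace std::chrono;
    auto start = steady_clock::now();
    for (int i = 0; i < iters; ++i) launch();
    cudaDeviceSynchronize();                     // drain the stream before stopping the clock
    auto end = steady_clock::now();
    return duration_cast<duration<float, microseconds::period>>(end - start).count() / iters;
}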
f4fad39159cf8d1a40e71a0708acf7a1dcb107f2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* ============================================================================ Name : nothing.cu Author : Version : Copyright : Your copyright notice Description : CUDA compute reciprocals ============================================================================ */ #include <iostream> #include <numeric> #include <stdlib.h> static void CheckCudaErrorAux (const char *, unsigned, const char *, hipError_t); #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) /** * CUDA kernel that computes reciprocal values for a given vector */ __global__ void reciprocalKernel(float *data, unsigned vectorSize) { unsigned idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < vectorSize) data[idx] = 1.0/data[idx]; } /** * Host function that copies the data and launches the work on GPU */ float *gpuReciprocal(float *data, unsigned size) { float *rc = new float[size]; float *gpuData; CUDA_CHECK_RETURN(hipMalloc((void **)&gpuData, sizeof(float)*size)); CUDA_CHECK_RETURN(hipMemcpy(gpuData, data, sizeof(float)*size, hipMemcpyHostToDevice)); static const int BLOCK_SIZE = 256; const int blockCount = (size+BLOCK_SIZE-1)/BLOCK_SIZE; hipLaunchKernelGGL(( reciprocalKernel), dim3(blockCount), dim3(BLOCK_SIZE), 0, 0, gpuData, size); CUDA_CHECK_RETURN(hipMemcpy(rc, gpuData, sizeof(float)*size, hipMemcpyDeviceToHost)); CUDA_CHECK_RETURN(hipFree(gpuData)); return rc; } float *cpuReciprocal(float *data, unsigned size) { float *rc = new float[size]; for (unsigned cnt = 0; cnt < size; ++cnt) rc[cnt] = 1.0/data[cnt]; return rc; } void initialize(float *data, unsigned size) { for (unsigned i = 0; i < size; ++i) data[i] = .5*(i+1); } int main(void) { static const int WORK_SIZE = 65530; float *data = new float[WORK_SIZE]; initialize (data, WORK_SIZE); float *recCpu = cpuReciprocal(data, WORK_SIZE); float *recGpu = gpuReciprocal(data, WORK_SIZE); float cpuSum = std::accumulate (recCpu, recCpu+WORK_SIZE, 0.0); float gpuSum = std::accumulate (recGpu, recGpu+WORK_SIZE, 0.0); /* Verify the results */ std::cout<<"gpuSum = "<<gpuSum<< " cpuSum = " <<cpuSum<<std::endl; /* Free memory */ delete[] data; delete[] recCpu; delete[] recGpu; return 0; } /** * Check the return value of the CUDA runtime API call and exit * the application if the call has failed. */ static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, hipError_t err) { if (err == hipSuccess) return; std::cerr << statement<<" returned " << hipGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl; exit (1); }
f4fad39159cf8d1a40e71a0708acf7a1dcb107f2.cu
/* ============================================================================ Name : nothing.cu Author : Version : Copyright : Your copyright notice Description : CUDA compute reciprocals ============================================================================ */ #include <iostream> #include <numeric> #include <stdlib.h> static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t); #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) /** * CUDA kernel that computes reciprocal values for a given vector */ __global__ void reciprocalKernel(float *data, unsigned vectorSize) { unsigned idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < vectorSize) data[idx] = 1.0/data[idx]; } /** * Host function that copies the data and launches the work on GPU */ float *gpuReciprocal(float *data, unsigned size) { float *rc = new float[size]; float *gpuData; CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuData, sizeof(float)*size)); CUDA_CHECK_RETURN(cudaMemcpy(gpuData, data, sizeof(float)*size, cudaMemcpyHostToDevice)); static const int BLOCK_SIZE = 256; const int blockCount = (size+BLOCK_SIZE-1)/BLOCK_SIZE; reciprocalKernel<<<blockCount, BLOCK_SIZE>>> (gpuData, size); CUDA_CHECK_RETURN(cudaMemcpy(rc, gpuData, sizeof(float)*size, cudaMemcpyDeviceToHost)); CUDA_CHECK_RETURN(cudaFree(gpuData)); return rc; } float *cpuReciprocal(float *data, unsigned size) { float *rc = new float[size]; for (unsigned cnt = 0; cnt < size; ++cnt) rc[cnt] = 1.0/data[cnt]; return rc; } void initialize(float *data, unsigned size) { for (unsigned i = 0; i < size; ++i) data[i] = .5*(i+1); } int main(void) { static const int WORK_SIZE = 65530; float *data = new float[WORK_SIZE]; initialize (data, WORK_SIZE); float *recCpu = cpuReciprocal(data, WORK_SIZE); float *recGpu = gpuReciprocal(data, WORK_SIZE); float cpuSum = std::accumulate (recCpu, recCpu+WORK_SIZE, 0.0); float gpuSum = std::accumulate (recGpu, recGpu+WORK_SIZE, 0.0); /* Verify the results */ std::cout<<"gpuSum = "<<gpuSum<< " cpuSum = " <<cpuSum<<std::endl; /* Free memory */ delete[] data; delete[] recCpu; delete[] recGpu; return 0; } /** * Check the return value of the CUDA runtime API call and exit * the application if the call has failed. */ static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err) { if (err == cudaSuccess) return; std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl; exit (1); }
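One small gap in the pattern above: CUDA_CHECK_RETURN wraps the memory calls, but a kernel launch returns no cudaError_t, so launch failures go unreported (and writing the literal as 1.0f would keep the reciprocal in single precision on the device). An illustrative addition, not in the file, showing how the launch inside gpuReciprocal could be checked with the file's own macro:

reciprocalKernel<<<blockCount, BLOCK_SIZE>>>(gpuData, size);
CUDA_CHECK_RETURN(cudaPeekAtLastError());    // reports invalid launch configurations
CUDA_CHECK_RETURN(cudaDeviceSynchronize());  // surfaces errors raised while the kernel runs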
6e59f5b79084ddcc77137fc9e4a3397d4f389e0a.hip
// !!! This is a file automatically generated by hipify!!! #define FP float #define BDIM 512 #define MAX_ITERATION 1500 #include <stdio.h> #include <hip/hip_runtime.h> #include <stdlib.h> #include <math.h> // handle vector operations, c = a + ceof * b __global__ void gpu_vecop(FP *a, FP *b, FP *c, FP coef, int N) { int index = threadIdx.x + blockDim.x * blockIdx.x; if(index < N) c[index] = a[index] + coef * b[index]; } // handle dop product, res = dot(a, b) __global__ void gpu_vecdot(FP *a, FP *b, FP *c, int N) { int index = threadIdx.x + blockDim.x * blockIdx.x; if(index < N) { c[index] = a[index] * b[index]; } } // handle assignment, a = b; __global__ void gpu_vecassign(FP *a, FP *b, int N) { int index = threadIdx.x + blockDim.x * blockIdx.x; if(index < N) a[index] = b[index]; } // reset vector, a = 0 __global__ void gpu_vecreset(FP *a, int N) { int index = threadIdx.x + blockDim.x * blockIdx.x; if(index < N) a[index] = 0.; } // handle matrix times vector, c = a * b __global__ void gpu_matmulvec(FP *a, int *row, int *col, FP *b, FP *c, int Nz) { int index = threadIdx.x + blockDim.x * blockIdx.x; int start_idx = row[index], end_idx = row[index+1]; for(int i=start_idx; i<end_idx; i++) { c[index] += a[i] * b[col[i]]; } } // use CSR scheme to store the matrix void initialize(FP *A, int *JR, int *JC, FP *b, int n, int N) { // initialize matrix A int index = 0; FP a[] = {-4, 20, -4}; FP t[] = {-1, -4, -1}; for (int grid_row = 0; grid_row < n; grid_row++) { int t0_grid_left = (grid_row - 1) * n, t0_grid_right = grid_row * n; int a_grid_left = grid_row * n, a_grid_right = (grid_row + 1) * n; int t1_grid_left = (grid_row + 1) * n, t1_grid_right = (grid_row + 2) * n; for (int row = grid_row * n, col = (grid_row - 1) * n - 1; row < (grid_row + 1) * n; row++, col++) { JR[row] = index; int base_col = col; //set up t0 if (grid_row != 0) { for (int i = 0; i < 3; i++) { int col_idx = base_col + i; if (t0_grid_left <= col_idx && base_col <= col_idx && col_idx < base_col + n && col_idx < t0_grid_right) { A[index] = t[i]; JC[index] = col_idx; index++; } } } //set up Ak base_col += n; for (int i = 0; i < 3; i++) { int col_idx = base_col + i; if (a_grid_left <= col_idx && base_col <= col_idx && col_idx < base_col + n && col_idx < a_grid_right) { A[index] = a[i]; JC[index] = col_idx; index++; } } base_col += n; //set up t1 if (grid_row != n - 1) { for (int i = 0; i < 3; i++) { int col_idx = base_col + i; if (t1_grid_left <= col_idx && base_col <= col_idx && col_idx < base_col + n && col_idx < t1_grid_right) { A[index] = t[i]; JC[index] = col_idx; index++; } } } } } JR[N] = index; // initialize vector b FP num = 6.0 / (n + 1) / (n + 1); b[(n / 2 - 1) * n + (n / 2 - 1)] = num; b[(n / 2 - 1) * n + (n / 2)] = num; b[(n / 2) * n + (n / 2 - 1)] = num; b[(n / 2) * n + (n / 2)] = num; } void print(FP *a, int n, int m) { for(int i=0; i<n; i++) { for(int j=0; j<m; j++) { printf("%.6e ", a[i*m+j]); } printf("\n"); } } FP reduce(FP *a, int N) { FP res = 0.; for(int i=0; i<N; i++) res += a[i]; return res; } int main(int argc, char *argv[]) { int gpucount = 0; // Count of available GPUs int gpunum = 0; // Device number to use int n, N, iter; // matrix dimension int Nz_in_grid, Nz; float rho, prev_rho; int *row, *col, *dev_row, *dev_col; FP *A, *b, *x, *p, *r, *q, *temp; FP *dev_A, *dev_b, *dev_x, *dev_p, *dev_r, *dev_q, *dev_temp; hipEvent_t start, stop; // using cuda events to measure time float elapsed_time_ms; // which is applicable for asynchronous code also hipError_t errorcode; // --------------------SET PARAMETERS 
AND DATA ----------------------- errorcode = hipGetDeviceCount(&gpucount); if (errorcode == hipErrorNoDevice) { printf("No GPUs are visible\n"); exit(-1); } else { printf("Device count = %d\n",gpucount); } if ((argc<2) || (argc>3)) { printf("Usage: matmul <matrix dim n> [<dev num>]\n"); exit (-1); } n = atoi(argv[1]); N = n * n; Nz_in_grid = (n - 2) * 3 + 2 * 2; // number of non-zero elements in a grid Nz = (n - 2) * 3 * Nz_in_grid + 2 * 2 * Nz_in_grid; // number of non-zero elements printf("Nz: %d\n", Nz); row = (int *) calloc(N+1, sizeof(int)); col = (int *) calloc(Nz, sizeof(int)); A = (FP *) calloc(Nz, sizeof(FP)); b = (FP *) calloc(N, sizeof(FP)); x = (FP *) calloc(N, sizeof(FP)); p = (FP *) calloc(N, sizeof(FP)); r = (FP *) calloc(N, sizeof(FP)); q = (FP *) calloc(N, sizeof(FP)); temp = (FP *) calloc(N, sizeof(FP)); initialize(A, row, col, b, n, N); if (argc==3) { gpunum = atoi(argv[2]); // Device number if ((gpunum > 2) || (gpunum < 0)) { printf("Error, Device number must be 0, 1, or 2\n"); exit (-1); } } hipSetDevice(gpunum); printf("Using device %d\n",gpunum); // ------------- COMPUTATION DONE ON GPU ---------------------------- hipEventCreate(&start); // instrument code to measure start time hipEventCreate(&stop); hipEventRecord(start, 0); printf("Setting up cuda memory\n"); hipMalloc((void**)&dev_A, Nz * sizeof(FP)); // allocate memory on device hipMalloc((void**)&dev_row, (N+1) * sizeof(int)); hipMalloc((void**)&dev_col, Nz * sizeof(int)); hipMalloc((void**)&dev_b, N * sizeof(FP)); hipMalloc((void**)&dev_x, N * sizeof(FP)); hipMalloc((void**)&dev_p, N * sizeof(FP)); hipMalloc((void**)&dev_r, N * sizeof(FP)); hipMalloc((void**)&dev_q, N * sizeof(FP)); hipMalloc((void**)&dev_temp, N * sizeof(FP)); printf("Copy from host to cuda memory\n"); hipMemcpy(dev_A, A , Nz * sizeof(FP), hipMemcpyHostToDevice); hipMemcpy(dev_row, row , (N+1) * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_col, col , Nz * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_b, b , N * sizeof(FP), hipMemcpyHostToDevice); hipMemcpy(dev_x, x , N * sizeof(FP), hipMemcpyHostToDevice); hipMemcpy(dev_p, p , N * sizeof(FP), hipMemcpyHostToDevice); hipMemcpy(dev_r, r , N * sizeof(FP), hipMemcpyHostToDevice); hipMemcpy(dev_q, q , N * sizeof(FP), hipMemcpyHostToDevice); hipMemcpy(dev_temp, temp, N * sizeof(FP), hipMemcpyHostToDevice); // const FP THRESHOLD = sqrt(vecdot(b, b, N)) / 1000000.; hipLaunchKernelGGL(( gpu_vecdot), dim3(N/BDIM), dim3(BDIM), 0, 0, dev_b, dev_b, dev_temp, N); hipMemcpy(temp,dev_temp, N * sizeof(FP), hipMemcpyDeviceToHost); const FP THRESHOLD = sqrt(reduce(temp, N)) / 1000000.; hipLaunchKernelGGL(( gpu_matmulvec), dim3(N/BDIM), dim3(BDIM), 0, 0, dev_A, dev_row, dev_col, dev_x, dev_r, Nz); hipLaunchKernelGGL(( gpu_vecop), dim3(N/BDIM), dim3(BDIM), 0, 0, dev_b, dev_r, dev_r, -1., N); hipLaunchKernelGGL(( gpu_vecdot), dim3(N/BDIM), dim3(BDIM), 0, 0, dev_r, dev_r, dev_temp, N); hipMemcpy(temp,dev_temp, N * sizeof(FP), hipMemcpyDeviceToHost); rho = reduce(temp, N); printf("Stopping tolerance: %.6e\n", THRESHOLD); printf("Initial residuals: %.6e\n", sqrt(rho)); for(iter = 0; iter < MAX_ITERATION && sqrt(rho) >= THRESHOLD; iter++) { prev_rho = rho; hipLaunchKernelGGL(( gpu_vecdot), dim3(N/BDIM), dim3(BDIM), 0, 0, dev_r, dev_r, dev_temp, N); hipMemcpy(temp,dev_temp, N * sizeof(FP), hipMemcpyDeviceToHost); rho = reduce(temp, N); // printf("current residuals: %f\n", sqrt(rho)); if(iter == 0) { hipLaunchKernelGGL(( gpu_vecassign), dim3(N/BDIM), dim3(BDIM), 0, 0, dev_p, dev_r, N); } else { FP beta = rho 
/ prev_rho; // printf("beta: %.6e\n", beta); hipLaunchKernelGGL(( gpu_vecop), dim3(N/BDIM), dim3(BDIM), 0, 0, dev_r, dev_p, dev_p, beta, N); } // q = Ap hipLaunchKernelGGL(( gpu_vecreset), dim3(N/BDIM), dim3(BDIM), 0, 0, dev_q, N); hipLaunchKernelGGL(( gpu_matmulvec), dim3(N/BDIM), dim3(BDIM), 0, 0, dev_A, dev_row, dev_col, dev_p, dev_q, Nz); //a = pTq hipLaunchKernelGGL(( gpu_vecdot), dim3(N/BDIM), dim3(BDIM), 0, 0, dev_p, dev_q, dev_temp, N); hipMemcpy(temp,dev_temp, N * sizeof(FP), hipMemcpyDeviceToHost); FP alpha = reduce(temp, N); // printf("alpha: %.6e\n", alpha); // x = x + ap hipLaunchKernelGGL(( gpu_vecop), dim3(N/BDIM), dim3(BDIM), 0, 0, dev_x, dev_p, dev_x, alpha, N); // vecop(x, p, x, alpha, N); // r = r -aq hipLaunchKernelGGL(( gpu_vecop), dim3(N/BDIM), dim3(BDIM), 0, 0, dev_r, dev_q, dev_r, -alpha, N); // vecop(r, q, r, -alpha, N); } hipEventRecord(stop, 0); // instrument code to measure end time hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time_ms, start, stop ); printf("#CG iterations: %d\n", iter); printf("Final residuals: %.6e\n", sqrt(rho)); printf("Time to calculate results on GPU: %f seconds.\n", elapsed_time_ms/1000.); // -------------- clean up --------------------------------------- free(A); free(row); free(col); free(b); free(x); free(p); free(r); free(q); hipFree(dev_A); hipFree(dev_row); hipFree(dev_col); hipFree(dev_b); hipFree(dev_x); hipFree(dev_p); hipFree(dev_r); hipFree(dev_q); hipFree(dev_temp); return 0; }
6e59f5b79084ddcc77137fc9e4a3397d4f389e0a.cu
#define FP float #define BDIM 512 #define MAX_ITERATION 1500 #include <stdio.h> #include <cuda.h> #include <stdlib.h> #include <math.h> // handle vector operations, c = a + ceof * b __global__ void gpu_vecop(FP *a, FP *b, FP *c, FP coef, int N) { int index = threadIdx.x + blockDim.x * blockIdx.x; if(index < N) c[index] = a[index] + coef * b[index]; } // handle dop product, res = dot(a, b) __global__ void gpu_vecdot(FP *a, FP *b, FP *c, int N) { int index = threadIdx.x + blockDim.x * blockIdx.x; if(index < N) { c[index] = a[index] * b[index]; } } // handle assignment, a = b; __global__ void gpu_vecassign(FP *a, FP *b, int N) { int index = threadIdx.x + blockDim.x * blockIdx.x; if(index < N) a[index] = b[index]; } // reset vector, a = 0 __global__ void gpu_vecreset(FP *a, int N) { int index = threadIdx.x + blockDim.x * blockIdx.x; if(index < N) a[index] = 0.; } // handle matrix times vector, c = a * b __global__ void gpu_matmulvec(FP *a, int *row, int *col, FP *b, FP *c, int Nz) { int index = threadIdx.x + blockDim.x * blockIdx.x; int start_idx = row[index], end_idx = row[index+1]; for(int i=start_idx; i<end_idx; i++) { c[index] += a[i] * b[col[i]]; } } // use CSR scheme to store the matrix void initialize(FP *A, int *JR, int *JC, FP *b, int n, int N) { // initialize matrix A int index = 0; FP a[] = {-4, 20, -4}; FP t[] = {-1, -4, -1}; for (int grid_row = 0; grid_row < n; grid_row++) { int t0_grid_left = (grid_row - 1) * n, t0_grid_right = grid_row * n; int a_grid_left = grid_row * n, a_grid_right = (grid_row + 1) * n; int t1_grid_left = (grid_row + 1) * n, t1_grid_right = (grid_row + 2) * n; for (int row = grid_row * n, col = (grid_row - 1) * n - 1; row < (grid_row + 1) * n; row++, col++) { JR[row] = index; int base_col = col; //set up t0 if (grid_row != 0) { for (int i = 0; i < 3; i++) { int col_idx = base_col + i; if (t0_grid_left <= col_idx && base_col <= col_idx && col_idx < base_col + n && col_idx < t0_grid_right) { A[index] = t[i]; JC[index] = col_idx; index++; } } } //set up Ak base_col += n; for (int i = 0; i < 3; i++) { int col_idx = base_col + i; if (a_grid_left <= col_idx && base_col <= col_idx && col_idx < base_col + n && col_idx < a_grid_right) { A[index] = a[i]; JC[index] = col_idx; index++; } } base_col += n; //set up t1 if (grid_row != n - 1) { for (int i = 0; i < 3; i++) { int col_idx = base_col + i; if (t1_grid_left <= col_idx && base_col <= col_idx && col_idx < base_col + n && col_idx < t1_grid_right) { A[index] = t[i]; JC[index] = col_idx; index++; } } } } } JR[N] = index; // initialize vector b FP num = 6.0 / (n + 1) / (n + 1); b[(n / 2 - 1) * n + (n / 2 - 1)] = num; b[(n / 2 - 1) * n + (n / 2)] = num; b[(n / 2) * n + (n / 2 - 1)] = num; b[(n / 2) * n + (n / 2)] = num; } void print(FP *a, int n, int m) { for(int i=0; i<n; i++) { for(int j=0; j<m; j++) { printf("%.6e ", a[i*m+j]); } printf("\n"); } } FP reduce(FP *a, int N) { FP res = 0.; for(int i=0; i<N; i++) res += a[i]; return res; } int main(int argc, char *argv[]) { int gpucount = 0; // Count of available GPUs int gpunum = 0; // Device number to use int n, N, iter; // matrix dimension int Nz_in_grid, Nz; float rho, prev_rho; int *row, *col, *dev_row, *dev_col; FP *A, *b, *x, *p, *r, *q, *temp; FP *dev_A, *dev_b, *dev_x, *dev_p, *dev_r, *dev_q, *dev_temp; cudaEvent_t start, stop; // using cuda events to measure time float elapsed_time_ms; // which is applicable for asynchronous code also cudaError_t errorcode; // --------------------SET PARAMETERS AND DATA ----------------------- errorcode = 
cudaGetDeviceCount(&gpucount); if (errorcode == cudaErrorNoDevice) { printf("No GPUs are visible\n"); exit(-1); } else { printf("Device count = %d\n",gpucount); } if ((argc<2) || (argc>3)) { printf("Usage: matmul <matrix dim n> [<dev num>]\n"); exit (-1); } n = atoi(argv[1]); N = n * n; Nz_in_grid = (n - 2) * 3 + 2 * 2; // number of non-zero elements in a grid Nz = (n - 2) * 3 * Nz_in_grid + 2 * 2 * Nz_in_grid; // number of non-zero elements printf("Nz: %d\n", Nz); row = (int *) calloc(N+1, sizeof(int)); col = (int *) calloc(Nz, sizeof(int)); A = (FP *) calloc(Nz, sizeof(FP)); b = (FP *) calloc(N, sizeof(FP)); x = (FP *) calloc(N, sizeof(FP)); p = (FP *) calloc(N, sizeof(FP)); r = (FP *) calloc(N, sizeof(FP)); q = (FP *) calloc(N, sizeof(FP)); temp = (FP *) calloc(N, sizeof(FP)); initialize(A, row, col, b, n, N); if (argc==3) { gpunum = atoi(argv[2]); // Device number if ((gpunum > 2) || (gpunum < 0)) { printf("Error, Device number must be 0, 1, or 2\n"); exit (-1); } } cudaSetDevice(gpunum); printf("Using device %d\n",gpunum); // ------------- COMPUTATION DONE ON GPU ---------------------------- cudaEventCreate(&start); // instrument code to measure start time cudaEventCreate(&stop); cudaEventRecord(start, 0); printf("Setting up cuda memory\n"); cudaMalloc((void**)&dev_A, Nz * sizeof(FP)); // allocate memory on device cudaMalloc((void**)&dev_row, (N+1) * sizeof(int)); cudaMalloc((void**)&dev_col, Nz * sizeof(int)); cudaMalloc((void**)&dev_b, N * sizeof(FP)); cudaMalloc((void**)&dev_x, N * sizeof(FP)); cudaMalloc((void**)&dev_p, N * sizeof(FP)); cudaMalloc((void**)&dev_r, N * sizeof(FP)); cudaMalloc((void**)&dev_q, N * sizeof(FP)); cudaMalloc((void**)&dev_temp, N * sizeof(FP)); printf("Copy from host to cuda memory\n"); cudaMemcpy(dev_A, A , Nz * sizeof(FP), cudaMemcpyHostToDevice); cudaMemcpy(dev_row, row , (N+1) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_col, col , Nz * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b , N * sizeof(FP), cudaMemcpyHostToDevice); cudaMemcpy(dev_x, x , N * sizeof(FP), cudaMemcpyHostToDevice); cudaMemcpy(dev_p, p , N * sizeof(FP), cudaMemcpyHostToDevice); cudaMemcpy(dev_r, r , N * sizeof(FP), cudaMemcpyHostToDevice); cudaMemcpy(dev_q, q , N * sizeof(FP), cudaMemcpyHostToDevice); cudaMemcpy(dev_temp, temp, N * sizeof(FP), cudaMemcpyHostToDevice); // const FP THRESHOLD = sqrt(vecdot(b, b, N)) / 1000000.; gpu_vecdot<<<N/BDIM, BDIM>>>(dev_b, dev_b, dev_temp, N); cudaMemcpy(temp,dev_temp, N * sizeof(FP), cudaMemcpyDeviceToHost); const FP THRESHOLD = sqrt(reduce(temp, N)) / 1000000.; gpu_matmulvec<<<N/BDIM, BDIM>>>(dev_A, dev_row, dev_col, dev_x, dev_r, Nz); gpu_vecop<<<N/BDIM, BDIM>>>(dev_b, dev_r, dev_r, -1., N); gpu_vecdot<<<N/BDIM, BDIM>>>(dev_r, dev_r, dev_temp, N); cudaMemcpy(temp,dev_temp, N * sizeof(FP), cudaMemcpyDeviceToHost); rho = reduce(temp, N); printf("Stopping tolerance: %.6e\n", THRESHOLD); printf("Initial residuals: %.6e\n", sqrt(rho)); for(iter = 0; iter < MAX_ITERATION && sqrt(rho) >= THRESHOLD; iter++) { prev_rho = rho; gpu_vecdot<<<N/BDIM, BDIM>>>(dev_r, dev_r, dev_temp, N); cudaMemcpy(temp,dev_temp, N * sizeof(FP), cudaMemcpyDeviceToHost); rho = reduce(temp, N); // printf("current residuals: %f\n", sqrt(rho)); if(iter == 0) { gpu_vecassign<<<N/BDIM, BDIM>>>(dev_p, dev_r, N); } else { FP beta = rho / prev_rho; // printf("beta: %.6e\n", beta); gpu_vecop<<<N/BDIM, BDIM>>>(dev_r, dev_p, dev_p, beta, N); } // q = Ap gpu_vecreset<<<N/BDIM, BDIM>>>(dev_q, N); gpu_matmulvec<<<N/BDIM, BDIM>>>(dev_A, dev_row, dev_col, dev_p, dev_q, 
Nz); //a = pTq gpu_vecdot<<<N/BDIM, BDIM>>>(dev_p, dev_q, dev_temp, N); cudaMemcpy(temp,dev_temp, N * sizeof(FP), cudaMemcpyDeviceToHost); FP alpha = reduce(temp, N); // printf("alpha: %.6e\n", alpha); // x = x + ap gpu_vecop<<<N/BDIM, BDIM>>>(dev_x, dev_p, dev_x, alpha, N); // vecop(x, p, x, alpha, N); // r = r -aq gpu_vecop<<<N/BDIM, BDIM>>>(dev_r, dev_q, dev_r, -alpha, N); // vecop(r, q, r, -alpha, N); } cudaEventRecord(stop, 0); // instrument code to measure end time cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time_ms, start, stop ); printf("#CG iterations: %d\n", iter); printf("Final residuals: %.6e\n", sqrt(rho)); printf("Time to calculate results on GPU: %f seconds.\n", elapsed_time_ms/1000.); // -------------- clean up --------------------------------------- free(A); free(row); free(col); free(b); free(x); free(p); free(r); free(q); cudaFree(dev_A); cudaFree(dev_row); cudaFree(dev_col); cudaFree(dev_b); cudaFree(dev_x); cudaFree(dev_p); cudaFree(dev_r); cudaFree(dev_q); cudaFree(dev_temp); return 0; }
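One detail worth noting in both versions of this conjugate-gradient solver, sketched below rather than changed in place: every launch uses N/BDIM blocks of BDIM threads, which covers all N = n*n rows only when n*n is an exact multiple of BDIM (512), and gpu_matmulvec carries no index < N guard of its own. A hedged variant with the usual rounded-up grid and an explicit bound (hypothetical names gpu_matmulvec_guarded and launch_matmulvec, not from the original) would be:

#include <cuda_runtime.h>

#define FP float
#define BDIM 512

// Same row-wise CSR mat-vec as gpu_matmulvec above, but bounded by the
// number of rows N so the grid can safely be rounded up.
__global__ void gpu_matmulvec_guarded(const FP *a, const int *row, const int *col,
                                      const FP *b, FP *c, int N) {
    int index = threadIdx.x + blockDim.x * blockIdx.x;
    if (index >= N) return;                          // ignore padding threads
    for (int i = row[index]; i < row[index + 1]; i++)
        c[index] += a[i] * b[col[i]];
}

void launch_matmulvec(const FP *dev_A, const int *dev_row, const int *dev_col,
                      const FP *dev_p, FP *dev_q, int N) {
    int blocks = (N + BDIM - 1) / BDIM;              // ceil(N / BDIM)
    gpu_matmulvec_guarded<<<blocks, BDIM>>>(dev_A, dev_row, dev_col, dev_p, dev_q, N);
}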
08c57667ab731e514e0e38ec9d8c5358ea93e7f0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Inverse Discrete Sine Transform in Column wise (DST three) * DST_III_Column_Inverse * This CUDA code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array} * gpuArray output, B=DST_III_Column_Inverse(A)=mexFunction(A). * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "mex.h" #include "gpu/mxGPUArray.h" #define DEFAULT_DIM 32 #define DELTA(i, j) ((i==j)?1:0) const double PI_d = 3.141592653589793238462643383279502884; //pi __global__ void DSTIII_Column_Inverse_Kernel_GPUA(double const * const A, double const * const B, double * const C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { double CValue = 0.0; int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y; int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x; for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) { for (int n = 0; n < DEFAULT_DIM; ++n) if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns)) CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col]; } if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; } __global__ void DSTIII_Column_Inverse_Kernel(double *A, double *B, double *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { double CValue = 0.0; int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y; int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x; for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) { for (int n = 0; n < DEFAULT_DIM; ++n) if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns)) CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col]; } if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; } // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void CalculateTransform(double * A, double * B, double * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { double * hostA = A; // The A matrix double * hostB = B; // The B matrix double * hostC = C; // The output C matrix //double * hostComputedC; double * deviceA=0; double * deviceB=0; double * deviceC=0; //hostA = (double *)malloc(sizeof(float)*numARows*numAColumns); //hostB = (v *)malloc(sizeof(float)*numBRows*numBColumns); // Setting numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; // Allocate GPU buffers for three vectors (two input, one output) . 
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns); //hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns); hipMalloc((void **)&deviceA, sizeof(double )*numARows*numAColumns); hipMalloc((void **)&deviceB, sizeof(double )*numBRows*numBColumns); hipMalloc((void **)&deviceC, sizeof(double )*numCRows*numCColumns); hipMemcpy(deviceA, hostA, sizeof(double )*numARows*numAColumns, hipMemcpyHostToDevice); hipMemcpy(deviceB, hostB, sizeof(double )*numBRows*numBColumns, hipMemcpyHostToDevice); dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; DSTIII_Column_Inverse_Kernel << <dimGrid, dimBlock >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); hipDeviceSynchronize();//To synchronize the device // Copy the results in GPU memory back to the CPU hipMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, hipMemcpyDeviceToHost); C = hostC; hipFree(deviceA); hipFree(deviceB); hipFree(deviceC); } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { int nDevices; hipError_t errCode =hipGetDeviceCount(&nDevices); //int nDevices; //hipGetDeviceCount(&nDevices); if (errCode != hipSuccess){ printf("Error! No CUDA devices found! \n"); return; } /// input standard GPUarray if (mxIsGPUArray(prhs[0])) { //mexErrMsgIdAndTxt(errId, errMsg); /* Declare all variables.*/ mxGPUArray const *A; mxGPUArray const *DCOS; mxGPUArray *B; double const *d_A, *d_DCOS; double *d_B; // mxArray * hostcos; //test // double * hostcos, *pointer; double *pointer; //int N; int numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns; char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file."; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); if ((nrhs!=1)) { mexErrMsgIdAndTxt(errId, errMsg); } A = mxGPUCreateFromMxArray(prhs[0]); const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ numDCOSRows=numDCOSColumns = numARows; numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Inverse Discrete Sine Transform in row wise \n"); return; } // numDCOSRows=numDCOSColumns = numARows; // numCRows = numARows; // // numCColumns = numAColumns; // numDCOSRows=numDCOSColumns=numAColumns; // numCRows = numARows; // numCColumns = numDCOSColumns; mxArray *COS= mxCreateNumericMatrix(numDCOSRows, numDCOSColumns, mxDOUBLE_CLASS, mxREAL); pointer = mxGetPr(COS); for (int i = 0; i < numDCOSRows; i++){ for (int j = 0; j < numDCOSColumns; j++){ //hostB[i * numBColumns + j] = i + j* numAColumns; //hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns]; //hostBinv[i * numBColumns + j] = 1; //hostBinv[i + j* numBColumns] = sin((((j + 1)*3.14*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1)); //hostBinvL[i* numBColumns + j] = sin((((j + 1)*3.14*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1));//DST I Column //hostBinv[i + j* numBColumns] = sin(((j + 1)*3.14*(i + 0.5)) / (numBColumns))*sqrt(2.0 / (numBColumns))*sqrt(1.0 / (1 + DELTA(numBColumns, j + 1))); pointer[i* numDCOSColumns + j] = sin(((j + 0.5)*PI_d*(i + 1)) / (numDCOSColumns))*sqrt((2.0 - DELTA(i + 1, numDCOSRows)) / (numDCOSColumns)); //hostB[i + j* numBColumns] = 1; //hostBinvL[i* numBColumns + j] = cos(((2 * i + 1) / (2.0 * numBColumns))*3.14*j)*sqrt(1.0 / numBColumns); } } // for (int i = 0; i < numDCOSRows; i++){ // for (int j = 0; j < numDCOSColumns; j++){ // //hostB[i * numBColumns + j] = i + j* numAColumns; // //hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns]; // //hostBinv[i * numBColumns + j] = 1; // //hostBinv[i + j* numBColumns] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numBColumns); // pointer[i* numDCOSColumns + j] = cos(((2 * j + 1) / (2.0 * numDCOSColumns))*PI_d*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numDCOSColumns); // //hostB[i + j* numBColumns] = 1; // // //hostBinvL[i* numBColumns + j] = cos(((2 * i + 1) / (2.0 * numBColumns))*3.14*j)*sqrt(1.0 / numBColumns); // // // } // } DCOS=mxGPUCreateFromMxArray(COS); // DCOS=mxGPUCreateFromMxArray(hostcos); if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (double const *)(mxGPUGetDataReadOnly(A)); d_DCOS=(double const *)(mxGPUGetDataReadOnly(DCOS)); B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A), mxGPUGetDimensions(A), mxGPUGetClassID(A), mxGPUGetComplexity(A), MX_GPU_DO_NOT_INITIALIZE); d_B = (double *)(mxGPUGetData(B)); dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; //(hostL, hostA, hostC, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns); //DCTII_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_DCOS, d_B, numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns); DSTIII_Column_Inverse_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_DCOS, d_A, d_B, numDCOSRows, numDCOSColumns, numARows, numAColumns, numCRows, numCColumns); // hipError_t err1 = hipPeekAtLastError();//To capture last error in function call //hipDeviceSynchronize();//To synchronize the 
device plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(DCOS); mxGPUDestroyGPUArray(B); } /// input standard array else if (!(mxIsGPUArray(prhs[0]))){ int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) numBRows = numBColumns = numARows; numCRows = numARows; numCColumns = numAColumns; // numBRows = numBColumns = numAColumns; // numCRows = numARows; // // numCColumns = numBColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Inverse Discrete Sine Transform in row wise \n"); return; } //char const * const errId = "parallel:gpu:DCTTWO:InvalidInput"; //char const * const errMsg = "Invalid input to MEX file."; double * hostA ; // The A matrix double * hostB ; // The B matrix /* Initialize the MathWorks GPU API. */ //mxInitGPU(); /* Throw an error if the input is not a GPU array. */ //if ((nrhs != 1) || !(mxIsGPUArray(prhs[0]))) { //mexErrMsgIdAndTxt(errId, errMsg); //} //hostA = (double *)malloc(sizeof(double)*numARows*numAColumns); //hostAx = (double *)malloc(sizeof(double)*numARows*numAColumns); //hostAy = (double *)malloc(sizeof(double)*numARows*numAColumns); hostB = (double *)malloc(sizeof(double)*numBRows*numBColumns); //const mxArray *G =prhs[0]; // if ((nrhs != 1) || (mxIsGPUArray(G))) { //mexErrMsgIdAndTxt(errId, errMsg); // G = gather(G); // } hostA = (double *)mxGetData(prhs[0]); // hostA = (double *)mxGetData(G); //Inverse Discrete Sine Transform in Columns wise for (int i = 0; i < numBRows; i++){ for (int j = 0; j < numBColumns; j++){ //hostB[i * numBColumns + j] = i + j* numAColumns; //hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns]; //hostBinv[i * numBColumns + j] = 1; //hostBinv[i + j* numBColumns] = sin((((j + 1)*3.14*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1)); //hostBinvL[i* numBColumns + j] = sin((((j + 1)*3.14*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1));//DST I Column //hostBinv[i + j* numBColumns] = sin(((j + 1)*3.14*(i + 0.5)) / (numBColumns))*sqrt(2.0 / (numBColumns))*sqrt(1.0 / (1 + DELTA(numBColumns, j + 1))); hostB[i* numBColumns + j] = sin(((j + 0.5)*PI_d*(i + 1)) / (numBColumns))*sqrt((2.0 - DELTA(i + 1, numBRows)) / (numBColumns)); //hostB[i + j* numBColumns] = 1; //hostBinvL[i* numBColumns + j] = cos(((2 * i + 1) / (2.0 * numBColumns))*3.14*j)*sqrt(1.0 / numBColumns); } } // for (int i = 0; i < numBRows; i++){ // for (int j = 0; j < numBColumns; j++){ // //hostB[i * numBColumns + j] = i + j* numAColumns; // //hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns]; // //hostBinv[i * numBColumns + j] = 1; // //hostBinv[i + j* numBColumns] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numBColumns); // hostB[i* numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*PI_d*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numBColumns); // //hostB[i + j* numBColumns] = 1; // // //hostBinvL[i* numBColumns + j] = cos(((2 * i + 1) / (2.0 * numBColumns))*3.14*j)*sqrt(1.0 / numBColumns); // // // } // } //plhs[0] = mxCreateNumericMatrix(numARows, numBColumns, mxDOUBLE_CLASS, mxREAL); //hostC = (double*)mxGetData(plhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, numCColumns, 
mxDOUBLE_CLASS, mxREAL); double *pointer = mxGetPr(plhs[0]); // (hostL, hostA, hostC, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns); //CalculateTransform(hostA, hostB, hostC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); // CalculateTransform(hostA, hostB, pointer, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); CalculateTransform( hostB, hostA, pointer, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns); //memcpy(pointer, hostC, numCRows*numCColumns*sizeof(double)); free(hostB); } }
08c57667ab731e514e0e38ec9d8c5358ea93e7f0.cu
/* * Inverse Discrete Sine Transform in Column wise (DST three) * DST_III_Column_Inverse * This CUDA code can handle/work with any type of the input mxArrays, * GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array} * gpuArray output, B=DST_III_Column_Inverse(A)=mexFunction(A). * Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London * Wellcome Trust Centre for Neuroimaging * Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm) * Copyright 2018 * Kevin Bronik */ #include "matrix.h" #include "mex.h" #include "gpu/mxGPUArray.h" #define DEFAULT_DIM 32 #define DELTA(i, j) ((i==j)?1:0) const double PI_d = 3.141592653589793238462643383279502884; //pi __global__ void DSTIII_Column_Inverse_Kernel_GPUA(double const * const A, double const * const B, double * const C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { double CValue = 0.0; int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y; int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x; for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) { for (int n = 0; n < DEFAULT_DIM; ++n) if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns)) CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col]; } if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; } __global__ void DSTIII_Column_Inverse_Kernel(double *A, double *B, double *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { double CValue = 0.0; int Row = blockIdx.y*DEFAULT_DIM + threadIdx.y; int Col = blockIdx.x*DEFAULT_DIM + threadIdx.x; for (int k = 0; k < (DEFAULT_DIM + numAColumns - 1) / DEFAULT_DIM; k++) { for (int n = 0; n < DEFAULT_DIM; ++n) if ((k*DEFAULT_DIM + n < numAColumns && Row < numARows) && (k*DEFAULT_DIM + n < numBRows && Col < numBColumns)) CValue += A[Row*numAColumns + k*DEFAULT_DIM + n] * B[(k*DEFAULT_DIM + n)*numBColumns + Col]; } if (Row < numCRows && Col < numCColumns) C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; } // Matrix multiplication - Host code // Matrix dimensions are assumed to be multiples of BLOCK_SIZE void CalculateTransform(double * A, double * B, double * C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { double * hostA = A; // The A matrix double * hostB = B; // The B matrix double * hostC = C; // The output C matrix //double * hostComputedC; double * deviceA=0; double * deviceB=0; double * deviceC=0; //hostA = (double *)malloc(sizeof(float)*numARows*numAColumns); //hostB = (v *)malloc(sizeof(float)*numBRows*numBColumns); // Setting numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; // Allocate GPU buffers for three vectors (two input, one output) . 
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns); //hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns); cudaMalloc((void **)&deviceA, sizeof(double )*numARows*numAColumns); cudaMalloc((void **)&deviceB, sizeof(double )*numBRows*numBColumns); cudaMalloc((void **)&deviceC, sizeof(double )*numCRows*numCColumns); cudaMemcpy(deviceA, hostA, sizeof(double )*numARows*numAColumns, cudaMemcpyHostToDevice); cudaMemcpy(deviceB, hostB, sizeof(double )*numBRows*numBColumns, cudaMemcpyHostToDevice); dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; DSTIII_Column_Inverse_Kernel << <dimGrid, dimBlock >> >(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); cudaDeviceSynchronize();//To synchronize the device // Copy the results in GPU memory back to the CPU cudaMemcpy(hostC, deviceC, sizeof(double)*numCRows*numCColumns, cudaMemcpyDeviceToHost); C = hostC; cudaFree(deviceA); cudaFree(deviceB); cudaFree(deviceC); } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { int nDevices; cudaError_t errCode =cudaGetDeviceCount(&nDevices); //int nDevices; //cudaGetDeviceCount(&nDevices); if (errCode != cudaSuccess){ printf("Error! No CUDA devices found! \n"); return; } /// input standard GPUarray if (mxIsGPUArray(prhs[0])) { //mexErrMsgIdAndTxt(errId, errMsg); /* Declare all variables.*/ mxGPUArray const *A; mxGPUArray const *DCOS; mxGPUArray *B; double const *d_A, *d_DCOS; double *d_B; // mxArray * hostcos; //test // double * hostcos, *pointer; double *pointer; //int N; int numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns; char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput"; char const * const errMsg = "Invalid input to MEX file."; /* Initialize the MathWorks GPU API. 
*/ mxInitGPU(); if ((nrhs!=1)) { mexErrMsgIdAndTxt(errId, errMsg); } A = mxGPUCreateFromMxArray(prhs[0]); const mwSize *dims; dims=mxGPUGetDimensions(A); numARows = (int)dims[0]; /* gets number of rows of A */ numAColumns = (int)dims[1]; /* gets number of columns of A */ numDCOSRows=numDCOSColumns = numARows; numCRows = numARows; numCColumns = numAColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Inverse Discrete Sine Transform in row wise \n"); return; } // numDCOSRows=numDCOSColumns = numARows; // numCRows = numARows; // // numCColumns = numAColumns; // numDCOSRows=numDCOSColumns=numAColumns; // numCRows = numARows; // numCColumns = numDCOSColumns; mxArray *COS= mxCreateNumericMatrix(numDCOSRows, numDCOSColumns, mxDOUBLE_CLASS, mxREAL); pointer = mxGetPr(COS); for (int i = 0; i < numDCOSRows; i++){ for (int j = 0; j < numDCOSColumns; j++){ //hostB[i * numBColumns + j] = i + j* numAColumns; //hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns]; //hostBinv[i * numBColumns + j] = 1; //hostBinv[i + j* numBColumns] = sin((((j + 1)*3.14*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1)); //hostBinvL[i* numBColumns + j] = sin((((j + 1)*3.14*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1));//DST I Column //hostBinv[i + j* numBColumns] = sin(((j + 1)*3.14*(i + 0.5)) / (numBColumns))*sqrt(2.0 / (numBColumns))*sqrt(1.0 / (1 + DELTA(numBColumns, j + 1))); pointer[i* numDCOSColumns + j] = sin(((j + 0.5)*PI_d*(i + 1)) / (numDCOSColumns))*sqrt((2.0 - DELTA(i + 1, numDCOSRows)) / (numDCOSColumns)); //hostB[i + j* numBColumns] = 1; //hostBinvL[i* numBColumns + j] = cos(((2 * i + 1) / (2.0 * numBColumns))*3.14*j)*sqrt(1.0 / numBColumns); } } // for (int i = 0; i < numDCOSRows; i++){ // for (int j = 0; j < numDCOSColumns; j++){ // //hostB[i * numBColumns + j] = i + j* numAColumns; // //hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns]; // //hostBinv[i * numBColumns + j] = 1; // //hostBinv[i + j* numBColumns] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numBColumns); // pointer[i* numDCOSColumns + j] = cos(((2 * j + 1) / (2.0 * numDCOSColumns))*PI_d*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numDCOSColumns); // //hostB[i + j* numBColumns] = 1; // // //hostBinvL[i* numBColumns + j] = cos(((2 * i + 1) / (2.0 * numBColumns))*3.14*j)*sqrt(1.0 / numBColumns); // // // } // } DCOS=mxGPUCreateFromMxArray(COS); // DCOS=mxGPUCreateFromMxArray(hostcos); if (mxGPUGetClassID(A) != mxDOUBLE_CLASS) { mexErrMsgIdAndTxt(errId, errMsg); } d_A = (double const *)(mxGPUGetDataReadOnly(A)); d_DCOS=(double const *)(mxGPUGetDataReadOnly(DCOS)); B = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(A), mxGPUGetDimensions(A), mxGPUGetClassID(A), mxGPUGetComplexity(A), MX_GPU_DO_NOT_INITIALIZE); d_B = (double *)(mxGPUGetData(B)); dim3 dimBlock(DEFAULT_DIM, DEFAULT_DIM, 1); dim3 dimGrid; dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x; dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y; //(hostL, hostA, hostC, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns); //DCTII_Column_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_A, d_DCOS, d_B, numARows, numAColumns, numDCOSRows, numDCOSColumns, numCRows, numCColumns); DSTIII_Column_Inverse_Kernel_GPUA<< <dimGrid, dimBlock >> >(d_DCOS, d_A, d_B, numDCOSRows, numDCOSColumns, numARows, numAColumns, numCRows, numCColumns); // cudaError_t err1 = cudaPeekAtLastError();//To capture last error in function call //cudaDeviceSynchronize();//To synchronize 
the device plhs[0] = mxGPUCreateMxArrayOnGPU(B); mxGPUDestroyGPUArray(A); mxGPUDestroyGPUArray(DCOS); mxGPUDestroyGPUArray(B); } /// input standard array else if (!(mxIsGPUArray(prhs[0]))){ int numARows = (int)mxGetM(prhs[0]); // number of rows in the matrix A int numAColumns = (int)mxGetN(prhs[0]); // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) numBRows = numBColumns = numARows; numCRows = numARows; numCColumns = numAColumns; // numBRows = numBColumns = numAColumns; // numCRows = numARows; // // numCColumns = numBColumns; if (numARows==1) { printf("Attention, this is a row vector, please try Inverse Discrete Sine Transform in row wise \n"); return; } //char const * const errId = "parallel:gpu:DCTTWO:InvalidInput"; //char const * const errMsg = "Invalid input to MEX file."; double * hostA ; // The A matrix double * hostB ; // The B matrix /* Initialize the MathWorks GPU API. */ //mxInitGPU(); /* Throw an error if the input is not a GPU array. */ //if ((nrhs != 1) || !(mxIsGPUArray(prhs[0]))) { //mexErrMsgIdAndTxt(errId, errMsg); //} //hostA = (double *)malloc(sizeof(double)*numARows*numAColumns); //hostAx = (double *)malloc(sizeof(double)*numARows*numAColumns); //hostAy = (double *)malloc(sizeof(double)*numARows*numAColumns); hostB = (double *)malloc(sizeof(double)*numBRows*numBColumns); //const mxArray *G =prhs[0]; // if ((nrhs != 1) || (mxIsGPUArray(G))) { //mexErrMsgIdAndTxt(errId, errMsg); // G = gather(G); // } hostA = (double *)mxGetData(prhs[0]); // hostA = (double *)mxGetData(G); //Inverse Discrete Sine Transform in Columns wise for (int i = 0; i < numBRows; i++){ for (int j = 0; j < numBColumns; j++){ //hostB[i * numBColumns + j] = i + j* numAColumns; //hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns]; //hostBinv[i * numBColumns + j] = 1; //hostBinv[i + j* numBColumns] = sin((((j + 1)*3.14*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1)); //hostBinvL[i* numBColumns + j] = sin((((j + 1)*3.14*(i + 1)) / (numBColumns + 1)))*sqrt(2.0 / (numBColumns + 1));//DST I Column //hostBinv[i + j* numBColumns] = sin(((j + 1)*3.14*(i + 0.5)) / (numBColumns))*sqrt(2.0 / (numBColumns))*sqrt(1.0 / (1 + DELTA(numBColumns, j + 1))); hostB[i* numBColumns + j] = sin(((j + 0.5)*PI_d*(i + 1)) / (numBColumns))*sqrt((2.0 - DELTA(i + 1, numBRows)) / (numBColumns)); //hostB[i + j* numBColumns] = 1; //hostBinvL[i* numBColumns + j] = cos(((2 * i + 1) / (2.0 * numBColumns))*3.14*j)*sqrt(1.0 / numBColumns); } } // for (int i = 0; i < numBRows; i++){ // for (int j = 0; j < numBColumns; j++){ // //hostB[i * numBColumns + j] = i + j* numAColumns; // //hostBinv[i * numBColumns + j] = cosval2x[i + j* numAColumns]; // //hostBinv[i * numBColumns + j] = 1; // //hostBinv[i + j* numBColumns] = cos(((2 * j + 1) / (2.0 * numBColumns))*3.14*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numBColumns); // hostB[i* numBColumns + j] = cos(((2 * j + 1) / (2.0 * numBColumns))*PI_d*i)*sqrt(1.0 / (1 + DELTA(i + 1, 1)))*sqrt(2.0 / numBColumns); // //hostB[i + j* numBColumns] = 1; // // //hostBinvL[i* numBColumns + j] = cos(((2 * i + 1) / (2.0 * numBColumns))*3.14*j)*sqrt(1.0 / numBColumns); // // // } // } //plhs[0] = mxCreateNumericMatrix(numARows, numBColumns, mxDOUBLE_CLASS, mxREAL); //hostC = (double*)mxGetData(plhs[0]); plhs[0] = mxCreateNumericMatrix(numCRows, 
numCColumns, mxDOUBLE_CLASS, mxREAL); double *pointer = mxGetPr(plhs[0]); // (hostL, hostA, hostC, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns); //CalculateTransform(hostA, hostB, hostC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); // CalculateTransform(hostA, hostB, pointer, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); CalculateTransform( hostB, hostA, pointer, numBRows, numBColumns, numARows, numAColumns, numCRows, numCColumns); //memcpy(pointer, hostC, numCRows*numCColumns*sizeof(double)); free(hostB); } }
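Neither version of this MEX file runs a transform-specific kernel: the host builds the N x N DST-III synthesis matrix with entries B[i][j] = sin((j + 0.5) * pi * (i + 1) / N) * sqrt((2 - delta(i + 1, N)) / N), and the generic tiled matrix-multiply kernel then applies it to the columns of the input. A small host-side sketch of that construction (hypothetical buildDstIIIBasis helper, same constants as the source) is:

#include <cmath>
#include <vector>

// Sketch: fill the N x N DST-III basis used by the MEX routine above.
// The (i + 1 == N) term mirrors DELTA(i + 1, numDCOSRows) in the source,
// which lowers the last row's weight from sqrt(2/N) to sqrt(1/N).
std::vector<double> buildDstIIIBasis(int N) {
    const double PI_d = 3.141592653589793238462643383279502884;
    std::vector<double> B(static_cast<size_t>(N) * N);
    for (int i = 0; i < N; i++) {
        double w = std::sqrt((2.0 - (i + 1 == N ? 1.0 : 0.0)) / N);
        for (int j = 0; j < N; j++)
            B[i * N + j] = std::sin((j + 0.5) * PI_d * (i + 1) / N) * w;
    }
    return B;
}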
52cccd51ca4c36e418155160eebd22ecc7b06861.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_helper.h" __global__ void scale_kernel(float* ptr, coord_t size, float a, float b) { CUDA_KERNEL_LOOP(i, size) { ptr[i] = (b - a) * ptr[i] + a; } } __global__ void ones_kernel(float* ptr, coord_t size) { CUDA_KERNEL_LOOP(i, size) { ptr[i] = 1.0f; } } __global__ void assign_kernel(float* ptr, coord_t size, float value) { CUDA_KERNEL_LOOP(i, size) { ptr[i] = value; } } __global__ void reluBackward(float *grad_ptr, const float *input, int n) { CUDA_KERNEL_LOOP(i, n) { grad_ptr[i] = (input[i] > 0.0f) ? grad_ptr[i] : 0; } } __global__ void apply_add(float *data_ptr, const float *replica_ptr, size_t size) { CUDA_KERNEL_LOOP(i, size) { data_ptr[i] += replica_ptr[i]; } } __global__ void apply_add_with_scale(float *data_ptr, const float *grad_ptr, size_t size, float scale) { CUDA_KERNEL_LOOP(i, size) { data_ptr[i] += grad_ptr[i] * scale; } } __host__ void updateGAS(float* para_ptr, const float* grad_ptr, size_t replica_size, int num_replica, float learning_rate) { // Step 1: gather gradients to the first replica for (int i = 1; i < num_replica; i++) { const float *replica = grad_ptr + i * replica_size; hipLaunchKernelGGL(( apply_add), dim3(GET_BLOCKS(replica_size)), dim3(CUDA_NUM_THREADS), 0, 0, (float*)grad_ptr, replica, replica_size); } // Step 2: scale the first replica float scale_factor = 1.0f / num_replica * (-learning_rate); hipLaunchKernelGGL(( apply_add_with_scale), dim3(GET_BLOCKS(replica_size)), dim3(CUDA_NUM_THREADS), 0, 0, para_ptr, grad_ptr, replica_size, scale_factor); }
52cccd51ca4c36e418155160eebd22ecc7b06861.cu
#include "cuda_helper.h" __global__ void scale_kernel(float* ptr, coord_t size, float a, float b) { CUDA_KERNEL_LOOP(i, size) { ptr[i] = (b - a) * ptr[i] + a; } } __global__ void ones_kernel(float* ptr, coord_t size) { CUDA_KERNEL_LOOP(i, size) { ptr[i] = 1.0f; } } __global__ void assign_kernel(float* ptr, coord_t size, float value) { CUDA_KERNEL_LOOP(i, size) { ptr[i] = value; } } __global__ void reluBackward(float *grad_ptr, const float *input, int n) { CUDA_KERNEL_LOOP(i, n) { grad_ptr[i] = (input[i] > 0.0f) ? grad_ptr[i] : 0; } } __global__ void apply_add(float *data_ptr, const float *replica_ptr, size_t size) { CUDA_KERNEL_LOOP(i, size) { data_ptr[i] += replica_ptr[i]; } } __global__ void apply_add_with_scale(float *data_ptr, const float *grad_ptr, size_t size, float scale) { CUDA_KERNEL_LOOP(i, size) { data_ptr[i] += grad_ptr[i] * scale; } } __host__ void updateGAS(float* para_ptr, const float* grad_ptr, size_t replica_size, int num_replica, float learning_rate) { // Step 1: gater gradients to the first replica for (int i = 1; i < num_replica; i++) { const float *replica = grad_ptr + i * replica_size; apply_add<<<GET_BLOCKS(replica_size), CUDA_NUM_THREADS>>>( (float*)grad_ptr, replica, replica_size); } // Step 2: scale the first replica float scale_factor = 1.0f / num_replica * (-learning_rate); apply_add_with_scale<<<GET_BLOCKS(replica_size), CUDA_NUM_THREADS>>>( para_ptr, grad_ptr, replica_size, scale_factor); }
ae34ac93bacd620d8089b90dfd2bffdea70266b1.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "dali/kernels/normalize/normalize_gpu.h" // NOLINT #include "dali/kernels/normalize/normalize_gpu_impl.cuh" // NOLINT #include <gtest/gtest.h> #include <cmath> #include <initializer_list> #include <iostream> #include <random> #include <utility> #include "dali/core/cuda_event.h" #include "dali/kernels/kernel_manager.h" #include "dali/test/device_test.h" #include "dali/test/test_tensors.h" #include "dali/test/tensor_test_utils.h" namespace dali { namespace kernels { template <bool calc_inv_stddev, typename Out, typename In> void RefNormalize( const OutTensorCPU<Out> &out, const InTensorCPU<In> &in, const InTensorCPU<float> &base, const InTensorCPU<float> &scale, float epsilon, float global_scale, float shift, TensorShape<> &data_pos, TensorShape<> &base_pos, TensorShape<> &scale_pos, int dim) { int db = 0, ds = 0; int64_t extent = 0; if (dim < in.dim()) { db = base.shape[dim] > 1 ? 1 : 0; ds = scale.shape[dim] > 1 ? 1 : 0; extent = in.shape[dim]; } if (dim >= in.dim() - 1) { // handles both last dimension and degenerate case Out *optr = out(data_pos); const In *iptr = in(data_pos); const float *sptr = scale(scale_pos); const float *bptr = base(base_pos); for (int64_t i = 0, b = 0, s = 0; i < extent; i++, b += db, s += ds) { float mul; if (calc_inv_stddev) { float x = sptr[s] * sptr[s] + epsilon; mul = x ? rsqrt(x) * global_scale : 0; } else { mul = sptr[s] * global_scale; } optr[i] = ConvertSat<Out>(std::fma(iptr[i] - bptr[b], mul, shift)); } } else { for (int64_t i = 0, b = 0, s = 0; i < extent; i++, b += db, s += ds) { data_pos[dim] = i; base_pos[dim] = b; scale_pos[dim] = s; RefNormalize<calc_inv_stddev>(out, in, base, scale, epsilon, global_scale, shift, data_pos, base_pos, scale_pos, dim + 1); } } } /** * @brief Reference normalization of a single tensor * * If base/scale has an extent of 1 in any given dimension, it's broadcast along this axis. * * @param calc_inv_stddev if true, `scale` is assumed to contain standard deviation, which * is subsequently regularized using given epsilon value */ template <typename Out, typename In> void RefNormalize( const OutTensorCPU<Out> &out, const InTensorCPU<In> &in, const InTensorCPU<float> &base, const InTensorCPU<float> &scale, float global_scale, float shift, bool calc_inv_stddev, float epsilon) { TensorShape<> data_pos, base_pos, scale_pos; int D = in.dim(); data_pos.resize(D); base_pos.resize(D); scale_pos.resize(D); if (calc_inv_stddev) { RefNormalize<true>(out, in, base, scale, epsilon, global_scale, shift, data_pos, base_pos, scale_pos, 0); } else { RefNormalize<false>(out, in, base, scale, epsilon, global_scale, shift, data_pos, base_pos, scale_pos, 0); } } /** * @brief Reference implementation of normalization * * Goes over all input samples and normalizes them using given base and scale tensor lists. 
* If base/scale TL has 1 element, it is reused for normalization of all samples. * If base/scale has an extent of 1 in any given dimension, it's broadcast along this axis. * * @param calc_inv_stddev if true, `scale` is assumed to contain standard deviation, which * is subsequently regularized using given epsilon value */ template <typename Out, typename In> void RefNormalize( const OutListCPU<Out> &out, const TensorListView<StorageCPU, In> &in, const InListCPU<float> &base, const InListCPU<float> &scale, float global_scale, float shift, bool calc_inv_stddev = false, float epsilon = 0) { assert(out.shape == in.shape); int N = in.num_samples(); int db = base.num_samples() > 1; int ds = scale.num_samples() > 1; for (int i = 0, b = 0, s = 0; i < N; i++, b += db, s += ds) { RefNormalize<Out, In>(out[i], in[i], base[b], scale[s], global_scale, shift, calc_inv_stddev, epsilon); } } template <typename RNG> TensorListShape<> RandomDataShape(int num_samples, int ndim, int64_t max_volume, uint64_t reduced_axes, bool reduce_batch, RNG &rng) { assert(max_volume >= 1); TensorListShape<> sh; sh.resize(num_samples, ndim); int64_t extent_range = ::ceil(pow(max_volume, 1.0 / ndim)); std::uniform_int_distribution<int64_t> shape_dist(1, extent_range); for (int i = 0; i < num_samples; i++) { auto sample_shape = sh.tensor_shape_span(i); do { for (int d = 0; d < ndim; d++) { // when reducing samples in the batch, the non-reduced extents must be uniform // across all samples sample_shape[d] = reduced_axes & (1 << d) || !reduce_batch || i == 0 ? shape_dist(rng) : sh.tensor_shape_span(0)[d]; } } while (volume(sample_shape) > max_volume); } return sh; } /** * @brief Creates a tensor list which contains a repeated scalar * * If ndim > 0, then the tensor list will contain 1x1x...x1 tensors with given dimensionality */ template <typename T> TensorListView<StorageCPU, T> ScalarTLV(T &scalar, int num_samples, int ndim = 0) { TensorListView<StorageCPU, T> tlv; TensorShape<> ts; ts.resize(ndim); for (int d = 0; d < ndim; d++) ts[d] = 1; tlv.shape = uniform_list_shape(num_samples, ts); tlv.data.resize(num_samples); for (int i = 0 ; i < num_samples; i++) tlv.data[i] = &scalar; return tlv; } template <typename Params> class NormalizeImplGPUTest; template <typename Out, typename In> class NormalizeImplGPUTest<std::pair<Out, In>> : public ::testing::Test { public: // this will test both the top-level pImpl class and the internal implementation class using Kernel = std::conditional_t<std::is_same<Out, In>::value, NormalizeGPU<Out, In>, normalize_impl::NormalizeImplGPU<Out, In, float, float> >; void Init(int num_samples, int ndim, int64_t max_sample_volume, std::initializer_list<int> reduced_axes, bool reduce_batch, bool scalar_base, bool scalar_scale, bool scale_is_stddev) { Init(num_samples, ndim, max_sample_volume, { reduced_axes.begin(), reduced_axes.end() }, reduce_batch, scalar_base, scalar_scale, scale_is_stddev); } void Init(int num_samples, int ndim, int64_t max_sample_volume, span<const int> reduced_axes, bool reduce_batch, bool scalar_base, bool scalar_scale, bool scale_is_stddev) { In lo = 0, hi = 100; use_scalar_base_ = scalar_base; use_scalar_scale_ = scalar_scale; axis_mask_ = to_bit_mask(reduced_axes); reduced_axes_ = { begin(reduced_axes), end(reduced_axes) }; reduce_batch_ = reduce_batch; scale_is_stddev_ = scale_is_stddev; data_shape_ = RandomDataShape(num_samples, ndim, max_sample_volume, axis_mask_, reduce_batch_, rng_); in_.reshape(data_shape_); UniformRandomFill(in_.cpu(), rng_, lo, hi); if (!scalar_base || 
!scalar_scale) { int param_samples = reduce_batch ? 1 : num_samples; param_shape_.resize(param_samples, ndim); for (int i = 0; i < param_samples; i++) { for (int d = 0; d < ndim; d++) { bool reduced = axis_mask_ & (1 << d); param_shape_.tensor_shape_span(i)[d] = reduced ? 1 : data_shape_.tensor_shape_span(i)[d]; } } } else { param_shape_.resize(1, 0); } auto scale_dist = uniform_distribution(0.1f, 10.0f); if (scalar_scale) { scalar_scale_ = scale_dist(rng_); } else { scale_.reshape(param_shape_); UniformRandomFill(scale_.cpu(), rng_, scale_dist.a(), scale_dist.b()); } if (scalar_base) { scalar_base_ = uniform_distribution(lo, hi)(rng_); } else { base_.reshape(param_shape_); UniformRandomFill(base_.cpu(), rng_, lo, hi); } if (std::is_integral<Out>::value) { global_scale_ = std::exp2f(7 * sizeof(Out)) / hi; // scale to half range if (std::is_unsigned<Out>::value) shift_ = global_scale_; // shift half range up } } void RunTest() { kmgr_.Resize<Kernel>(1, 1); KernelContext ctx; for (int iter = 0; iter < 3; iter++) { auto req = kmgr_.Setup<Kernel>(0, ctx, data_shape_, param_shape_, use_scalar_base_, use_scalar_scale_, scale_is_stddev_); ASSERT_EQ(req.output_shapes.size(), 1u); ASSERT_EQ(req.output_shapes[0], data_shape_); out_.reshape(data_shape_); ref_.reshape(data_shape_); Launch(ctx); int param_samples = param_shape_.num_samples(); auto ref_base = use_scalar_base_ ? ScalarTLV(scalar_base_, param_samples, data_shape_.sample_dim()) : base_.cpu(); auto ref_scale = use_scalar_scale_ ? ScalarTLV(scalar_scale_, param_samples, data_shape_.sample_dim()) : scale_.cpu(); RefNormalize(ref_.cpu(), in_.cpu(), ref_base, ref_scale, global_scale_, shift_, scale_is_stddev_, epsilon_); if (scale_is_stddev_ && !std::is_integral<Out>::value) Check(out_.cpu(), ref_.cpu(), EqualEpsRel(1e-6, 1e-6)); else Check(out_.cpu(), ref_.cpu(), EqualUlp(4)); } } void RunPerf() { kmgr_.Resize<Kernel>(1, 1); KernelContext ctx; auto req = kmgr_.Setup<Kernel>(0, ctx, data_shape_, param_shape_, use_scalar_base_, use_scalar_scale_, scale_is_stddev_); ASSERT_EQ(req.output_shapes.size(), 1u); ASSERT_EQ(req.output_shapes[0], data_shape_); out_.reshape(data_shape_); CUDAEvent start = CUDAEvent::CreateWithFlags(0); CUDAEvent end = CUDAEvent::CreateWithFlags(0); auto out_gpu = out_.gpu(); CUDA_CALL( hipMemsetAsync(out_gpu.data[0], 0, sizeof(Out) * out_gpu.num_elements(), ctx.gpu.stream)); Launch(ctx); CUDA_CALL(hipEventRecord(start, ctx.gpu.stream)); Launch(ctx); CUDA_CALL(hipEventRecord(end, ctx.gpu.stream)); float time; CUDA_CALL(hipDeviceSynchronize()); CUDA_CALL(hipEventElapsedTime(&time, start, end)); time *= 1e+6f; // convert to nanoseconds int64_t out_size = data_shape_.num_elements() * sizeof(Out); int64_t in_size = data_shape_.num_elements() * sizeof(In); int64_t base_size = scalar_base_ ? 0 : param_shape_.num_elements() * sizeof(float); int64_t scale_size = scalar_scale_ ? 
0 : param_shape_.num_elements() * sizeof(float); int64_t data_size = out_size + in_size + base_size + scale_size; std::cerr << "Throughput: " << data_size / time << " GB/s\n"; } void Launch(KernelContext &ctx) { if (use_scalar_base_) { if (use_scalar_scale_) { kmgr_.Run<Kernel>(0, 0, ctx, out_.gpu(), in_.gpu(), scalar_base_, scalar_scale_, global_scale_, shift_, epsilon_); } else { kmgr_.Run<Kernel>(0, 0, ctx, out_.gpu(), in_.gpu(), scalar_base_, scale_.gpu(), global_scale_, shift_, epsilon_); } } else { if (use_scalar_scale_) { kmgr_.Run<Kernel>(0, 0, ctx, out_.gpu(), in_.gpu(), base_.gpu(), scalar_scale_, global_scale_, shift_, epsilon_); } else { kmgr_.Run<Kernel>(0, 0, ctx, out_.gpu(), in_.gpu(), base_.gpu(), scale_.gpu(), global_scale_, shift_, epsilon_); } } } protected: KernelManager kmgr_; TestTensorList<In> in_; TestTensorList<Out> out_; TestTensorList<float> ref_; TestTensorList<float> base_, scale_; TensorListShape<> data_shape_, param_shape_; SmallVector<int, 6> reduced_axes_; uint64_t axis_mask_; bool reduce_batch_ = false; bool use_scalar_base_ = false; bool use_scalar_scale_ = false; bool scale_is_stddev_ = false; float scalar_base_ = 0, scalar_scale_ = 1; float global_scale_ = 1.25f, shift_ = 0.1f, epsilon_ = 0.2f; std::mt19937_64 rng_; }; using NormalizeTestTypes = ::testing::Types< std::pair<int16_t, uint8_t>, std::pair<float, uint16_t>, std::pair<float, float>>; TYPED_TEST_SUITE(NormalizeImplGPUTest, NormalizeTestTypes); TYPED_TEST(NormalizeImplGPUTest, NonScalar) { this->Init(10, 4, 10000, { 1, 3 }, false, false, false, false); this->RunTest(); this->Init(10, 3, 10000, { 0, 2 }, true, false, false, false); this->RunTest(); } TYPED_TEST(NormalizeImplGPUTest, ScalarBase) { this->Init(10, 4, 10000, { 1, 3 }, false, true, false, false); this->RunTest(); this->Init(10, 3, 10000, { 0, 2 }, true, true, false, false); this->RunTest(); } TYPED_TEST(NormalizeImplGPUTest, ScalarScale) { this->Init(10, 4, 10000, { 1, 3 }, false, false, true, false); this->RunTest(); this->Init(10, 3, 10000, { 0, 2 }, true, false, true, false); this->RunTest(); } TYPED_TEST(NormalizeImplGPUTest, ScalarParams) { this->Init(10, 4, 10000, {}, false, true, true, false); this->RunTest(); this->Init(10, 3, 10000, {}, true, true, true, false); this->RunTest(); } TYPED_TEST(NormalizeImplGPUTest, NonScalar_InvStdDev) { this->Init(10, 4, 10000, { 1, 3 }, false, false, false, true); this->RunTest(); this->Init(10, 3, 10000, { 0, 2 }, true, false, false, true); this->RunTest(); } TYPED_TEST(NormalizeImplGPUTest, ScalarBase_InvStdDev) { this->Init(10, 4, 10000, { 1, 3 }, false, true, false, false); this->RunTest(); this->Init(10, 3, 10000, { 0, 2 }, true, true, false, false); this->RunTest(); } TYPED_TEST(NormalizeImplGPUTest, ScalarScale_InvStdDev) { this->Init(10, 4, 10000, { 1, 3 }, false, false, true, true); this->RunTest(); this->Init(10, 3, 10000, { 0, 2 }, true, false, true, true); this->RunTest(); } TYPED_TEST(NormalizeImplGPUTest, ScalarParams_InvStdDev) { this->Init(10, 4, 10000, {}, false, true, true, true); this->RunTest(); this->Init(10, 3, 10000, {}, true, true, true, true); this->RunTest(); } TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar5D) { this->Init(64, 5, 1<<20, { 1, 3 }, false, false, false, false); this->RunPerf(); } TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar3D_Reduce01) { this->Init(64, 3, 1<<20, { 0, 1 }, false, false, false, false); this->RunPerf(); } TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar3D_Reduce12) { this->Init(64, 3, 1<<20, { 1, 2 }, false, false, false, false); 
this->RunPerf(); } TYPED_TEST(NormalizeImplGPUTest, Perf_ScalarParams) { this->Init(64, 3, 1<<20, {}, false, true, true, false); this->RunPerf(); } TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar5D_InvStdDev) { this->Init(64, 5, 1<<20, { 1, 3 }, false, false, false, true); this->RunPerf(); } TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar3D_Reduce01_InvStdDev) { this->Init(64, 3, 1<<20, { 0, 1 }, false, false, false, true); this->RunPerf(); } TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar3D_Reduce12_InvStdDev) { this->Init(64, 3, 1<<20, { 1, 2 }, false, false, false, true); this->RunPerf(); } TYPED_TEST(NormalizeImplGPUTest, Perf_ScalarParams_InvStdDev) { this->Init(64, 3, 1<<20, {}, false, true, true, true); this->RunPerf(); } } // namespace kernels } // namespace dali
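The test file above checks the GPU kernels against RefNormalize, whose per-element work reduces to a single fused multiply-add. The host-side sketch below restates that formula for reference; it is illustrative only — `normalize_one` is not a name from the DALI sources, the saturating ConvertSat<Out> cast is omitted, and 1/sqrt stands in for the device rsqrt.

#include <cmath>

// Illustrative restatement of the per-element formula applied by RefNormalize.
inline float normalize_one(float in, float base, float scale,
                           float global_scale, float shift,
                           bool scale_is_stddev, float epsilon) {
  float mul;
  if (scale_is_stddev) {
    // `scale` holds a standard deviation: regularize with epsilon, then invert.
    float x = scale * scale + epsilon;
    mul = x ? global_scale / std::sqrt(x) : 0.0f;
  } else {
    mul = scale * global_scale;
  }
  return std::fma(in - base, mul, shift);  // (in - base) * mul + shift
}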
ae34ac93bacd620d8089b90dfd2bffdea70266b1.cu
// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "dali/kernels/normalize/normalize_gpu.h" // NOLINT #include "dali/kernels/normalize/normalize_gpu_impl.cuh" // NOLINT #include <gtest/gtest.h> #include <cmath> #include <initializer_list> #include <iostream> #include <random> #include <utility> #include "dali/core/cuda_event.h" #include "dali/kernels/kernel_manager.h" #include "dali/test/device_test.h" #include "dali/test/test_tensors.h" #include "dali/test/tensor_test_utils.h" namespace dali { namespace kernels { template <bool calc_inv_stddev, typename Out, typename In> void RefNormalize( const OutTensorCPU<Out> &out, const InTensorCPU<In> &in, const InTensorCPU<float> &base, const InTensorCPU<float> &scale, float epsilon, float global_scale, float shift, TensorShape<> &data_pos, TensorShape<> &base_pos, TensorShape<> &scale_pos, int dim) { int db = 0, ds = 0; int64_t extent = 0; if (dim < in.dim()) { db = base.shape[dim] > 1 ? 1 : 0; ds = scale.shape[dim] > 1 ? 1 : 0; extent = in.shape[dim]; } if (dim >= in.dim() - 1) { // handles both last dimension and degenerate case Out *optr = out(data_pos); const In *iptr = in(data_pos); const float *sptr = scale(scale_pos); const float *bptr = base(base_pos); for (int64_t i = 0, b = 0, s = 0; i < extent; i++, b += db, s += ds) { float mul; if (calc_inv_stddev) { float x = sptr[s] * sptr[s] + epsilon; mul = x ? rsqrt(x) * global_scale : 0; } else { mul = sptr[s] * global_scale; } optr[i] = ConvertSat<Out>(std::fma(iptr[i] - bptr[b], mul, shift)); } } else { for (int64_t i = 0, b = 0, s = 0; i < extent; i++, b += db, s += ds) { data_pos[dim] = i; base_pos[dim] = b; scale_pos[dim] = s; RefNormalize<calc_inv_stddev>(out, in, base, scale, epsilon, global_scale, shift, data_pos, base_pos, scale_pos, dim + 1); } } } /** * @brief Reference normalization of a single tensor * * If base/scale has an extent of 1 in any given dimension, it's broadcast along this axis. * * @param calc_inv_stddev if true, `scale` is assumed to contain standard deviation, which * is subsequently regularized using given epsilon value */ template <typename Out, typename In> void RefNormalize( const OutTensorCPU<Out> &out, const InTensorCPU<In> &in, const InTensorCPU<float> &base, const InTensorCPU<float> &scale, float global_scale, float shift, bool calc_inv_stddev, float epsilon) { TensorShape<> data_pos, base_pos, scale_pos; int D = in.dim(); data_pos.resize(D); base_pos.resize(D); scale_pos.resize(D); if (calc_inv_stddev) { RefNormalize<true>(out, in, base, scale, epsilon, global_scale, shift, data_pos, base_pos, scale_pos, 0); } else { RefNormalize<false>(out, in, base, scale, epsilon, global_scale, shift, data_pos, base_pos, scale_pos, 0); } } /** * @brief Reference implementation of normalization * * Goes over all input samples and normalizes them using given base and scale tensor lists. * If base/scale TL has 1 element, it is reused for normalization of all samples. 
* If base/scale has an extent of 1 in any given dimension, it's broadcast along this axis. * * @param calc_inv_stddev if true, `scale` is assumed to contain standard deviation, which * is subsequently regularized using given epsilon value */ template <typename Out, typename In> void RefNormalize( const OutListCPU<Out> &out, const TensorListView<StorageCPU, In> &in, const InListCPU<float> &base, const InListCPU<float> &scale, float global_scale, float shift, bool calc_inv_stddev = false, float epsilon = 0) { assert(out.shape == in.shape); int N = in.num_samples(); int db = base.num_samples() > 1; int ds = scale.num_samples() > 1; for (int i = 0, b = 0, s = 0; i < N; i++, b += db, s += ds) { RefNormalize<Out, In>(out[i], in[i], base[b], scale[s], global_scale, shift, calc_inv_stddev, epsilon); } } template <typename RNG> TensorListShape<> RandomDataShape(int num_samples, int ndim, int64_t max_volume, uint64_t reduced_axes, bool reduce_batch, RNG &rng) { assert(max_volume >= 1); TensorListShape<> sh; sh.resize(num_samples, ndim); int64_t extent_range = std::ceil(pow(max_volume, 1.0 / ndim)); std::uniform_int_distribution<int64_t> shape_dist(1, extent_range); for (int i = 0; i < num_samples; i++) { auto sample_shape = sh.tensor_shape_span(i); do { for (int d = 0; d < ndim; d++) { // when reducing samples in the batch, the non-reduced extents must be uniform // across all samples sample_shape[d] = reduced_axes & (1 << d) || !reduce_batch || i == 0 ? shape_dist(rng) : sh.tensor_shape_span(0)[d]; } } while (volume(sample_shape) > max_volume); } return sh; } /** * @brief Creates a tensor list which contains a repeated scalar * * If ndim > 0, then the tensor list will contain 1x1x...x1 tensors with given dimensionality */ template <typename T> TensorListView<StorageCPU, T> ScalarTLV(T &scalar, int num_samples, int ndim = 0) { TensorListView<StorageCPU, T> tlv; TensorShape<> ts; ts.resize(ndim); for (int d = 0; d < ndim; d++) ts[d] = 1; tlv.shape = uniform_list_shape(num_samples, ts); tlv.data.resize(num_samples); for (int i = 0 ; i < num_samples; i++) tlv.data[i] = &scalar; return tlv; } template <typename Params> class NormalizeImplGPUTest; template <typename Out, typename In> class NormalizeImplGPUTest<std::pair<Out, In>> : public ::testing::Test { public: // this will test both the top-level pImpl class and the internal implementation class using Kernel = std::conditional_t<std::is_same<Out, In>::value, NormalizeGPU<Out, In>, normalize_impl::NormalizeImplGPU<Out, In, float, float> >; void Init(int num_samples, int ndim, int64_t max_sample_volume, std::initializer_list<int> reduced_axes, bool reduce_batch, bool scalar_base, bool scalar_scale, bool scale_is_stddev) { Init(num_samples, ndim, max_sample_volume, { reduced_axes.begin(), reduced_axes.end() }, reduce_batch, scalar_base, scalar_scale, scale_is_stddev); } void Init(int num_samples, int ndim, int64_t max_sample_volume, span<const int> reduced_axes, bool reduce_batch, bool scalar_base, bool scalar_scale, bool scale_is_stddev) { In lo = 0, hi = 100; use_scalar_base_ = scalar_base; use_scalar_scale_ = scalar_scale; axis_mask_ = to_bit_mask(reduced_axes); reduced_axes_ = { begin(reduced_axes), end(reduced_axes) }; reduce_batch_ = reduce_batch; scale_is_stddev_ = scale_is_stddev; data_shape_ = RandomDataShape(num_samples, ndim, max_sample_volume, axis_mask_, reduce_batch_, rng_); in_.reshape(data_shape_); UniformRandomFill(in_.cpu(), rng_, lo, hi); if (!scalar_base || !scalar_scale) { int param_samples = reduce_batch ? 
1 : num_samples; param_shape_.resize(param_samples, ndim); for (int i = 0; i < param_samples; i++) { for (int d = 0; d < ndim; d++) { bool reduced = axis_mask_ & (1 << d); param_shape_.tensor_shape_span(i)[d] = reduced ? 1 : data_shape_.tensor_shape_span(i)[d]; } } } else { param_shape_.resize(1, 0); } auto scale_dist = uniform_distribution(0.1f, 10.0f); if (scalar_scale) { scalar_scale_ = scale_dist(rng_); } else { scale_.reshape(param_shape_); UniformRandomFill(scale_.cpu(), rng_, scale_dist.a(), scale_dist.b()); } if (scalar_base) { scalar_base_ = uniform_distribution(lo, hi)(rng_); } else { base_.reshape(param_shape_); UniformRandomFill(base_.cpu(), rng_, lo, hi); } if (std::is_integral<Out>::value) { global_scale_ = std::exp2f(7 * sizeof(Out)) / hi; // scale to half range if (std::is_unsigned<Out>::value) shift_ = global_scale_; // shift half range up } } void RunTest() { kmgr_.Resize<Kernel>(1, 1); KernelContext ctx; for (int iter = 0; iter < 3; iter++) { auto req = kmgr_.Setup<Kernel>(0, ctx, data_shape_, param_shape_, use_scalar_base_, use_scalar_scale_, scale_is_stddev_); ASSERT_EQ(req.output_shapes.size(), 1u); ASSERT_EQ(req.output_shapes[0], data_shape_); out_.reshape(data_shape_); ref_.reshape(data_shape_); Launch(ctx); int param_samples = param_shape_.num_samples(); auto ref_base = use_scalar_base_ ? ScalarTLV(scalar_base_, param_samples, data_shape_.sample_dim()) : base_.cpu(); auto ref_scale = use_scalar_scale_ ? ScalarTLV(scalar_scale_, param_samples, data_shape_.sample_dim()) : scale_.cpu(); RefNormalize(ref_.cpu(), in_.cpu(), ref_base, ref_scale, global_scale_, shift_, scale_is_stddev_, epsilon_); if (scale_is_stddev_ && !std::is_integral<Out>::value) Check(out_.cpu(), ref_.cpu(), EqualEpsRel(1e-6, 1e-6)); else Check(out_.cpu(), ref_.cpu(), EqualUlp(4)); } } void RunPerf() { kmgr_.Resize<Kernel>(1, 1); KernelContext ctx; auto req = kmgr_.Setup<Kernel>(0, ctx, data_shape_, param_shape_, use_scalar_base_, use_scalar_scale_, scale_is_stddev_); ASSERT_EQ(req.output_shapes.size(), 1u); ASSERT_EQ(req.output_shapes[0], data_shape_); out_.reshape(data_shape_); CUDAEvent start = CUDAEvent::CreateWithFlags(0); CUDAEvent end = CUDAEvent::CreateWithFlags(0); auto out_gpu = out_.gpu(); CUDA_CALL( cudaMemsetAsync(out_gpu.data[0], 0, sizeof(Out) * out_gpu.num_elements(), ctx.gpu.stream)); Launch(ctx); CUDA_CALL(cudaEventRecord(start, ctx.gpu.stream)); Launch(ctx); CUDA_CALL(cudaEventRecord(end, ctx.gpu.stream)); float time; CUDA_CALL(cudaDeviceSynchronize()); CUDA_CALL(cudaEventElapsedTime(&time, start, end)); time *= 1e+6f; // convert to nanoseconds int64_t out_size = data_shape_.num_elements() * sizeof(Out); int64_t in_size = data_shape_.num_elements() * sizeof(In); int64_t base_size = scalar_base_ ? 0 : param_shape_.num_elements() * sizeof(float); int64_t scale_size = scalar_scale_ ? 
0 : param_shape_.num_elements() * sizeof(float); int64_t data_size = out_size + in_size + base_size + scale_size; std::cerr << "Throughput: " << data_size / time << " GB/s\n"; } void Launch(KernelContext &ctx) { if (use_scalar_base_) { if (use_scalar_scale_) { kmgr_.Run<Kernel>(0, 0, ctx, out_.gpu(), in_.gpu(), scalar_base_, scalar_scale_, global_scale_, shift_, epsilon_); } else { kmgr_.Run<Kernel>(0, 0, ctx, out_.gpu(), in_.gpu(), scalar_base_, scale_.gpu(), global_scale_, shift_, epsilon_); } } else { if (use_scalar_scale_) { kmgr_.Run<Kernel>(0, 0, ctx, out_.gpu(), in_.gpu(), base_.gpu(), scalar_scale_, global_scale_, shift_, epsilon_); } else { kmgr_.Run<Kernel>(0, 0, ctx, out_.gpu(), in_.gpu(), base_.gpu(), scale_.gpu(), global_scale_, shift_, epsilon_); } } } protected: KernelManager kmgr_; TestTensorList<In> in_; TestTensorList<Out> out_; TestTensorList<float> ref_; TestTensorList<float> base_, scale_; TensorListShape<> data_shape_, param_shape_; SmallVector<int, 6> reduced_axes_; uint64_t axis_mask_; bool reduce_batch_ = false; bool use_scalar_base_ = false; bool use_scalar_scale_ = false; bool scale_is_stddev_ = false; float scalar_base_ = 0, scalar_scale_ = 1; float global_scale_ = 1.25f, shift_ = 0.1f, epsilon_ = 0.2f; std::mt19937_64 rng_; }; using NormalizeTestTypes = ::testing::Types< std::pair<int16_t, uint8_t>, std::pair<float, uint16_t>, std::pair<float, float>>; TYPED_TEST_SUITE(NormalizeImplGPUTest, NormalizeTestTypes); TYPED_TEST(NormalizeImplGPUTest, NonScalar) { this->Init(10, 4, 10000, { 1, 3 }, false, false, false, false); this->RunTest(); this->Init(10, 3, 10000, { 0, 2 }, true, false, false, false); this->RunTest(); } TYPED_TEST(NormalizeImplGPUTest, ScalarBase) { this->Init(10, 4, 10000, { 1, 3 }, false, true, false, false); this->RunTest(); this->Init(10, 3, 10000, { 0, 2 }, true, true, false, false); this->RunTest(); } TYPED_TEST(NormalizeImplGPUTest, ScalarScale) { this->Init(10, 4, 10000, { 1, 3 }, false, false, true, false); this->RunTest(); this->Init(10, 3, 10000, { 0, 2 }, true, false, true, false); this->RunTest(); } TYPED_TEST(NormalizeImplGPUTest, ScalarParams) { this->Init(10, 4, 10000, {}, false, true, true, false); this->RunTest(); this->Init(10, 3, 10000, {}, true, true, true, false); this->RunTest(); } TYPED_TEST(NormalizeImplGPUTest, NonScalar_InvStdDev) { this->Init(10, 4, 10000, { 1, 3 }, false, false, false, true); this->RunTest(); this->Init(10, 3, 10000, { 0, 2 }, true, false, false, true); this->RunTest(); } TYPED_TEST(NormalizeImplGPUTest, ScalarBase_InvStdDev) { this->Init(10, 4, 10000, { 1, 3 }, false, true, false, false); this->RunTest(); this->Init(10, 3, 10000, { 0, 2 }, true, true, false, false); this->RunTest(); } TYPED_TEST(NormalizeImplGPUTest, ScalarScale_InvStdDev) { this->Init(10, 4, 10000, { 1, 3 }, false, false, true, true); this->RunTest(); this->Init(10, 3, 10000, { 0, 2 }, true, false, true, true); this->RunTest(); } TYPED_TEST(NormalizeImplGPUTest, ScalarParams_InvStdDev) { this->Init(10, 4, 10000, {}, false, true, true, true); this->RunTest(); this->Init(10, 3, 10000, {}, true, true, true, true); this->RunTest(); } TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar5D) { this->Init(64, 5, 1<<20, { 1, 3 }, false, false, false, false); this->RunPerf(); } TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar3D_Reduce01) { this->Init(64, 3, 1<<20, { 0, 1 }, false, false, false, false); this->RunPerf(); } TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar3D_Reduce12) { this->Init(64, 3, 1<<20, { 1, 2 }, false, false, false, false); 
this->RunPerf(); } TYPED_TEST(NormalizeImplGPUTest, Perf_ScalarParams) { this->Init(64, 3, 1<<20, {}, false, true, true, false); this->RunPerf(); } TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar5D_InvStdDev) { this->Init(64, 5, 1<<20, { 1, 3 }, false, false, false, true); this->RunPerf(); } TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar3D_Reduce01_InvStdDev) { this->Init(64, 3, 1<<20, { 0, 1 }, false, false, false, true); this->RunPerf(); } TYPED_TEST(NormalizeImplGPUTest, Perf_NonScalar3D_Reduce12_InvStdDev) { this->Init(64, 3, 1<<20, { 1, 2 }, false, false, false, true); this->RunPerf(); } TYPED_TEST(NormalizeImplGPUTest, Perf_ScalarParams_InvStdDev) { this->Init(64, 3, 1<<20, {}, false, true, true, true); this->RunPerf(); } } // namespace kernels } // namespace dali
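RunPerf in both copies of this test times the second launch with a pair of CUDA events and converts bytes moved over elapsed nanoseconds into GB/s. The following is a self-contained sketch of that timing pattern, with a placeholder `dummy_kernel` standing in for the normalize kernel and error handling omitted; the test itself synchronizes the whole device rather than the end event.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void dummy_kernel() {}  // placeholder for the measured kernel

int main() {
  cudaEvent_t start, end;
  cudaEventCreate(&start);
  cudaEventCreate(&end);

  dummy_kernel<<<1, 1>>>();        // warm-up launch (the first Launch(ctx) above)
  cudaEventRecord(start, 0);
  dummy_kernel<<<1, 1>>>();        // measured launch
  cudaEventRecord(end, 0);

  cudaEventSynchronize(end);
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, end);   // elapsed time in milliseconds
  std::printf("elapsed: %f ms\n", ms);

  cudaEventDestroy(start);
  cudaEventDestroy(end);
  return 0;
}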
22cee0fc9cc39f4c7218ec9f5eafb034a5f2d968.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/TemporalMaxPooling.cu" #else static inline void THNN_(TemporalMaxPooling_shapeCheck)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCIndexTensor *indices, int kW, int dW) { int dimT = 0; // Temporal dimension int dimF = 1; // Feature dimension int input_w; int input_n; int output_w; int ndims = input->dim(); if (ndims == 3) { dimT = 1; dimF = 2; } THArgCheck(kW > 0, 5, "kernel size should be greater than zero, but got kW: %d", kW); THArgCheck(dW > 0, 6, "stride should be greater than zero, but got dW: %d", dW); THCUNN_argCheck(state, !input->is_empty() && (input->dim() == 2 || input->dim() == 3), 2, input, "non-empty 2D or 3D (batch mode) tensor expected for input, but got: %s"); THArgCheck(input->size[dimT] >= kW, 2, "input sequence smaller than kernel size. Got: %d, Expected: %d", input->size[dimT], kW); input_w = input->size[dimT]; input_n = input->size[dimF]; output_w = (input_w - kW) / dW + 1; if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, ndims, dimT, output_w); THCUNN_check_dim_size(state, gradOutput, ndims, dimF, input_n) } if (indices != NULL) { THCUNN_check_dim_size_indices(state, indices, ndims, dimT, output_w); THCUNN_check_dim_size_indices(state, indices, ndims, dimF, input_n); } } void THNN_(TemporalMaxPooling_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCIndexTensor *indices, int kW, int dW) { int dimT = 0; // Temporal dimension int dimF = 1; // Feature dimension int batch = 1; int input_w; int input_n; int output_w; int nthreads; real *input_data; real *output_data; THCIndex_t *indices_data; THCUNN_assertSameGPU(state, 3, input, output, indices); THNN_(TemporalMaxPooling_shapeCheck)(state, input, NULL, NULL, kW, dW); if (input->dim() == 3) { dimT = 1; dimF = 2; batch = input->size[0]; } input = THCTensor_(newContiguous)(state, input); input_w = input->size[dimT]; input_n = input->size[dimF]; output_w = (input_w - kW) / dW + 1; if (input->dim() == 2) { THCTensor_(resize2d)(state, output, output_w, input->size[dimF]); THCIndexTensor_(resize2d)(state, indices, output_w, input->size[dimF]); } else { THCTensor_(resize3d)(state, output, batch, output_w, input->size[dimF]); THCIndexTensor_(resize3d)(state, indices, batch, output_w, input->size[dimF]); } input_data = THCTensor_(data)(state, input); output_data = THCTensor_(data)(state, output); indices_data = THCIndexTensor_(data)(state, indices); dim3 blocks(batch); nthreads = (output_w / 32) * 32; if (output_w % 32 > 0) { nthreads += 32; } if (nthreads > TEMPORAL_MAX_POOLING_THREADS) { blocks.y = nthreads / TEMPORAL_MAX_POOLING_THREADS; if (nthreads % TEMPORAL_MAX_POOLING_THREADS > 0) { blocks.y += 1; } nthreads = TEMPORAL_MAX_POOLING_THREADS; } dim3 threads(nthreads); hipLaunchKernelGGL(( cunn_TemporalMaxPooling_updateOutputKernel) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state) , input_data, output_data, indices_data, input_w, input_n, output_w, kW, dW); THCudaCheck(hipGetLastError()); THCTensor_(free)(state, input); } void THNN_(TemporalMaxPooling_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCIndexTensor *indices, int kW, int dW) { int dimT = 0; // Temporal dimension int dimF = 1; // Feature dimension int batch = 1; int input_w; int input_n; int output_w; int nthreads; real *gradInput_data; real *gradOutput_data; THCIndex_t *indices_data; 
THCUNN_assertSameGPU(state, 4, input, gradOutput, gradInput, indices); THNN_(TemporalMaxPooling_shapeCheck)(state, input, gradOutput, indices, kW, dW); THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); if (input->dim() == 3) { dimT = 1; dimF = 2; batch = input->size[0]; } gradOutput = THCTensor_(newContiguous)(state, gradOutput); input_w = input->size[dimT]; input_n = input->size[dimF]; output_w = (input_w - kW) / dW + 1; gradInput_data = THCTensor_(data)(state, gradInput); gradOutput_data = THCTensor_(data)(state, gradOutput); indices_data = THCIndexTensor_(data)(state, indices); dim3 blocks(batch); nthreads = (output_w / 32) * 32; if (output_w % 32 > 0) { nthreads += 32; } if (nthreads > TEMPORAL_MAX_POOLING_THREADS) { blocks.y = nthreads / TEMPORAL_MAX_POOLING_THREADS; if (nthreads % TEMPORAL_MAX_POOLING_THREADS > 0) { blocks.y += 1; } nthreads = TEMPORAL_MAX_POOLING_THREADS; } dim3 threads(nthreads); if (kW <= dW) { hipLaunchKernelGGL(( cunn_TemporalMaxPooling_updateGradInputKernel) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state) , gradInput_data, gradOutput_data, indices_data, input_w, input_n, output_w, kW, dW); } else { hipLaunchKernelGGL(( cunn_TemporalMaxPooling_updateGradInputKernelAtomic) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state) , gradInput_data, gradOutput_data, indices_data, input_w, input_n, output_w, kW, dW); } THCudaCheck(hipGetLastError()); THCTensor_(free)(state, gradOutput); } #endif
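Both updateOutput and updateGradInput above recompute the same launch geometry: one block per batch sample in x, the output width rounded up to a warp multiple, and any excess beyond the per-block thread cap spilled into blocks.y. A sketch of that computation follows; the value 1024 is only a stand-in for the TEMPORAL_MAX_POOLING_THREADS macro defined elsewhere in THCUNN, and the helper name is hypothetical.

#include <cuda_runtime.h>

constexpr int kMaxThreads = 1024;  // stand-in for TEMPORAL_MAX_POOLING_THREADS

inline void temporal_pool_launch_geometry(int output_w, int batch,
                                          dim3 *blocks, dim3 *threads) {
  int nthreads = (output_w / 32) * 32;
  if (output_w % 32 > 0) nthreads += 32;       // round up to a multiple of the warp size

  *blocks = dim3(batch);
  if (nthreads > kMaxThreads) {
    blocks->y = nthreads / kMaxThreads;        // extra blocks in y cover the remaining columns
    if (nthreads % kMaxThreads > 0) blocks->y += 1;
    nthreads = kMaxThreads;
  }
  *threads = dim3(nthreads);
}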
22cee0fc9cc39f4c7218ec9f5eafb034a5f2d968.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/TemporalMaxPooling.cu" #else static inline void THNN_(TemporalMaxPooling_shapeCheck)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCIndexTensor *indices, int kW, int dW) { int dimT = 0; // Temporal dimension int dimF = 1; // Feature dimension int input_w; int input_n; int output_w; int ndims = input->dim(); if (ndims == 3) { dimT = 1; dimF = 2; } THArgCheck(kW > 0, 5, "kernel size should be greater than zero, but got kW: %d", kW); THArgCheck(dW > 0, 6, "stride should be greater than zero, but got dW: %d", dW); THCUNN_argCheck(state, !input->is_empty() && (input->dim() == 2 || input->dim() == 3), 2, input, "non-empty 2D or 3D (batch mode) tensor expected for input, but got: %s"); THArgCheck(input->size[dimT] >= kW, 2, "input sequence smaller than kernel size. Got: %d, Expected: %d", input->size[dimT], kW); input_w = input->size[dimT]; input_n = input->size[dimF]; output_w = (input_w - kW) / dW + 1; if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, ndims, dimT, output_w); THCUNN_check_dim_size(state, gradOutput, ndims, dimF, input_n) } if (indices != NULL) { THCUNN_check_dim_size_indices(state, indices, ndims, dimT, output_w); THCUNN_check_dim_size_indices(state, indices, ndims, dimF, input_n); } } void THNN_(TemporalMaxPooling_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCIndexTensor *indices, int kW, int dW) { int dimT = 0; // Temporal dimension int dimF = 1; // Feature dimension int batch = 1; int input_w; int input_n; int output_w; int nthreads; real *input_data; real *output_data; THCIndex_t *indices_data; THCUNN_assertSameGPU(state, 3, input, output, indices); THNN_(TemporalMaxPooling_shapeCheck)(state, input, NULL, NULL, kW, dW); if (input->dim() == 3) { dimT = 1; dimF = 2; batch = input->size[0]; } input = THCTensor_(newContiguous)(state, input); input_w = input->size[dimT]; input_n = input->size[dimF]; output_w = (input_w - kW) / dW + 1; if (input->dim() == 2) { THCTensor_(resize2d)(state, output, output_w, input->size[dimF]); THCIndexTensor_(resize2d)(state, indices, output_w, input->size[dimF]); } else { THCTensor_(resize3d)(state, output, batch, output_w, input->size[dimF]); THCIndexTensor_(resize3d)(state, indices, batch, output_w, input->size[dimF]); } input_data = THCTensor_(data)(state, input); output_data = THCTensor_(data)(state, output); indices_data = THCIndexTensor_(data)(state, indices); dim3 blocks(batch); nthreads = (output_w / 32) * 32; if (output_w % 32 > 0) { nthreads += 32; } if (nthreads > TEMPORAL_MAX_POOLING_THREADS) { blocks.y = nthreads / TEMPORAL_MAX_POOLING_THREADS; if (nthreads % TEMPORAL_MAX_POOLING_THREADS > 0) { blocks.y += 1; } nthreads = TEMPORAL_MAX_POOLING_THREADS; } dim3 threads(nthreads); cunn_TemporalMaxPooling_updateOutputKernel <<< blocks, threads, 0, THCState_getCurrentStream(state) >>>( input_data, output_data, indices_data, input_w, input_n, output_w, kW, dW); THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, input); } void THNN_(TemporalMaxPooling_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCIndexTensor *indices, int kW, int dW) { int dimT = 0; // Temporal dimension int dimF = 1; // Feature dimension int batch = 1; int input_w; int input_n; int output_w; int nthreads; real *gradInput_data; real *gradOutput_data; THCIndex_t *indices_data; THCUNN_assertSameGPU(state, 4, input, gradOutput, gradInput, indices); THNN_(TemporalMaxPooling_shapeCheck)(state, input, 
gradOutput, indices, kW, dW); THCTensor_(resizeAs)(state, gradInput, input); THCTensor_(zero)(state, gradInput); if (input->dim() == 3) { dimT = 1; dimF = 2; batch = input->size[0]; } gradOutput = THCTensor_(newContiguous)(state, gradOutput); input_w = input->size[dimT]; input_n = input->size[dimF]; output_w = (input_w - kW) / dW + 1; gradInput_data = THCTensor_(data)(state, gradInput); gradOutput_data = THCTensor_(data)(state, gradOutput); indices_data = THCIndexTensor_(data)(state, indices); dim3 blocks(batch); nthreads = (output_w / 32) * 32; if (output_w % 32 > 0) { nthreads += 32; } if (nthreads > TEMPORAL_MAX_POOLING_THREADS) { blocks.y = nthreads / TEMPORAL_MAX_POOLING_THREADS; if (nthreads % TEMPORAL_MAX_POOLING_THREADS > 0) { blocks.y += 1; } nthreads = TEMPORAL_MAX_POOLING_THREADS; } dim3 threads(nthreads); if (kW <= dW) { cunn_TemporalMaxPooling_updateGradInputKernel <<< blocks, threads, 0, THCState_getCurrentStream(state) >>>( gradInput_data, gradOutput_data, indices_data, input_w, input_n, output_w, kW, dW); } else { cunn_TemporalMaxPooling_updateGradInputKernelAtomic <<< blocks, threads, 0, THCState_getCurrentStream(state) >>>( gradInput_data, gradOutput_data, indices_data, input_w, input_n, output_w, kW, dW); } THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, gradOutput); } #endif
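Apart from the hipify banner and the runtime header/API spellings (hipGetLastError vs cudaGetLastError), the .hip and .cu versions of this file differ in the kernel-launch syntax: hipify rewrites the triple-chevron launch as a hipLaunchKernelGGL call with the grid, block, shared-memory size and stream as leading arguments. A minimal pair using a hypothetical `toy_kernel`, shown as a CUDA sketch with the HIP form in a comment:

#include <cuda_runtime.h>

__global__ void toy_kernel(float *out) { out[threadIdx.x] = 1.0f; }

void launch_toy(float *out, dim3 blocks, dim3 threads, cudaStream_t stream) {
  // CUDA form, as in the .cu file above:
  toy_kernel<<<blocks, threads, 0, stream>>>(out);
  // HIP form produced by hipify (compare the .hip file above):
  //   hipLaunchKernelGGL((toy_kernel), dim3(blocks), dim3(threads), 0, stream, out);
}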
8c97a72b567e79c66b57c82c8eae75ea68e77eb5.hip
// !!! This is a file automatically generated by hipify!!!
//
// nvcc list_threads.cu
//
// basic intro to cuda kernel
//
#include <hip/hip_runtime.h>
#include <cstdlib>
#include <iostream>

using namespace std;

__global__ void saveTid(int *tids, int numElements)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < numElements) {
        tids[tid*2]   = blockIdx.x;
        tids[tid*2+1] = threadIdx.x;
    }
}

int main(int argc, char *argv[])
{
    if(argc < 3) {
        cout << "missing argument.\nUsage: list_threads <numElements> <numThreads>\n"
                "try: list_threads 20 5\n";
        return -1;
    }
    int numElements = atoi(argv[1]);
    int numThreads  = atoi(argv[2]);

    int *dTids;
    int threadsPerBlock = numThreads;
    int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    cout << "CUDA kernel launch with " << blocksPerGrid << " blocks of "
         << threadsPerBlock << " threads\n";

    hipMalloc(&dTids, sizeof(int)*numElements*2);
    hipLaunchKernelGGL(saveTid, dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0,
                       dTids, numElements);

    int *hTids = new int[numElements * 2];
    hipMemcpy(hTids, dTids, sizeof(int) * numElements * 2, hipMemcpyDeviceToHost);

    for(int i = 0; i < numElements; ++i) {
        std::cout << i << ": blockId " << hTids[i*2]
                  << ", threadId " << hTids[i*2+1] << "\n";
    }

    delete[] hTids;
    hipFree(dTids);
    return 0;
}
8c97a72b567e79c66b57c82c8eae75ea68e77eb5.cu
//
// nvcc list_threads.cu
//
// basic intro to cuda kernel
//
#include <cuda_runtime.h>
#include <cstdlib>
#include <iostream>

using namespace std;

__global__ void saveTid(int *tids, int numElements)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < numElements) {
        tids[tid*2]   = blockIdx.x;
        tids[tid*2+1] = threadIdx.x;
    }
}

int main(int argc, char *argv[])
{
    if(argc < 3) {
        cout << "missing argument.\nUsage: list_threads <numElements> <numThreads>\n"
                "try: list_threads 20 5\n";
        return -1;
    }
    int numElements = atoi(argv[1]);
    int numThreads  = atoi(argv[2]);

    int *dTids;
    int threadsPerBlock = numThreads;
    int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    cout << "CUDA kernel launch with " << blocksPerGrid << " blocks of "
         << threadsPerBlock << " threads\n";

    cudaMalloc(&dTids, sizeof(int)*numElements*2);
    saveTid<<<blocksPerGrid,threadsPerBlock>>>(dTids, numElements);

    int *hTids = new int[numElements * 2];
    cudaMemcpy(hTids, dTids, sizeof(int) * numElements * 2, cudaMemcpyDeviceToHost);

    for(int i = 0; i < numElements; ++i) {
        std::cout << i << ": blockId " << hTids[i*2]
                  << ", threadId " << hTids[i*2+1] << "\n";
    }

    delete[] hTids;
    cudaFree(dTids);
    return 0;
}
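The blocksPerGrid expression in both copies of this example is the usual ceiling division: enough blocks that blocksPerGrid * threadsPerBlock >= numElements, with the tail guarded by the `if (tid < numElements)` check inside saveTid. For the suggested invocation `list_threads 20 5` this yields 4 blocks of 5 threads; the compile-time checks below are illustrative only.

constexpr int ceil_div(int n, int block) { return (n + block - 1) / block; }

static_assert(ceil_div(20, 5) == 4, "20 elements with 5 threads per block -> 4 blocks");
static_assert(ceil_div(21, 5) == 5, "one extra block covers the 1-element remainder");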
8459e313fd484e2a991e384fcb207661c0bdc3eb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2019, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/matrix/ell_kernels.hpp" #include <array> #include <ginkgo/core/base/exception_helpers.hpp> #include <ginkgo/core/base/math.hpp> #include <ginkgo/core/base/types.hpp> #include <ginkgo/core/matrix/csr.hpp> #include <ginkgo/core/matrix/dense.hpp> #include "core/matrix/dense_kernels.hpp" #include "core/synthesizer/implementation_selection.hpp" #include "cuda/base/cusparse_bindings.hpp" #include "cuda/base/types.hpp" #include "cuda/components/atomic.cuh" #include "cuda/components/cooperative_groups.cuh" #include "cuda/components/prefix_sum.cuh" #include "cuda/components/reduction.cuh" #include "cuda/components/zero_array.hpp" namespace gko { namespace kernels { namespace cuda { /** * @brief The ELL matrix format namespace. * * @ingroup ell */ namespace ell { constexpr int default_block_size = 512; // TODO: num_threads_per_core and ratio are parameters should be tuned /** * num_threads_per_core is the oversubscribing parameter. There are * `num_threads_per_core` threads assigned to each physical core. */ constexpr int num_threads_per_core = 4; /** * ratio is the parameter to decide when to use threads to do reduction on each * row. (#cols/#rows > ratio) */ constexpr double ratio = 1e-2; /** * A compile-time list of sub-warp sizes for which the spmv kernels should be * compiled. * 0 is a special case where it uses a sub-warp size of 32 in * combination with atomic_adds. 
*/ using compiled_kernels = syn::value_list<int, 0, 1, 2, 4, 8, 16, 32>; namespace kernel { namespace { template <int subwarp_size, bool atomic, typename ValueType, typename IndexType, typename Closure> __device__ void spmv_kernel(const size_type num_rows, const ValueType *__restrict__ val, const IndexType *__restrict__ col, const size_type stride, const size_type num_stored_elements_per_row, const ValueType *__restrict__ b, const size_type b_stride, ValueType *__restrict__ c, const size_type c_stride, Closure op) { const auto tidx = static_cast<IndexType>(blockDim.x) * blockIdx.x + threadIdx.x; const auto nwarps_per_row = gridDim.x * blockDim.x / num_rows / subwarp_size; const auto x = tidx / subwarp_size / nwarps_per_row; const auto warp_id = tidx / subwarp_size % nwarps_per_row; const auto y_start = tidx % subwarp_size + num_stored_elements_per_row * warp_id / nwarps_per_row; const auto y_end = num_stored_elements_per_row * (warp_id + 1) / nwarps_per_row; if (x < num_rows) { const auto tile_block = group::tiled_partition<subwarp_size>(group::this_thread_block()); ValueType temp = zero<ValueType>(); const auto column_id = blockIdx.y; for (IndexType idx = y_start; idx < y_end; idx += subwarp_size) { const auto ind = x + idx * stride; const auto col_idx = col[ind]; if (col_idx < idx) { break; } else { temp += val[ind] * b[col_idx * b_stride + column_id]; } } const auto answer = reduce( tile_block, temp, [](ValueType x, ValueType y) { return x + y; }); if (tile_block.thread_rank() == 0) { if (atomic) { atomic_add(&(c[x * c_stride + column_id]), op(answer, c[x * c_stride + column_id])); } else { c[x * c_stride + column_id] = op(answer, c[x * c_stride + column_id]); } } } } template <int subwarp_size, bool atomic = false, typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void spmv( const size_type num_rows, const ValueType *__restrict__ val, const IndexType *__restrict__ col, const size_type stride, const size_type num_stored_elements_per_row, const ValueType *__restrict__ b, const size_type b_stride, ValueType *__restrict__ c, const size_type c_stride) { spmv_kernel<subwarp_size, atomic>( num_rows, val, col, stride, num_stored_elements_per_row, b, b_stride, c, c_stride, [](const ValueType &x, const ValueType &y) { return x; }); } template <int subwarp_size, bool atomic = false, typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void spmv( const size_type num_rows, const ValueType *__restrict__ alpha, const ValueType *__restrict__ val, const IndexType *__restrict__ col, const size_type stride, const size_type num_stored_elements_per_row, const ValueType *__restrict__ b, const size_type b_stride, const ValueType *__restrict__ beta, ValueType *__restrict__ c, const size_type c_stride) { const ValueType alpha_val = alpha[0]; const ValueType beta_val = beta[0]; // Because the atomic operation changes the values of c during computation, // it can not do the right alpha * a * b + beta * c operation. // Thus, the cuda kernel only computes alpha * a * b when it uses atomic // operation. 
if (atomic) { spmv_kernel<subwarp_size, atomic>( num_rows, val, col, stride, num_stored_elements_per_row, b, b_stride, c, c_stride, [&alpha_val](const ValueType &x, const ValueType &y) { return alpha_val * x; }); } else { spmv_kernel<subwarp_size, atomic>( num_rows, val, col, stride, num_stored_elements_per_row, b, b_stride, c, c_stride, [&alpha_val, &beta_val](const ValueType &x, const ValueType &y) { return alpha_val * x + beta_val * y; }); } } } // namespace } // namespace kernel namespace { template <int info, typename ValueType, typename IndexType> void abstract_spmv(syn::value_list<int, info>, int nwarps_per_row, const matrix::Ell<ValueType, IndexType> *a, const matrix::Dense<ValueType> *b, matrix::Dense<ValueType> *c, const matrix::Dense<ValueType> *alpha = nullptr, const matrix::Dense<ValueType> *beta = nullptr) { const auto nrows = a->get_size()[0]; constexpr int subwarp_size = (info == 0) ? 32 : info; constexpr bool atomic = (info == 0); const dim3 block_size(default_block_size, 1, 1); const dim3 grid_size( ceildiv(nrows * subwarp_size * nwarps_per_row, block_size.x), b->get_size()[1], 1); if (alpha == nullptr && beta == nullptr) { hipLaunchKernelGGL(( kernel::spmv<subwarp_size, atomic>), dim3(grid_size), dim3(block_size), 0, 0, nrows, as_cuda_type(a->get_const_values()), a->get_const_col_idxs(), a->get_stride(), a->get_num_stored_elements_per_row(), as_cuda_type(b->get_const_values()), b->get_stride(), as_cuda_type(c->get_values()), c->get_stride()); } else if (alpha != nullptr && beta != nullptr) { hipLaunchKernelGGL(( kernel::spmv<subwarp_size, atomic>), dim3(grid_size), dim3(block_size), 0, 0, nrows, as_cuda_type(alpha->get_const_values()), as_cuda_type(a->get_const_values()), a->get_const_col_idxs(), a->get_stride(), a->get_num_stored_elements_per_row(), as_cuda_type(b->get_const_values()), b->get_stride(), as_cuda_type(beta->get_const_values()), as_cuda_type(c->get_values()), c->get_stride()); } else { GKO_KERNEL_NOT_FOUND; } } GKO_ENABLE_IMPLEMENTATION_SELECTION(select_abstract_spmv, abstract_spmv); template <typename ValueType, typename IndexType> std::array<int, 3> compute_subwarp_size_and_atomicity( std::shared_ptr<const CudaExecutor> exec, const matrix::Ell<ValueType, IndexType> *a) { int subwarp_size = 1; int atomic = 0; int nwarps_per_row = 1; const auto nrows = a->get_size()[0]; const auto ell_ncols = a->get_num_stored_elements_per_row(); const auto nwarps = exec->get_num_cores_per_sm() / cuda_config::warp_size * exec->get_num_multiprocessor() * num_threads_per_core; // Use multithreads to perform the reduction on each row when the matrix is // wide. // To make every thread have computation, so pick the value which is the // power of 2 less than 32 and is less than or equal to ell_ncols. If the // subwarp_size is 32 and allow more than one warps to work on the same row, // use atomic add to handle the warps write the value into the same // position. The #warps is decided according to the number of warps allowed // on GPU. 
if (static_cast<double>(ell_ncols) / nrows > ratio) { while (subwarp_size < 32 && (subwarp_size << 1) <= ell_ncols) { subwarp_size <<= 1; } if (subwarp_size == 32) { nwarps_per_row = ::min(ell_ncols / cuda_config::warp_size, nwarps / nrows); nwarps_per_row = ::max(nwarps_per_row, 1); } if (nwarps_per_row > 1) { atomic = 1; } } return {subwarp_size, atomic, nwarps_per_row}; } } // namespace template <typename ValueType, typename IndexType> void spmv(std::shared_ptr<const CudaExecutor> exec, const matrix::Ell<ValueType, IndexType> *a, const matrix::Dense<ValueType> *b, matrix::Dense<ValueType> *c) { const auto data = compute_subwarp_size_and_atomicity(exec, a); const int subwarp_size = std::get<0>(data); const int atomic = std::get<1>(data); const int nwarps_per_row = std::get<2>(data); /** * info is the parameter for selecting the cuda kernel. * for info == 0, it uses the kernel by 32 threads with atomic operation * for other value, it uses the kernel without atomic_add */ const int info = (!atomic) * subwarp_size; if (atomic) { zero_array(c->get_num_stored_elements(), c->get_values()); } select_abstract_spmv( compiled_kernels(), [&info](int compiled_info) { return info == compiled_info; }, syn::value_list<int>(), syn::type_list<>(), nwarps_per_row, a, b, c); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(GKO_DECLARE_ELL_SPMV_KERNEL); template <typename ValueType, typename IndexType> void advanced_spmv(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *alpha, const matrix::Ell<ValueType, IndexType> *a, const matrix::Dense<ValueType> *b, const matrix::Dense<ValueType> *beta, matrix::Dense<ValueType> *c) { const auto data = compute_subwarp_size_and_atomicity(exec, a); const int subwarp_size = std::get<0>(data); const int atomic = std::get<1>(data); const int nwarps_per_row = std::get<2>(data); /** * info is the parameter for selecting the cuda kernel. 
* for info == 0, it uses the kernel by 32 threads with atomic operation * for other value, it uses the kernel without atomic_add */ const int info = (!atomic) * subwarp_size; if (atomic) { dense::scale(exec, beta, c); } select_abstract_spmv( compiled_kernels(), [&info](int compiled_info) { return info == compiled_info; }, syn::value_list<int>(), syn::type_list<>(), nwarps_per_row, a, b, c, alpha, beta); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_ELL_ADVANCED_SPMV_KERNEL); namespace kernel { template <typename ValueType> __global__ __launch_bounds__(cuda_config::max_block_size) void initialize_zero_dense( size_type num_rows, size_type num_cols, size_type stride, ValueType *__restrict__ result) { const auto tidx_x = threadIdx.x + blockDim.x * blockIdx.x; const auto tidx_y = threadIdx.y + blockDim.y * blockIdx.y; if (tidx_x < num_cols && tidx_y < num_rows) { result[tidx_y * stride + tidx_x] = zero<ValueType>(); } } template <typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void fill_in_dense( size_type num_rows, size_type nnz, size_type source_stride, const IndexType *__restrict__ col_idxs, const ValueType *__restrict__ values, size_type result_stride, ValueType *__restrict__ result) { const auto tidx = threadIdx.x + blockIdx.x * blockDim.x; if (tidx < num_rows) { for (auto col = 0; col < nnz; col++) { result[tidx * result_stride + col_idxs[tidx + col * source_stride]] += values[tidx + col * source_stride]; } } } } // namespace kernel template <typename ValueType, typename IndexType> void convert_to_dense(std::shared_ptr<const CudaExecutor> exec, matrix::Dense<ValueType> *result, const matrix::Ell<ValueType, IndexType> *source) { const auto num_rows = result->get_size()[0]; const auto num_cols = result->get_size()[1]; const auto result_stride = result->get_stride(); const auto col_idxs = source->get_const_col_idxs(); const auto vals = source->get_const_values(); const auto source_stride = source->get_stride(); const dim3 block_size(cuda_config::warp_size, cuda_config::max_block_size / cuda_config::warp_size, 1); const dim3 init_grid_dim(ceildiv(result_stride, block_size.x), ceildiv(num_rows, block_size.y), 1); hipLaunchKernelGGL(( kernel::initialize_zero_dense), dim3(init_grid_dim), dim3(block_size), 0, 0, num_rows, num_cols, result_stride, as_cuda_type(result->get_values())); const auto grid_dim = ceildiv(num_rows, default_block_size); hipLaunchKernelGGL(( kernel::fill_in_dense), dim3(grid_dim), dim3(default_block_size), 0, 0, num_rows, source->get_num_stored_elements_per_row(), source_stride, as_cuda_type(col_idxs), as_cuda_type(vals), result_stride, as_cuda_type(result->get_values())); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_ELL_CONVERT_TO_DENSE_KERNEL); namespace kernel { template <typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void count_nnz_per_row( size_type num_rows, size_type max_nnz_per_row, size_type stride, const ValueType *__restrict__ values, IndexType *__restrict__ result) { constexpr auto warp_size = cuda_config::warp_size; const auto tidx = threadIdx.x + blockIdx.x * blockDim.x; const auto row_idx = tidx / warp_size; if (row_idx < num_rows) { IndexType part_result{}; for (auto i = threadIdx.x % warp_size; i < max_nnz_per_row; i += warp_size) { if (values[stride * i + row_idx] != zero<ValueType>()) { part_result += 1; } } auto warp_tile = group::tiled_partition<warp_size>(group::this_thread_block()); result[row_idx] = reduce( warp_tile, part_result, [](const 
size_type &a, const size_type &b) { return a + b; }); } } template <typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void fill_in_csr( size_type num_rows, size_type max_nnz_per_row, size_type stride, const ValueType *__restrict__ source_values, const IndexType *__restrict__ source_col_idxs, IndexType *__restrict__ result_row_ptrs, IndexType *__restrict__ result_col_idxs, ValueType *__restrict__ result_values) { const auto tidx = threadIdx.x + blockDim.x * blockIdx.x; if (tidx < num_rows) { auto write_to = result_row_ptrs[tidx]; for (auto i = 0; i < max_nnz_per_row; i++) { const auto source_idx = tidx + stride * i; if (source_values[source_idx] != zero<ValueType>()) { result_values[write_to] = source_values[source_idx]; result_col_idxs[write_to] = source_col_idxs[source_idx]; write_to++; } } } } } // namespace kernel template <typename ValueType, typename IndexType> void convert_to_csr(std::shared_ptr<const CudaExecutor> exec, matrix::Csr<ValueType, IndexType> *result, const matrix::Ell<ValueType, IndexType> *source) { auto num_rows = result->get_size()[0]; auto row_ptrs = result->get_row_ptrs(); auto col_idxs = result->get_col_idxs(); auto values = result->get_values(); const auto stride = source->get_stride(); const auto max_nnz_per_row = source->get_num_stored_elements_per_row(); constexpr auto rows_per_block = ceildiv(default_block_size, cuda_config::warp_size); const auto grid_dim_nnz = ceildiv(source->get_size()[0], rows_per_block); hipLaunchKernelGGL(( kernel::count_nnz_per_row), dim3(grid_dim_nnz), dim3(default_block_size), 0, 0, num_rows, max_nnz_per_row, stride, as_cuda_type(source->get_const_values()), as_cuda_type(row_ptrs)); size_type grid_dim = ceildiv(num_rows + 1, default_block_size); auto add_values = Array<IndexType>(exec, grid_dim); hipLaunchKernelGGL(( start_prefix_sum<default_block_size>) , dim3(grid_dim), dim3(default_block_size), 0, 0, num_rows + 1, as_cuda_type(row_ptrs), as_cuda_type(add_values.get_data())); hipLaunchKernelGGL(( finalize_prefix_sum<default_block_size>), dim3(grid_dim), dim3(default_block_size), 0, 0, num_rows + 1, as_cuda_type(row_ptrs), as_cuda_type(add_values.get_const_data())); hipLaunchKernelGGL(( kernel::fill_in_csr), dim3(grid_dim), dim3(default_block_size), 0, 0, num_rows, max_nnz_per_row, stride, as_cuda_type(source->get_const_values()), as_cuda_type(source->get_const_col_idxs()), as_cuda_type(row_ptrs), as_cuda_type(col_idxs), as_cuda_type(values)); add_values.clear(); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_ELL_CONVERT_TO_CSR_KERNEL); namespace kernel { __global__ __launch_bounds__(default_block_size) void reduce_nnz( size_type size, const size_type *__restrict__ nnz_per_row, size_type *__restrict__ result) { extern __shared__ size_type block_sum[]; reduce_array(size, nnz_per_row, block_sum, [](const size_type &x, const size_type &y) { return x + y; }); if (threadIdx.x == 0) { result[blockIdx.x] = block_sum[0]; } } } // namespace kernel template <typename ValueType, typename IndexType> void count_nonzeros(std::shared_ptr<const CudaExecutor> exec, const matrix::Ell<ValueType, IndexType> *source, size_type *result) { const auto num_rows = source->get_size()[0]; auto nnz_per_row = Array<size_type>(exec, num_rows); calculate_nonzeros_per_row(exec, source, &nnz_per_row); const auto n = ceildiv(num_rows, default_block_size); const size_type grid_dim = (n <= default_block_size) ? 
n : default_block_size; auto block_results = Array<size_type>(exec, grid_dim); hipLaunchKernelGGL(( kernel::reduce_nnz), dim3(grid_dim), dim3(default_block_size), default_block_size * sizeof(size_type), 0, num_rows, as_cuda_type(nnz_per_row.get_const_data()), as_cuda_type(block_results.get_data())); auto d_result = Array<size_type>(exec, 1); hipLaunchKernelGGL(( kernel::reduce_nnz), dim3(1), dim3(default_block_size), default_block_size * sizeof(size_type), 0, grid_dim, as_cuda_type(block_results.get_const_data()), as_cuda_type(d_result.get_data())); exec->get_master()->copy_from(exec.get(), 1, d_result.get_const_data(), result); d_result.clear(); block_results.clear(); nnz_per_row.clear(); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_ELL_COUNT_NONZEROS_KERNEL); template <typename ValueType, typename IndexType> void calculate_nonzeros_per_row(std::shared_ptr<const CudaExecutor> exec, const matrix::Ell<ValueType, IndexType> *source, Array<size_type> *result) { const auto num_rows = source->get_size()[0]; const auto max_nnz_per_row = source->get_num_stored_elements_per_row(); const auto stride = source->get_stride(); const auto values = source->get_const_values(); const auto warp_size = cuda_config::warp_size; const auto grid_dim = ceildiv(num_rows * warp_size, default_block_size); hipLaunchKernelGGL(( kernel::count_nnz_per_row), dim3(grid_dim), dim3(default_block_size), 0, 0, num_rows, max_nnz_per_row, stride, as_cuda_type(values), as_cuda_type(result->get_data())); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_ELL_CALCULATE_NONZEROS_PER_ROW_KERNEL); } // namespace ell } // namespace cuda } // namespace kernels } // namespace gko
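The kernels in this file all index ELL storage the same way: slot `idx` of row `x` lives at `x + idx * stride`, i.e. the per-row slots are laid out column-major with leading dimension `stride`. The serial loop below is a host-side illustration of that convention only; it assumes padded slots hold zero values so they contribute nothing, and it is not part of Ginkgo.

#include <cstdint>

void ell_spmv_serial(std::int64_t num_rows, std::int64_t slots_per_row, std::int64_t stride,
                     const double *val, const std::int32_t *col,
                     const double *b, double *c) {
  for (std::int64_t x = 0; x < num_rows; ++x) {
    double sum = 0.0;
    for (std::int64_t idx = 0; idx < slots_per_row; ++idx) {
      const std::int64_t ind = x + idx * stride;  // same indexing as spmv_kernel above
      sum += val[ind] * b[col[ind]];
    }
    c[x] = sum;
  }
}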
8459e313fd484e2a991e384fcb207661c0bdc3eb.cu
/*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2019, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "core/matrix/ell_kernels.hpp" #include <array> #include <ginkgo/core/base/exception_helpers.hpp> #include <ginkgo/core/base/math.hpp> #include <ginkgo/core/base/types.hpp> #include <ginkgo/core/matrix/csr.hpp> #include <ginkgo/core/matrix/dense.hpp> #include "core/matrix/dense_kernels.hpp" #include "core/synthesizer/implementation_selection.hpp" #include "cuda/base/cusparse_bindings.hpp" #include "cuda/base/types.hpp" #include "cuda/components/atomic.cuh" #include "cuda/components/cooperative_groups.cuh" #include "cuda/components/prefix_sum.cuh" #include "cuda/components/reduction.cuh" #include "cuda/components/zero_array.hpp" namespace gko { namespace kernels { namespace cuda { /** * @brief The ELL matrix format namespace. * * @ingroup ell */ namespace ell { constexpr int default_block_size = 512; // TODO: num_threads_per_core and ratio are parameters should be tuned /** * num_threads_per_core is the oversubscribing parameter. There are * `num_threads_per_core` threads assigned to each physical core. */ constexpr int num_threads_per_core = 4; /** * ratio is the parameter to decide when to use threads to do reduction on each * row. (#cols/#rows > ratio) */ constexpr double ratio = 1e-2; /** * A compile-time list of sub-warp sizes for which the spmv kernels should be * compiled. * 0 is a special case where it uses a sub-warp size of 32 in * combination with atomic_adds. 
*/ using compiled_kernels = syn::value_list<int, 0, 1, 2, 4, 8, 16, 32>; namespace kernel { namespace { template <int subwarp_size, bool atomic, typename ValueType, typename IndexType, typename Closure> __device__ void spmv_kernel(const size_type num_rows, const ValueType *__restrict__ val, const IndexType *__restrict__ col, const size_type stride, const size_type num_stored_elements_per_row, const ValueType *__restrict__ b, const size_type b_stride, ValueType *__restrict__ c, const size_type c_stride, Closure op) { const auto tidx = static_cast<IndexType>(blockDim.x) * blockIdx.x + threadIdx.x; const auto nwarps_per_row = gridDim.x * blockDim.x / num_rows / subwarp_size; const auto x = tidx / subwarp_size / nwarps_per_row; const auto warp_id = tidx / subwarp_size % nwarps_per_row; const auto y_start = tidx % subwarp_size + num_stored_elements_per_row * warp_id / nwarps_per_row; const auto y_end = num_stored_elements_per_row * (warp_id + 1) / nwarps_per_row; if (x < num_rows) { const auto tile_block = group::tiled_partition<subwarp_size>(group::this_thread_block()); ValueType temp = zero<ValueType>(); const auto column_id = blockIdx.y; for (IndexType idx = y_start; idx < y_end; idx += subwarp_size) { const auto ind = x + idx * stride; const auto col_idx = col[ind]; if (col_idx < idx) { break; } else { temp += val[ind] * b[col_idx * b_stride + column_id]; } } const auto answer = reduce( tile_block, temp, [](ValueType x, ValueType y) { return x + y; }); if (tile_block.thread_rank() == 0) { if (atomic) { atomic_add(&(c[x * c_stride + column_id]), op(answer, c[x * c_stride + column_id])); } else { c[x * c_stride + column_id] = op(answer, c[x * c_stride + column_id]); } } } } template <int subwarp_size, bool atomic = false, typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void spmv( const size_type num_rows, const ValueType *__restrict__ val, const IndexType *__restrict__ col, const size_type stride, const size_type num_stored_elements_per_row, const ValueType *__restrict__ b, const size_type b_stride, ValueType *__restrict__ c, const size_type c_stride) { spmv_kernel<subwarp_size, atomic>( num_rows, val, col, stride, num_stored_elements_per_row, b, b_stride, c, c_stride, [](const ValueType &x, const ValueType &y) { return x; }); } template <int subwarp_size, bool atomic = false, typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void spmv( const size_type num_rows, const ValueType *__restrict__ alpha, const ValueType *__restrict__ val, const IndexType *__restrict__ col, const size_type stride, const size_type num_stored_elements_per_row, const ValueType *__restrict__ b, const size_type b_stride, const ValueType *__restrict__ beta, ValueType *__restrict__ c, const size_type c_stride) { const ValueType alpha_val = alpha[0]; const ValueType beta_val = beta[0]; // Because the atomic operation changes the values of c during computation, // it can not do the right alpha * a * b + beta * c operation. // Thus, the cuda kernel only computes alpha * a * b when it uses atomic // operation. 
if (atomic) { spmv_kernel<subwarp_size, atomic>( num_rows, val, col, stride, num_stored_elements_per_row, b, b_stride, c, c_stride, [&alpha_val](const ValueType &x, const ValueType &y) { return alpha_val * x; }); } else { spmv_kernel<subwarp_size, atomic>( num_rows, val, col, stride, num_stored_elements_per_row, b, b_stride, c, c_stride, [&alpha_val, &beta_val](const ValueType &x, const ValueType &y) { return alpha_val * x + beta_val * y; }); } } } // namespace } // namespace kernel namespace { template <int info, typename ValueType, typename IndexType> void abstract_spmv(syn::value_list<int, info>, int nwarps_per_row, const matrix::Ell<ValueType, IndexType> *a, const matrix::Dense<ValueType> *b, matrix::Dense<ValueType> *c, const matrix::Dense<ValueType> *alpha = nullptr, const matrix::Dense<ValueType> *beta = nullptr) { const auto nrows = a->get_size()[0]; constexpr int subwarp_size = (info == 0) ? 32 : info; constexpr bool atomic = (info == 0); const dim3 block_size(default_block_size, 1, 1); const dim3 grid_size( ceildiv(nrows * subwarp_size * nwarps_per_row, block_size.x), b->get_size()[1], 1); if (alpha == nullptr && beta == nullptr) { kernel::spmv<subwarp_size, atomic><<<grid_size, block_size, 0, 0>>>( nrows, as_cuda_type(a->get_const_values()), a->get_const_col_idxs(), a->get_stride(), a->get_num_stored_elements_per_row(), as_cuda_type(b->get_const_values()), b->get_stride(), as_cuda_type(c->get_values()), c->get_stride()); } else if (alpha != nullptr && beta != nullptr) { kernel::spmv<subwarp_size, atomic><<<grid_size, block_size, 0, 0>>>( nrows, as_cuda_type(alpha->get_const_values()), as_cuda_type(a->get_const_values()), a->get_const_col_idxs(), a->get_stride(), a->get_num_stored_elements_per_row(), as_cuda_type(b->get_const_values()), b->get_stride(), as_cuda_type(beta->get_const_values()), as_cuda_type(c->get_values()), c->get_stride()); } else { GKO_KERNEL_NOT_FOUND; } } GKO_ENABLE_IMPLEMENTATION_SELECTION(select_abstract_spmv, abstract_spmv); template <typename ValueType, typename IndexType> std::array<int, 3> compute_subwarp_size_and_atomicity( std::shared_ptr<const CudaExecutor> exec, const matrix::Ell<ValueType, IndexType> *a) { int subwarp_size = 1; int atomic = 0; int nwarps_per_row = 1; const auto nrows = a->get_size()[0]; const auto ell_ncols = a->get_num_stored_elements_per_row(); const auto nwarps = exec->get_num_cores_per_sm() / cuda_config::warp_size * exec->get_num_multiprocessor() * num_threads_per_core; // Use multithreads to perform the reduction on each row when the matrix is // wide. // To make every thread have computation, so pick the value which is the // power of 2 less than 32 and is less than or equal to ell_ncols. If the // subwarp_size is 32 and allow more than one warps to work on the same row, // use atomic add to handle the warps write the value into the same // position. The #warps is decided according to the number of warps allowed // on GPU. 
if (static_cast<double>(ell_ncols) / nrows > ratio) { while (subwarp_size < 32 && (subwarp_size << 1) <= ell_ncols) { subwarp_size <<= 1; } if (subwarp_size == 32) { nwarps_per_row = std::min(ell_ncols / cuda_config::warp_size, nwarps / nrows); nwarps_per_row = std::max(nwarps_per_row, 1); } if (nwarps_per_row > 1) { atomic = 1; } } return {subwarp_size, atomic, nwarps_per_row}; } } // namespace template <typename ValueType, typename IndexType> void spmv(std::shared_ptr<const CudaExecutor> exec, const matrix::Ell<ValueType, IndexType> *a, const matrix::Dense<ValueType> *b, matrix::Dense<ValueType> *c) { const auto data = compute_subwarp_size_and_atomicity(exec, a); const int subwarp_size = std::get<0>(data); const int atomic = std::get<1>(data); const int nwarps_per_row = std::get<2>(data); /** * info is the parameter for selecting the cuda kernel. * for info == 0, it uses the kernel by 32 threads with atomic operation * for other value, it uses the kernel without atomic_add */ const int info = (!atomic) * subwarp_size; if (atomic) { zero_array(c->get_num_stored_elements(), c->get_values()); } select_abstract_spmv( compiled_kernels(), [&info](int compiled_info) { return info == compiled_info; }, syn::value_list<int>(), syn::type_list<>(), nwarps_per_row, a, b, c); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE(GKO_DECLARE_ELL_SPMV_KERNEL); template <typename ValueType, typename IndexType> void advanced_spmv(std::shared_ptr<const CudaExecutor> exec, const matrix::Dense<ValueType> *alpha, const matrix::Ell<ValueType, IndexType> *a, const matrix::Dense<ValueType> *b, const matrix::Dense<ValueType> *beta, matrix::Dense<ValueType> *c) { const auto data = compute_subwarp_size_and_atomicity(exec, a); const int subwarp_size = std::get<0>(data); const int atomic = std::get<1>(data); const int nwarps_per_row = std::get<2>(data); /** * info is the parameter for selecting the cuda kernel. 
* for info == 0, it uses the kernel by 32 threads with atomic operation * for other value, it uses the kernel without atomic_add */ const int info = (!atomic) * subwarp_size; if (atomic) { dense::scale(exec, beta, c); } select_abstract_spmv( compiled_kernels(), [&info](int compiled_info) { return info == compiled_info; }, syn::value_list<int>(), syn::type_list<>(), nwarps_per_row, a, b, c, alpha, beta); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_ELL_ADVANCED_SPMV_KERNEL); namespace kernel { template <typename ValueType> __global__ __launch_bounds__(cuda_config::max_block_size) void initialize_zero_dense( size_type num_rows, size_type num_cols, size_type stride, ValueType *__restrict__ result) { const auto tidx_x = threadIdx.x + blockDim.x * blockIdx.x; const auto tidx_y = threadIdx.y + blockDim.y * blockIdx.y; if (tidx_x < num_cols && tidx_y < num_rows) { result[tidx_y * stride + tidx_x] = zero<ValueType>(); } } template <typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void fill_in_dense( size_type num_rows, size_type nnz, size_type source_stride, const IndexType *__restrict__ col_idxs, const ValueType *__restrict__ values, size_type result_stride, ValueType *__restrict__ result) { const auto tidx = threadIdx.x + blockIdx.x * blockDim.x; if (tidx < num_rows) { for (auto col = 0; col < nnz; col++) { result[tidx * result_stride + col_idxs[tidx + col * source_stride]] += values[tidx + col * source_stride]; } } } } // namespace kernel template <typename ValueType, typename IndexType> void convert_to_dense(std::shared_ptr<const CudaExecutor> exec, matrix::Dense<ValueType> *result, const matrix::Ell<ValueType, IndexType> *source) { const auto num_rows = result->get_size()[0]; const auto num_cols = result->get_size()[1]; const auto result_stride = result->get_stride(); const auto col_idxs = source->get_const_col_idxs(); const auto vals = source->get_const_values(); const auto source_stride = source->get_stride(); const dim3 block_size(cuda_config::warp_size, cuda_config::max_block_size / cuda_config::warp_size, 1); const dim3 init_grid_dim(ceildiv(result_stride, block_size.x), ceildiv(num_rows, block_size.y), 1); kernel::initialize_zero_dense<<<init_grid_dim, block_size>>>( num_rows, num_cols, result_stride, as_cuda_type(result->get_values())); const auto grid_dim = ceildiv(num_rows, default_block_size); kernel::fill_in_dense<<<grid_dim, default_block_size>>>( num_rows, source->get_num_stored_elements_per_row(), source_stride, as_cuda_type(col_idxs), as_cuda_type(vals), result_stride, as_cuda_type(result->get_values())); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_ELL_CONVERT_TO_DENSE_KERNEL); namespace kernel { template <typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void count_nnz_per_row( size_type num_rows, size_type max_nnz_per_row, size_type stride, const ValueType *__restrict__ values, IndexType *__restrict__ result) { constexpr auto warp_size = cuda_config::warp_size; const auto tidx = threadIdx.x + blockIdx.x * blockDim.x; const auto row_idx = tidx / warp_size; if (row_idx < num_rows) { IndexType part_result{}; for (auto i = threadIdx.x % warp_size; i < max_nnz_per_row; i += warp_size) { if (values[stride * i + row_idx] != zero<ValueType>()) { part_result += 1; } } auto warp_tile = group::tiled_partition<warp_size>(group::this_thread_block()); result[row_idx] = reduce( warp_tile, part_result, [](const size_type &a, const size_type &b) { return a + b; }); } } template 
<typename ValueType, typename IndexType> __global__ __launch_bounds__(default_block_size) void fill_in_csr( size_type num_rows, size_type max_nnz_per_row, size_type stride, const ValueType *__restrict__ source_values, const IndexType *__restrict__ source_col_idxs, IndexType *__restrict__ result_row_ptrs, IndexType *__restrict__ result_col_idxs, ValueType *__restrict__ result_values) { const auto tidx = threadIdx.x + blockDim.x * blockIdx.x; if (tidx < num_rows) { auto write_to = result_row_ptrs[tidx]; for (auto i = 0; i < max_nnz_per_row; i++) { const auto source_idx = tidx + stride * i; if (source_values[source_idx] != zero<ValueType>()) { result_values[write_to] = source_values[source_idx]; result_col_idxs[write_to] = source_col_idxs[source_idx]; write_to++; } } } } } // namespace kernel template <typename ValueType, typename IndexType> void convert_to_csr(std::shared_ptr<const CudaExecutor> exec, matrix::Csr<ValueType, IndexType> *result, const matrix::Ell<ValueType, IndexType> *source) { auto num_rows = result->get_size()[0]; auto row_ptrs = result->get_row_ptrs(); auto col_idxs = result->get_col_idxs(); auto values = result->get_values(); const auto stride = source->get_stride(); const auto max_nnz_per_row = source->get_num_stored_elements_per_row(); constexpr auto rows_per_block = ceildiv(default_block_size, cuda_config::warp_size); const auto grid_dim_nnz = ceildiv(source->get_size()[0], rows_per_block); kernel::count_nnz_per_row<<<grid_dim_nnz, default_block_size>>>( num_rows, max_nnz_per_row, stride, as_cuda_type(source->get_const_values()), as_cuda_type(row_ptrs)); size_type grid_dim = ceildiv(num_rows + 1, default_block_size); auto add_values = Array<IndexType>(exec, grid_dim); start_prefix_sum<default_block_size> <<<grid_dim, default_block_size>>>(num_rows + 1, as_cuda_type(row_ptrs), as_cuda_type(add_values.get_data())); finalize_prefix_sum<default_block_size><<<grid_dim, default_block_size>>>( num_rows + 1, as_cuda_type(row_ptrs), as_cuda_type(add_values.get_const_data())); kernel::fill_in_csr<<<grid_dim, default_block_size>>>( num_rows, max_nnz_per_row, stride, as_cuda_type(source->get_const_values()), as_cuda_type(source->get_const_col_idxs()), as_cuda_type(row_ptrs), as_cuda_type(col_idxs), as_cuda_type(values)); add_values.clear(); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_ELL_CONVERT_TO_CSR_KERNEL); namespace kernel { __global__ __launch_bounds__(default_block_size) void reduce_nnz( size_type size, const size_type *__restrict__ nnz_per_row, size_type *__restrict__ result) { extern __shared__ size_type block_sum[]; reduce_array(size, nnz_per_row, block_sum, [](const size_type &x, const size_type &y) { return x + y; }); if (threadIdx.x == 0) { result[blockIdx.x] = block_sum[0]; } } } // namespace kernel template <typename ValueType, typename IndexType> void count_nonzeros(std::shared_ptr<const CudaExecutor> exec, const matrix::Ell<ValueType, IndexType> *source, size_type *result) { const auto num_rows = source->get_size()[0]; auto nnz_per_row = Array<size_type>(exec, num_rows); calculate_nonzeros_per_row(exec, source, &nnz_per_row); const auto n = ceildiv(num_rows, default_block_size); const size_type grid_dim = (n <= default_block_size) ? 
n : default_block_size; auto block_results = Array<size_type>(exec, grid_dim); kernel::reduce_nnz<<<grid_dim, default_block_size, default_block_size * sizeof(size_type)>>>( num_rows, as_cuda_type(nnz_per_row.get_const_data()), as_cuda_type(block_results.get_data())); auto d_result = Array<size_type>(exec, 1); kernel::reduce_nnz<<<1, default_block_size, default_block_size * sizeof(size_type)>>>( grid_dim, as_cuda_type(block_results.get_const_data()), as_cuda_type(d_result.get_data())); exec->get_master()->copy_from(exec.get(), 1, d_result.get_const_data(), result); d_result.clear(); block_results.clear(); nnz_per_row.clear(); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_ELL_COUNT_NONZEROS_KERNEL); template <typename ValueType, typename IndexType> void calculate_nonzeros_per_row(std::shared_ptr<const CudaExecutor> exec, const matrix::Ell<ValueType, IndexType> *source, Array<size_type> *result) { const auto num_rows = source->get_size()[0]; const auto max_nnz_per_row = source->get_num_stored_elements_per_row(); const auto stride = source->get_stride(); const auto values = source->get_const_values(); const auto warp_size = cuda_config::warp_size; const auto grid_dim = ceildiv(num_rows * warp_size, default_block_size); kernel::count_nnz_per_row<<<grid_dim, default_block_size>>>( num_rows, max_nnz_per_row, stride, as_cuda_type(values), as_cuda_type(result->get_data())); } GKO_INSTANTIATE_FOR_EACH_VALUE_AND_INDEX_TYPE( GKO_DECLARE_ELL_CALCULATE_NONZEROS_PER_ROW_KERNEL); } // namespace ell } // namespace cuda } // namespace kernels } // namespace gko
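The abstract_spmv machinery above dispatches between non-atomic subwarp kernels and an atomic variant that lets several warps share one row, and the strided ELL layout it walks is easy to lose in the dispatch code. The following is a simplified stand-in, not Ginkgo code: one thread per row, column-major ELL storage with stride padded rows per slot, and padding assumed to hold a zero value with an in-range column index. It omits the subwarp reduction and atomic accumulation that the real kernels add on top.

// Simplified sketch (not Ginkgo code): one thread per row of an ELL matrix,
// computing y = A * x over column-major ELL storage.
__global__ void ell_spmv_row_per_thread(int num_rows, int max_nnz_per_row,
                                        int stride, const double *val,
                                        const int *col, const double *x,
                                        double *y)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= num_rows) {
        return;
    }
    double sum = 0.0;
    for (int i = 0; i < max_nnz_per_row; ++i) {
        // entry (row, i) lives at row + i * stride; padded slots contribute 0
        sum += val[row + i * stride] * x[col[row + i * stride]];
    }
    y[row] = sum;
}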
840020b0213540d921d9ea70ecc0b33ef58644e6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/edit_distance_kernel.h" #include <algorithm> #include <vector> #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_primitives.h" #include "paddle/phi/common/memory_utils.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/mixed_vector.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace phi { using phi::PADDLE_CUDA_NUM_THREADS; template <typename T> __global__ void FillFirstRow(T* dist, const int N) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < N + 1) { dist[idx] = idx; } } template <typename T> __global__ void FillFirstColumn(T* dist, const int M, const int N) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < M + 1) { dist[idx * (N + 1)] = idx; } } template <typename T> __global__ void Levenshtein(T* dist, const int64_t* x1, const int64_t* x2, const int M, const int N, const int start) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int offset = N; int index = start + idx * offset; int row = index / (N + 1); int col = index % (N + 1); if (row > 0 && col > 0 && row < M + 1 && col < N + 1) { int cost = x1[row - 1] == x2[col - 1] ? 0 : 1; int dels = dist[(row - 1) * (N + 1) + col] + 1; int ins = dist[row * (N + 1) + col - 1] + 1; int subs = dist[(row - 1) * (N + 1) + (col - 1)] + cost; dist[index] = min(dels, min(ins, subs)); } } template <typename T> __global__ void SetOutput( T* out, const T* dist, const int M, const int N, bool normalized) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx == 0) { out[0] = normalized ? 
dist[M * (N + 1) + N] / N : dist[M * (N + 1) + N]; } } template <typename T, typename Context> void EditDistanceKernel(const Context& ctx, const DenseTensor& hyps, const DenseTensor& refs, const paddle::optional<DenseTensor>& hypslength, const paddle::optional<DenseTensor>& refslength, bool normalized, DenseTensor* sequencenum, DenseTensor* out) { ctx.template Alloc<int64_t>(sequencenum); auto batch_size = hyps.dims()[0]; auto stream = reinterpret_cast<const phi::GPUContext&>(ctx).stream(); phi::Vector<size_t> hyp_lod(batch_size + 1); phi::Vector<size_t> ref_lod(batch_size + 1); bool use_length = hypslength.get_ptr() != nullptr; if (use_length) { DenseTensor hyp_length_cpu; DenseTensor ref_length_cpu; phi::Copy( ctx, *(hypslength.get_ptr()), phi::CPUPlace(), false, &hyp_length_cpu); phi::Copy( ctx, *(refslength.get_ptr()), phi::CPUPlace(), false, &ref_length_cpu); for (auto i = 0; i < batch_size; i++) { hyp_lod[i + 1] = hyp_lod[i] + hyp_length_cpu.data<int64_t>()[i]; ref_lod[i + 1] = ref_lod[i] + ref_length_cpu.data<int64_t>()[i]; } } else { hyp_lod = hyps.lod()[0]; ref_lod = refs.lod()[0]; } if (normalized) { for (size_t i = 1; i < ref_lod.size(); ++i) { PADDLE_ENFORCE_GT( ref_lod[i], ref_lod[i - 1], errors::InvalidArgument("Reference string %d is empty.", i)); } } const size_t num_strs = hyp_lod.size() - 1; phi::funcs::SetConstant<GPUContext, int64_t> set_constant; set_constant(ctx, sequencenum, static_cast<int64_t>(num_strs)); out->Resize({static_cast<int64_t>(num_strs), 1}); ctx.template Alloc<T>(out); auto out_data = out->data<T>(); T distance = 0.0; for (size_t num = 0; num < num_strs; num++) { auto m = static_cast<int64_t>(hyp_lod[num + 1] - hyp_lod[num]); auto n = static_cast<int64_t>(ref_lod[num + 1] - ref_lod[num]); if (m == 0 || n == 0) { distance = ::max(m, n); if (normalized) { distance = distance / n; } memory_utils::Copy(ctx.GetPlace(), out_data + num, CPUPlace(), &distance, sizeof(T), stream); } else { DenseTensor dist_t; dist_t.Resize({m + 1, n + 1}); ctx.template Alloc<T>(&dist_t); auto dist = dist_t.data<T>(); auto hyp_offset = use_length ? num * hyps.dims()[1] : hyp_lod[num]; auto ref_offset = use_length ? num * refs.dims()[1] : ref_lod[num]; auto x1 = hyps.data<int64_t>() + hyp_offset; auto x2 = refs.data<int64_t>() + ref_offset; hipLaunchKernelGGL(( FillFirstColumn<T>), dim3(1 + m / PADDLE_CUDA_NUM_THREADS), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, dist, m, n); hipLaunchKernelGGL(( FillFirstRow<T>), dim3(1 + n / PADDLE_CUDA_NUM_THREADS), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, dist, n); // Compute the elements of distance matrix in the anti-diagonal diretion for (int64_t slice = 2; slice < m + n + 1; ++slice) { int z_m = slice < m + 1 ? 0 : slice - m; int z_n = slice < n + 1 ? 0 : slice - n; int size = slice - (z_m + z_n) + 1; // number of elments in the same // anti-diagonal line to update // the start index at which computes from int start = slice < n + 1 ? slice : (z_n + 1) * (n + 1) - 1; hipLaunchKernelGGL(( Levenshtein<T>), dim3(1 + (size - 1) / PADDLE_CUDA_NUM_THREADS), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, dist, x1, x2, m, n, start); } hipLaunchKernelGGL(( SetOutput<T>), dim3(1), dim3(1), 0, stream, out_data + num, dist, m, n, normalized); } } } } // namespace phi PD_REGISTER_KERNEL( edit_distance, GPU, ALL_LAYOUT, phi::EditDistanceKernel, float) { kernel->OutputAt(0).SetDataType(phi::DataType::INT64); }
840020b0213540d921d9ea70ecc0b33ef58644e6.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/edit_distance_kernel.h" #include <algorithm> #include <vector> #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/backends/gpu/gpu_primitives.h" #include "paddle/phi/common/memory_utils.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/core/mixed_vector.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace phi { using phi::PADDLE_CUDA_NUM_THREADS; template <typename T> __global__ void FillFirstRow(T* dist, const int N) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < N + 1) { dist[idx] = idx; } } template <typename T> __global__ void FillFirstColumn(T* dist, const int M, const int N) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < M + 1) { dist[idx * (N + 1)] = idx; } } template <typename T> __global__ void Levenshtein(T* dist, const int64_t* x1, const int64_t* x2, const int M, const int N, const int start) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int offset = N; int index = start + idx * offset; int row = index / (N + 1); int col = index % (N + 1); if (row > 0 && col > 0 && row < M + 1 && col < N + 1) { int cost = x1[row - 1] == x2[col - 1] ? 0 : 1; int dels = dist[(row - 1) * (N + 1) + col] + 1; int ins = dist[row * (N + 1) + col - 1] + 1; int subs = dist[(row - 1) * (N + 1) + (col - 1)] + cost; dist[index] = min(dels, min(ins, subs)); } } template <typename T> __global__ void SetOutput( T* out, const T* dist, const int M, const int N, bool normalized) { int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx == 0) { out[0] = normalized ? 
dist[M * (N + 1) + N] / N : dist[M * (N + 1) + N]; } } template <typename T, typename Context> void EditDistanceKernel(const Context& ctx, const DenseTensor& hyps, const DenseTensor& refs, const paddle::optional<DenseTensor>& hypslength, const paddle::optional<DenseTensor>& refslength, bool normalized, DenseTensor* sequencenum, DenseTensor* out) { ctx.template Alloc<int64_t>(sequencenum); auto batch_size = hyps.dims()[0]; auto stream = reinterpret_cast<const phi::GPUContext&>(ctx).stream(); phi::Vector<size_t> hyp_lod(batch_size + 1); phi::Vector<size_t> ref_lod(batch_size + 1); bool use_length = hypslength.get_ptr() != nullptr; if (use_length) { DenseTensor hyp_length_cpu; DenseTensor ref_length_cpu; phi::Copy( ctx, *(hypslength.get_ptr()), phi::CPUPlace(), false, &hyp_length_cpu); phi::Copy( ctx, *(refslength.get_ptr()), phi::CPUPlace(), false, &ref_length_cpu); for (auto i = 0; i < batch_size; i++) { hyp_lod[i + 1] = hyp_lod[i] + hyp_length_cpu.data<int64_t>()[i]; ref_lod[i + 1] = ref_lod[i] + ref_length_cpu.data<int64_t>()[i]; } } else { hyp_lod = hyps.lod()[0]; ref_lod = refs.lod()[0]; } if (normalized) { for (size_t i = 1; i < ref_lod.size(); ++i) { PADDLE_ENFORCE_GT( ref_lod[i], ref_lod[i - 1], errors::InvalidArgument("Reference string %d is empty.", i)); } } const size_t num_strs = hyp_lod.size() - 1; phi::funcs::SetConstant<GPUContext, int64_t> set_constant; set_constant(ctx, sequencenum, static_cast<int64_t>(num_strs)); out->Resize({static_cast<int64_t>(num_strs), 1}); ctx.template Alloc<T>(out); auto out_data = out->data<T>(); T distance = 0.0; for (size_t num = 0; num < num_strs; num++) { auto m = static_cast<int64_t>(hyp_lod[num + 1] - hyp_lod[num]); auto n = static_cast<int64_t>(ref_lod[num + 1] - ref_lod[num]); if (m == 0 || n == 0) { distance = std::max(m, n); if (normalized) { distance = distance / n; } memory_utils::Copy(ctx.GetPlace(), out_data + num, CPUPlace(), &distance, sizeof(T), stream); } else { DenseTensor dist_t; dist_t.Resize({m + 1, n + 1}); ctx.template Alloc<T>(&dist_t); auto dist = dist_t.data<T>(); auto hyp_offset = use_length ? num * hyps.dims()[1] : hyp_lod[num]; auto ref_offset = use_length ? num * refs.dims()[1] : ref_lod[num]; auto x1 = hyps.data<int64_t>() + hyp_offset; auto x2 = refs.data<int64_t>() + ref_offset; FillFirstColumn<T><<<1 + m / PADDLE_CUDA_NUM_THREADS, PADDLE_CUDA_NUM_THREADS, 0, stream>>>(dist, m, n); FillFirstRow<T><<<1 + n / PADDLE_CUDA_NUM_THREADS, PADDLE_CUDA_NUM_THREADS, 0, stream>>>(dist, n); // Compute the elements of distance matrix in the anti-diagonal diretion for (int64_t slice = 2; slice < m + n + 1; ++slice) { int z_m = slice < m + 1 ? 0 : slice - m; int z_n = slice < n + 1 ? 0 : slice - n; int size = slice - (z_m + z_n) + 1; // number of elments in the same // anti-diagonal line to update // the start index at which computes from int start = slice < n + 1 ? slice : (z_n + 1) * (n + 1) - 1; Levenshtein<T><<<1 + (size - 1) / PADDLE_CUDA_NUM_THREADS, PADDLE_CUDA_NUM_THREADS, 0, stream>>>(dist, x1, x2, m, n, start); } SetOutput<T><<<1, 1, 0, stream>>>(out_data + num, dist, m, n, normalized); } } } } // namespace phi PD_REGISTER_KERNEL( edit_distance, GPU, ALL_LAYOUT, phi::EditDistanceKernel, float) { kernel->OutputAt(0).SetDataType(phi::DataType::INT64); }
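Both versions above fill the (M+1) x (N+1) distance matrix one anti-diagonal per Levenshtein launch, which works because cell (i, j) depends only on (i-1, j), (i, j-1) and (i-1, j-1), so all cells on one anti-diagonal are independent. A small host-side reference of the same recurrence, included only as a sketch for checking the kernel's indexing, not part of the Paddle kernel:

#include <algorithm>
#include <cstdint>
#include <vector>

// Reference edit distance over the flattened (m+1) x (n+1) matrix used above.
static int edit_distance_ref(const std::vector<int64_t> &a,
                             const std::vector<int64_t> &b)
{
    const int m = static_cast<int>(a.size());
    const int n = static_cast<int>(b.size());
    std::vector<int> dist((m + 1) * (n + 1));
    for (int i = 0; i <= m; ++i) dist[i * (n + 1)] = i;  // first column
    for (int j = 0; j <= n; ++j) dist[j] = j;            // first row
    for (int i = 1; i <= m; ++i) {
        for (int j = 1; j <= n; ++j) {
            const int cost = (a[i - 1] == b[j - 1]) ? 0 : 1;
            const int dels = dist[(i - 1) * (n + 1) + j] + 1;
            const int ins  = dist[i * (n + 1) + j - 1] + 1;
            const int subs = dist[(i - 1) * (n + 1) + (j - 1)] + cost;
            dist[i * (n + 1) + j] = std::min(dels, std::min(ins, subs));
        }
    }
    return dist[m * (n + 1) + n];
}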
8ee88cecfe9cf3cf7e687a2a69e3c48824fa5daa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Reference code implementing the box blur filter. Build and execute as follows: make clean && make ./blur_filter size Author: Naga Kandasamy Date created: May 3, 2019 */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> /* #define DEBUG */ /* Include the kernel code. */ #include "blur_filter_kernel.cu" #define NUM_THREAD_BLOCKS 240 #define THREAD_BLOCK_SIZE 128 extern "C" void compute_gold (const image_t, image_t); void compute_on_device (const image_t, image_t); int check_results (const float *, const float *, int, float); void print_image (const image_t); image_t allocate_image_on_device (const image_t); void copy_image_to_device(image_t, image_t); void copy_image_from_device(image_t, image_t); int main (int argc, char **argv) { struct timeval start, stop; if (argc < 2) { printf ("Usage: %s size\n", argv[0]); printf ("size: Height of the image. The program assumes size x size image.\n"); exit (EXIT_FAILURE); } /* Allocate memory for the input and output images. */ int size = atoi (argv[1]); printf ("Creating %d x %d images\n", size, size); image_t in, out_gold, out_gpu; in.size = out_gold.size = out_gpu.size = size; in.element = (float *) malloc (sizeof (float) * size * size); out_gold.element = (float *) malloc (sizeof (float) * size * size); out_gpu.element = (float *) malloc (sizeof (float) * size * size); if ((in.element == NULL) || (out_gold.element == NULL) || (out_gpu.element == NULL)) { perror ("Malloc"); exit (EXIT_FAILURE); } /* Poplulate our image with random values between [-0.5 +0.5] */ srand (time (NULL)); for (int i = 0; i < size * size; i++) in.element[i] = rand ()/ (float) RAND_MAX - 0.5; // in.element[i] = 1; /* Calculate the blur on the CPU. The result is stored in out_gold. */ printf ("Calculating blur on the CPU\n"); gettimeofday(&start, NULL); compute_gold (in, out_gold); gettimeofday(&stop, NULL); printf ("Execution time for CPU = %fs. \n", (float)(stop.tv_sec - start.tv_sec +\ (stop.tv_usec - start.tv_usec)/(float)1000000)); #ifdef DEBUG print_image (in); print_image (out_gold); #endif /* Calculate the blur on the GPU. The result is stored in out_gpu. */ printf ("Calculating blur on the GPU\n"); compute_on_device (in, out_gpu); /* Check the CPU and GPU results for correctness. */ printf ("Checking CPU and GPU results\n"); int num_elements = out_gold.size * out_gold.size; float eps = 1e-6; int check = check_results (out_gold.element, out_gpu.element, num_elements, eps); if (check == 1) printf ("TEST PASSED\n"); else printf ("TEST FAILED\n"); /* Free data structures on the host. */ free ((void *) in.element); free ((void *) out_gold.element); free ((void *) out_gpu.element); exit (EXIT_SUCCESS); } /* Calculate the blur on the GPU. */ void compute_on_device (const image_t in, image_t out) { struct timeval start, stop; /* Allocate device memory */ image_t d_in = allocate_image_on_device(in); image_t d_out = allocate_image_on_device(out); /* Copy image to device memory */ copy_image_to_device(d_in, in); /* Set up execution grid */ dim3 thread_block(THREAD_BLOCK_SIZE, 1, 1); dim3 grid(NUM_THREAD_BLOCKS,1); /* Launch kernel */ gettimeofday(&start, NULL); hipLaunchKernelGGL(( blur_filter_kernel), dim3(grid), dim3(thread_block), 0, 0, d_in.element, d_out.element, d_in.size); gettimeofday(&stop, NULL); printf ("Execution time for GPU = %fs. 
\n", (float)(stop.tv_sec - start.tv_sec +\ (stop.tv_usec - start.tv_usec)/(float)1000000)); /* Check if kernel execution generated an error */ hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf (stderr, "Kernel execution failed: %s\n", hipGetErrorString (err)); exit (EXIT_FAILURE); } // Copy out back over copy_image_from_device(out, d_out); /* Clean up memory on GPU */ hipFree(d_in.element); hipFree(d_out.element); return; } /* Function to check correctness of the results. */ int check_results (const float *pix1, const float *pix2, int num_elements, float eps) { for (int i = 0; i < num_elements; i++) if (fabsf ((pix1[i] - pix2[i])/pix1[i]) > eps) return 0; return 1; } /* Function to print out the image contents. */ void print_image (const image_t img) { for (int i = 0; i < img.size; i++) { for (int j = 0; j < img.size; j++) { float val = img.element[i * img.size + j]; printf ("%0.4f ", val); } printf ("\n"); } printf ("\n"); return; } /* Allocate image on device */ image_t allocate_image_on_device (const image_t im) { image_t im_device = im; int size = (im.size * im.size) * sizeof (float); hipMalloc((void**) &im_device.element, size); hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf (stderr, "hipMalloc failed: %s\n", hipGetErrorString (err)); exit (EXIT_FAILURE); } return im_device; } /* Copy from host to device */ void copy_image_to_device(image_t im_device, const image_t im_host) { int size = (im_host.size * im_host.size) * sizeof (float); im_device.size = im_host.size; hipMemcpy(im_device.element, im_host.element, size, hipMemcpyHostToDevice); hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf (stderr, "hipMemcpy (h2d) failed: %s\n", hipGetErrorString (err)); exit (EXIT_FAILURE); } } /* Copy from device to host */ void copy_image_from_device(image_t im_host, image_t im_device) { int size = (im_device.size * im_device.size) * sizeof (float); hipMemcpy(im_host.element, im_device.element, size, hipMemcpyDeviceToHost); hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf (stderr, "hipMemcpy (d2h) failed: %s\n", hipGetErrorString (err)); exit (EXIT_FAILURE); } }
8ee88cecfe9cf3cf7e687a2a69e3c48824fa5daa.cu
/* Reference code implementing the box blur filter. Build and execute as follows: make clean && make ./blur_filter size Author: Naga Kandasamy Date created: May 3, 2019 */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> /* #define DEBUG */ /* Include the kernel code. */ #include "blur_filter_kernel.cu" #define NUM_THREAD_BLOCKS 240 #define THREAD_BLOCK_SIZE 128 extern "C" void compute_gold (const image_t, image_t); void compute_on_device (const image_t, image_t); int check_results (const float *, const float *, int, float); void print_image (const image_t); image_t allocate_image_on_device (const image_t); void copy_image_to_device(image_t, image_t); void copy_image_from_device(image_t, image_t); int main (int argc, char **argv) { struct timeval start, stop; if (argc < 2) { printf ("Usage: %s size\n", argv[0]); printf ("size: Height of the image. The program assumes size x size image.\n"); exit (EXIT_FAILURE); } /* Allocate memory for the input and output images. */ int size = atoi (argv[1]); printf ("Creating %d x %d images\n", size, size); image_t in, out_gold, out_gpu; in.size = out_gold.size = out_gpu.size = size; in.element = (float *) malloc (sizeof (float) * size * size); out_gold.element = (float *) malloc (sizeof (float) * size * size); out_gpu.element = (float *) malloc (sizeof (float) * size * size); if ((in.element == NULL) || (out_gold.element == NULL) || (out_gpu.element == NULL)) { perror ("Malloc"); exit (EXIT_FAILURE); } /* Poplulate our image with random values between [-0.5 +0.5] */ srand (time (NULL)); for (int i = 0; i < size * size; i++) in.element[i] = rand ()/ (float) RAND_MAX - 0.5; // in.element[i] = 1; /* Calculate the blur on the CPU. The result is stored in out_gold. */ printf ("Calculating blur on the CPU\n"); gettimeofday(&start, NULL); compute_gold (in, out_gold); gettimeofday(&stop, NULL); printf ("Execution time for CPU = %fs. \n", (float)(stop.tv_sec - start.tv_sec +\ (stop.tv_usec - start.tv_usec)/(float)1000000)); #ifdef DEBUG print_image (in); print_image (out_gold); #endif /* Calculate the blur on the GPU. The result is stored in out_gpu. */ printf ("Calculating blur on the GPU\n"); compute_on_device (in, out_gpu); /* Check the CPU and GPU results for correctness. */ printf ("Checking CPU and GPU results\n"); int num_elements = out_gold.size * out_gold.size; float eps = 1e-6; int check = check_results (out_gold.element, out_gpu.element, num_elements, eps); if (check == 1) printf ("TEST PASSED\n"); else printf ("TEST FAILED\n"); /* Free data structures on the host. */ free ((void *) in.element); free ((void *) out_gold.element); free ((void *) out_gpu.element); exit (EXIT_SUCCESS); } /* Calculate the blur on the GPU. */ void compute_on_device (const image_t in, image_t out) { struct timeval start, stop; /* Allocate device memory */ image_t d_in = allocate_image_on_device(in); image_t d_out = allocate_image_on_device(out); /* Copy image to device memory */ copy_image_to_device(d_in, in); /* Set up execution grid */ dim3 thread_block(THREAD_BLOCK_SIZE, 1, 1); dim3 grid(NUM_THREAD_BLOCKS,1); /* Launch kernel */ gettimeofday(&start, NULL); blur_filter_kernel<<<grid, thread_block>>>(d_in.element, d_out.element, d_in.size); gettimeofday(&stop, NULL); printf ("Execution time for GPU = %fs. 
\n", (float)(stop.tv_sec - start.tv_sec +\ (stop.tv_usec - start.tv_usec)/(float)1000000)); /* Check if kernel execution generated an error */ cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf (stderr, "Kernel execution failed: %s\n", cudaGetErrorString (err)); exit (EXIT_FAILURE); } // Copy out back over copy_image_from_device(out, d_out); /* Clean up memory on GPU */ cudaFree(d_in.element); cudaFree(d_out.element); return; } /* Function to check correctness of the results. */ int check_results (const float *pix1, const float *pix2, int num_elements, float eps) { for (int i = 0; i < num_elements; i++) if (fabsf ((pix1[i] - pix2[i])/pix1[i]) > eps) return 0; return 1; } /* Function to print out the image contents. */ void print_image (const image_t img) { for (int i = 0; i < img.size; i++) { for (int j = 0; j < img.size; j++) { float val = img.element[i * img.size + j]; printf ("%0.4f ", val); } printf ("\n"); } printf ("\n"); return; } /* Allocate image on device */ image_t allocate_image_on_device (const image_t im) { image_t im_device = im; int size = (im.size * im.size) * sizeof (float); cudaMalloc((void**) &im_device.element, size); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf (stderr, "cudaMalloc failed: %s\n", cudaGetErrorString (err)); exit (EXIT_FAILURE); } return im_device; } /* Copy from host to device */ void copy_image_to_device(image_t im_device, const image_t im_host) { int size = (im_host.size * im_host.size) * sizeof (float); im_device.size = im_host.size; cudaMemcpy(im_device.element, im_host.element, size, cudaMemcpyHostToDevice); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf (stderr, "cudaMemcpy (h2d) failed: %s\n", cudaGetErrorString (err)); exit (EXIT_FAILURE); } } /* Copy from device to host */ void copy_image_from_device(image_t im_host, image_t im_device) { int size = (im_device.size * im_device.size) * sizeof (float); cudaMemcpy(im_host.element, im_device.element, size, cudaMemcpyDeviceToHost); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf (stderr, "cudaMemcpy (d2h) failed: %s\n", cudaGetErrorString (err)); exit (EXIT_FAILURE); } }
85d8b48ea283110d52c8b5a56e0a290f3076207b.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2018 XIAOLIN WANG ([email protected]; [email protected]) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "LstmInstance.h" #include "Global.h" #include "CudnnDescriptors.h" namespace cytonLib { Variable* LstmInstance::init(string tag_, LstmCell* cell_, Variable* x_, Variable* hx_, Variable *cx_) { tag=tag_; cell=cell_; x=x_; hx=hx_; cx=cx_; batchSize=cell->batchSize; inputSize=cell->inputSize; hiddenSize=cell->hiddenSize; numLayers=cell->numLayers; seqLen=1; assert(x->n==batchSize && x->c == inputSize); assert(hx->n==numLayers && hx->c==batchSize && hx->h == hiddenSize); assert(cx->n==numLayers && cx->c==batchSize && cx->h == hiddenSize); y.resize(batchSize, hiddenSize, 1, 1); hy.resize(numLayers, batchSize, hiddenSize, 1); cy.resize(numLayers, batchSize, hiddenSize, 1); CudnnDescriptors::createNdDesc(xDesc, x->n, x->c, x->h); CudnnDescriptors::createNdDesc(yDesc, y.n, y.c, y.h); CudnnDescriptors::createNdDesc(hDesc, hx->n, hx->c, hx->h); CudnnDescriptors::createNdDesc(cDesc, cx->n, cx->c, cx->h); // Only needed in training, shouldn't be touched between passes. size_t reserveSize; checkError(cudnnGetRNNTrainingReserveSize(global.cudnnHandle, cell->rnnDesc, seqLen, &x->desc, &reserveSize)); reserveSpace.resize(reserveSize, 1); checkError(hipDeviceSynchronize()); return &y; } void LstmInstance::prevTransfer(DevMatPrec* hx_, DevMatPrec* cx_, DevMatPrec* hy_, DevMatPrec* cy_) { hx->data=hx_->data; cx->data=cx_->data; hy.data=hy_->data; cy.data=cy_->data; } void LstmInstance::forward() { assert(seqLen==1); if(!testMode) { checkError(cudnnRNNForwardTraining(global.cudnnHandle, cell->rnnDesc, seqLen, &xDesc, x->data, hDesc, hx->data, cDesc, cx->data, cell->w.desc, cell->w.data, &yDesc, y.data, hDesc, hy.data, cDesc, cy.data, cell->workspace.data, cell->workspace.ni, reserveSpace.data, reserveSpace.ni)); } else { checkError(cudnnRNNForwardInference(global.cudnnHandle, cell->rnnDesc, seqLen, &xDesc, x->data, hDesc, hx->data, cDesc, cx->data, cell->w.desc, cell->w.data, &yDesc, y.data, hDesc, hy.data, cDesc, cy.data, cell->workspace.data, cell->workspace.ni)); } } void LstmInstance::setStateGradZero() { hy.grad.setZero(); cy.grad.setZero(); } void LstmInstance::backward() { assert(!testMode); checkError(cudnnRNNBackwardData(global.cudnnHandle, cell->rnnDesc, seqLen, &yDesc, y.data, &yDesc, y.grad.data, hDesc, hy.grad.data, cDesc, cy.grad.data, cell->w.desc, cell->w.data, hDesc, hx->data, cDesc, cx->data, &xDesc, x->grad.data, hDesc, hx->grad.data, cDesc, cx->grad.data, cell->workspace.data, cell->workspace.ni, reserveSpace.data, reserveSpace.ni )); } void LstmInstance::calculateGradient() { checkError(cudnnRNNBackwardWeights(global.cudnnHandle, cell->rnnDesc, seqLen, &xDesc, x->data, hDesc, hx->data, &yDesc, y.data, cell->workspace.data, cell->workspace.ni, cell->w.desc, cell->w.grad.data, reserveSpace.data, reserveSpace.ni )); } } /* namespace cytonLib */
85d8b48ea283110d52c8b5a56e0a290f3076207b.cu
/* Copyright 2018 XIAOLIN WANG ([email protected]; [email protected]) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "LstmInstance.h" #include "Global.h" #include "CudnnDescriptors.h" namespace cytonLib { Variable* LstmInstance::init(string tag_, LstmCell* cell_, Variable* x_, Variable* hx_, Variable *cx_) { tag=tag_; cell=cell_; x=x_; hx=hx_; cx=cx_; batchSize=cell->batchSize; inputSize=cell->inputSize; hiddenSize=cell->hiddenSize; numLayers=cell->numLayers; seqLen=1; assert(x->n==batchSize && x->c == inputSize); assert(hx->n==numLayers && hx->c==batchSize && hx->h == hiddenSize); assert(cx->n==numLayers && cx->c==batchSize && cx->h == hiddenSize); y.resize(batchSize, hiddenSize, 1, 1); hy.resize(numLayers, batchSize, hiddenSize, 1); cy.resize(numLayers, batchSize, hiddenSize, 1); CudnnDescriptors::createNdDesc(xDesc, x->n, x->c, x->h); CudnnDescriptors::createNdDesc(yDesc, y.n, y.c, y.h); CudnnDescriptors::createNdDesc(hDesc, hx->n, hx->c, hx->h); CudnnDescriptors::createNdDesc(cDesc, cx->n, cx->c, cx->h); // Only needed in training, shouldn't be touched between passes. size_t reserveSize; checkError(cudnnGetRNNTrainingReserveSize(global.cudnnHandle, cell->rnnDesc, seqLen, &x->desc, &reserveSize)); reserveSpace.resize(reserveSize, 1); checkError(cudaDeviceSynchronize()); return &y; } void LstmInstance::prevTransfer(DevMatPrec* hx_, DevMatPrec* cx_, DevMatPrec* hy_, DevMatPrec* cy_) { hx->data=hx_->data; cx->data=cx_->data; hy.data=hy_->data; cy.data=cy_->data; } void LstmInstance::forward() { assert(seqLen==1); if(!testMode) { checkError(cudnnRNNForwardTraining(global.cudnnHandle, cell->rnnDesc, seqLen, &xDesc, x->data, hDesc, hx->data, cDesc, cx->data, cell->w.desc, cell->w.data, &yDesc, y.data, hDesc, hy.data, cDesc, cy.data, cell->workspace.data, cell->workspace.ni, reserveSpace.data, reserveSpace.ni)); } else { checkError(cudnnRNNForwardInference(global.cudnnHandle, cell->rnnDesc, seqLen, &xDesc, x->data, hDesc, hx->data, cDesc, cx->data, cell->w.desc, cell->w.data, &yDesc, y.data, hDesc, hy.data, cDesc, cy.data, cell->workspace.data, cell->workspace.ni)); } } void LstmInstance::setStateGradZero() { hy.grad.setZero(); cy.grad.setZero(); } void LstmInstance::backward() { assert(!testMode); checkError(cudnnRNNBackwardData(global.cudnnHandle, cell->rnnDesc, seqLen, &yDesc, y.data, &yDesc, y.grad.data, hDesc, hy.grad.data, cDesc, cy.grad.data, cell->w.desc, cell->w.data, hDesc, hx->data, cDesc, cx->data, &xDesc, x->grad.data, hDesc, hx->grad.data, cDesc, cx->grad.data, cell->workspace.data, cell->workspace.ni, reserveSpace.data, reserveSpace.ni )); } void LstmInstance::calculateGradient() { checkError(cudnnRNNBackwardWeights(global.cudnnHandle, cell->rnnDesc, seqLen, &xDesc, x->data, hDesc, hx->data, &yDesc, y.data, cell->workspace.data, cell->workspace.ni, cell->w.desc, cell->w.grad.data, reserveSpace.data, reserveSpace.ni )); } } /* namespace cytonLib */
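LstmInstance::init() above queries the training reserve size but relies on cell->workspace having been sized elsewhere. The companion query for that buffer, under the same legacy cuDNN RNN API the file already uses, would look roughly like the following; the resize call and member names simply mirror the file's own conventions and are assumptions, not code from the library:

size_t workSize = 0;
checkError(cudnnGetRNNWorkspaceSize(global.cudnnHandle, cell->rnnDesc,
                                    seqLen, &xDesc, &workSize));
cell->workspace.resize(workSize, 1);  // assumed to mirror reserveSpace.resize above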
3eecfdfc7b32f8bd768c56431ee7866af8faf336.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "Saxy_device.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *x = NULL; hipMalloc(&x, XSIZE*YSIZE); float *y = NULL; hipMalloc(&y, XSIZE*YSIZE); float *d = NULL; hipMalloc(&d, XSIZE*YSIZE); float xb = 1; float yb = 1; int n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( Saxy_device), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,d,xb,yb,n); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( Saxy_device), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,d,xb,yb,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( Saxy_device), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,d,xb,yb,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
3eecfdfc7b32f8bd768c56431ee7866af8faf336.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "Saxy_device.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); float *y = NULL; cudaMalloc(&y, XSIZE*YSIZE); float *d = NULL; cudaMalloc(&d, XSIZE*YSIZE); float xb = 1; float yb = 1; int n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); Saxy_device<<<gridBlock,threadBlock>>>(x,y,d,xb,yb,n); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { Saxy_device<<<gridBlock,threadBlock>>>(x,y,d,xb,yb,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { Saxy_device<<<gridBlock,threadBlock>>>(x,y,d,xb,yb,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
4cea51f6baaf34837572a8b853a525c032256767.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kernel_add.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *x = NULL; hipMalloc(&x, XSIZE*YSIZE); const float *y = NULL; hipMalloc(&y, XSIZE*YSIZE); const int N = 1; float *out = NULL; hipMalloc(&out, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kernel_add), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,N,out); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kernel_add), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,N,out); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kernel_add), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,N,out); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
4cea51f6baaf34837572a8b853a525c032256767.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kernel_add.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const float *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); const float *y = NULL; cudaMalloc(&y, XSIZE*YSIZE); const int N = 1; float *out = NULL; cudaMalloc(&out, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kernel_add<<<gridBlock,threadBlock>>>(x,y,N,out); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kernel_add<<<gridBlock,threadBlock>>>(x,y,N,out); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kernel_add<<<gridBlock,threadBlock>>>(x,y,N,out); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
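Both benchmark harnesses above take steady_clock timestamps around 1000 kernel launches without synchronizing before the second timestamp, so a large share of the measured interval can be launch and enqueue overhead rather than device execution time. One common alternative is event-based timing in the launch stream; a sketch using the kernel_add harness's variable names (the HIP twin would use hipEvent_t and the matching hipEvent* calls in the same way):

cudaEvent_t start_ev, stop_ev;
cudaEventCreate(&start_ev);
cudaEventCreate(&stop_ev);
cudaEventRecord(start_ev);              // recorded in the default stream
for (int i = 0; i < 1000; ++i) {
    kernel_add<<<gridBlock, threadBlock>>>(x, y, N, out);
}
cudaEventRecord(stop_ev);
cudaEventSynchronize(stop_ev);          // wait until the last launch finished
float ms = 0.0f;
cudaEventElapsedTime(&ms, start_ev, stop_ev);
cudaEventDestroy(start_ev);
cudaEventDestroy(stop_ev);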
1329e3328ae3244b354043b81574215fa923f532.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <functions/dev_image.h> #include <device/device_defines.h> #include <device/cuda_utils.h> #include <device/handles.h> // // one thread per source image pixel. //dimensions = samples * channels * height * width -- Column Major format // GLOBAL void ker_im2col( const int n, const real* data_im, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, int channels, real *data_col, int samples ) { int imgIdx = blockIdx.y; for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < n; index += blockDim.x * gridDim.x ){ int h_out = index % height_col; int h_index = index / height_col; int w_out = h_index % width_col; int channel_in = h_index / width_col; int channel_out = channel_in * ksize * ksize; int h_in = h_out * stride - pad; int w_in = w_out * stride - pad; //output pointer real *data_col_ptr = data_col; data_col_ptr += channel_out * height_col * width_col * samples + imgIdx * height_col * width_col + w_out * height_col + h_out; //input pointer. const real* data_im_ptr = data_im + imgIdx * height * width; data_im_ptr += (channel_in * width * samples + w_in) * height+ h_in; for (int j = 0; j < ksize; j ++){ //columns for (int i = 0; i < ksize; i ++) { //rows int h = h_in + i; int w = w_in + j; *data_col_ptr = ((h >= 0) && (w >= 0) && (h < height) && (w < width )) ? data_im_ptr[ j * height + i ] : 0; data_col_ptr += height_col * width_col * samples; } } } } GLOBAL void ker_im2col_row_major( const int n, const real* data_im, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, int channels, real *data_col, int samples ) { int imgIdx = blockIdx.y; for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < n; index += blockDim.x * gridDim.x ){ int h_out = index % height_col; int h_index = index / height_col; int w_out = h_index % width_col; int channel_in = h_index / width_col; int channel_out = channel_in * ksize * ksize; int h_in = h_out * stride - pad; int w_in = w_out * stride - pad; //output pointer real *data_col_ptr = data_col; data_col_ptr += channel_out * height_col * width_col * samples + imgIdx * height_col * width_col + w_out + h_out * width_col; //input pointer. const real* data_im_ptr = data_im + imgIdx * height * width; data_im_ptr += (channel_in * width * samples + w_in) * height+ h_in; for (int r = 0; r < ksize; r ++) { //rows for (int c = 0; c < ksize; c ++){ //columns int h = h_in + r; int w = w_in + c; *data_col_ptr = ((h >= 0) && (w >= 0) && (h < height) && (w < width )) ? data_im_ptr[ c * height + r ] : 0; data_col_ptr += height_col * width_col * samples; } } } } void getImageCols( real* data_im, const int channels, const int height, const int width, const int ksize, const int pad, const int stride, real* data_col) { int height_col = (height + 2 * pad - ksize ) / stride + 1; int width_col = (width + 2 * pad - ksize ) / stride + 1; int num_kernels = channels * height_col * width_col; //int num_kernels = 4; int blocks = (num_kernels + BLOCK_SIZE - 1) / BLOCK_SIZE; //one thread per element kernel. 
hipLaunchKernelGGL(( ker_im2col) , dim3(blocks), dim3(BLOCK_SIZE), 0, 0, num_kernels, data_im, height, width, ksize, pad, stride, height_col, width_col, channels, data_col, 1 ); //ker_im2col <<<1, 28*28>>> // ( 28*28, data_im, 32, 32, 5, 0, 1, 28, 28, data_col ); hipDeviceSynchronize (); cudaCheckError (); } void getBatchImageCols( real* data_in, int samples, const int channels, const int height, const int width, const int ksize, const int pad, const int stride, real *data_col ) { int height_col = (height + 2 * pad - ksize ) / stride + 1; int width_col = (width + 2 * pad - ksize ) / stride + 1; int num_kernels = channels * height_col * width_col; int x_blocks = (num_kernels + BLOCK_SIZE - 1) / BLOCK_SIZE; int y_blocks = samples; //BUG-FIX //dim3 blocks(1, y_blocks, x_blocks ); dim3 blocks(x_blocks, y_blocks, 1); //One thread per source image pixel //no. of grids = batch size... hipLaunchKernelGGL(( ker_im2col) , dim3(blocks), dim3(BLOCK_SIZE) , 0, 0, num_kernels, data_in, height, width, ksize, pad, stride, height_col, width_col, channels, data_col, samples ); /* hipLaunchKernelGGL(( ker_im2col) , dim3(blocks), dim3(BLOCK_SIZE) , 0, 0, num_kernels, data_in, channels, height, width, ksize, pad, stride, height_col, width_col, data_col, samples ); */ hipDeviceSynchronize (); cudaCheckError (); } void getBatchImageColsRowMajor( real* data_in, int samples, const int channels, const int height, const int width, const int ksize, const int pad, const int stride, real *data_col ) { int height_col = (height + 2 * pad - ksize ) / stride + 1; int width_col = (width + 2 * pad - ksize ) / stride + 1; int num_kernels = channels * height_col * width_col; int x_blocks = (num_kernels + BLOCK_SIZE - 1) / BLOCK_SIZE; int y_blocks = samples; //BUG-FIX //dim3 blocks(1, y_blocks, x_blocks ); dim3 blocks(x_blocks, y_blocks, 1); //One thread per source image pixel //no. of grids = batch size... hipLaunchKernelGGL(( ker_im2col_row_major), dim3(blocks), dim3(BLOCK_SIZE) , 0, 0, num_kernels, data_in, height, width, ksize, pad, stride, height_col, width_col, channels, data_col, samples ); /* hipLaunchKernelGGL(( ker_im2col) , dim3(blocks), dim3(BLOCK_SIZE) , 0, 0, num_kernels, data_in, channels, height, width, ksize, pad, stride, height_col, width_col, data_col, samples ); */ hipDeviceSynchronize (); cudaCheckError (); }
1329e3328ae3244b354043b81574215fa923f532.cu
#include <functions/dev_image.h> #include <device/device_defines.h> #include <device/cuda_utils.h> #include <device/handles.h> // // one thread per source image pixel. //dimensions = samples * channels * height * width -- Column Major format // GLOBAL void ker_im2col( const int n, const real* data_im, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, int channels, real *data_col, int samples ) { int imgIdx = blockIdx.y; for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < n; index += blockDim.x * gridDim.x ){ int h_out = index % height_col; int h_index = index / height_col; int w_out = h_index % width_col; int channel_in = h_index / width_col; int channel_out = channel_in * ksize * ksize; int h_in = h_out * stride - pad; int w_in = w_out * stride - pad; //output pointer real *data_col_ptr = data_col; data_col_ptr += channel_out * height_col * width_col * samples + imgIdx * height_col * width_col + w_out * height_col + h_out; //input pointer. const real* data_im_ptr = data_im + imgIdx * height * width; data_im_ptr += (channel_in * width * samples + w_in) * height+ h_in; for (int j = 0; j < ksize; j ++){ //columns for (int i = 0; i < ksize; i ++) { //rows int h = h_in + i; int w = w_in + j; *data_col_ptr = ((h >= 0) && (w >= 0) && (h < height) && (w < width )) ? data_im_ptr[ j * height + i ] : 0; data_col_ptr += height_col * width_col * samples; } } } } GLOBAL void ker_im2col_row_major( const int n, const real* data_im, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, int channels, real *data_col, int samples ) { int imgIdx = blockIdx.y; for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < n; index += blockDim.x * gridDim.x ){ int h_out = index % height_col; int h_index = index / height_col; int w_out = h_index % width_col; int channel_in = h_index / width_col; int channel_out = channel_in * ksize * ksize; int h_in = h_out * stride - pad; int w_in = w_out * stride - pad; //output pointer real *data_col_ptr = data_col; data_col_ptr += channel_out * height_col * width_col * samples + imgIdx * height_col * width_col + w_out + h_out * width_col; //input pointer. const real* data_im_ptr = data_im + imgIdx * height * width; data_im_ptr += (channel_in * width * samples + w_in) * height+ h_in; for (int r = 0; r < ksize; r ++) { //rows for (int c = 0; c < ksize; c ++){ //columns int h = h_in + r; int w = w_in + c; *data_col_ptr = ((h >= 0) && (w >= 0) && (h < height) && (w < width )) ? data_im_ptr[ c * height + r ] : 0; data_col_ptr += height_col * width_col * samples; } } } } void getImageCols( real* data_im, const int channels, const int height, const int width, const int ksize, const int pad, const int stride, real* data_col) { int height_col = (height + 2 * pad - ksize ) / stride + 1; int width_col = (width + 2 * pad - ksize ) / stride + 1; int num_kernels = channels * height_col * width_col; //int num_kernels = 4; int blocks = (num_kernels + BLOCK_SIZE - 1) / BLOCK_SIZE; //one thread per element kernel. 
ker_im2col <<<blocks, BLOCK_SIZE>>> ( num_kernels, data_im, height, width, ksize, pad, stride, height_col, width_col, channels, data_col, 1 ); //ker_im2col <<<1, 28*28>>> // ( 28*28, data_im, 32, 32, 5, 0, 1, 28, 28, data_col ); cudaDeviceSynchronize (); cudaCheckError (); } void getBatchImageCols( real* data_in, int samples, const int channels, const int height, const int width, const int ksize, const int pad, const int stride, real *data_col ) { int height_col = (height + 2 * pad - ksize ) / stride + 1; int width_col = (width + 2 * pad - ksize ) / stride + 1; int num_kernels = channels * height_col * width_col; int x_blocks = (num_kernels + BLOCK_SIZE - 1) / BLOCK_SIZE; int y_blocks = samples; //BUG-FIX //dim3 blocks(1, y_blocks, x_blocks ); dim3 blocks(x_blocks, y_blocks, 1); //One thread per source image pixel //no. of grids = batch size... ker_im2col <<< blocks, BLOCK_SIZE >>> ( num_kernels, data_in, height, width, ksize, pad, stride, height_col, width_col, channels, data_col, samples ); /* ker_im2col <<< blocks, BLOCK_SIZE >>> ( num_kernels, data_in, channels, height, width, ksize, pad, stride, height_col, width_col, data_col, samples ); */ cudaThreadSynchronize (); cudaCheckError (); } void getBatchImageColsRowMajor( real* data_in, int samples, const int channels, const int height, const int width, const int ksize, const int pad, const int stride, real *data_col ) { int height_col = (height + 2 * pad - ksize ) / stride + 1; int width_col = (width + 2 * pad - ksize ) / stride + 1; int num_kernels = channels * height_col * width_col; int x_blocks = (num_kernels + BLOCK_SIZE - 1) / BLOCK_SIZE; int y_blocks = samples; //BUG-FIX //dim3 blocks(1, y_blocks, x_blocks ); dim3 blocks(x_blocks, y_blocks, 1); //One thread per source image pixel //no. of grids = batch size... ker_im2col_row_major<<< blocks, BLOCK_SIZE >>> ( num_kernels, data_in, height, width, ksize, pad, stride, height_col, width_col, channels, data_col, samples ); /* ker_im2col <<< blocks, BLOCK_SIZE >>> ( num_kernels, data_in, channels, height, width, ksize, pad, stride, height_col, width_col, data_col, samples ); */ cudaThreadSynchronize (); cudaCheckError (); }
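The device kernels above write a sample-interleaved, column-major layout specific to this library. For orientation, a host reference of the plain textbook im2col transform (single image, row-major) is sketched below; it is not drop-in equivalent to the layouts produced by ker_im2col or ker_im2col_row_major, only a reference for the height_col/width_col geometry and the zero-padding behaviour:

#include <vector>

static void im2col_ref(const std::vector<float> &im, int channels, int height,
                       int width, int ksize, int pad, int stride,
                       std::vector<float> &col)
{
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col  = (width + 2 * pad - ksize) / stride + 1;
    col.assign((size_t)channels * ksize * ksize * height_col * width_col, 0.0f);
    for (int c = 0; c < channels * ksize * ksize; ++c) {
        const int w_off = c % ksize;
        const int h_off = (c / ksize) % ksize;
        const int c_im  = c / ksize / ksize;
        for (int h = 0; h < height_col; ++h) {
            for (int w = 0; w < width_col; ++w) {
                const int h_im = h * stride - pad + h_off;
                const int w_im = w * stride - pad + w_off;
                const bool inside = h_im >= 0 && h_im < height &&
                                    w_im >= 0 && w_im < width;
                col[(c * height_col + h) * width_col + w] =
                    inside ? im[(c_im * height + h_im) * width + w_im] : 0.0f;
            }
        }
    }
}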
d7cb0514a56f3115265e07fbb0d7a43f8fb1984a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
using namespace std;
#include <chrono>
using namespace std::chrono;

void random_ints(int *a, int N) {
  int i;
  for (i = 0; i < N; ++i)
    a[i] = rand();
}

__global__ void add(int *a, int *b, int *c) {
  int index = threadIdx.x + blockIdx.x * blockDim.x;
  c[index] = a[index] + b[index];
}

#define N (2048 * 2048 * 100)
#define THREADS_PER_BLOCK 512

int main(void) {
  auto start = high_resolution_clock::now();
  int *a, *b, *c;       // host copies of a, b, c
  int *d_a, *d_b, *d_c; // device copies of a, b, c
  int size = N * sizeof(int);

  // Alloc space for device copies of a, b, c
  hipMalloc((void **)&d_a, size);
  hipMalloc((void **)&d_b, size);
  hipMalloc((void **)&d_c, size);

  // Alloc space for host copies of a, b, c and setup input values
  a = (int *)malloc(size);
  b = (int *)malloc(size);
  c = (int *)malloc(size);
  random_ints(b, N);
  random_ints(a, N);

  // Copy inputs to device
  hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
  hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);

  // Launch add() kernel on GPU
  hipLaunchKernelGGL((add), dim3(N / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, d_a, d_b, d_c);

  // Copy result back to host
  hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);

  // Cleanup
  free(a); free(b); free(c);
  hipFree(d_a); hipFree(d_b); hipFree(d_c);

  auto stop = high_resolution_clock::now();
  auto duration = duration_cast<microseconds>(stop - start);
  cout << "Time taken by function: " << duration.count() << " microseconds" << endl;
  return 0;
}
d7cb0514a56f3115265e07fbb0d7a43f8fb1984a.cu
#include <iostream>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
using namespace std;
#include <chrono>
using namespace std::chrono;

void random_ints(int *a, int N) {
  int i;
  for (i = 0; i < N; ++i)
    a[i] = rand();
}

__global__ void add(int *a, int *b, int *c) {
  int index = threadIdx.x + blockIdx.x * blockDim.x;
  c[index] = a[index] + b[index];
}

#define N (2048 * 2048 * 100)
#define THREADS_PER_BLOCK 512

int main(void) {
  auto start = high_resolution_clock::now();
  int *a, *b, *c;       // host copies of a, b, c
  int *d_a, *d_b, *d_c; // device copies of a, b, c
  int size = N * sizeof(int);

  // Alloc space for device copies of a, b, c
  cudaMalloc((void **)&d_a, size);
  cudaMalloc((void **)&d_b, size);
  cudaMalloc((void **)&d_c, size);

  // Alloc space for host copies of a, b, c and setup input values
  a = (int *)malloc(size);
  b = (int *)malloc(size);
  c = (int *)malloc(size);
  random_ints(b, N);
  random_ints(a, N);

  // Copy inputs to device
  cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

  // Launch add() kernel on GPU
  add<<<N / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, d_c);

  // Copy result back to host
  cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

  // Cleanup
  free(a); free(b); free(c);
  cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);

  auto stop = high_resolution_clock::now();
  auto duration = duration_cast<microseconds>(stop - start);
  cout << "Time taken by function: " << duration.count() << " microseconds" << endl;
  return 0;
}
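The pair above launches add without any error checking or result verification. Below is a hedged sketch of how both could be bolted onto the same pattern; the CHECK macro, the bounds-checked kernel, and the managed-memory allocation are illustrative additions, not part of the original files.

#include <cstdio>
#include <cstdlib>

#define CHECK(call)                                                         \
  do {                                                                      \
    cudaError_t err__ = (call);                                             \
    if (err__ != cudaSuccess) {                                             \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                           \
              cudaGetErrorString(err__), __FILE__, __LINE__);               \
      exit(EXIT_FAILURE);                                                   \
    }                                                                       \
  } while (0)

__global__ void add_checked(const int *a, const int *b, int *c, int n) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < n) c[i] = a[i] + b[i];   // bounds check so n need not divide the block size
}

int main() {
  const int n = 1 << 20;
  size_t bytes = n * sizeof(int);
  int *a, *b, *c;
  CHECK(cudaMallocManaged(&a, bytes));
  CHECK(cudaMallocManaged(&b, bytes));
  CHECK(cudaMallocManaged(&c, bytes));
  for (int i = 0; i < n; ++i) { a[i] = i; b[i] = 2 * i; }

  int threads = 512;
  int blocks = (n + threads - 1) / threads;
  add_checked<<<blocks, threads>>>(a, b, c, n);
  CHECK(cudaGetLastError());        // catches launch-configuration errors
  CHECK(cudaDeviceSynchronize());   // catches asynchronous kernel errors

  int bad = 0;
  for (int i = 0; i < n; ++i) bad += (c[i] != 3 * i);
  printf("%s\n", bad ? "FAILED" : "PASSED");

  CHECK(cudaFree(a)); CHECK(cudaFree(b)); CHECK(cudaFree(c));
  return 0;
}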
a2982313dc8868234d779ab6f837f0eca376f100.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void advectLevelset(const float dt, const float inv_dx, const unsigned char * d_mask, const float * d_levelsetIn, float * d_levelsetOut, const float * d_velIn_x, const float * d_velIn_y) { //todo remove const int T_THREADS = 16; const int i = threadIdx.x + blockDim.x * blockIdx.x; const int j = threadIdx.y + blockDim.y * blockIdx.y; const int g_idx = i + j * blockDim.x * gridDim.x; //Allocate shared memory for Level Set, +2 in for apron __shared__ float s_phi[(T_THREADS + 2) * (T_THREADS + 2)]; //Load inner phi int s_idx = threadIdx.x + 1 + (threadIdx.y + 1) * (blockDim.x + 2); s_phi[s_idx] = d_levelsetIn[g_idx]; //Load phi at the apron //Left boundary if (threadIdx.x == 0 && blockIdx.x != 0) { s_idx = (threadIdx.y + 1) * (blockDim.x + 2); s_phi[s_idx] = d_levelsetIn[g_idx - 1]; } //Right boundary if (threadIdx.x == blockDim.x - 1 && blockIdx.x != gridDim.x - 1) { s_idx = (threadIdx.y + 1) * (blockDim.x + 2) + threadIdx.x + 2; s_phi[s_idx] = d_levelsetIn[g_idx + 1]; } //Bottom boundary if (threadIdx.y == 0 && blockIdx.y != 0) { s_idx = threadIdx.x + 1; s_phi[s_idx] = d_levelsetIn[g_idx - gridDim.x * blockDim.x]; } //Top boundary if (threadIdx.y == blockDim.y - 1 && blockIdx.y != gridDim.y - 1) { s_idx = (threadIdx.y + 2) * (blockDim.x + 2) + threadIdx.x + 1; s_phi[s_idx] = d_levelsetIn[g_idx + gridDim.x * blockDim.x]; } //Sync all threads __syncthreads(); //Allocate memory for velocities __shared__ float s_vel_x[(T_THREADS + 1)*(T_THREADS + 1)]; __shared__ float s_vel_y[(T_THREADS + 1)*(T_THREADS + 1)]; s_idx = threadIdx.y * (blockDim.x + 1) + threadIdx.x; //Because of MaC grid, global memeory has one extra component int g_idx_vel = i * j * (blockDim.x * gridDim.x + 1); //Load inner velocities s_vel_x[s_idx] = d_velIn_x[g_idx_vel]; s_vel_y[s_idx] = d_velIn_y[g_idx_vel]; //Load boundary velocities //Right boundary if (threadIdx.x == blockDim.x - 1 && blockIdx.x != gridDim.x - 1) { s_idx = threadIdx.y * (blockDim.x + 1) + threadIdx.x + 1; s_vel_x[s_idx] = d_velIn_x[g_idx_vel + 1]; } //Top boundary if (threadIdx.y == blockDim.y - 1 && blockIdx.y != gridDim.y - 1) { s_idx = (threadIdx.y + 1) * (blockDim.x + 1) + threadIdx.x; s_vel_x[s_idx] = d_velIn_x[g_idx_vel + blockDim.x * gridDim.x + 1]; } //Sync all threads __syncthreads(); int vel_idx = threadIdx.x + threadIdx.y * (blockDim.x + 1); float vel_x = (s_vel_x[vel_idx] + s_vel_x[vel_idx + 1]) * 0.5f; float vel_y = (s_vel_y[vel_idx] + s_vel_y[vel_idx + blockDim.x + 1]) * 0.5f; float dphidx, dphidy; int phi_idx = threadIdx.x + 1 + (threadIdx.y + 1) * (blockDim.x + 2); float phi = s_phi[phi_idx]; if (vel_x > 0.0f) { dphidx = (phi - s_phi[phi_idx - 1]) * inv_dx; } else { dphidx = (s_phi[phi_idx + 1] - phi) * inv_dx; } if (vel_y > 0.0f) { dphidy = (phi - s_phi[phi_idx - (blockDim.x + 2)]) * inv_dx; } else { dphidy = (s_phi[phi_idx + (blockDim.x + 2)] - phi) * inv_dx; } d_levelsetOut[g_idx] = phi - dt * (dphidx * vel_x + dphidy * vel_y); } void advectLevelset(dim3 blocks, dim3 threads, const float dt, const float inv_dx, const unsigned char * d_mask, const float * d_levelsetIn, float * d_levelsetOut, const float * d_velIn_x, const float * d_velIn_y) { hipLaunchKernelGGL(( advectLevelset), dim3(blocks),dim3(threads), 0, 0, dt, inv_dx, d_mask, d_levelsetIn, d_levelsetOut, d_velIn_x, d_velIn_y); }
a2982313dc8868234d779ab6f837f0eca376f100.cu
__global__ void advectLevelset(const float dt, const float inv_dx, const unsigned char * d_mask, const float * d_levelsetIn, float * d_levelsetOut, const float * d_velIn_x, const float * d_velIn_y) { //todo remove const int T_THREADS = 16; const int i = threadIdx.x + blockDim.x * blockIdx.x; const int j = threadIdx.y + blockDim.y * blockIdx.y; const int g_idx = i + j * blockDim.x * gridDim.x; //Allocate shared memory for Level Set, +2 in for apron __shared__ float s_phi[(T_THREADS + 2) * (T_THREADS + 2)]; //Load inner phi int s_idx = threadIdx.x + 1 + (threadIdx.y + 1) * (blockDim.x + 2); s_phi[s_idx] = d_levelsetIn[g_idx]; //Load phi at the apron //Left boundary if (threadIdx.x == 0 && blockIdx.x != 0) { s_idx = (threadIdx.y + 1) * (blockDim.x + 2); s_phi[s_idx] = d_levelsetIn[g_idx - 1]; } //Right boundary if (threadIdx.x == blockDim.x - 1 && blockIdx.x != gridDim.x - 1) { s_idx = (threadIdx.y + 1) * (blockDim.x + 2) + threadIdx.x + 2; s_phi[s_idx] = d_levelsetIn[g_idx + 1]; } //Bottom boundary if (threadIdx.y == 0 && blockIdx.y != 0) { s_idx = threadIdx.x + 1; s_phi[s_idx] = d_levelsetIn[g_idx - gridDim.x * blockDim.x]; } //Top boundary if (threadIdx.y == blockDim.y - 1 && blockIdx.y != gridDim.y - 1) { s_idx = (threadIdx.y + 2) * (blockDim.x + 2) + threadIdx.x + 1; s_phi[s_idx] = d_levelsetIn[g_idx + gridDim.x * blockDim.x]; } //Sync all threads __syncthreads(); //Allocate memory for velocities __shared__ float s_vel_x[(T_THREADS + 1)*(T_THREADS + 1)]; __shared__ float s_vel_y[(T_THREADS + 1)*(T_THREADS + 1)]; s_idx = threadIdx.y * (blockDim.x + 1) + threadIdx.x; //Because of MaC grid, global memeory has one extra component int g_idx_vel = i * j * (blockDim.x * gridDim.x + 1); //Load inner velocities s_vel_x[s_idx] = d_velIn_x[g_idx_vel]; s_vel_y[s_idx] = d_velIn_y[g_idx_vel]; //Load boundary velocities //Right boundary if (threadIdx.x == blockDim.x - 1 && blockIdx.x != gridDim.x - 1) { s_idx = threadIdx.y * (blockDim.x + 1) + threadIdx.x + 1; s_vel_x[s_idx] = d_velIn_x[g_idx_vel + 1]; } //Top boundary if (threadIdx.y == blockDim.y - 1 && blockIdx.y != gridDim.y - 1) { s_idx = (threadIdx.y + 1) * (blockDim.x + 1) + threadIdx.x; s_vel_x[s_idx] = d_velIn_x[g_idx_vel + blockDim.x * gridDim.x + 1]; } //Sync all threads __syncthreads(); int vel_idx = threadIdx.x + threadIdx.y * (blockDim.x + 1); float vel_x = (s_vel_x[vel_idx] + s_vel_x[vel_idx + 1]) * 0.5f; float vel_y = (s_vel_y[vel_idx] + s_vel_y[vel_idx + blockDim.x + 1]) * 0.5f; float dphidx, dphidy; int phi_idx = threadIdx.x + 1 + (threadIdx.y + 1) * (blockDim.x + 2); float phi = s_phi[phi_idx]; if (vel_x > 0.0f) { dphidx = (phi - s_phi[phi_idx - 1]) * inv_dx; } else { dphidx = (s_phi[phi_idx + 1] - phi) * inv_dx; } if (vel_y > 0.0f) { dphidy = (phi - s_phi[phi_idx - (blockDim.x + 2)]) * inv_dx; } else { dphidy = (s_phi[phi_idx + (blockDim.x + 2)] - phi) * inv_dx; } d_levelsetOut[g_idx] = phi - dt * (dphidx * vel_x + dphidy * vel_y); } void advectLevelset(dim3 blocks, dim3 threads, const float dt, const float inv_dx, const unsigned char * d_mask, const float * d_levelsetIn, float * d_levelsetOut, const float * d_velIn_x, const float * d_velIn_y) { advectLevelset<<<blocks,threads>>>(dt, inv_dx, d_mask, d_levelsetIn, d_levelsetOut, d_velIn_x, d_velIn_y); }
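The shared-memory tiles in advectLevelset are sized with a hard-coded T_THREADS = 16 and the kernel has no bounds check on g_idx, so the wrapper is presumably meant to be launched with 16x16 thread blocks and a grid that exactly tiles the level-set field. A short hedged sketch of that launch configuration follows; makeAdvectionLaunch, the grid size, dt and dx are all illustrative, not taken from the original project.

#include <cuda_runtime.h>

// Hypothetical helper: build the launch configuration expected by advectLevelset
// for an nx-by-ny level-set grid. nx and ny are assumed to be multiples of 16,
// matching the kernel's hard-coded T_THREADS tile size.
inline void makeAdvectionLaunch(int nx, int ny, dim3 &blocks, dim3 &threads) {
    const int tile = 16;              // must equal T_THREADS in the kernel
    threads = dim3(tile, tile);
    blocks  = dim3(nx / tile, ny / tile);
}

// Usage sketch (device buffers d_mask, d_phiIn, d_phiOut, d_u, d_v assumed allocated):
//   dim3 blocks, threads;
//   makeAdvectionLaunch(256, 256, blocks, threads);
//   float dx = 1.0f / 256.0f, dt = 0.5f * dx;   // CFL-style step, illustrative
//   advectLevelset(blocks, threads, dt, 1.0f / dx,
//                  d_mask, d_phiIn, d_phiOut, d_u, d_v);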
f04acc21cac68f2bf71ef30934ce9e02ac27e9d7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <smat_cuda/cuda_errors.h>
#include <smat_cuda/cuda_context.h>
#include <smat_cuda/launch_util.h>
#include <smat/vm/util/specialization_table.h>
#include <smat/vm/util/specialization_typelists.h>
#include <smat/vm/instruction_db.h>
SM_NAMESPACE_BEGIN

template <typename T>
__global__ void kernel_diff_y(const T* src, T* dst, usize_t m, usize_t size)
{
    DECL_KERNEL_VARS
    for (usize_t i = (usize_t)bdx*bx+tx; i < size; i += bdx*gdx)
        dst[i] = src[i+m]-src[i]; // could be implemented by oc_sub operation on two views of arg, but this should be marginally faster.
}

template <typename T>
struct execute_diff_typed {
    static void execute(opcode_t opcode, const argument& src, const argument& dst)
    {
        usize_t size = (usize_t)dst.size();
        if (size == 0)
            return;
        if (opcode == oc_diff_x) {
            SM_UNIMPLEMENTED();
        } else if (opcode == oc_diff_y) {
            launchcfg cfg = make_elemwise_launchcfg(size);
            hipLaunchKernelGGL(( kernel_diff_y), dim3(cfg.gdim),dim3(cfg.bdim),cfg.smem,cfg.stream, src.get<const T*>(),dst.get<T*>(),dst.shape.x,size);
        } else {
            SM_UNREACHABLE();
        }
    }
};

void execute_diff(opcode_t opcode, const argument& src, const argument& dst)
{
    DECL_SPECIALIZATION_TABLE(T_G,execute_fn2,execute_diff_typed);
    specialization_table(src.dtype)(opcode,src,dst);
}

SM_NAMESPACE_END
f04acc21cac68f2bf71ef30934ce9e02ac27e9d7.cu
#include <smat_cuda/cuda_errors.h>
#include <smat_cuda/cuda_context.h>
#include <smat_cuda/launch_util.h>
#include <smat/vm/util/specialization_table.h>
#include <smat/vm/util/specialization_typelists.h>
#include <smat/vm/instruction_db.h>
SM_NAMESPACE_BEGIN

template <typename T>
__global__ void kernel_diff_y(const T* src, T* dst, usize_t m, usize_t size)
{
    DECL_KERNEL_VARS
    for (usize_t i = (usize_t)bdx*bx+tx; i < size; i += bdx*gdx)
        dst[i] = src[i+m]-src[i]; // could be implemented by oc_sub operation on two views of arg, but this should be marginally faster.
}

template <typename T>
struct execute_diff_typed {
    static void execute(opcode_t opcode, const argument& src, const argument& dst)
    {
        usize_t size = (usize_t)dst.size();
        if (size == 0)
            return;
        if (opcode == oc_diff_x) {
            SM_UNIMPLEMENTED();
        } else if (opcode == oc_diff_y) {
            launchcfg cfg = make_elemwise_launchcfg(size);
            kernel_diff_y<<<cfg.gdim,cfg.bdim,cfg.smem,cfg.stream>>>(src.get<const T*>(),dst.get<T*>(),dst.shape.x,size);
        } else {
            SM_UNREACHABLE();
        }
    }
};

void execute_diff(opcode_t opcode, const argument& src, const argument& dst)
{
    DECL_SPECIALIZATION_TABLE(T_G,execute_fn2,execute_diff_typed);
    specialization_table(src.dtype)(opcode,src,dst);
}

SM_NAMESPACE_END
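kernel_diff_y above is a grid-stride loop computing dst[i] = src[i+m] - src[i], i.e. a forward difference with stride m (whatever dst.shape.x evaluates to in smat). The following small CPU reference mirrors the same arithmetic and can serve as a check when testing the kernel; the function name and the sample data are illustrative.

#include <vector>
#include <cstdio>

// CPU reference for kernel_diff_y: strided forward difference with stride m.
template <typename T>
void diff_ref(const std::vector<T> &src, std::vector<T> &dst, size_t m) {
    // dst holds src.size() - m elements, matching dst[i] = src[i+m] - src[i].
    dst.resize(src.size() - m);
    for (size_t i = 0; i < dst.size(); ++i)
        dst[i] = src[i + m] - src[i];
}

int main() {
    std::vector<float> src = {1, 2, 4, 7, 11, 16};  // illustrative data
    std::vector<float> dst;
    diff_ref(src, dst, 1);                          // m = 1: adjacent-element diff
    for (float v : dst) printf("%g ", v);           // prints 1 2 3 4 5
    printf("\n");
    return 0;
}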
4f6757ce615d3bbfb6fefcd8c1eb9cce1acb5bed.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // EM_ICP Wendy Liu #include <iostream> #include <numeric> #include <cmath> #include "icp.h" // shared with icp #include "Eigen/Eigen" #include <assert.h> #include <iomanip> #include <rocblas.h> #include <cusolverDn.h> #include "support.cu" #include "emicp.h" #include <Eigen/Dense> #define BLOCK_SIZE 32 #define GRID_SIZE 128 __global__ static void updateA(int rowsA, int colsA, int pitchA, const float* d_Xx, const float* d_Xy, const float* d_Xz, const float* d_Yx, const float* d_Yy, const float* d_Yz, const float* d_R, const float* d_t, float* d_A, float sigma_p2){ int r = blockIdx.x * blockDim.x + threadIdx.x; int c = blockIdx.y * blockDim.y + threadIdx.y; // Shared memory __shared__ float XxShare[BLOCK_SIZE]; __shared__ float XyShare[BLOCK_SIZE]; __shared__ float XzShare[BLOCK_SIZE]; __shared__ float YxShare[BLOCK_SIZE]; __shared__ float YyShare[BLOCK_SIZE]; __shared__ float YzShare[BLOCK_SIZE]; __shared__ float RShare[9]; __shared__ float tShare[3]; if(threadIdx.x == 0 && threadIdx.y == 0) { for (int i = 0; i < 9; i++) RShare[i] = d_R[i]; for (int i = 0; i < 3; i++) tShare[i] = d_t[i]; } if(r < rowsA && c < colsA){ // check for only inside the matrix A if(threadIdx.x == 0){ XxShare[threadIdx.y] = d_Xx[c]; XyShare[threadIdx.y] = d_Xy[c]; XzShare[threadIdx.y] = d_Xz[c]; } if(threadIdx.y == 0){ YxShare[threadIdx.x] = d_Yx[r]; YyShare[threadIdx.x] = d_Yy[r]; YzShare[threadIdx.x] = d_Yz[r]; } __syncthreads(); #define Xx XxShare[threadIdx.y] #define Xy XyShare[threadIdx.y] #define Xz XzShare[threadIdx.y] #define Yx YxShare[threadIdx.x] #define Yy YyShare[threadIdx.x] #define Yz YzShare[threadIdx.x] #define R(i) RShare[i] #define t(i) tShare[i] // #define Euclid(a,b,c) ((a)*(a)+(b)*(b)+(c)*(c)) // float tmp = // Euclid(Xx - (R(0)*Yx + R(1)*Yy + R(2)*Yz + t(0)), // Xy - (R(3)*Yx + R(4)*Yy + R(5)*Yz + t(1)), // Xz - (R(6)*Yx + R(7)*Yy + R(8)*Yz + t(2)) ); // tmp = expf(-tmp/sigma_p^2) float tmpX = Xx - (R(0)*Yx + R(1)*Yy + R(2)*Yz + t(0)); float tmpY = Xy - (R(3)*Yx + R(4)*Yy + R(5)*Yz + t(1)); float tmpZ = Xz - (R(6)*Yx + R(7)*Yy + R(8)*Yz + t(2)); __syncthreads(); tmpX *= tmpX; tmpY *= tmpY; tmpZ *= tmpZ; tmpX += tmpY; tmpX += tmpZ; tmpX /= sigma_p2; tmpX = expf(-tmpX); //float *A = (float*)((char*)d_A + c * pitchMinBytes) + r; d_A[c * pitchA + r] = tmpX; } } __global__ static void normalizeRowsOfA(int rowsA, int colsA, int pitchA, float *d_A, const float *d_C){ int r = blockIdx.x * blockDim.x + threadIdx.x; int c = blockIdx.y * blockDim.y + threadIdx.y; // Shared memory __shared__ float d_CShare[BLOCK_SIZE]; if(r < rowsA && c < colsA){ // check for only inside the matrix A if(threadIdx.y == 0) d_CShare[threadIdx.x] = d_C[r]; __syncthreads(); if(d_CShare[threadIdx.x] > 10e-7f) // each element in A is normalized C, then squre-rooted d_A[c * pitchA + r] = sqrtf( d_A[c * pitchA + r] / d_CShare[threadIdx.x] ); else d_A[c * pitchA + r] = 1.0f/colsA; // ad_hoc code to avoid 0 division __syncthreads(); } } __global__ static void elementwiseDivision(int Xsize, float* d_Xx, float* d_Xy, float* d_Xz, const float* d_lambda){ int x = blockIdx.x * blockDim.x + threadIdx.x; if(x < Xsize){ float l_lambda = d_lambda[x]; d_Xx[x] /= l_lambda; d_Xy[x] /= l_lambda; d_Xz[x] /= l_lambda; } } __global__ static void elementwiseMultiplication(int Xsize, float* d_Xx, float* d_Xy, float* d_Xz, const float* d_lambda){ int x = blockIdx.x * blockDim.x + threadIdx.x; if(x < Xsize){ float l_lambda = d_lambda[x]; d_Xx[x] *= l_lambda; 
d_Xy[x] *= l_lambda; d_Xz[x] *= l_lambda; } } __global__ static void centeringXandY(int rowsA, const float* d_Xc, const float* d_Yc, const float* d_Xx, const float* d_Xy, const float* d_Xz, const float* d_Yx, const float* d_Yy, const float* d_Yz, float* d_XxCenterd, float* d_XyCenterd, float* d_XzCenterd, float* d_YxCenterd, float* d_YyCenterd, float* d_YzCenterd ){ // do for both X and Y at the same time int r = blockIdx.x * blockDim.x + threadIdx.x; // Shared memory __shared__ float Xc[3]; __shared__ float Yc[3]; if(threadIdx.x < 6) // assume blocksize >= 6 if(threadIdx.x < 3) Xc[threadIdx.x] = d_Xc[threadIdx.x]; else Yc[threadIdx.x - 3] = d_Yc[threadIdx.x - 3]; if(r < rowsA){ __syncthreads(); d_XxCenterd[r] = d_Xx[r] - Xc[0]; d_XyCenterd[r] = d_Xy[r] - Xc[1]; d_XzCenterd[r] = d_Xz[r] - Xc[2]; d_YxCenterd[r] = d_Yx[r] - Yc[0]; d_YyCenterd[r] = d_Yy[r] - Yc[1]; d_YzCenterd[r] = d_Yz[r] - Yc[2]; __syncthreads(); } } extern "C" { int dsyev_(char *jobz, char *uplo, int *n, double *a, int *lda, double *w, double *work, int *lwork, int *info); } void eigenvectorOfN(double *N, float* q){ static float q_pre[4]; // previous result int dimN = 4; double w[4]; // eigenvalues double *work = new double; // workspace int info; int lwork = -1; // dsyev_((char*)"V", (char*)"U", // &dimN, N, &dimN, // w, work, &lwork, &info); // if(info != 0){ // fprintf(stderr, "info = %d\n", info); // exit(1); // } // lwork = (int)work[0]; // delete work; // work = new double [lwork]; // dsyev_((char*)"V", (char*)"U", // &dimN, N, &dimN, // w, work, &lwork, &info); // delete [] work; if(info != 0){ fprintf(stderr, "computing eigenvector FAIL! info = %d\n", info); //exit(1); // if fail, put back the previous result for(int i=0; i<4; i++){ q[i] = q_pre[i]; } }else{ // last column of N is the eigenvector of the largest eigenvalue // and N is stored column-major for(int i=0; i<4; i++){ q[i] = N[4*3 + i]; q_pre[i] = q[i]; } } } void findRTfromS(const float* h_Xc, const float* h_Yc, const float* h_S, float* h_R, float* h_t){ #define h_Sxx h_S[0] #define h_Sxy h_S[1] #define h_Sxz h_S[2] #define h_Syx h_S[3] #define h_Syy h_S[4] #define h_Syz h_S[5] #define h_Szx h_S[6] #define h_Szy h_S[7] #define h_Szz h_S[8] #define h_Xcx h_Xc[0] #define h_Xcy h_Xc[1] #define h_Xcz h_Xc[2] #define h_Ycx h_Yc[0] #define h_Ycy h_Yc[1] #define h_Ycz h_Yc[2] double N[4*4]; for(int n=0;n<16;n++) N[n] = 0.0; float q[4]; for(int a=0;a<4;a++) q[a] = 0.0f; N[ 0] = h_Sxx + h_Syy + h_Szz; N[ 1] = h_Syz - h_Szy; N[ 2] = h_Szx - h_Sxz; N[ 3] = h_Sxy - h_Syx; N[ 4] = h_Syz - h_Szy; N[ 5] = h_Sxx - h_Syy - h_Szz; N[ 6] = h_Sxy + h_Syx; N[ 7] = h_Szx + h_Sxz; N[ 8] = h_Szx - h_Sxz; N[ 9] = h_Sxy + h_Syx; N[10] = h_Syy - h_Sxx - h_Szz; N[11] = h_Syz + h_Szy; N[12] = h_Sxy - h_Syx; N[13] = h_Szx + h_Sxz; N[14] = h_Syz + h_Szy; N[15] = h_Szz - h_Sxx - h_Syy; // compute the eigenvector corresponding the largest eigenvalue // eigenvectorOfN(N, q); // Eigen::ComplexEigenSolver<Eigen::Matrix<std::complex<double>, 2,2> > s(A); // Eigen::MatrixXd newN = Eigen::Map<Eigen::MatrixXd>(N, 4, 4); // Eigen::Matrix<std::complex<double>, 4,4> s(newN); // s.eigenvalues(); for(int i =0; i<16; i++) N[i] = 1+2*i; Eigen::MatrixXd A_test = Eigen::Map<Eigen::Matrix<double, 4, 4> >(N); Eigen::EigenSolver <Eigen::MatrixXd> eigensolver(A_test); if (eigensolver.info() != Eigen::Success) abort(); std::cout << "The eigenvalues of A are:\n" << eigensolver.eigenvalues() << std::endl; std::cout << "Here's a matrix whose columns are eigenvectors of A \n" << "corresponding to these 
eigenvalues:\n" << eigensolver.eigenvectors() << std::endl; // std::cout << aaaa<< std::endl; // for(int n=0;n<16;n++) std::cout << N[n] << ", "<<std::endl; printf("So far so good\n"); exit(0); float q0 = q[0], qx = q[1], qy = q[2], qz = q[3]; // quaternion to rotation matrix h_R[0] = q0*q0 + qx*qx - qy*qy - qz*qz; h_R[1] = 2 * (qx*qy - q0*qz); h_R[2] = 2 * (qx*qz + q0*qy); h_R[3] = 2 * (qy*qx + q0*qz); h_R[4] = q0*q0 - qx*qx + qy*qy - qz*qz; h_R[5] = 2 * (qy*qz - q0*qx); h_R[6] = 2 * (qz*qx - q0*qy); h_R[7] = 2 * (qz*qy + q0*qx); h_R[8] = q0*q0 - qx*qx - qy*qy + qz*qz; // translation vector h_t[0] = h_Xcx - (h_R[0]*h_Ycx + h_R[1]*h_Ycy + h_R[2]*h_Ycz); h_t[1] = h_Xcy - (h_R[3]*h_Ycx + h_R[4]*h_Ycy + h_R[5]*h_Ycz); h_t[2] = h_Xcz - (h_R[6]*h_Ycx + h_R[7]*h_Ycy + h_R[8]*h_Ycz); } // ----------------------------------- Main -------------------------------- void emicp(const Eigen::MatrixXf cloud_target, const Eigen::MatrixXf cloud_source, float* h_R, float* h_t) { float sigma_p2 = 0.01; // initial value for the main loop. sigma_p2 <- sigma_p2 * sigma_factor at the end of each iteration while sigma_p2 > sigam_inf. default: 0.01 float sigma_inf = 0.00001; // minimum value of sigma_p2. default: 0.00001 float sigma_factor = 0.9; // facfor for reducing sigma_p2. default: 0.9 float d_02 = 0.01; // values for outlier (see EM-ICP paper). default: 0.01 int Xsize = cloud_source.rows(); int Ysize = cloud_source.cols(); const float *h_X = cloud_source.data(); const float *h_Y = cloud_target.data(); // Reusable snippets // Copied from Tamaki's GitHub repo #define memCUDA(var,num) float* d_ ## var; hipMalloc((void**) &(d_ ## var), sizeof(float)*num); #define memHostToCUDA(var,num) \ float* d_ ## var; hipMalloc((void**) &(d_ ## var), sizeof(float)*num); \ hipMemcpy(d_ ## var, h_ ## var, sizeof(float)*num, hipMemcpyHostToDevice); // Memory allocation memHostToCUDA(X, Xsize*3); float* d_Xx = &d_X[Xsize*0]; float* d_Xy = &d_X[Xsize*1]; float* d_Xz = &d_X[Xsize*2]; memHostToCUDA(Y, Ysize*3); float* d_Yx = &d_Y[Ysize*0]; float* d_Yy = &d_Y[Ysize*1]; float* d_Yz = &d_Y[Ysize*2]; memCUDA(Xprime, Ysize*3); float *d_XprimeX = &d_Xprime[Ysize*0]; float *d_XprimeY = &d_Xprime[Ysize*1]; float *d_XprimeZ = &d_Xprime[Ysize*2]; float *d_XprimeCenterd = d_Xprime; float *d_XprimeCenterdX = &d_XprimeCenterd[Ysize*0]; float *d_XprimeCenterdY = &d_XprimeCenterd[Ysize*1]; float *d_XprimeCenterdZ = &d_XprimeCenterd[Ysize*2]; memCUDA(YCenterd, Ysize*3); float *d_YCenterdX = &d_YCenterd[Ysize*0]; float *d_YCenterdY = &d_YCenterd[Ysize*1]; float *d_YCenterdZ = &d_YCenterd[Ysize*2]; // center of X, Y float h_Xc[3], h_Yc[3]; memCUDA(Xc, 3); memCUDA(Yc, 3); // R, t memHostToCUDA(R, 3*3); memHostToCUDA(t, 3); hipMemcpy(d_R, h_R, sizeof(float)*3*3, hipMemcpyHostToDevice); hipMemcpy(d_t, h_t, sizeof(float)*3, hipMemcpyHostToDevice); // S for finding R, t float h_S[9]; memCUDA(S, 9); // NOTE on matrix A (from Tamaki) // number of rows: Ysize, or rowsA // number of columns : Xsize, or colsA // // [0th in X] [1st] ... [(Xsize-1)] // [0th point in Y] [ A(0,0) A(0,1) ... A(0,Xsize-1) ] // [1st ] [ A(1,0) A(1,1) ... ] // ... [ ... ] // [(Ysize-1) ] [ A(Ysize-1, 0) ... A(Ysize-1,Xsize-1)] // // // CAUTION on matrix A // A is allcoated as a column-maijor format for the use of cublas. // This means that you must acces an element at row r and column c as: // A(r,c) = A[c * pitchA + r] int rowsA = Ysize; int colsA = Xsize; // pitchA: leading dimension of A, which is ideally equal to rowsA, // but actually larger than that. 
int pitchA = (rowsA / 4 + 1) * 4; memCUDA(A, pitchA*colsA); // a vector with all elements of 1.0f float* h_one = new float [max(Xsize,Ysize)]; for(int t = 0; t < max(Xsize,Ysize); t++) h_one[t] = 1.0f; memHostToCUDA(one, max(Xsize,Ysize)); memCUDA(sumOfMRow, rowsA); memCUDA(C, rowsA); // sum of a row in A memCUDA(lambda, rowsA); // weight of a row in A // for 2D block dim3 dimBlockForA(BLOCK_SIZE, BLOCK_SIZE); // a block is (BLOCK_SIZE*BLOCK_SIZE) threads dim3 dimGridForA( (pitchA + dimBlockForA.x - 1) / dimBlockForA.x, (colsA + dimBlockForA.y - 1) / dimBlockForA.y); // for 1D block int threadsPerBlockForYsize = 512; // a block is 512 threads int blocksPerGridForYsize = (Ysize + threadsPerBlockForYsize - 1 ) / threadsPerBlockForYsize; hipblasHandle_t handle; hipblasCreate(&handle); // EM-ICP main loop // int Titer = 1; while(sigma_p2 > sigma_inf) { // UpdateA hipLaunchKernelGGL(( updateA) , dim3(dimGridForA), dim3(dimBlockForA) , 0, 0, rowsA, colsA, pitchA, d_Xx, d_Xy, d_Xz, d_Yx, d_Yy, d_Yz, d_R, d_t, d_A, sigma_p2); // Normalization of A // // A * one vector = vector with elements of row-wise sum // d_A * d_one => d_C //(rowsA*colsA) * (colsA*1) = (rowsA*1) float alpha = 1.0; float beta = 0.0; hipblasSgemv(handle, HIPBLAS_OP_N, rowsA, colsA, &alpha, d_A, pitchA, d_one, 1, &beta, d_C, 1); alpha = expf(-d_02/sigma_p2); hipblasSaxpy(handle, rowsA, &alpha, d_one, 1, d_C, 1); hipLaunchKernelGGL(( normalizeRowsOfA) , dim3(dimGridForA), dim3(dimBlockForA) , 0, 0, rowsA, colsA, pitchA, d_A, d_C); // update R,T ///////////////////////////////////////////////////////////////////////////////////// // compute lambda // A * one vector = vector with elements of row-wise sum // d_A * d_one => d_lambda //(rowsA*colsA) * (colsA*1) = (rowsA*1) // hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, N, K, M, &alpha, A, M, A, M, &beta, B, N); // A(MxN) K = N A'(N,M) hipblasSgemv(handle, HIPBLAS_OP_N, rowsA, colsA, &alpha, d_A, pitchA, d_one, 1, &beta, d_lambda, 1); // hipblasStatus_t hipblasSasum(hipblasHandle_t handle, int n, const float *x, int incx, float *result) // hipblasDasum(handle, num_data_pts, best_dist_device, 1, &prev_error) float result = 0; // place-holder float sumLambda = hipblasSasum (handle, rowsA, d_lambda, 1, &result); ///////////////////////////////////////////////////////////////////////////////////// // compute X' // m number of rows of matrix op(A) and rows of matrix C // n number of columns of matrix op(B) and number of columns of C // k number of columns of matrix op(A) and number of rows of op(B) // A * X => X' // d_A * d_X => d_Xprime //(rowsA*colsA) * (colsA*3) = (rowsA*3) // m * k k * n m * n alpha = 1; beta = 0; hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, rowsA, 3, colsA, &alpha, d_A, pitchA, d_X, colsA, &beta, d_Xprime, rowsA); // X' ./ lambda => X' hipLaunchKernelGGL(( elementwiseDivision) , dim3(blocksPerGridForYsize), dim3(threadsPerBlockForYsize), 0, 0, rowsA, d_XprimeX, d_XprimeY, d_XprimeZ, d_lambda); ///////////////////////////////////////////////////////////////////////////////////// // // centering X' and Y // ///////////////////////////////////////////////////////////////////////////////////// // find weighted center of X' and Y // d_Xprime^T * d_lambda => h_Xc // (3 * rowsA) (rowsA * 1) = (3 * 1) hipblasSgemv(handle, HIPBLAS_OP_T, rowsA, 3, &alpha, d_Xprime, rowsA, d_lambda, 1, &beta, d_Xc, 1); // d_Y^T * d_lambda => h_Yc // (3 * rowsA) (rowsA * 1) = (3 * 1) hipblasSgemv(handle, HIPBLAS_OP_T, rowsA, 3, &alpha, d_Y, rowsA, d_lambda, 1, &beta, d_Yc, 1); // void 
hipblasSscal (int n, float alpha, float *x, int incx) // it replaces x[ix + i * incx] with alpha * x[ix + i * incx] alpha = 1/sumLambda; hipblasSscal (handle, 3, &alpha, d_Xc, 1); hipblasSscal (handle, 3, &alpha, d_Yc, 1); hipMemcpy(h_Xc, d_Xc, sizeof(float)*3, hipMemcpyDeviceToHost); hipMemcpy(h_Yc, d_Yc, sizeof(float)*3, hipMemcpyDeviceToHost); ///////////////////////////////////////////////////////////////////////////////////// // centering X and Y // d_Xprime .- d_Xc => d_XprimeCenterd // d_Y .- d_Yc => d_YCenterd hipLaunchKernelGGL(( centeringXandY) , dim3(blocksPerGridForYsize), dim3(threadsPerBlockForYsize), 0, 0, rowsA, d_Xc, d_Yc, d_XprimeX, d_XprimeY, d_XprimeZ, d_Yx, d_Yy, d_Yz, d_XprimeCenterdX, d_XprimeCenterdY, d_XprimeCenterdZ, d_YCenterdX, d_YCenterdY, d_YCenterdZ); // XprimeCented .* d_lambda => XprimeCented hipLaunchKernelGGL(( elementwiseMultiplication) , dim3(blocksPerGridForYsize), dim3(threadsPerBlockForYsize), 0, 0, rowsA, d_XprimeCenterdX, d_XprimeCenterdY, d_XprimeCenterdZ, d_lambda); ///////////////////////////////////////////////////////////////////////////////////// // compute S // d_XprimeCented^T * d_YCenterd => d_S // (3*rowsA) * (rowsA*3) = (3*3) // m * k k * n m * n hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, 3, 3, rowsA, &alpha, d_XprimeCenterd, rowsA, d_YCenterd, rowsA, &beta, d_S, 3); hipMemcpy(h_S, d_S, sizeof(float)*9, hipMemcpyDeviceToHost); ///////////////////////////////////////////////////////////////////////////////////// // find RT from S findRTfromS(h_Xc, h_Yc, h_S, h_R, h_t); // STOP_TIMER(timerAfterSVD); ///////////////////////////////////////////////////////////////////////////////////// // copy R,t to device hipMemcpy(d_R, h_R, sizeof(float)*3*3, hipMemcpyHostToDevice); hipMemcpy(d_t, h_t, sizeof(float)*3, hipMemcpyHostToDevice); ///////////////////////////////////////////////////////////////////////////////////// sigma_p2 *= sigma_factor; } hipDeviceSynchronize(); hipblasDestroy(handle); hipFree(d_X); hipFree(d_Y); hipFree(d_Xprime); hipFree(d_YCenterd); hipFree(d_Xc); hipFree(d_Yc); hipFree(d_R); hipFree(d_t); hipFree(d_A); hipFree(d_S); hipFree(d_one); hipFree(d_sumOfMRow); hipFree(d_C); hipFree(d_lambda); delete [] h_one; }
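findRTfromS converts the dominant eigenvector of N, read as a quaternion (q0, qx, qy, qz), into a rotation matrix using the formulas that appear in both copies of the file. The standalone check below reuses those same formulas and verifies that a normalised quaternion yields an orthonormal R; the test quaternion is arbitrary and the helper name is an illustration, not part of the original code.

#include <cmath>
#include <cstdio>

// Same quaternion-to-rotation-matrix formulas as in findRTfromS (row-major R[9]).
static void quatToR(const float q[4], float R[9]) {
    float q0 = q[0], qx = q[1], qy = q[2], qz = q[3];
    R[0] = q0*q0 + qx*qx - qy*qy - qz*qz; R[1] = 2*(qx*qy - q0*qz); R[2] = 2*(qx*qz + q0*qy);
    R[3] = 2*(qy*qx + q0*qz); R[4] = q0*q0 - qx*qx + qy*qy - qz*qz; R[5] = 2*(qy*qz - q0*qx);
    R[6] = 2*(qz*qx - q0*qy); R[7] = 2*(qz*qy + q0*qx); R[8] = q0*q0 - qx*qx - qy*qy + qz*qz;
}

int main() {
    float q[4] = {0.8f, 0.1f, -0.3f, 0.5f};              // arbitrary test quaternion
    float n = std::sqrt(q[0]*q[0] + q[1]*q[1] + q[2]*q[2] + q[3]*q[3]);
    for (float &v : q) v /= n;                           // must be unit length
    float R[9];
    quatToR(q, R);
    // R * R^T should be (close to) the identity for a unit quaternion.
    float maxErr = 0.0f;
    for (int r = 0; r < 3; ++r)
        for (int c = 0; c < 3; ++c) {
            float dot = 0.0f;
            for (int k = 0; k < 3; ++k) dot += R[3*r + k] * R[3*c + k];
            maxErr = std::fmax(maxErr, std::fabs(dot - (r == c ? 1.0f : 0.0f)));
        }
    printf("max |R R^T - I| = %g\n", maxErr);
    return 0;
}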
4f6757ce615d3bbfb6fefcd8c1eb9cce1acb5bed.cu
// EM_ICP Wendy Liu #include <iostream> #include <numeric> #include <cmath> #include "icp.h" // shared with icp #include "Eigen/Eigen" #include <assert.h> #include <iomanip> #include <cublas_v2.h> #include <cusolverDn.h> #include "support.cu" #include "emicp.h" #include <Eigen/Dense> #define BLOCK_SIZE 32 #define GRID_SIZE 128 __global__ static void updateA(int rowsA, int colsA, int pitchA, const float* d_Xx, const float* d_Xy, const float* d_Xz, const float* d_Yx, const float* d_Yy, const float* d_Yz, const float* d_R, const float* d_t, float* d_A, float sigma_p2){ int r = blockIdx.x * blockDim.x + threadIdx.x; int c = blockIdx.y * blockDim.y + threadIdx.y; // Shared memory __shared__ float XxShare[BLOCK_SIZE]; __shared__ float XyShare[BLOCK_SIZE]; __shared__ float XzShare[BLOCK_SIZE]; __shared__ float YxShare[BLOCK_SIZE]; __shared__ float YyShare[BLOCK_SIZE]; __shared__ float YzShare[BLOCK_SIZE]; __shared__ float RShare[9]; __shared__ float tShare[3]; if(threadIdx.x == 0 && threadIdx.y == 0) { for (int i = 0; i < 9; i++) RShare[i] = d_R[i]; for (int i = 0; i < 3; i++) tShare[i] = d_t[i]; } if(r < rowsA && c < colsA){ // check for only inside the matrix A if(threadIdx.x == 0){ XxShare[threadIdx.y] = d_Xx[c]; XyShare[threadIdx.y] = d_Xy[c]; XzShare[threadIdx.y] = d_Xz[c]; } if(threadIdx.y == 0){ YxShare[threadIdx.x] = d_Yx[r]; YyShare[threadIdx.x] = d_Yy[r]; YzShare[threadIdx.x] = d_Yz[r]; } __syncthreads(); #define Xx XxShare[threadIdx.y] #define Xy XyShare[threadIdx.y] #define Xz XzShare[threadIdx.y] #define Yx YxShare[threadIdx.x] #define Yy YyShare[threadIdx.x] #define Yz YzShare[threadIdx.x] #define R(i) RShare[i] #define t(i) tShare[i] // #define Euclid(a,b,c) ((a)*(a)+(b)*(b)+(c)*(c)) // float tmp = // Euclid(Xx - (R(0)*Yx + R(1)*Yy + R(2)*Yz + t(0)), // Xy - (R(3)*Yx + R(4)*Yy + R(5)*Yz + t(1)), // Xz - (R(6)*Yx + R(7)*Yy + R(8)*Yz + t(2)) ); // tmp = expf(-tmp/sigma_p^2) float tmpX = Xx - (R(0)*Yx + R(1)*Yy + R(2)*Yz + t(0)); float tmpY = Xy - (R(3)*Yx + R(4)*Yy + R(5)*Yz + t(1)); float tmpZ = Xz - (R(6)*Yx + R(7)*Yy + R(8)*Yz + t(2)); __syncthreads(); tmpX *= tmpX; tmpY *= tmpY; tmpZ *= tmpZ; tmpX += tmpY; tmpX += tmpZ; tmpX /= sigma_p2; tmpX = expf(-tmpX); //float *A = (float*)((char*)d_A + c * pitchMinBytes) + r; d_A[c * pitchA + r] = tmpX; } } __global__ static void normalizeRowsOfA(int rowsA, int colsA, int pitchA, float *d_A, const float *d_C){ int r = blockIdx.x * blockDim.x + threadIdx.x; int c = blockIdx.y * blockDim.y + threadIdx.y; // Shared memory __shared__ float d_CShare[BLOCK_SIZE]; if(r < rowsA && c < colsA){ // check for only inside the matrix A if(threadIdx.y == 0) d_CShare[threadIdx.x] = d_C[r]; __syncthreads(); if(d_CShare[threadIdx.x] > 10e-7f) // each element in A is normalized C, then squre-rooted d_A[c * pitchA + r] = sqrtf( d_A[c * pitchA + r] / d_CShare[threadIdx.x] ); else d_A[c * pitchA + r] = 1.0f/colsA; // ad_hoc code to avoid 0 division __syncthreads(); } } __global__ static void elementwiseDivision(int Xsize, float* d_Xx, float* d_Xy, float* d_Xz, const float* d_lambda){ int x = blockIdx.x * blockDim.x + threadIdx.x; if(x < Xsize){ float l_lambda = d_lambda[x]; d_Xx[x] /= l_lambda; d_Xy[x] /= l_lambda; d_Xz[x] /= l_lambda; } } __global__ static void elementwiseMultiplication(int Xsize, float* d_Xx, float* d_Xy, float* d_Xz, const float* d_lambda){ int x = blockIdx.x * blockDim.x + threadIdx.x; if(x < Xsize){ float l_lambda = d_lambda[x]; d_Xx[x] *= l_lambda; d_Xy[x] *= l_lambda; d_Xz[x] *= l_lambda; } } __global__ static void centeringXandY(int 
rowsA, const float* d_Xc, const float* d_Yc, const float* d_Xx, const float* d_Xy, const float* d_Xz, const float* d_Yx, const float* d_Yy, const float* d_Yz, float* d_XxCenterd, float* d_XyCenterd, float* d_XzCenterd, float* d_YxCenterd, float* d_YyCenterd, float* d_YzCenterd ){ // do for both X and Y at the same time int r = blockIdx.x * blockDim.x + threadIdx.x; // Shared memory __shared__ float Xc[3]; __shared__ float Yc[3]; if(threadIdx.x < 6) // assume blocksize >= 6 if(threadIdx.x < 3) Xc[threadIdx.x] = d_Xc[threadIdx.x]; else Yc[threadIdx.x - 3] = d_Yc[threadIdx.x - 3]; if(r < rowsA){ __syncthreads(); d_XxCenterd[r] = d_Xx[r] - Xc[0]; d_XyCenterd[r] = d_Xy[r] - Xc[1]; d_XzCenterd[r] = d_Xz[r] - Xc[2]; d_YxCenterd[r] = d_Yx[r] - Yc[0]; d_YyCenterd[r] = d_Yy[r] - Yc[1]; d_YzCenterd[r] = d_Yz[r] - Yc[2]; __syncthreads(); } } extern "C" { int dsyev_(char *jobz, char *uplo, int *n, double *a, int *lda, double *w, double *work, int *lwork, int *info); } void eigenvectorOfN(double *N, float* q){ static float q_pre[4]; // previous result int dimN = 4; double w[4]; // eigenvalues double *work = new double; // workspace int info; int lwork = -1; // dsyev_((char*)"V", (char*)"U", // &dimN, N, &dimN, // w, work, &lwork, &info); // if(info != 0){ // fprintf(stderr, "info = %d\n", info); // exit(1); // } // lwork = (int)work[0]; // delete work; // work = new double [lwork]; // dsyev_((char*)"V", (char*)"U", // &dimN, N, &dimN, // w, work, &lwork, &info); // delete [] work; if(info != 0){ fprintf(stderr, "computing eigenvector FAIL! info = %d\n", info); //exit(1); // if fail, put back the previous result for(int i=0; i<4; i++){ q[i] = q_pre[i]; } }else{ // last column of N is the eigenvector of the largest eigenvalue // and N is stored column-major for(int i=0; i<4; i++){ q[i] = N[4*3 + i]; q_pre[i] = q[i]; } } } void findRTfromS(const float* h_Xc, const float* h_Yc, const float* h_S, float* h_R, float* h_t){ #define h_Sxx h_S[0] #define h_Sxy h_S[1] #define h_Sxz h_S[2] #define h_Syx h_S[3] #define h_Syy h_S[4] #define h_Syz h_S[5] #define h_Szx h_S[6] #define h_Szy h_S[7] #define h_Szz h_S[8] #define h_Xcx h_Xc[0] #define h_Xcy h_Xc[1] #define h_Xcz h_Xc[2] #define h_Ycx h_Yc[0] #define h_Ycy h_Yc[1] #define h_Ycz h_Yc[2] double N[4*4]; for(int n=0;n<16;n++) N[n] = 0.0; float q[4]; for(int a=0;a<4;a++) q[a] = 0.0f; N[ 0] = h_Sxx + h_Syy + h_Szz; N[ 1] = h_Syz - h_Szy; N[ 2] = h_Szx - h_Sxz; N[ 3] = h_Sxy - h_Syx; N[ 4] = h_Syz - h_Szy; N[ 5] = h_Sxx - h_Syy - h_Szz; N[ 6] = h_Sxy + h_Syx; N[ 7] = h_Szx + h_Sxz; N[ 8] = h_Szx - h_Sxz; N[ 9] = h_Sxy + h_Syx; N[10] = h_Syy - h_Sxx - h_Szz; N[11] = h_Syz + h_Szy; N[12] = h_Sxy - h_Syx; N[13] = h_Szx + h_Sxz; N[14] = h_Syz + h_Szy; N[15] = h_Szz - h_Sxx - h_Syy; // compute the eigenvector corresponding the largest eigenvalue // eigenvectorOfN(N, q); // Eigen::ComplexEigenSolver<Eigen::Matrix<std::complex<double>, 2,2> > s(A); // Eigen::MatrixXd newN = Eigen::Map<Eigen::MatrixXd>(N, 4, 4); // Eigen::Matrix<std::complex<double>, 4,4> s(newN); // s.eigenvalues(); for(int i =0; i<16; i++) N[i] = 1+2*i; Eigen::MatrixXd A_test = Eigen::Map<Eigen::Matrix<double, 4, 4> >(N); Eigen::EigenSolver <Eigen::MatrixXd> eigensolver(A_test); if (eigensolver.info() != Eigen::Success) abort(); std::cout << "The eigenvalues of A are:\n" << eigensolver.eigenvalues() << std::endl; std::cout << "Here's a matrix whose columns are eigenvectors of A \n" << "corresponding to these eigenvalues:\n" << eigensolver.eigenvectors() << std::endl; // std::cout << aaaa<< std::endl; // 
for(int n=0;n<16;n++) std::cout << N[n] << ", "<<std::endl; printf("So far so good\n"); exit(0); float q0 = q[0], qx = q[1], qy = q[2], qz = q[3]; // quaternion to rotation matrix h_R[0] = q0*q0 + qx*qx - qy*qy - qz*qz; h_R[1] = 2 * (qx*qy - q0*qz); h_R[2] = 2 * (qx*qz + q0*qy); h_R[3] = 2 * (qy*qx + q0*qz); h_R[4] = q0*q0 - qx*qx + qy*qy - qz*qz; h_R[5] = 2 * (qy*qz - q0*qx); h_R[6] = 2 * (qz*qx - q0*qy); h_R[7] = 2 * (qz*qy + q0*qx); h_R[8] = q0*q0 - qx*qx - qy*qy + qz*qz; // translation vector h_t[0] = h_Xcx - (h_R[0]*h_Ycx + h_R[1]*h_Ycy + h_R[2]*h_Ycz); h_t[1] = h_Xcy - (h_R[3]*h_Ycx + h_R[4]*h_Ycy + h_R[5]*h_Ycz); h_t[2] = h_Xcz - (h_R[6]*h_Ycx + h_R[7]*h_Ycy + h_R[8]*h_Ycz); } // ----------------------------------- Main -------------------------------- void emicp(const Eigen::MatrixXf cloud_target, const Eigen::MatrixXf cloud_source, float* h_R, float* h_t) { float sigma_p2 = 0.01; // initial value for the main loop. sigma_p2 <- sigma_p2 * sigma_factor at the end of each iteration while sigma_p2 > sigam_inf. default: 0.01 float sigma_inf = 0.00001; // minimum value of sigma_p2. default: 0.00001 float sigma_factor = 0.9; // facfor for reducing sigma_p2. default: 0.9 float d_02 = 0.01; // values for outlier (see EM-ICP paper). default: 0.01 int Xsize = cloud_source.rows(); int Ysize = cloud_source.cols(); const float *h_X = cloud_source.data(); const float *h_Y = cloud_target.data(); // Reusable snippets // Copied from Tamaki's GitHub repo #define memCUDA(var,num) float* d_ ## var; cudaMalloc((void**) &(d_ ## var), sizeof(float)*num); #define memHostToCUDA(var,num) \ float* d_ ## var; cudaMalloc((void**) &(d_ ## var), sizeof(float)*num); \ cudaMemcpy(d_ ## var, h_ ## var, sizeof(float)*num, cudaMemcpyHostToDevice); // Memory allocation memHostToCUDA(X, Xsize*3); float* d_Xx = &d_X[Xsize*0]; float* d_Xy = &d_X[Xsize*1]; float* d_Xz = &d_X[Xsize*2]; memHostToCUDA(Y, Ysize*3); float* d_Yx = &d_Y[Ysize*0]; float* d_Yy = &d_Y[Ysize*1]; float* d_Yz = &d_Y[Ysize*2]; memCUDA(Xprime, Ysize*3); float *d_XprimeX = &d_Xprime[Ysize*0]; float *d_XprimeY = &d_Xprime[Ysize*1]; float *d_XprimeZ = &d_Xprime[Ysize*2]; float *d_XprimeCenterd = d_Xprime; float *d_XprimeCenterdX = &d_XprimeCenterd[Ysize*0]; float *d_XprimeCenterdY = &d_XprimeCenterd[Ysize*1]; float *d_XprimeCenterdZ = &d_XprimeCenterd[Ysize*2]; memCUDA(YCenterd, Ysize*3); float *d_YCenterdX = &d_YCenterd[Ysize*0]; float *d_YCenterdY = &d_YCenterd[Ysize*1]; float *d_YCenterdZ = &d_YCenterd[Ysize*2]; // center of X, Y float h_Xc[3], h_Yc[3]; memCUDA(Xc, 3); memCUDA(Yc, 3); // R, t memHostToCUDA(R, 3*3); memHostToCUDA(t, 3); cudaMemcpy(d_R, h_R, sizeof(float)*3*3, cudaMemcpyHostToDevice); cudaMemcpy(d_t, h_t, sizeof(float)*3, cudaMemcpyHostToDevice); // S for finding R, t float h_S[9]; memCUDA(S, 9); // NOTE on matrix A (from Tamaki) // number of rows: Ysize, or rowsA // number of columns : Xsize, or colsA // // [0th in X] [1st] ... [(Xsize-1)] // [0th point in Y] [ A(0,0) A(0,1) ... A(0,Xsize-1) ] // [1st ] [ A(1,0) A(1,1) ... ] // ... [ ... ] // [(Ysize-1) ] [ A(Ysize-1, 0) ... A(Ysize-1,Xsize-1)] // // // CAUTION on matrix A // A is allcoated as a column-maijor format for the use of cublas. // This means that you must acces an element at row r and column c as: // A(r,c) = A[c * pitchA + r] int rowsA = Ysize; int colsA = Xsize; // pitchA: leading dimension of A, which is ideally equal to rowsA, // but actually larger than that. 
int pitchA = (rowsA / 4 + 1) * 4; memCUDA(A, pitchA*colsA); // a vector with all elements of 1.0f float* h_one = new float [max(Xsize,Ysize)]; for(int t = 0; t < max(Xsize,Ysize); t++) h_one[t] = 1.0f; memHostToCUDA(one, max(Xsize,Ysize)); memCUDA(sumOfMRow, rowsA); memCUDA(C, rowsA); // sum of a row in A memCUDA(lambda, rowsA); // weight of a row in A // for 2D block dim3 dimBlockForA(BLOCK_SIZE, BLOCK_SIZE); // a block is (BLOCK_SIZE*BLOCK_SIZE) threads dim3 dimGridForA( (pitchA + dimBlockForA.x - 1) / dimBlockForA.x, (colsA + dimBlockForA.y - 1) / dimBlockForA.y); // for 1D block int threadsPerBlockForYsize = 512; // a block is 512 threads int blocksPerGridForYsize = (Ysize + threadsPerBlockForYsize - 1 ) / threadsPerBlockForYsize; cublasHandle_t handle; cublasCreate(&handle); // EM-ICP main loop // int Titer = 1; while(sigma_p2 > sigma_inf) { // UpdateA updateA <<< dimGridForA, dimBlockForA >>> (rowsA, colsA, pitchA, d_Xx, d_Xy, d_Xz, d_Yx, d_Yy, d_Yz, d_R, d_t, d_A, sigma_p2); // Normalization of A // // A * one vector = vector with elements of row-wise sum // d_A * d_one => d_C //(rowsA*colsA) * (colsA*1) = (rowsA*1) float alpha = 1.0; float beta = 0.0; cublasSgemv(handle, CUBLAS_OP_N, rowsA, colsA, &alpha, d_A, pitchA, d_one, 1, &beta, d_C, 1); alpha = expf(-d_02/sigma_p2); cublasSaxpy(handle, rowsA, &alpha, d_one, 1, d_C, 1); normalizeRowsOfA <<< dimGridForA, dimBlockForA >>> (rowsA, colsA, pitchA, d_A, d_C); // update R,T ///////////////////////////////////////////////////////////////////////////////////// // compute lambda // A * one vector = vector with elements of row-wise sum // d_A * d_one => d_lambda //(rowsA*colsA) * (colsA*1) = (rowsA*1) // cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, N, K, M, &alpha, A, M, A, M, &beta, B, N); // A(MxN) K = N A'(N,M) cublasSgemv(handle, CUBLAS_OP_N, rowsA, colsA, &alpha, d_A, pitchA, d_one, 1, &beta, d_lambda, 1); // cublasStatus_t cublasSasum(cublasHandle_t handle, int n, const float *x, int incx, float *result) // cublasDasum(handle, num_data_pts, best_dist_device, 1, &prev_error) float result = 0; // place-holder float sumLambda = cublasSasum (handle, rowsA, d_lambda, 1, &result); ///////////////////////////////////////////////////////////////////////////////////// // compute X' // m number of rows of matrix op(A) and rows of matrix C // n number of columns of matrix op(B) and number of columns of C // k number of columns of matrix op(A) and number of rows of op(B) // A * X => X' // d_A * d_X => d_Xprime //(rowsA*colsA) * (colsA*3) = (rowsA*3) // m * k k * n m * n alpha = 1; beta = 0; cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, rowsA, 3, colsA, &alpha, d_A, pitchA, d_X, colsA, &beta, d_Xprime, rowsA); // X' ./ lambda => X' elementwiseDivision <<< blocksPerGridForYsize, threadsPerBlockForYsize>>> (rowsA, d_XprimeX, d_XprimeY, d_XprimeZ, d_lambda); ///////////////////////////////////////////////////////////////////////////////////// // // centering X' and Y // ///////////////////////////////////////////////////////////////////////////////////// // find weighted center of X' and Y // d_Xprime^T * d_lambda => h_Xc // (3 * rowsA) (rowsA * 1) = (3 * 1) cublasSgemv(handle, CUBLAS_OP_T, rowsA, 3, &alpha, d_Xprime, rowsA, d_lambda, 1, &beta, d_Xc, 1); // d_Y^T * d_lambda => h_Yc // (3 * rowsA) (rowsA * 1) = (3 * 1) cublasSgemv(handle, CUBLAS_OP_T, rowsA, 3, &alpha, d_Y, rowsA, d_lambda, 1, &beta, d_Yc, 1); // void cublasSscal (int n, float alpha, float *x, int incx) // it replaces x[ix + i * incx] with alpha * x[ix + i * incx] alpha = 
1/sumLambda; cublasSscal (handle, 3, &alpha, d_Xc, 1); cublasSscal (handle, 3, &alpha, d_Yc, 1); cudaMemcpy(h_Xc, d_Xc, sizeof(float)*3, cudaMemcpyDeviceToHost); cudaMemcpy(h_Yc, d_Yc, sizeof(float)*3, cudaMemcpyDeviceToHost); ///////////////////////////////////////////////////////////////////////////////////// // centering X and Y // d_Xprime .- d_Xc => d_XprimeCenterd // d_Y .- d_Yc => d_YCenterd centeringXandY <<< blocksPerGridForYsize, threadsPerBlockForYsize>>> (rowsA, d_Xc, d_Yc, d_XprimeX, d_XprimeY, d_XprimeZ, d_Yx, d_Yy, d_Yz, d_XprimeCenterdX, d_XprimeCenterdY, d_XprimeCenterdZ, d_YCenterdX, d_YCenterdY, d_YCenterdZ); // XprimeCented .* d_lambda => XprimeCented elementwiseMultiplication <<< blocksPerGridForYsize, threadsPerBlockForYsize>>> (rowsA, d_XprimeCenterdX, d_XprimeCenterdY, d_XprimeCenterdZ, d_lambda); ///////////////////////////////////////////////////////////////////////////////////// // compute S // d_XprimeCented^T * d_YCenterd => d_S // (3*rowsA) * (rowsA*3) = (3*3) // m * k k * n m * n cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, 3, 3, rowsA, &alpha, d_XprimeCenterd, rowsA, d_YCenterd, rowsA, &beta, d_S, 3); cudaMemcpy(h_S, d_S, sizeof(float)*9, cudaMemcpyDeviceToHost); ///////////////////////////////////////////////////////////////////////////////////// // find RT from S findRTfromS(h_Xc, h_Yc, h_S, h_R, h_t); // STOP_TIMER(timerAfterSVD); ///////////////////////////////////////////////////////////////////////////////////// // copy R,t to device cudaMemcpy(d_R, h_R, sizeof(float)*3*3, cudaMemcpyHostToDevice); cudaMemcpy(d_t, h_t, sizeof(float)*3, cudaMemcpyHostToDevice); ///////////////////////////////////////////////////////////////////////////////////// sigma_p2 *= sigma_factor; } cudaDeviceSynchronize(); cublasDestroy(handle); cudaFree(d_X); cudaFree(d_Y); cudaFree(d_Xprime); cudaFree(d_YCenterd); cudaFree(d_Xc); cudaFree(d_Yc); cudaFree(d_R); cudaFree(d_t); cudaFree(d_A); cudaFree(d_S); cudaFree(d_one); cudaFree(d_sumOfMRow); cudaFree(d_C); cudaFree(d_lambda); delete [] h_one; }
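The comments in emicp stress that A is stored column-major with a padded leading dimension pitchA, so element (r, c) lives at A[c * pitchA + r], and every cuBLAS call passes pitchA as the leading dimension. A small hedged sketch of that addressing on the host (dimensions are illustrative) which can help when debugging the layout:

#include <vector>
#include <cstdio>

int main() {
    // Illustrative sizes; in emicp rowsA = Ysize and colsA = Xsize.
    int rowsA = 5, colsA = 3;
    int pitchA = (rowsA / 4 + 1) * 4;           // same rounding as in emicp: 8 here

    // Column-major storage with padded leading dimension, as cuBLAS expects.
    std::vector<float> A(pitchA * colsA, 0.0f);
    for (int c = 0; c < colsA; ++c)
        for (int r = 0; r < rowsA; ++r)
            A[c * pitchA + r] = 10.0f * r + c;  // store A(r,c)

    // Read back A(4,2) using the same convention.
    printf("A(4,2) = %g (expected 42)\n", A[2 * pitchA + 4]);
    return 0;
}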