Dataset schema (string lengths, min to max):

  hip_filename    string, 5 to 84 characters
  hip_content     string, 79 to 9.69M characters
  cuda_filename   string, 4 to 83 characters
  cuda_content    string, 19 to 9.69M characters
97403457a6788c8060d17651ae5c205a4f9ffb84.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_advec_cell_kernel4_ydir; int xdim0_advec_cell_kernel4_ydir_h = -1; __constant__ int ydim0_advec_cell_kernel4_ydir; int ydim0_advec_cell_kernel4_ydir_h = -1; __constant__ int xdim1_advec_cell_kernel4_ydir; int xdim1_advec_cell_kernel4_ydir_h = -1; __constant__ int ydim1_advec_cell_kernel4_ydir; int ydim1_advec_cell_kernel4_ydir_h = -1; __constant__ int xdim2_advec_cell_kernel4_ydir; int xdim2_advec_cell_kernel4_ydir_h = -1; __constant__ int ydim2_advec_cell_kernel4_ydir; int ydim2_advec_cell_kernel4_ydir_h = -1; __constant__ int xdim3_advec_cell_kernel4_ydir; int xdim3_advec_cell_kernel4_ydir_h = -1; __constant__ int ydim3_advec_cell_kernel4_ydir; int ydim3_advec_cell_kernel4_ydir_h = -1; __constant__ int xdim4_advec_cell_kernel4_ydir; int xdim4_advec_cell_kernel4_ydir_h = -1; __constant__ int ydim4_advec_cell_kernel4_ydir; int ydim4_advec_cell_kernel4_ydir_h = -1; __constant__ int xdim5_advec_cell_kernel4_ydir; int xdim5_advec_cell_kernel4_ydir_h = -1; __constant__ int ydim5_advec_cell_kernel4_ydir; int ydim5_advec_cell_kernel4_ydir_h = -1; __constant__ int xdim6_advec_cell_kernel4_ydir; int xdim6_advec_cell_kernel4_ydir_h = -1; __constant__ int ydim6_advec_cell_kernel4_ydir; int ydim6_advec_cell_kernel4_ydir_h = -1; __constant__ int xdim7_advec_cell_kernel4_ydir; int xdim7_advec_cell_kernel4_ydir_h = -1; __constant__ int ydim7_advec_cell_kernel4_ydir; int ydim7_advec_cell_kernel4_ydir_h = -1; __constant__ int xdim8_advec_cell_kernel4_ydir; int xdim8_advec_cell_kernel4_ydir_h = -1; __constant__ int ydim8_advec_cell_kernel4_ydir; int ydim8_advec_cell_kernel4_ydir_h = -1; __constant__ int xdim9_advec_cell_kernel4_ydir; int xdim9_advec_cell_kernel4_ydir_h = -1; __constant__ int ydim9_advec_cell_kernel4_ydir; int ydim9_advec_cell_kernel4_ydir_h = -1; __constant__ int xdim10_advec_cell_kernel4_ydir; int xdim10_advec_cell_kernel4_ydir_h = -1; __constant__ int ydim10_advec_cell_kernel4_ydir; int ydim10_advec_cell_kernel4_ydir_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 #undef OPS_ACC8 #undef OPS_ACC9 #undef OPS_ACC10 #define OPS_ACC0(x, y, z) \ (x + xdim0_advec_cell_kernel4_ydir * (y) + \ xdim0_advec_cell_kernel4_ydir * ydim0_advec_cell_kernel4_ydir * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_advec_cell_kernel4_ydir * (y) + \ xdim1_advec_cell_kernel4_ydir * ydim1_advec_cell_kernel4_ydir * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_advec_cell_kernel4_ydir * (y) + \ xdim2_advec_cell_kernel4_ydir * ydim2_advec_cell_kernel4_ydir * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_advec_cell_kernel4_ydir * (y) + \ xdim3_advec_cell_kernel4_ydir * ydim3_advec_cell_kernel4_ydir * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_advec_cell_kernel4_ydir * (y) + \ xdim4_advec_cell_kernel4_ydir * ydim4_advec_cell_kernel4_ydir * (z)) #define OPS_ACC5(x, y, z) \ (x + xdim5_advec_cell_kernel4_ydir * (y) + \ xdim5_advec_cell_kernel4_ydir * ydim5_advec_cell_kernel4_ydir * (z)) #define OPS_ACC6(x, y, z) \ (x + xdim6_advec_cell_kernel4_ydir * (y) + \ xdim6_advec_cell_kernel4_ydir * ydim6_advec_cell_kernel4_ydir * (z)) #define OPS_ACC7(x, y, z) \ (x + xdim7_advec_cell_kernel4_ydir * (y) + \ xdim7_advec_cell_kernel4_ydir * ydim7_advec_cell_kernel4_ydir * (z)) #define OPS_ACC8(x, y, z) \ (x + xdim8_advec_cell_kernel4_ydir * (y) + \ xdim8_advec_cell_kernel4_ydir * ydim8_advec_cell_kernel4_ydir 
* (z)) #define OPS_ACC9(x, y, z) \ (x + xdim9_advec_cell_kernel4_ydir * (y) + \ xdim9_advec_cell_kernel4_ydir * ydim9_advec_cell_kernel4_ydir * (z)) #define OPS_ACC10(x, y, z) \ (x + xdim10_advec_cell_kernel4_ydir * (y) + \ xdim10_advec_cell_kernel4_ydir * ydim10_advec_cell_kernel4_ydir * (z)) // user function __device__ inline void advec_cell_kernel4_ydir(double *density1, double *energy1, const double *mass_flux_y, const double *vol_flux_y, const double *pre_vol, const double *post_vol, double *pre_mass, double *post_mass, double *advec_vol, double *post_ener, const double *ener_flux) { pre_mass[OPS_ACC6(0, 0, 0)] = density1[OPS_ACC0(0, 0, 0)] * pre_vol[OPS_ACC4(0, 0, 0)]; post_mass[OPS_ACC7(0, 0, 0)] = pre_mass[OPS_ACC6(0, 0, 0)] + mass_flux_y[OPS_ACC2(0, 0, 0)] - mass_flux_y[OPS_ACC2(0, 1, 0)]; post_ener[OPS_ACC9(0, 0, 0)] = (energy1[OPS_ACC1(0, 0, 0)] * pre_mass[OPS_ACC6(0, 0, 0)] + ener_flux[OPS_ACC10(0, 0, 0)] - ener_flux[OPS_ACC10(0, 1, 0)]) / post_mass[OPS_ACC7(0, 0, 0)]; advec_vol[OPS_ACC8(0, 0, 0)] = pre_vol[OPS_ACC4(0, 0, 0)] + vol_flux_y[OPS_ACC3(0, 0, 0)] - vol_flux_y[OPS_ACC3(0, 1, 0)]; density1[OPS_ACC0(0, 0, 0)] = post_mass[OPS_ACC7(0, 0, 0)] / advec_vol[OPS_ACC8(0, 0, 0)]; energy1[OPS_ACC1(0, 0, 0)] = post_ener[OPS_ACC9(0, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 #undef OPS_ACC8 #undef OPS_ACC9 #undef OPS_ACC10 __global__ void ops_advec_cell_kernel4_ydir( double *__restrict arg0, double *__restrict arg1, const double *__restrict arg2, const double *__restrict arg3, const double *__restrict arg4, const double *__restrict arg5, double *__restrict arg6, double *__restrict arg7, double *__restrict arg8, double *__restrict arg9, const double *__restrict arg10, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_cell_kernel4_ydir + idx_z * 1 * 1 * xdim0_advec_cell_kernel4_ydir * ydim0_advec_cell_kernel4_ydir; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_cell_kernel4_ydir + idx_z * 1 * 1 * xdim1_advec_cell_kernel4_ydir * ydim1_advec_cell_kernel4_ydir; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_cell_kernel4_ydir + idx_z * 1 * 1 * xdim2_advec_cell_kernel4_ydir * ydim2_advec_cell_kernel4_ydir; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_cell_kernel4_ydir + idx_z * 1 * 1 * xdim3_advec_cell_kernel4_ydir * ydim3_advec_cell_kernel4_ydir; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_cell_kernel4_ydir + idx_z * 1 * 1 * xdim4_advec_cell_kernel4_ydir * ydim4_advec_cell_kernel4_ydir; arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_advec_cell_kernel4_ydir + idx_z * 1 * 1 * xdim5_advec_cell_kernel4_ydir * ydim5_advec_cell_kernel4_ydir; arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_advec_cell_kernel4_ydir + idx_z * 1 * 1 * xdim6_advec_cell_kernel4_ydir * ydim6_advec_cell_kernel4_ydir; arg7 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim7_advec_cell_kernel4_ydir + idx_z * 1 * 1 * xdim7_advec_cell_kernel4_ydir * ydim7_advec_cell_kernel4_ydir; arg8 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim8_advec_cell_kernel4_ydir + idx_z * 1 * 1 * xdim8_advec_cell_kernel4_ydir * ydim8_advec_cell_kernel4_ydir; arg9 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim9_advec_cell_kernel4_ydir + idx_z * 1 * 1 * xdim9_advec_cell_kernel4_ydir * ydim9_advec_cell_kernel4_ydir; arg10 += idx_x * 1 * 1 + idx_y * 1 * 1 * 
xdim10_advec_cell_kernel4_ydir + idx_z * 1 * 1 * xdim10_advec_cell_kernel4_ydir * ydim10_advec_cell_kernel4_ydir; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_cell_kernel4_ydir(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10); } } // host stub function void ops_par_loop_advec_cell_kernel4_ydir( char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10) { // Timing double t1, t2, c1, c2; ops_arg args[11] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 11, range, 14)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(14, "advec_cell_kernel4_ydir"); OPS_kernels[14].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; int xdim7 = args[7].dat->size[0]; int ydim7 = args[7].dat->size[1]; int xdim8 = args[8].dat->size[0]; int ydim8 = args[8].dat->size[1]; int xdim9 = args[9].dat->size[0]; int ydim9 = args[9].dat->size[1]; int xdim10 = args[10].dat->size[0]; int ydim10 = args[10].dat->size[1]; if (xdim0 != xdim0_advec_cell_kernel4_ydir_h || ydim0 != ydim0_advec_cell_kernel4_ydir_h || xdim1 != xdim1_advec_cell_kernel4_ydir_h || ydim1 != ydim1_advec_cell_kernel4_ydir_h || xdim2 != xdim2_advec_cell_kernel4_ydir_h || ydim2 != ydim2_advec_cell_kernel4_ydir_h || xdim3 != xdim3_advec_cell_kernel4_ydir_h || ydim3 != ydim3_advec_cell_kernel4_ydir_h || xdim4 != xdim4_advec_cell_kernel4_ydir_h || ydim4 != ydim4_advec_cell_kernel4_ydir_h || xdim5 != xdim5_advec_cell_kernel4_ydir_h || ydim5 != ydim5_advec_cell_kernel4_ydir_h || xdim6 != xdim6_advec_cell_kernel4_ydir_h || ydim6 != ydim6_advec_cell_kernel4_ydir_h || xdim7 != xdim7_advec_cell_kernel4_ydir_h || ydim7 != ydim7_advec_cell_kernel4_ydir_h || xdim8 != xdim8_advec_cell_kernel4_ydir_h || ydim8 != ydim8_advec_cell_kernel4_ydir_h || xdim9 != xdim9_advec_cell_kernel4_ydir_h || ydim9 != ydim9_advec_cell_kernel4_ydir_h || xdim10 != xdim10_advec_cell_kernel4_ydir_h || ydim10 != 
ydim10_advec_cell_kernel4_ydir_h) { hipMemcpyToSymbol(xdim0_advec_cell_kernel4_ydir, &xdim0, sizeof(int)); xdim0_advec_cell_kernel4_ydir_h = xdim0; hipMemcpyToSymbol(ydim0_advec_cell_kernel4_ydir, &ydim0, sizeof(int)); ydim0_advec_cell_kernel4_ydir_h = ydim0; hipMemcpyToSymbol(xdim1_advec_cell_kernel4_ydir, &xdim1, sizeof(int)); xdim1_advec_cell_kernel4_ydir_h = xdim1; hipMemcpyToSymbol(ydim1_advec_cell_kernel4_ydir, &ydim1, sizeof(int)); ydim1_advec_cell_kernel4_ydir_h = ydim1; hipMemcpyToSymbol(xdim2_advec_cell_kernel4_ydir, &xdim2, sizeof(int)); xdim2_advec_cell_kernel4_ydir_h = xdim2; hipMemcpyToSymbol(ydim2_advec_cell_kernel4_ydir, &ydim2, sizeof(int)); ydim2_advec_cell_kernel4_ydir_h = ydim2; hipMemcpyToSymbol(xdim3_advec_cell_kernel4_ydir, &xdim3, sizeof(int)); xdim3_advec_cell_kernel4_ydir_h = xdim3; hipMemcpyToSymbol(ydim3_advec_cell_kernel4_ydir, &ydim3, sizeof(int)); ydim3_advec_cell_kernel4_ydir_h = ydim3; hipMemcpyToSymbol(xdim4_advec_cell_kernel4_ydir, &xdim4, sizeof(int)); xdim4_advec_cell_kernel4_ydir_h = xdim4; hipMemcpyToSymbol(ydim4_advec_cell_kernel4_ydir, &ydim4, sizeof(int)); ydim4_advec_cell_kernel4_ydir_h = ydim4; hipMemcpyToSymbol(xdim5_advec_cell_kernel4_ydir, &xdim5, sizeof(int)); xdim5_advec_cell_kernel4_ydir_h = xdim5; hipMemcpyToSymbol(ydim5_advec_cell_kernel4_ydir, &ydim5, sizeof(int)); ydim5_advec_cell_kernel4_ydir_h = ydim5; hipMemcpyToSymbol(xdim6_advec_cell_kernel4_ydir, &xdim6, sizeof(int)); xdim6_advec_cell_kernel4_ydir_h = xdim6; hipMemcpyToSymbol(ydim6_advec_cell_kernel4_ydir, &ydim6, sizeof(int)); ydim6_advec_cell_kernel4_ydir_h = ydim6; hipMemcpyToSymbol(xdim7_advec_cell_kernel4_ydir, &xdim7, sizeof(int)); xdim7_advec_cell_kernel4_ydir_h = xdim7; hipMemcpyToSymbol(ydim7_advec_cell_kernel4_ydir, &ydim7, sizeof(int)); ydim7_advec_cell_kernel4_ydir_h = ydim7; hipMemcpyToSymbol(xdim8_advec_cell_kernel4_ydir, &xdim8, sizeof(int)); xdim8_advec_cell_kernel4_ydir_h = xdim8; hipMemcpyToSymbol(ydim8_advec_cell_kernel4_ydir, &ydim8, sizeof(int)); ydim8_advec_cell_kernel4_ydir_h = ydim8; hipMemcpyToSymbol(xdim9_advec_cell_kernel4_ydir, &xdim9, sizeof(int)); xdim9_advec_cell_kernel4_ydir_h = xdim9; hipMemcpyToSymbol(ydim9_advec_cell_kernel4_ydir, &ydim9, sizeof(int)); ydim9_advec_cell_kernel4_ydir_h = ydim9; hipMemcpyToSymbol(xdim10_advec_cell_kernel4_ydir, &xdim10, sizeof(int)); xdim10_advec_cell_kernel4_ydir_h = xdim10; hipMemcpyToSymbol(ydim10_advec_cell_kernel4_ydir, &ydim10, sizeof(int)); ydim10_advec_cell_kernel4_ydir_h = ydim10; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; int dat6 = args[6].dat->elem_size; int dat7 = args[7].dat->elem_size; int dat8 = args[8].dat->elem_size; int dat9 = args[9].dat->elem_size; int dat10 = args[10].dat->elem_size; char *p_a[11]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * 
args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5 + dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d]; #endif int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]); base6 = base6 + dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]); base6 = base6 
+ dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]); p_a[6] = (char *)args[6].data_d + base6; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d] + OPS_sub_dat_list[args[7].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d]; #endif int base7 = dat7 * 1 * (start[0] * args[7].stencil->stride[0] - args[7].dat->base[0] - d_m[0]); base7 = base7 + dat7 * args[7].dat->size[0] * (start[1] * args[7].stencil->stride[1] - args[7].dat->base[1] - d_m[1]); base7 = base7 + dat7 * args[7].dat->size[0] * args[7].dat->size[1] * (start[2] * args[7].stencil->stride[2] - args[7].dat->base[2] - d_m[2]); p_a[7] = (char *)args[7].data_d + base7; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[8].dat->d_m[d] + OPS_sub_dat_list[args[8].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[8].dat->d_m[d]; #endif int base8 = dat8 * 1 * (start[0] * args[8].stencil->stride[0] - args[8].dat->base[0] - d_m[0]); base8 = base8 + dat8 * args[8].dat->size[0] * (start[1] * args[8].stencil->stride[1] - args[8].dat->base[1] - d_m[1]); base8 = base8 + dat8 * args[8].dat->size[0] * args[8].dat->size[1] * (start[2] * args[8].stencil->stride[2] - args[8].dat->base[2] - d_m[2]); p_a[8] = (char *)args[8].data_d + base8; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[9].dat->d_m[d] + OPS_sub_dat_list[args[9].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[9].dat->d_m[d]; #endif int base9 = dat9 * 1 * (start[0] * args[9].stencil->stride[0] - args[9].dat->base[0] - d_m[0]); base9 = base9 + dat9 * args[9].dat->size[0] * (start[1] * args[9].stencil->stride[1] - args[9].dat->base[1] - d_m[1]); base9 = base9 + dat9 * args[9].dat->size[0] * args[9].dat->size[1] * (start[2] * args[9].stencil->stride[2] - args[9].dat->base[2] - d_m[2]); p_a[9] = (char *)args[9].data_d + base9; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[10].dat->d_m[d] + OPS_sub_dat_list[args[10].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[10].dat->d_m[d]; #endif int base10 = dat10 * 1 * (start[0] * args[10].stencil->stride[0] - args[10].dat->base[0] - d_m[0]); base10 = base10 + dat10 * args[10].dat->size[0] * (start[1] * args[10].stencil->stride[1] - args[10].dat->base[1] - d_m[1]); base10 = base10 + dat10 * args[10].dat->size[0] * args[10].dat->size[1] * (start[2] * args[10].stencil->stride[2] - args[10].dat->base[2] - d_m[2]); p_a[10] = (char *)args[10].data_d + base10; ops_H_D_exchanges_device(args, 11); ops_halo_exchanges(args, 11, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[14].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_advec_cell_kernel4_ydir), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)p_a[7], (double *)p_a[8], (double *)p_a[9], (double *)p_a[10], x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[14].time += t1 - t2; } ops_set_dirtybit_device(args, 11); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); ops_set_halo_dirtybit3(&args[6], range); ops_set_halo_dirtybit3(&args[7], range); ops_set_halo_dirtybit3(&args[8], range); ops_set_halo_dirtybit3(&args[9], range); if (OPS_diags > 1) { // Update kernel record 
ops_timers_core(&c2, &t2); OPS_kernels[14].mpi_time += t2 - t1; OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg6); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg7); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg8); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg9); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg10); } }
97403457a6788c8060d17651ae5c205a4f9ffb84.cu
// // auto-generated by ops.py // __constant__ int xdim0_advec_cell_kernel4_ydir; int xdim0_advec_cell_kernel4_ydir_h = -1; __constant__ int ydim0_advec_cell_kernel4_ydir; int ydim0_advec_cell_kernel4_ydir_h = -1; __constant__ int xdim1_advec_cell_kernel4_ydir; int xdim1_advec_cell_kernel4_ydir_h = -1; __constant__ int ydim1_advec_cell_kernel4_ydir; int ydim1_advec_cell_kernel4_ydir_h = -1; __constant__ int xdim2_advec_cell_kernel4_ydir; int xdim2_advec_cell_kernel4_ydir_h = -1; __constant__ int ydim2_advec_cell_kernel4_ydir; int ydim2_advec_cell_kernel4_ydir_h = -1; __constant__ int xdim3_advec_cell_kernel4_ydir; int xdim3_advec_cell_kernel4_ydir_h = -1; __constant__ int ydim3_advec_cell_kernel4_ydir; int ydim3_advec_cell_kernel4_ydir_h = -1; __constant__ int xdim4_advec_cell_kernel4_ydir; int xdim4_advec_cell_kernel4_ydir_h = -1; __constant__ int ydim4_advec_cell_kernel4_ydir; int ydim4_advec_cell_kernel4_ydir_h = -1; __constant__ int xdim5_advec_cell_kernel4_ydir; int xdim5_advec_cell_kernel4_ydir_h = -1; __constant__ int ydim5_advec_cell_kernel4_ydir; int ydim5_advec_cell_kernel4_ydir_h = -1; __constant__ int xdim6_advec_cell_kernel4_ydir; int xdim6_advec_cell_kernel4_ydir_h = -1; __constant__ int ydim6_advec_cell_kernel4_ydir; int ydim6_advec_cell_kernel4_ydir_h = -1; __constant__ int xdim7_advec_cell_kernel4_ydir; int xdim7_advec_cell_kernel4_ydir_h = -1; __constant__ int ydim7_advec_cell_kernel4_ydir; int ydim7_advec_cell_kernel4_ydir_h = -1; __constant__ int xdim8_advec_cell_kernel4_ydir; int xdim8_advec_cell_kernel4_ydir_h = -1; __constant__ int ydim8_advec_cell_kernel4_ydir; int ydim8_advec_cell_kernel4_ydir_h = -1; __constant__ int xdim9_advec_cell_kernel4_ydir; int xdim9_advec_cell_kernel4_ydir_h = -1; __constant__ int ydim9_advec_cell_kernel4_ydir; int ydim9_advec_cell_kernel4_ydir_h = -1; __constant__ int xdim10_advec_cell_kernel4_ydir; int xdim10_advec_cell_kernel4_ydir_h = -1; __constant__ int ydim10_advec_cell_kernel4_ydir; int ydim10_advec_cell_kernel4_ydir_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 #undef OPS_ACC8 #undef OPS_ACC9 #undef OPS_ACC10 #define OPS_ACC0(x, y, z) \ (x + xdim0_advec_cell_kernel4_ydir * (y) + \ xdim0_advec_cell_kernel4_ydir * ydim0_advec_cell_kernel4_ydir * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_advec_cell_kernel4_ydir * (y) + \ xdim1_advec_cell_kernel4_ydir * ydim1_advec_cell_kernel4_ydir * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_advec_cell_kernel4_ydir * (y) + \ xdim2_advec_cell_kernel4_ydir * ydim2_advec_cell_kernel4_ydir * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_advec_cell_kernel4_ydir * (y) + \ xdim3_advec_cell_kernel4_ydir * ydim3_advec_cell_kernel4_ydir * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_advec_cell_kernel4_ydir * (y) + \ xdim4_advec_cell_kernel4_ydir * ydim4_advec_cell_kernel4_ydir * (z)) #define OPS_ACC5(x, y, z) \ (x + xdim5_advec_cell_kernel4_ydir * (y) + \ xdim5_advec_cell_kernel4_ydir * ydim5_advec_cell_kernel4_ydir * (z)) #define OPS_ACC6(x, y, z) \ (x + xdim6_advec_cell_kernel4_ydir * (y) + \ xdim6_advec_cell_kernel4_ydir * ydim6_advec_cell_kernel4_ydir * (z)) #define OPS_ACC7(x, y, z) \ (x + xdim7_advec_cell_kernel4_ydir * (y) + \ xdim7_advec_cell_kernel4_ydir * ydim7_advec_cell_kernel4_ydir * (z)) #define OPS_ACC8(x, y, z) \ (x + xdim8_advec_cell_kernel4_ydir * (y) + \ xdim8_advec_cell_kernel4_ydir * ydim8_advec_cell_kernel4_ydir * (z)) #define OPS_ACC9(x, y, z) \ (x + xdim9_advec_cell_kernel4_ydir * (y) + \ 
xdim9_advec_cell_kernel4_ydir * ydim9_advec_cell_kernel4_ydir * (z)) #define OPS_ACC10(x, y, z) \ (x + xdim10_advec_cell_kernel4_ydir * (y) + \ xdim10_advec_cell_kernel4_ydir * ydim10_advec_cell_kernel4_ydir * (z)) // user function __device__ inline void advec_cell_kernel4_ydir(double *density1, double *energy1, const double *mass_flux_y, const double *vol_flux_y, const double *pre_vol, const double *post_vol, double *pre_mass, double *post_mass, double *advec_vol, double *post_ener, const double *ener_flux) { pre_mass[OPS_ACC6(0, 0, 0)] = density1[OPS_ACC0(0, 0, 0)] * pre_vol[OPS_ACC4(0, 0, 0)]; post_mass[OPS_ACC7(0, 0, 0)] = pre_mass[OPS_ACC6(0, 0, 0)] + mass_flux_y[OPS_ACC2(0, 0, 0)] - mass_flux_y[OPS_ACC2(0, 1, 0)]; post_ener[OPS_ACC9(0, 0, 0)] = (energy1[OPS_ACC1(0, 0, 0)] * pre_mass[OPS_ACC6(0, 0, 0)] + ener_flux[OPS_ACC10(0, 0, 0)] - ener_flux[OPS_ACC10(0, 1, 0)]) / post_mass[OPS_ACC7(0, 0, 0)]; advec_vol[OPS_ACC8(0, 0, 0)] = pre_vol[OPS_ACC4(0, 0, 0)] + vol_flux_y[OPS_ACC3(0, 0, 0)] - vol_flux_y[OPS_ACC3(0, 1, 0)]; density1[OPS_ACC0(0, 0, 0)] = post_mass[OPS_ACC7(0, 0, 0)] / advec_vol[OPS_ACC8(0, 0, 0)]; energy1[OPS_ACC1(0, 0, 0)] = post_ener[OPS_ACC9(0, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #undef OPS_ACC7 #undef OPS_ACC8 #undef OPS_ACC9 #undef OPS_ACC10 __global__ void ops_advec_cell_kernel4_ydir( double *__restrict arg0, double *__restrict arg1, const double *__restrict arg2, const double *__restrict arg3, const double *__restrict arg4, const double *__restrict arg5, double *__restrict arg6, double *__restrict arg7, double *__restrict arg8, double *__restrict arg9, const double *__restrict arg10, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_cell_kernel4_ydir + idx_z * 1 * 1 * xdim0_advec_cell_kernel4_ydir * ydim0_advec_cell_kernel4_ydir; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_cell_kernel4_ydir + idx_z * 1 * 1 * xdim1_advec_cell_kernel4_ydir * ydim1_advec_cell_kernel4_ydir; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_cell_kernel4_ydir + idx_z * 1 * 1 * xdim2_advec_cell_kernel4_ydir * ydim2_advec_cell_kernel4_ydir; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_cell_kernel4_ydir + idx_z * 1 * 1 * xdim3_advec_cell_kernel4_ydir * ydim3_advec_cell_kernel4_ydir; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_cell_kernel4_ydir + idx_z * 1 * 1 * xdim4_advec_cell_kernel4_ydir * ydim4_advec_cell_kernel4_ydir; arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_advec_cell_kernel4_ydir + idx_z * 1 * 1 * xdim5_advec_cell_kernel4_ydir * ydim5_advec_cell_kernel4_ydir; arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_advec_cell_kernel4_ydir + idx_z * 1 * 1 * xdim6_advec_cell_kernel4_ydir * ydim6_advec_cell_kernel4_ydir; arg7 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim7_advec_cell_kernel4_ydir + idx_z * 1 * 1 * xdim7_advec_cell_kernel4_ydir * ydim7_advec_cell_kernel4_ydir; arg8 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim8_advec_cell_kernel4_ydir + idx_z * 1 * 1 * xdim8_advec_cell_kernel4_ydir * ydim8_advec_cell_kernel4_ydir; arg9 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim9_advec_cell_kernel4_ydir + idx_z * 1 * 1 * xdim9_advec_cell_kernel4_ydir * ydim9_advec_cell_kernel4_ydir; arg10 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim10_advec_cell_kernel4_ydir + idx_z * 1 * 1 * xdim10_advec_cell_kernel4_ydir * 
ydim10_advec_cell_kernel4_ydir; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_cell_kernel4_ydir(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10); } } // host stub function void ops_par_loop_advec_cell_kernel4_ydir( char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7, ops_arg arg8, ops_arg arg9, ops_arg arg10) { // Timing double t1, t2, c1, c2; ops_arg args[11] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 11, range, 14)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(14, "advec_cell_kernel4_ydir"); OPS_kernels[14].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; int xdim7 = args[7].dat->size[0]; int ydim7 = args[7].dat->size[1]; int xdim8 = args[8].dat->size[0]; int ydim8 = args[8].dat->size[1]; int xdim9 = args[9].dat->size[0]; int ydim9 = args[9].dat->size[1]; int xdim10 = args[10].dat->size[0]; int ydim10 = args[10].dat->size[1]; if (xdim0 != xdim0_advec_cell_kernel4_ydir_h || ydim0 != ydim0_advec_cell_kernel4_ydir_h || xdim1 != xdim1_advec_cell_kernel4_ydir_h || ydim1 != ydim1_advec_cell_kernel4_ydir_h || xdim2 != xdim2_advec_cell_kernel4_ydir_h || ydim2 != ydim2_advec_cell_kernel4_ydir_h || xdim3 != xdim3_advec_cell_kernel4_ydir_h || ydim3 != ydim3_advec_cell_kernel4_ydir_h || xdim4 != xdim4_advec_cell_kernel4_ydir_h || ydim4 != ydim4_advec_cell_kernel4_ydir_h || xdim5 != xdim5_advec_cell_kernel4_ydir_h || ydim5 != ydim5_advec_cell_kernel4_ydir_h || xdim6 != xdim6_advec_cell_kernel4_ydir_h || ydim6 != ydim6_advec_cell_kernel4_ydir_h || xdim7 != xdim7_advec_cell_kernel4_ydir_h || ydim7 != ydim7_advec_cell_kernel4_ydir_h || xdim8 != xdim8_advec_cell_kernel4_ydir_h || ydim8 != ydim8_advec_cell_kernel4_ydir_h || xdim9 != xdim9_advec_cell_kernel4_ydir_h || ydim9 != ydim9_advec_cell_kernel4_ydir_h || xdim10 != xdim10_advec_cell_kernel4_ydir_h || ydim10 != ydim10_advec_cell_kernel4_ydir_h) { cudaMemcpyToSymbol(xdim0_advec_cell_kernel4_ydir, &xdim0, 
sizeof(int)); xdim0_advec_cell_kernel4_ydir_h = xdim0; cudaMemcpyToSymbol(ydim0_advec_cell_kernel4_ydir, &ydim0, sizeof(int)); ydim0_advec_cell_kernel4_ydir_h = ydim0; cudaMemcpyToSymbol(xdim1_advec_cell_kernel4_ydir, &xdim1, sizeof(int)); xdim1_advec_cell_kernel4_ydir_h = xdim1; cudaMemcpyToSymbol(ydim1_advec_cell_kernel4_ydir, &ydim1, sizeof(int)); ydim1_advec_cell_kernel4_ydir_h = ydim1; cudaMemcpyToSymbol(xdim2_advec_cell_kernel4_ydir, &xdim2, sizeof(int)); xdim2_advec_cell_kernel4_ydir_h = xdim2; cudaMemcpyToSymbol(ydim2_advec_cell_kernel4_ydir, &ydim2, sizeof(int)); ydim2_advec_cell_kernel4_ydir_h = ydim2; cudaMemcpyToSymbol(xdim3_advec_cell_kernel4_ydir, &xdim3, sizeof(int)); xdim3_advec_cell_kernel4_ydir_h = xdim3; cudaMemcpyToSymbol(ydim3_advec_cell_kernel4_ydir, &ydim3, sizeof(int)); ydim3_advec_cell_kernel4_ydir_h = ydim3; cudaMemcpyToSymbol(xdim4_advec_cell_kernel4_ydir, &xdim4, sizeof(int)); xdim4_advec_cell_kernel4_ydir_h = xdim4; cudaMemcpyToSymbol(ydim4_advec_cell_kernel4_ydir, &ydim4, sizeof(int)); ydim4_advec_cell_kernel4_ydir_h = ydim4; cudaMemcpyToSymbol(xdim5_advec_cell_kernel4_ydir, &xdim5, sizeof(int)); xdim5_advec_cell_kernel4_ydir_h = xdim5; cudaMemcpyToSymbol(ydim5_advec_cell_kernel4_ydir, &ydim5, sizeof(int)); ydim5_advec_cell_kernel4_ydir_h = ydim5; cudaMemcpyToSymbol(xdim6_advec_cell_kernel4_ydir, &xdim6, sizeof(int)); xdim6_advec_cell_kernel4_ydir_h = xdim6; cudaMemcpyToSymbol(ydim6_advec_cell_kernel4_ydir, &ydim6, sizeof(int)); ydim6_advec_cell_kernel4_ydir_h = ydim6; cudaMemcpyToSymbol(xdim7_advec_cell_kernel4_ydir, &xdim7, sizeof(int)); xdim7_advec_cell_kernel4_ydir_h = xdim7; cudaMemcpyToSymbol(ydim7_advec_cell_kernel4_ydir, &ydim7, sizeof(int)); ydim7_advec_cell_kernel4_ydir_h = ydim7; cudaMemcpyToSymbol(xdim8_advec_cell_kernel4_ydir, &xdim8, sizeof(int)); xdim8_advec_cell_kernel4_ydir_h = xdim8; cudaMemcpyToSymbol(ydim8_advec_cell_kernel4_ydir, &ydim8, sizeof(int)); ydim8_advec_cell_kernel4_ydir_h = ydim8; cudaMemcpyToSymbol(xdim9_advec_cell_kernel4_ydir, &xdim9, sizeof(int)); xdim9_advec_cell_kernel4_ydir_h = xdim9; cudaMemcpyToSymbol(ydim9_advec_cell_kernel4_ydir, &ydim9, sizeof(int)); ydim9_advec_cell_kernel4_ydir_h = ydim9; cudaMemcpyToSymbol(xdim10_advec_cell_kernel4_ydir, &xdim10, sizeof(int)); xdim10_advec_cell_kernel4_ydir_h = xdim10; cudaMemcpyToSymbol(ydim10_advec_cell_kernel4_ydir, &ydim10, sizeof(int)); ydim10_advec_cell_kernel4_ydir_h = ydim10; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; int dat6 = args[6].dat->elem_size; int dat7 = args[7].dat->elem_size; int dat8 = args[8].dat->elem_size; int dat9 = args[9].dat->elem_size; int dat10 = args[10].dat->elem_size; char *p_a[11]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - 
args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5 + dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d]; #endif int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]); base6 = base6 + dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]); base6 = base6 + dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * 
args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]); p_a[6] = (char *)args[6].data_d + base6; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d] + OPS_sub_dat_list[args[7].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[7].dat->d_m[d]; #endif int base7 = dat7 * 1 * (start[0] * args[7].stencil->stride[0] - args[7].dat->base[0] - d_m[0]); base7 = base7 + dat7 * args[7].dat->size[0] * (start[1] * args[7].stencil->stride[1] - args[7].dat->base[1] - d_m[1]); base7 = base7 + dat7 * args[7].dat->size[0] * args[7].dat->size[1] * (start[2] * args[7].stencil->stride[2] - args[7].dat->base[2] - d_m[2]); p_a[7] = (char *)args[7].data_d + base7; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[8].dat->d_m[d] + OPS_sub_dat_list[args[8].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[8].dat->d_m[d]; #endif int base8 = dat8 * 1 * (start[0] * args[8].stencil->stride[0] - args[8].dat->base[0] - d_m[0]); base8 = base8 + dat8 * args[8].dat->size[0] * (start[1] * args[8].stencil->stride[1] - args[8].dat->base[1] - d_m[1]); base8 = base8 + dat8 * args[8].dat->size[0] * args[8].dat->size[1] * (start[2] * args[8].stencil->stride[2] - args[8].dat->base[2] - d_m[2]); p_a[8] = (char *)args[8].data_d + base8; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[9].dat->d_m[d] + OPS_sub_dat_list[args[9].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[9].dat->d_m[d]; #endif int base9 = dat9 * 1 * (start[0] * args[9].stencil->stride[0] - args[9].dat->base[0] - d_m[0]); base9 = base9 + dat9 * args[9].dat->size[0] * (start[1] * args[9].stencil->stride[1] - args[9].dat->base[1] - d_m[1]); base9 = base9 + dat9 * args[9].dat->size[0] * args[9].dat->size[1] * (start[2] * args[9].stencil->stride[2] - args[9].dat->base[2] - d_m[2]); p_a[9] = (char *)args[9].data_d + base9; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[10].dat->d_m[d] + OPS_sub_dat_list[args[10].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[10].dat->d_m[d]; #endif int base10 = dat10 * 1 * (start[0] * args[10].stencil->stride[0] - args[10].dat->base[0] - d_m[0]); base10 = base10 + dat10 * args[10].dat->size[0] * (start[1] * args[10].stencil->stride[1] - args[10].dat->base[1] - d_m[1]); base10 = base10 + dat10 * args[10].dat->size[0] * args[10].dat->size[1] * (start[2] * args[10].stencil->stride[2] - args[10].dat->base[2] - d_m[2]); p_a[10] = (char *)args[10].data_d + base10; ops_H_D_exchanges_device(args, 11); ops_halo_exchanges(args, 11, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[14].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_advec_cell_kernel4_ydir<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], (double *)p_a[7], (double *)p_a[8], (double *)p_a[9], (double *)p_a[10], x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[14].time += t1 - t2; } ops_set_dirtybit_device(args, 11); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); ops_set_halo_dirtybit3(&args[6], range); ops_set_halo_dirtybit3(&args[7], range); ops_set_halo_dirtybit3(&args[8], range); ops_set_halo_dirtybit3(&args[9], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[14].mpi_time += t2 - t1; OPS_kernels[14].transfer += 
ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg6); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg7); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg8); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg9); OPS_kernels[14].transfer += ops_compute_transfer(dim, start, end, &arg10); } }
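The .hip content above is the hipify translation of the .cu content paired with it; apart from the hipify banner comment, the two differ only by a handful of mechanical substitutions: the added #include "hip/hip_runtime.h", cudaMemcpyToSymbol -> hipMemcpyToSymbol, the <<<grid, tblock>>> launch rewritten as hipLaunchKernelGGL(...), and cudaDeviceSynchronize -> hipDeviceSynchronize. The minimal sketch below illustrates just that mapping; it is not drawn from the dataset, and the kernel and variable names (scale, xdim, d_a) are invented for illustration.

// Minimal HIP sketch (not from the dataset) of the CUDA -> HIP substitutions
// seen in the pair above. Kernel and variable names are illustrative only.
#include "hip/hip_runtime.h"
#include <cstdio>

__constant__ int xdim;                      // device constant, as in the generated kernels

__global__ void scale(double *a, int n) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < n) a[i] *= xdim;                  // read the constant set from the host
}

int main() {
  const int n = 256;
  int xdim_h = 2;                           // host-side shadow of the __constant__
  double *d_a = nullptr;
  hipMalloc(reinterpret_cast<void **>(&d_a), n * sizeof(double));
  hipMemset(d_a, 0, n * sizeof(double));
  // CUDA: cudaMemcpyToSymbol(xdim, &xdim_h, sizeof(int));
  hipMemcpyToSymbol(HIP_SYMBOL(xdim), &xdim_h, sizeof(int));
  dim3 grid((n - 1) / 64 + 1, 1, 1), block(64, 1, 1);
  // CUDA: scale<<<grid, block>>>(d_a, n);
  hipLaunchKernelGGL(scale, grid, block, 0, 0, d_a, n);
  // CUDA: cudaDeviceSynchronize();
  hipDeviceSynchronize();
  hipFree(d_a);
  std::printf("done\n");
  return 0;
}

hipify leaves the user kernel and the host-side index arithmetic untouched, which is why the two files in each row are otherwise identical apart from the banner comment and these call sites.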
882242a6a8f29e5923f6d0eb7801fb78501a1d99.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "pdist_grad_impl.cuh" #include <math.h> #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh" template <typename T> __device__ __forceinline__ T sign(T val) { return (0.0 < val) - (val < 0.0); } template <typename T> __global__ void InitOutput(T *x_grad, const size_t x_size) { T zero = 0.0; for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < x_size; pos += blockDim.x * gridDim.x) { x_grad[pos] = zero; } return; } template <typename T> __global__ void PDist_Grad_One(const size_t y_size, const T *y_grad, const T *x, const T *y, T *buffer, const int64_t n, const int64_t m, const float p, const float n1, const float n2) { const int64_t k = blockIdx.x * blockDim.x + threadIdx.x; const int init = blockIdx.y * blockDim.y + threadIdx.y; const int s = blockDim.y * gridDim.y; if (k >= y_size) { return; } int64_t i = static_cast<int64_t>((n1 - sqrt(n2 - 2 * k))); int64_t j = k - n * i + i * (i + 1) / 2 + i + 1; int64_t ib = j - i - 1; int64_t jb = n - 2 - i; const T grad_k = y_grad[k]; const T *const begin = x + i * m; const T *const end = begin + m; const T *x_i = begin + init; const T *x_j = x + j * m + init; T *buff1 = buffer + (ib * n + i) * m + init; T *buff2 = buffer + (jb * n + j) * m + init; for (; x_i < end; x_i += s, x_j += s, buff1 += s, buff2 += s) { T res = grad_k * sign(*x_i - *x_j); *buff1 = res; *buff2 = -res; } } template <typename T> __global__ void PDist_Grad_Lt_Two(const size_t y_size, const T *y_grad, const T *x, const T *y, T *buffer, const int64_t n, const int64_t m, const float p, const float n1, const float n2) { const int64_t k = blockIdx.x * blockDim.x + threadIdx.x; const int init = blockIdx.y * blockDim.y + threadIdx.y; const int s = blockDim.y * gridDim.y; if (k >= y_size) { return; } int64_t i = static_cast<int64_t>((n1 - sqrt(n2 - 2 * k))); int64_t j = k - n * i + i * (i + 1) / 2 + i + 1; int64_t ib = j - i - 1; int64_t jb = n - 2 - i; const T grad_k = y_grad[k]; const T dist_k = y[k]; if (dist_k != 0.0 && p >= 1) { const T *const begin = x + i * m; const T *const end = begin + m; const T *x_i = begin + init; const T *x_j = x + j * m + init; T *buff1 = buffer + (ib * n + i) * m + init; T *buff2 = buffer + (jb * n + j) * m + init; for (; x_i < end; x_i += s, x_j += s, buff1 += s, buff2 += s) { const T diff = *x_i - *x_j; T res = (sign(diff) * pow(abs(diff), static_cast<T>(p - 1)) * (grad_k) / pow(dist_k, static_cast<T>(p - 1))); *buff1 = res; *buff2 = -res; } } } template <typename T> __global__ void PDist_Grad_Two(const size_t y_size, const T *y_grad, const T *x, const T *y, T *buffer, const int64_t n, const int64_t m, const float p, const float n1, const float n2) { const int64_t k = blockIdx.x * blockDim.x + threadIdx.x; const int init = blockIdx.y * blockDim.y + threadIdx.y; const int s = blockDim.y * gridDim.y; if (k >= y_size) { return; } int64_t i = 
static_cast<int64_t>((n1 - sqrt(n2 - 2 * k))); int64_t j = k - n * i + i * (i + 1) / 2 + i + 1; int64_t ib = j - i - 1; int64_t jb = n - 2 - i; const T grad_k = y_grad[k]; const T dist_k = y[k]; if (dist_k != 0.0) { const T *const begin = x + i * m; const T *const end = begin + m; const T *x_i = begin + init; const T *x_j = x + j * m + init; T *buff1 = buffer + (ib * n + i) * m + init; T *buff2 = buffer + (jb * n + j) * m + init; for (; x_i < end; x_i += s, x_j += s, buff1 += s, buff2 += s) { T res = grad_k * (*x_i - *x_j) / dist_k; *buff1 = res; *buff2 = -res; } } } template <typename T> __global__ void PDist_Grad_P(const size_t y_size, const T *y_grad, const T *x, const T *y, T *buffer, const int64_t n, const int64_t m, const float p, const float n1, const float n2) { const int64_t k = blockIdx.x * blockDim.x + threadIdx.x; const int init = blockIdx.y * blockDim.y + threadIdx.y; const int s = blockDim.y * gridDim.y; if (k >= y_size) { return; } int64_t i = static_cast<int64_t>((n1 - sqrt(n2 - 2 * k))); int64_t j = k - n * i + i * (i + 1) / 2 + i + 1; int64_t ib = j - i - 1; int64_t jb = n - 2 - i; const T grad_k = y_grad[k]; const T dist_k = y[k]; if (dist_k != 0.0) { const T *const begin = x + i * m; const T *const end = begin + m; const T *x_i = begin + init; const T *x_j = x + j * m + init; T *buff1 = buffer + (ib * n + i) * m + init; T *buff2 = buffer + (jb * n + j) * m + init; for (; x_i < end; x_i += s, x_j += s, buff1 += s, buff2 += s) { const T diff = (*x_i - *x_j); T res = diff * pow(abs(diff), static_cast<T>(p - 2)) * grad_k / pow(dist_k, static_cast<T>(p - 1)); *buff1 = res; *buff2 = -res; } } } template <typename T> __global__ void PDist_Grad_Inf(const size_t y_size, const T *y_grad, const T *x, const T *y, T *buffer, const int64_t n, const int64_t m, const float p, const float n1, const float n2) { const int64_t k = blockIdx.x * blockDim.x + threadIdx.x; const int init = blockIdx.y * blockDim.y + threadIdx.y; const int s = blockDim.y * gridDim.y; if (k >= y_size) { return; } int64_t i = static_cast<int64_t>((n1 - sqrt(n2 - 2 * k))); int64_t j = k - n * i + i * (i + 1) / 2 + i + 1; int64_t ib = j - i - 1; int64_t jb = n - 2 - i; const T grad_k = y_grad[k]; const T dist_k = y[k]; const T *const begin = x + i * m; const T *const end = begin + m; const T *x_i = begin + init; const T *x_j = x + j * m + init; T *buff1 = buffer + (ib * n + i) * m + init; T *buff2 = buffer + (jb * n + j) * m + init; for (; x_i < end; x_i += s, x_j += s, buff1 += s, buff2 += s) { T diff = *x_i - *x_j; T res = grad_k * sign(diff) * (abs(diff) == (dist_k)); *buff1 = res; *buff2 = -res; } } template <typename T> __global__ void AddBuffer(T *x_grad, T *buffer, const int64_t n, const size_t size) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { T res = 0.0; T *buff = buffer + pos; for (int64_t i = 0; i < n - 1; ++i, buff += size) { res += *(buff); } x_grad[pos] = res; } return; } template <typename T> void CalPDistGrad(const size_t x_size, const size_t y_size, const size_t grad_size, const T *y_grad, const T *x, const T *y, const int64_t n, const int64_t m, const float p, T *x_grad, T *buffer, const uint32_t &device_id, hipStream_t cuda_stream) { if (p == 0.0 || grad_size == 0 || x_size == 0) { hipLaunchKernelGGL(( InitOutput), dim3(CUDA_BLOCKS(device_id, x_size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, x_grad, x_size); return; } const int block_x = 8; const int block_y = 128; const int grid_x = (y_size + block_x - 1) / block_x; const int 
grid_y = (m + block_y * 8 - 1) / (block_y * 8); const dim3 grid(grid_x, grid_y); const dim3 block(block_x, block_y); const float n1 = n - .5; const float n2 = n1 * n1 - 1; if (p == 1.0) { hipLaunchKernelGGL(( PDist_Grad_One<T>), dim3(grid), dim3(block), 0, cuda_stream, y_size, y_grad, x, y, buffer, n, m, p, n1, n2); } else if (p < 2.0) { hipLaunchKernelGGL(( InitOutput), dim3(CUDA_BLOCKS(device_id, (n-1) * x_size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, buffer, (n-1) * x_size); hipLaunchKernelGGL(( PDist_Grad_Lt_Two<T>), dim3(grid), dim3(block), 0, cuda_stream, y_size, y_grad, x, y, buffer, n, m, p, n1, n2); } else if (p == 2.0) { hipLaunchKernelGGL(( InitOutput), dim3(CUDA_BLOCKS(device_id, (n-1) * x_size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, buffer, (n-1) * x_size); hipLaunchKernelGGL(( PDist_Grad_Two<T>), dim3(grid), dim3(block), 0, cuda_stream, y_size, y_grad, x, y, buffer, n, m, p, n1, n2); } else if (std::isinf(p)) { hipLaunchKernelGGL(( PDist_Grad_Inf<T>), dim3(grid), dim3(block), 0, cuda_stream, y_size, y_grad, x, y, buffer, n, m, p, n1, n2); } else { hipLaunchKernelGGL(( InitOutput), dim3(CUDA_BLOCKS(device_id, (n-1) * x_size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, buffer, (n-1) * x_size); hipLaunchKernelGGL(( PDist_Grad_P<T>), dim3(grid), dim3(block), 0, cuda_stream, y_size, y_grad, x, y, buffer, n, m, p, n1, n2); } hipLaunchKernelGGL(( AddBuffer), dim3(CUDA_BLOCKS(device_id, x_size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, x_grad, buffer, n, x_size); } template CUDA_LIB_EXPORT void CalPDistGrad<float>(const size_t x_size, const size_t y_size, const size_t grad_size, const float *y_grad, const float *x, const float *y, const int64_t n, const int64_t m, const float p, float *x_grad, float *buffer, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalPDistGrad<double>(const size_t x_size, const size_t y_size, const size_t grad_size, const double *y_grad, const double *x, const double *y, const int64_t n, const int64_t m, const float p, double *x_grad, double *buffer, const uint32_t &device_id, hipStream_t cuda_stream);
882242a6a8f29e5923f6d0eb7801fb78501a1d99.cu
/** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "pdist_grad_impl.cuh" #include <math.h> #include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh" template <typename T> __device__ __forceinline__ T sign(T val) { return (0.0 < val) - (val < 0.0); } template <typename T> __global__ void InitOutput(T *x_grad, const size_t x_size) { T zero = 0.0; for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < x_size; pos += blockDim.x * gridDim.x) { x_grad[pos] = zero; } return; } template <typename T> __global__ void PDist_Grad_One(const size_t y_size, const T *y_grad, const T *x, const T *y, T *buffer, const int64_t n, const int64_t m, const float p, const float n1, const float n2) { const int64_t k = blockIdx.x * blockDim.x + threadIdx.x; const int init = blockIdx.y * blockDim.y + threadIdx.y; const int s = blockDim.y * gridDim.y; if (k >= y_size) { return; } int64_t i = static_cast<int64_t>((n1 - sqrt(n2 - 2 * k))); int64_t j = k - n * i + i * (i + 1) / 2 + i + 1; int64_t ib = j - i - 1; int64_t jb = n - 2 - i; const T grad_k = y_grad[k]; const T *const begin = x + i * m; const T *const end = begin + m; const T *x_i = begin + init; const T *x_j = x + j * m + init; T *buff1 = buffer + (ib * n + i) * m + init; T *buff2 = buffer + (jb * n + j) * m + init; for (; x_i < end; x_i += s, x_j += s, buff1 += s, buff2 += s) { T res = grad_k * sign(*x_i - *x_j); *buff1 = res; *buff2 = -res; } } template <typename T> __global__ void PDist_Grad_Lt_Two(const size_t y_size, const T *y_grad, const T *x, const T *y, T *buffer, const int64_t n, const int64_t m, const float p, const float n1, const float n2) { const int64_t k = blockIdx.x * blockDim.x + threadIdx.x; const int init = blockIdx.y * blockDim.y + threadIdx.y; const int s = blockDim.y * gridDim.y; if (k >= y_size) { return; } int64_t i = static_cast<int64_t>((n1 - sqrt(n2 - 2 * k))); int64_t j = k - n * i + i * (i + 1) / 2 + i + 1; int64_t ib = j - i - 1; int64_t jb = n - 2 - i; const T grad_k = y_grad[k]; const T dist_k = y[k]; if (dist_k != 0.0 && p >= 1) { const T *const begin = x + i * m; const T *const end = begin + m; const T *x_i = begin + init; const T *x_j = x + j * m + init; T *buff1 = buffer + (ib * n + i) * m + init; T *buff2 = buffer + (jb * n + j) * m + init; for (; x_i < end; x_i += s, x_j += s, buff1 += s, buff2 += s) { const T diff = *x_i - *x_j; T res = (sign(diff) * pow(abs(diff), static_cast<T>(p - 1)) * (grad_k) / pow(dist_k, static_cast<T>(p - 1))); *buff1 = res; *buff2 = -res; } } } template <typename T> __global__ void PDist_Grad_Two(const size_t y_size, const T *y_grad, const T *x, const T *y, T *buffer, const int64_t n, const int64_t m, const float p, const float n1, const float n2) { const int64_t k = blockIdx.x * blockDim.x + threadIdx.x; const int init = blockIdx.y * blockDim.y + threadIdx.y; const int s = blockDim.y * gridDim.y; if (k >= y_size) { return; } int64_t i = static_cast<int64_t>((n1 - sqrt(n2 - 2 * k))); int64_t j = k - n * i + i * (i + 1) / 2 + i + 1; 
int64_t ib = j - i - 1; int64_t jb = n - 2 - i; const T grad_k = y_grad[k]; const T dist_k = y[k]; if (dist_k != 0.0) { const T *const begin = x + i * m; const T *const end = begin + m; const T *x_i = begin + init; const T *x_j = x + j * m + init; T *buff1 = buffer + (ib * n + i) * m + init; T *buff2 = buffer + (jb * n + j) * m + init; for (; x_i < end; x_i += s, x_j += s, buff1 += s, buff2 += s) { T res = grad_k * (*x_i - *x_j) / dist_k; *buff1 = res; *buff2 = -res; } } } template <typename T> __global__ void PDist_Grad_P(const size_t y_size, const T *y_grad, const T *x, const T *y, T *buffer, const int64_t n, const int64_t m, const float p, const float n1, const float n2) { const int64_t k = blockIdx.x * blockDim.x + threadIdx.x; const int init = blockIdx.y * blockDim.y + threadIdx.y; const int s = blockDim.y * gridDim.y; if (k >= y_size) { return; } int64_t i = static_cast<int64_t>((n1 - sqrt(n2 - 2 * k))); int64_t j = k - n * i + i * (i + 1) / 2 + i + 1; int64_t ib = j - i - 1; int64_t jb = n - 2 - i; const T grad_k = y_grad[k]; const T dist_k = y[k]; if (dist_k != 0.0) { const T *const begin = x + i * m; const T *const end = begin + m; const T *x_i = begin + init; const T *x_j = x + j * m + init; T *buff1 = buffer + (ib * n + i) * m + init; T *buff2 = buffer + (jb * n + j) * m + init; for (; x_i < end; x_i += s, x_j += s, buff1 += s, buff2 += s) { const T diff = (*x_i - *x_j); T res = diff * pow(abs(diff), static_cast<T>(p - 2)) * grad_k / pow(dist_k, static_cast<T>(p - 1)); *buff1 = res; *buff2 = -res; } } } template <typename T> __global__ void PDist_Grad_Inf(const size_t y_size, const T *y_grad, const T *x, const T *y, T *buffer, const int64_t n, const int64_t m, const float p, const float n1, const float n2) { const int64_t k = blockIdx.x * blockDim.x + threadIdx.x; const int init = blockIdx.y * blockDim.y + threadIdx.y; const int s = blockDim.y * gridDim.y; if (k >= y_size) { return; } int64_t i = static_cast<int64_t>((n1 - sqrt(n2 - 2 * k))); int64_t j = k - n * i + i * (i + 1) / 2 + i + 1; int64_t ib = j - i - 1; int64_t jb = n - 2 - i; const T grad_k = y_grad[k]; const T dist_k = y[k]; const T *const begin = x + i * m; const T *const end = begin + m; const T *x_i = begin + init; const T *x_j = x + j * m + init; T *buff1 = buffer + (ib * n + i) * m + init; T *buff2 = buffer + (jb * n + j) * m + init; for (; x_i < end; x_i += s, x_j += s, buff1 += s, buff2 += s) { T diff = *x_i - *x_j; T res = grad_k * sign(diff) * (abs(diff) == (dist_k)); *buff1 = res; *buff2 = -res; } } template <typename T> __global__ void AddBuffer(T *x_grad, T *buffer, const int64_t n, const size_t size) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { T res = 0.0; T *buff = buffer + pos; for (int64_t i = 0; i < n - 1; ++i, buff += size) { res += *(buff); } x_grad[pos] = res; } return; } template <typename T> void CalPDistGrad(const size_t x_size, const size_t y_size, const size_t grad_size, const T *y_grad, const T *x, const T *y, const int64_t n, const int64_t m, const float p, T *x_grad, T *buffer, const uint32_t &device_id, cudaStream_t cuda_stream) { if (p == 0.0 || grad_size == 0 || x_size == 0) { InitOutput<<<CUDA_BLOCKS(device_id, x_size), CUDA_THREADS(device_id), 0, cuda_stream>>>(x_grad, x_size); return; } const int block_x = 8; const int block_y = 128; const int grid_x = (y_size + block_x - 1) / block_x; const int grid_y = (m + block_y * 8 - 1) / (block_y * 8); const dim3 grid(grid_x, grid_y); const dim3 block(block_x, block_y); const float 
n1 = n - .5; const float n2 = n1 * n1 - 1; if (p == 1.0) { PDist_Grad_One<T><<<grid, block, 0, cuda_stream>>>(y_size, y_grad, x, y, buffer, n, m, p, n1, n2); } else if (p < 2.0) { InitOutput<<<CUDA_BLOCKS(device_id, (n-1) * x_size), CUDA_THREADS(device_id), 0, cuda_stream>>> (buffer, (n-1) * x_size); PDist_Grad_Lt_Two<T><<<grid, block, 0, cuda_stream>>>(y_size, y_grad, x, y, buffer, n, m, p, n1, n2); } else if (p == 2.0) { InitOutput<<<CUDA_BLOCKS(device_id, (n-1) * x_size), CUDA_THREADS(device_id), 0, cuda_stream>>> (buffer, (n-1) * x_size); PDist_Grad_Two<T><<<grid, block, 0, cuda_stream>>>(y_size, y_grad, x, y, buffer, n, m, p, n1, n2); } else if (std::isinf(p)) { PDist_Grad_Inf<T><<<grid, block, 0, cuda_stream>>>(y_size, y_grad, x, y, buffer, n, m, p, n1, n2); } else { InitOutput<<<CUDA_BLOCKS(device_id, (n-1) * x_size), CUDA_THREADS(device_id), 0, cuda_stream>>> (buffer, (n-1) * x_size); PDist_Grad_P<T><<<grid, block, 0, cuda_stream>>>(y_size, y_grad, x, y, buffer, n, m, p, n1, n2); } AddBuffer<<<CUDA_BLOCKS(device_id, x_size), CUDA_THREADS(device_id), 0, cuda_stream>>>(x_grad, buffer, n, x_size); } template CUDA_LIB_EXPORT void CalPDistGrad<float>(const size_t x_size, const size_t y_size, const size_t grad_size, const float *y_grad, const float *x, const float *y, const int64_t n, const int64_t m, const float p, float *x_grad, float *buffer, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalPDistGrad<double>(const size_t x_size, const size_t y_size, const size_t grad_size, const double *y_grad, const double *x, const double *y, const int64_t n, const int64_t m, const float p, double *x_grad, double *buffer, const uint32_t &device_id, cudaStream_t cuda_stream);
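All five PDist_Grad_* kernels above recover the pair (i, j) from the linear index k into the condensed distance vector y, inverting k = i*n - i*(i+1)/2 + (j - i - 1) by means of the n1/n2 constants computed in CalPDistGrad. The host-only program below is a minimal sketch that is not part of the original file: the test size n = 37 is arbitrary, and the "- 1" folded into n2 appears to be a guard so that floating-point round-off at a row boundary cannot push the truncation down to the previous row.

#include <cmath>
#include <cstdint>
#include <cstdio>

int main() {
  const int64_t n = 37;              // arbitrary test size (anything >= 2 works)
  const float n1 = n - 0.5f;
  const float n2 = n1 * n1 - 1;      // same constants CalPDistGrad passes to the kernels
  int64_t k = 0;
  for (int64_t i = 0; i < n - 1; ++i) {
    for (int64_t j = i + 1; j < n; ++j, ++k) {
      // identical index recovery to the PDist_Grad_* kernels
      int64_t ri = static_cast<int64_t>(n1 - std::sqrt(n2 - 2 * k));
      int64_t rj = k - n * ri + ri * (ri + 1) / 2 + ri + 1;
      if (ri != i || rj != j) {
        std::fprintf(stderr, "mismatch at k=%lld: got (%lld,%lld), want (%lld,%lld)\n",
                     (long long)k, (long long)ri, (long long)rj, (long long)i, (long long)j);
        return 1;
      }
    }
  }
  std::printf("index recovery OK for n = %lld (%lld pairs)\n",
              static_cast<long long>(n), static_cast<long long>(k));
  return 0;
}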
23506db4eea649a5ad70c01139887ca8076e3e87.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <assert.h> #include "basics/tensor.cu" #include "basics/session.hpp" #include "layers/data.cu" #include "layers/softmax.cu" #include "layers/cross_entropy_loss.cu" #include "layers/pooling.cu" #include "layers/conv2d.cu" #include "layers/relu.cu" #include "layers/dropout.cu" #include "layers/lrn.cu" #include "layers/fc.cu" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "utils/helper_cuda.h" #include "utils/utils.cu" #include "utils/load_model.hpp" void test_alexnet_cpu() { printf("Start testing AlexNet with CPUs.\n"); Session* session = Session::GetNewSession(); session->gpu = false; session->batch_size = 128; size_t batch_size = session->batch_size; Data<float> data_layer(batch_size, "tmp/test/img_med_list.txt"); size_t data_tops_dims0[4]; size_t data_tops_dims1[4]; data_layer.GetTopsDims({}, {data_tops_dims0, data_tops_dims1}); std::vector<Tensor<float>*> data_tops; data_tops.push_back(Tensor<float>::CreateTensorCPU(data_tops_dims0)); data_tops.push_back(Tensor<float>::CreateTensorCPU(data_tops_dims1)); printf("data: (%d,%d,%d,%d)\n",data_tops_dims0[0],data_tops_dims0[1],data_tops_dims0[2],data_tops_dims0[3]); Conv2D<float> conv1(11,11,3,96,4, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv1_top_dims[4]; conv1.GetTopsDims({data_tops_dims0}, {conv1_top_dims}); Pooling<float> pool1(2,MAX,2); size_t pool1_top_dims[4]; pool1.GetTopsDims({conv1_top_dims}, {pool1_top_dims}); Relu<float> relu1; size_t relu1_top_dims[4]; relu1.GetTopsDims({pool1_top_dims}, {relu1_top_dims}); LRN<float> norm1; size_t norm1_top_dims[4]; norm1.GetTopsDims({relu1_top_dims}, {norm1_top_dims}); Conv2D<float> conv2(5,5,96,256,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv2_top_dims[4]; conv2.GetTopsDims({norm1_top_dims}, {conv2_top_dims}); Pooling<float> pool2(2, MAX, 2); size_t pool2_top_dims[4]; pool2.GetTopsDims({conv2_top_dims}, {pool2_top_dims}); Relu<float> relu2; size_t relu2_top_dims[4]; relu2.GetTopsDims({pool2_top_dims}, {relu2_top_dims}); LRN<float> norm2; size_t norm2_top_dims[4]; norm2.GetTopsDims({relu2_top_dims}, {norm2_top_dims}); Conv2D<float> conv3(3,3,256,384,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv3_top_dims[4]; conv3.GetTopsDims({norm2_top_dims}, {conv3_top_dims}); Relu<float> relu3; size_t relu3_top_dims[4]; relu2.GetTopsDims({conv3_top_dims}, {relu3_top_dims}); Conv2D<float> conv4(3,3,384,384,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv4_top_dims[4]; conv4.GetTopsDims({relu3_top_dims}, {conv4_top_dims}); Relu<float> relu4; size_t relu4_top_dims[4]; relu4.GetTopsDims({conv4_top_dims}, {relu4_top_dims}); Conv2D<float> conv5(3,3,384,256,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv5_top_dims[4]; conv5.GetTopsDims({relu4_top_dims}, {conv5_top_dims}); Pooling<float> pool5(2, MAX, 2); size_t pool5_top_dims[4]; pool5.GetTopsDims({conv5_top_dims}, {pool5_top_dims}); Relu<float> relu5; size_t relu5_top_dims[4]; relu5.GetTopsDims({pool5_top_dims}, {relu5_top_dims}); FC<float> fc6(relu5_top_dims[1]*relu5_top_dims[2]*relu5_top_dims[3],4096); size_t to_fc6_dims[4]; to_fc6_dims[0] = relu5_top_dims[0]; to_fc6_dims[1] = 1; to_fc6_dims[2] = 1; to_fc6_dims[3] = relu5_top_dims[1]*relu5_top_dims[2]*relu5_top_dims[3]; size_t fc6_top_dims[4]; fc6.GetTopsDims({to_fc6_dims}, {fc6_top_dims}); Relu<float> relu6; size_t relu6_top_dims[4]; relu6.GetTopsDims({fc6_top_dims}, {relu6_top_dims}); Dropout<float> drop6; 
size_t drop6_top_dims[4]; drop6.GetTopsDims({relu6_top_dims}, {drop6_top_dims}); FC<float> fc7(4096,4096); size_t fc7_top_dims[4]; fc7.GetTopsDims({drop6_top_dims}, {fc7_top_dims}); Relu<float> relu7; size_t relu7_top_dims[4]; relu7.GetTopsDims({fc7_top_dims}, {relu7_top_dims}); Dropout<float> drop7; size_t drop7_top_dims[4]; drop7.GetTopsDims({relu7_top_dims}, {drop7_top_dims}); FC<float> fc8(4096,1000); size_t fc8_top_dims[4]; fc8.GetTopsDims({drop7_top_dims}, {fc8_top_dims}); Softmax<float> softmax; size_t sm_top_dims[4]; softmax.GetTopsDims({fc8_top_dims}, {sm_top_dims}); CrossEntropyLoss<float> cel; size_t cel_top_dims[4]; cel.GetTopsDims({sm_top_dims, data_tops_dims1}, {cel_top_dims}); printf("network finished setup: %3.1f ms \n", stopTimer()); Tensor<float> * conv1_top = Tensor<float>::CreateTensorCPU(conv1_top_dims); printf("conv1: (%d,%d,%d,%d)\n",conv1_top_dims[0],conv1_top_dims[1],conv1_top_dims[2],conv1_top_dims[3]); Tensor<float> * pool1_top = Tensor<float>::CreateTensorCPU(pool1_top_dims); printf("pool1: (%d,%d,%d,%d)\n",pool1_top_dims[0],pool1_top_dims[1],pool1_top_dims[2],pool1_top_dims[3]); Tensor<float> * relu1_top = Tensor<float>::CreateTensorCPU(relu1_top_dims); printf("relu1: (%d,%d,%d,%d)\n",relu1_top_dims[0],relu1_top_dims[1],relu1_top_dims[2],relu1_top_dims[3]); Tensor<float> * norm1_top = Tensor<float>::CreateTensorCPU(norm1_top_dims); printf("norm1: (%d,%d,%d,%d)\n",norm1_top_dims[0],norm1_top_dims[1],norm1_top_dims[2],norm1_top_dims[3]); Tensor<float> * conv2_top = Tensor<float>::CreateTensorCPU(conv2_top_dims); printf("conv2: (%d,%d,%d,%d)\n",conv2_top_dims[0],conv2_top_dims[1],conv2_top_dims[2],conv2_top_dims[3]); Tensor<float> * pool2_top = Tensor<float>::CreateTensorCPU(pool2_top_dims); printf("pool2: (%d,%d,%d,%d)\n",pool2_top_dims[0],pool2_top_dims[1],pool2_top_dims[2],pool2_top_dims[3]); Tensor<float> * relu2_top = Tensor<float>::CreateTensorCPU(relu2_top_dims); printf("relu2: (%d,%d,%d,%d)\n",relu2_top_dims[0],relu2_top_dims[1],relu2_top_dims[2],relu2_top_dims[3]); Tensor<float> * norm2_top = Tensor<float>::CreateTensorCPU(norm2_top_dims); printf("norm2: (%d,%d,%d,%d)\n",norm2_top_dims[0],norm2_top_dims[1],norm2_top_dims[2],norm2_top_dims[3]); Tensor<float> * conv3_top = Tensor<float>::CreateTensorCPU(conv3_top_dims); printf("conv3: (%d,%d,%d,%d)\n",conv3_top_dims[0],conv3_top_dims[1],conv3_top_dims[2],conv3_top_dims[3]); Tensor<float> * relu3_top = Tensor<float>::CreateTensorCPU(relu3_top_dims); printf("relu3: (%d,%d,%d,%d)\n",relu3_top_dims[0],relu3_top_dims[1],relu3_top_dims[2],relu3_top_dims[3]); Tensor<float> * conv4_top = Tensor<float>::CreateTensorCPU(conv4_top_dims); printf("conv4: (%d,%d,%d,%d)\n",conv4_top_dims[0],conv4_top_dims[1],conv4_top_dims[2],conv4_top_dims[3]); Tensor<float> * relu4_top = Tensor<float>::CreateTensorCPU(relu4_top_dims); printf("relu4: (%d,%d,%d,%d)\n",relu4_top_dims[0],relu4_top_dims[1],relu4_top_dims[2],relu4_top_dims[3]); Tensor<float> * conv5_top = Tensor<float>::CreateTensorCPU(conv5_top_dims); printf("conv5: (%d,%d,%d,%d)\n",conv5_top_dims[0],conv5_top_dims[1],conv5_top_dims[2],conv5_top_dims[3]); Tensor<float> * pool5_top = Tensor<float>::CreateTensorCPU(pool5_top_dims); printf("pool5: (%d,%d,%d,%d)\n",pool5_top_dims[0],pool5_top_dims[1],pool5_top_dims[2],pool5_top_dims[3]); Tensor<float> * relu5_top = Tensor<float>::CreateTensorCPU(relu5_top_dims); printf("relu5: (%d,%d,%d,%d)\n",relu5_top_dims[0],relu5_top_dims[1],relu5_top_dims[2],relu5_top_dims[3]); Tensor<float> * fc6_top = 
Tensor<float>::CreateTensorCPU(fc6_top_dims); Tensor<float> * relu6_top = Tensor<float>::CreateTensorCPU(relu6_top_dims); Tensor<float> * drop6_top = Tensor<float>::CreateTensorCPU(drop6_top_dims); Tensor<float> * fc7_top = Tensor<float>::CreateTensorCPU(fc7_top_dims); Tensor<float> * relu7_top = Tensor<float>::CreateTensorCPU(relu7_top_dims); Tensor<float> * drop7_top = Tensor<float>::CreateTensorCPU(drop7_top_dims); Tensor<float> * fc8_top = Tensor<float>::CreateTensorCPU(fc8_top_dims); Tensor<float> * sm_top = Tensor<float>::CreateTensorCPU(sm_top_dims); Tensor<float> * cel_top = Tensor<float>::CreateTensorCPU(cel_top_dims); startTimer(); data_layer.Forward(std::vector<Tensor<float>*> (), data_tops); printf("data forward: %3.1f ms \n", stopTimer()); startTimer(); conv1.Forward({data_tops[0]}, {conv1_top}); printf("conv1 forward: %3.1f ms \n", stopTimer()); startTimer(); pool1.Forward({conv1_top}, {pool1_top}); printf("pool1 forward: %3.1f ms \n", stopTimer()); startTimer(); relu1.Forward({pool1_top}, {relu1_top}); printf("relu1 forward: %3.1f ms \n", stopTimer()); startTimer(); norm1.Forward({relu1_top}, {norm1_top}); printf("norm1 forward: %3.1f ms \n", stopTimer()); startTimer(); conv2.Forward({relu1_top}, {conv2_top}); printf("conv2 forward: %3.1f ms \n", stopTimer()); startTimer(); pool2.Forward({conv2_top}, {pool2_top}); printf("pool2 forward: %3.1f ms \n", stopTimer()); startTimer(); relu2.Forward({pool2_top}, {relu2_top}); printf("relu2 forward: %3.1f ms \n", stopTimer()); startTimer(); norm2.Forward({relu2_top}, {norm2_top}); printf("norm2 forward: %3.1f ms \n", stopTimer()); startTimer(); conv3.Forward({relu2_top}, {conv3_top}); printf("conv3 forward: %3.1f ms \n", stopTimer()); startTimer(); relu3.Forward({conv3_top}, {relu3_top}); printf("relu3 forward: %3.1f ms \n", stopTimer()); startTimer(); conv4.Forward({relu3_top}, {conv4_top}); printf("conv4 forward: %3.1f ms \n", stopTimer()); startTimer(); relu4.Forward({conv4_top}, {relu4_top}); printf("relu4 forward: %3.1f ms \n", stopTimer()); startTimer(); conv5.Forward({relu4_top}, {conv5_top}); printf("conv5 forward: %3.1f ms \n", stopTimer()); startTimer(); pool5.Forward({conv5_top}, {pool5_top}); printf("pool5 forward: %3.1f ms \n", stopTimer()); startTimer(); relu5.Forward({pool5_top}, {relu5_top}); printf("relu5 forward: %3.1f ms \n", stopTimer()); startTimer(); fc6.Forward({relu5_top}, {fc6_top}); printf("fc6 forward: %3.1f ms \n", stopTimer()); startTimer(); relu6.Forward({fc6_top}, {relu6_top}); printf("relu6 forward: %3.1f ms \n", stopTimer()); startTimer(); drop6.Forward({relu6_top}, {drop6_top}); printf("drop6 forward: %3.1f ms \n", stopTimer()); startTimer(); fc7.Forward({drop6_top}, {fc7_top}); printf("fc7 forward: %3.1f ms \n", stopTimer()); startTimer(); relu7.Forward({fc7_top}, {relu7_top}); printf("relu7 forward: %3.1f ms \n", stopTimer()); startTimer(); drop7.Forward({relu7_top}, {drop7_top}); printf("drop7 forward: %3.1f ms \n", stopTimer()); startTimer(); fc8.Forward({drop7_top}, {fc8_top}); printf("fc8 forward: %3.1f ms \n", stopTimer()); startTimer(); softmax.Forward({fc8_top}, {sm_top}); printf("softmax forward: %3.1f ms \n", stopTimer()); startTimer(); cel.Forward({sm_top, data_tops[1]}, {cel_top}); printf("cel forward: %3.1f ms \n", stopTimer()); startTimer(); data_layer.Forward(std::vector<Tensor<float>*> (), data_tops); conv1.Forward({data_tops[0]}, {conv1_top}); pool1.Forward({conv1_top}, {pool1_top}); relu1.Forward({pool1_top}, {relu1_top}); norm1.Forward({relu1_top}, {norm1_top}); 
conv2.Forward({relu1_top}, {conv2_top}); pool2.Forward({conv2_top}, {pool2_top}); relu2.Forward({pool2_top}, {relu2_top}); norm2.Forward({relu2_top}, {norm2_top}); conv3.Forward({relu2_top}, {conv3_top}); relu3.Forward({conv3_top}, {relu3_top}); conv4.Forward({relu3_top}, {conv4_top}); relu4.Forward({conv4_top}, {relu4_top}); conv5.Forward({relu4_top}, {conv5_top}); pool5.Forward({conv5_top}, {pool5_top}); relu5.Forward({pool5_top}, {relu5_top}); fc6.Forward({relu5_top}, {fc6_top}); relu6.Forward({fc6_top}, {relu6_top}); drop6.Forward({relu6_top}, {drop6_top}); fc7.Forward({drop6_top}, {fc7_top}); relu7.Forward({fc7_top}, {relu7_top}); drop7.Forward({relu7_top}, {drop7_top}); fc8.Forward({drop7_top}, {fc8_top}); softmax.Forward({fc8_top}, {sm_top}); cel.Forward({sm_top, data_tops[1]}, {cel_top}); printf("finished forward: %3.1f ms \n", stopTimer()); } void test_alexnet_gpu() { printf("Start testing AlexNet with GPUs.\n"); hipError_t cudaStatus = hipSetDevice(0); checkCudaErrors(cudaStatus); show_mem(cudaStatus); startTimer(); Session* session = Session::GetNewSession(); session->gpu = true; session->batch_size = 16; size_t batch_size = session->batch_size; Data<float> data_layer(batch_size, "tmp/test/img_med_list.txt"); size_t data_tops_dims0[4]; size_t data_tops_dims1[4]; data_layer.GetTopsDims({}, {data_tops_dims0, data_tops_dims1}); std::vector<Tensor<float>*> data_tops; data_tops.push_back(Tensor<float>::CreateTensorGPU(data_tops_dims0)); data_tops.push_back(Tensor<float>::CreateTensorGPU(data_tops_dims1)); printf("data: (%d,%d,%d,%d)\n",data_tops_dims0[0],data_tops_dims0[1],data_tops_dims0[2],data_tops_dims0[3]); Conv2D<float> conv1(11,11,3,96,4, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv1_top_dims[4]; conv1.GetTopsDims({data_tops_dims0}, {conv1_top_dims}); Pooling<float> pool1(2,MAX,2); size_t pool1_top_dims[4]; pool1.GetTopsDims({conv1_top_dims}, {pool1_top_dims}); Relu<float> relu1; size_t relu1_top_dims[4]; relu1.GetTopsDims({pool1_top_dims}, {relu1_top_dims}); LRN<float> norm1; size_t norm1_top_dims[4]; norm1.GetTopsDims({relu1_top_dims}, {norm1_top_dims}); Conv2D<float> conv2(5,5,96,256,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv2_top_dims[4]; conv2.GetTopsDims({norm1_top_dims}, {conv2_top_dims}); Pooling<float> pool2(2, MAX, 2); size_t pool2_top_dims[4]; pool2.GetTopsDims({conv2_top_dims}, {pool2_top_dims}); Relu<float> relu2; size_t relu2_top_dims[4]; relu2.GetTopsDims({pool2_top_dims}, {relu2_top_dims}); LRN<float> norm2; size_t norm2_top_dims[4]; norm2.GetTopsDims({relu2_top_dims}, {norm2_top_dims}); Conv2D<float> conv3(3,3,256,384,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv3_top_dims[4]; conv3.GetTopsDims({norm2_top_dims}, {conv3_top_dims}); Relu<float> relu3; size_t relu3_top_dims[4]; relu2.GetTopsDims({conv3_top_dims}, {relu3_top_dims}); Conv2D<float> conv4(3,3,384,384,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv4_top_dims[4]; conv4.GetTopsDims({relu3_top_dims}, {conv4_top_dims}); Relu<float> relu4; size_t relu4_top_dims[4]; relu4.GetTopsDims({conv4_top_dims}, {relu4_top_dims}); Conv2D<float> conv5(3,3,384,256,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv5_top_dims[4]; conv5.GetTopsDims({relu4_top_dims}, {conv5_top_dims}); Pooling<float> pool5(2, MAX, 2); size_t pool5_top_dims[4]; pool5.GetTopsDims({conv5_top_dims}, {pool5_top_dims}); Relu<float> relu5; size_t relu5_top_dims[4]; relu5.GetTopsDims({pool5_top_dims}, {relu5_top_dims}); FC<float> 
fc6(relu5_top_dims[1]*relu5_top_dims[2]*relu5_top_dims[3],4096); size_t to_fc6_dims[4]; to_fc6_dims[0] = relu5_top_dims[0]; to_fc6_dims[1] = 1; to_fc6_dims[2] = 1; to_fc6_dims[3] = relu5_top_dims[1]*relu5_top_dims[2]*relu5_top_dims[3]; size_t fc6_top_dims[4]; fc6.GetTopsDims({to_fc6_dims}, {fc6_top_dims}); Relu<float> relu6; size_t relu6_top_dims[4]; relu6.GetTopsDims({fc6_top_dims}, {relu6_top_dims}); Dropout<float> drop6; size_t drop6_top_dims[4]; drop6.GetTopsDims({relu6_top_dims}, {drop6_top_dims}); FC<float> fc7(4096,4096); size_t fc7_top_dims[4]; fc7.GetTopsDims({drop6_top_dims}, {fc7_top_dims}); Relu<float> relu7; size_t relu7_top_dims[4]; relu7.GetTopsDims({fc7_top_dims}, {relu7_top_dims}); Dropout<float> drop7; size_t drop7_top_dims[4]; drop7.GetTopsDims({relu7_top_dims}, {drop7_top_dims}); FC<float> fc8(4096,1000); size_t fc8_top_dims[4]; fc8.GetTopsDims({drop7_top_dims}, {fc8_top_dims}); Softmax<float> softmax; size_t sm_top_dims[4]; softmax.GetTopsDims({fc8_top_dims}, {sm_top_dims}); CrossEntropyLoss<float> cel; size_t cel_top_dims[4]; cel.GetTopsDims({sm_top_dims, data_tops_dims1}, {cel_top_dims}); printf("network finished setup: %3.1f ms \n", stopTimer()); show_mem(cudaStatus); cudaStatus = hipGetLastError(); checkCudaErrors(cudaStatus); Tensor<float> * conv1_top = Tensor<float>::CreateTensorGPU(conv1_top_dims); printf("conv1: (%d,%d,%d,%d)\n",conv1_top_dims[0],conv1_top_dims[1],conv1_top_dims[2],conv1_top_dims[3]); Tensor<float> * pool1_top = Tensor<float>::CreateTensorGPU(pool1_top_dims); printf("pool1: (%d,%d,%d,%d)\n",pool1_top_dims[0],pool1_top_dims[1],pool1_top_dims[2],pool1_top_dims[3]); Tensor<float> * relu1_top = Tensor<float>::CreateTensorGPU(relu1_top_dims); printf("relu1: (%d,%d,%d,%d)\n",relu1_top_dims[0],relu1_top_dims[1],relu1_top_dims[2],relu1_top_dims[3]); Tensor<float> * norm1_top = Tensor<float>::CreateTensorGPU(norm1_top_dims); printf("norm1: (%d,%d,%d,%d)\n",norm1_top_dims[0],norm1_top_dims[1],norm1_top_dims[2],norm1_top_dims[3]); Tensor<float> * conv2_top = Tensor<float>::CreateTensorGPU(conv2_top_dims); printf("conv2: (%d,%d,%d,%d)\n",conv2_top_dims[0],conv2_top_dims[1],conv2_top_dims[2],conv2_top_dims[3]); Tensor<float> * pool2_top = Tensor<float>::CreateTensorGPU(pool2_top_dims); printf("pool2: (%d,%d,%d,%d)\n",pool2_top_dims[0],pool2_top_dims[1],pool2_top_dims[2],pool2_top_dims[3]); Tensor<float> * relu2_top = Tensor<float>::CreateTensorGPU(relu2_top_dims); printf("relu2: (%d,%d,%d,%d)\n",relu2_top_dims[0],relu2_top_dims[1],relu2_top_dims[2],relu2_top_dims[3]); Tensor<float> * norm2_top = Tensor<float>::CreateTensorGPU(norm2_top_dims); printf("norm2: (%d,%d,%d,%d)\n",norm2_top_dims[0],norm2_top_dims[1],norm2_top_dims[2],norm2_top_dims[3]); Tensor<float> * conv3_top = Tensor<float>::CreateTensorGPU(conv3_top_dims); printf("conv3: (%d,%d,%d,%d)\n",conv3_top_dims[0],conv3_top_dims[1],conv3_top_dims[2],conv3_top_dims[3]); Tensor<float> * relu3_top = Tensor<float>::CreateTensorGPU(relu3_top_dims); printf("relu3: (%d,%d,%d,%d)\n",relu3_top_dims[0],relu3_top_dims[1],relu3_top_dims[2],relu3_top_dims[3]); Tensor<float> * conv4_top = Tensor<float>::CreateTensorGPU(conv4_top_dims); printf("conv4: (%d,%d,%d,%d)\n",conv4_top_dims[0],conv4_top_dims[1],conv4_top_dims[2],conv4_top_dims[3]); Tensor<float> * relu4_top = Tensor<float>::CreateTensorGPU(relu4_top_dims); printf("relu4: (%d,%d,%d,%d)\n",relu4_top_dims[0],relu4_top_dims[1],relu4_top_dims[2],relu4_top_dims[3]); Tensor<float> * conv5_top = Tensor<float>::CreateTensorGPU(conv5_top_dims); printf("conv5: 
(%d,%d,%d,%d)\n",conv5_top_dims[0],conv5_top_dims[1],conv5_top_dims[2],conv5_top_dims[3]); Tensor<float> * pool5_top = Tensor<float>::CreateTensorGPU(pool5_top_dims); printf("pool5: (%d,%d,%d,%d)\n",pool5_top_dims[0],pool5_top_dims[1],pool5_top_dims[2],pool5_top_dims[3]); Tensor<float> * relu5_top = Tensor<float>::CreateTensorGPU(relu5_top_dims); printf("relu5: (%d,%d,%d,%d)\n",relu5_top_dims[0],relu5_top_dims[1],relu5_top_dims[2],relu5_top_dims[3]); Tensor<float> * fc6_top = Tensor<float>::CreateTensorGPU(fc6_top_dims); Tensor<float> * relu6_top = Tensor<float>::CreateTensorGPU(relu6_top_dims); Tensor<float> * drop6_top = Tensor<float>::CreateTensorGPU(drop6_top_dims); Tensor<float> * fc7_top = Tensor<float>::CreateTensorGPU(fc7_top_dims); Tensor<float> * relu7_top = Tensor<float>::CreateTensorGPU(relu7_top_dims); Tensor<float> * drop7_top = Tensor<float>::CreateTensorGPU(drop7_top_dims); Tensor<float> * fc8_top = Tensor<float>::CreateTensorGPU(fc8_top_dims); Tensor<float> * sm_top = Tensor<float>::CreateTensorGPU(sm_top_dims); Tensor<float> * cel_top = Tensor<float>::CreateTensorGPU(cel_top_dims); startTimer(); data_layer.Forward(std::vector<Tensor<float>*> (), data_tops); printf("data forward: %3.1f ms \n", stopTimer()); startTimer(); conv1.Forward({data_tops[0]}, {conv1_top}); printf("conv1 forward: %3.1f ms \n", stopTimer()); startTimer(); pool1.Forward({conv1_top}, {pool1_top}); printf("pool1 forward: %3.1f ms \n", stopTimer()); startTimer(); relu1.Forward({pool1_top}, {relu1_top}); printf("relu1 forward: %3.1f ms \n", stopTimer()); startTimer(); norm1.Forward({relu1_top}, {norm1_top}); printf("norm1 forward: %3.1f ms \n", stopTimer()); startTimer(); conv2.Forward({relu1_top}, {conv2_top}); printf("conv2 forward: %3.1f ms \n", stopTimer()); startTimer(); pool2.Forward({conv2_top}, {pool2_top}); printf("pool2 forward: %3.1f ms \n", stopTimer()); startTimer(); relu2.Forward({pool2_top}, {relu2_top}); printf("relu2 forward: %3.1f ms \n", stopTimer()); startTimer(); norm2.Forward({relu2_top}, {norm2_top}); printf("norm2 forward: %3.1f ms \n", stopTimer()); startTimer(); conv3.Forward({relu2_top}, {conv3_top}); printf("conv3 forward: %3.1f ms \n", stopTimer()); startTimer(); relu3.Forward({conv3_top}, {relu3_top}); printf("relu3 forward: %3.1f ms \n", stopTimer()); startTimer(); conv4.Forward({relu3_top}, {conv4_top}); printf("conv4 forward: %3.1f ms \n", stopTimer()); startTimer(); relu4.Forward({conv4_top}, {relu4_top}); printf("relu4 forward: %3.1f ms \n", stopTimer()); startTimer(); conv5.Forward({relu4_top}, {conv5_top}); printf("conv5 forward: %3.1f ms \n", stopTimer()); startTimer(); pool5.Forward({conv5_top}, {pool5_top}); printf("pool5 forward: %3.1f ms \n", stopTimer()); startTimer(); relu5.Forward({pool5_top}, {relu5_top}); printf("relu5 forward: %3.1f ms \n", stopTimer()); startTimer(); fc6.Forward({relu5_top}, {fc6_top}); printf("fc6 forward: %3.1f ms \n", stopTimer()); startTimer(); relu6.Forward({fc6_top}, {relu6_top}); printf("relu6 forward: %3.1f ms \n", stopTimer()); startTimer(); drop6.Forward({relu6_top}, {drop6_top}); printf("drop6 forward: %3.1f ms \n", stopTimer()); startTimer(); fc7.Forward({drop6_top}, {fc7_top}); printf("fc7 forward: %3.1f ms \n", stopTimer()); startTimer(); relu7.Forward({fc7_top}, {relu7_top}); printf("relu7 forward: %3.1f ms \n", stopTimer()); startTimer(); drop7.Forward({relu7_top}, {drop7_top}); printf("drop7 forward: %3.1f ms \n", stopTimer()); startTimer(); fc8.Forward({drop7_top}, {fc8_top}); printf("fc8 forward: %3.1f ms \n", 
stopTimer()); startTimer(); softmax.Forward({fc8_top}, {sm_top}); printf("softmax forward: %3.1f ms \n", stopTimer()); startTimer(); cel.Forward({sm_top, data_tops[1]}, {cel_top}); printf("cel forward: %3.1f ms \n", stopTimer()); show_mem(cudaStatus); data_layer.Forward(std::vector<Tensor<float>*> (), data_tops); startTimer(); conv1.Forward({data_tops[0]}, {conv1_top}); pool1.Forward({conv1_top}, {pool1_top}); relu1.Forward({pool1_top}, {relu1_top}); norm1.Forward({relu1_top}, {norm1_top}); conv2.Forward({relu1_top}, {conv2_top}); pool2.Forward({conv2_top}, {pool2_top}); relu2.Forward({pool2_top}, {relu2_top}); norm2.Forward({relu2_top}, {norm2_top}); conv3.Forward({relu2_top}, {conv3_top}); relu3.Forward({conv3_top}, {relu3_top}); conv4.Forward({relu3_top}, {conv4_top}); relu4.Forward({conv4_top}, {relu4_top}); conv5.Forward({relu4_top}, {conv5_top}); pool5.Forward({conv5_top}, {pool5_top}); relu5.Forward({pool5_top}, {relu5_top}); fc6.Forward({relu5_top}, {fc6_top}); relu6.Forward({fc6_top}, {relu6_top}); drop6.Forward({relu6_top}, {drop6_top}); fc7.Forward({drop6_top}, {fc7_top}); relu7.Forward({fc7_top}, {relu7_top}); drop7.Forward({relu7_top}, {drop7_top}); fc8.Forward({drop7_top}, {fc8_top}); softmax.Forward({fc8_top}, {sm_top}); cel.Forward({sm_top, data_tops[1]}, {cel_top}); printf("finished forward: %3.1f ms \n", stopTimer()); show_mem(cudaStatus); cudaStatus = hipGetLastError(); checkCudaErrors(cudaStatus); show_mem(cudaStatus); } int main() { test_alexnet_cpu(); // test_alexnet_gpu(); }
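The per-layer timings above rely on startTimer()/stopTimer() from "utils/utils.cu", which is not included in this dump, so their actual implementation is unknown. A hypothetical event-based pair with the same call shape might look like the sketch below; it is written against the CUDA runtime to match the .cu variant of this file, and a hipified build would use the hipEvent_* equivalents. Because cudaEventSynchronize blocks until all preceding GPU work has finished, a timer built this way would give meaningful per-layer numbers even though kernel launches are asynchronous.

#include <cuda_runtime.h>

static cudaEvent_t g_start, g_stop;

// Begin timing: record an event on the default stream.
void startTimer() {
  cudaEventCreate(&g_start);
  cudaEventCreate(&g_stop);
  cudaEventRecord(g_start, 0);
}

// End timing: returns elapsed milliseconds since the matching startTimer() call.
float stopTimer() {
  cudaEventRecord(g_stop, 0);
  cudaEventSynchronize(g_stop);            // wait for all GPU work issued before the stop event
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, g_start, g_stop);
  cudaEventDestroy(g_start);
  cudaEventDestroy(g_stop);
  return ms;
}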
23506db4eea649a5ad70c01139887ca8076e3e87.cu
#include <stdio.h> #include <assert.h> #include "basics/tensor.cu" #include "basics/session.hpp" #include "layers/data.cu" #include "layers/softmax.cu" #include "layers/cross_entropy_loss.cu" #include "layers/pooling.cu" #include "layers/conv2d.cu" #include "layers/relu.cu" #include "layers/dropout.cu" #include "layers/lrn.cu" #include "layers/fc.cu" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "utils/helper_cuda.h" #include "utils/utils.cu" #include "utils/load_model.hpp" void test_alexnet_cpu() { printf("Start testing AlexNet with CPUs.\n"); Session* session = Session::GetNewSession(); session->gpu = false; session->batch_size = 128; size_t batch_size = session->batch_size; Data<float> data_layer(batch_size, "tmp/test/img_med_list.txt"); size_t data_tops_dims0[4]; size_t data_tops_dims1[4]; data_layer.GetTopsDims({}, {data_tops_dims0, data_tops_dims1}); std::vector<Tensor<float>*> data_tops; data_tops.push_back(Tensor<float>::CreateTensorCPU(data_tops_dims0)); data_tops.push_back(Tensor<float>::CreateTensorCPU(data_tops_dims1)); printf("data: (%d,%d,%d,%d)\n",data_tops_dims0[0],data_tops_dims0[1],data_tops_dims0[2],data_tops_dims0[3]); Conv2D<float> conv1(11,11,3,96,4, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv1_top_dims[4]; conv1.GetTopsDims({data_tops_dims0}, {conv1_top_dims}); Pooling<float> pool1(2,MAX,2); size_t pool1_top_dims[4]; pool1.GetTopsDims({conv1_top_dims}, {pool1_top_dims}); Relu<float> relu1; size_t relu1_top_dims[4]; relu1.GetTopsDims({pool1_top_dims}, {relu1_top_dims}); LRN<float> norm1; size_t norm1_top_dims[4]; norm1.GetTopsDims({relu1_top_dims}, {norm1_top_dims}); Conv2D<float> conv2(5,5,96,256,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv2_top_dims[4]; conv2.GetTopsDims({norm1_top_dims}, {conv2_top_dims}); Pooling<float> pool2(2, MAX, 2); size_t pool2_top_dims[4]; pool2.GetTopsDims({conv2_top_dims}, {pool2_top_dims}); Relu<float> relu2; size_t relu2_top_dims[4]; relu2.GetTopsDims({pool2_top_dims}, {relu2_top_dims}); LRN<float> norm2; size_t norm2_top_dims[4]; norm2.GetTopsDims({relu2_top_dims}, {norm2_top_dims}); Conv2D<float> conv3(3,3,256,384,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv3_top_dims[4]; conv3.GetTopsDims({norm2_top_dims}, {conv3_top_dims}); Relu<float> relu3; size_t relu3_top_dims[4]; relu2.GetTopsDims({conv3_top_dims}, {relu3_top_dims}); Conv2D<float> conv4(3,3,384,384,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv4_top_dims[4]; conv4.GetTopsDims({relu3_top_dims}, {conv4_top_dims}); Relu<float> relu4; size_t relu4_top_dims[4]; relu4.GetTopsDims({conv4_top_dims}, {relu4_top_dims}); Conv2D<float> conv5(3,3,384,256,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv5_top_dims[4]; conv5.GetTopsDims({relu4_top_dims}, {conv5_top_dims}); Pooling<float> pool5(2, MAX, 2); size_t pool5_top_dims[4]; pool5.GetTopsDims({conv5_top_dims}, {pool5_top_dims}); Relu<float> relu5; size_t relu5_top_dims[4]; relu5.GetTopsDims({pool5_top_dims}, {relu5_top_dims}); FC<float> fc6(relu5_top_dims[1]*relu5_top_dims[2]*relu5_top_dims[3],4096); size_t to_fc6_dims[4]; to_fc6_dims[0] = relu5_top_dims[0]; to_fc6_dims[1] = 1; to_fc6_dims[2] = 1; to_fc6_dims[3] = relu5_top_dims[1]*relu5_top_dims[2]*relu5_top_dims[3]; size_t fc6_top_dims[4]; fc6.GetTopsDims({to_fc6_dims}, {fc6_top_dims}); Relu<float> relu6; size_t relu6_top_dims[4]; relu6.GetTopsDims({fc6_top_dims}, {relu6_top_dims}); Dropout<float> drop6; size_t drop6_top_dims[4]; drop6.GetTopsDims({relu6_top_dims}, 
{drop6_top_dims}); FC<float> fc7(4096,4096); size_t fc7_top_dims[4]; fc7.GetTopsDims({drop6_top_dims}, {fc7_top_dims}); Relu<float> relu7; size_t relu7_top_dims[4]; relu7.GetTopsDims({fc7_top_dims}, {relu7_top_dims}); Dropout<float> drop7; size_t drop7_top_dims[4]; drop7.GetTopsDims({relu7_top_dims}, {drop7_top_dims}); FC<float> fc8(4096,1000); size_t fc8_top_dims[4]; fc8.GetTopsDims({drop7_top_dims}, {fc8_top_dims}); Softmax<float> softmax; size_t sm_top_dims[4]; softmax.GetTopsDims({fc8_top_dims}, {sm_top_dims}); CrossEntropyLoss<float> cel; size_t cel_top_dims[4]; cel.GetTopsDims({sm_top_dims, data_tops_dims1}, {cel_top_dims}); printf("network finished setup: %3.1f ms \n", stopTimer()); Tensor<float> * conv1_top = Tensor<float>::CreateTensorCPU(conv1_top_dims); printf("conv1: (%d,%d,%d,%d)\n",conv1_top_dims[0],conv1_top_dims[1],conv1_top_dims[2],conv1_top_dims[3]); Tensor<float> * pool1_top = Tensor<float>::CreateTensorCPU(pool1_top_dims); printf("pool1: (%d,%d,%d,%d)\n",pool1_top_dims[0],pool1_top_dims[1],pool1_top_dims[2],pool1_top_dims[3]); Tensor<float> * relu1_top = Tensor<float>::CreateTensorCPU(relu1_top_dims); printf("relu1: (%d,%d,%d,%d)\n",relu1_top_dims[0],relu1_top_dims[1],relu1_top_dims[2],relu1_top_dims[3]); Tensor<float> * norm1_top = Tensor<float>::CreateTensorCPU(norm1_top_dims); printf("norm1: (%d,%d,%d,%d)\n",norm1_top_dims[0],norm1_top_dims[1],norm1_top_dims[2],norm1_top_dims[3]); Tensor<float> * conv2_top = Tensor<float>::CreateTensorCPU(conv2_top_dims); printf("conv2: (%d,%d,%d,%d)\n",conv2_top_dims[0],conv2_top_dims[1],conv2_top_dims[2],conv2_top_dims[3]); Tensor<float> * pool2_top = Tensor<float>::CreateTensorCPU(pool2_top_dims); printf("pool2: (%d,%d,%d,%d)\n",pool2_top_dims[0],pool2_top_dims[1],pool2_top_dims[2],pool2_top_dims[3]); Tensor<float> * relu2_top = Tensor<float>::CreateTensorCPU(relu2_top_dims); printf("relu2: (%d,%d,%d,%d)\n",relu2_top_dims[0],relu2_top_dims[1],relu2_top_dims[2],relu2_top_dims[3]); Tensor<float> * norm2_top = Tensor<float>::CreateTensorCPU(norm2_top_dims); printf("norm2: (%d,%d,%d,%d)\n",norm2_top_dims[0],norm2_top_dims[1],norm2_top_dims[2],norm2_top_dims[3]); Tensor<float> * conv3_top = Tensor<float>::CreateTensorCPU(conv3_top_dims); printf("conv3: (%d,%d,%d,%d)\n",conv3_top_dims[0],conv3_top_dims[1],conv3_top_dims[2],conv3_top_dims[3]); Tensor<float> * relu3_top = Tensor<float>::CreateTensorCPU(relu3_top_dims); printf("relu3: (%d,%d,%d,%d)\n",relu3_top_dims[0],relu3_top_dims[1],relu3_top_dims[2],relu3_top_dims[3]); Tensor<float> * conv4_top = Tensor<float>::CreateTensorCPU(conv4_top_dims); printf("conv4: (%d,%d,%d,%d)\n",conv4_top_dims[0],conv4_top_dims[1],conv4_top_dims[2],conv4_top_dims[3]); Tensor<float> * relu4_top = Tensor<float>::CreateTensorCPU(relu4_top_dims); printf("relu4: (%d,%d,%d,%d)\n",relu4_top_dims[0],relu4_top_dims[1],relu4_top_dims[2],relu4_top_dims[3]); Tensor<float> * conv5_top = Tensor<float>::CreateTensorCPU(conv5_top_dims); printf("conv5: (%d,%d,%d,%d)\n",conv5_top_dims[0],conv5_top_dims[1],conv5_top_dims[2],conv5_top_dims[3]); Tensor<float> * pool5_top = Tensor<float>::CreateTensorCPU(pool5_top_dims); printf("pool5: (%d,%d,%d,%d)\n",pool5_top_dims[0],pool5_top_dims[1],pool5_top_dims[2],pool5_top_dims[3]); Tensor<float> * relu5_top = Tensor<float>::CreateTensorCPU(relu5_top_dims); printf("relu5: (%d,%d,%d,%d)\n",relu5_top_dims[0],relu5_top_dims[1],relu5_top_dims[2],relu5_top_dims[3]); Tensor<float> * fc6_top = Tensor<float>::CreateTensorCPU(fc6_top_dims); Tensor<float> * relu6_top = 
Tensor<float>::CreateTensorCPU(relu6_top_dims); Tensor<float> * drop6_top = Tensor<float>::CreateTensorCPU(drop6_top_dims); Tensor<float> * fc7_top = Tensor<float>::CreateTensorCPU(fc7_top_dims); Tensor<float> * relu7_top = Tensor<float>::CreateTensorCPU(relu7_top_dims); Tensor<float> * drop7_top = Tensor<float>::CreateTensorCPU(drop7_top_dims); Tensor<float> * fc8_top = Tensor<float>::CreateTensorCPU(fc8_top_dims); Tensor<float> * sm_top = Tensor<float>::CreateTensorCPU(sm_top_dims); Tensor<float> * cel_top = Tensor<float>::CreateTensorCPU(cel_top_dims); startTimer(); data_layer.Forward(std::vector<Tensor<float>*> (), data_tops); printf("data forward: %3.1f ms \n", stopTimer()); startTimer(); conv1.Forward({data_tops[0]}, {conv1_top}); printf("conv1 forward: %3.1f ms \n", stopTimer()); startTimer(); pool1.Forward({conv1_top}, {pool1_top}); printf("pool1 forward: %3.1f ms \n", stopTimer()); startTimer(); relu1.Forward({pool1_top}, {relu1_top}); printf("relu1 forward: %3.1f ms \n", stopTimer()); startTimer(); norm1.Forward({relu1_top}, {norm1_top}); printf("norm1 forward: %3.1f ms \n", stopTimer()); startTimer(); conv2.Forward({relu1_top}, {conv2_top}); printf("conv2 forward: %3.1f ms \n", stopTimer()); startTimer(); pool2.Forward({conv2_top}, {pool2_top}); printf("pool2 forward: %3.1f ms \n", stopTimer()); startTimer(); relu2.Forward({pool2_top}, {relu2_top}); printf("relu2 forward: %3.1f ms \n", stopTimer()); startTimer(); norm2.Forward({relu2_top}, {norm2_top}); printf("norm2 forward: %3.1f ms \n", stopTimer()); startTimer(); conv3.Forward({relu2_top}, {conv3_top}); printf("conv3 forward: %3.1f ms \n", stopTimer()); startTimer(); relu3.Forward({conv3_top}, {relu3_top}); printf("relu3 forward: %3.1f ms \n", stopTimer()); startTimer(); conv4.Forward({relu3_top}, {conv4_top}); printf("conv4 forward: %3.1f ms \n", stopTimer()); startTimer(); relu4.Forward({conv4_top}, {relu4_top}); printf("relu4 forward: %3.1f ms \n", stopTimer()); startTimer(); conv5.Forward({relu4_top}, {conv5_top}); printf("conv5 forward: %3.1f ms \n", stopTimer()); startTimer(); pool5.Forward({conv5_top}, {pool5_top}); printf("pool5 forward: %3.1f ms \n", stopTimer()); startTimer(); relu5.Forward({pool5_top}, {relu5_top}); printf("relu5 forward: %3.1f ms \n", stopTimer()); startTimer(); fc6.Forward({relu5_top}, {fc6_top}); printf("fc6 forward: %3.1f ms \n", stopTimer()); startTimer(); relu6.Forward({fc6_top}, {relu6_top}); printf("relu6 forward: %3.1f ms \n", stopTimer()); startTimer(); drop6.Forward({relu6_top}, {drop6_top}); printf("drop6 forward: %3.1f ms \n", stopTimer()); startTimer(); fc7.Forward({drop6_top}, {fc7_top}); printf("fc7 forward: %3.1f ms \n", stopTimer()); startTimer(); relu7.Forward({fc7_top}, {relu7_top}); printf("relu7 forward: %3.1f ms \n", stopTimer()); startTimer(); drop7.Forward({relu7_top}, {drop7_top}); printf("drop7 forward: %3.1f ms \n", stopTimer()); startTimer(); fc8.Forward({drop7_top}, {fc8_top}); printf("fc8 forward: %3.1f ms \n", stopTimer()); startTimer(); softmax.Forward({fc8_top}, {sm_top}); printf("softmax forward: %3.1f ms \n", stopTimer()); startTimer(); cel.Forward({sm_top, data_tops[1]}, {cel_top}); printf("cel forward: %3.1f ms \n", stopTimer()); startTimer(); data_layer.Forward(std::vector<Tensor<float>*> (), data_tops); conv1.Forward({data_tops[0]}, {conv1_top}); pool1.Forward({conv1_top}, {pool1_top}); relu1.Forward({pool1_top}, {relu1_top}); norm1.Forward({relu1_top}, {norm1_top}); conv2.Forward({relu1_top}, {conv2_top}); pool2.Forward({conv2_top}, {pool2_top}); 
relu2.Forward({pool2_top}, {relu2_top}); norm2.Forward({relu2_top}, {norm2_top}); conv3.Forward({relu2_top}, {conv3_top}); relu3.Forward({conv3_top}, {relu3_top}); conv4.Forward({relu3_top}, {conv4_top}); relu4.Forward({conv4_top}, {relu4_top}); conv5.Forward({relu4_top}, {conv5_top}); pool5.Forward({conv5_top}, {pool5_top}); relu5.Forward({pool5_top}, {relu5_top}); fc6.Forward({relu5_top}, {fc6_top}); relu6.Forward({fc6_top}, {relu6_top}); drop6.Forward({relu6_top}, {drop6_top}); fc7.Forward({drop6_top}, {fc7_top}); relu7.Forward({fc7_top}, {relu7_top}); drop7.Forward({relu7_top}, {drop7_top}); fc8.Forward({drop7_top}, {fc8_top}); softmax.Forward({fc8_top}, {sm_top}); cel.Forward({sm_top, data_tops[1]}, {cel_top}); printf("finished forward: %3.1f ms \n", stopTimer()); } void test_alexnet_gpu() { printf("Start testing AlexNet with GPUs.\n"); cudaError_t cudaStatus = cudaSetDevice(0); checkCudaErrors(cudaStatus); show_mem(cudaStatus); startTimer(); Session* session = Session::GetNewSession(); session->gpu = true; session->batch_size = 16; size_t batch_size = session->batch_size; Data<float> data_layer(batch_size, "tmp/test/img_med_list.txt"); size_t data_tops_dims0[4]; size_t data_tops_dims1[4]; data_layer.GetTopsDims({}, {data_tops_dims0, data_tops_dims1}); std::vector<Tensor<float>*> data_tops; data_tops.push_back(Tensor<float>::CreateTensorGPU(data_tops_dims0)); data_tops.push_back(Tensor<float>::CreateTensorGPU(data_tops_dims1)); printf("data: (%d,%d,%d,%d)\n",data_tops_dims0[0],data_tops_dims0[1],data_tops_dims0[2],data_tops_dims0[3]); Conv2D<float> conv1(11,11,3,96,4, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv1_top_dims[4]; conv1.GetTopsDims({data_tops_dims0}, {conv1_top_dims}); Pooling<float> pool1(2,MAX,2); size_t pool1_top_dims[4]; pool1.GetTopsDims({conv1_top_dims}, {pool1_top_dims}); Relu<float> relu1; size_t relu1_top_dims[4]; relu1.GetTopsDims({pool1_top_dims}, {relu1_top_dims}); LRN<float> norm1; size_t norm1_top_dims[4]; norm1.GetTopsDims({relu1_top_dims}, {norm1_top_dims}); Conv2D<float> conv2(5,5,96,256,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv2_top_dims[4]; conv2.GetTopsDims({norm1_top_dims}, {conv2_top_dims}); Pooling<float> pool2(2, MAX, 2); size_t pool2_top_dims[4]; pool2.GetTopsDims({conv2_top_dims}, {pool2_top_dims}); Relu<float> relu2; size_t relu2_top_dims[4]; relu2.GetTopsDims({pool2_top_dims}, {relu2_top_dims}); LRN<float> norm2; size_t norm2_top_dims[4]; norm2.GetTopsDims({relu2_top_dims}, {norm2_top_dims}); Conv2D<float> conv3(3,3,256,384,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv3_top_dims[4]; conv3.GetTopsDims({norm2_top_dims}, {conv3_top_dims}); Relu<float> relu3; size_t relu3_top_dims[4]; relu2.GetTopsDims({conv3_top_dims}, {relu3_top_dims}); Conv2D<float> conv4(3,3,384,384,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv4_top_dims[4]; conv4.GetTopsDims({relu3_top_dims}, {conv4_top_dims}); Relu<float> relu4; size_t relu4_top_dims[4]; relu4.GetTopsDims({conv4_top_dims}, {relu4_top_dims}); Conv2D<float> conv5(3,3,384,256,1, new GaussianKernelInitializer<float>(0.1), SAME); size_t conv5_top_dims[4]; conv5.GetTopsDims({relu4_top_dims}, {conv5_top_dims}); Pooling<float> pool5(2, MAX, 2); size_t pool5_top_dims[4]; pool5.GetTopsDims({conv5_top_dims}, {pool5_top_dims}); Relu<float> relu5; size_t relu5_top_dims[4]; relu5.GetTopsDims({pool5_top_dims}, {relu5_top_dims}); FC<float> fc6(relu5_top_dims[1]*relu5_top_dims[2]*relu5_top_dims[3],4096); size_t to_fc6_dims[4]; to_fc6_dims[0] = 
relu5_top_dims[0]; to_fc6_dims[1] = 1; to_fc6_dims[2] = 1; to_fc6_dims[3] = relu5_top_dims[1]*relu5_top_dims[2]*relu5_top_dims[3]; size_t fc6_top_dims[4]; fc6.GetTopsDims({to_fc6_dims}, {fc6_top_dims}); Relu<float> relu6; size_t relu6_top_dims[4]; relu6.GetTopsDims({fc6_top_dims}, {relu6_top_dims}); Dropout<float> drop6; size_t drop6_top_dims[4]; drop6.GetTopsDims({relu6_top_dims}, {drop6_top_dims}); FC<float> fc7(4096,4096); size_t fc7_top_dims[4]; fc7.GetTopsDims({drop6_top_dims}, {fc7_top_dims}); Relu<float> relu7; size_t relu7_top_dims[4]; relu7.GetTopsDims({fc7_top_dims}, {relu7_top_dims}); Dropout<float> drop7; size_t drop7_top_dims[4]; drop7.GetTopsDims({relu7_top_dims}, {drop7_top_dims}); FC<float> fc8(4096,1000); size_t fc8_top_dims[4]; fc8.GetTopsDims({drop7_top_dims}, {fc8_top_dims}); Softmax<float> softmax; size_t sm_top_dims[4]; softmax.GetTopsDims({fc8_top_dims}, {sm_top_dims}); CrossEntropyLoss<float> cel; size_t cel_top_dims[4]; cel.GetTopsDims({sm_top_dims, data_tops_dims1}, {cel_top_dims}); printf("network finished setup: %3.1f ms \n", stopTimer()); show_mem(cudaStatus); cudaStatus = cudaGetLastError(); checkCudaErrors(cudaStatus); Tensor<float> * conv1_top = Tensor<float>::CreateTensorGPU(conv1_top_dims); printf("conv1: (%d,%d,%d,%d)\n",conv1_top_dims[0],conv1_top_dims[1],conv1_top_dims[2],conv1_top_dims[3]); Tensor<float> * pool1_top = Tensor<float>::CreateTensorGPU(pool1_top_dims); printf("pool1: (%d,%d,%d,%d)\n",pool1_top_dims[0],pool1_top_dims[1],pool1_top_dims[2],pool1_top_dims[3]); Tensor<float> * relu1_top = Tensor<float>::CreateTensorGPU(relu1_top_dims); printf("relu1: (%d,%d,%d,%d)\n",relu1_top_dims[0],relu1_top_dims[1],relu1_top_dims[2],relu1_top_dims[3]); Tensor<float> * norm1_top = Tensor<float>::CreateTensorGPU(norm1_top_dims); printf("norm1: (%d,%d,%d,%d)\n",norm1_top_dims[0],norm1_top_dims[1],norm1_top_dims[2],norm1_top_dims[3]); Tensor<float> * conv2_top = Tensor<float>::CreateTensorGPU(conv2_top_dims); printf("conv2: (%d,%d,%d,%d)\n",conv2_top_dims[0],conv2_top_dims[1],conv2_top_dims[2],conv2_top_dims[3]); Tensor<float> * pool2_top = Tensor<float>::CreateTensorGPU(pool2_top_dims); printf("pool2: (%d,%d,%d,%d)\n",pool2_top_dims[0],pool2_top_dims[1],pool2_top_dims[2],pool2_top_dims[3]); Tensor<float> * relu2_top = Tensor<float>::CreateTensorGPU(relu2_top_dims); printf("relu2: (%d,%d,%d,%d)\n",relu2_top_dims[0],relu2_top_dims[1],relu2_top_dims[2],relu2_top_dims[3]); Tensor<float> * norm2_top = Tensor<float>::CreateTensorGPU(norm2_top_dims); printf("norm2: (%d,%d,%d,%d)\n",norm2_top_dims[0],norm2_top_dims[1],norm2_top_dims[2],norm2_top_dims[3]); Tensor<float> * conv3_top = Tensor<float>::CreateTensorGPU(conv3_top_dims); printf("conv3: (%d,%d,%d,%d)\n",conv3_top_dims[0],conv3_top_dims[1],conv3_top_dims[2],conv3_top_dims[3]); Tensor<float> * relu3_top = Tensor<float>::CreateTensorGPU(relu3_top_dims); printf("relu3: (%d,%d,%d,%d)\n",relu3_top_dims[0],relu3_top_dims[1],relu3_top_dims[2],relu3_top_dims[3]); Tensor<float> * conv4_top = Tensor<float>::CreateTensorGPU(conv4_top_dims); printf("conv4: (%d,%d,%d,%d)\n",conv4_top_dims[0],conv4_top_dims[1],conv4_top_dims[2],conv4_top_dims[3]); Tensor<float> * relu4_top = Tensor<float>::CreateTensorGPU(relu4_top_dims); printf("relu4: (%d,%d,%d,%d)\n",relu4_top_dims[0],relu4_top_dims[1],relu4_top_dims[2],relu4_top_dims[3]); Tensor<float> * conv5_top = Tensor<float>::CreateTensorGPU(conv5_top_dims); printf("conv5: (%d,%d,%d,%d)\n",conv5_top_dims[0],conv5_top_dims[1],conv5_top_dims[2],conv5_top_dims[3]); Tensor<float> * 
pool5_top = Tensor<float>::CreateTensorGPU(pool5_top_dims); printf("pool5: (%d,%d,%d,%d)\n",pool5_top_dims[0],pool5_top_dims[1],pool5_top_dims[2],pool5_top_dims[3]); Tensor<float> * relu5_top = Tensor<float>::CreateTensorGPU(relu5_top_dims); printf("relu5: (%d,%d,%d,%d)\n",relu5_top_dims[0],relu5_top_dims[1],relu5_top_dims[2],relu5_top_dims[3]); Tensor<float> * fc6_top = Tensor<float>::CreateTensorGPU(fc6_top_dims); Tensor<float> * relu6_top = Tensor<float>::CreateTensorGPU(relu6_top_dims); Tensor<float> * drop6_top = Tensor<float>::CreateTensorGPU(drop6_top_dims); Tensor<float> * fc7_top = Tensor<float>::CreateTensorGPU(fc7_top_dims); Tensor<float> * relu7_top = Tensor<float>::CreateTensorGPU(relu7_top_dims); Tensor<float> * drop7_top = Tensor<float>::CreateTensorGPU(drop7_top_dims); Tensor<float> * fc8_top = Tensor<float>::CreateTensorGPU(fc8_top_dims); Tensor<float> * sm_top = Tensor<float>::CreateTensorGPU(sm_top_dims); Tensor<float> * cel_top = Tensor<float>::CreateTensorGPU(cel_top_dims); startTimer(); data_layer.Forward(std::vector<Tensor<float>*> (), data_tops); printf("data forward: %3.1f ms \n", stopTimer()); startTimer(); conv1.Forward({data_tops[0]}, {conv1_top}); printf("conv1 forward: %3.1f ms \n", stopTimer()); startTimer(); pool1.Forward({conv1_top}, {pool1_top}); printf("pool1 forward: %3.1f ms \n", stopTimer()); startTimer(); relu1.Forward({pool1_top}, {relu1_top}); printf("relu1 forward: %3.1f ms \n", stopTimer()); startTimer(); norm1.Forward({relu1_top}, {norm1_top}); printf("norm1 forward: %3.1f ms \n", stopTimer()); startTimer(); conv2.Forward({relu1_top}, {conv2_top}); printf("conv2 forward: %3.1f ms \n", stopTimer()); startTimer(); pool2.Forward({conv2_top}, {pool2_top}); printf("pool2 forward: %3.1f ms \n", stopTimer()); startTimer(); relu2.Forward({pool2_top}, {relu2_top}); printf("relu2 forward: %3.1f ms \n", stopTimer()); startTimer(); norm2.Forward({relu2_top}, {norm2_top}); printf("norm2 forward: %3.1f ms \n", stopTimer()); startTimer(); conv3.Forward({relu2_top}, {conv3_top}); printf("conv3 forward: %3.1f ms \n", stopTimer()); startTimer(); relu3.Forward({conv3_top}, {relu3_top}); printf("relu3 forward: %3.1f ms \n", stopTimer()); startTimer(); conv4.Forward({relu3_top}, {conv4_top}); printf("conv4 forward: %3.1f ms \n", stopTimer()); startTimer(); relu4.Forward({conv4_top}, {relu4_top}); printf("relu4 forward: %3.1f ms \n", stopTimer()); startTimer(); conv5.Forward({relu4_top}, {conv5_top}); printf("conv5 forward: %3.1f ms \n", stopTimer()); startTimer(); pool5.Forward({conv5_top}, {pool5_top}); printf("pool5 forward: %3.1f ms \n", stopTimer()); startTimer(); relu5.Forward({pool5_top}, {relu5_top}); printf("relu5 forward: %3.1f ms \n", stopTimer()); startTimer(); fc6.Forward({relu5_top}, {fc6_top}); printf("fc6 forward: %3.1f ms \n", stopTimer()); startTimer(); relu6.Forward({fc6_top}, {relu6_top}); printf("relu6 forward: %3.1f ms \n", stopTimer()); startTimer(); drop6.Forward({relu6_top}, {drop6_top}); printf("drop6 forward: %3.1f ms \n", stopTimer()); startTimer(); fc7.Forward({drop6_top}, {fc7_top}); printf("fc7 forward: %3.1f ms \n", stopTimer()); startTimer(); relu7.Forward({fc7_top}, {relu7_top}); printf("relu7 forward: %3.1f ms \n", stopTimer()); startTimer(); drop7.Forward({relu7_top}, {drop7_top}); printf("drop7 forward: %3.1f ms \n", stopTimer()); startTimer(); fc8.Forward({drop7_top}, {fc8_top}); printf("fc8 forward: %3.1f ms \n", stopTimer()); startTimer(); softmax.Forward({fc8_top}, {sm_top}); printf("softmax forward: %3.1f ms \n", stopTimer()); 
startTimer(); cel.Forward({sm_top, data_tops[1]}, {cel_top}); printf("cel forward: %3.1f ms \n", stopTimer()); show_mem(cudaStatus); data_layer.Forward(std::vector<Tensor<float>*> (), data_tops); startTimer(); conv1.Forward({data_tops[0]}, {conv1_top}); pool1.Forward({conv1_top}, {pool1_top}); relu1.Forward({pool1_top}, {relu1_top}); norm1.Forward({relu1_top}, {norm1_top}); conv2.Forward({relu1_top}, {conv2_top}); pool2.Forward({conv2_top}, {pool2_top}); relu2.Forward({pool2_top}, {relu2_top}); norm2.Forward({relu2_top}, {norm2_top}); conv3.Forward({relu2_top}, {conv3_top}); relu3.Forward({conv3_top}, {relu3_top}); conv4.Forward({relu3_top}, {conv4_top}); relu4.Forward({conv4_top}, {relu4_top}); conv5.Forward({relu4_top}, {conv5_top}); pool5.Forward({conv5_top}, {pool5_top}); relu5.Forward({pool5_top}, {relu5_top}); fc6.Forward({relu5_top}, {fc6_top}); relu6.Forward({fc6_top}, {relu6_top}); drop6.Forward({relu6_top}, {drop6_top}); fc7.Forward({drop6_top}, {fc7_top}); relu7.Forward({fc7_top}, {relu7_top}); drop7.Forward({relu7_top}, {drop7_top}); fc8.Forward({drop7_top}, {fc8_top}); softmax.Forward({fc8_top}, {sm_top}); cel.Forward({sm_top, data_tops[1]}, {cel_top}); printf("finished forward: %3.1f ms \n", stopTimer()); show_mem(cudaStatus); cudaStatus = cudaGetLastError(); checkCudaErrors(cudaStatus); show_mem(cudaStatus); } int main() { test_alexnet_cpu(); // test_alexnet_gpu(); }
62059faa26343d50e01cfe076c048e223e494a38.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include "device_launch_parameters.h" #include <rocblas.h> #include <hip/hip_runtime.h> #include <math.h> #include <stdio.h> #include <iostream> #include <fstream> #include <vector> #include <string> using namespace std; #define blocksize 8 /*storing matrix*/ void matrix_read(double *L, int dimension) { FILE *fp; int row, col; fp = fopen("randomMatrix_3.txt", "r");//open output file if (fp == NULL)//open failed return; for (row = 0; row < dimension; row++) { for (col = 0; col < dimension; col++) if (fscanf(fp, "%lf,", &L[row * dimension + col]) == EOF) break;//read data if (feof(fp)) break;//if the file is over } fclose(fp);//close file } __global__ void nodiag_normalize(double *A, double *I, int n, int i) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < n && y < n) if (x == i && x != y) { I[x*n + y] /= A[i*n + i]; A[x*n + y] /= A[i*n + i]; } } __global__ void diag_normalize(double *A, double *I, int n, int i) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < n && y < n) if (x == y && x == i) { I[x*n + y] /= A[i*n + i]; A[x*n + y] /= A[i*n + i]; } } __global__ void gaussjordan(double *A, double *I, int n, int i) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < n && y < n) { if (x != i) { I[x*n + y] -= I[i*n + y] * A[x*n + i]; if (y != i) { A[x*n + y] -= A[i*n + y] * A[x*n + i]; } } } } __global__ void set_zero(double *A, double *I, int n, int i) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < n && y < n) { if (x != i) { if (y == i) { A[x*n + y] = 0; } } } } void savetofile(double *A, string s, int n, int h) { std::ofstream plik; plik.open(s); for (int j = 0; j<h; j++) { for (int i = 0; i<h; i++) { cout << A[j*n + i] << "\t"; plik << A[j*n + i] << "\t"; } plik << endl; } plik.close(); } void print_arr(double *f, int n) { for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { std::cout << *f++ << "\t"; } } } int something() { const int n = 20; // creating input double *iL = new double[n*n]; double *L = new double[n*n]; matrix_read(L, n); //savetofile(L, "L.txt", n, n); print_arr(L, n); cout << "inv\n"; double *d_A, *d_L, *I, *dI; float time; hipError_t err; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); int ddsize = n * n * sizeof(double); dim3 threadsPerBlock(blocksize, blocksize); dim3 numBlocks((n + blocksize - 1) / blocksize, (n + blocksize - 1) / blocksize); // memory allocation err = hipMalloc((void**)&d_A, ddsize); if (err != hipSuccess) { cout << hipGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; } err = hipMalloc((void**)&dI, ddsize); if (err != hipSuccess) { cout << hipGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; } I = new double[n*n]; for (int i = 0; i<n; i++) { for (int j = 0; j<n; j++) { if (i == j) I[i*n + i] = 1.0; else I[i*n + j] = 0.0; } } //copy data from CPU to GPU err = hipMemcpy(d_A, L, ddsize, hipMemcpyHostToDevice); if (err != hipSuccess) { cout << hipGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; } err = hipMemcpy(dI, I, ddsize, hipMemcpyHostToDevice); if (err != hipSuccess) { cout << hipGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; } //timer start hipEventRecord(start, 0); // 
L^(-1) for (int i = 0; i<n; i++) { nodiag_normalize << <numBlocks, threadsPerBlock >> >(d_A, dI, n, i); diag_normalize << <numBlocks, threadsPerBlock >> >(d_A, dI, n, i); gaussjordan << <numBlocks, threadsPerBlock >> >(d_A, dI, n, i); set_zero << <numBlocks, threadsPerBlock >> >(d_A, dI, n, i); } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); hipEventDestroy(start); hipEventDestroy(stop); //copy data from GPU to CPU err = hipMemcpy(iL, dI, ddsize, hipMemcpyDeviceToHost); if (err != hipSuccess) { cout << hipGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; } err = hipMemcpy(I, d_A, ddsize, hipMemcpyDeviceToHost); if (err != hipSuccess) { cout << hipGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; } cout << "Cuda Time - inverse: " << time << "ms\n"; savetofile(iL, "inv.txt", n, n); //savetofile(I, "I.txt", n, n); hipFree(d_A); hipFree(dI); double *c = new double[n*n]; for (int i = 0; i<n; i++) for (int j = 0; j<n; j++) { c[i*n + j] = 0; //put the initial value to zero for (int x = 0; x<n; x++) c[i*n + j] = c[i*n + j] + L[i*n + x] * iL[x*n + j]; //matrix multiplication } savetofile(c, "c.txt", n, n); delete[]I; delete[]L; delete[]iL; system("Pause"); return 0; }
62059faa26343d50e01cfe076c048e223e494a38.cu
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include "device_launch_parameters.h"
#include <cublas_v2.h>
#include <cuda.h>
#include <math.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <vector>
#include <string>

using namespace std;

#define blocksize 8

/* read the matrix from a text file */
void matrix_read(double *L, int dimension)
{
    FILE *fp;
    int row, col;

    fp = fopen("randomMatrix_3.txt", "r"); // open input file
    if (fp == NULL) // open failed
        return;

    for (row = 0; row < dimension; row++) {
        for (col = 0; col < dimension; col++)
            if (fscanf(fp, "%lf,", &L[row * dimension + col]) == EOF) break; // read data
        if (feof(fp)) break; // if the file is over
    }

    fclose(fp); // close file
}

// scale the off-diagonal elements of pivot row i (in A and I) by the pivot A[i][i]
__global__ void nodiag_normalize(double *A, double *I, int n, int i)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;

    if (x < n && y < n)
        if (x == i && x != y) {
            I[x*n + y] /= A[i*n + i];
            A[x*n + y] /= A[i*n + i];
        }
}

// scale the pivot element itself so that A[i][i] becomes 1
__global__ void diag_normalize(double *A, double *I, int n, int i)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;

    if (x < n && y < n)
        if (x == y && x == i) {
            I[x*n + y] /= A[i*n + i];
            A[x*n + y] /= A[i*n + i];
        }
}

// eliminate column i from every non-pivot row; column i of A itself is cleared later by set_zero
__global__ void gaussjordan(double *A, double *I, int n, int i)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;

    if (x < n && y < n) {
        if (x != i) {
            I[x*n + y] -= I[i*n + y] * A[x*n + i];
            if (y != i) {
                A[x*n + y] -= A[i*n + y] * A[x*n + i];
            }
        }
    }
}

// zero column i of A in all rows except the pivot row
__global__ void set_zero(double *A, double *I, int n, int i)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;

    if (x < n && y < n) {
        if (x != i) {
            if (y == i) {
                A[x*n + y] = 0;
            }
        }
    }
}

void savetofile(double *A, string s, int n, int h)
{
    std::ofstream plik;
    plik.open(s);

    for (int j = 0; j < h; j++) {
        for (int i = 0; i < h; i++) {
            cout << A[j*n + i] << "\t";
            plik << A[j*n + i] << "\t";
        }
        plik << endl;
    }

    plik.close();
}

void print_arr(double *f, int n)
{
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            std::cout << *f++ << "\t";
        }
        std::cout << "\n"; // end of row
    }
}

int something()
{
    const int n = 20;

    // creating input
    double *iL = new double[n*n];
    double *L = new double[n*n];
    matrix_read(L, n);
    //savetofile(L, "L.txt", n, n);
    print_arr(L, n);
    cout << "inv\n";

    double *d_A, *I, *dI;
    float time;
    cudaError_t err;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    int ddsize = n * n * sizeof(double);

    dim3 threadsPerBlock(blocksize, blocksize);
    dim3 numBlocks((n + blocksize - 1) / blocksize, (n + blocksize - 1) / blocksize);

    // memory allocation
    err = cudaMalloc((void**)&d_A, ddsize);
    if (err != cudaSuccess) { cout << cudaGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; }
    err = cudaMalloc((void**)&dI, ddsize);
    if (err != cudaSuccess) { cout << cudaGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; }

    I = new double[n*n];
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            if (i == j) I[i*n + i] = 1.0;
            else I[i*n + j] = 0.0;
        }
    }

    // copy data from CPU to GPU
    err = cudaMemcpy(d_A, L, ddsize, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) { cout << cudaGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; }
    err = cudaMemcpy(dI, I, ddsize, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) { cout << cudaGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; }

    // timer start
    cudaEventRecord(start, 0);

    // L^(-1)
    for (int i = 0; i < n; i++) {
        nodiag_normalize<<<numBlocks, threadsPerBlock>>>(d_A, dI, n, i);
        diag_normalize<<<numBlocks, threadsPerBlock>>>(d_A, dI, n, i);
        gaussjordan<<<numBlocks, threadsPerBlock>>>(d_A, dI, n, i);
        set_zero<<<numBlocks, threadsPerBlock>>>(d_A, dI, n, i);
    }

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // copy data from GPU to CPU
    err = cudaMemcpy(iL, dI, ddsize, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) { cout << cudaGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; }
    err = cudaMemcpy(I, d_A, ddsize, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) { cout << cudaGetErrorString(err) << " in " << __FILE__ << " at line " << __LINE__ << endl; }

    cout << "Cuda Time - inverse: " << time << "ms\n";
    savetofile(iL, "inv.txt", n, n);
    //savetofile(I, "I.txt", n, n);

    cudaFree(d_A);
    cudaFree(dI);

    double *c = new double[n*n];
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++) {
            c[i*n + j] = 0; // put the initial value to zero
            for (int x = 0; x < n; x++)
                c[i*n + j] = c[i*n + j] + L[i*n + x] * iL[x*n + j]; // matrix multiplication
        }
    savetofile(c, "c.txt", n, n);

    delete[] c;
    delete[] I;
    delete[] L;
    delete[] iL;
    system("Pause");
    return 0;
}
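
The four kernels above split one Gauss-Jordan elimination step per pivot column: the pivot row of A and I is scaled so that A[i][i] becomes 1, every other row is reduced, and the pivot column of A is cleared. The single-threaded CPU reference below is an illustrative sketch added here, not part of the original file; gauss_jordan_inverse_cpu is a hypothetical helper name, and like the kernels it assumes non-zero pivots (no row pivoting).

#include <cmath>
#include <vector>

// Single-threaded reference for the same pivot sequence the four kernels perform:
// normalize pivot row i, eliminate column i from every other row, accumulate the
// inverse in I (which must start as the identity).
bool gauss_jordan_inverse_cpu(std::vector<double>& A, std::vector<double>& I, int n)
{
    for (int i = 0; i < n; ++i) {
        double pivot = A[i * n + i];
        if (std::fabs(pivot) < 1e-12) return false;      // would need row pivoting
        for (int j = 0; j < n; ++j) {                    // nodiag_normalize + diag_normalize
            A[i * n + j] /= pivot;
            I[i * n + j] /= pivot;
        }
        for (int r = 0; r < n; ++r) {                    // gaussjordan + set_zero
            if (r == i) continue;
            double factor = A[r * n + i];
            for (int j = 0; j < n; ++j) {
                A[r * n + j] -= factor * A[i * n + j];
                I[r * n + j] -= factor * I[i * n + j];
            }
        }
    }
    return true;
}

Running the same randomMatrix_3.txt data through this reference and comparing with the inv.txt written above is a simple way to cross-check the kernels.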
b43f1094cfebe7ee4ba3a84ad27adc34ec6f58fd.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Add two 1024*1024 matrices with CUDA */
#include<iostream>
#include<stdlib.h>
#include<sys/time.h>
#include<math.h>
#include"cuda_runtime.h"
#define cols 1024
#define rows 1024
using namespace std;

__global__ void Add(float** Ad,float** Bd,float** Cd)
{
    int x = blockDim.x*blockIdx.x+threadIdx.x;
    int y = blockDim.y*blockIdx.y+threadIdx.y;
    if(x<cols && y<rows)
    {
        Cd[y][x]=Ad[y][x]+Bd[y][x];
    }
}

int main()
{
    struct timeval start, end;
    float **A,**B,**C,**Ad,**Bd,**Cd;
    float *a,*b,*c,*ad,*bd,*cd;
    int n=rows * cols;
    A=new float* [cols];
    B=new float* [cols];
    C=new float* [cols];
    a=new float [n];
    b=new float [n];
    c=new float [n];
    hipMalloc((void**)&Ad,sizeof(float*)*cols);
    hipMalloc((void**)&Bd,sizeof(float*)*cols);
    hipMalloc((void**)&Cd,sizeof(float*)*cols);
    hipMalloc((void**)&ad,sizeof(float)*n);
    hipMalloc((void**)&bd,sizeof(float)*n);
    hipMalloc((void**)&cd,sizeof(float)*n);
    for(int i=0;i<n;i++)
    {
        a[i]=2.0;
        b[i]=2.0;
    }
    for(int i=0;i<cols;i++)
    {
        // ad, bd and cd are 1-D device vectors; to address them as 2-D matrices on the GPU
        // they must be mapped to the row-pointer tables Ad, Bd and Cd. That mapping is built on the CPU.
        A[i]=ad+i*rows;
        B[i]=bd+i*rows;
        C[i]=cd+i*rows;
    }
    gettimeofday( &start, NULL);
    hipMemcpy(Ad,A,cols*sizeof(float*),hipMemcpyHostToDevice);
    hipMemcpy(Bd,B,cols*sizeof(float*),hipMemcpyHostToDevice);
    hipMemcpy(Cd,C,cols*sizeof(float*),hipMemcpyHostToDevice);
    hipMemcpy(ad,a,n*sizeof(float),hipMemcpyHostToDevice);
    hipMemcpy(bd,b,n*sizeof(float),hipMemcpyHostToDevice);
    dim3 dimBlock(16,16);
    dim3 dimGrid(cols/16+1,rows/16+1);
    hipLaunchKernelGGL(( Add), dim3(dimGrid),dim3(dimBlock), 0, 0, Ad,Bd,Cd);
    hipMemcpy(c,cd,n*sizeof(float),hipMemcpyDeviceToHost);
    gettimeofday( &end, NULL );
    float target=4.0;
    float error=0.0;
    for(int i=0;i<n;i++)
    {
        error+=abs(target-c[i]);
    }
    cout<<"error is "<<error<<endl;
    int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
    cout << "total time is " << timeuse/1000 << "ms" <<endl;
    delete [] a;
    delete [] b;
    delete [] c;
    delete [] A;
    delete [] B;
    delete [] C;
    hipFree(Ad);
    hipFree(Bd);
    hipFree(Cd);
    hipFree(ad);
    hipFree(bd);
    hipFree(cd);
    return 0;
}
b43f1094cfebe7ee4ba3a84ad27adc34ec6f58fd.cu
/* Add two 1024*1024 matrices with CUDA */
#include<iostream>
#include<stdlib.h>
#include<sys/time.h>
#include<math.h>
#include"cuda_runtime.h"
#define cols 1024
#define rows 1024
using namespace std;

__global__ void Add(float** Ad,float** Bd,float** Cd)
{
    int x = blockDim.x*blockIdx.x+threadIdx.x;
    int y = blockDim.y*blockIdx.y+threadIdx.y;
    if(x<cols && y<rows)
    {
        Cd[y][x]=Ad[y][x]+Bd[y][x];
    }
}

int main()
{
    struct timeval start, end;
    float **A,**B,**C,**Ad,**Bd,**Cd;
    float *a,*b,*c,*ad,*bd,*cd;
    int n=rows * cols;
    A=new float* [cols];
    B=new float* [cols];
    C=new float* [cols];
    a=new float [n];
    b=new float [n];
    c=new float [n];
    cudaMalloc((void**)&Ad,sizeof(float*)*cols);
    cudaMalloc((void**)&Bd,sizeof(float*)*cols);
    cudaMalloc((void**)&Cd,sizeof(float*)*cols);
    cudaMalloc((void**)&ad,sizeof(float)*n);
    cudaMalloc((void**)&bd,sizeof(float)*n);
    cudaMalloc((void**)&cd,sizeof(float)*n);
    for(int i=0;i<n;i++)
    {
        a[i]=2.0;
        b[i]=2.0;
    }
    for(int i=0;i<cols;i++)
    {
        // ad, bd and cd are 1-D device vectors; to address them as 2-D matrices on the GPU
        // they must be mapped to the row-pointer tables Ad, Bd and Cd. That mapping is built on the CPU.
        A[i]=ad+i*rows;
        B[i]=bd+i*rows;
        C[i]=cd+i*rows;
    }
    gettimeofday( &start, NULL);
    cudaMemcpy(Ad,A,cols*sizeof(float*),cudaMemcpyHostToDevice);
    cudaMemcpy(Bd,B,cols*sizeof(float*),cudaMemcpyHostToDevice);
    cudaMemcpy(Cd,C,cols*sizeof(float*),cudaMemcpyHostToDevice);
    cudaMemcpy(ad,a,n*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(bd,b,n*sizeof(float),cudaMemcpyHostToDevice);
    dim3 dimBlock(16,16);
    dim3 dimGrid(cols/16+1,rows/16+1);
    Add<<<dimGrid,dimBlock>>>(Ad,Bd,Cd);
    cudaMemcpy(c,cd,n*sizeof(float),cudaMemcpyDeviceToHost);
    gettimeofday( &end, NULL );
    float target=4.0;
    float error=0.0;
    for(int i=0;i<n;i++)
    {
        error+=abs(target-c[i]);
    }
    cout<<"error is "<<error<<endl;
    int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
    cout << "total time is " << timeuse/1000 << "ms" <<endl;
    delete [] a;
    delete [] b;
    delete [] c;
    delete [] A;
    delete [] B;
    delete [] C;
    cudaFree(Ad);
    cudaFree(Bd);
    cudaFree(Cd);
    cudaFree(ad);
    cudaFree(bd);
    cudaFree(cd);
    return 0;
}
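
The program above lets the kernel index with Ad[y][x] by building device-side tables of row pointers (Ad, Bd, Cd) that point into the flat buffers ad, bd, cd. The same element-wise addition can be written without that indirection using flat 1-D indexing; the kernel below is an illustrative sketch added here (AddFlat is a hypothetical name, not part of the original file):

// Flat-index variant of the Add kernel above: one thread per element, row-major layout.
__global__ void AddFlat(const float* a, const float* b, float* out, int rows_, int cols_)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;   // column
    int y = blockDim.y * blockIdx.y + threadIdx.y;   // row
    if (x < cols_ && y < rows_) {
        int idx = y * cols_ + x;
        out[idx] = a[idx] + b[idx];
    }
}

// A launch mirroring the original configuration would be:
//   dim3 dimBlock(16, 16);
//   dim3 dimGrid((cols + 15) / 16, (rows + 15) / 16);
//   AddFlat<<<dimGrid, dimBlock>>>(ad, bd, cd, rows, cols);

This removes the three row-pointer copies and one level of pointer chasing per element at the cost of computing the index explicitly.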
c0e68fec53ed9ba9955954fb2d233cfcfc7c1004.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "rotate_gpu_nms.hpp" #include <vector> #include <iostream> #include <cmath> #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ if (error != hipSuccess) { \ std::cout << hipGetErrorString(error) << std::endl; \ } \ } while (0) #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float axr(float x, float r) { return 0.5*3.1415926535*r*r-x*sqrt(r*r-x*x) - r*r*std::asin(x/r); } __device__ inline float devRotateIoU(float const * const a, float const * const b) { /*float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);*/ float ah = a[2]; float aw = a[3]; float bh = b[2]; float bw = b[3]; float ax = a[0]; float ay = a[1]; float bx = b[0]; float by = b[1]; float ra = sqrt(ah * ah + aw * aw) / 2; float rb = sqrt(bh * bh + bw * bw) / 2; float Sa = 3.1415926535 * ra * ra; float Sb = 3.1415926535 * rb * rb; float d = sqrt((ax - bx) * (ax - bx) + (ay - by) * (ay - by)); float r1 = min(ra, rb); float r2 = max(ra, rb); float x1, x2, s, interS; if(d > 0.0) { x1 = (d*d+r1*r1-r2*r2)/(2*d); x2 = (d*d+r2*r2-r1*r1)/(2*d); s = (r2*r2-r1*r1-d*d)/(2*d); } //else: Avoid Warning //x1 = 0 //x2 = 0 //s = 0 if(d<=r2-r1) interS = 3.1415926535*r1*r1; else if (d>=r2+r1 || r1 < 1e-5 || r2 < 1e-5) interS = 0.0; else if (d*d<r2*r2-r1*r1) interS = 3.1415926535*r1*r1-axr(s,r1)+axr(s+d,r2); else interS = axr(x1,r1)+axr(x2,r2); return interS / (Sa + Sb - interS); } __global__ void rotate_nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, unsigned long long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 6]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 6 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 0]; block_boxes[threadIdx.x * 6 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 1]; block_boxes[threadIdx.x * 6 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 2]; block_boxes[threadIdx.x * 6 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 3]; block_boxes[threadIdx.x * 6 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 4]; block_boxes[threadIdx.x * 6 + 5] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 5]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 6; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devRotateIoU(cur_box, block_boxes + i * 6) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } void _set_device(int device_id) { int current_device; 
CUDA_CHECK(hipGetDevice(&current_device)); if (current_device == device_id) { return; } // The call to hipSetDevice must come before any calls to Get, which // may perform initialization using the GPU. CUDA_CHECK(hipSetDevice(device_id)); } void _rotate_nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num, int boxes_dim, float nms_overlap_thresh, int device_id) { _set_device(device_id); float* boxes_dev = NULL; unsigned long long* mask_dev = NULL; const int col_blocks = DIVUP(boxes_num, threadsPerBlock); CUDA_CHECK(hipMalloc(&boxes_dev, boxes_num * boxes_dim * sizeof(float))); CUDA_CHECK(hipMemcpy(boxes_dev, boxes_host, boxes_num * boxes_dim * sizeof(float), hipMemcpyHostToDevice)); CUDA_CHECK(hipMalloc(&mask_dev, boxes_num * col_blocks * sizeof(unsigned long long))); dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); hipLaunchKernelGGL(( rotate_nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); CUDA_CHECK(hipMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, hipMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } *num_out = num_to_keep; CUDA_CHECK(hipFree(boxes_dev)); CUDA_CHECK(hipFree(mask_dev)); }
c0e68fec53ed9ba9955954fb2d233cfcfc7c1004.cu
#include "rotate_gpu_nms.hpp" #include <vector> #include <iostream> #include <cmath> #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ if (error != cudaSuccess) { \ std::cout << cudaGetErrorString(error) << std::endl; \ } \ } while (0) #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float axr(float x, float r) { return 0.5*3.1415926535*r*r-x*sqrt(r*r-x*x) - r*r*std::asin(x/r); } __device__ inline float devRotateIoU(float const * const a, float const * const b) { /*float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);*/ float ah = a[2]; float aw = a[3]; float bh = b[2]; float bw = b[3]; float ax = a[0]; float ay = a[1]; float bx = b[0]; float by = b[1]; float ra = sqrt(ah * ah + aw * aw) / 2; float rb = sqrt(bh * bh + bw * bw) / 2; float Sa = 3.1415926535 * ra * ra; float Sb = 3.1415926535 * rb * rb; float d = sqrt((ax - bx) * (ax - bx) + (ay - by) * (ay - by)); float r1 = min(ra, rb); float r2 = max(ra, rb); float x1, x2, s, interS; if(d > 0.0) { x1 = (d*d+r1*r1-r2*r2)/(2*d); x2 = (d*d+r2*r2-r1*r1)/(2*d); s = (r2*r2-r1*r1-d*d)/(2*d); } //else: Avoid Warning //x1 = 0 //x2 = 0 //s = 0 if(d<=r2-r1) interS = 3.1415926535*r1*r1; else if (d>=r2+r1 || r1 < 1e-5 || r2 < 1e-5) interS = 0.0; else if (d*d<r2*r2-r1*r1) interS = 3.1415926535*r1*r1-axr(s,r1)+axr(s+d,r2); else interS = axr(x1,r1)+axr(x2,r2); return interS / (Sa + Sb - interS); } __global__ void rotate_nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, unsigned long long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 6]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 6 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 0]; block_boxes[threadIdx.x * 6 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 1]; block_boxes[threadIdx.x * 6 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 2]; block_boxes[threadIdx.x * 6 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 3]; block_boxes[threadIdx.x * 6 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 4]; block_boxes[threadIdx.x * 6 + 5] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 6 + 5]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 6; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devRotateIoU(cur_box, block_boxes + i * 6) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } void _set_device(int device_id) { int current_device; CUDA_CHECK(cudaGetDevice(&current_device)); if (current_device == device_id) { return; } // The call to cudaSetDevice 
must come before any calls to Get, which // may perform initialization using the GPU. CUDA_CHECK(cudaSetDevice(device_id)); } void _rotate_nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num, int boxes_dim, float nms_overlap_thresh, int device_id) { _set_device(device_id); float* boxes_dev = NULL; unsigned long long* mask_dev = NULL; const int col_blocks = DIVUP(boxes_num, threadsPerBlock); CUDA_CHECK(cudaMalloc(&boxes_dev, boxes_num * boxes_dim * sizeof(float))); CUDA_CHECK(cudaMemcpy(boxes_dev, boxes_host, boxes_num * boxes_dim * sizeof(float), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMalloc(&mask_dev, boxes_num * col_blocks * sizeof(unsigned long long))); dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); rotate_nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); CUDA_CHECK(cudaMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, cudaMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } *num_out = num_to_keep; CUDA_CHECK(cudaFree(boxes_dev)); CUDA_CHECK(cudaFree(mask_dev)); }
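
Despite its name, devRotateIoU above does not intersect rotated rectangles: each box is replaced by its circumscribed circle (radius sqrt(h*h + w*w)/2 from the box height a[2] and width a[3]), and the IoU is the circle-circle intersection over union, with axr() computing the area of a circular segment. The host-side reference below is an illustrative sketch added here for unit-testing (circle_intersection_area is a hypothetical helper, not part of the original file); it uses the equivalent acos form of the segment area:

#include <algorithm>
#include <cmath>

// Area of the intersection of two circles whose centres are distance d apart.
double circle_intersection_area(double d, double ra, double rb)
{
    const double pi = 3.14159265358979323846;
    double r1 = std::min(ra, rb);
    double r2 = std::max(ra, rb);
    if (d >= r1 + r2) return 0.0;               // disjoint
    if (d <= r2 - r1) return pi * r1 * r1;      // smaller circle fully contained
    // lens area: two circular segments cut by the radical line
    double x1 = (d * d + r1 * r1 - r2 * r2) / (2.0 * d);  // signed offset from centre 1
    double x2 = d - x1;                                   // offset from centre 2
    double seg1 = r1 * r1 * std::acos(x1 / r1) - x1 * std::sqrt(r1 * r1 - x1 * x1);
    double seg2 = r2 * r2 * std::acos(x2 / r2) - x2 * std::sqrt(r2 * r2 - x2 * x2);
    return seg1 + seg2;
}

// IoU as the kernel computes it: inter / (pi*ra*ra + pi*rb*rb - inter).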
e28f814b3375316ac0a18f8a726dae58c176638d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <text/subword/detail/hash_utils.cuh> #include <text/subword/detail/tokenizer_utils.cuh> #include <text/subword/detail/wordpiece_tokenizer.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/strings/string_view.cuh> #include <cudf/utilities/error.hpp> #include <nvtext/subword_tokenize.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/copy.h> #include <thrust/distance.h> #include <thrust/execution_policy.h> #include <thrust/for_each.h> #include <thrust/functional.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/remove.h> #include <thrust/transform_scan.h> namespace nvtext { namespace detail { namespace { /** * @brief Initializes the token-ids, word-indices, and token counts vectors. * * Each thread process a single code point from `code_points`. * This also locates the start and end of each word within the `code_points` buffer. * A word start is identified as a non-space character that appears right after a space. * A word end is identified as a space character that appears right after a non-space one. * If the code point at this thread does not represent a word start or word end, * a max uint32_t value is written to the appropriate vector instead. * A post processing step is required to filter the relevant values in these * vectors. * * It is guaranteed that the same number of valid values will be written to both the * start and end indices and that after the select step, the two arrays will be aligned. * That is, `start_word_indices[word]` and `end_word_indices[word]` are the start and * end for the same word. * * Memory required is 13 bytes per code point values: * - 4 bytes each for `start_word_indices` and `end_word_indices` * - 4 bytes for each `token_ids` * - 1 byte for each each `tokens_per_word` * Also, there is a code point value for each byte in the input strings. * * @param[in] code_points A pointer to the code points in the strings after normalization. * @param[out] start_word_indices An array of size `num_code_points` which will contain the * starting index for each word. * @param[out] end_word_indices An array of size `num_code_points` which will contain the * ending index for each word. * @param num_code_points The total number of code_points. * @param[out] token_ids An array of size `num_code_points` which will hold the token ids. * This kernel just sets all the values to max uint32_t. * @param[out] tokens_per_word An array of size `num_code_points` which hold the number of * tokens. This kernel just sets all the values to 0. 
*/ __global__ void init_data_and_mark_word_start_and_ends(uint32_t const* code_points, uint32_t* start_word_indices, uint32_t* end_word_indices, size_t num_code_points, uint32_t* token_ids, uint8_t* tokens_per_word) { uint32_t char_for_thread = blockDim.x * blockIdx.x + threadIdx.x; // Deal with the start_word_indices array if (char_for_thread < num_code_points) { uint32_t val_to_write = std::numeric_limits<uint32_t>::max(); if ((code_points[char_for_thread] != SPACE_CODE_POINT) && (char_for_thread > 0) && (code_points[char_for_thread - 1] == SPACE_CODE_POINT)) { val_to_write = char_for_thread; } start_word_indices[char_for_thread] = val_to_write; // Deal with the end_word_indices_array val_to_write = std::numeric_limits<uint32_t>::max(); if ((code_points[char_for_thread] != SPACE_CODE_POINT) && (char_for_thread + 1 < num_code_points) && (code_points[char_for_thread + 1] == SPACE_CODE_POINT)) { val_to_write = char_for_thread + 1; } end_word_indices[char_for_thread] = val_to_write; token_ids[char_for_thread] = std::numeric_limits<uint32_t>::max(); tokens_per_word[char_for_thread] = 0; } } /** * @brief Resolves the string boundaries for the start and end words. * * This kernel should be called after `init_data_and_mark_word_start_and_ends` with at * least `num_strings` total threads. * * The start and end indices are updated to honor the string boundaries * within the strings array. This corrects any word ranges that span across * individual strings. * * @param code_points A pointer to the code points in the strings. * @param strings_offsets An array containing the index of the starting character of each string * with an extra space at the end containing the total number of characters. As a result, * this array is of length num_strings + 1. * @param start_word_indices An array which will contain the starting index for each word scattered * throughout. If an index does not represent a word start, the max-uint32_t value is written * to indicate this. * @param end_word_indices An array which will contain the one past the end index for each word * scattered throughout. If an index does not represent a word end, the max uint32_t value is * written to indicate this. * @param num_strings The total number of strings to be processed. */ __global__ void mark_string_start_and_ends(uint32_t const* code_points, uint32_t const* strings_offsets, uint32_t* start_word_indices, uint32_t* end_word_indices, uint32_t num_strings) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; // Ensure the starting character of each strings is written to the word start array. if (idx <= num_strings) { auto const offset = strings_offsets[idx]; if ((idx < num_strings) && (code_points[offset] != SPACE_CODE_POINT)) { start_word_indices[offset] = offset; } if ((offset > 0) && (code_points[offset - 1] != SPACE_CODE_POINT)) { end_word_indices[offset - 1] = offset; } } } /** * @brief Currently supported special tokens. * * Code logic expects these to be 3 upper-case characters along * with a single trailing space. */ __constant__ char special_tokens[35]{"BOS EOS UNK SEP PAD CLS MASK "}; constexpr cudf::size_type MIN_ST_WIDTH = 4; // Min token size in special_tokens constexpr cudf::size_type MAX_ST_WIDTH = 5; // Max token size in special_tokens struct mark_special_tokens { /** * @brief Check given code-point array to the list of known * special tokens. 
*/ __device__ bool is_special_token(uint32_t const* token, cudf::size_type size) const { if (size < MIN_ST_WIDTH || size > MAX_ST_WIDTH) return false; char str_token[MAX_ST_WIDTH]; // convert code-points to chars thrust::transform(thrust::seq, token, token + size, str_token, [](uint32_t cp) { // also upper-case them to match again special_tokens array return static_cast<char>(cp >= 'a' ? cp - 'a' + 'A' : cp); }); // search the special tokens array for the str_token cudf::string_view tokens(special_tokens, sizeof(special_tokens)); return tokens.find(str_token, size) >= 0; } /** * @brief Check code-points for special tokens and adjust indices. * * Tokens will appear in the `code_points` array as: * `_[_ttt_]_` where `_` are single space characters and * ttt is the variable-length token name * * The logic below uses the following variables to represent position * values in the `code_points` array after locating a special token: * ``` * _ [ _ t t t _ ] _ * ^ ^ ^ ^ * si sp ep ei * ``` * where `si` is `start_index` * `sp` is `start_pos` * `ep` is `end_pos` * `ei` is `end_index` * * When a special token is found, the `code_points` are adjusted * to remove the spaces and capitalize the name. * ``` * _ [ _ t t t _ ] _ is updated to * _ [ T T T ] _ ] _ * ``` * This is required for the downstream word-piece tokenizer to * match it to the vocabulary hash table. * * The `start_word_indices` and `end_word_indices` are updated to * identify the token and to ignore the extra trailing `]` character. */ __device__ void operator()(size_t idx) const { uint32_t const start_index = start_word_indices[idx]; if ((start_index == std::numeric_limits<uint32_t>::max()) || ((start_index + MIN_ST_WIDTH + 2) > num_code_points)) return; if (code_points[start_index] != '[') return; // check for matching end bracket uint32_t const start_pos = start_index + 2; // after the space delimiter // search for next start-word and then check it is a ']' uint32_t const end_index = [&] { auto const begin = start_word_indices + start_pos; auto const width = ::min(static_cast<size_t>(MAX_ST_WIDTH + 1), (num_code_points - start_pos)); auto const end = begin + width; // checking the next start-word is more reliable than arbitrarily searching for ']' // in case the text is split across string rows auto const iter = thrust::find_if(thrust::seq, begin + 1, end, [](auto swi) { return swi != std::numeric_limits<uint32_t>::max(); }); return iter == end ? start_index : static_cast<uint32_t>(iter - start_word_indices); }(); if (code_points[end_index] != ']') return; // check for special token auto const size = static_cast<cudf::size_type>(end_index - start_pos); if (!is_special_token(code_points + start_pos, size)) return; // special token found // adjust code-points auto const end_pos = end_index - 2; // change _[_ttt_]_ to _[TTT]_ for (auto left_idx = start_pos - 1; left_idx <= end_pos; ++left_idx) { auto const cp = code_points[left_idx + 1]; code_points[left_idx] = cp >= 'a' ? 
cp - 'a' + 'A' : cp; } code_points[end_pos] = ']'; // erase the intermediate indices thrust::fill(thrust::seq, start_word_indices + start_index + 1, // keep the first one start_word_indices + end_index + 1, std::numeric_limits<uint32_t>::max()); thrust::fill(thrust::seq, end_word_indices + start_index, end_word_indices + end_index + 1, std::numeric_limits<uint32_t>::max()); // reset the new end-word index end_word_indices[end_pos] = end_pos + 1; } uint32_t* const code_points; uint32_t* const start_word_indices; uint32_t* const end_word_indices; size_t const num_code_points; }; /** * @brief Converts words into token ids. * * Each thread is assigned a word to convert based on the `hash_table`. Each thread converts * its word and writes the number of tokens it found in the `tokens_per_word` array. * * The `tokens_per_word` array is kept to the length `num_code_points + 1`. This means each thread * can write its number of tokens to the `tokens_per_word` corresponding to the starting * character of each word. Since strings must start at some word, we can prefix sum this array * and use the strings_lengths code point offsets to directly index the number of tokens in each * string. * * The `token_ids` array should be initialized to the max uint32_t before calling this kernel. * * @param code_points An array containing all of the code points to be processed * @param hash_table An array containing the flattened hash table with key, value pairs * packed in 64-bits * @param bin_coefficients A pointer to the GPU pointer containing the hashing parameters for * each hash bin on the GPU. * @param bin_offsets: A pointer to the GPU pointer containing the start index of each bin in * the flattened hash table. * @param token_ids The index for each token found during tokenization. This is of length * num_code_points. In most cases, multiple characters will collapse to one token. In these * cases, the max uint32_t will be in place. Cub will be used later to filter out these * invalid ids later. * @param word_starts An array of length `num_code_points`. The first total word elements contains * the index of the first character for each word. * @param word_ends An array of length num_code_points. The first total_words elements contains the * past the end index for each word. This array is kept aligned with the initial * token_ids array containing the word start code points. * `word_ends[word] - filtered_start_indices[word] = word_length` * @param tokens_per_word An array of size num_code_points that will contain the number of tokens in * each word in a string. This array can be exclusive summed and the result used in * conjunction with the strings lengths array to find the tokens in each string. This is * possible since the number of tokens in each word will be placed at the index corresponding * to the start character of a word. If we assume prefix_summed is the prefix sum of the * tokens_per_word array, then `prefix_summed[strings_lengths[string_idx] - 1]` is the number * of tokens found before the start of string. * @param unk_token_id The token id to be place for unknown tokens * @param max_word_length The maximum length of a word. Any word longer than this length is * replaced by the unknown token. 
* @param total_words The total number of white space separated words * @param outer_hash_a_param The a parameter for the outer hash * @param outer_hash_b_param: The b parameter for the outer hash * @param num_outer_bins: The number of bins for the outer hash */ __global__ void kernel_wordpiece_tokenizer(uint32_t const* code_points, uint64_t const* hash_table, uint64_t const* bin_coefficients, uint16_t const* bin_offsets, uint16_t unk_token_id, uint32_t outer_hash_a_param, uint32_t outer_hash_b_param, uint16_t num_outer_bins, uint32_t const* word_starts, uint32_t const* word_ends, uint32_t max_word_length, uint32_t total_words, uint32_t* token_ids, uint8_t* tokens_per_word) { uint32_t const word_to_tokenize = blockDim.x * blockIdx.x + threadIdx.x; if (word_to_tokenize >= total_words) return; // Each thread gets the start code_point offset for each word and resets the token_id memory to // the default value. In a post processing step, all of these values will be removed. auto const token_start = word_starts[word_to_tokenize]; auto const token_end = word_ends[word_to_tokenize]; auto const word_length = token_end - token_start; // The sdbm hash of "##" constexpr uint32_t hashtag_hash = 2296000; uint16_t num_values_tokenized = 0; // initialize start, end uint32_t start = token_start; uint32_t end = token_end; if (word_length > max_word_length) { start = token_end; num_values_tokenized = 1; token_ids[token_start] = unk_token_id; tokens_per_word[token_start] = num_values_tokenized; } while (start < token_end) { end = token_end; // init token_id to no token int token_id = -1; // compute current length uint32_t const length = token_end - start; uint64_t substr_hash = sdbm_hash(code_points + start, length, start == token_start ? 0 : hashtag_hash); while (start < end) { token_id = retrieve(substr_hash, outer_hash_a_param, outer_hash_b_param, num_outer_bins, hash_table, bin_coefficients, bin_offsets); if (token_id != -1) { break; } --end; // Pop off the last value from the substr hash substr_hash = prev_sdbm_hash(substr_hash, code_points[end]); } if (token_id == -1) { end = token_end; token_id = unk_token_id; // We need to clean up the global array. This case is very uncommon. // Only 0.016% of words cannot be resolved to a token from the squad dev set. 
for (uint32_t i = 1; i < num_values_tokenized; ++i) { token_ids[token_start + i] = std::numeric_limits<uint32_t>::max(); } num_values_tokenized = 0; } token_ids[token_start + num_values_tokenized] = token_id; ++num_values_tokenized; start = end; } tokens_per_word[token_start] = num_values_tokenized; } } // namespace wordpiece_tokenizer::wordpiece_tokenizer(hashed_vocabulary const& vocab_table, uint32_t max_rows_final_tensor, uint32_t max_sequence_length, uint32_t stride, bool do_truncate, bool do_lower_case, uint32_t max_word_length) : vocab_table(vocab_table), normalizer(vocab_table.cp_metadata->view().data<codepoint_metadata_type>(), vocab_table.aux_cp_table->view().data<aux_codepoint_data_type>(), do_lower_case), max_sequence_length{max_sequence_length}, stride(stride), do_truncate(do_truncate), max_word_length{max_word_length} { } uvector_pair wordpiece_tokenizer::tokenize(char const* d_strings, uint32_t const* d_offsets, uint32_t num_strings, rmm::cuda_stream_view stream) { auto cps_and_offsets = normalizer.normalize(d_strings, d_offsets, num_strings, stream); tokenize(cps_and_offsets, stream); return uvector_pair(std::move(cps_and_offsets.first), std::move(cps_and_offsets.second)); } struct copy_if_fn { // inline lambda not allowed in private or protected member function __device__ bool operator()(uint32_t cp) { return cp != std::numeric_limits<uint32_t>::max(); } }; struct tranform_fn { // just converting uint8 value to uint32 __device__ uint32_t operator()(uint8_t count) { return count; } }; void wordpiece_tokenizer::tokenize(uvector_pair& cps_and_offsets, rmm::cuda_stream_view stream) { uint32_t* device_code_points = cps_and_offsets.first->data(); size_t const num_code_points = cps_and_offsets.first->size(); uint32_t* device_strings_offsets = cps_and_offsets.second->data(); uint32_t const num_strings = cps_and_offsets.second->size() - 1; const size_t four_byte_cp_chunks = 1 + (num_code_points - 1) / sizeof(uint32_t); const size_t rounded_num_cps = sizeof(uint32_t) * four_byte_cp_chunks; rmm::device_uvector<uint8_t> device_tokens_per_word(rounded_num_cps, stream); rmm::device_uvector<uint32_t> device_token_ids(num_code_points, stream); rmm::device_uvector<uint32_t> device_word_indices(2 * num_code_points, stream); // make device_start_word_indices and device_end_word_indices contiguous uint32_t* device_start_word_indices = device_word_indices.data(); uint32_t* device_end_word_indices = device_start_word_indices + num_code_points; cudf::detail::grid_1d const grid_init{static_cast<cudf::size_type>(num_code_points), THREADS_PER_BLOCK}; hipLaunchKernelGGL(( detail::init_data_and_mark_word_start_and_ends), dim3(grid_init.num_blocks), dim3(grid_init.num_threads_per_block), 0, stream.value(), device_code_points, device_start_word_indices, device_end_word_indices, num_code_points, device_token_ids.data(), device_tokens_per_word.data()); CUDF_CHECK_CUDA(stream.value()); cudf::detail::grid_1d const grid_mark{static_cast<cudf::size_type>(num_strings + 1), THREADS_PER_BLOCK}; hipLaunchKernelGGL(( detail::mark_string_start_and_ends), dim3(grid_mark.num_blocks), dim3(grid_mark.num_threads_per_block), 0, stream.value(), device_code_points, device_strings_offsets, device_start_word_indices, device_end_word_indices, num_strings); CUDF_CHECK_CUDA(stream.value()); // check for special tokens and adjust indices thrust::for_each_n( rmm::exec_policy(stream), thrust::make_counting_iterator<size_t>(0), num_code_points, mark_special_tokens{ device_code_points, device_start_word_indices, 
device_end_word_indices, num_code_points}); // Now start_word_indices has the word starts scattered throughout the array. We need to select // all values not equal to the max uint32_t and place them at the start of the array. We leverage // the fact that the start_word_indices and the end_word indices are contiguous to only launch one // device select kernel. auto itr_end = thrust::remove(rmm::exec_policy(stream), device_word_indices.begin(), device_word_indices.end(), std::numeric_limits<uint32_t>::max()); // The number of tokens selected will be double the number of words since we // select from both the start and end index arrays. uint32_t const num_words = thrust::distance(device_word_indices.begin(), itr_end) / 2; // We need to change the end_word_indices pointer after the selection is complete device_end_word_indices = device_start_word_indices + num_words; cudf::detail::grid_1d const grid{static_cast<cudf::size_type>(num_words), THREADS_PER_BLOCK}; detail:: hipLaunchKernelGGL(( kernel_wordpiece_tokenizer), dim3(grid.num_blocks), dim3(grid.num_threads_per_block), 0, stream.value(), device_code_points, vocab_table.table->view().data<uint64_t>(), vocab_table.bin_coefficients->view().data<uint64_t>(), vocab_table.bin_offsets->view().data<uint16_t>(), vocab_table.unknown_token_id, vocab_table.outer_hash_a, vocab_table.outer_hash_b, vocab_table.num_bins, device_start_word_indices, device_end_word_indices, max_word_length, num_words, device_token_ids.data(), device_tokens_per_word.data()); CUDF_CHECK_CUDA(stream.value()); // Repurpose the input array for the token ids. In the worst case, each code point ends up being a // token so this will always have enough memory to store the contiguous tokens. uint32_t* contiguous_token_ids = device_code_points; thrust::copy_if(rmm::exec_policy(stream), device_token_ids.begin(), device_token_ids.end(), contiguous_token_ids, copy_if_fn{}); // Repurpose start word indices since it is the same size and type as the required output. uint32_t* token_id_counts = device_start_word_indices; thrust::transform_inclusive_scan(rmm::exec_policy(stream), device_tokens_per_word.data(), device_tokens_per_word.data() + num_code_points, token_id_counts, tranform_fn{}, thrust::plus<uint32_t>()); // Update the device_strings_offsets using the token_id_counts thrust::for_each_n(rmm::exec_policy(stream), thrust::make_counting_iterator<uint32_t>(1), num_strings, update_strings_lengths_fn{token_id_counts, device_strings_offsets}); } } // namespace detail } // namespace nvtext
e28f814b3375316ac0a18f8a726dae58c176638d.cu
/* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <text/subword/detail/hash_utils.cuh> #include <text/subword/detail/tokenizer_utils.cuh> #include <text/subword/detail/wordpiece_tokenizer.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/strings/string_view.cuh> #include <cudf/utilities/error.hpp> #include <nvtext/subword_tokenize.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <thrust/copy.h> #include <thrust/distance.h> #include <thrust/execution_policy.h> #include <thrust/for_each.h> #include <thrust/functional.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/remove.h> #include <thrust/transform_scan.h> namespace nvtext { namespace detail { namespace { /** * @brief Initializes the token-ids, word-indices, and token counts vectors. * * Each thread process a single code point from `code_points`. * This also locates the start and end of each word within the `code_points` buffer. * A word start is identified as a non-space character that appears right after a space. * A word end is identified as a space character that appears right after a non-space one. * If the code point at this thread does not represent a word start or word end, * a max uint32_t value is written to the appropriate vector instead. * A post processing step is required to filter the relevant values in these * vectors. * * It is guaranteed that the same number of valid values will be written to both the * start and end indices and that after the select step, the two arrays will be aligned. * That is, `start_word_indices[word]` and `end_word_indices[word]` are the start and * end for the same word. * * Memory required is 13 bytes per code point values: * - 4 bytes each for `start_word_indices` and `end_word_indices` * - 4 bytes for each `token_ids` * - 1 byte for each each `tokens_per_word` * Also, there is a code point value for each byte in the input strings. * * @param[in] code_points A pointer to the code points in the strings after normalization. * @param[out] start_word_indices An array of size `num_code_points` which will contain the * starting index for each word. * @param[out] end_word_indices An array of size `num_code_points` which will contain the * ending index for each word. * @param num_code_points The total number of code_points. * @param[out] token_ids An array of size `num_code_points` which will hold the token ids. * This kernel just sets all the values to max uint32_t. * @param[out] tokens_per_word An array of size `num_code_points` which hold the number of * tokens. This kernel just sets all the values to 0. 
*/ __global__ void init_data_and_mark_word_start_and_ends(uint32_t const* code_points, uint32_t* start_word_indices, uint32_t* end_word_indices, size_t num_code_points, uint32_t* token_ids, uint8_t* tokens_per_word) { uint32_t char_for_thread = blockDim.x * blockIdx.x + threadIdx.x; // Deal with the start_word_indices array if (char_for_thread < num_code_points) { uint32_t val_to_write = std::numeric_limits<uint32_t>::max(); if ((code_points[char_for_thread] != SPACE_CODE_POINT) && (char_for_thread > 0) && (code_points[char_for_thread - 1] == SPACE_CODE_POINT)) { val_to_write = char_for_thread; } start_word_indices[char_for_thread] = val_to_write; // Deal with the end_word_indices_array val_to_write = std::numeric_limits<uint32_t>::max(); if ((code_points[char_for_thread] != SPACE_CODE_POINT) && (char_for_thread + 1 < num_code_points) && (code_points[char_for_thread + 1] == SPACE_CODE_POINT)) { val_to_write = char_for_thread + 1; } end_word_indices[char_for_thread] = val_to_write; token_ids[char_for_thread] = std::numeric_limits<uint32_t>::max(); tokens_per_word[char_for_thread] = 0; } } /** * @brief Resolves the string boundaries for the start and end words. * * This kernel should be called after `init_data_and_mark_word_start_and_ends` with at * least `num_strings` total threads. * * The start and end indices are updated to honor the string boundaries * within the strings array. This corrects any word ranges that span across * individual strings. * * @param code_points A pointer to the code points in the strings. * @param strings_offsets An array containing the index of the starting character of each string * with an extra space at the end containing the total number of characters. As a result, * this array is of length num_strings + 1. * @param start_word_indices An array which will contain the starting index for each word scattered * throughout. If an index does not represent a word start, the max-uint32_t value is written * to indicate this. * @param end_word_indices An array which will contain the one past the end index for each word * scattered throughout. If an index does not represent a word end, the max uint32_t value is * written to indicate this. * @param num_strings The total number of strings to be processed. */ __global__ void mark_string_start_and_ends(uint32_t const* code_points, uint32_t const* strings_offsets, uint32_t* start_word_indices, uint32_t* end_word_indices, uint32_t num_strings) { uint32_t idx = blockDim.x * blockIdx.x + threadIdx.x; // Ensure the starting character of each strings is written to the word start array. if (idx <= num_strings) { auto const offset = strings_offsets[idx]; if ((idx < num_strings) && (code_points[offset] != SPACE_CODE_POINT)) { start_word_indices[offset] = offset; } if ((offset > 0) && (code_points[offset - 1] != SPACE_CODE_POINT)) { end_word_indices[offset - 1] = offset; } } } /** * @brief Currently supported special tokens. * * Code logic expects these to be 3 upper-case characters along * with a single trailing space. */ __constant__ char special_tokens[35]{"BOS EOS UNK SEP PAD CLS MASK "}; constexpr cudf::size_type MIN_ST_WIDTH = 4; // Min token size in special_tokens constexpr cudf::size_type MAX_ST_WIDTH = 5; // Max token size in special_tokens struct mark_special_tokens { /** * @brief Check given code-point array to the list of known * special tokens. 
*/ __device__ bool is_special_token(uint32_t const* token, cudf::size_type size) const { if (size < MIN_ST_WIDTH || size > MAX_ST_WIDTH) return false; char str_token[MAX_ST_WIDTH]; // convert code-points to chars thrust::transform(thrust::seq, token, token + size, str_token, [](uint32_t cp) { // also upper-case them to match again special_tokens array return static_cast<char>(cp >= 'a' ? cp - 'a' + 'A' : cp); }); // search the special tokens array for the str_token cudf::string_view tokens(special_tokens, sizeof(special_tokens)); return tokens.find(str_token, size) >= 0; } /** * @brief Check code-points for special tokens and adjust indices. * * Tokens will appear in the `code_points` array as: * `_[_ttt_]_` where `_` are single space characters and * ttt is the variable-length token name * * The logic below uses the following variables to represent position * values in the `code_points` array after locating a special token: * ``` * _ [ _ t t t _ ] _ * ^ ^ ^ ^ * si sp ep ei * ``` * where `si` is `start_index` * `sp` is `start_pos` * `ep` is `end_pos` * `ei` is `end_index` * * When a special token is found, the `code_points` are adjusted * to remove the spaces and capitalize the name. * ``` * _ [ _ t t t _ ] _ is updated to * _ [ T T T ] _ ] _ * ``` * This is required for the downstream word-piece tokenizer to * match it to the vocabulary hash table. * * The `start_word_indices` and `end_word_indices` are updated to * identify the token and to ignore the extra trailing `]` character. */ __device__ void operator()(size_t idx) const { uint32_t const start_index = start_word_indices[idx]; if ((start_index == std::numeric_limits<uint32_t>::max()) || ((start_index + MIN_ST_WIDTH + 2) > num_code_points)) return; if (code_points[start_index] != '[') return; // check for matching end bracket uint32_t const start_pos = start_index + 2; // after the space delimiter // search for next start-word and then check it is a ']' uint32_t const end_index = [&] { auto const begin = start_word_indices + start_pos; auto const width = std::min(static_cast<size_t>(MAX_ST_WIDTH + 1), (num_code_points - start_pos)); auto const end = begin + width; // checking the next start-word is more reliable than arbitrarily searching for ']' // in case the text is split across string rows auto const iter = thrust::find_if(thrust::seq, begin + 1, end, [](auto swi) { return swi != std::numeric_limits<uint32_t>::max(); }); return iter == end ? start_index : static_cast<uint32_t>(iter - start_word_indices); }(); if (code_points[end_index] != ']') return; // check for special token auto const size = static_cast<cudf::size_type>(end_index - start_pos); if (!is_special_token(code_points + start_pos, size)) return; // special token found // adjust code-points auto const end_pos = end_index - 2; // change _[_ttt_]_ to _[TTT]_ for (auto left_idx = start_pos - 1; left_idx <= end_pos; ++left_idx) { auto const cp = code_points[left_idx + 1]; code_points[left_idx] = cp >= 'a' ? 
cp - 'a' + 'A' : cp; } code_points[end_pos] = ']'; // erase the intermediate indices thrust::fill(thrust::seq, start_word_indices + start_index + 1, // keep the first one start_word_indices + end_index + 1, std::numeric_limits<uint32_t>::max()); thrust::fill(thrust::seq, end_word_indices + start_index, end_word_indices + end_index + 1, std::numeric_limits<uint32_t>::max()); // reset the new end-word index end_word_indices[end_pos] = end_pos + 1; } uint32_t* const code_points; uint32_t* const start_word_indices; uint32_t* const end_word_indices; size_t const num_code_points; }; /** * @brief Converts words into token ids. * * Each thread is assigned a word to convert based on the `hash_table`. Each thread converts * its word and writes the number of tokens it found in the `tokens_per_word` array. * * The `tokens_per_word` array is kept to the length `num_code_points + 1`. This means each thread * can write its number of tokens to the `tokens_per_word` corresponding to the starting * character of each word. Since strings must start at some word, we can prefix sum this array * and use the strings_lengths code point offsets to directly index the number of tokens in each * string. * * The `token_ids` array should be initialized to the max uint32_t before calling this kernel. * * @param code_points An array containing all of the code points to be processed * @param hash_table An array containing the flattened hash table with key, value pairs * packed in 64-bits * @param bin_coefficients A pointer to the GPU pointer containing the hashing parameters for * each hash bin on the GPU. * @param bin_offsets: A pointer to the GPU pointer containing the start index of each bin in * the flattened hash table. * @param token_ids The index for each token found during tokenization. This is of length * num_code_points. In most cases, multiple characters will collapse to one token. In these * cases, the max uint32_t will be in place. Cub will be used later to filter out these * invalid ids later. * @param word_starts An array of length `num_code_points`. The first total word elements contains * the index of the first character for each word. * @param word_ends An array of length num_code_points. The first total_words elements contains the * past the end index for each word. This array is kept aligned with the initial * token_ids array containing the word start code points. * `word_ends[word] - filtered_start_indices[word] = word_length` * @param tokens_per_word An array of size num_code_points that will contain the number of tokens in * each word in a string. This array can be exclusive summed and the result used in * conjunction with the strings lengths array to find the tokens in each string. This is * possible since the number of tokens in each word will be placed at the index corresponding * to the start character of a word. If we assume prefix_summed is the prefix sum of the * tokens_per_word array, then `prefix_summed[strings_lengths[string_idx] - 1]` is the number * of tokens found before the start of string. * @param unk_token_id The token id to be place for unknown tokens * @param max_word_length The maximum length of a word. Any word longer than this length is * replaced by the unknown token. 
* @param total_words The total number of white space separated words * @param outer_hash_a_param The a parameter for the outer hash * @param outer_hash_b_param: The b parameter for the outer hash * @param num_outer_bins: The number of bins for the outer hash */ __global__ void kernel_wordpiece_tokenizer(uint32_t const* code_points, uint64_t const* hash_table, uint64_t const* bin_coefficients, uint16_t const* bin_offsets, uint16_t unk_token_id, uint32_t outer_hash_a_param, uint32_t outer_hash_b_param, uint16_t num_outer_bins, uint32_t const* word_starts, uint32_t const* word_ends, uint32_t max_word_length, uint32_t total_words, uint32_t* token_ids, uint8_t* tokens_per_word) { uint32_t const word_to_tokenize = blockDim.x * blockIdx.x + threadIdx.x; if (word_to_tokenize >= total_words) return; // Each thread gets the start code_point offset for each word and resets the token_id memory to // the default value. In a post processing step, all of these values will be removed. auto const token_start = word_starts[word_to_tokenize]; auto const token_end = word_ends[word_to_tokenize]; auto const word_length = token_end - token_start; // The sdbm hash of "##" constexpr uint32_t hashtag_hash = 2296000; uint16_t num_values_tokenized = 0; // initialize start, end uint32_t start = token_start; uint32_t end = token_end; if (word_length > max_word_length) { start = token_end; num_values_tokenized = 1; token_ids[token_start] = unk_token_id; tokens_per_word[token_start] = num_values_tokenized; } while (start < token_end) { end = token_end; // init token_id to no token int token_id = -1; // compute current length uint32_t const length = token_end - start; uint64_t substr_hash = sdbm_hash(code_points + start, length, start == token_start ? 0 : hashtag_hash); while (start < end) { token_id = retrieve(substr_hash, outer_hash_a_param, outer_hash_b_param, num_outer_bins, hash_table, bin_coefficients, bin_offsets); if (token_id != -1) { break; } --end; // Pop off the last value from the substr hash substr_hash = prev_sdbm_hash(substr_hash, code_points[end]); } if (token_id == -1) { end = token_end; token_id = unk_token_id; // We need to clean up the global array. This case is very uncommon. // Only 0.016% of words cannot be resolved to a token from the squad dev set. 
for (uint32_t i = 1; i < num_values_tokenized; ++i) { token_ids[token_start + i] = std::numeric_limits<uint32_t>::max(); } num_values_tokenized = 0; } token_ids[token_start + num_values_tokenized] = token_id; ++num_values_tokenized; start = end; } tokens_per_word[token_start] = num_values_tokenized; } } // namespace wordpiece_tokenizer::wordpiece_tokenizer(hashed_vocabulary const& vocab_table, uint32_t max_rows_final_tensor, uint32_t max_sequence_length, uint32_t stride, bool do_truncate, bool do_lower_case, uint32_t max_word_length) : vocab_table(vocab_table), normalizer(vocab_table.cp_metadata->view().data<codepoint_metadata_type>(), vocab_table.aux_cp_table->view().data<aux_codepoint_data_type>(), do_lower_case), max_sequence_length{max_sequence_length}, stride(stride), do_truncate(do_truncate), max_word_length{max_word_length} { } uvector_pair wordpiece_tokenizer::tokenize(char const* d_strings, uint32_t const* d_offsets, uint32_t num_strings, rmm::cuda_stream_view stream) { auto cps_and_offsets = normalizer.normalize(d_strings, d_offsets, num_strings, stream); tokenize(cps_and_offsets, stream); return uvector_pair(std::move(cps_and_offsets.first), std::move(cps_and_offsets.second)); } struct copy_if_fn { // inline lambda not allowed in private or protected member function __device__ bool operator()(uint32_t cp) { return cp != std::numeric_limits<uint32_t>::max(); } }; struct tranform_fn { // just converting uint8 value to uint32 __device__ uint32_t operator()(uint8_t count) { return count; } }; void wordpiece_tokenizer::tokenize(uvector_pair& cps_and_offsets, rmm::cuda_stream_view stream) { uint32_t* device_code_points = cps_and_offsets.first->data(); size_t const num_code_points = cps_and_offsets.first->size(); uint32_t* device_strings_offsets = cps_and_offsets.second->data(); uint32_t const num_strings = cps_and_offsets.second->size() - 1; const size_t four_byte_cp_chunks = 1 + (num_code_points - 1) / sizeof(uint32_t); const size_t rounded_num_cps = sizeof(uint32_t) * four_byte_cp_chunks; rmm::device_uvector<uint8_t> device_tokens_per_word(rounded_num_cps, stream); rmm::device_uvector<uint32_t> device_token_ids(num_code_points, stream); rmm::device_uvector<uint32_t> device_word_indices(2 * num_code_points, stream); // make device_start_word_indices and device_end_word_indices contiguous uint32_t* device_start_word_indices = device_word_indices.data(); uint32_t* device_end_word_indices = device_start_word_indices + num_code_points; cudf::detail::grid_1d const grid_init{static_cast<cudf::size_type>(num_code_points), THREADS_PER_BLOCK}; detail::init_data_and_mark_word_start_and_ends<<<grid_init.num_blocks, grid_init.num_threads_per_block, 0, stream.value()>>>(device_code_points, device_start_word_indices, device_end_word_indices, num_code_points, device_token_ids.data(), device_tokens_per_word.data()); CUDF_CHECK_CUDA(stream.value()); cudf::detail::grid_1d const grid_mark{static_cast<cudf::size_type>(num_strings + 1), THREADS_PER_BLOCK}; detail::mark_string_start_and_ends<<<grid_mark.num_blocks, grid_mark.num_threads_per_block, 0, stream.value()>>>(device_code_points, device_strings_offsets, device_start_word_indices, device_end_word_indices, num_strings); CUDF_CHECK_CUDA(stream.value()); // check for special tokens and adjust indices thrust::for_each_n( rmm::exec_policy(stream), thrust::make_counting_iterator<size_t>(0), num_code_points, mark_special_tokens{ device_code_points, device_start_word_indices, device_end_word_indices, num_code_points}); // Now start_word_indices has the 
word starts scattered throughout the array. We need to select // all values not equal to the max uint32_t and place them at the start of the array. We leverage // the fact that the start_word_indices and the end_word indices are contiguous to only launch one // device select kernel. auto itr_end = thrust::remove(rmm::exec_policy(stream), device_word_indices.begin(), device_word_indices.end(), std::numeric_limits<uint32_t>::max()); // The number of tokens selected will be double the number of words since we // select from both the start and end index arrays. uint32_t const num_words = thrust::distance(device_word_indices.begin(), itr_end) / 2; // We need to change the end_word_indices pointer after the selection is complete device_end_word_indices = device_start_word_indices + num_words; cudf::detail::grid_1d const grid{static_cast<cudf::size_type>(num_words), THREADS_PER_BLOCK}; detail:: kernel_wordpiece_tokenizer<<<grid.num_blocks, grid.num_threads_per_block, 0, stream.value()>>>( device_code_points, vocab_table.table->view().data<uint64_t>(), vocab_table.bin_coefficients->view().data<uint64_t>(), vocab_table.bin_offsets->view().data<uint16_t>(), vocab_table.unknown_token_id, vocab_table.outer_hash_a, vocab_table.outer_hash_b, vocab_table.num_bins, device_start_word_indices, device_end_word_indices, max_word_length, num_words, device_token_ids.data(), device_tokens_per_word.data()); CUDF_CHECK_CUDA(stream.value()); // Repurpose the input array for the token ids. In the worst case, each code point ends up being a // token so this will always have enough memory to store the contiguous tokens. uint32_t* contiguous_token_ids = device_code_points; thrust::copy_if(rmm::exec_policy(stream), device_token_ids.begin(), device_token_ids.end(), contiguous_token_ids, copy_if_fn{}); // Repurpose start word indices since it is the same size and type as the required output. uint32_t* token_id_counts = device_start_word_indices; thrust::transform_inclusive_scan(rmm::exec_policy(stream), device_tokens_per_word.data(), device_tokens_per_word.data() + num_code_points, token_id_counts, tranform_fn{}, thrust::plus<uint32_t>()); // Update the device_strings_offsets using the token_id_counts thrust::for_each_n(rmm::exec_policy(stream), thrust::make_counting_iterator<uint32_t>(1), num_strings, update_strings_lengths_fn{token_id_counts, device_strings_offsets}); } } // namespace detail } // namespace nvtext
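
In the tokenize() path above, tokens_per_word ends up holding a token count only at each word-start code point (zero elsewhere), and thrust::transform_inclusive_scan turns it into running totals that update_strings_lengths_fn then samples at the string offsets. The standalone host program below is an illustrative sketch added here showing just that scan step; the counts are made up and not taken from the original file.

#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/transform_scan.h>

#include <cstdint>
#include <cstdio>

struct to_uint32 {  // widen uint8_t counts to uint32_t, as tranform_fn does above
    __host__ __device__ uint32_t operator()(uint8_t c) const { return c; }
};

int main()
{
    // token counts written at word-start positions; zeros elsewhere
    uint8_t  tokens_per_word[8] = {2, 0, 0, 1, 0, 3, 0, 0};
    uint32_t token_id_counts[8];

    thrust::transform_inclusive_scan(thrust::host,
                                     tokens_per_word, tokens_per_word + 8,
                                     token_id_counts,
                                     to_uint32{},
                                     thrust::plus<uint32_t>());

    for (int i = 0; i < 8; ++i) std::printf("%u ", token_id_counts[i]);  // 2 2 2 3 3 6 6 6
    std::printf("\n");
    return 0;
}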
0170cfdf8e1d8450e36fb06267673d9bc302557e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix multiplication: C = A * B. * Device code. */ #ifndef _MATRIXMUL_KERNEL_H_ #define _MATRIXMUL_KERNEL_H_ #include <stdio.h> #include "matrixmul.h" //////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_idata input data in global memory //! @param g_odata output data in global memory //////////////////////////////////////////////////////////////////////////////// // Matrix multiplication kernel thread specification __global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P) { const int BW = 32; int ex = threadIdx.x + blockIdx.x * BW, ey = threadIdx.y + blockIdx.y * BW, tx = threadIdx.x, ty = threadIdx.y; __shared__ float dM[BW][BW], dN[BW][BW]; float sum = 0; int bCount = (M.width + BW - 1) / BW; // for(int tt = 0; tt < 1000; tt++) { // sum = 0; for(int i = 0; i < bCount; i++) { if(ey < M.height && BW * i + tx < M.width) dM[ty][tx] = M.elements[ey * M.width + BW * i + tx]; else dM[ty][tx] = 0; if(ty + BW * i < N.height && ex < N.width) dN[ty][tx] = N.elements[(ty + BW * i) * N.width + ex]; else dN[ty][tx] = 0; /* if(i * BW + tx < N.height && BW * blockIdx.x + ty < N.width) dN[tx][ty] = N.elements[(i * BW + tx) * N.width + BW * blockIdx.x + ty]; else dN[tx][ty] = 0; */ __syncthreads(); for(int k = 0; k < BW; k++) { sum += dM[ty][k] * dN[k][tx]; } __syncthreads(); } // } if(ex < P.width && ey < P.height) P.elements[P.width * ey + ex] = sum; } #endif // #ifndef _MATRIXMUL_KERNEL_H_
0170cfdf8e1d8450e36fb06267673d9bc302557e.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* Matrix multiplication: C = A * B. * Device code. */ #ifndef _MATRIXMUL_KERNEL_H_ #define _MATRIXMUL_KERNEL_H_ #include <stdio.h> #include "matrixmul.h" //////////////////////////////////////////////////////////////////////////////// //! Simple test kernel for device functionality //! @param g_idata input data in global memory //! @param g_odata output data in global memory //////////////////////////////////////////////////////////////////////////////// // Matrix multiplication kernel thread specification __global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P) { const int BW = 32; int ex = threadIdx.x + blockIdx.x * BW, ey = threadIdx.y + blockIdx.y * BW, tx = threadIdx.x, ty = threadIdx.y; __shared__ float dM[BW][BW], dN[BW][BW]; float sum = 0; int bCount = (M.width + BW - 1) / BW; // for(int tt = 0; tt < 1000; tt++) { // sum = 0; for(int i = 0; i < bCount; i++) { if(ey < M.height && BW * i + tx < M.width) dM[ty][tx] = M.elements[ey * M.width + BW * i + tx]; else dM[ty][tx] = 0; if(ty + BW * i < N.height && ex < N.width) dN[ty][tx] = N.elements[(ty + BW * i) * N.width + ex]; else dN[ty][tx] = 0; /* if(i * BW + tx < N.height && BW * blockIdx.x + ty < N.width) dN[tx][ty] = N.elements[(i * BW + tx) * N.width + BW * blockIdx.x + ty]; else dN[tx][ty] = 0; */ __syncthreads(); for(int k = 0; k < BW; k++) { sum += dM[ty][k] * dN[k][tx]; } __syncthreads(); } // } if(ex < P.width && ey < P.height) P.elements[P.width * ey + ex] = sum; } #endif // #ifndef _MATRIXMUL_KERNEL_H_
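Both copies of this kernel hard-code a 32x32 tile (BW = 32) and bounds-check every tile load and the final store, so matrix sizes that are not multiples of 32 are handled correctly. A possible host-side launch consistent with that tiling is sketched below; the Matrix struct only mirrors the fields the kernel dereferences (the real definition lives in matrixmul.h, which is not shown here), so treat the struct and the helper as assumptions.

// Hedged sketch of a host-side launch matching the kernel's 32x32 tiling.
// 'Matrix' mirrors the fields the kernel uses; the real struct is in matrixmul.h.
#include <cuda_runtime.h>

struct Matrix { int width; int height; float* elements; };

__global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P);  // the kernel above

void LaunchMatrixMul(Matrix M, Matrix N, Matrix P)
{
    const int BW = 32;                      // must match BW inside the kernel
    dim3 block(BW, BW);                     // one thread per element of a 32x32 tile
    dim3 grid((P.width  + BW - 1) / BW,     // ceil-divide so partial tiles are covered
              (P.height + BW - 1) / BW);

    MatrixMulKernel<<<grid, block>>>(M, N, P);
    cudaDeviceSynchronize();                // simple sketch: block until the kernel finishes
}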
53b57581ce586967b20c9844f2b7f43438cafa24.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef CPU_ONLY #include "execution_model/host_device_vector.h" #include "execution_model/host_vector.h" #include "kernel/kernel_launch.h" #include "kernel/kernel_launch_impl_cpu.h" #include "kernel/kernel_launch_impl_gpu.cuh" #include "kernel/make_runtime_constants_reduce_launchable.h" #include "kernel/reduce_samples.cuh" #include "kernel/runtime_constants_reducer_impl_gpu.cuh" #include "kernel/work_division.h" #include "lib/assert.h" #include "lib/cuda/reduce.cuh" #include "lib/cuda/utils.h" #include "lib/span.h" #include "meta/all_values/tag.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include <algorithm> #include <random> using kernel::WorkDivision; template <typename T> __global__ void sum_sub_blocks(Span<const T> in, Span<T> out, unsigned sub_block_size) { unsigned thread_idx = threadIdx.x; unsigned block_idx = blockIdx.x; unsigned block_size = blockDim.x; unsigned overall_idx = thread_idx + block_idx * block_size; unsigned sub_block_idx = overall_idx / sub_block_size; auto add = [](const T &lhs, const T &rhs) { return lhs + rhs; }; const T total = sub_block_reduce<T>(in[overall_idx], add, thread_idx, block_size, sub_block_size); if (thread_idx % sub_block_size == 0) { out[sub_block_idx] = total; } } #define EXPECT_FLOATS_EQ(expected, actual) \ EXPECT_EQ(expected.size(), actual.size()) << "Sizes differ."; \ for (size_t idx = 0; idx < ::min(expected.size(), actual.size()); \ ++idx) { \ EXPECT_FLOAT_EQ(expected[idx], actual[idx]) \ << "at index: " << idx << " and line: " << __LINE__; \ } TEST(Reduce, sum) { auto run_test = [](auto dist, auto check_equality) { for (unsigned num_locations : {1, 2, 3, 7, 8, 17, 32, 256}) { for (unsigned samples_per : {1, 2, 3, 7, 8, 32, 37, 49, 128, 189, 256}) for (unsigned block_size : {32, 128, 256, 1024}) { for (unsigned base_target_samples_per_thread : {1, 2, 3, 5}) { const unsigned size = num_locations * samples_per; // avoid this test taking too long if (size > 4096) { continue; } const unsigned target_x_block_size = block_size; unsigned target_samples_per_thread = base_target_samples_per_thread; WorkDivision division; do { division = WorkDivision({block_size, target_x_block_size, true, target_samples_per_thread}, samples_per, num_locations, 1); target_samples_per_thread *= 2; } while (division.num_sample_blocks() != 1); ASSERT_EQ(division.num_sample_blocks(), 1); std::mt19937 gen(testing::UnitTest::GetInstance()->random_seed()); using T = std::decay_t<decltype(dist(gen))>; HostDeviceVector<T> vals(size); std::generate(vals.begin(), vals.end(), [&]() { return dist(gen); }); HostDeviceVector<T> out_vals(num_locations); Span<const T> in = vals; Span<T> out = out_vals; bool use_direct_approach = block_size % samples_per == 0 && size % block_size == 0; if (use_direct_approach) { unsigned num_blocks = size / block_size; always_assert(num_blocks * block_size == size); hipLaunchKernelGGL(( sum_sub_blocks<T>) , dim3(num_blocks), dim3(block_size), 0, 0, in, out, samples_per); } auto division_run = [&](auto tag, Span<T> out_div) { constexpr ExecutionModel exec = tag; kernel::KernelLaunch<exec>::run( ThrustData<exec>{}, division, 0, division.total_num_blocks(), kernel::make_runtime_constants_reduce_launchable<exec, T>( 1, [=] HOST_DEVICE(const WorkDivision &division, const kernel::GridLocationInfo &info, const unsigned /*block_idx*/, const unsigned /*thread_idx*/, const auto &, auto &interactor) { auto [start_sample, end_sample, j, unused] = info; T total = 0; for 
(unsigned i = start_sample; i < end_sample; ++i) { total += in[i + j * samples_per]; } auto add = [](const T &lhs, const T &rhs) { return lhs + rhs; }; auto op = interactor[0].reduce( total, add, division.sample_block_size()); if (op.has_value()) { out_div[j] = *op; } })); }; HostDeviceVector<T> out_vals_division(num_locations); HostVector<T> out_vals_division_cpu(num_locations); division_run(tag_v<ExecutionModel::GPU>, out_vals_division); division_run(tag_v<ExecutionModel::CPU>, out_vals_division_cpu); std::vector<T> expected(num_locations, 0.f); for (unsigned location = 0; location < num_locations; ++location) { for (unsigned i = location * samples_per; i < (location + 1) * samples_per; ++i) { expected[location] += vals[i]; } } if (use_direct_approach) { check_equality(expected, out_vals); } check_equality(expected, out_vals_division); check_equality(expected, out_vals_division_cpu); } } } }; run_test(std::uniform_real_distribution<double>(0.0, 1.0), [](const auto &expected, const auto &actual) { EXPECT_FLOATS_EQ(expected, actual); }); run_test(std::uniform_int_distribution<int>(-100, 100), [](const auto &expected, const auto &actual) { EXPECT_THAT(expected, testing::ElementsAreArray(actual)); }); } #endif
53b57581ce586967b20c9844f2b7f43438cafa24.cu
#ifndef CPU_ONLY #include "execution_model/host_device_vector.h" #include "execution_model/host_vector.h" #include "kernel/kernel_launch.h" #include "kernel/kernel_launch_impl_cpu.h" #include "kernel/kernel_launch_impl_gpu.cuh" #include "kernel/make_runtime_constants_reduce_launchable.h" #include "kernel/reduce_samples.cuh" #include "kernel/runtime_constants_reducer_impl_gpu.cuh" #include "kernel/work_division.h" #include "lib/assert.h" #include "lib/cuda/reduce.cuh" #include "lib/cuda/utils.h" #include "lib/span.h" #include "meta/all_values/tag.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include <algorithm> #include <random> using kernel::WorkDivision; template <typename T> __global__ void sum_sub_blocks(Span<const T> in, Span<T> out, unsigned sub_block_size) { unsigned thread_idx = threadIdx.x; unsigned block_idx = blockIdx.x; unsigned block_size = blockDim.x; unsigned overall_idx = thread_idx + block_idx * block_size; unsigned sub_block_idx = overall_idx / sub_block_size; auto add = [](const T &lhs, const T &rhs) { return lhs + rhs; }; const T total = sub_block_reduce<T>(in[overall_idx], add, thread_idx, block_size, sub_block_size); if (thread_idx % sub_block_size == 0) { out[sub_block_idx] = total; } } #define EXPECT_FLOATS_EQ(expected, actual) \ EXPECT_EQ(expected.size(), actual.size()) << "Sizes differ."; \ for (size_t idx = 0; idx < std::min(expected.size(), actual.size()); \ ++idx) { \ EXPECT_FLOAT_EQ(expected[idx], actual[idx]) \ << "at index: " << idx << " and line: " << __LINE__; \ } TEST(Reduce, sum) { auto run_test = [](auto dist, auto check_equality) { for (unsigned num_locations : {1, 2, 3, 7, 8, 17, 32, 256}) { for (unsigned samples_per : {1, 2, 3, 7, 8, 32, 37, 49, 128, 189, 256}) for (unsigned block_size : {32, 128, 256, 1024}) { for (unsigned base_target_samples_per_thread : {1, 2, 3, 5}) { const unsigned size = num_locations * samples_per; // avoid this test taking too long if (size > 4096) { continue; } const unsigned target_x_block_size = block_size; unsigned target_samples_per_thread = base_target_samples_per_thread; WorkDivision division; do { division = WorkDivision({block_size, target_x_block_size, true, target_samples_per_thread}, samples_per, num_locations, 1); target_samples_per_thread *= 2; } while (division.num_sample_blocks() != 1); ASSERT_EQ(division.num_sample_blocks(), 1); std::mt19937 gen(testing::UnitTest::GetInstance()->random_seed()); using T = std::decay_t<decltype(dist(gen))>; HostDeviceVector<T> vals(size); std::generate(vals.begin(), vals.end(), [&]() { return dist(gen); }); HostDeviceVector<T> out_vals(num_locations); Span<const T> in = vals; Span<T> out = out_vals; bool use_direct_approach = block_size % samples_per == 0 && size % block_size == 0; if (use_direct_approach) { unsigned num_blocks = size / block_size; always_assert(num_blocks * block_size == size); sum_sub_blocks<T> <<<num_blocks, block_size>>>(in, out, samples_per); } auto division_run = [&](auto tag, Span<T> out_div) { constexpr ExecutionModel exec = tag; kernel::KernelLaunch<exec>::run( ThrustData<exec>{}, division, 0, division.total_num_blocks(), kernel::make_runtime_constants_reduce_launchable<exec, T>( 1, [=] HOST_DEVICE(const WorkDivision &division, const kernel::GridLocationInfo &info, const unsigned /*block_idx*/, const unsigned /*thread_idx*/, const auto &, auto &interactor) { auto [start_sample, end_sample, j, unused] = info; T total = 0; for (unsigned i = start_sample; i < end_sample; ++i) { total += in[i + j * samples_per]; } auto add = [](const T &lhs, const 
T &rhs) { return lhs + rhs; }; auto op = interactor[0].reduce( total, add, division.sample_block_size()); if (op.has_value()) { out_div[j] = *op; } })); }; HostDeviceVector<T> out_vals_division(num_locations); HostVector<T> out_vals_division_cpu(num_locations); division_run(tag_v<ExecutionModel::GPU>, out_vals_division); division_run(tag_v<ExecutionModel::CPU>, out_vals_division_cpu); std::vector<T> expected(num_locations, 0.f); for (unsigned location = 0; location < num_locations; ++location) { for (unsigned i = location * samples_per; i < (location + 1) * samples_per; ++i) { expected[location] += vals[i]; } } if (use_direct_approach) { check_equality(expected, out_vals); } check_equality(expected, out_vals_division); check_equality(expected, out_vals_division_cpu); } } } }; run_test(std::uniform_real_distribution<double>(0.0, 1.0), [](const auto &expected, const auto &actual) { EXPECT_FLOATS_EQ(expected, actual); }); run_test(std::uniform_int_distribution<int>(-100, 100), [](const auto &expected, const auto &actual) { EXPECT_THAT(expected, testing::ElementsAreArray(actual)); }); } #endif
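The sub_block_reduce helper exercised by this test lives in lib/cuda/reduce.cuh and is not reproduced in this dump. For orientation only, the sketch below shows one common way to sum within a sub-block when the sub-block fits inside a single warp, using __shfl_down_sync; it is not the project's implementation, and it assumes a power-of-two sub_block_size no larger than the warp size with all 32 lanes active.

// Illustrative warp-level sub-block sum (NOT the project's sub_block_reduce).
// Assumes sub_block_size is a power of two, <= 32, and the full warp is active.
__device__ inline float warp_sub_block_sum(float val, unsigned sub_block_size)
{
  // Each step folds in the value held sub_block_size/2, /4, ... lanes to the right.
  for (unsigned offset = sub_block_size / 2; offset > 0; offset /= 2) {
    val += __shfl_down_sync(0xffffffffu, val, offset);
  }
  // Only the lowest lane of each aligned sub-block now holds that sub-block's total,
  // which matches how the test stores results from threads where
  // thread_idx % sub_block_size == 0.
  return val;
}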
0e5dabb37cec0411cc2feab9176d034658f1e7c5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <gtest/gtest.h> #include <limits> #include <raft/core/operators.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/linalg/map_reduce.cuh> #include <raft/linalg/map_then_reduce.cuh> #include <raft/random/rng.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/device_scalar.hpp> #include <rmm/device_uvector.hpp> namespace raft { namespace linalg { template <typename InType, typename OutType, typename MapOp> __global__ void naiveMapReduceKernel(OutType* out, const InType* in, size_t len, MapOp map) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) { raft::myAtomicAdd(out, (OutType)map(in[idx])); } } template <typename InType, typename OutType, typename MapOp> void naiveMapReduce(OutType* out, const InType* in, size_t len, MapOp map, hipStream_t stream) { static const int TPB = 64; int nblks = raft::ceildiv(len, (size_t)TPB); hipLaunchKernelGGL(( naiveMapReduceKernel<InType, OutType, MapOp>), dim3(nblks), dim3(TPB), 0, stream, out, in, len, map); RAFT_CUDA_TRY(hipPeekAtLastError()); } template <typename T> struct MapReduceInputs { T tolerance; size_t len; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const MapReduceInputs<T>& dims) { return os; } // Or else, we get the following compilation error // for an extended __device__ lambda cannot have private or protected access // within its class template <typename InType, typename OutType> void mapReduceLaunch( OutType* out_ref, OutType* out, const InType* in, size_t len, hipStream_t stream) { naiveMapReduce(out_ref, in, len, raft::identity_op{}, stream); mapThenSumReduce(out, len, raft::identity_op{}, 0, in); } template <typename InType, typename OutType> class MapReduceTest : public ::testing::TestWithParam<MapReduceInputs<InType>> { public: MapReduceTest() : params(::testing::TestWithParam<MapReduceInputs<InType>>::GetParam()), stream(resource::get_cuda_stream(handle)), in(params.len, stream), out_ref(params.len, stream), out(params.len, stream) { } protected: void SetUp() override { raft::random::RngState r(params.seed); auto len = params.len; uniform(handle, r, in.data(), len, InType(-1.0), InType(1.0)); mapReduceLaunch(out_ref.data(), out.data(), in.data(), len, stream); resource::sync_stream(handle, stream); } protected: raft::resources handle; hipStream_t stream; MapReduceInputs<InType> params; rmm::device_uvector<InType> in; rmm::device_uvector<OutType> out_ref, out; }; const std::vector<MapReduceInputs<float>> inputsf = {{0.001f, 1024 * 1024, 1234ULL}}; typedef MapReduceTest<float, float> MapReduceTestFF; TEST_P(MapReduceTestFF, Result) { ASSERT_TRUE(devArrMatch( out_ref.data(), out.data(), params.len, CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_SUITE_P(MapReduceTests, 
MapReduceTestFF, ::testing::ValuesIn(inputsf)); typedef MapReduceTest<float, double> MapReduceTestFD; TEST_P(MapReduceTestFD, Result) { ASSERT_TRUE(devArrMatch( out_ref.data(), out.data(), params.len, CompareApprox<double>(params.tolerance), stream)); } INSTANTIATE_TEST_SUITE_P(MapReduceTests, MapReduceTestFD, ::testing::ValuesIn(inputsf)); const std::vector<MapReduceInputs<double>> inputsd = {{0.000001, 1024 * 1024, 1234ULL}}; typedef MapReduceTest<double, double> MapReduceTestDD; TEST_P(MapReduceTestDD, Result) { ASSERT_TRUE(devArrMatch( out_ref.data(), out.data(), params.len, CompareApprox<double>(params.tolerance), stream)); } INSTANTIATE_TEST_SUITE_P(MapReduceTests, MapReduceTestDD, ::testing::ValuesIn(inputsd)); template <typename T> class MapGenericReduceTest : public ::testing::Test { using InType = typename T::first_type; using OutType = typename T::second_type; protected: MapGenericReduceTest() : input(n, resource::get_cuda_stream(handle)), output(resource::get_cuda_stream(handle)) { initInput(input.data(), input.size(), resource::get_cuda_stream(handle)); } public: void initInput(InType* input, int n, hipStream_t stream) { raft::random::RngState r(137); uniform(handle, r, input, n, InType(2), InType(3)); InType val = 1; raft::update_device(input + 42, &val, 1, resource::get_cuda_stream(handle)); val = 5; raft::update_device(input + 337, &val, 1, resource::get_cuda_stream(handle)); } void testMin() { OutType neutral = std::numeric_limits<InType>::max(); auto output_view = raft::make_device_scalar_view(output.data()); auto input_view = raft::make_device_vector_view<const InType>( input.data(), static_cast<std::uint32_t>(input.size())); map_reduce(handle, input_view, output_view, neutral, raft::identity_op{}, hipcub::Min()); EXPECT_TRUE(raft::devArrMatch( OutType(1), output.data(), 1, raft::Compare<OutType>(), resource::get_cuda_stream(handle))); } void testMax() { OutType neutral = std::numeric_limits<InType>::min(); auto output_view = raft::make_device_scalar_view(output.data()); auto input_view = raft::make_device_vector_view<const InType>( input.data(), static_cast<std::uint32_t>(input.size())); map_reduce(handle, input_view, output_view, neutral, raft::identity_op{}, hipcub::Max()); EXPECT_TRUE(raft::devArrMatch( OutType(5), output.data(), 1, raft::Compare<OutType>(), resource::get_cuda_stream(handle))); } protected: raft::resources handle; hipStream_t stream; int n = 1237; rmm::device_uvector<InType> input; rmm::device_scalar<OutType> output; }; using IoTypePair = ::testing::Types<std::pair<float, float>, std::pair<float, double>, std::pair<double, double>>; TYPED_TEST_CASE(MapGenericReduceTest, IoTypePair); TYPED_TEST(MapGenericReduceTest, min) { this->testMin(); } TYPED_TEST(MapGenericReduceTest, max) { this->testMax(); } } // end namespace linalg } // end namespace raft
0e5dabb37cec0411cc2feab9176d034658f1e7c5.cu
/* * Copyright (c) 2018-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "../test_utils.cuh" #include <gtest/gtest.h> #include <limits> #include <raft/core/operators.hpp> #include <raft/core/resource/cuda_stream.hpp> #include <raft/linalg/map_reduce.cuh> #include <raft/linalg/map_then_reduce.cuh> #include <raft/random/rng.cuh> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/device_scalar.hpp> #include <rmm/device_uvector.hpp> namespace raft { namespace linalg { template <typename InType, typename OutType, typename MapOp> __global__ void naiveMapReduceKernel(OutType* out, const InType* in, size_t len, MapOp map) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) { raft::myAtomicAdd(out, (OutType)map(in[idx])); } } template <typename InType, typename OutType, typename MapOp> void naiveMapReduce(OutType* out, const InType* in, size_t len, MapOp map, cudaStream_t stream) { static const int TPB = 64; int nblks = raft::ceildiv(len, (size_t)TPB); naiveMapReduceKernel<InType, OutType, MapOp><<<nblks, TPB, 0, stream>>>(out, in, len, map); RAFT_CUDA_TRY(cudaPeekAtLastError()); } template <typename T> struct MapReduceInputs { T tolerance; size_t len; unsigned long long int seed; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const MapReduceInputs<T>& dims) { return os; } // Or else, we get the following compilation error // for an extended __device__ lambda cannot have private or protected access // within its class template <typename InType, typename OutType> void mapReduceLaunch( OutType* out_ref, OutType* out, const InType* in, size_t len, cudaStream_t stream) { naiveMapReduce(out_ref, in, len, raft::identity_op{}, stream); mapThenSumReduce(out, len, raft::identity_op{}, 0, in); } template <typename InType, typename OutType> class MapReduceTest : public ::testing::TestWithParam<MapReduceInputs<InType>> { public: MapReduceTest() : params(::testing::TestWithParam<MapReduceInputs<InType>>::GetParam()), stream(resource::get_cuda_stream(handle)), in(params.len, stream), out_ref(params.len, stream), out(params.len, stream) { } protected: void SetUp() override { raft::random::RngState r(params.seed); auto len = params.len; uniform(handle, r, in.data(), len, InType(-1.0), InType(1.0)); mapReduceLaunch(out_ref.data(), out.data(), in.data(), len, stream); resource::sync_stream(handle, stream); } protected: raft::resources handle; cudaStream_t stream; MapReduceInputs<InType> params; rmm::device_uvector<InType> in; rmm::device_uvector<OutType> out_ref, out; }; const std::vector<MapReduceInputs<float>> inputsf = {{0.001f, 1024 * 1024, 1234ULL}}; typedef MapReduceTest<float, float> MapReduceTestFF; TEST_P(MapReduceTestFF, Result) { ASSERT_TRUE(devArrMatch( out_ref.data(), out.data(), params.len, CompareApprox<float>(params.tolerance), stream)); } INSTANTIATE_TEST_SUITE_P(MapReduceTests, MapReduceTestFF, ::testing::ValuesIn(inputsf)); typedef MapReduceTest<float, double> MapReduceTestFD; TEST_P(MapReduceTestFD, 
Result) { ASSERT_TRUE(devArrMatch( out_ref.data(), out.data(), params.len, CompareApprox<double>(params.tolerance), stream)); } INSTANTIATE_TEST_SUITE_P(MapReduceTests, MapReduceTestFD, ::testing::ValuesIn(inputsf)); const std::vector<MapReduceInputs<double>> inputsd = {{0.000001, 1024 * 1024, 1234ULL}}; typedef MapReduceTest<double, double> MapReduceTestDD; TEST_P(MapReduceTestDD, Result) { ASSERT_TRUE(devArrMatch( out_ref.data(), out.data(), params.len, CompareApprox<double>(params.tolerance), stream)); } INSTANTIATE_TEST_SUITE_P(MapReduceTests, MapReduceTestDD, ::testing::ValuesIn(inputsd)); template <typename T> class MapGenericReduceTest : public ::testing::Test { using InType = typename T::first_type; using OutType = typename T::second_type; protected: MapGenericReduceTest() : input(n, resource::get_cuda_stream(handle)), output(resource::get_cuda_stream(handle)) { initInput(input.data(), input.size(), resource::get_cuda_stream(handle)); } public: void initInput(InType* input, int n, cudaStream_t stream) { raft::random::RngState r(137); uniform(handle, r, input, n, InType(2), InType(3)); InType val = 1; raft::update_device(input + 42, &val, 1, resource::get_cuda_stream(handle)); val = 5; raft::update_device(input + 337, &val, 1, resource::get_cuda_stream(handle)); } void testMin() { OutType neutral = std::numeric_limits<InType>::max(); auto output_view = raft::make_device_scalar_view(output.data()); auto input_view = raft::make_device_vector_view<const InType>( input.data(), static_cast<std::uint32_t>(input.size())); map_reduce(handle, input_view, output_view, neutral, raft::identity_op{}, cub::Min()); EXPECT_TRUE(raft::devArrMatch( OutType(1), output.data(), 1, raft::Compare<OutType>(), resource::get_cuda_stream(handle))); } void testMax() { OutType neutral = std::numeric_limits<InType>::min(); auto output_view = raft::make_device_scalar_view(output.data()); auto input_view = raft::make_device_vector_view<const InType>( input.data(), static_cast<std::uint32_t>(input.size())); map_reduce(handle, input_view, output_view, neutral, raft::identity_op{}, cub::Max()); EXPECT_TRUE(raft::devArrMatch( OutType(5), output.data(), 1, raft::Compare<OutType>(), resource::get_cuda_stream(handle))); } protected: raft::resources handle; cudaStream_t stream; int n = 1237; rmm::device_uvector<InType> input; rmm::device_scalar<OutType> output; }; using IoTypePair = ::testing::Types<std::pair<float, float>, std::pair<float, double>, std::pair<double, double>>; TYPED_TEST_CASE(MapGenericReduceTest, IoTypePair); TYPED_TEST(MapGenericReduceTest, min) { this->testMin(); } TYPED_TEST(MapGenericReduceTest, max) { this->testMax(); } } // end namespace linalg } // end namespace raft
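The reduction being validated here (map every element, then sum) can also be written as a single thrust::transform_reduce call; the sketch below only illustrates the semantics the test checks and is not a replacement for the raft primitives. The identity functor and the function name are placeholders.

// Semantics of the tested operation, sketched with Thrust: out = sum(map(in[i])).
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>

struct identity_map {
  __host__ __device__ float operator()(float x) const { return x; }
};

float map_then_sum(const thrust::device_vector<float>& in)
{
  // Applies identity_map to every element and folds the results with operator+.
  return thrust::transform_reduce(in.begin(), in.end(),
                                  identity_map{}, 0.0f, thrust::plus<float>());
}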
6c55997d42b5655fc3c84e5c9cad75e5181a546d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This work is part of the Core Imaging Library developed by Visual Analytics and Imaging System Group of the Science Technology Facilities Council, STFC Copyright 2017 Daniil Kazantsev Copyright 2017 Srikanth Nagella, Edoardo Pasca Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "shared.h" #include "dTV_FGP_GPU_core.h" #include <thrust/functional.h> #include <thrust/device_vector.h> #include <thrust/transform_reduce.h> /* CUDA implementation of FGP-dTV [1,2] denoising/regularization model (2D/3D case) * which employs structural similarity of the level sets of two images/volumes, see [1,2] * The current implementation updates image 1 while image 2 is being fixed. * * Input Parameters: * 1. Noisy image/volume [REQUIRED] * 2. Additional reference image/volume of the same dimensions as (1) [REQUIRED] * 3. lambdaPar - regularization parameter [REQUIRED] * 4. Number of iterations [OPTIONAL] * 5. eplsilon: tolerance constant [OPTIONAL] * 6. eta: smoothing constant to calculate gradient of the reference [OPTIONAL] * * 7. TV-type: methodTV - 'iso' (0) or 'l1' (1) [OPTIONAL] * 8. nonneg: 'nonnegativity (0 is OFF by default) [OPTIONAL] * 9. GPU device number if for multigpu run (default 0) [OPTIONAL] * Output: * [1] Filtered/regularized image/volume * [2] Information vector which contains [iteration no., reached tolerance] * * This function is based on the Matlab's codes and papers by * [1] Amir Beck and Marc Teboulle, "Fast Gradient-Based Algorithms for Constrained Total Variation Image Denoising and Deblurring Problems" * [2] M. J. Ehrhardt and M. M. Betcke, Multi-Contrast MRI Reconstruction with Structure-Guided Total Variation, SIAM Journal on Imaging Sciences 9(3), pp. 10841106 */ #define BLKXSIZE2D 16 #define BLKYSIZE2D 16 #define BLKXSIZE 8 #define BLKYSIZE 8 #define BLKZSIZE 8 #define idivup(a, b) ( ((a)%(b) != 0) ? 
(a)/(b)+1 : (a)/(b) ) //struct square { __host__ __device__ float operator()(float x) { return x * x; } }; /************************************************/ /*****************2D modules*********************/ /************************************************/ __global__ void GradNorm_func2D_kernel(float *Refd, float *Refd_x, float *Refd_y, float eta, int N, int M, int ImSize) { float val1, val2, gradX, gradY, magn; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { /* boundary conditions */ if (xIndex >= N-1) val1 = 0.0f; else val1 = Refd[(xIndex+1) + N*yIndex]; if (yIndex >= M-1) val2 = 0.0f; else val2 = Refd[(xIndex) + N*(yIndex + 1)]; gradX = val1 - Refd[index]; gradY = val2 - Refd[index]; magn = pow(gradX,2) + pow(gradY,2); magn = sqrt(magn + pow(eta,2)); Refd_x[index] = gradX/magn; Refd_y[index] = gradY/magn; } return; } __global__ void ProjectVect_func2D_kernel(float *R1, float *R2, float *Refd_x, float *Refd_y, int N, int M, int ImSize) { float in_prod; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { in_prod = R1[index]*Refd_x[index] + R2[index]*Refd_y[index]; /* calculate inner product */ R1[index] = R1[index] - in_prod*Refd_x[index]; R2[index] = R2[index] - in_prod*Refd_y[index]; } return; } __global__ void Obj_dfunc2D_kernel(float *Ad, float *D, float *R1, float *R2, int N, int M, int ImSize, float lambda) { float val1,val2; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { if (xIndex <= 0) {val1 = 0.0f;} else {val1 = R1[(xIndex-1) + N*yIndex];} if (yIndex <= 0) {val2 = 0.0f;} else {val2 = R2[xIndex + N*(yIndex-1)];} //Write final result to global memory D[index] = Ad[index] - lambda*(R1[index] + R2[index] - val1 - val2); } return; } __global__ void Grad_dfunc2D_kernel(float *P1, float *P2, float *D, float *R1, float *R2, float *Refd_x, float *Refd_y, int N, int M, int ImSize, float multip) { float val1,val2,in_prod; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { /* boundary conditions */ if (xIndex >= N-1) val1 = 0.0f; else val1 = D[index] - D[(xIndex+1) + N*yIndex]; if (yIndex >= M-1) val2 = 0.0f; else val2 = D[index] - D[(xIndex) + N*(yIndex + 1)]; in_prod = val1*Refd_x[index] + val2*Refd_y[index]; /* calculate inner product */ val1 = val1 - in_prod*Refd_x[index]; val2 = val2 - in_prod*Refd_y[index]; //Write final result to global memory P1[index] = R1[index] + multip*val1; P2[index] = R2[index] + multip*val2; } return; } __global__ void Proj_dfunc2D_iso_kernel(float *P1, float *P2, int N, int M, int ImSize) { float denom; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { denom = pow(P1[index],2) + pow(P2[index],2); if (denom > 1.0f) { P1[index] = P1[index]/sqrt(denom); P2[index] = P2[index]/sqrt(denom); } } return; } __global__ void Proj_dfunc2D_aniso_kernel(float *P1, float *P2, int N, int M, int 
ImSize) { float val1, val2; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { val1 = abs(P1[index]); val2 = abs(P2[index]); if (val1 < 1.0f) {val1 = 1.0f;} if (val2 < 1.0f) {val2 = 1.0f;} P1[index] = P1[index]/val1; P2[index] = P2[index]/val2; } return; } __global__ void Rupd_dfunc2D_kernel(float *P1, float *P1_old, float *P2, float *P2_old, float *R1, float *R2, float tkp1, float tk, float multip2, int N, int M, int ImSize) { //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { R1[index] = P1[index] + multip2*(P1[index] - P1_old[index]); R2[index] = P2[index] + multip2*(P2[index] - P2_old[index]); } return; } __global__ void dTVnonneg2D_kernel(float* Output, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { if (Output[index] < 0.0f) Output[index] = 0.0f; } } __global__ void dTVcopy_kernel2D(float *Input, float* Output, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { Output[index] = Input[index]; } } __global__ void dTVcopy_kernel3D(float *Input, float* Output, int N, int M, int Z, int num_total) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if (index < num_total) { Output[index] = Input[index]; } } __global__ void dTVResidCalc2D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { Output[index] = Input1[index] - Input2[index]; } } __global__ void dTVResidCalc3D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int Z, int num_total) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if (index < num_total) { Output[index] = Input1[index] - Input2[index]; } } /************************************************/ /*****************3D modules*********************/ /************************************************/ __global__ void GradNorm_func3D_kernel(float *Refd, float *Refd_x, float *Refd_y, float *Refd_z, float eta, int N, int M, int Z, int ImSize) { float val1, val2, val3, gradX, gradY, gradZ, magn; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { /* boundary conditions */ if (i >= N-1) val1 = 0.0f; else val1 = Refd[(N*M)*k + (i+1) + N*j]; if (j >= M-1) val2 = 0.0f; else val2 = Refd[(N*M)*k + i + N*(j+1)]; if (k >= Z-1) val3 = 0.0f; else val3 = Refd[(N*M)*(k+1) + i + N*j]; gradX = val1 - Refd[index]; gradY = val2 - Refd[index]; gradZ = val3 - Refd[index]; magn = pow(gradX,2) + pow(gradY,2) + pow(gradZ,2); magn = sqrt(magn + pow(eta,2)); Refd_x[index] = gradX/magn; 
Refd_y[index] = gradY/magn; Refd_z[index] = gradZ/magn; } return; } __global__ void ProjectVect_func3D_kernel(float *R1, float *R2, float *R3, float *Refd_x, float *Refd_y, float *Refd_z, int N, int M, int Z, int ImSize) { float in_prod; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { in_prod = R1[index]*Refd_x[index] + R2[index]*Refd_y[index] + R3[index]*Refd_z[index]; /* calculate inner product */ R1[index] = R1[index] - in_prod*Refd_x[index]; R2[index] = R2[index] - in_prod*Refd_y[index]; R3[index] = R3[index] - in_prod*Refd_z[index]; } return; } __global__ void Obj_dfunc3D_kernel(float *Ad, float *D, float *R1, float *R2, float *R3, int N, int M, int Z, int ImSize, float lambda) { float val1,val2,val3; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { if (i <= 0) {val1 = 0.0f;} else {val1 = R1[(N*M)*(k) + (i-1) + N*j];} if (j <= 0) {val2 = 0.0f;} else {val2 = R2[(N*M)*(k) + i + N*(j-1)];} if (k <= 0) {val3 = 0.0f;} else {val3 = R3[(N*M)*(k-1) + i + N*j];} //Write final result to global memory D[index] = Ad[index] - lambda*(R1[index] + R2[index] + R3[index] - val1 - val2 - val3); } return; } __global__ void Grad_dfunc3D_kernel(float *P1, float *P2, float *P3, float *D, float *R1, float *R2, float *R3, float *Refd_x, float *Refd_y, float *Refd_z, int N, int M, int Z, int ImSize, float multip) { float val1,val2,val3,in_prod; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { /* boundary conditions */ if (i >= N-1) val1 = 0.0f; else val1 = D[index] - D[(N*M)*(k) + (i+1) + N*j]; if (j >= M-1) val2 = 0.0f; else val2 = D[index] - D[(N*M)*(k) + i + N*(j+1)]; if (k >= Z-1) val3 = 0.0f; else val3 = D[index] - D[(N*M)*(k+1) + i + N*j]; in_prod = val1*Refd_x[index] + val2*Refd_y[index] + val3*Refd_z[index]; /* calculate inner product */ val1 = val1 - in_prod*Refd_x[index]; val2 = val2 - in_prod*Refd_y[index]; val3 = val3 - in_prod*Refd_z[index]; //Write final result to global memory P1[index] = R1[index] + multip*val1; P2[index] = R2[index] + multip*val2; P3[index] = R3[index] + multip*val3; } return; } __global__ void Proj_dfunc3D_iso_kernel(float *P1, float *P2, float *P3, int N, int M, int Z, int ImSize) { float denom,sq_denom; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { denom = pow(P1[index],2) + pow(P2[index],2) + pow(P3[index],2); if (denom > 1.0f) { sq_denom = 1.0f/sqrt(denom); P1[index] = P1[index]*sq_denom; P2[index] = P2[index]*sq_denom; P3[index] = P3[index]*sq_denom; } } return; } __global__ void Proj_dfunc3D_aniso_kernel(float *P1, float *P2, float *P3, int N, int M, int Z, int ImSize) { float val1, val2, val3; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i 
+ N*j; if ((i < N) && (j < M) && (k < Z)) { val1 = abs(P1[index]); val2 = abs(P2[index]); val3 = abs(P3[index]); if (val1 < 1.0f) {val1 = 1.0f;} if (val2 < 1.0f) {val2 = 1.0f;} if (val3 < 1.0f) {val3 = 1.0f;} P1[index] = P1[index]/val1; P2[index] = P2[index]/val2; P3[index] = P3[index]/val3; } return; } __global__ void Rupd_dfunc3D_kernel(float *P1, float *P1_old, float *P2, float *P2_old, float *P3, float *P3_old, float *R1, float *R2, float *R3, float tkp1, float tk, float multip2, int N, int M, int Z, int ImSize) { //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { R1[index] = P1[index] + multip2*(P1[index] - P1_old[index]); R2[index] = P2[index] + multip2*(P2[index] - P2_old[index]); R3[index] = P3[index] + multip2*(P3[index] - P3_old[index]); } return; } __global__ void dTVnonneg3D_kernel(float* Output, int N, int M, int Z, int num_total) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if (index < num_total) { if (Output[index] < 0.0f) Output[index] = 0.0f; } } /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ ////////////MAIN HOST FUNCTION /////////////// extern "C" int dTV_FGP_GPU_main(float *Input, float *InputRef, float *Output, float *infovector, float lambdaPar, int iter, float epsil, float eta, int methodTV, int nonneg, int gpu_device, int dimX, int dimY, int dimZ) { int deviceCount = -1; // number of devices hipGetDeviceCount(&deviceCount); if (deviceCount == 0) { fprintf(stderr, "No CUDA devices found\n"); return -1; } checkCudaErrors(hipSetDevice(gpu_device)); int count = 0, i; float re, multip,multip2; re = 0.0f; float tk = 1.0f; float tkp1=1.0f; if (dimZ <= 1) { /*2D verson*/ int ImSize = dimX*dimY; float *d_input, *d_update=NULL, *d_update_prev=NULL, *P1=NULL, *P2=NULL, *P1_prev=NULL, *P2_prev=NULL, *R1=NULL, *R2=NULL, *InputRef_x=NULL, *InputRef_y=NULL, *d_InputRef=NULL; dim3 dimBlock(BLKXSIZE2D,BLKYSIZE2D); dim3 dimGrid(idivup(dimX,BLKXSIZE2D), idivup(dimY,BLKYSIZE2D)); /*allocate space for images on device*/ checkCudaErrors( hipMalloc((void**)&d_input,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&d_update,ImSize*sizeof(float)) ); if (epsil != 0.0f) checkCudaErrors( hipMalloc((void**)&d_update_prev,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&P1,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&P2,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&P1_prev,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&P2_prev,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&R1,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&R2,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&d_InputRef,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&InputRef_x,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&InputRef_y,ImSize*sizeof(float)) ); checkCudaErrors( hipMemcpy(d_input,Input,ImSize*sizeof(float),hipMemcpyHostToDevice)); checkCudaErrors( hipMemcpy(d_InputRef,InputRef,ImSize*sizeof(float),hipMemcpyHostToDevice)); hipMemset(P1, 0, ImSize*sizeof(float)); hipMemset(P2, 0, ImSize*sizeof(float)); hipMemset(P1_prev, 0, ImSize*sizeof(float)); hipMemset(P2_prev, 0, ImSize*sizeof(float)); hipMemset(R1, 0, ImSize*sizeof(float)); hipMemset(R2, 0, 
ImSize*sizeof(float)); hipMemset(InputRef_x, 0, ImSize*sizeof(float)); hipMemset(InputRef_y, 0, ImSize*sizeof(float)); /******************** Run CUDA 2D kernel here ********************/ multip = (1.0f/(8.0f*lambdaPar)); /* calculate gradient vectors for the reference */ hipLaunchKernelGGL(( GradNorm_func2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_InputRef, InputRef_x, InputRef_y, eta, dimX, dimY, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); /* The main kernel */ for (i = 0; i < iter; i++) { if ((epsil != 0.0f) && (i % 5 == 0)) { hipLaunchKernelGGL(( dTVcopy_kernel2D), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_update_prev, dimX, dimY, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); } /*projects a 2D vector field R-1,2 onto the orthogonal complement of another 2D vector field InputRef_xy*/ hipLaunchKernelGGL(( ProjectVect_func2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, R1, R2, InputRef_x, InputRef_y, dimX, dimY, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); /* computing the gradient of the objective function */ hipLaunchKernelGGL(( Obj_dfunc2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input, d_update, R1, R2, dimX, dimY, ImSize, lambdaPar); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); if (nonneg != 0) { hipLaunchKernelGGL(( dTVnonneg2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, dimX, dimY, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); } /*Taking a step towards minus of the gradient*/ hipLaunchKernelGGL(( Grad_dfunc2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, d_update, R1, R2, InputRef_x, InputRef_y, dimX, dimY, ImSize, multip); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); /* projection step */ if (methodTV == 0)hipLaunchKernelGGL(( Proj_dfunc2D_iso_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, dimX, dimY, ImSize); /*isotropic TV*/ elsehipLaunchKernelGGL(( Proj_dfunc2D_aniso_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, dimX, dimY, ImSize); /*anisotropic TV*/ checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); tkp1 = (1.0f + sqrt(1.0f + 4.0f*tk*tk))*0.5f; multip2 = ((tk-1.0f)/tkp1); hipLaunchKernelGGL(( Rupd_dfunc2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P1_prev, P2, P2_prev, R1, R2, tkp1, tk, multip2, dimX, dimY, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); hipLaunchKernelGGL(( dTVcopy_kernel2D), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P1_prev, dimX, dimY, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); hipLaunchKernelGGL(( dTVcopy_kernel2D), dim3(dimGrid),dim3(dimBlock), 0, 0, P2, P2_prev, dimX, dimY, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); tk = tkp1; if ((epsil != 0.0f) && (i % 5 == 0)) { /* calculate norm - stopping rules using the Thrust library */ hipLaunchKernelGGL(( dTVResidCalc2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_update_prev, P1, dimX, dimY, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); // setup arguments square<float> unary_op; thrust::plus<float> binary_op; thrust::device_vector<float> d_vec(P1, P1 + ImSize); float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op)); 
thrust::device_vector<float> d_vec2(d_update, d_update + ImSize); float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op)); // compute norm re = (reduction/reduction2); if (re < epsil) count++; if (count > 3) break; } } /***************************************************************/ //copy result matrix from device to host memory hipMemcpy(Output,d_update,ImSize*sizeof(float),hipMemcpyDeviceToHost); hipFree(d_input); hipFree(d_update); if (epsil != 0.0f) hipFree(d_update_prev); hipFree(P1); hipFree(P2); hipFree(P1_prev); hipFree(P2_prev); hipFree(R1); hipFree(R2); hipFree(d_InputRef); hipFree(InputRef_x); hipFree(InputRef_y); } else { /*3D verson*/ int ImSize = dimX*dimY*dimZ; float *d_input, *d_update=NULL, *d_update_prev, *P1=NULL, *P2=NULL, *P3=NULL, *P1_prev=NULL, *P2_prev=NULL, *P3_prev=NULL, *R1=NULL, *R2=NULL, *R3=NULL, *InputRef_x=NULL, *InputRef_y=NULL, *InputRef_z=NULL, *d_InputRef=NULL; dim3 dimBlock(BLKXSIZE,BLKYSIZE,BLKZSIZE); dim3 dimGrid(idivup(dimX,BLKXSIZE), idivup(dimY,BLKYSIZE),idivup(dimZ,BLKZSIZE)); /*allocate space for images on device*/ checkCudaErrors( hipMalloc((void**)&d_input,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&d_update,ImSize*sizeof(float)) ); if (epsil != 0.0f) checkCudaErrors( hipMalloc((void**)&d_update_prev,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&P1,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&P2,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&P3,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&P1_prev,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&P2_prev,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&P3_prev,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&R1,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&R2,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&R3,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&d_InputRef,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&InputRef_x,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&InputRef_y,ImSize*sizeof(float)) ); checkCudaErrors( hipMalloc((void**)&InputRef_z,ImSize*sizeof(float)) ); checkCudaErrors( hipMemcpy(d_input,Input,ImSize*sizeof(float),hipMemcpyHostToDevice)); checkCudaErrors( hipMemcpy(d_InputRef,InputRef,ImSize*sizeof(float),hipMemcpyHostToDevice)); hipMemset(P1, 0, ImSize*sizeof(float)); hipMemset(P2, 0, ImSize*sizeof(float)); hipMemset(P3, 0, ImSize*sizeof(float)); hipMemset(P1_prev, 0, ImSize*sizeof(float)); hipMemset(P2_prev, 0, ImSize*sizeof(float)); hipMemset(P3_prev, 0, ImSize*sizeof(float)); hipMemset(R1, 0, ImSize*sizeof(float)); hipMemset(R2, 0, ImSize*sizeof(float)); hipMemset(R3, 0, ImSize*sizeof(float)); hipMemset(InputRef_x, 0, ImSize*sizeof(float)); hipMemset(InputRef_y, 0, ImSize*sizeof(float)); hipMemset(InputRef_z, 0, ImSize*sizeof(float)); /********************** Run CUDA 3D kernel here ********************/ multip = (1.0f/(26.0f*lambdaPar)); /* calculate gradient vectors for the reference */ hipLaunchKernelGGL(( GradNorm_func3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_InputRef, InputRef_x, InputRef_y, InputRef_z, eta, dimX, dimY, dimZ, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); /* The main kernel */ for (i = 0; i < iter; i++) { if ((epsil != 0.0f) && (i % 5 == 0)) { hipLaunchKernelGGL(( dTVcopy_kernel3D), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_update_prev, dimX, dimY, dimZ, 
ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); } /*projects a 3D vector field R-1,2,3 onto the orthogonal complement of another 3D vector field InputRef_xyz*/ hipLaunchKernelGGL(( ProjectVect_func3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, R1, R2, R3, InputRef_x, InputRef_y, InputRef_z, dimX, dimY, dimZ, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); /* computing the gradient of the objective function */ hipLaunchKernelGGL(( Obj_dfunc3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input, d_update, R1, R2, R3, dimX, dimY, dimZ, ImSize, lambdaPar); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); if (nonneg != 0) { hipLaunchKernelGGL(( dTVnonneg3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, dimX, dimY, dimZ, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); } /*Taking a step towards minus of the gradient*/ hipLaunchKernelGGL(( Grad_dfunc3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, P3, d_update, R1, R2, R3, InputRef_x, InputRef_y, InputRef_z, dimX, dimY, dimZ, ImSize, multip); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); /* projection step */ if (methodTV == 0)hipLaunchKernelGGL(( Proj_dfunc3D_iso_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, P3, dimX, dimY, dimZ, ImSize); /* isotropic kernel */ elsehipLaunchKernelGGL(( Proj_dfunc3D_aniso_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P2, P3, dimX, dimY, dimZ, ImSize); /* anisotropic kernel */ checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); tkp1 = (1.0f + sqrt(1.0f + 4.0f*tk*tk))*0.5f; multip2 = ((tk-1.0f)/tkp1); hipLaunchKernelGGL(( Rupd_dfunc3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P1_prev, P2, P2_prev, P3, P3_prev, R1, R2, R3, tkp1, tk, multip2, dimX, dimY, dimZ, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); hipLaunchKernelGGL(( dTVcopy_kernel3D), dim3(dimGrid),dim3(dimBlock), 0, 0, P1, P1_prev, dimX, dimY, dimZ, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); hipLaunchKernelGGL(( dTVcopy_kernel3D), dim3(dimGrid),dim3(dimBlock), 0, 0, P2, P2_prev, dimX, dimY, dimZ, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); hipLaunchKernelGGL(( dTVcopy_kernel3D), dim3(dimGrid),dim3(dimBlock), 0, 0, P3, P3_prev, dimX, dimY, dimZ, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); tk = tkp1; if ((epsil != 0.0f) && (i % 5 == 0)) { /* calculate norm - stopping rules using the Thrust library */ hipLaunchKernelGGL(( dTVResidCalc3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_update_prev, P1, dimX, dimY, dimZ, ImSize); checkCudaErrors( hipDeviceSynchronize() ); checkCudaErrors(hipPeekAtLastError() ); // setup arguments square<float> unary_op; thrust::plus<float> binary_op; thrust::device_vector<float> d_vec(P1, P1 + ImSize); float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op)); thrust::device_vector<float> d_vec2(d_update, d_update + ImSize); float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op)); // compute norm re = (reduction/reduction2); if (re < epsil) count++; if (count > 3) break; } } /***************************************************************/ //copy result matrix from 
device to host memory hipMemcpy(Output,d_update,ImSize*sizeof(float),hipMemcpyDeviceToHost); hipFree(d_input); hipFree(d_update); if (epsil != 0.0f) hipFree(d_update_prev); hipFree(P1); hipFree(P2); hipFree(P3); hipFree(P1_prev); hipFree(P2_prev); hipFree(P3_prev); hipFree(R1); hipFree(R2); hipFree(R3); hipFree(InputRef_x); hipFree(InputRef_y); hipFree(InputRef_z); hipFree(d_InputRef); } /*adding info into info_vector */ infovector[0] = (float)(i); /*iterations number (if stopped earlier based on tolerance)*/ infovector[1] = re; /* reached tolerance */ hipDeviceSynchronize(); return 0; }
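The stopping rule in both the 2D and 3D branches above computes the relative residual ||x_k - x_{k-1}|| / ||x_k|| every fifth iteration using thrust::transform_reduce and a 'square' functor (a commented-out copy of the functor is visible near the top of this file; the live definition presumably comes from shared.h). A minimal standalone sketch of that check follows; the function name and the raw-pointer parameters are placeholders for the device buffers of length ImSize.

// Sketch of the relative-residual stopping check used in the main loops above.
// 'square' mirrors the commented-out functor in this file; d_diff stands for the
// difference d_update - d_update_prev, d_update for the current iterate.
#include <thrust/device_ptr.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <cmath>

template <typename T>
struct square {
  __host__ __device__ T operator()(T x) const { return x * x; }
};

float relative_residual(const float* d_diff, const float* d_update, int ImSize)
{
  thrust::device_ptr<const float> diff(d_diff);
  thrust::device_ptr<const float> cur(d_update);

  float num = std::sqrt(thrust::transform_reduce(
      diff, diff + ImSize, square<float>(), 0.0f, thrust::plus<float>()));
  float den = std::sqrt(thrust::transform_reduce(
      cur, cur + ImSize, square<float>(), 0.0f, thrust::plus<float>()));

  return num / den;  // compared against epsil in the calling loop
}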
6c55997d42b5655fc3c84e5c9cad75e5181a546d.cu
/* This work is part of the Core Imaging Library developed by Visual Analytics and Imaging System Group of the Science Technology Facilities Council, STFC Copyright 2017 Daniil Kazantsev Copyright 2017 Srikanth Nagella, Edoardo Pasca Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "shared.h" #include "dTV_FGP_GPU_core.h" #include <thrust/functional.h> #include <thrust/device_vector.h> #include <thrust/transform_reduce.h> /* CUDA implementation of FGP-dTV [1,2] denoising/regularization model (2D/3D case) * which employs structural similarity of the level sets of two images/volumes, see [1,2] * The current implementation updates image 1 while image 2 is being fixed. * * Input Parameters: * 1. Noisy image/volume [REQUIRED] * 2. Additional reference image/volume of the same dimensions as (1) [REQUIRED] * 3. lambdaPar - regularization parameter [REQUIRED] * 4. Number of iterations [OPTIONAL] * 5. eplsilon: tolerance constant [OPTIONAL] * 6. eta: smoothing constant to calculate gradient of the reference [OPTIONAL] * * 7. TV-type: methodTV - 'iso' (0) or 'l1' (1) [OPTIONAL] * 8. nonneg: 'nonnegativity (0 is OFF by default) [OPTIONAL] * 9. GPU device number if for multigpu run (default 0) [OPTIONAL] * Output: * [1] Filtered/regularized image/volume * [2] Information vector which contains [iteration no., reached tolerance] * * This function is based on the Matlab's codes and papers by * [1] Amir Beck and Marc Teboulle, "Fast Gradient-Based Algorithms for Constrained Total Variation Image Denoising and Deblurring Problems" * [2] M. J. Ehrhardt and M. M. Betcke, Multi-Contrast MRI Reconstruction with Structure-Guided Total Variation, SIAM Journal on Imaging Sciences 9(3), pp. 1084–1106 */ #define BLKXSIZE2D 16 #define BLKYSIZE2D 16 #define BLKXSIZE 8 #define BLKYSIZE 8 #define BLKZSIZE 8 #define idivup(a, b) ( ((a)%(b) != 0) ? 
(a)/(b)+1 : (a)/(b) ) //struct square { __host__ __device__ float operator()(float x) { return x * x; } }; /************************************************/ /*****************2D modules*********************/ /************************************************/ __global__ void GradNorm_func2D_kernel(float *Refd, float *Refd_x, float *Refd_y, float eta, int N, int M, int ImSize) { float val1, val2, gradX, gradY, magn; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { /* boundary conditions */ if (xIndex >= N-1) val1 = 0.0f; else val1 = Refd[(xIndex+1) + N*yIndex]; if (yIndex >= M-1) val2 = 0.0f; else val2 = Refd[(xIndex) + N*(yIndex + 1)]; gradX = val1 - Refd[index]; gradY = val2 - Refd[index]; magn = pow(gradX,2) + pow(gradY,2); magn = sqrt(magn + pow(eta,2)); Refd_x[index] = gradX/magn; Refd_y[index] = gradY/magn; } return; } __global__ void ProjectVect_func2D_kernel(float *R1, float *R2, float *Refd_x, float *Refd_y, int N, int M, int ImSize) { float in_prod; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { in_prod = R1[index]*Refd_x[index] + R2[index]*Refd_y[index]; /* calculate inner product */ R1[index] = R1[index] - in_prod*Refd_x[index]; R2[index] = R2[index] - in_prod*Refd_y[index]; } return; } __global__ void Obj_dfunc2D_kernel(float *Ad, float *D, float *R1, float *R2, int N, int M, int ImSize, float lambda) { float val1,val2; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { if (xIndex <= 0) {val1 = 0.0f;} else {val1 = R1[(xIndex-1) + N*yIndex];} if (yIndex <= 0) {val2 = 0.0f;} else {val2 = R2[xIndex + N*(yIndex-1)];} //Write final result to global memory D[index] = Ad[index] - lambda*(R1[index] + R2[index] - val1 - val2); } return; } __global__ void Grad_dfunc2D_kernel(float *P1, float *P2, float *D, float *R1, float *R2, float *Refd_x, float *Refd_y, int N, int M, int ImSize, float multip) { float val1,val2,in_prod; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { /* boundary conditions */ if (xIndex >= N-1) val1 = 0.0f; else val1 = D[index] - D[(xIndex+1) + N*yIndex]; if (yIndex >= M-1) val2 = 0.0f; else val2 = D[index] - D[(xIndex) + N*(yIndex + 1)]; in_prod = val1*Refd_x[index] + val2*Refd_y[index]; /* calculate inner product */ val1 = val1 - in_prod*Refd_x[index]; val2 = val2 - in_prod*Refd_y[index]; //Write final result to global memory P1[index] = R1[index] + multip*val1; P2[index] = R2[index] + multip*val2; } return; } __global__ void Proj_dfunc2D_iso_kernel(float *P1, float *P2, int N, int M, int ImSize) { float denom; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { denom = pow(P1[index],2) + pow(P2[index],2); if (denom > 1.0f) { P1[index] = P1[index]/sqrt(denom); P2[index] = P2[index]/sqrt(denom); } } return; } __global__ void Proj_dfunc2D_aniso_kernel(float *P1, float *P2, int N, int M, int 
ImSize) { float val1, val2; //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { val1 = abs(P1[index]); val2 = abs(P2[index]); if (val1 < 1.0f) {val1 = 1.0f;} if (val2 < 1.0f) {val2 = 1.0f;} P1[index] = P1[index]/val1; P2[index] = P2[index]/val2; } return; } __global__ void Rupd_dfunc2D_kernel(float *P1, float *P1_old, float *P2, float *P2_old, float *R1, float *R2, float tkp1, float tk, float multip2, int N, int M, int ImSize) { //calculate each thread global index const int xIndex=blockIdx.x*blockDim.x+threadIdx.x; const int yIndex=blockIdx.y*blockDim.y+threadIdx.y; int index = xIndex + N*yIndex; if ((xIndex < N) && (yIndex < M)) { R1[index] = P1[index] + multip2*(P1[index] - P1_old[index]); R2[index] = P2[index] + multip2*(P2[index] - P2_old[index]); } return; } __global__ void dTVnonneg2D_kernel(float* Output, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { if (Output[index] < 0.0f) Output[index] = 0.0f; } } __global__ void dTVcopy_kernel2D(float *Input, float* Output, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { Output[index] = Input[index]; } } __global__ void dTVcopy_kernel3D(float *Input, float* Output, int N, int M, int Z, int num_total) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if (index < num_total) { Output[index] = Input[index]; } } __global__ void dTVResidCalc2D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int num_total) { int xIndex = blockDim.x * blockIdx.x + threadIdx.x; int yIndex = blockDim.y * blockIdx.y + threadIdx.y; int index = xIndex + N*yIndex; if (index < num_total) { Output[index] = Input1[index] - Input2[index]; } } __global__ void dTVResidCalc3D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int Z, int num_total) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if (index < num_total) { Output[index] = Input1[index] - Input2[index]; } } /************************************************/ /*****************3D modules*********************/ /************************************************/ __global__ void GradNorm_func3D_kernel(float *Refd, float *Refd_x, float *Refd_y, float *Refd_z, float eta, int N, int M, int Z, int ImSize) { float val1, val2, val3, gradX, gradY, gradZ, magn; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { /* boundary conditions */ if (i >= N-1) val1 = 0.0f; else val1 = Refd[(N*M)*k + (i+1) + N*j]; if (j >= M-1) val2 = 0.0f; else val2 = Refd[(N*M)*k + i + N*(j+1)]; if (k >= Z-1) val3 = 0.0f; else val3 = Refd[(N*M)*(k+1) + i + N*j]; gradX = val1 - Refd[index]; gradY = val2 - Refd[index]; gradZ = val3 - Refd[index]; magn = pow(gradX,2) + pow(gradY,2) + pow(gradZ,2); magn = sqrt(magn + pow(eta,2)); Refd_x[index] = gradX/magn; 
Refd_y[index] = gradY/magn; Refd_z[index] = gradZ/magn; } return; } __global__ void ProjectVect_func3D_kernel(float *R1, float *R2, float *R3, float *Refd_x, float *Refd_y, float *Refd_z, int N, int M, int Z, int ImSize) { float in_prod; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { in_prod = R1[index]*Refd_x[index] + R2[index]*Refd_y[index] + R3[index]*Refd_z[index]; /* calculate inner product */ R1[index] = R1[index] - in_prod*Refd_x[index]; R2[index] = R2[index] - in_prod*Refd_y[index]; R3[index] = R3[index] - in_prod*Refd_z[index]; } return; } __global__ void Obj_dfunc3D_kernel(float *Ad, float *D, float *R1, float *R2, float *R3, int N, int M, int Z, int ImSize, float lambda) { float val1,val2,val3; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { if (i <= 0) {val1 = 0.0f;} else {val1 = R1[(N*M)*(k) + (i-1) + N*j];} if (j <= 0) {val2 = 0.0f;} else {val2 = R2[(N*M)*(k) + i + N*(j-1)];} if (k <= 0) {val3 = 0.0f;} else {val3 = R3[(N*M)*(k-1) + i + N*j];} //Write final result to global memory D[index] = Ad[index] - lambda*(R1[index] + R2[index] + R3[index] - val1 - val2 - val3); } return; } __global__ void Grad_dfunc3D_kernel(float *P1, float *P2, float *P3, float *D, float *R1, float *R2, float *R3, float *Refd_x, float *Refd_y, float *Refd_z, int N, int M, int Z, int ImSize, float multip) { float val1,val2,val3,in_prod; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { /* boundary conditions */ if (i >= N-1) val1 = 0.0f; else val1 = D[index] - D[(N*M)*(k) + (i+1) + N*j]; if (j >= M-1) val2 = 0.0f; else val2 = D[index] - D[(N*M)*(k) + i + N*(j+1)]; if (k >= Z-1) val3 = 0.0f; else val3 = D[index] - D[(N*M)*(k+1) + i + N*j]; in_prod = val1*Refd_x[index] + val2*Refd_y[index] + val3*Refd_z[index]; /* calculate inner product */ val1 = val1 - in_prod*Refd_x[index]; val2 = val2 - in_prod*Refd_y[index]; val3 = val3 - in_prod*Refd_z[index]; //Write final result to global memory P1[index] = R1[index] + multip*val1; P2[index] = R2[index] + multip*val2; P3[index] = R3[index] + multip*val3; } return; } __global__ void Proj_dfunc3D_iso_kernel(float *P1, float *P2, float *P3, int N, int M, int Z, int ImSize) { float denom,sq_denom; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { denom = pow(P1[index],2) + pow(P2[index],2) + pow(P3[index],2); if (denom > 1.0f) { sq_denom = 1.0f/sqrt(denom); P1[index] = P1[index]*sq_denom; P2[index] = P2[index]*sq_denom; P3[index] = P3[index]*sq_denom; } } return; } __global__ void Proj_dfunc3D_aniso_kernel(float *P1, float *P2, float *P3, int N, int M, int Z, int ImSize) { float val1, val2, val3; //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i 
+ N*j; if ((i < N) && (j < M) && (k < Z)) { val1 = abs(P1[index]); val2 = abs(P2[index]); val3 = abs(P3[index]); if (val1 < 1.0f) {val1 = 1.0f;} if (val2 < 1.0f) {val2 = 1.0f;} if (val3 < 1.0f) {val3 = 1.0f;} P1[index] = P1[index]/val1; P2[index] = P2[index]/val2; P3[index] = P3[index]/val3; } return; } __global__ void Rupd_dfunc3D_kernel(float *P1, float *P1_old, float *P2, float *P2_old, float *P3, float *P3_old, float *R1, float *R2, float *R3, float tkp1, float tk, float multip2, int N, int M, int Z, int ImSize) { //calculate each thread global index int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if ((i < N) && (j < M) && (k < Z)) { R1[index] = P1[index] + multip2*(P1[index] - P1_old[index]); R2[index] = P2[index] + multip2*(P2[index] - P2_old[index]); R3[index] = P3[index] + multip2*(P3[index] - P3_old[index]); } return; } __global__ void dTVnonneg3D_kernel(float* Output, int N, int M, int Z, int num_total) { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; int k = blockDim.z * blockIdx.z + threadIdx.z; int index = (N*M)*k + i + N*j; if (index < num_total) { if (Output[index] < 0.0f) Output[index] = 0.0f; } } /*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/ ////////////MAIN HOST FUNCTION /////////////// extern "C" int dTV_FGP_GPU_main(float *Input, float *InputRef, float *Output, float *infovector, float lambdaPar, int iter, float epsil, float eta, int methodTV, int nonneg, int gpu_device, int dimX, int dimY, int dimZ) { int deviceCount = -1; // number of devices cudaGetDeviceCount(&deviceCount); if (deviceCount == 0) { fprintf(stderr, "No CUDA devices found\n"); return -1; } checkCudaErrors(cudaSetDevice(gpu_device)); int count = 0, i; float re, multip,multip2; re = 0.0f; float tk = 1.0f; float tkp1=1.0f; if (dimZ <= 1) { /*2D verson*/ int ImSize = dimX*dimY; float *d_input, *d_update=NULL, *d_update_prev=NULL, *P1=NULL, *P2=NULL, *P1_prev=NULL, *P2_prev=NULL, *R1=NULL, *R2=NULL, *InputRef_x=NULL, *InputRef_y=NULL, *d_InputRef=NULL; dim3 dimBlock(BLKXSIZE2D,BLKYSIZE2D); dim3 dimGrid(idivup(dimX,BLKXSIZE2D), idivup(dimY,BLKYSIZE2D)); /*allocate space for images on device*/ checkCudaErrors( cudaMalloc((void**)&d_input,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&d_update,ImSize*sizeof(float)) ); if (epsil != 0.0f) checkCudaErrors( cudaMalloc((void**)&d_update_prev,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&P1,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&P2,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&P1_prev,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&P2_prev,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&R1,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&R2,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&d_InputRef,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&InputRef_x,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&InputRef_y,ImSize*sizeof(float)) ); checkCudaErrors( cudaMemcpy(d_input,Input,ImSize*sizeof(float),cudaMemcpyHostToDevice)); checkCudaErrors( cudaMemcpy(d_InputRef,InputRef,ImSize*sizeof(float),cudaMemcpyHostToDevice)); cudaMemset(P1, 0, ImSize*sizeof(float)); cudaMemset(P2, 0, ImSize*sizeof(float)); cudaMemset(P1_prev, 0, ImSize*sizeof(float)); cudaMemset(P2_prev, 0, ImSize*sizeof(float)); cudaMemset(R1, 0, 
ImSize*sizeof(float)); cudaMemset(R2, 0, ImSize*sizeof(float)); cudaMemset(InputRef_x, 0, ImSize*sizeof(float)); cudaMemset(InputRef_y, 0, ImSize*sizeof(float)); /******************** Run CUDA 2D kernel here ********************/ multip = (1.0f/(8.0f*lambdaPar)); /* calculate gradient vectors for the reference */ GradNorm_func2D_kernel<<<dimGrid,dimBlock>>>(d_InputRef, InputRef_x, InputRef_y, eta, dimX, dimY, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); /* The main kernel */ for (i = 0; i < iter; i++) { if ((epsil != 0.0f) && (i % 5 == 0)) { dTVcopy_kernel2D<<<dimGrid,dimBlock>>>(d_update, d_update_prev, dimX, dimY, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); } /*projects a 2D vector field R-1,2 onto the orthogonal complement of another 2D vector field InputRef_xy*/ ProjectVect_func2D_kernel<<<dimGrid,dimBlock>>>(R1, R2, InputRef_x, InputRef_y, dimX, dimY, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); /* computing the gradient of the objective function */ Obj_dfunc2D_kernel<<<dimGrid,dimBlock>>>(d_input, d_update, R1, R2, dimX, dimY, ImSize, lambdaPar); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); if (nonneg != 0) { dTVnonneg2D_kernel<<<dimGrid,dimBlock>>>(d_update, dimX, dimY, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); } /*Taking a step towards minus of the gradient*/ Grad_dfunc2D_kernel<<<dimGrid,dimBlock>>>(P1, P2, d_update, R1, R2, InputRef_x, InputRef_y, dimX, dimY, ImSize, multip); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); /* projection step */ if (methodTV == 0) Proj_dfunc2D_iso_kernel<<<dimGrid,dimBlock>>>(P1, P2, dimX, dimY, ImSize); /*isotropic TV*/ else Proj_dfunc2D_aniso_kernel<<<dimGrid,dimBlock>>>(P1, P2, dimX, dimY, ImSize); /*anisotropic TV*/ checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); tkp1 = (1.0f + sqrt(1.0f + 4.0f*tk*tk))*0.5f; multip2 = ((tk-1.0f)/tkp1); Rupd_dfunc2D_kernel<<<dimGrid,dimBlock>>>(P1, P1_prev, P2, P2_prev, R1, R2, tkp1, tk, multip2, dimX, dimY, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); dTVcopy_kernel2D<<<dimGrid,dimBlock>>>(P1, P1_prev, dimX, dimY, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); dTVcopy_kernel2D<<<dimGrid,dimBlock>>>(P2, P2_prev, dimX, dimY, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); tk = tkp1; if ((epsil != 0.0f) && (i % 5 == 0)) { /* calculate norm - stopping rules using the Thrust library */ dTVResidCalc2D_kernel<<<dimGrid,dimBlock>>>(d_update, d_update_prev, P1, dimX, dimY, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); // setup arguments square<float> unary_op; thrust::plus<float> binary_op; thrust::device_vector<float> d_vec(P1, P1 + ImSize); float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op)); thrust::device_vector<float> d_vec2(d_update, d_update + ImSize); float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op)); // compute norm re = (reduction/reduction2); if (re < epsil) count++; if (count > 3) break; } } /***************************************************************/ //copy result matrix from device to host 
memory cudaMemcpy(Output,d_update,ImSize*sizeof(float),cudaMemcpyDeviceToHost); cudaFree(d_input); cudaFree(d_update); if (epsil != 0.0f) cudaFree(d_update_prev); cudaFree(P1); cudaFree(P2); cudaFree(P1_prev); cudaFree(P2_prev); cudaFree(R1); cudaFree(R2); cudaFree(d_InputRef); cudaFree(InputRef_x); cudaFree(InputRef_y); } else { /*3D verson*/ int ImSize = dimX*dimY*dimZ; float *d_input, *d_update=NULL, *d_update_prev, *P1=NULL, *P2=NULL, *P3=NULL, *P1_prev=NULL, *P2_prev=NULL, *P3_prev=NULL, *R1=NULL, *R2=NULL, *R3=NULL, *InputRef_x=NULL, *InputRef_y=NULL, *InputRef_z=NULL, *d_InputRef=NULL; dim3 dimBlock(BLKXSIZE,BLKYSIZE,BLKZSIZE); dim3 dimGrid(idivup(dimX,BLKXSIZE), idivup(dimY,BLKYSIZE),idivup(dimZ,BLKZSIZE)); /*allocate space for images on device*/ checkCudaErrors( cudaMalloc((void**)&d_input,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&d_update,ImSize*sizeof(float)) ); if (epsil != 0.0f) checkCudaErrors( cudaMalloc((void**)&d_update_prev,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&P1,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&P2,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&P3,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&P1_prev,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&P2_prev,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&P3_prev,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&R1,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&R2,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&R3,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&d_InputRef,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&InputRef_x,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&InputRef_y,ImSize*sizeof(float)) ); checkCudaErrors( cudaMalloc((void**)&InputRef_z,ImSize*sizeof(float)) ); checkCudaErrors( cudaMemcpy(d_input,Input,ImSize*sizeof(float),cudaMemcpyHostToDevice)); checkCudaErrors( cudaMemcpy(d_InputRef,InputRef,ImSize*sizeof(float),cudaMemcpyHostToDevice)); cudaMemset(P1, 0, ImSize*sizeof(float)); cudaMemset(P2, 0, ImSize*sizeof(float)); cudaMemset(P3, 0, ImSize*sizeof(float)); cudaMemset(P1_prev, 0, ImSize*sizeof(float)); cudaMemset(P2_prev, 0, ImSize*sizeof(float)); cudaMemset(P3_prev, 0, ImSize*sizeof(float)); cudaMemset(R1, 0, ImSize*sizeof(float)); cudaMemset(R2, 0, ImSize*sizeof(float)); cudaMemset(R3, 0, ImSize*sizeof(float)); cudaMemset(InputRef_x, 0, ImSize*sizeof(float)); cudaMemset(InputRef_y, 0, ImSize*sizeof(float)); cudaMemset(InputRef_z, 0, ImSize*sizeof(float)); /********************** Run CUDA 3D kernel here ********************/ multip = (1.0f/(26.0f*lambdaPar)); /* calculate gradient vectors for the reference */ GradNorm_func3D_kernel<<<dimGrid,dimBlock>>>(d_InputRef, InputRef_x, InputRef_y, InputRef_z, eta, dimX, dimY, dimZ, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); /* The main kernel */ for (i = 0; i < iter; i++) { if ((epsil != 0.0f) && (i % 5 == 0)) { dTVcopy_kernel3D<<<dimGrid,dimBlock>>>(d_update, d_update_prev, dimX, dimY, dimZ, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); } /*projects a 3D vector field R-1,2,3 onto the orthogonal complement of another 3D vector field InputRef_xyz*/ ProjectVect_func3D_kernel<<<dimGrid,dimBlock>>>(R1, R2, R3, InputRef_x, InputRef_y, InputRef_z, dimX, dimY, dimZ, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); 
checkCudaErrors(cudaPeekAtLastError() ); /* computing the gradient of the objective function */ Obj_dfunc3D_kernel<<<dimGrid,dimBlock>>>(d_input, d_update, R1, R2, R3, dimX, dimY, dimZ, ImSize, lambdaPar); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); if (nonneg != 0) { dTVnonneg3D_kernel<<<dimGrid,dimBlock>>>(d_update, dimX, dimY, dimZ, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); } /*Taking a step towards minus of the gradient*/ Grad_dfunc3D_kernel<<<dimGrid,dimBlock>>>(P1, P2, P3, d_update, R1, R2, R3, InputRef_x, InputRef_y, InputRef_z, dimX, dimY, dimZ, ImSize, multip); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); /* projection step */ if (methodTV == 0) Proj_dfunc3D_iso_kernel<<<dimGrid,dimBlock>>>(P1, P2, P3, dimX, dimY, dimZ, ImSize); /* isotropic kernel */ else Proj_dfunc3D_aniso_kernel<<<dimGrid,dimBlock>>>(P1, P2, P3, dimX, dimY, dimZ, ImSize); /* anisotropic kernel */ checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); tkp1 = (1.0f + sqrt(1.0f + 4.0f*tk*tk))*0.5f; multip2 = ((tk-1.0f)/tkp1); Rupd_dfunc3D_kernel<<<dimGrid,dimBlock>>>(P1, P1_prev, P2, P2_prev, P3, P3_prev, R1, R2, R3, tkp1, tk, multip2, dimX, dimY, dimZ, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); dTVcopy_kernel3D<<<dimGrid,dimBlock>>>(P1, P1_prev, dimX, dimY, dimZ, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); dTVcopy_kernel3D<<<dimGrid,dimBlock>>>(P2, P2_prev, dimX, dimY, dimZ, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); dTVcopy_kernel3D<<<dimGrid,dimBlock>>>(P3, P3_prev, dimX, dimY, dimZ, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); tk = tkp1; if ((epsil != 0.0f) && (i % 5 == 0)) { /* calculate norm - stopping rules using the Thrust library */ dTVResidCalc3D_kernel<<<dimGrid,dimBlock>>>(d_update, d_update_prev, P1, dimX, dimY, dimZ, ImSize); checkCudaErrors( cudaDeviceSynchronize() ); checkCudaErrors(cudaPeekAtLastError() ); // setup arguments square<float> unary_op; thrust::plus<float> binary_op; thrust::device_vector<float> d_vec(P1, P1 + ImSize); float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op)); thrust::device_vector<float> d_vec2(d_update, d_update + ImSize); float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op)); // compute norm re = (reduction/reduction2); if (re < epsil) count++; if (count > 3) break; } } /***************************************************************/ //copy result matrix from device to host memory cudaMemcpy(Output,d_update,ImSize*sizeof(float),cudaMemcpyDeviceToHost); cudaFree(d_input); cudaFree(d_update); if (epsil != 0.0f) cudaFree(d_update_prev); cudaFree(P1); cudaFree(P2); cudaFree(P3); cudaFree(P1_prev); cudaFree(P2_prev); cudaFree(P3_prev); cudaFree(R1); cudaFree(R2); cudaFree(R3); cudaFree(InputRef_x); cudaFree(InputRef_y); cudaFree(InputRef_z); cudaFree(d_InputRef); } /*adding info into info_vector */ infovector[0] = (float)(i); /*iterations number (if stopped earlier based on tolerance)*/ infovector[1] = re; /* reached tolerance */ cudaDeviceSynchronize(); return 0; }
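A hedged sketch of how the exported entry point above might be driven from host code. The prototype is copied from the definition in this file; the image sizes, regularization weight, iteration count, tolerance and eta are placeholder values chosen only for illustration.

#include <vector>

extern "C" int dTV_FGP_GPU_main(float *Input, float *InputRef, float *Output,
                                float *infovector, float lambdaPar, int iter,
                                float epsil, float eta, int methodTV, int nonneg,
                                int gpu_device, int dimX, int dimY, int dimZ);

int run_dtv_fgp_example()
{
    const int dimX = 256, dimY = 256, dimZ = 1;      // dimZ <= 1 selects the 2D branch
    std::vector<float> noisy(dimX * dimY, 0.0f);     // would hold the noisy image
    std::vector<float> reference(dimX * dimY, 0.0f); // structural reference image
    std::vector<float> out(dimX * dimY, 0.0f);
    float info[2] = {0.0f, 0.0f};                    // [iterations run, reached tolerance]

    // lambda, iterations, tolerance, eta, methodTV (0 = iso), nonneg, GPU id
    return dTV_FGP_GPU_main(noisy.data(), reference.data(), out.data(), info,
                            0.05f, 300, 1e-5f, 0.01f, 0, 0, 0,
                            dimX, dimY, dimZ);
}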
e5a4f4ff13af9ab84eec22f9995c3fe81b3219ba.hip
// !!! This is a file automatically generated by hipify!!! //nvcc vetor_runtime.cu -arch=sm_20 -o vetor #include <iostream> #include <cstdlib> #include <cstring> #include <hip/hip_runtime.h> #include <cstdio> #include <fstream> #include <cmath> #include <ctime> #define DADOS_PATH "../DadosHistoricos/tabela_YHOO_bollinger.bin" #define TAMANHO_PADRAO 10 #define DESVIOS_PADRAO 0.5 #define TAMANHO_MAXIMO 90 #define DESVIOS_MAXIMO 4 #define TAMANHO_INCREMENTO 1 #define DESVIOS_INCREMENTO 0.1 #define TAMANHO_INICIAL 2 #define DESVIOS_INICIAL 0.0 #define CAPITAL_INICIAL 0.0 #define CHECK_ERROR(call) do { \ if( hipSuccess != call) { \ std::cerr << std::endl << "CUDA ERRO: " << \ hipGetErrorString(call) << " in file: " << __FILE__ \ << " in line: " << __LINE__ << std::endl; \ exit(0); \ } } while (0) using namespace std; __device__ float calcula_media(float* historico, int ini, int fim) { int i; float sum = 0.0; for(i=ini; i<fim; i++) { sum+= historico[i]; } return sum/(fim-ini); } __device__ float calcula_desvio(float* historico, int ini, int fim, float media) { int i; float sum = 0.0; for(i=ini; i<fim; i++) { // sum+= pow((historico[i]-media),2.0); sum+= ((historico[i]-media)*(historico[i]-media)); } sum = sum/(fim-ini); sum = sqrt(sum); return sum; } __global__ void trader_thread(float* historico, int* tamanhos_janelas, float* num_desvios, float* capital) { int i = blockDim.x * blockIdx.x + threadIdx.x; //??????????????????? int tamanho = 0; int dia_corrente = tamanhos_janelas[i]; int dia_anterior = tamanhos_janelas[i] - 1; float media_movel; float desvio; while(dia_corrente<(tamanho + 1)) { media_movel = calcula_media(historico, dia_corrente - tamanhos_janelas[i], dia_corrente); desvio = calcula_desvio(historico, dia_corrente - tamanhos_janelas[i], dia_corrente, media_movel); if(historico[dia_anterior] >= media_movel + (num_desvios[i])*desvio) { (capital[i])+= (historico[dia_corrente]); } else if(historico[dia_anterior] <= media_movel - (num_desvios[i])*desvio) { (capital[i])-= (historico[dia_corrente]); } dia_corrente++; dia_anterior++; } } int main (int argc, char **argv) { FILE * pFile; int h_tamanho; int* h_tamanhos_janelas = NULL; int* d_tamanhos_janelas = NULL; float* h_close_prices_vet = NULL; float* h_num_desvios = NULL; float* h_capital = NULL; float* d_close_prices_vet = NULL; float* d_num_desvios = NULL; float* d_capital = NULL; int number_of_threads; if (argc != 1) { return EXIT_FAILURE; } else { pFile = fopen(DADOS_PATH , "rb" ); if (pFile==NULL) { cout<<"Could not open file.\n" << "File error",stderr; return EXIT_FAILURE; } else { fread(&h_tamanho,sizeof(int),1,pFile); h_close_prices_vet = (float*) malloc(sizeof(float)*h_tamanho); fread(h_close_prices_vet,sizeof(float), h_tamanho,pFile); fclose (pFile); number_of_threads = ((TAMANHO_MAXIMO - TAMANHO_INICIAL)/TAMANHO_INCREMENTO + 1) * ((DESVIOS_MAXIMO - DESVIOS_INICIAL)/DESVIOS_INCREMENTO + 1); h_tamanhos_janelas = (int*) malloc(sizeof(int)*number_of_threads); h_num_desvios = (float*) malloc(sizeof(float)*number_of_threads); h_capital = (float*) malloc(sizeof(float)*number_of_threads); int i = 0; for (int w = TAMANHO_INICIAL; w <= (int) TAMANHO_MAXIMO ; w+=TAMANHO_INCREMENTO) { for (float d = DESVIOS_INICIAL; d <= (int) DESVIOS_MAXIMO; d+=DESVIOS_INCREMENTO) { h_tamanhos_janelas[i] = w; h_num_desvios[i] = d; h_capital[i] = CAPITAL_INICIAL; i++; } } float elapsedTimeGPU = 0.0f, elapsedTimeMEM = 0.0f; hipEvent_t e_Start,e_Stop; size_t free = 0, total = 0; cout << endl << "CUDA runtime versao: " << CUDART_VERSION << endl; //Reset no 
device CHECK_ERROR(hipDeviceReset()); //Verificando espaço livre em memória CHECK_ERROR(hipMemGetInfo(&free, &total)); cout << "Memoria livre: " << (free / 1024 / 1024) << " MB\n"; cout << "Memoria total: " << (total / 1024 / 1024) << " MB\n"; //Criando eventos CHECK_ERROR(hipEventCreate(&e_Start)); CHECK_ERROR(hipEventCreate(&e_Stop)); //Aloca memória GPU CHECK_ERROR(hipMalloc((void**) &d_close_prices_vet, h_tamanho * sizeof(float))); CHECK_ERROR(hipMalloc((void**) &d_tamanhos_janelas, number_of_threads * sizeof(int))); CHECK_ERROR(hipMalloc((void**) &d_num_desvios, number_of_threads * sizeof(float))); CHECK_ERROR(hipMalloc((void**) &d_capital, number_of_threads * sizeof(float))); //Copiando CPU --> GPU CHECK_ERROR(hipEventRecord(e_Start, hipEventDefault)); CHECK_ERROR(hipMemcpy(d_close_prices_vet, h_close_prices_vet, h_tamanho * sizeof(float), hipMemcpyHostToDevice)); CHECK_ERROR(hipMemcpy(d_tamanhos_janelas, h_tamanhos_janelas, number_of_threads * sizeof(float), hipMemcpyHostToDevice)); CHECK_ERROR(hipMemcpy(d_num_desvios, h_num_desvios, number_of_threads * sizeof(float), hipMemcpyHostToDevice)); CHECK_ERROR(hipMemcpy(d_capital, h_capital, number_of_threads * sizeof(float), hipMemcpyHostToDevice)); CHECK_ERROR(hipEventRecord(e_Stop, hipEventDefault)); CHECK_ERROR(hipEventSynchronize(e_Stop)); CHECK_ERROR(hipEventElapsedTime(&elapsedTimeMEM, e_Start, e_Stop)); CHECK_ERROR(hipEventRecord(e_Start, hipEventDefault)); int numBlocks = 2; //???????????????????????? int threadsPerBlock = number_of_threads / numBlocks; //???????????????????????? hipLaunchKernelGGL(( trader_thread), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_close_prices_vet, d_tamanhos_janelas, d_num_desvios,d_capital); CHECK_ERROR(hipDeviceSynchronize()); CHECK_ERROR(hipEventRecord(e_Stop, hipEventDefault)); CHECK_ERROR(hipEventSynchronize(e_Stop)); CHECK_ERROR(hipEventElapsedTime(&elapsedTimeGPU, e_Start, e_Stop)); //Copiando GPU --> CPU float elapsedTime = 0.0f; CHECK_ERROR(hipEventRecord(e_Start, hipEventDefault)); CHECK_ERROR(hipMemcpy(h_capital, d_capital, number_of_threads * sizeof(float), hipMemcpyDeviceToHost)); CHECK_ERROR(hipEventRecord(e_Stop, hipEventDefault)); CHECK_ERROR(hipEventSynchronize(e_Stop)); CHECK_ERROR(hipEventElapsedTime(&elapsedTime, e_Start, e_Stop)); elapsedTimeMEM += elapsedTime; cout << endl << "Tempo gasto [MEM]: " << elapsedTimeMEM << " (ms)" << endl; cout << endl << "Tempo gasto [GPU]: " << elapsedTimeGPU << " (ms)" << endl; cout << endl << "Resultado: "<< endl; CHECK_ERROR(hipFree(d_close_prices_vet)); CHECK_ERROR(hipFree(d_tamanhos_janelas)); CHECK_ERROR(hipFree(d_num_desvios)); CHECK_ERROR(hipFree(d_capital)); delete [] h_close_prices_vet; delete [] h_tamanhos_janelas; delete [] h_num_desvios; delete [] h_capital; // free(h_close_prices_vet); // free(h_tamanhos_janelas); // free(h_num_desvios); // free(h_capital); cout << "FIM" << endl; return EXIT_SUCCESS; } } }
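For reference, a host-side C++ sketch of the band signal that trader_thread evaluates for each (window, deviations) pair: a sliding window of janela closing prices gives a moving mean and standard deviation, and the previous day's close is compared against mean +/- n_desvios * sigma. The function name and the assumption that the window slides over the whole price history are illustrative, not taken from the original harness.

#include <cmath>
#include <vector>

// Host reference of the per-thread strategy: accumulate capital according to
// whether yesterday's close crossed the upper or lower Bollinger band.
float bollinger_capital(const std::vector<float>& close, int janela, float n_desvios)
{
    float capital = 0.0f;
    for (size_t dia = (size_t)janela; dia < close.size(); ++dia) {
        float media = 0.0f, var = 0.0f;
        for (size_t k = dia - janela; k < dia; ++k) media += close[k];
        media /= janela;
        for (size_t k = dia - janela; k < dia; ++k) var += (close[k] - media) * (close[k] - media);
        float desvio = std::sqrt(var / janela);
        if (close[dia - 1] >= media + n_desvios * desvio)      capital += close[dia];
        else if (close[dia - 1] <= media - n_desvios * desvio) capital -= close[dia];
    }
    return capital;
}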
e5a4f4ff13af9ab84eec22f9995c3fe81b3219ba.cu
//nvcc vetor_runtime.cu -arch=sm_20 -o vetor #include <iostream> #include <cstdlib> #include <cstring> #include <cuda_runtime.h> #include <cstdio> #include <fstream> #include <cmath> #include <ctime> #define DADOS_PATH "../DadosHistoricos/tabela_YHOO_bollinger.bin" #define TAMANHO_PADRAO 10 #define DESVIOS_PADRAO 0.5 #define TAMANHO_MAXIMO 90 #define DESVIOS_MAXIMO 4 #define TAMANHO_INCREMENTO 1 #define DESVIOS_INCREMENTO 0.1 #define TAMANHO_INICIAL 2 #define DESVIOS_INICIAL 0.0 #define CAPITAL_INICIAL 0.0 #define CHECK_ERROR(call) do { \ if( cudaSuccess != call) { \ std::cerr << std::endl << "CUDA ERRO: " << \ cudaGetErrorString(call) << " in file: " << __FILE__ \ << " in line: " << __LINE__ << std::endl; \ exit(0); \ } } while (0) using namespace std; __device__ float calcula_media(float* historico, int ini, int fim) { int i; float sum = 0.0; for(i=ini; i<fim; i++) { sum+= historico[i]; } return sum/(fim-ini); } __device__ float calcula_desvio(float* historico, int ini, int fim, float media) { int i; float sum = 0.0; for(i=ini; i<fim; i++) { // sum+= pow((historico[i]-media),2.0); sum+= ((historico[i]-media)*(historico[i]-media)); } sum = sum/(fim-ini); sum = sqrt(sum); return sum; } __global__ void trader_thread(float* historico, int* tamanhos_janelas, float* num_desvios, float* capital) { int i = blockDim.x * blockIdx.x + threadIdx.x; //??????????????????? int tamanho = 0; int dia_corrente = tamanhos_janelas[i]; int dia_anterior = tamanhos_janelas[i] - 1; float media_movel; float desvio; while(dia_corrente<(tamanho + 1)) { media_movel = calcula_media(historico, dia_corrente - tamanhos_janelas[i], dia_corrente); desvio = calcula_desvio(historico, dia_corrente - tamanhos_janelas[i], dia_corrente, media_movel); if(historico[dia_anterior] >= media_movel + (num_desvios[i])*desvio) { (capital[i])+= (historico[dia_corrente]); } else if(historico[dia_anterior] <= media_movel - (num_desvios[i])*desvio) { (capital[i])-= (historico[dia_corrente]); } dia_corrente++; dia_anterior++; } } int main (int argc, char **argv) { FILE * pFile; int h_tamanho; int* h_tamanhos_janelas = NULL; int* d_tamanhos_janelas = NULL; float* h_close_prices_vet = NULL; float* h_num_desvios = NULL; float* h_capital = NULL; float* d_close_prices_vet = NULL; float* d_num_desvios = NULL; float* d_capital = NULL; int number_of_threads; if (argc != 1) { return EXIT_FAILURE; } else { pFile = fopen(DADOS_PATH , "rb" ); if (pFile==NULL) { cout<<"Could not open file.\n" << "File error",stderr; return EXIT_FAILURE; } else { fread(&h_tamanho,sizeof(int),1,pFile); h_close_prices_vet = (float*) malloc(sizeof(float)*h_tamanho); fread(h_close_prices_vet,sizeof(float), h_tamanho,pFile); fclose (pFile); number_of_threads = ((TAMANHO_MAXIMO - TAMANHO_INICIAL)/TAMANHO_INCREMENTO + 1) * ((DESVIOS_MAXIMO - DESVIOS_INICIAL)/DESVIOS_INCREMENTO + 1); h_tamanhos_janelas = (int*) malloc(sizeof(int)*number_of_threads); h_num_desvios = (float*) malloc(sizeof(float)*number_of_threads); h_capital = (float*) malloc(sizeof(float)*number_of_threads); int i = 0; for (int w = TAMANHO_INICIAL; w <= (int) TAMANHO_MAXIMO ; w+=TAMANHO_INCREMENTO) { for (float d = DESVIOS_INICIAL; d <= (int) DESVIOS_MAXIMO; d+=DESVIOS_INCREMENTO) { h_tamanhos_janelas[i] = w; h_num_desvios[i] = d; h_capital[i] = CAPITAL_INICIAL; i++; } } float elapsedTimeGPU = 0.0f, elapsedTimeMEM = 0.0f; cudaEvent_t e_Start,e_Stop; size_t free = 0, total = 0; cout << endl << "CUDA runtime versao: " << CUDART_VERSION << endl; //Reset no device CHECK_ERROR(cudaDeviceReset()); //Verificando espaço 
livre em memória CHECK_ERROR(cudaMemGetInfo(&free, &total)); cout << "Memoria livre: " << (free / 1024 / 1024) << " MB\n"; cout << "Memoria total: " << (total / 1024 / 1024) << " MB\n"; //Criando eventos CHECK_ERROR(cudaEventCreate(&e_Start)); CHECK_ERROR(cudaEventCreate(&e_Stop)); //Aloca memória GPU CHECK_ERROR(cudaMalloc((void**) &d_close_prices_vet, h_tamanho * sizeof(float))); CHECK_ERROR(cudaMalloc((void**) &d_tamanhos_janelas, number_of_threads * sizeof(int))); CHECK_ERROR(cudaMalloc((void**) &d_num_desvios, number_of_threads * sizeof(float))); CHECK_ERROR(cudaMalloc((void**) &d_capital, number_of_threads * sizeof(float))); //Copiando CPU --> GPU CHECK_ERROR(cudaEventRecord(e_Start, cudaEventDefault)); CHECK_ERROR(cudaMemcpy(d_close_prices_vet, h_close_prices_vet, h_tamanho * sizeof(float), cudaMemcpyHostToDevice)); CHECK_ERROR(cudaMemcpy(d_tamanhos_janelas, h_tamanhos_janelas, number_of_threads * sizeof(float), cudaMemcpyHostToDevice)); CHECK_ERROR(cudaMemcpy(d_num_desvios, h_num_desvios, number_of_threads * sizeof(float), cudaMemcpyHostToDevice)); CHECK_ERROR(cudaMemcpy(d_capital, h_capital, number_of_threads * sizeof(float), cudaMemcpyHostToDevice)); CHECK_ERROR(cudaEventRecord(e_Stop, cudaEventDefault)); CHECK_ERROR(cudaEventSynchronize(e_Stop)); CHECK_ERROR(cudaEventElapsedTime(&elapsedTimeMEM, e_Start, e_Stop)); CHECK_ERROR(cudaEventRecord(e_Start, cudaEventDefault)); int numBlocks = 2; //???????????????????????? int threadsPerBlock = number_of_threads / numBlocks; //???????????????????????? trader_thread<<<numBlocks, threadsPerBlock>>> (d_close_prices_vet, d_tamanhos_janelas, d_num_desvios,d_capital); CHECK_ERROR(cudaDeviceSynchronize()); CHECK_ERROR(cudaEventRecord(e_Stop, cudaEventDefault)); CHECK_ERROR(cudaEventSynchronize(e_Stop)); CHECK_ERROR(cudaEventElapsedTime(&elapsedTimeGPU, e_Start, e_Stop)); //Copiando GPU --> CPU float elapsedTime = 0.0f; CHECK_ERROR(cudaEventRecord(e_Start, cudaEventDefault)); CHECK_ERROR(cudaMemcpy(h_capital, d_capital, number_of_threads * sizeof(float), cudaMemcpyDeviceToHost)); CHECK_ERROR(cudaEventRecord(e_Stop, cudaEventDefault)); CHECK_ERROR(cudaEventSynchronize(e_Stop)); CHECK_ERROR(cudaEventElapsedTime(&elapsedTime, e_Start, e_Stop)); elapsedTimeMEM += elapsedTime; cout << endl << "Tempo gasto [MEM]: " << elapsedTimeMEM << " (ms)" << endl; cout << endl << "Tempo gasto [GPU]: " << elapsedTimeGPU << " (ms)" << endl; cout << endl << "Resultado: "<< endl; CHECK_ERROR(cudaFree(d_close_prices_vet)); CHECK_ERROR(cudaFree(d_tamanhos_janelas)); CHECK_ERROR(cudaFree(d_num_desvios)); CHECK_ERROR(cudaFree(d_capital)); delete [] h_close_prices_vet; delete [] h_tamanhos_janelas; delete [] h_num_desvios; delete [] h_capital; // free(h_close_prices_vet); // free(h_tamanhos_janelas); // free(h_num_desvios); // free(h_capital); cout << "FIM" << endl; return EXIT_SUCCESS; } } }
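The event-based timing used above follows a record / work / record / synchronize / elapsed pattern. A minimal sketch of that pattern, timing a single host-to-device copy on the default stream; the helper name and arguments are hypothetical.

#include <cuda_runtime.h>

// Returns the elapsed milliseconds for one cudaMemcpy, measured with events.
float time_memcpy_ms(float* d_dst, const float* h_src, size_t bytes)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);                               // enqueue start marker
    cudaMemcpy(d_dst, h_src, bytes, cudaMemcpyHostToDevice); // timed work
    cudaEventRecord(stop, 0);                                // enqueue stop marker
    cudaEventSynchronize(stop);                              // wait until stop has happened

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}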
ad513f1910e60f81d9a7088a5a07dc7ae9893d72.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "blurnaive.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *matrix = NULL; hipMalloc(&matrix, XSIZE*YSIZE); float *output = NULL; hipMalloc(&output, XSIZE*YSIZE); int firstFrame = 1; int numFrames = 1; int frameCount = 1; int max = 1; int length = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( blurnaive), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix,output,firstFrame,numFrames,frameCount,max,length); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( blurnaive), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix,output,firstFrame,numFrames,frameCount,max,length); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( blurnaive), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix,output,firstFrame,numFrames,frameCount,max,length); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
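The harness above pads XSIZE and YSIZE up to multiples of the block dimensions with increment loops before computing the grid. An alternative, shown here only as a sketch, is the usual ceil-division grid computation; it assumes the kernel bounds-checks threads that fall outside the image, which is a different design choice from the padding approach used above.

#include <cuda_runtime.h>

// Covers an XSIZE x YSIZE domain with BLOCKX x BLOCKY blocks without padding;
// out-of-range threads are expected to return early inside the kernel.
dim3 make_grid(int XSIZE, int YSIZE, int BLOCKX, int BLOCKY)
{
    return dim3((XSIZE + BLOCKX - 1) / BLOCKX,
                (YSIZE + BLOCKY - 1) / BLOCKY);
}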
ad513f1910e60f81d9a7088a5a07dc7ae9893d72.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "blurnaive.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *matrix = NULL; cudaMalloc(&matrix, XSIZE*YSIZE); float *output = NULL; cudaMalloc(&output, XSIZE*YSIZE); int firstFrame = 1; int numFrames = 1; int frameCount = 1; int max = 1; int length = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); blurnaive<<<gridBlock,threadBlock>>>(matrix,output,firstFrame,numFrames,frameCount,max,length); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { blurnaive<<<gridBlock,threadBlock>>>(matrix,output,firstFrame,numFrames,frameCount,max,length); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { blurnaive<<<gridBlock,threadBlock>>>(matrix,output,firstFrame,numFrames,frameCount,max,length); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
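A small variant of the steady_clock timing loop above, sketched with a device synchronization before each clock read so the measured interval covers kernel execution rather than only launch enqueueing; the helper name and callable interface are assumptions. Usage would look like mean_kernel_time_us([&]{ blurnaive<<<gridBlock,threadBlock>>>(matrix,output,firstFrame,numFrames,frameCount,max,length); }, 1000).

#include <chrono>
#include <cuda_runtime.h>

// Times 'iters' launches of a kernel-launching callable and returns the mean
// microseconds per launch, synchronizing once before and once after the loop.
template <typename LaunchFn>
float mean_kernel_time_us(LaunchFn launch, int iters)
{
    cudaDeviceSynchronize();
    auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < iters; ++i) launch();
    cudaDeviceSynchronize();
    auto end = std::chrono::steady_clock::now();
    std::chrono::duration<float, std::micro> us = end - start;
    return us.count() / iters;
}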
551554fc26ad4d038ee46203f08942417e0a0db5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <random> #define E 2.71828182845904523536 #define EPSILON 0.0005 #define INPUT_SIZE 2 #define HIDDEN_SIZE 10 #define OUTPUT_SIZE 1 #define NUM_HIDDEN_LAYERS 4 #define TRAIN_SET_SIZE 100 __device__ float sigmoid(float x) { return 1.0 / (1.0 + powf(E, -x)); } __device__ float sigmoidPrime(float x) { float sig = sigmoid(x); return sig * (1 - sig); } struct Neuron { float2 weights_signal[HIDDEN_SIZE]; float bias; float input; float signal; float delta; } float randomFloat0to1() { return ((float) rand()) / ((float) RAND_MAX); } Neuron createNeuron() { float2 weights_signal[HIDDEN_SIZE]; for (int i = 0; i < HIDDEN_SIZE; i++) { weights_signal[i] = float2(randomFloat0to1(), 0.0); } return Neuron{ weights_signal, 1.0, 0.0, 0.0, 0.0 }; } Neuron* createNeuronLayer(int size) { Neuron* layer = (Neuron*) malloc(sizeof(Neuron) * size); for (int i = 0; i < HIDDEN_SIZE; i++) { layer[i] = createNeuron(); } return layer; } __kernel__ void feedInput(Neuron* input_layer, float* inputs[INPUT_SIZE], int data_point, int input_layer_size) { int local_index = threadIdx.x; int global_index = local_index + (blockDim.x * blockIdx.x); if (global_index >= layer_size) { return; } float input = inputs[data_point][global_index]; input_layer[i].signal = input; } __kernel__ void computeLayer(Neuron* layer, Neuron* prev_layer, int layer_size, int prev_layer_size) { int local_index = threadIdx.x; int global_index = local_index + (blockDim.x * blockIdx.x); if (global_index >= layer_size) { return; } Neuron neuron = layer[global_index]; float sum = neuron.bias; for (int i = 0; i < prev_layer_size; i++) { float input = prev_layer[i].signal; float weight_x_signal = input * neuron.weights_signal[i].x; neuron.weights[i].y = weight_x_signal; sum += weight_x_signal; } neuron.input = sum; neuron.signal = sigmoid(sum); layer[global_index] = neuron; } __kernel__ void computeErrors(Neuron* output_layer, float* expected[OUTPUT_SIZE], float* errors, int data_point, int output_layer_size) { int local_index = threadIdx.x; int global_index = local_index + (blockDim.x * blockIdx.x); if (global_index > output_layer_size) { return; } errors[global_index] = expected[data_point][global_index] - output_layer[global_index].input; } __kernel__ void reduceAbsSum(float* nums, int array_size) { int local_index = threadIdx.x; int global_index = local_index + (blockIdx.x * blockDim.x); if (global_index >= array_size) { return; } nums[local_index] = abs(nums[local_index]); __syncthreads(); for (unsigned int offset = blockDim.x / 2; offset > 0; offset >>= 1) { if (local_index < offset) { int right = local_index + offset; if (right + global_offset < array_size) { nums[local_index] += nums[right]; } } __syncthreads(); } } __kernel__ void computeOutputDeltas(float error, Neuron* output_layer, int output_layer_size) { int local_index = threadIdx.x; int global_offset = blockDim.x * blockIdx.x; int global_index = local_index + global_offset; if (global_index >= output_layer_size) { return; } Neuron neuron = output_layer[global_index]; neuron.delta = error * sigmoidPrime(neuron.input); } __kernel__ void computeDeltas(Neuron* layer, Neuron* next_layer, int layer_size, int next_layer_size) { int local_index = threadIdx.x; int global_index = local_index + (blockDim.x * blockIdx.x); if (global_index >= layer_size) { return; } float sum_delta_weights = 0.0; for (int i = 0; i < next_layer_size; i++) { Neuron infront = next_layer[i]; sum_delta_weights += 
infront.weights_signal[global_index].y * infront.delta; } layer[global_index].delta = sigmoidPrime(sum_delta_weights); } __kernel__ void adjustWeights(Neuron* layer, int layer_size, int prev_layer_size) { int local_index = threadIdx.x; int global_index = local_index + (blockDim.x * blockIdx.x); if (global_index >= layer_size) { return; } Neuron neuron = layer[global_index]; neuron.bias += 2 * 1 * neuron.delta; for (int i = 0; i < prev_layer_size; i++) { neuron.weights_signal[i].x += 2 * neuron.weights_signal[i].y * neuron.delta; } layer[global_index] = neuron; } int main() { srand((int) time(NULL)); Neuron* h_input_layer = createNeuronLayer(INPUT_SIZE); Neuron* h_hidden_layers[NUM_HIDDEN_LAYERS]; for(int i = 0; i < NUM_HIDDEN_LAYERS; i++) { h_hidden_layers[i] = createNeuronLayer[HIDDEN_SIZE]; } Neuron* h_output_layer = createNeuronLayer(OUTPUT_SIZE); Neuron* d_input_layer; Neuron* d_hidden_layers[NUM_HIDDEN_LAYERS]; Neuron* d_output_layer; hipMalloc(&d_input_layer, sizeof(Neuron) * INPUT_SIZE); hipMalloc(&d_hidden_layers, sizeof(NEURON) * HIDDEN_SIZE * NUM_HIDDEN_LAYERS); hipMalloc(&d_output_layer, sizeof(Neuron) * OUTPUT_SIZE); hipMemcpy(d_input_layer, h_input_layer, sizeof(Neuron) * INPUT_SIZE, hipMemcpyHostToDevice); hipMemcpy(d_hidden_layers, h_hidden_layers, sizeof(Neuron) * HIDDEN_SIZE * NUM_HIDDEN_LAYERS); hipMemcpy(d_output_layer, h_output_layer, sizeof(Neuron) * OUTPUT_SIZE, hipMemcpyHostToDevice); float* h_input[INPUT_SIZE] = (float*) malloc(sizeof(float) * INPUT_SIZE * TRAIN_SET_SIZE); float* d_input[INPUT_SIZE]; hipMalloc(&d_input, sizeof(float) * INPUT_SIZE * TRAIN_SET_SIZE); for (int i = 0; i < TRAIN_SET_SIZE; i++) { for (int j = 0; j < INPUT_SIZE; j++) { h_input[i][j] = randomFloat0to1(); } } hipMemcpy(d_input, h_input, sizeof(float) * INPUT_SIZE * TRAIN_SET_SIZE, hipMemcpyHostToDevice); float* h_expected[OUTPUT_SIZE] = (float*) malloc(sizeof(float) * OUTPUT_SIZE * TRAIN_SET_SIZE); float* d_expected[OUTPUT_SIZE]; hipMalloc(&d_expected, sizeof(float) * OUTPUT_SIZE * TRAIN_SET_SIZE); for (int i = 0; i < TRAIN_SET_SIZE; i++) { float sum = 0.0; for (int j = 0; j < INPUT_SIZE; j++) { sum += h_input[i][j]; } h_expected[i] = sum; } hipMemcpy(d_expected, h_expected, sizeof(float) * OUTPUT_SIZE * TRAIN_SET_SIZE, hipMemcpyHostToDevice); float* d_errors; hipMalloc(&d_errors, sizeof(float) * OUTPUT_SIZE); float avg_error = 999.9; for(int i = 0; i < 100; i++) { printf("Average Error: %f", avg_error); avg_error = 0.0; for (int j = 0; j < 10 * TRAIN_SET_SIZE; j++) { int data_point = j % TRAIN_SET_SIZE; hipLaunchKernelGGL(( feedInput), dim3(1), dim3(2), 0, 0, d_input_layer, d_input, data_point, INPUT_SIZE); hipLaunchKernelGGL(( computeLayer), dim3(1), dim3(HIDDEN_SIZE), 0, 0, d_hidden_layers[0], d_input_layer, HIDDEN_SIZE, HIDDEN_SIZE); for (int layer_num = 1; layer_num < NUM_HIDDEN_LAYERS; layer_num++) { hipLaunchKernelGGL(( computeLayer), dim3(1), dim3(HIDDEN_SIZE), 0, 0, d_hidden_layers[layer_num], d_hidden_layers[layer_num - 1], HIDDEN_SIZE, HIDDEN_SIZE); } hipLaunchKernelGGL(( computeLayer), dim3(1), dim3(HIDDEN_SIZE), 0, 0, d_output_layer, d_hidden_layers[NUM_HIDDEN_LAYERS - 1], OUTPUT_SIZE, HIDDEN_SIZE); hipLaunchKernelGGL(( computeErrors), dim3(1), dim3(OUTPUT_SIZE), 0, 0, d_output_layer, d_expected, d_errors, data_point, OUTPUT_SIZE); hipLaunchKernelGGL(( computeOutputDeltas), dim3(1), dim3(OUTPUT_SIZE), 0, 0, d_output_layer, d_errors, OUTPUT_SIZE); hipLaunchKernelGGL(( computeDeltas), dim3(1), dim3(HIDDEN_SIZE), 0, 0, d_hidden_layers[layer_num - 1], d_output_layer, HIDDEN_SIZE, 
OUTPUT_SIZE); for (int layer_num = layer_num - 2; layer_num >= 0; layer_num--) { hipLaunchKernelGGL(( computeDeltas), dim3(1), dim3(HIDDEN_SIZE), 0, 0, d_hidden_layers[layer_num], d_hidden_layers[layer_num + 1, HIDDEN_SIZE, HIDDEN_SIZE]); } hipLaunchKernelGGL(( adjustWeights), dim3(1), dim3(HIDDEN_SIZE), 0, 0, d_output_layer, OUTPUT_SIZE, HIDDEN_SIZE); for (int layer_num = NUM_HIDDEN_LAYERS - 1; i >= 0; layer_num--) { hipLaunchKernelGGL(( adjustWeights), dim3(1), dim3(HIDDEN_SIZE), 0, 0, d_hidden_layers[layer_num], HIDDEN_SIZE, HIDDEN_SIZE); } hipLaunchKernelGGL(( reduceAbsSum), dim3(1), dim3(OUTPUT_SIZE), 0, 0, d_errors, OUTPUT_SIZE); float sum_errors = 0.0; hipMemcpy(&sum_errors, d_errors[0], sizeof(float), hipMemcpyDeviceToHost); avg_error += sum_errors; } avg_error /= 10 * TRAIN_SET_SIZE; } }
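A self-contained sketch of the per-neuron forward pass that computeLayer performs (weighted sum of the previous layer's signals plus a bias, passed through the logistic function), written against flat weight arrays instead of the Neuron struct above; the kernel name and the weights[out * prev_size + in] layout are illustrative assumptions.

#include <cuda_runtime.h>

__device__ float sigmoid_ref(float x) { return 1.0f / (1.0f + expf(-x)); }

// One thread per output neuron of a dense layer.
__global__ void dense_forward(const float* prev_signal, const float* weights,
                              const float* bias, float* out_signal,
                              int prev_size, int out_size)
{
    int o = blockIdx.x * blockDim.x + threadIdx.x;
    if (o >= out_size) return;

    float sum = bias[o];
    for (int i = 0; i < prev_size; ++i)
        sum += weights[o * prev_size + i] * prev_signal[i];

    out_signal[o] = sigmoid_ref(sum);
}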
551554fc26ad4d038ee46203f08942417e0a0db5.cu
#include <stdio.h> #include <random> #define E 2.71828182845904523536 #define EPSILON 0.0005 #define INPUT_SIZE 2 #define HIDDEN_SIZE 10 #define OUTPUT_SIZE 1 #define NUM_HIDDEN_LAYERS 4 #define TRAIN_SET_SIZE 100 __device__ float sigmoid(float x) { return 1.0 / (1.0 + powf(E, -x)); } __device__ float sigmoidPrime(float x) { float sig = sigmoid(x); return sig * (1 - sig); } struct Neuron { float2 weights_signal[HIDDEN_SIZE]; float bias; float input; float signal; float delta; } float randomFloat0to1() { return ((float) rand()) / ((float) RAND_MAX); } Neuron createNeuron() { float2 weights_signal[HIDDEN_SIZE]; for (int i = 0; i < HIDDEN_SIZE; i++) { weights_signal[i] = float2(randomFloat0to1(), 0.0); } return Neuron{ weights_signal, 1.0, 0.0, 0.0, 0.0 }; } Neuron* createNeuronLayer(int size) { Neuron* layer = (Neuron*) malloc(sizeof(Neuron) * size); for (int i = 0; i < HIDDEN_SIZE; i++) { layer[i] = createNeuron(); } return layer; } __kernel__ void feedInput(Neuron* input_layer, float* inputs[INPUT_SIZE], int data_point, int input_layer_size) { int local_index = threadIdx.x; int global_index = local_index + (blockDim.x * blockIdx.x); if (global_index >= layer_size) { return; } float input = inputs[data_point][global_index]; input_layer[i].signal = input; } __kernel__ void computeLayer(Neuron* layer, Neuron* prev_layer, int layer_size, int prev_layer_size) { int local_index = threadIdx.x; int global_index = local_index + (blockDim.x * blockIdx.x); if (global_index >= layer_size) { return; } Neuron neuron = layer[global_index]; float sum = neuron.bias; for (int i = 0; i < prev_layer_size; i++) { float input = prev_layer[i].signal; float weight_x_signal = input * neuron.weights_signal[i].x; neuron.weights[i].y = weight_x_signal; sum += weight_x_signal; } neuron.input = sum; neuron.signal = sigmoid(sum); layer[global_index] = neuron; } __kernel__ void computeErrors(Neuron* output_layer, float* expected[OUTPUT_SIZE], float* errors, int data_point, int output_layer_size) { int local_index = threadIdx.x; int global_index = local_index + (blockDim.x * blockIdx.x); if (global_index > output_layer_size) { return; } errors[global_index] = expected[data_point][global_index] - output_layer[global_index].input; } __kernel__ void reduceAbsSum(float* nums, int array_size) { int local_index = threadIdx.x; int global_index = local_index + (blockIdx.x * blockDim.x); if (global_index >= array_size) { return; } nums[local_index] = abs(nums[local_index]); __syncthreads(); for (unsigned int offset = blockDim.x / 2; offset > 0; offset >>= 1) { if (local_index < offset) { int right = local_index + offset; if (right + global_offset < array_size) { nums[local_index] += nums[right]; } } __syncthreads(); } } __kernel__ void computeOutputDeltas(float error, Neuron* output_layer, int output_layer_size) { int local_index = threadIdx.x; int global_offset = blockDim.x * blockIdx.x; int global_index = local_index + global_offset; if (global_index >= output_layer_size) { return; } Neuron neuron = output_layer[global_index]; neuron.delta = error * sigmoidPrime(neuron.input); } __kernel__ void computeDeltas(Neuron* layer, Neuron* next_layer, int layer_size, int next_layer_size) { int local_index = threadIdx.x; int global_index = local_index + (blockDim.x * blockIdx.x); if (global_index >= layer_size) { return; } float sum_delta_weights = 0.0; for (int i = 0; i < next_layer_size; i++) { Neuron infront = next_layer[i]; sum_delta_weights += infront.weights_signal[global_index].y * infront.delta; } layer[global_index].delta = 
sigmoidPrime(sum_delta_weights); }

__global__ void adjustWeights(Neuron* layer, int layer_size, int prev_layer_size) {
    int local_index = threadIdx.x;
    int global_index = local_index + (blockDim.x * blockIdx.x);
    if (global_index >= layer_size) { return; }
    Neuron neuron = layer[global_index];
    // learning rate of 2; the bias sees a constant input of 1
    neuron.bias += 2 * 1 * neuron.delta;
    for (int i = 0; i < prev_layer_size; i++) {
        // .x is the weight, .y is the signal that came through it on the forward pass
        neuron.weights_signal[i].x += 2 * neuron.weights_signal[i].y * neuron.delta;
    }
    layer[global_index] = neuron;
}

int main() {
    srand((int) time(NULL));

    Neuron* h_input_layer = createNeuronLayer(INPUT_SIZE);
    Neuron* h_hidden_layers[NUM_HIDDEN_LAYERS];
    for (int i = 0; i < NUM_HIDDEN_LAYERS; i++) {
        h_hidden_layers[i] = createNeuronLayer(HIDDEN_SIZE);
    }
    Neuron* h_output_layer = createNeuronLayer(OUTPUT_SIZE);

    Neuron* d_input_layer;
    Neuron* d_hidden_layers[NUM_HIDDEN_LAYERS];
    Neuron* d_output_layer;
    cudaMalloc(&d_input_layer, sizeof(Neuron) * INPUT_SIZE);
    // each hidden layer gets its own device allocation so d_hidden_layers[i] is a valid device pointer
    for (int i = 0; i < NUM_HIDDEN_LAYERS; i++) {
        cudaMalloc(&d_hidden_layers[i], sizeof(Neuron) * HIDDEN_SIZE);
        cudaMemcpy(d_hidden_layers[i], h_hidden_layers[i], sizeof(Neuron) * HIDDEN_SIZE, cudaMemcpyHostToDevice);
    }
    cudaMalloc(&d_output_layer, sizeof(Neuron) * OUTPUT_SIZE);
    cudaMemcpy(d_input_layer, h_input_layer, sizeof(Neuron) * INPUT_SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(d_output_layer, h_output_layer, sizeof(Neuron) * OUTPUT_SIZE, cudaMemcpyHostToDevice);

    // training inputs, stored flat as TRAIN_SET_SIZE rows of INPUT_SIZE floats
    float* h_input = (float*) malloc(sizeof(float) * INPUT_SIZE * TRAIN_SET_SIZE);
    float* d_input;
    cudaMalloc(&d_input, sizeof(float) * INPUT_SIZE * TRAIN_SET_SIZE);
    for (int i = 0; i < TRAIN_SET_SIZE; i++) {
        for (int j = 0; j < INPUT_SIZE; j++) {
            h_input[i * INPUT_SIZE + j] = randomFloat0to1();
        }
    }
    cudaMemcpy(d_input, h_input, sizeof(float) * INPUT_SIZE * TRAIN_SET_SIZE, cudaMemcpyHostToDevice);

    // expected outputs: the target for each sample is the sum of its inputs
    float* h_expected = (float*) malloc(sizeof(float) * OUTPUT_SIZE * TRAIN_SET_SIZE);
    float* d_expected;
    cudaMalloc(&d_expected, sizeof(float) * OUTPUT_SIZE * TRAIN_SET_SIZE);
    for (int i = 0; i < TRAIN_SET_SIZE; i++) {
        float sum = 0.0;
        for (int j = 0; j < INPUT_SIZE; j++) {
            sum += h_input[i * INPUT_SIZE + j];
        }
        h_expected[i * OUTPUT_SIZE] = sum;
    }
    cudaMemcpy(d_expected, h_expected, sizeof(float) * OUTPUT_SIZE * TRAIN_SET_SIZE, cudaMemcpyHostToDevice);

    float* d_errors;
    cudaMalloc(&d_errors, sizeof(float) * OUTPUT_SIZE);

    float avg_error = 999.9;
    for (int i = 0; i < 100; i++) {
        printf("Average Error: %f\n", avg_error);
        avg_error = 0.0;
        for (int j = 0; j < 10 * TRAIN_SET_SIZE; j++) {
            int data_point = j % TRAIN_SET_SIZE;
            feedInput<<<1, 2>>>(d_input_layer, d_input, data_point, INPUT_SIZE);
            // the first hidden layer reads from the input layer, so its previous-layer size is INPUT_SIZE
            computeLayer<<<1, HIDDEN_SIZE>>>(d_hidden_layers[0], d_input_layer, HIDDEN_SIZE, INPUT_SIZE);
            for (int layer_num = 1; layer_num < NUM_HIDDEN_LAYERS; layer_num++) {
                computeLayer<<<1, HIDDEN_SIZE>>>(d_hidden_layers[layer_num], d_hidden_layers[layer_num - 1], HIDDEN_SIZE, HIDDEN_SIZE);
            }
            computeLayer<<<1, HIDDEN_SIZE>>>(d_output_layer, d_hidden_layers[NUM_HIDDEN_LAYERS - 1], OUTPUT_SIZE, HIDDEN_SIZE);
            computeErrors<<<1, OUTPUT_SIZE>>>(d_output_layer, d_expected, d_errors, data_point, OUTPUT_SIZE);
            computeOutputDeltas<<<1, OUTPUT_SIZE>>>(d_output_layer, d_errors, OUTPUT_SIZE);
            // back-propagate: last hidden layer first, then the remaining hidden layers in reverse
            computeDeltas<<<1, HIDDEN_SIZE>>>(d_hidden_layers[NUM_HIDDEN_LAYERS - 1], d_output_layer, HIDDEN_SIZE, OUTPUT_SIZE);
            for (int layer_num = NUM_HIDDEN_LAYERS - 2; layer_num >= 0; layer_num--) {
                computeDeltas<<<1, HIDDEN_SIZE>>>(d_hidden_layers[layer_num], d_hidden_layers[layer_num + 1], HIDDEN_SIZE, HIDDEN_SIZE);
            }
            adjustWeights<<<1, HIDDEN_SIZE>>>(d_output_layer, OUTPUT_SIZE, HIDDEN_SIZE);
            for (int layer_num = NUM_HIDDEN_LAYERS - 1; layer_num >= 0; layer_num--) {
                adjustWeights<<<1, HIDDEN_SIZE>>>(d_hidden_layers[layer_num], HIDDEN_SIZE, HIDDEN_SIZE);
            }
            reduceAbsSum<<<1, OUTPUT_SIZE>>>(d_errors, OUTPUT_SIZE);
            float sum_errors = 0.0;
            cudaMemcpy(&sum_errors, d_errors, sizeof(float), cudaMemcpyDeviceToHost);
            avg_error += sum_errors;
        }
        avg_error /= 10 * TRAIN_SET_SIZE;
    }
    return 0;
}
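None of the kernel launches in the training loop above are checked for errors, so a bad launch configuration or an out-of-bounds access inside a kernel would only show up as a silently wrong average. A minimal launch-checking sketch that could be dropped in after each launch; the CUDA_CHECK_LAUNCH name is illustrative and not part of the original code, while the runtime calls it uses (cudaGetLastError, cudaDeviceSynchronize, cudaGetErrorString) are standard CUDA API:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Report the launch error, if any, then the first asynchronous error
// surfaced by synchronizing on the device.
#define CUDA_CHECK_LAUNCH(label)                                                    \
    do {                                                                            \
        cudaError_t err_ = cudaGetLastError();                                      \
        if (err_ == cudaSuccess) err_ = cudaDeviceSynchronize();                    \
        if (err_ != cudaSuccess) {                                                  \
            fprintf(stderr, "%s failed: %s\n", (label), cudaGetErrorString(err_));  \
            exit(EXIT_FAILURE);                                                     \
        }                                                                           \
    } while (0)

// Example placement inside the training loop:
//   computeLayer<<<1, HIDDEN_SIZE>>>(d_hidden_layers[0], d_input_layer, HIDDEN_SIZE, INPUT_SIZE);
//   CUDA_CHECK_LAUNCH("computeLayer hidden[0]");

Synchronizing after every launch serializes the loop, so a lighter variant would call only cudaGetLastError() in the inner loop and synchronize once per epoch.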
9ff05f719be283a3bbf3c502f6ba494cdf6c990c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" #include "device_launch_parameters.h" #define imin(a, b) (a < b ? a : b) const int N = 33 * 1024; const int threadsPerBlock = 256; const int blocksPerGrid = imin(32, (N + threadsPerBlock - 1) / threadsPerBlock); __global__ void dot(int size, float *a, float *b, float *c) { __shared__ float cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while (tid < size) { temp += a[tid] * b[tid]; tid += blockDim.x * gridDim.x; } cache[cacheIndex] = temp; __syncthreads(); int i = blockDim.x / 2; while (i != 0) { if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } if (cacheIndex == 0) c[blockIdx.x] = cache[0]; } float malloc_test(int size) { hipEvent_t start, stop; float elapsedTime; HANDLE_ERROR(hipEventCreate(&start)); HANDLE_ERROR(hipEventCreate(&stop)); float *a, *b, c, *partial_c; float *dev_a, *dev_b, *dev_partial_c; a = (float *)malloc(N * sizeof(float)); b = (float *)malloc(N * sizeof(float)); partial_c = (float *)malloc(blocksPerGrid * sizeof(float)); for (int i = 0; i < N; i++) { a[i] = i; b[i] = i * 2; } HANDLE_ERROR(hipMalloc((void **)&dev_a, N * sizeof(float))); HANDLE_ERROR(hipMalloc((void **)&dev_b, N * sizeof(float))); HANDLE_ERROR(hipMalloc((void **)&dev_partial_c, blocksPerGrid * sizeof(float))); HANDLE_ERROR(hipEventRecord(start, 0)); HANDLE_ERROR(hipMemcpy(dev_a, a, N * sizeof(float), hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(dev_b, b, N * sizeof(float), hipMemcpyHostToDevice)); dot << <blocksPerGrid, threadsPerBlock >> >(size, dev_a, dev_b, dev_partial_c); HANDLE_ERROR(hipMemcpy(partial_c, dev_partial_c, blocksPerGrid * sizeof(float), hipMemcpyDeviceToHost)); HANDLE_ERROR(hipEventRecord(stop, 0)); HANDLE_ERROR(hipEventSynchronize(stop)); HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, start, stop)); c = 0; for (int i = 0; i < blocksPerGrid; i++) { c += partial_c[i]; } printf("Value calculated: %f\n", c); HANDLE_ERROR(hipFree(dev_a)); HANDLE_ERROR(hipFree(dev_b)); HANDLE_ERROR(hipFree(dev_partial_c)); free(a); free(b); free(partial_c); HANDLE_ERROR(hipEventDestroy(start)); HANDLE_ERROR(hipEventDestroy(stop)); return elapsedTime; } float host_alloc_test(int size) { hipEvent_t start, stop; float elapsedTime; HANDLE_ERROR(hipEventCreate(&start)); HANDLE_ERROR(hipEventCreate(&stop)); float *a, *b, c, *partial_c; float *dev_a, *dev_b, *dev_partial_c; HANDLE_ERROR(hipHostMalloc((void **)&a, size * sizeof(float), hipHostMallocWriteCombined | hipHostMallocMapped)); HANDLE_ERROR(hipHostMalloc((void **)&b, size * sizeof(float), hipHostMallocWriteCombined | hipHostMallocMapped)); HANDLE_ERROR(hipHostMalloc((void **)&partial_c, size * sizeof(float), hipHostMallocWriteCombined | hipHostMallocMapped)); for (int i = 0; i < N; i++) { a[i] = i; b[i] = i * 2; } HANDLE_ERROR(hipHostGetDevicePointer(&dev_a, a, 0)); HANDLE_ERROR(hipHostGetDevicePointer(&dev_b, b, 0)); HANDLE_ERROR(hipHostGetDevicePointer(&dev_partial_c, partial_c, 0)); HANDLE_ERROR(hipEventRecord(start, 0)); dot << <blocksPerGrid, threadsPerBlock >> >(size, dev_a, dev_b, dev_partial_c); HANDLE_ERROR(hipDeviceSynchronize()); HANDLE_ERROR(hipEventRecord(stop, 0)); HANDLE_ERROR(hipEventSynchronize(stop)); HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, start, stop)); c = 0; for (int i = 0; i < blocksPerGrid; i++) { c += partial_c[i]; } printf("Value calculated: %f\n", c); HANDLE_ERROR(hipHostFree(a)); 
HANDLE_ERROR(hipHostFree(b)); HANDLE_ERROR(hipHostFree(partial_c)); HANDLE_ERROR(hipEventDestroy(start)); HANDLE_ERROR(hipEventDestroy(stop)); return elapsedTime; } int main(void) { hipDeviceProp_t prop; int whichDevice; HANDLE_ERROR(hipGetDevice(&whichDevice)); HANDLE_ERROR(hipGetDeviceProperties(&prop, whichDevice)); if (prop.integrated == 1) printf("Integrated device\n"); else printf("Discrete device\n"); if (prop.canMapHostMemory != 1) { printf("Device can not map memory.\n"); return 0; } HANDLE_ERROR(hipSetDeviceFlags(hipDeviceMapHost)); float elapsedTime; elapsedTime = malloc_test(N); printf("Time using hipMalloc: %3.1f ms\n", elapsedTime); elapsedTime = host_alloc_test(N); printf("Time using hipHostMalloc: %3.1f ms\n", elapsedTime); return 0; }
9ff05f719be283a3bbf3c502f6ba494cdf6c990c.cu
#include "common.h" #include "device_launch_parameters.h" #define imin(a, b) (a < b ? a : b) const int N = 33 * 1024; const int threadsPerBlock = 256; const int blocksPerGrid = imin(32, (N + threadsPerBlock - 1) / threadsPerBlock); __global__ void dot(int size, float *a, float *b, float *c) { __shared__ float cache[threadsPerBlock]; int tid = threadIdx.x + blockIdx.x * blockDim.x; int cacheIndex = threadIdx.x; float temp = 0; while (tid < size) { temp += a[tid] * b[tid]; tid += blockDim.x * gridDim.x; } cache[cacheIndex] = temp; __syncthreads(); int i = blockDim.x / 2; while (i != 0) { if (cacheIndex < i) cache[cacheIndex] += cache[cacheIndex + i]; __syncthreads(); i /= 2; } if (cacheIndex == 0) c[blockIdx.x] = cache[0]; } float malloc_test(int size) { cudaEvent_t start, stop; float elapsedTime; HANDLE_ERROR(cudaEventCreate(&start)); HANDLE_ERROR(cudaEventCreate(&stop)); float *a, *b, c, *partial_c; float *dev_a, *dev_b, *dev_partial_c; a = (float *)malloc(N * sizeof(float)); b = (float *)malloc(N * sizeof(float)); partial_c = (float *)malloc(blocksPerGrid * sizeof(float)); for (int i = 0; i < N; i++) { a[i] = i; b[i] = i * 2; } HANDLE_ERROR(cudaMalloc((void **)&dev_a, N * sizeof(float))); HANDLE_ERROR(cudaMalloc((void **)&dev_b, N * sizeof(float))); HANDLE_ERROR(cudaMalloc((void **)&dev_partial_c, blocksPerGrid * sizeof(float))); HANDLE_ERROR(cudaEventRecord(start, 0)); HANDLE_ERROR(cudaMemcpy(dev_a, a, N * sizeof(float), cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(dev_b, b, N * sizeof(float), cudaMemcpyHostToDevice)); dot << <blocksPerGrid, threadsPerBlock >> >(size, dev_a, dev_b, dev_partial_c); HANDLE_ERROR(cudaMemcpy(partial_c, dev_partial_c, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaEventRecord(stop, 0)); HANDLE_ERROR(cudaEventSynchronize(stop)); HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop)); c = 0; for (int i = 0; i < blocksPerGrid; i++) { c += partial_c[i]; } printf("Value calculated: %f\n", c); HANDLE_ERROR(cudaFree(dev_a)); HANDLE_ERROR(cudaFree(dev_b)); HANDLE_ERROR(cudaFree(dev_partial_c)); free(a); free(b); free(partial_c); HANDLE_ERROR(cudaEventDestroy(start)); HANDLE_ERROR(cudaEventDestroy(stop)); return elapsedTime; } float host_alloc_test(int size) { cudaEvent_t start, stop; float elapsedTime; HANDLE_ERROR(cudaEventCreate(&start)); HANDLE_ERROR(cudaEventCreate(&stop)); float *a, *b, c, *partial_c; float *dev_a, *dev_b, *dev_partial_c; HANDLE_ERROR(cudaHostAlloc((void **)&a, size * sizeof(float), cudaHostAllocWriteCombined | cudaHostAllocMapped)); HANDLE_ERROR(cudaHostAlloc((void **)&b, size * sizeof(float), cudaHostAllocWriteCombined | cudaHostAllocMapped)); HANDLE_ERROR(cudaHostAlloc((void **)&partial_c, size * sizeof(float), cudaHostAllocWriteCombined | cudaHostAllocMapped)); for (int i = 0; i < N; i++) { a[i] = i; b[i] = i * 2; } HANDLE_ERROR(cudaHostGetDevicePointer(&dev_a, a, 0)); HANDLE_ERROR(cudaHostGetDevicePointer(&dev_b, b, 0)); HANDLE_ERROR(cudaHostGetDevicePointer(&dev_partial_c, partial_c, 0)); HANDLE_ERROR(cudaEventRecord(start, 0)); dot << <blocksPerGrid, threadsPerBlock >> >(size, dev_a, dev_b, dev_partial_c); HANDLE_ERROR(cudaThreadSynchronize()); HANDLE_ERROR(cudaEventRecord(stop, 0)); HANDLE_ERROR(cudaEventSynchronize(stop)); HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop)); c = 0; for (int i = 0; i < blocksPerGrid; i++) { c += partial_c[i]; } printf("Value calculated: %f\n", c); HANDLE_ERROR(cudaFreeHost(a)); HANDLE_ERROR(cudaFreeHost(b)); HANDLE_ERROR(cudaFreeHost(partial_c)); 
HANDLE_ERROR(cudaEventDestroy(start)); HANDLE_ERROR(cudaEventDestroy(stop)); return elapsedTime; } int main(void) { cudaDeviceProp prop; int whichDevice; HANDLE_ERROR(cudaGetDevice(&whichDevice)); HANDLE_ERROR(cudaGetDeviceProperties(&prop, whichDevice)); if (prop.integrated == 1) printf("Integrated device\n"); else printf("Discrete device\n"); if (prop.canMapHostMemory != 1) { printf("Device can not map memory.\n"); return 0; } HANDLE_ERROR(cudaSetDeviceFlags(cudaDeviceMapHost)); float elapsedTime; elapsedTime = malloc_test(N); printf("Time using cudaMalloc: %3.1f ms\n", elapsedTime); elapsedTime = host_alloc_test(N); printf("Time using cudaHostAlloc: %3.1f ms\n", elapsedTime); return 0; }
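Both copies of this program fill a[i] = i and b[i] = 2*i, so the value they print has a closed form and can be sanity-checked rather than taken on faith: the exact dot product is the sum over i from 0 to N-1 of i*(2*i) = 2*(N-1)*N*(2*N-1)/6 = (N-1)*N*(2*N-1)/3. A one-line host-side reference, assuming the same N = 33 * 1024 defined in the file; the GPU result will differ slightly because the partial sums are accumulated in single precision:

double expected = (double)(N - 1) * N * (2.0 * N - 1.0) / 3.0;  // exact dot product for a[i] = i, b[i] = 2*i
printf("Expected value: %.1f\n", expected);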
362dc9963dd6e7cc98eadae2702f5f43c667afe5.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include <stdlib.h> #include <errno.h> #include "util.hpp" using namespace std; #ifndef RAW_PERFORMANCE_NUM_THREADS # error RAW_PERFORMANCE_NUM_THREADS not defined, fix your config.mk #endif #ifndef RAW_PERFORMANCE_NUM_RANDOMS_PER_THREAD # error RAW_PERFORMANCE_NUM_RANDOMS_PER_THREAD not defined, fix your config.mk #endif #ifndef RAW_PERFORMANCE_NUM_KERNEL_CALLS # error RAW_PERFORMANCE_NUM_KERNEL_CALLS not defined, fix your config.mk #endif #define BLOCKSIZE 256 unsigned int *random_numbers_dev; enum { NUM_THREADS = RAW_PERFORMANCE_NUM_THREADS }; #include RANDOM_NUMBER_GENERATOR //enum { NUM_RANDOMS = NUM_THREADS * RNG::num_randoms_per_call }; void initialize_cuda() { choose_device(); CUDA_CHECK_ERROR(hipMalloc(&random_numbers_dev, sizeof(*random_numbers_dev) * NUM_THREADS)); } __global__ void kernel_empty() { } __global__ void kernel_generate_randoms(const RNG::DevParameters params, unsigned int *random_numbers) { int idx = blockDim.x * blockIdx.x + threadIdx.x; RNG::RNGState rng_state; RNG::initialize(&params, &rng_state); unsigned int rnds[RNG::num_randoms_per_call]; unsigned int sum = 0; for(int i = 0; i < RAW_PERFORMANCE_NUM_RANDOMS_PER_THREAD / RNG::num_randoms_per_call; i++) { RNG::generate_random_numbers(&rng_state, rnds, 1, RNG::num_randoms_per_call); // use the random numbers to prevent the compiler from trying to be smart #pragma unroll for(int j = 0; j < RNG::num_randoms_per_call; j++) sum ^= rnds[j]; } RNG::finalize(&params, &rng_state); random_numbers[idx] = sum; } int main(int argc, char **argv) { struct timeval tv1, tv2; initialize_cuda(); RNG::DevParameters rng_parameters; RNG::initialize_rng(&rng_parameters); dim3 block(BLOCKSIZE, 1, 1); dim3 grid(RAW_PERFORMANCE_NUM_THREADS / BLOCKSIZE, 1, 1); hipLaunchKernelGGL(( kernel_empty), dim3(grid), dim3(block) , 0, 0, ); CUDA_CHECK_ERROR(hipGetLastError()); hipLaunchKernelGGL(( kernel_generate_randoms), dim3(grid), dim3(block) , 0, 0, rng_parameters, random_numbers_dev); CUDA_CHECK_ERROR(hipGetLastError()); hipDeviceSynchronize(); gettimeofday(&tv1, NULL); // multiple kernel calls to prevent hitting the maximum kernel launch time for(int i = 0; i < RAW_PERFORMANCE_NUM_KERNEL_CALLS; i++) { hipLaunchKernelGGL(( kernel_generate_randoms), dim3(grid), dim3(block) , 0, 0, rng_parameters, random_numbers_dev); CUDA_CHECK_ERROR(hipGetLastError()); } hipDeviceSynchronize(); gettimeofday(&tv2, NULL); cout << ((tv2.tv_sec - tv1.tv_sec) * 1000000 + tv2.tv_usec - tv1.tv_usec) << endl; return 0; }
362dc9963dd6e7cc98eadae2702f5f43c667afe5.cu
#include <iostream> #include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> #include <sys/time.h> #include <stdlib.h> #include <errno.h> #include "util.hpp" using namespace std; #ifndef RAW_PERFORMANCE_NUM_THREADS # error RAW_PERFORMANCE_NUM_THREADS not defined, fix your config.mk #endif #ifndef RAW_PERFORMANCE_NUM_RANDOMS_PER_THREAD # error RAW_PERFORMANCE_NUM_RANDOMS_PER_THREAD not defined, fix your config.mk #endif #ifndef RAW_PERFORMANCE_NUM_KERNEL_CALLS # error RAW_PERFORMANCE_NUM_KERNEL_CALLS not defined, fix your config.mk #endif #define BLOCKSIZE 256 unsigned int *random_numbers_dev; enum { NUM_THREADS = RAW_PERFORMANCE_NUM_THREADS }; #include RANDOM_NUMBER_GENERATOR //enum { NUM_RANDOMS = NUM_THREADS * RNG::num_randoms_per_call }; void initialize_cuda() { choose_device(); CUDA_CHECK_ERROR(cudaMalloc(&random_numbers_dev, sizeof(*random_numbers_dev) * NUM_THREADS)); } __global__ void kernel_empty() { } __global__ void kernel_generate_randoms(const RNG::DevParameters params, unsigned int *random_numbers) { int idx = blockDim.x * blockIdx.x + threadIdx.x; RNG::RNGState rng_state; RNG::initialize(&params, &rng_state); unsigned int rnds[RNG::num_randoms_per_call]; unsigned int sum = 0; for(int i = 0; i < RAW_PERFORMANCE_NUM_RANDOMS_PER_THREAD / RNG::num_randoms_per_call; i++) { RNG::generate_random_numbers(&rng_state, rnds, 1, RNG::num_randoms_per_call); // use the random numbers to prevent the compiler from trying to be smart #pragma unroll for(int j = 0; j < RNG::num_randoms_per_call; j++) sum ^= rnds[j]; } RNG::finalize(&params, &rng_state); random_numbers[idx] = sum; } int main(int argc, char **argv) { struct timeval tv1, tv2; initialize_cuda(); RNG::DevParameters rng_parameters; RNG::initialize_rng(&rng_parameters); dim3 block(BLOCKSIZE, 1, 1); dim3 grid(RAW_PERFORMANCE_NUM_THREADS / BLOCKSIZE, 1, 1); kernel_empty<<< grid, block >>>(); CUDA_CHECK_ERROR(cudaGetLastError()); kernel_generate_randoms<<< grid, block >>> (rng_parameters, random_numbers_dev); CUDA_CHECK_ERROR(cudaGetLastError()); cudaThreadSynchronize(); gettimeofday(&tv1, NULL); // multiple kernel calls to prevent hitting the maximum kernel launch time for(int i = 0; i < RAW_PERFORMANCE_NUM_KERNEL_CALLS; i++) { kernel_generate_randoms<<< grid, block >>> (rng_parameters, random_numbers_dev); CUDA_CHECK_ERROR(cudaGetLastError()); } cudaThreadSynchronize(); gettimeofday(&tv2, NULL); cout << ((tv2.tv_sec - tv1.tv_sec) * 1000000 + tv2.tv_usec - tv1.tv_usec) << endl; return 0; }
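The benchmark above prints only the elapsed wall-clock time in microseconds for the RAW_PERFORMANCE_NUM_KERNEL_CALLS timed launches; turning that number into a throughput figure is a short host-side calculation. A sketch under the following assumptions: elapsed_microseconds is the value the program prints, the variable names are illustrative, and the count mirrors the kernel's integer division, which drops any remainder of RAW_PERFORMANCE_NUM_RANDOMS_PER_THREAD that is not a multiple of RNG::num_randoms_per_call:

// randoms actually generated per thread in one timed launch
long long per_thread = (long long)(RAW_PERFORMANCE_NUM_RANDOMS_PER_THREAD / RNG::num_randoms_per_call)
                       * RNG::num_randoms_per_call;
// total randoms across all timed launches and all threads
long long total = (long long)RAW_PERFORMANCE_NUM_KERNEL_CALLS * RAW_PERFORMANCE_NUM_THREADS * per_thread;
double randoms_per_second = (double)total / (elapsed_microseconds * 1.0e-6);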
8a6bb09f981f98ed518cc23d7261a9c74b1cda53.hip
// !!! This is a file automatically generated by hipify!!! /* nvcc collider31.cu -o collider31 -lglut -lm -lGLU -lGL --use_fast_math -O3 -Xptxas "-warn-lmem-usage -warn-spills" -arch=sm_52 nvcc collider31.cu -o collider31 -lglut -lm -lGLU -lGL -prec-div=false -prec-sqrt=false -ftz=true -O3 nvcc collider31.cu -o collider31nofast -lglut -lm -lGLU -lGL -O3 */ #include <GL/glut.h> #include <GL/glu.h> #include <GL/gl.h> #include <math.h> #include <stdio.h> #include "stdio.h" #include <stdlib.h> #include <hip/hip_runtime.h> #include <string.h> #include <dirent.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <signal.h> #include <iostream> #include <fstream> #include <sstream> #include <time.h> #include <iostream> #include <fstream> #include <sstream> #include <stdio.h> #include <stdlib.h> using namespace std; #define BLOCKSIZE 256 #define NUMBEROFEARTHRADIFORMOONMATERIAL 20.0 //Global to hold the time of the collision double RunTime = 0.0; //Continue and branch run globals int TypeOfRun = 0; char RootFolderName[256] = ""; double AddedRunTime = 0; //Globals for files FILE *RunStatsFile; FILE *PosAndVelFile; FILE *StartPosAndVelFile; FILE *ContinueRunStatsFile; FILE *ContinueRunPosAndVelFile; //Globals to hold positions, velocities, and forces on both the GPU and CPU float4 *PlaceHolder; //needs to be hard defined for cuda float4 *Pos, *Vel, *Force; float4 *Pos_DEV0, *Vel_DEV0, *Force_DEV0; float4 *PosFstHalf_0, *VelFstHalf_0, *ForceFstHalf_0; float4 *PosSndHalf_0, *VelSndHalf_0; float4 *PosFstHalf_1, *VelFstHalf_1; float4 *PosSndHalf_1, *VelSndHalf_1, *ForceSndHalf_1; //Globals to setup the kernals dim3 BlockConfig, GridConfig; int NumberOfGpus, Gpu0Access, Gpu1Access; //Globals to be set by the setRunParameters function double UnitLength = -1.0; double Diameter = -1.0; double UnitMass = -1.0; double MassSi = -1.0; double MassFe = -1.0; double MassOfBody1 = -1.0; double MassOfBody2 = -1.0; double UnitTime = -1.0; double Gravity = -1.0; int NSi = -1; int NSi1 = -1; int NSi2 = -1; int NFe = -1; int NFe1 = -1; int NFe2 = -1; //Globals to be set by the findEarthAndMoon function int NumberOfEarthElements = -1; int NumberOfMoonElements = -1; int *EarthIndex; int *MoonIndex; //Global to trigger printing collision stats to the screen int PrintCollisionStats = 0; //Global to trigger printing continue stats to the screen int PrintContinueStats = 0; //Globals for the run to be read in from the runSetup file float3 InitialPosition1; float3 InitialPosition2; float3 InitialVelocity1; float3 InitialVelocity2; float4 InitialSpin1; float4 InitialSpin2; float3 BranchPosition1; float3 BranchPosition2; float3 BranchVelocity1; float3 BranchVelocity2; float4 BranchSpin1; float4 BranchSpin2; double FractionEarthMassOfBody1; //Mass of body 1 as a proportion of the Earth's mass double FractionEarthMassOfBody2; //Mass of body 2 as a proportion of the Earth's mass double FractionFeBody1; //Percent by mass of iron in body 1 double FractionSiBody1; //Percent by mass of silicate in body 1 double FractionFeBody2; //Percent by mass of iron in body 2 double FractionSiBody2; //Percent by mass of silicate in body 2 float DampRateBody1; float DampRateBody2; float EnergyTargetBody1; float EnergyTargetBody2; int N; float TotalRunTime; float BranchRunTime; float DampTime; float DampRestTime; float EnergyAdjustmentTime; float EnergyAdjustmentRestTime; float SpinRestTime; float BranchSpinRestTime; float SetupTime; float Dt; int WriteToFile; int 
RecordRate; double DensityFe; //Density of iron in kilograms meterE-3 (Canup science 2012) double DensitySi; //Density of silcate in kilograms meterE-3 (Canup science 2012) double KFe; double KSi; double KRFe; double KRSi; double SDFe; double SDSi; int DrawRate; int DrawQuality; int UseMultipleGPU; double UniversalGravity; //Universal gravitational constant in kilometersE3 kilogramsE-1 and secondsE-2 (??? source) double MassOfEarth; double MassOfMoon; double AngularMomentumEarthMoonSystem; double EarthAxialTilt; double MoonAxialTilt; double Pi; void readRunParameters() { ifstream data; string name; if(TypeOfRun == 0) { data.open("RunSetup"); } else if(TypeOfRun == 1) { data.open("RootSetup"); } else if(TypeOfRun == 2) { data.open("RunSetup"); if(data.is_open() != 1) data.open("RootSetup"); } else { printf("\nTSU Error bad TypeOfRun selected\n"); exit(0); } if(data.is_open() == 1) { getline(data,name,'='); data >> InitialPosition1.x; getline(data,name,'='); data >> InitialPosition1.y; getline(data,name,'='); data >> InitialPosition1.z; getline(data,name,'='); data >> InitialPosition2.x; getline(data,name,'='); data >> InitialPosition2.y; getline(data,name,'='); data >> InitialPosition2.z; getline(data,name,'='); data >> InitialVelocity1.x; getline(data,name,'='); data >> InitialVelocity1.y; getline(data,name,'='); data >> InitialVelocity1.z; getline(data,name,'='); data >> InitialVelocity2.x; getline(data,name,'='); data >> InitialVelocity2.y; getline(data,name,'='); data >> InitialVelocity2.z; getline(data,name,'='); data >> InitialSpin1.x; getline(data,name,'='); data >> InitialSpin1.y; getline(data,name,'='); data >> InitialSpin1.z; getline(data,name,'='); data >> InitialSpin1.w; getline(data,name,'='); data >> InitialSpin2.x; getline(data,name,'='); data >> InitialSpin2.y; getline(data,name,'='); data >> InitialSpin2.z; getline(data,name,'='); data >> InitialSpin2.w; getline(data,name,'='); data >> FractionEarthMassOfBody1; getline(data,name,'='); data >> FractionEarthMassOfBody2; getline(data,name,'='); data >> FractionFeBody1; getline(data,name,'='); data >> FractionSiBody1; getline(data,name,'='); data >> FractionFeBody2; getline(data,name,'='); data >> FractionSiBody2; getline(data,name,'='); data >> DampRateBody1; getline(data,name,'='); data >> DampRateBody2; getline(data,name,'='); data >> EnergyTargetBody1; getline(data,name,'='); data >> EnergyTargetBody2; getline(data,name,'='); data >> N; getline(data,name,'='); data >> TotalRunTime; getline(data,name,'='); data >> DampTime; getline(data,name,'='); data >> DampRestTime; getline(data,name,'='); data >> EnergyAdjustmentTime; getline(data,name,'='); data >> EnergyAdjustmentRestTime; getline(data,name,'='); data >> SpinRestTime; getline(data,name,'='); data >> Dt; getline(data,name,'='); data >> WriteToFile; getline(data,name,'='); data >> RecordRate; getline(data,name,'='); data >> DensityFe; getline(data,name,'='); data >> DensitySi; getline(data,name,'='); data >> KFe; getline(data,name,'='); data >> KSi; getline(data,name,'='); data >> KRFe; getline(data,name,'='); data >> KRSi; getline(data,name,'='); data >> SDFe; getline(data,name,'='); data >> SDSi; getline(data,name,'='); data >> DrawRate; getline(data,name,'='); data >> DrawQuality; getline(data,name,'='); data >> UseMultipleGPU; getline(data,name,'='); data >> UniversalGravity; getline(data,name,'='); data >> MassOfEarth; getline(data,name,'='); data >> MassOfMoon; getline(data,name,'='); data >> AngularMomentumEarthMoonSystem; getline(data,name,'='); data >> 
EarthAxialTilt; getline(data,name,'='); data >> MoonAxialTilt; getline(data,name,'='); data >> Pi; } else { printf("\nTSU Error could not open run or root Setup file\n"); exit(0); } data.close(); } void readBranchParameters() { ifstream data; string name; data.open("BranchSetup"); if(data.is_open() == 1) { getline(data,name,'='); data >> BranchPosition1.x; getline(data,name,'='); data >> BranchPosition1.y; getline(data,name,'='); data >> BranchPosition1.z; getline(data,name,'='); data >> BranchPosition2.x; getline(data,name,'='); data >> BranchPosition2.y; getline(data,name,'='); data >> BranchPosition2.z; getline(data,name,'='); data >> BranchVelocity1.x; getline(data,name,'='); data >> BranchVelocity1.y; getline(data,name,'='); data >> BranchVelocity1.z; getline(data,name,'='); data >> BranchVelocity2.x; getline(data,name,'='); data >> BranchVelocity2.y; getline(data,name,'='); data >> BranchVelocity2.z; getline(data,name,'='); data >> BranchSpin1.x; getline(data,name,'='); data >> BranchSpin1.y; getline(data,name,'='); data >> BranchSpin1.z; getline(data,name,'='); data >> BranchSpin1.w; getline(data,name,'='); data >> BranchSpin2.x; getline(data,name,'='); data >> BranchSpin2.y; getline(data,name,'='); data >> BranchSpin2.z; getline(data,name,'='); data >> BranchSpin2.w; getline(data,name,'='); data >> BranchSpinRestTime; getline(data,name,'='); data >> BranchRunTime; } else { printf("\nTSU Error could not open Branch Setup file\n"); exit(0); } data.close(); } void setRunParameters() { double massBody1 = MassOfEarth*FractionEarthMassOfBody1; double massBody2 = MassOfEarth*FractionEarthMassOfBody2; if(FractionFeBody1 + FractionSiBody1 != 1.0) { printf("\nTSU Error: body1 fraction don't add to 1\n"); exit(0); } if(FractionFeBody2 + FractionSiBody2 != 1.0) { printf("\nTSU Error: body2 fraction don't add to 1\n"); exit(0); } double totalMassOfFeBody1 = FractionFeBody1*massBody1; double totalMassOfSiBody1 = FractionSiBody1*massBody1; double totalMassOfFeBody2 = FractionFeBody2*massBody2; double totalMassOfSiBody2 = FractionSiBody2*massBody2; double totalMassOfFe = totalMassOfFeBody1 + totalMassOfFeBody2; double totalMassOfSi = totalMassOfSiBody1 + totalMassOfSiBody2; double massFe; double massSi; double diameterOfElement; if(totalMassOfFe != 0.0) NFe = (double)N*(DensitySi/DensityFe)/(totalMassOfSi/totalMassOfFe + DensitySi/DensityFe); else NFe = 0; NSi = N - NFe; if(totalMassOfFe != 0.0) NFe1 = NFe*totalMassOfFeBody1/totalMassOfFe; else NFe1 = 0; NFe2 = NFe - NFe1; if(totalMassOfSi != 0.0) NSi1 = NSi*totalMassOfSiBody1/totalMassOfSi; else NSi1 = 0; NSi2 = NSi - NSi1; if(NFe != 0) massFe = totalMassOfFe/NFe; else massFe = 0.0; if(NSi != 0) massSi = totalMassOfSi/NSi; else massSi = 0.0; if(NSi != 0) diameterOfElement = pow((6.0*massSi)/(Pi*DensitySi), (1.0/3.0)); else diameterOfElement = pow((6.0*massFe)/(Pi*DensityFe), (1.0/3.0)); UnitLength = diameterOfElement; if(NSi != 0) UnitMass = massSi; else UnitMass = massFe; if(NSi != 0) UnitTime = sqrt((6.0*massSi*(double)NSi)/(UniversalGravity*Pi*DensitySi*totalMassOfSi)); else if(NFe != 0) UnitTime = sqrt((6.0*massFe*(double)NFe)/(UniversalGravity*Pi*DensityFe*totalMassOfFe)); else { printf("TSU Error: No mass, function setRunParameters\n"); exit(0); } //In this system this is what sets the length unit, the time unit, and the mass unit. 
Diameter = 1.0; Gravity = 1.0; if(NSi != 0) { MassSi = 1.0; MassFe = DensityFe/DensitySi; } else if(NFe != 0) { MassFe = 1.0; } else { printf("TSU Error: No mass, function setRunParameters\n"); exit(0); } //Setting mass of bodies in our units MassOfBody1 = massBody1/UnitMass; MassOfBody2 = massBody2/UnitMass; //Putting Initial positions into our units InitialPosition1.x /= UnitLength; InitialPosition1.y /= UnitLength; InitialPosition1.z /= UnitLength; InitialPosition2.x /= UnitLength; InitialPosition2.y /= UnitLength; InitialPosition2.z /= UnitLength; //Putting Initial Velocities into our units InitialVelocity1.x *= UnitTime/UnitLength; InitialVelocity1.y *= UnitTime/UnitLength; InitialVelocity1.z *= UnitTime/UnitLength; InitialVelocity2.x *= UnitTime/UnitLength; InitialVelocity2.y *= UnitTime/UnitLength; InitialVelocity2.z *= UnitTime/UnitLength; //Putting Initial Angule Velocities into our units InitialSpin1.w *= UnitTime/3600.0; InitialSpin2.w *= UnitTime/3600.0; //Putting Run times into our units TotalRunTime *= 3600.0/UnitTime; DampTime *= 3600.0/UnitTime; DampRestTime *= 3600.0/UnitTime; EnergyAdjustmentTime *= 3600.0/UnitTime; EnergyAdjustmentRestTime *= 3600.0/UnitTime; SpinRestTime *= 3600.0/UnitTime; SetupTime = (DampTime + DampRestTime + EnergyAdjustmentTime + EnergyAdjustmentRestTime + SpinRestTime); KFe *= UnitTime*UnitTime*UnitLength/UnitMass; KSi *= UnitTime*UnitTime*UnitLength/UnitMass; } void setBranchParameters() { //Putting Branch positions into our units BranchPosition1.x /= UnitLength; BranchPosition1.y /= UnitLength; BranchPosition1.z /= UnitLength; BranchPosition2.x /= UnitLength; BranchPosition2.y /= UnitLength; BranchPosition2.z /= UnitLength; //Putting Branch Velocities into our units BranchVelocity1.x *= UnitTime/UnitLength; BranchVelocity1.y *= UnitTime/UnitLength; BranchVelocity1.z *= UnitTime/UnitLength; BranchVelocity2.x *= UnitTime/UnitLength; BranchVelocity2.y *= UnitTime/UnitLength; BranchVelocity2.z *= UnitTime/UnitLength; //Putting Branch Angule Velocities into our units BranchSpin1.w *= UnitTime/3600.0; BranchSpin2.w *= UnitTime/3600.0; //Putting Branch Run times into our units BranchSpinRestTime *= 3600.0/UnitTime; BranchRunTime *= 3600.0/UnitTime; } //Globals for setting up the viewing window int XWindowSize = 2500; int YWindowSize = 2500; double Near = 0.2; double Far = 600.0; double ViewBoxSize = 300.0; GLdouble Left = -ViewBoxSize; GLdouble Right = ViewBoxSize; GLdouble Bottom = -ViewBoxSize; GLdouble Top = ViewBoxSize; GLdouble Front = ViewBoxSize; GLdouble Back = -ViewBoxSize; //Direction here your eye is located location double EyeX = 100.0; double EyeY = 100.0; double EyeZ = 100.0; //Where you are looking double CenterX = 0.0; double CenterY = 0.0; double CenterZ = 0.0; //Up vector for viewing double UpX = 0.0; double UpY = 1.0; double UpZ = 0.0; void createFolderForNewRun() { //Create output folder to store run parameters and run positions and velocities time_t t = time(0); struct tm * now = localtime( & t ); int month = now->tm_mon + 1, day = now->tm_mday, curTimeHour = now->tm_hour, curTimeMin = now->tm_min; stringstream smonth, sday, stimeHour, stimeMin; smonth << month; sday << day; stimeHour << curTimeHour; stimeMin << curTimeMin; string monthday; if (curTimeMin <= 9) monthday = smonth.str() + "-" + sday.str() + "-" + stimeHour.str() + ":0" + stimeMin.str(); else monthday = smonth.str() + "-" + sday.str() + "-" + stimeHour.str() + ":" + stimeMin.str(); string foldernametemp = "Run:" + monthday; const char *foldername = 
foldernametemp.c_str(); mkdir(foldername , S_IRWXU|S_IRWXG|S_IRWXO); chdir(foldername); //Copying the RunSetup file into the run folder FILE *runSetupIn; FILE *runSetupOut; long sizeOfFile; char * buffer; runSetupIn = fopen("../RunSetup", "rb"); fseek (runSetupIn , 0 , SEEK_END); sizeOfFile = ftell (runSetupIn); rewind (runSetupIn); buffer = (char*) malloc (sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, runSetupIn); runSetupOut = fopen("RunSetup", "wb"); fwrite (buffer, 1, sizeOfFile, runSetupOut); fclose(runSetupIn); fclose(runSetupOut); free (buffer); } void createFolderForBranchRun(const char* rootFolder) { //Create output folder to store run parameters and run positions and velocities time_t t = time(0); struct tm * now = localtime( & t ); int month = now->tm_mon + 1, day = now->tm_mday, curTimeHour = now->tm_hour, curTimeMin = now->tm_min; stringstream smonth, sday, stimeHour, stimeMin; smonth << month; sday << day; stimeHour << curTimeHour; stimeMin << curTimeMin; string monthday; if (curTimeMin <= 9) monthday = smonth.str() + "-" + sday.str() + "-" + stimeHour.str() + ":0" + stimeMin.str(); else monthday = smonth.str() + "-" + sday.str() + "-" + stimeHour.str() + ":" + stimeMin.str(); string foldernametemp = "BranchRun:" + monthday; const char *foldername = foldernametemp.c_str(); mkdir(foldername , S_IRWXU|S_IRWXG|S_IRWXO); chdir(foldername); FILE *fileIn; FILE *fileOut; long sizeOfFile; char * buffer; char path[256]; //Copying the RunSetup file into the branch run folder strcpy(path, "../"); strcat(path, rootFolder); strcat(path,"/RunSetup"); fileIn = fopen(path, "rb"); if(fileIn == NULL) { printf("\n\n The RunSetup file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell (fileIn); rewind (fileIn); buffer = (char*) malloc (sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("RootSetup", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileOut); fileOut = fopen("RunSetup", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); free (buffer); //Copying the RunStatsFile file into the branch run folder strcpy(path, "../"); strcat(path, rootFolder); strcat(path,"/RunStats"); fileIn = fopen(path, "rb"); if(fileIn == NULL) { printf("\n\n The RunStats file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell (fileIn); rewind (fileIn); buffer = (char*) malloc (sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("RootRunStats", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); free (buffer); //Copying the Branch Positions and Velocities file into the branch run folder strcpy(path, "../"); strcat(path, rootFolder); strcat(path,"/StartPosAndVel"); fileIn = fopen(path, "rb"); if(fileIn == NULL) { printf("\n\n The StartPosAndVel file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell (fileIn); rewind (fileIn); buffer = (char*) malloc (sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("RootStartPosAndVel", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); free (buffer); //Copying the Branch setup file into the branch run folder strcpy(path, "../"); strcat(path,"BranchSetup"); fileIn = fopen(path, "rb"); if(fileIn == NULL) { printf("\n\n The BranchSetup file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell (fileIn); rewind (fileIn); buffer = (char*) malloc (sizeof(char)*sizeOfFile); 
fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("BranchSetup", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); free (buffer); } void openNewRunFiles() { RunStatsFile = fopen("RunStats", "wb"); PosAndVelFile = fopen("PosAndVel", "wb"); StartPosAndVelFile = fopen("StartPosAndVel", "wb"); ContinueRunStatsFile = fopen("ContinueRunStats", "wb"); ContinueRunPosAndVelFile = fopen("ContinueRunPosAndVel", "wb"); } void openBranchRunFiles() { RunStatsFile = fopen("RunStats", "wb"); PosAndVelFile = fopen("PosAndVel", "wb"); StartPosAndVelFile = fopen("StartPosAndVel", "wb"); ContinueRunStatsFile = fopen("ContinueRunStats", "wb"); ContinueRunPosAndVelFile = fopen("ContinueRunPosAndVel", "wb"); } void openContinueRunFiles() { RunStatsFile = fopen("RunStats", "wb"); PosAndVelFile = fopen("PosAndVel", "ab"); //fseek(PosAndVelFile,0,SEEK_END); ContinueRunStatsFile = fopen("ContinueRunStats", "wb"); ContinueRunPosAndVelFile = fopen("ContinueRunPosAndVel", "wb"); } void recordSetupStats() { float mag; fprintf(RunStatsFile, "The conversion parameters to take you to and from our units to the real world units follow\n"); fprintf(RunStatsFile, "\nOur length unit is this many kilometers: UnitLength = %f", UnitLength); fprintf(RunStatsFile, "\nOur mass unit is this many kilograms: UnitMass = %e", UnitMass); fprintf(RunStatsFile, "\nOur time unit is this many seconds: UnitTime = %f\n", UnitTime); fprintf(RunStatsFile, "\nThe initail statistics for this run in our units follow\n"); fprintf(RunStatsFile, "\nDiameter of an element: Diameter = %f", Diameter); fprintf(RunStatsFile, "\nGravity in our units: Gravity = %f", Gravity); fprintf(RunStatsFile, "\nThe mass of a silicate element: MassSi = %f", MassSi); fprintf(RunStatsFile, "\nThe mass of an iron element: MassFe = %f\n", MassFe); fprintf(RunStatsFile, "\nThe push back strength of iron: KFe = %f", KFe); fprintf(RunStatsFile, "\nThe push back strength of silicate: KSi = %f\n", KSi); fprintf(RunStatsFile, "\nThe mass of body one: MassOfBody1 = %f", MassOfBody1); fprintf(RunStatsFile, "\nThe mass of body two: MassOfBody2 = %f\n", MassOfBody2); fprintf(RunStatsFile, "\nThe initial position of body one: (%f, %f, %f)", InitialPosition1.x, InitialPosition1.y, InitialPosition1.z); fprintf(RunStatsFile, "\nThe initial position of body two: (%f, %f, %f)\n", InitialPosition2.x, InitialPosition2.y, InitialPosition2.z); fprintf(RunStatsFile, "\nThe initial velocity of body one: (%f, %f, %f)", InitialVelocity1.x, InitialVelocity1.y, InitialVelocity1.z); fprintf(RunStatsFile, "\nThe initial velocity of body two: (%f, %f, %f)\n", InitialVelocity2.x, InitialVelocity2.y, InitialVelocity2.z); mag = sqrt(InitialSpin1.x*InitialSpin1.x + InitialSpin1.y*InitialSpin1.y + InitialSpin1.z*InitialSpin1.z); fprintf(RunStatsFile, "\nThe initial spin in revolutions per time unit of body one: (%f, %f, %f, %f)", InitialSpin1.x/mag, InitialSpin1.y/mag, InitialSpin1.z/mag, InitialSpin1.w); mag = sqrt(InitialSpin2.x*InitialSpin2.x + InitialSpin2.y*InitialSpin2.y + InitialSpin2.z*InitialSpin2.z); fprintf(RunStatsFile, "\nThe initial spin in revolutions per time unit of body two: (%f, %f, %f, %f)\n", InitialSpin2.x/mag, InitialSpin2.y/mag, InitialSpin2.z/mag, InitialSpin2.w); fprintf(RunStatsFile, "\nTotal number of elements: N = %d", N); fprintf(RunStatsFile, "\nTotal number of iron elements: NFe = %d", NFe); fprintf(RunStatsFile, "\nTotal number of silicate elements: NSi = %d", NSi); fprintf(RunStatsFile, "\nTotal number of iron elements in body1: NFe1 = %d", 
NFe1); fprintf(RunStatsFile, "\nTotal number of silicate elements in body1: NSi1 = %d", NSi1); fprintf(RunStatsFile, "\nTotal number of iron elements in body2 NFe2: = %d", NFe2); fprintf(RunStatsFile, "\nTotal number of silicate elements in body2: NSi2 = %d\n", NSi2); fprintf(RunStatsFile, "\nTime step in our units: Dt = %f", Dt); fprintf(RunStatsFile, "\nRecord rate: RecordRate = %d", RecordRate); fprintf(RunStatsFile, "\nTotal run time in our units: TotalRunTime = %f\n", TotalRunTime); fprintf(RunStatsFile, "\nDamp time in our units: DampTime = %f", DampTime); fprintf(RunStatsFile, "\nDamp rest time in our units: DampRestTime = %f", DampRestTime); fprintf(RunStatsFile, "\nEnergy adjustment time in our units: EnergyAdjustmentTime = %f", EnergyAdjustmentTime); fprintf(RunStatsFile, "\nEnergy adjustment rest time in our units: EnergyAdjustmentRestTime = %f", EnergyAdjustmentRestTime); fprintf(RunStatsFile, "\nSpin rest time in our units: SpinRestTime = %f", SpinRestTime); fprintf(RunStatsFile, "\nTotal setup time in our units: SetupTime = %f\n", SetupTime); } //Creating structures to hold constants needed in the kernals struct forceSeperateKernalConstantsStruct { float GMassFeFe; float GMassFeSi; float KFeFe; float KSiSi; float KFeSi; float KRFeFe; float KRSiSi; float KRFeSi; float KRMix; float ShellBreakFe; float ShellBreakSi; float ShellBreakFeSi1; float ShellBreakFeSi2; int boarder1; int boarder2; int boarder3; }; struct forceCollisionKernalConstantsStruct { float GMassFeFe; float GMassFeSi; float KFeFe; float KSiSi; float KFeSi; float KRFeFe; float KRSiSi; float KRFeSi; float KRMix; float ShellBreakFe; float ShellBreakSi; float ShellBreakFeSi1; float ShellBreakFeSi2; int NFe; }; struct moveSeperateKernalConstantsStruct { float Dt; float DtOverMassFe; float DtOverMassSi; int boarder1; int boarder2; int boarder3; }; struct moveCollisionKernalConstantsStruct { float Dt; float DtOverMassFe; float DtOverMassSi; int NFe; }; //Globals to hold kernal constants forceSeperateKernalConstantsStruct ForceSeperateConstant; forceCollisionKernalConstantsStruct ForceCollisionConstant; moveSeperateKernalConstantsStruct MoveSeperateConstant; moveCollisionKernalConstantsStruct MoveCollisionConstant; void loadKernalConstantStructures() { //Force kernal seperate ForceSeperateConstant.GMassFeFe = Gravity*MassFe*MassFe; ForceSeperateConstant.GMassFeSi = Gravity*MassFe*MassSi; ForceSeperateConstant.KFeFe = 2.0*KFe; ForceSeperateConstant.KSiSi = 2.0*KSi; ForceSeperateConstant.KFeSi = KFe + KSi; ForceSeperateConstant.KRFeFe = 2.0*KFe*KRFe; ForceSeperateConstant.KRSiSi = 2.0*KSi*KRSi; ForceSeperateConstant.KRFeSi = KFe*KRFe + KSi*KRSi; if(SDFe >= SDSi) ForceSeperateConstant.KRMix = KFe + KSi*KRSi; else ForceSeperateConstant.KRMix = KFe*KRFe + KSi; ForceSeperateConstant.ShellBreakFe = Diameter - Diameter*SDFe; ForceSeperateConstant.ShellBreakSi = Diameter - Diameter*SDSi; if(SDFe >= SDSi) { ForceSeperateConstant.ShellBreakFeSi1 = Diameter - Diameter*SDSi; ForceSeperateConstant.ShellBreakFeSi2 = Diameter - Diameter*SDFe; } else { ForceSeperateConstant.ShellBreakFeSi1 = Diameter - Diameter*SDFe; ForceSeperateConstant.ShellBreakFeSi2 = Diameter - Diameter*SDSi; } ForceSeperateConstant.boarder1 = NFe1; ForceSeperateConstant.boarder2 = NFe1 + NSi1; ForceSeperateConstant.boarder3 = NFe1 + NSi1 + NFe2; //Force kernal Earth Moon System ForceCollisionConstant.GMassFeFe = Gravity*MassFe*MassFe; ForceCollisionConstant.GMassFeSi = Gravity*MassFe*MassSi; ForceCollisionConstant.KFeFe = 2.0*KFe; ForceCollisionConstant.KSiSi = 
2.0*KSi; ForceCollisionConstant.KFeSi = KFe + KSi; ForceCollisionConstant.KRFeFe = 2.0*KFe*KRFe; ForceCollisionConstant.KRSiSi = 2.0*KSi*KRSi; ForceCollisionConstant.KRFeSi = KFe*KRFe + KSi*KRSi; if(SDFe >= SDSi) ForceCollisionConstant.KRMix = KFe + KSi*KRSi; else ForceCollisionConstant.KRMix = KFe*KRFe + KSi; ForceCollisionConstant.ShellBreakFe = Diameter - Diameter*SDFe; ForceCollisionConstant.ShellBreakSi = Diameter - Diameter*SDSi; if(SDFe >= SDSi) { ForceCollisionConstant.ShellBreakFeSi1 = Diameter - Diameter*SDSi; ForceCollisionConstant.ShellBreakFeSi2 = Diameter - Diameter*SDFe; } else { ForceCollisionConstant.ShellBreakFeSi1 = Diameter - Diameter*SDFe; ForceCollisionConstant.ShellBreakFeSi2 = Diameter - Diameter*SDSi; } ForceCollisionConstant.NFe = NFe; //Move kernal seperate MoveSeperateConstant.Dt = Dt; MoveSeperateConstant.DtOverMassFe = Dt/MassFe; MoveSeperateConstant.DtOverMassSi = Dt/MassSi; MoveSeperateConstant.boarder1 = NFe1; MoveSeperateConstant.boarder2 = NSi1 + NFe1; MoveSeperateConstant.boarder3 = NFe1 + NSi1 + NFe2; //Move kernal Earth Moon System MoveCollisionConstant.Dt = Dt; MoveCollisionConstant.DtOverMassSi = Dt/MassSi; MoveCollisionConstant.DtOverMassFe = Dt/MassFe; MoveCollisionConstant.NFe = NFe; } void errorCheck(const char *message) { hipError_t error; error = hipGetLastError(); if(error != hipSuccess) { printf("\n CUDA ERROR: %s = %s\n", message, hipGetErrorString(error)); exit(0); } } void allocateCPUMemory() { PlaceHolder = (float4*)malloc(N*sizeof(float4)); Pos = (float4*)malloc(N*sizeof(float4)); Vel = (float4*)malloc(N*sizeof(float4)); Force = (float4*)malloc(N*sizeof(float4)); } void checkSetupForErrors() { if(N%BLOCKSIZE != 0) { printf("\nTSU Error: Number of Particles is not a multiple of the block size \n\n"); exit(0); } } void deviceSetupSeperate() { BlockConfig.x = BLOCKSIZE; BlockConfig.y = 1; BlockConfig.z = 1; GridConfig.x = (N-1)/BlockConfig.x + 1; GridConfig.y = 1; GridConfig.z = 1; hipMalloc((void**)&Pos_DEV0, N *sizeof(float4)); errorCheck("hipMalloc Pos"); hipMalloc((void**)&Vel_DEV0, N *sizeof(float4)); errorCheck("hipMalloc Vel"); hipMalloc((void**)&Force_DEV0, N *sizeof(float4)); errorCheck("hipMalloc Force"); } void deviceSetupCollision() { hipGetDeviceCount(&NumberOfGpus); printf("\n***** You have %d GPUs available\n", NumberOfGpus); errorCheck("hipGetDeviceCount"); hipDeviceCanAccessPeer(&Gpu0Access,0,1); errorCheck("cudaDeviceCanAccessPeer0"); hipDeviceCanAccessPeer(&Gpu1Access,1,0); errorCheck("cudaDeviceCanAccessPeer1"); if(1 < NumberOfGpus && UseMultipleGPU == 1) { printf("\n***** You will be using %d GPUs\n", NumberOfGpus); if(Gpu0Access == 0) { printf("\nTSU Error: Device0 can not do peer to peer\n"); } if(Gpu1Access == 0) { printf("\nTSU Error: Device1 can not do peer to peer\n"); } hipDeviceEnablePeerAccess(1,0); errorCheck("hipDeviceEnablePeerAccess"); BlockConfig.x = BLOCKSIZE; BlockConfig.y = 1; BlockConfig.z = 1; GridConfig.x = ((N/2)-1)/BlockConfig.x + 1; GridConfig.y = 1; GridConfig.z = 1; hipSetDevice(0); errorCheck("cudaSetDevice0"); hipMalloc( (void**)&PosFstHalf_0, (N/2)*sizeof(float4) ); errorCheck("hipMalloc PFH0"); hipMalloc( (void**)&PosSndHalf_0, (N/2)*sizeof(float4) ); errorCheck("hipMalloc PSH0"); hipMalloc( (void**)&VelFstHalf_0, (N/2)*sizeof(float4) ); errorCheck("hipMalloc VFH0"); hipMalloc( (void**)&VelSndHalf_0, (N/2)*sizeof(float4) ); errorCheck("hipMalloc VSH0"); hipMalloc( (void**)&ForceFstHalf_0, (N/2)*sizeof(float4) ); errorCheck("hipMalloc FFH0"); hipSetDevice(1); errorCheck("cudaSetDevice1"); 
hipMalloc( (void**)&PosFstHalf_1, (N/2)*sizeof(float4) ); errorCheck("hipMalloc PFH1"); hipMalloc( (void**)&PosSndHalf_1, (N/2)*sizeof(float4) ); errorCheck("hipMalloc PSH1"); hipMalloc( (void**)&VelFstHalf_1, (N/2)*sizeof(float4) ); errorCheck("hipMalloc VFH1"); hipMalloc( (void**)&VelSndHalf_1, (N/2)*sizeof(float4) ); errorCheck("hipMalloc VSH1"); hipMalloc( (void**)&ForceSndHalf_1, (N/2)*sizeof(float4) ); errorCheck("hipMalloc FSH1"); } else { BlockConfig.x = BLOCKSIZE; BlockConfig.y = 1; BlockConfig.z = 1; GridConfig.x = (N-1)/BlockConfig.x + 1; GridConfig.y = 1; GridConfig.z = 1; hipMalloc((void**)&Pos_DEV0, N *sizeof(float4)); errorCheck("hipMalloc P0"); hipMalloc((void**)&Vel_DEV0, N *sizeof(float4)); errorCheck("hipMalloc V0"); hipMalloc((void**)&Force_DEV0, N *sizeof(float4)); errorCheck("hipMalloc F0"); } } void cleanUpSeperate() { hipFree(Pos_DEV0); hipFree(Vel_DEV0); hipFree(Force_DEV0); fclose(StartPosAndVelFile); } void cleanUpCollision() { fclose(RunStatsFile); fclose(PosAndVelFile); fclose(ContinueRunStatsFile); fclose(ContinueRunPosAndVelFile); if(1 < NumberOfGpus && UseMultipleGPU == 1) { hipSetDevice(0); errorCheck("hipSetDevice 0"); hipFree(PosFstHalf_0); hipFree(VelFstHalf_0); hipFree(ForceFstHalf_0); hipFree(PosSndHalf_0); hipFree(VelSndHalf_0); hipSetDevice(1); errorCheck("hipSetDevice 0"); hipFree(PosFstHalf_1); hipFree(VelFstHalf_1); hipFree(ForceSndHalf_1); hipFree(PosSndHalf_1); hipFree(VelSndHalf_1); } else { hipFree(Pos_DEV0); hipFree(Vel_DEV0); hipFree(Force_DEV0); } } void createBodies() { float radius1, radius2, stretch; float volume, mag, radius, seperation; int test, repeatCount; time_t t; printf("\nCreating the raw bodies\n"); //Creating body one //This assumes a 68% packing ratio of a shpere with shperes and then stretches it by strecth //to safely fit all the balls in. 
stretch = 2.0; volume = ((4.0/3.0)*Pi*pow(Diameter,3)*(float)NFe1/0.68)*stretch; radius1 = pow(volume/((4.0/3.0)*Pi),(1.0/3.0)); volume = ((4.0/3.0)*Pi*pow(Diameter,3)*(float)(NFe1 + NSi1)/0.68)*stretch; radius2 = pow(volume/((4.0/3.0)*Pi),(1.0/3.0)); srand((unsigned) time(&t)); repeatCount = 0; for(int i=0; i<NFe1; i++) { test = 0; while(test == 0) { Pos[i].x = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; Pos[i].y = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; Pos[i].z = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; mag = sqrt(Pos[i].x*Pos[i].x + Pos[i].y*Pos[i].y + Pos[i].z*Pos[i].z); radius = ((float)rand()/(float)RAND_MAX)*radius1; Pos[i].x *= radius/mag; Pos[i].y *= radius/mag; Pos[i].z *= radius/mag; test = 1; for(int j = 0; j < i; j++) { seperation = mag = sqrt((Pos[i].x-Pos[j].x)*(Pos[i].x-Pos[j].x) + (Pos[i].y-Pos[j].y)*(Pos[i].y-Pos[j].y) + (Pos[i].z-Pos[j].z)*(Pos[i].z-Pos[j].z)); if(seperation < Diameter) { test = 0; repeatCount++; break; } } } Pos[i].w = 0.0; Vel[i].x = 0.0; Vel[i].y = 0.0; Vel[i].z = 0.0; Vel[i].w = MassFe; } for(int i = NFe1; i < (NFe1 + NSi1); i++) { test = 0; while(test == 0) { Pos[i].x = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; Pos[i].y = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; Pos[i].z = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; mag = sqrt(Pos[i].x*Pos[i].x + Pos[i].y*Pos[i].y + Pos[i].z*Pos[i].z); radius = ((float)rand()/(float)RAND_MAX)*(radius2-radius1) + radius1 + Diameter; Pos[i].x *= radius/mag; Pos[i].y *= radius/mag; Pos[i].z *= radius/mag; test = 1; for(int j = NFe1; j < i; j++) { seperation = mag = sqrt((Pos[i].x-Pos[j].x)*(Pos[i].x-Pos[j].x) + (Pos[i].y-Pos[j].y)*(Pos[i].y-Pos[j].y) + (Pos[i].z-Pos[j].z)*(Pos[i].z-Pos[j].z)); if(seperation < Diameter) { test = 0; repeatCount++; break; } } } Pos[i].w = 1.0; Vel[i].x = 0.0; Vel[i].y = 0.0; Vel[i].z = 0.0; Vel[i].w = MassSi; } printf("\nrepeat count body one= %d", repeatCount); //Setting the body one's center of mass location for(int i=0; i<(NFe1 + NSi1); i++) { Pos[i].x += InitialPosition1.x; Pos[i].y += InitialPosition1.y; Pos[i].z += InitialPosition1.z; } //Creating body two //This assumes a 68% packing ratio of a shpere with shperes and then stretches it by strecth //to safely fit all the balls in. 
stretch = 2.0; volume = ((4.0/3.0)*Pi*pow(Diameter,3)*(float)NFe2/0.68)*stretch; radius1 = pow(volume/((4.0/3.0)*Pi),(1.0/3.0)); volume = ((4.0/3.0)*Pi*pow(Diameter,3)*(float)(NFe2 + NSi2)/0.68)*stretch; radius2 = pow(volume/((4.0/3.0)*Pi),(1.0/3.0)); srand((unsigned) time(&t)); repeatCount = 0; for(int i = (NFe1 + NSi1); i < (NFe1 + NSi1 + NFe2); i++) { test = 0; while(test == 0) { Pos[i].x = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; Pos[i].y = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; Pos[i].z = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; mag = sqrt(Pos[i].x*Pos[i].x + Pos[i].y*Pos[i].y + Pos[i].z*Pos[i].z); radius = ((float)rand()/(float)RAND_MAX)*radius1; Pos[i].x *= radius/mag; Pos[i].y *= radius/mag; Pos[i].z *= radius/mag; test = 1; for(int j = (NFe1 + NSi1); j < i; j++) { seperation = mag = sqrt((Pos[i].x-Pos[j].x)*(Pos[i].x-Pos[j].x) + (Pos[i].y-Pos[j].y)*(Pos[i].y-Pos[j].y) + (Pos[i].z-Pos[j].z)*(Pos[i].z-Pos[j].z)); if(seperation < Diameter) { test = 0; repeatCount++; break; } } } Pos[i].w = 2.0; Vel[i].x = 0.0; Vel[i].y = 0.0; Vel[i].z = 0.0; Vel[i].w = MassFe; } for(int i = (NFe1 + NSi1 + NFe2); i < N; i++) { test = 0; while(test == 0) { Pos[i].x = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; Pos[i].y = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; Pos[i].z = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; mag = sqrt(Pos[i].x*Pos[i].x + Pos[i].y*Pos[i].y + Pos[i].z*Pos[i].z); radius = ((float)rand()/(float)RAND_MAX)*(radius2-radius1) + radius1 + Diameter; Pos[i].x *= radius/mag; Pos[i].y *= radius/mag; Pos[i].z *= radius/mag; test = 1; for(int j = (NFe1 + NSi1 + NFe2); j < i; j++) { seperation = mag = sqrt((Pos[i].x-Pos[j].x)*(Pos[i].x-Pos[j].x) + (Pos[i].y-Pos[j].y)*(Pos[i].y-Pos[j].y) + (Pos[i].z-Pos[j].z)*(Pos[i].z-Pos[j].z)); if(seperation < Diameter) { test = 0; repeatCount++; break; } } } Pos[i].w = 3.0; Vel[i].x = 0.0; Vel[i].y = 0.0; Vel[i].z = 0.0; Vel[i].w = MassSi; } printf("\nrepeat count body two = %d", repeatCount); //Setting the body one's center of mass location for(int i = (NFe1 + NSi1); i < N; i++) { Pos[i].x += InitialPosition2.x; Pos[i].y += InitialPosition2.y; Pos[i].z += InitialPosition2.z; } printf("\n************************************************** Initial bodies have been formed\n"); } __global__ void getForcesSeperate(float4 *pos, float4 *vel, float4 *force, forceSeperateKernalConstantsStruct constant) { int id, ids; int i,j; int inout; float4 forceSum; float4 posMe; float4 velMe; int test; int materialSwitch; float force_mag; float4 dp; float4 dv; float r2; float r; float invr; __shared__ float4 shPos[BLOCKSIZE]; __shared__ float4 shVel[BLOCKSIZE]; id = threadIdx.x + blockDim.x*blockIdx.x; forceSum.x = 0.0f; forceSum.y = 0.0f; forceSum.z = 0.0f; posMe.x = pos[id].x; posMe.y = pos[id].y; posMe.z = pos[id].z; velMe.x = vel[id].x; velMe.y = vel[id].y; velMe.z = vel[id].z; for(j = 0; j < gridDim.x; j++) { shPos[threadIdx.x] = pos[threadIdx.x + blockDim.x*j]; shVel[threadIdx.x] = vel[threadIdx.x + blockDim.x*j]; __syncthreads(); for(i = 0; i < blockDim.x; i++) { ids = i + blockDim.x*j; if((id < constant.boarder2 && ids < constant.boarder2) || (constant.boarder2 <= id && constant.boarder2 <= ids)) { if((id < constant.boarder2) && (ids < constant.boarder2)) materialSwitch = constant.boarder1; if((constant.boarder2 <= id) && (constant.boarder2 <= ids)) materialSwitch = constant.boarder3; dp.x = shPos[i].x - posMe.x; dp.y = shPos[i].y - posMe.y; dp.z = shPos[i].z - posMe.z; r2 = dp.x*dp.x + dp.y*dp.y + dp.z*dp.z; r = sqrt(r2); if(id == ids) invr = 0; else invr = 
1.0f/r; test = 0; if(id < materialSwitch) test = 1; if(ids < materialSwitch) test++; if(test == 0) //silicate silicate force { if(1.0 <= r) { force_mag = 1.0/r2; // G = 1 and mass of silicate elemnet =1 } else if(constant.ShellBreakSi <= r) { force_mag = 1.0 - constant.KSiSi*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = 1.0 - constant.KSiSi*(1.0 - r2); else force_mag = 1.0 - constant.KRSiSi*(1.0 - r2); } } else if(test == 1) //Silicate iron force { if(1.0 <= r) { force_mag = constant.GMassFeSi/r2; } else if(constant.ShellBreakFeSi1 <= r) { force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); } else if(constant.ShellBreakFeSi2 <= r) { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRMix*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRFeSi*(1.0 - r2); } } else //Iron iron force { if(1.0 <= r) { force_mag = constant.GMassFeFe/r2; } else if(constant.ShellBreakFe <= r) { force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); else force_mag = constant.GMassFeFe - constant.KRFeFe*(1.0 - r2); } } forceSum.x += force_mag*dp.x*invr; forceSum.y += force_mag*dp.y*invr; forceSum.z += force_mag*dp.z*invr; } } force[id].x = forceSum.x; force[id].y = forceSum.y; force[id].z = forceSum.z; __syncthreads(); } } __global__ void moveBodiesSeperate(float4 *pos, float4 *vel, float4 * force, moveSeperateKernalConstantsStruct constant) { float temp; int id; id = threadIdx.x + blockDim.x*blockIdx.x; if(constant.boarder3 <= id) temp = constant.DtOverMassSi; else if(constant.boarder2 <= id) temp = constant.DtOverMassFe; else if(constant.boarder1 <= id) temp = constant.DtOverMassSi; else temp = constant.DtOverMassFe; vel[id].x += (force[id].x)*temp; vel[id].y += (force[id].y)*temp; vel[id].z += (force[id].z)*temp; pos[id].x += vel[id].x*constant.Dt; pos[id].y += vel[id].y*constant.Dt; pos[id].z += vel[id].z*constant.Dt; } __global__ void moveBodiesDampedSeperate(float4 *pos, float4 *vel, float4 * force, moveSeperateKernalConstantsStruct constant, float DampRateBody1, float DampRateBody2) { float temp; float damp; int id; id = threadIdx.x + blockDim.x*blockIdx.x; if(constant.boarder3 <= id) { temp = constant.DtOverMassSi; damp = DampRateBody2; } else if(constant.boarder2 <= id) { temp = constant.DtOverMassFe; damp = DampRateBody2; } else if(constant.boarder1 <= id) { temp = constant.DtOverMassSi; damp = DampRateBody1; } else { temp = constant.DtOverMassFe; damp = DampRateBody1; } vel[id].x += (force[id].x-damp*vel[id].x)*temp; vel[id].y += (force[id].y-damp*vel[id].y)*temp; vel[id].z += (force[id].z-damp*vel[id].z)*temp; pos[id].x += vel[id].x*constant.Dt; pos[id].y += vel[id].y*constant.Dt; pos[id].z += vel[id].z*constant.Dt; } __global__ void getForcesCollisionSingleGPU(float4 *pos, float4 *vel, float4 *force, 
forceCollisionKernalConstantsStruct constant) { int id, ids; int inout; float4 forceSum; float4 posMe; float4 velMe; int test; float force_mag; float4 dp; float4 dv; float r2; float r; float invr; __shared__ float4 shPos[BLOCKSIZE]; __shared__ float4 shVel[BLOCKSIZE]; id = threadIdx.x + blockDim.x*blockIdx.x; forceSum.x = 0.0f; forceSum.y = 0.0f; forceSum.z = 0.0f; posMe.x = pos[id].x; posMe.y = pos[id].y; posMe.z = pos[id].z; velMe.x = vel[id].x; velMe.y = vel[id].y; velMe.z = vel[id].z; for(int j=0; j < gridDim.x; j++) { shPos[threadIdx.x] = pos[threadIdx.x + blockDim.x*j]; shVel[threadIdx.x] = vel[threadIdx.x + blockDim.x*j]; __syncthreads(); for(int i=0; i < blockDim.x; i++) { ids = i + blockDim.x*j; dp.x = shPos[i].x - posMe.x; dp.y = shPos[i].y - posMe.y; dp.z = shPos[i].z - posMe.z; r2 = dp.x*dp.x + dp.y*dp.y + dp.z*dp.z; r = sqrt(r2); if(id == ids) invr = 0; else invr = 1.0f/r; test = 0; if(id < constant.NFe) test = 1; if(ids < constant.NFe) test++; if(test == 0) //Silicate silicate force { if(1.0 <= r) { force_mag = 1.0/r2; // G = 1 and mass of silicate elemnet =1 } else if(constant.ShellBreakSi <= r) { force_mag = 1.0 - constant.KSiSi*(1.0 - r2); // because D = 1 G = 1 and mass of silicate = 1 } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = 1.0 - constant.KSiSi*(1.0 - r2); else force_mag = 1.0 - constant.KRSiSi*(1.0 - r2); } } else if(test == 1) //Silicate iron force { if(1.0 <= r) { force_mag = constant.GMassFeSi/r2; } else if(constant.ShellBreakFeSi1 <= r) { force_mag = constant.GMassFeSi -constant.KFeSi*(1.0 - r2); } else if(constant.ShellBreakFeSi2 <= r) { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRMix*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRFeSi*(1.0 - r2); } } else //Iron iron force { if(1.0 <= r) { force_mag = constant.GMassFeFe/r2; } else if(constant.ShellBreakFe <= r) { force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); else force_mag = constant.GMassFeFe - constant.KRFeFe*(1.0 - r2); } } forceSum.x += force_mag*dp.x*invr; forceSum.y += force_mag*dp.y*invr; forceSum.z += force_mag*dp.z*invr; } __syncthreads(); } force[id].x = forceSum.x; force[id].y = forceSum.y; force[id].z = forceSum.z; } __global__ void moveBodiesCollisionSingleGPU(float4 *pos, float4 *vel, float4 * force, moveCollisionKernalConstantsStruct MoveCollisionConstant) { float temp; int id; id = threadIdx.x + blockDim.x*blockIdx.x; if(id < MoveCollisionConstant.NFe) temp = MoveCollisionConstant.DtOverMassFe; else temp = MoveCollisionConstant.DtOverMassSi; vel[id].x += (force[id].x)*temp; vel[id].y += (force[id].y)*temp; vel[id].z += (force[id].z)*temp; pos[id].x += vel[id].x*MoveCollisionConstant.Dt; pos[id].y += vel[id].y*MoveCollisionConstant.Dt; pos[id].z += vel[id].z*MoveCollisionConstant.Dt; } __global__ void 
getForcesCollisionDoubleGPU0(float4 *posFstHalf, float4 *posSndHalf, float4 *velFstHalf, float4 *velSndHalf, float4 *forceFstHalf, int N, forceCollisionKernalConstantsStruct constant) { int id, ids; int i,j; int inout; float4 forceSum; float4 posMe; float4 velMe; int test; float force_mag; float4 dp; float4 dv; float r2; float r; float invr; __shared__ float4 shPos[BLOCKSIZE]; __shared__ float4 shVel[BLOCKSIZE]; id = threadIdx.x + blockDim.x*blockIdx.x; forceSum.x = 0.0f; forceSum.y = 0.0f; forceSum.z = 0.0f; posMe.x = posFstHalf[id].x; posMe.y = posFstHalf[id].y; posMe.z = posFstHalf[id].z; velMe.x = velFstHalf[id].x; velMe.y = velFstHalf[id].y; velMe.z = velFstHalf[id].z; for(j=0; j < gridDim.x; j++) { shPos[threadIdx.x] = posFstHalf[threadIdx.x + blockDim.x*j]; shVel[threadIdx.x] = velFstHalf[threadIdx.x + blockDim.x*j]; __syncthreads(); for(i=0; i < blockDim.x; i++) { ids = i + blockDim.x*j; dp.x = shPos[i].x - posMe.x; dp.y = shPos[i].y - posMe.y; dp.z = shPos[i].z - posMe.z; r2 = dp.x*dp.x + dp.y*dp.y + dp.z*dp.z; r = sqrt(r2); if(id == ids) invr = 0; else invr = 1.0f/r; test = 0; if(id < constant.NFe) test = 1; if(ids < constant.NFe) test++; if(test == 0) //Silicate silicate force { if(1.0 <= r) { force_mag = 1.0/r2; // G = 1 and mass of silicate elemnet =1 } else if(constant.ShellBreakSi <= r) { force_mag = 1.0 - constant.KSiSi*(1.0 - r2); // because D = 1 G = 1 and mass of silicate = 1 } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = 1.0 - constant.KSiSi*(1.0 - r2); else force_mag = 1.0 - constant.KRSiSi*(1.0 - r2); } } else if(test == 1) //Silicate iron force { if(1.0 <= r) { force_mag = constant.GMassFeSi/r2; } else if(constant.ShellBreakFeSi1 <= r) { force_mag = constant.GMassFeSi -constant.KFeSi*(1.0 - r2); } else if(constant.ShellBreakFeSi2 <= r) { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRMix*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRFeSi*(1.0 - r2); } } else //Iron iron force { if(1.0 <= r) { force_mag = constant.GMassFeFe/r2; } else if(constant.ShellBreakFe <= r) { force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); else force_mag = constant.GMassFeFe - constant.KRFeFe*(1.0 - r2); } } forceSum.x += force_mag*dp.x*invr; forceSum.y += force_mag*dp.y*invr; forceSum.z += force_mag*dp.z*invr; } __syncthreads(); } for(j=0; j < gridDim.x; j++) { shPos[threadIdx.x] = posSndHalf[threadIdx.x + blockDim.x*j]; shVel[threadIdx.x] = velSndHalf[threadIdx.x + blockDim.x*j]; __syncthreads(); for(i=0; i < blockDim.x; i++) { ids = i + blockDim.x*j; dp.x = shPos[i].x - posMe.x; dp.y = shPos[i].y - posMe.y; dp.z = shPos[i].z - posMe.z; r2 = dp.x*dp.x + dp.y*dp.y + dp.z*dp.z; r = sqrt(r2); invr = 1.0f/r; test = 0; if(id < constant.NFe) test = 1; if(ids+(N/2) < constant.NFe) test++; if(test == 0) //Silicate silicate force { 
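			// Descriptive note on the branches below (applies to every force kernel in this file):
			// 'test' encodes the pair type, 0 = both elements silicate, 1 = one iron and one
			// silicate, 2 = both iron. Each branch uses the same piecewise law with its own
			// constants: plain 1/r^2 gravity once the elements are at least one element diameter
			// apart (r >= 1 in model units), and an overlap term of the form GMass** - K***(1 - r^2)
			// inside that, which turns repulsive as the overlap deepens. In the innermost shell the
			// sign of dp.dv (relative position dotted with relative velocity, stored in 'inout')
			// distinguishes a pair that is still compressing (inout <= 0) from one that is already
			// rebounding (inout > 0); the rebound case switches to the KR* constants, presumably so
			// that compression and rebound follow different force curves and collisions can
			// dissipate energy. Since dp points from this element toward its partner, a positive
			// force_mag is attractive and a negative one repulsive.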
if(1.0 <= r) { force_mag = 1.0/r2; // G = 1 and mass of silicate elemnet =1 } else if(constant.ShellBreakSi <= r) { force_mag = 1.0 - constant.KSiSi*(1.0 - r2); // because D = 1 G = 1 and mass of silicate = 1 } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = 1.0 - constant.KSiSi*(1.0 - r2); else force_mag = 1.0 - constant.KRSiSi*(1.0 - r2); } } else if(test == 1) //Silicate iron force { if(1.0 <= r) { force_mag = constant.GMassFeSi/r2; } else if(constant.ShellBreakFeSi1 <= r) { force_mag = constant.GMassFeSi -constant.KFeSi*(1.0 - r2); } else if(constant.ShellBreakFeSi2 <= r) { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRMix*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRFeSi*(1.0 - r2); } } else //Iron iron force { if(1.0 <= r) { force_mag = constant.GMassFeFe/r2; } else if(constant.ShellBreakFe <= r) { force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); else force_mag = constant.GMassFeFe - constant.KRFeFe*(1.0 - r2); } } forceSum.x += force_mag*dp.x*invr; forceSum.y += force_mag*dp.y*invr; forceSum.z += force_mag*dp.z*invr; } __syncthreads(); } forceFstHalf[id].x = forceSum.x; forceFstHalf[id].y = forceSum.y; forceFstHalf[id].z = forceSum.z; } __global__ void getForcesCollisionDoubleGPU1(float4 *posFstHalf, float4 *posSndHalf, float4 *velFstHalf, float4 *velSndHalf, float4 *forceSndHalf, int N, forceCollisionKernalConstantsStruct constant) { int id, ids; int i,j; int inout; float4 forceSum; float4 posMe; float4 velMe; int test; float force_mag; float4 dp; float4 dv; float r2; float r; float invr; __shared__ float4 shPos[BLOCKSIZE]; __shared__ float4 shVel[BLOCKSIZE]; id = threadIdx.x + blockDim.x*blockIdx.x; forceSum.x = 0.0f; forceSum.y = 0.0f; forceSum.z = 0.0f; posMe.x = posSndHalf[id].x; posMe.y = posSndHalf[id].y; posMe.z = posSndHalf[id].z; velMe.x = velSndHalf[id].x; velMe.y = velSndHalf[id].y; velMe.z = velSndHalf[id].z; for(j=0; j < gridDim.x; j++) { shPos[threadIdx.x] = posFstHalf[threadIdx.x + blockDim.x*j]; shVel[threadIdx.x] = velFstHalf[threadIdx.x + blockDim.x*j]; __syncthreads(); for(i=0; i < blockDim.x; i++) { ids = i + blockDim.x*j; dp.x = shPos[i].x - posMe.x; dp.y = shPos[i].y - posMe.y; dp.z = shPos[i].z - posMe.z; r2 = dp.x*dp.x + dp.y*dp.y + dp.z*dp.z; r = sqrt(r2); invr = 1.0f/r; test = 0; if(id + (N/2) < constant.NFe) test = 1; if(ids < constant.NFe) test++; if(test == 0) //Silicate silicate force { if(1.0 <= r) { force_mag = 1.0/r2; // G = 1 and mass of silicate elemnet =1 } else if(constant.ShellBreakSi <= r) { force_mag = 1.0 - constant.KSiSi*(1.0 - r2); // because D = 1 G = 1 and mass of silicate = 1 } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = 1.0 - constant.KSiSi*(1.0 - r2); 
else force_mag = 1.0 - constant.KRSiSi*(1.0 - r2); } } else if(test == 1) //Silicate iron force { if(1.0 <= r) { force_mag = constant.GMassFeSi/r2; } else if(constant.ShellBreakFeSi1 <= r) { force_mag = constant.GMassFeSi -constant.KFeSi*(1.0 - r2); } else if(constant.ShellBreakFeSi2 <= r) { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRMix*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRFeSi*(1.0 - r2); } } else //Iron iron force { if(1.0 <= r) { force_mag = constant.GMassFeFe/r2; } else if(constant.ShellBreakFe <= r) { force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); else force_mag = constant.GMassFeFe - constant.KRFeFe*(1.0 - r2); } } forceSum.x += force_mag*dp.x*invr; forceSum.y += force_mag*dp.y*invr; forceSum.z += force_mag*dp.z*invr; } __syncthreads(); } for(j=0; j < gridDim.x; j++) { shPos[threadIdx.x] = posSndHalf[threadIdx.x + blockDim.x*j]; shVel[threadIdx.x] = velSndHalf[threadIdx.x + blockDim.x*j]; __syncthreads(); for(i=0; i < blockDim.x; i++) { ids = i + blockDim.x*j ; dp.x = shPos[i].x - posMe.x; dp.y = shPos[i].y - posMe.y; dp.z = shPos[i].z - posMe.z; r2 = dp.x*dp.x + dp.y*dp.y + dp.z*dp.z; r = sqrt(r2); if(id == ids) invr = 0; else invr = 1.0f/r; test = 0; if(id + (N/2) < constant.NFe) test = 1; if(ids+(N/2) < constant.NFe) test++; if(test == 0) //Silicate silicate force { if(1.0 <= r) { force_mag = 1.0/r2; // G = 1 and mass of silicate elemnet =1 } else if(constant.ShellBreakSi <= r) { force_mag = 1.0 - constant.KSiSi*(1.0 - r2); // because D = 1 G = 1 and mass of silicate = 1 } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = 1.0 - constant.KSiSi*(1.0 - r2); else force_mag = 1.0 - constant.KRSiSi*(1.0 - r2); } } else if(test == 1) //Silicate iron force { if(1.0 <= r) { force_mag = constant.GMassFeSi/r2; } else if(constant.ShellBreakFeSi1 <= r) { force_mag = constant.GMassFeSi -constant.KFeSi*(1.0 - r2); } else if(constant.ShellBreakFeSi2 <= r) { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRMix*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRFeSi*(1.0 - r2); } } else //Iron iron force { if(1.0 <= r) { force_mag = constant.GMassFeFe/r2; } else if(constant.ShellBreakFe <= r) { force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 
0) force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); else force_mag = constant.GMassFeFe - constant.KRFeFe*(1.0 - r2); } } forceSum.x += force_mag*dp.x*invr; forceSum.y += force_mag*dp.y*invr; forceSum.z += force_mag*dp.z*invr; } __syncthreads(); } forceSndHalf[id].x = forceSum.x; forceSndHalf[id].y = forceSum.y; forceSndHalf[id].z = forceSum.z; } __global__ void moveBodiesCollisionDoubleGPU0(float4 *posFstHalf, float4 *velFstHalf, float4 * forceFstHalf, int N, moveCollisionKernalConstantsStruct constant) { float temp; int id; id = threadIdx.x + blockDim.x*blockIdx.x; if(id < constant.NFe) temp = constant.DtOverMassFe; else temp = constant.DtOverMassSi; velFstHalf[id].x += (forceFstHalf[id].x)*temp; velFstHalf[id].y += (forceFstHalf[id].y)*temp; velFstHalf[id].z += (forceFstHalf[id].z)*temp; posFstHalf[id].x += velFstHalf[id].x*constant.Dt; posFstHalf[id].y += velFstHalf[id].y*constant.Dt; posFstHalf[id].z += velFstHalf[id].z*constant.Dt; } __global__ void moveBodiesCollisionDoubleGPU1(float4 *posSndHalf, float4 *velSndHalf, float4 * forceSndHalf, int N, moveCollisionKernalConstantsStruct constant) { float temp; int id; id = threadIdx.x + blockDim.x*blockIdx.x; if(id + (N/2) < constant.NFe) temp = constant.DtOverMassFe; else temp = constant.DtOverMassSi; velSndHalf[id].x += (forceSndHalf[id].x)*temp; velSndHalf[id].y += (forceSndHalf[id].y)*temp; velSndHalf[id].z += (forceSndHalf[id].z)*temp; posSndHalf[id].x += velSndHalf[id].x*constant.Dt; posSndHalf[id].y += velSndHalf[id].y*constant.Dt; posSndHalf[id].z += velSndHalf[id].z*constant.Dt; } float3 getCenterOfMassSeperate(int scope) { float totalMass; float assumeZero = 0.0000001; float3 centerOfMass; centerOfMass.x = 0.0f; centerOfMass.y = 0.0f; centerOfMass.z = 0.0f; if(scope == 0) //entire system { totalMass = MassOfBody1 + MassOfBody2; if(totalMass < assumeZero) return(centerOfMass); for(int i = 0; i < NFe1; i++) { centerOfMass.x += Pos[i].x*MassFe; centerOfMass.y += Pos[i].y*MassFe; centerOfMass.z += Pos[i].z*MassFe; } for(int i = NFe1; i < NFe1 + NSi1; i++) { centerOfMass.x += Pos[i].x*MassSi; centerOfMass.y += Pos[i].y*MassSi; centerOfMass.z += Pos[i].z*MassSi; } for(int i = NFe1 + NSi1; i < NFe1 + NSi1 + NFe2; i++) { centerOfMass.x += Pos[i].x*MassFe; centerOfMass.y += Pos[i].y*MassFe; centerOfMass.z += Pos[i].z*MassFe; } for(int i = NFe1 + NSi1 + NFe2; i < N; i++) { centerOfMass.x += Pos[i].x*MassSi; centerOfMass.y += Pos[i].y*MassSi; centerOfMass.z += Pos[i].z*MassSi; } } else if(scope == 1) //body1 { totalMass = MassOfBody1; if(totalMass < assumeZero) return(centerOfMass); for(int i = 0; i < NFe1; i++) { centerOfMass.x += Pos[i].x*MassFe; centerOfMass.y += Pos[i].y*MassFe; centerOfMass.z += Pos[i].z*MassFe; } for(int i = NFe1; i < NFe1 + NSi1; i++) { centerOfMass.x += Pos[i].x*MassSi; centerOfMass.y += Pos[i].y*MassSi; centerOfMass.z += Pos[i].z*MassSi; } } else if(scope == 2) //body2 { totalMass = MassOfBody2; if(totalMass < assumeZero) return(centerOfMass); for(int i = NFe1 + NSi1; i < NFe1 + NSi1 + NFe2; i++) { centerOfMass.x += Pos[i].x*MassFe; centerOfMass.y += Pos[i].y*MassFe; centerOfMass.z += Pos[i].z*MassFe; } for(int i = NFe1 + NSi1 + NFe2; i < N; i++) { centerOfMass.x += Pos[i].x*MassSi; centerOfMass.y += Pos[i].y*MassSi; centerOfMass.z += Pos[i].z*MassSi; } } else { printf("\nTSU Error: In getCenterOfMassSeperate function scope invalid\n"); exit(0); } centerOfMass.x /= totalMass; centerOfMass.y /= totalMass; centerOfMass.z /= totalMass; return(centerOfMass); } float3 getLinearVelocitySeperate(int scope) { 
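	// Mass-weighted average velocity (total momentum divided by total mass), i.e. the velocity of
	// the center of mass for the requested scope: 0 = the whole two-body system, 1 = body 1 (its
	// iron core plus silicate mantle), 2 = body 2. If the requested total mass is essentially zero
	// the zero vector is returned instead of dividing by it.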
double totalMass; float assumeZero = 0.0000001; float3 linearVelocity; linearVelocity.x = 0.0f; linearVelocity.y = 0.0f; linearVelocity.z = 0.0f; if(scope == 0) //Entire system { totalMass = MassOfBody1 + MassOfBody2; if(totalMass < assumeZero) return(linearVelocity); for(int i = 0; i < NFe1; i++) { linearVelocity.x += Vel[i].x*MassFe; linearVelocity.y += Vel[i].y*MassFe; linearVelocity.z += Vel[i].z*MassFe; } for(int i = NFe1; i < NFe1 + NSi1; i++) { linearVelocity.x += Vel[i].x*MassSi; linearVelocity.y += Vel[i].y*MassSi; linearVelocity.z += Vel[i].z*MassSi; } for(int i = NFe1 + NSi1; i < NFe1 + NSi1 + NFe2; i++) { linearVelocity.x += Vel[i].x*MassFe; linearVelocity.y += Vel[i].y*MassFe; linearVelocity.z += Vel[i].z*MassFe; } for(int i = NFe1 + NSi1 + NFe2; i < N; i++) { linearVelocity.x += Vel[i].x*MassSi; linearVelocity.y += Vel[i].y*MassSi; linearVelocity.z += Vel[i].z*MassSi; } } else if(scope == 1) //body1 { totalMass = MassOfBody1; if(totalMass < assumeZero) return(linearVelocity); for(int i = 0; i < NFe1; i++) { linearVelocity.x += Vel[i].x*MassFe; linearVelocity.y += Vel[i].y*MassFe; linearVelocity.z += Vel[i].z*MassFe; } for(int i = NFe1; i < NFe1 + NSi1; i++) { linearVelocity.x += Vel[i].x*MassSi; linearVelocity.y += Vel[i].y*MassSi; linearVelocity.z += Vel[i].z*MassSi; } } else if (scope == 2) //body2 { totalMass = MassOfBody2; if(totalMass < assumeZero) return(linearVelocity); for(int i = NFe1 + NSi1; i < NFe1 + NSi1 + NFe2; i++) { linearVelocity.x += Vel[i].x*MassFe; linearVelocity.y += Vel[i].y*MassFe; linearVelocity.z += Vel[i].z*MassFe; } for(int i = NFe1 + NSi1 + NFe2; i < N; i++) { linearVelocity.x += Vel[i].x*MassSi; linearVelocity.y += Vel[i].y*MassSi; linearVelocity.z += Vel[i].z*MassSi; } } else { printf("\nTSU Error: In getLinearVelocitySeperate function scope invalid\n"); exit(0); } linearVelocity.x /= totalMass; linearVelocity.y /= totalMass; linearVelocity.z /= totalMass; return(linearVelocity); } float3 getAngularMomentumSeperate(int scope, float3 center, float3 velocity) { float3 angularMomentum; float3 r; float3 v; angularMomentum.x = 0.0f; angularMomentum.y = 0.0f; angularMomentum.z = 0.0f; if(scope == 0) //entire system { for(int i = 0; i < NFe1; i++) { r.x = Pos[i].x - center.x; r.y = Pos[i].y - center.y; r.z = Pos[i].z - center.z; v.x = Vel[i].x - velocity.x; v.y = Vel[i].y - velocity.y; v.z = Vel[i].z - velocity.z; angularMomentum.x += (r.y*v.z - r.z*v.y)*MassFe; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassFe; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassFe; } for(int i = NFe1; i < NFe1 + NSi1; i++) { r.x = Pos[i].x - center.x; r.y = Pos[i].y - center.y; r.z = Pos[i].z - center.z; v.x = Vel[i].x - velocity.x; v.y = Vel[i].y - velocity.y; v.z = Vel[i].z - velocity.z; angularMomentum.x += (r.y*v.z - r.z*v.y)*MassSi; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassSi; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassSi; } for(int i = NFe1 + NSi1; i < NFe1 + NSi1 + NFe2; i++) { r.x = Pos[i].x - center.x; r.y = Pos[i].y - center.y; r.z = Pos[i].z - center.z; v.x = Vel[i].x - velocity.x; v.y = Vel[i].y - velocity.y; v.z = Vel[i].z - velocity.z; angularMomentum.x += (r.y*v.z - r.z*v.y)*MassFe; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassFe; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassFe; } for(int i = NFe1 + NSi1 + NFe2; i < N; i++) { r.x = Pos[i].x - center.x; r.y = Pos[i].y - center.y; r.z = Pos[i].z - center.z; v.x = Vel[i].x - velocity.x; v.y = Vel[i].y - velocity.y; v.z = Vel[i].z - velocity.z; angularMomentum.x += (r.y*v.z - r.z*v.y)*MassSi; 
angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassSi; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassSi; } } else if(scope == 1) //body1 { for(int i = 0; i < NFe1; i++) { r.x = Pos[i].x - center.x; r.y = Pos[i].y - center.y; r.z = Pos[i].z - center.z; v.x = Vel[i].x - velocity.x; v.y = Vel[i].y - velocity.y; v.z = Vel[i].z - velocity.z; angularMomentum.x += (r.y*v.z - r.z*v.y)*MassFe; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassFe; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassFe; } for(int i = NFe1; i < NFe1 + NSi1; i++) { r.x = Pos[i].x - center.x; r.y = Pos[i].y - center.y; r.z = Pos[i].z - center.z; v.x = Vel[i].x - velocity.x; v.y = Vel[i].y - velocity.y; v.z = Vel[i].z - velocity.z; angularMomentum.x += (r.y*v.z - r.z*v.y)*MassSi; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassSi; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassSi; } } else if(scope == 2) //body2 { for(int i = NFe1 + NSi1; i < NFe1 + NSi1 + NFe2; i++) { r.x = Pos[i].x - center.x; r.y = Pos[i].y - center.y; r.z = Pos[i].z - center.z; v.x = Vel[i].x - velocity.x; v.y = Vel[i].y - velocity.y; v.z = Vel[i].z - velocity.z; angularMomentum.x += (r.y*v.z - r.z*v.y)*MassFe; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassFe; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassFe; } for(int i = NFe1 + NSi1 + NFe2; i < N; i++) { r.x = Pos[i].x - center.x; r.y = Pos[i].y - center.y; r.z = Pos[i].z - center.z; v.x = Vel[i].x - velocity.x; v.y = Vel[i].y - velocity.y; v.z = Vel[i].z - velocity.z; angularMomentum.x += (r.y*v.z - r.z*v.y)*MassSi; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassSi; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassSi; } } else { printf("\nTSU Error: In getAngularMomentumSeperate function scope invalid\n"); exit(0); } return(angularMomentum); } void setBodyPositionSeperate(int bodyId, float x, float y, float z) { int start, stop; if(bodyId == 1) { start = 0; stop = NFe1 + NSi1; } else if(bodyId == 2) { start = NFe1 + NSi1; stop = N; } else { printf("\nTSU Error: in setBodyPositionSeperate function bodyId invalid\n"); exit(0); } float3 centerOfMass = getCenterOfMassSeperate(bodyId); for(int i = start; i < stop; i++) { Pos[i].x += x - centerOfMass.x; Pos[i].y += y - centerOfMass.y; Pos[i].z += z - centerOfMass.z; } } void setBodyVelocitySeperate(int bodyId, float vx, float vy, float vz) { int start, stop; if(bodyId == 1) { start = 0; stop = NFe1 + NSi1; } else if(bodyId == 2) { start = NFe1 + NSi1; stop = N; } else { printf("\nTSU Error: in setBodyVelocitySeperate invalid bodyId\n"); exit(0); } float3 RandomlinearVelocity = getLinearVelocitySeperate(bodyId); for(int i = start; i < stop; i++) { Vel[i].x += vx - RandomlinearVelocity.x; Vel[i].y += vy - RandomlinearVelocity.y; Vel[i].z += vz - RandomlinearVelocity.z; } } void spinBodySeperate(int bodyId, float4 spinVector) { float3 r; //vector from center of mass to the position vector float3 centerOfMass; float3 n; //Unit vector perpendicular to the plane of spin float mag; float assumeZero = 0.0000001; int start, stop; if(bodyId == 1) { start = 0; stop = NFe1 + NSi1; } else { start = NFe1 + NSi1; stop = N; } //Making sure the spin vector is a unit vector mag = sqrt(spinVector.x*spinVector.x + spinVector.y*spinVector.y + spinVector.z*spinVector.z); if(assumeZero < mag) { spinVector.x /= mag; spinVector.y /= mag; spinVector.z /= mag; } else { printf("\nTSU Error: In spinBodySeperate. 
The spin direction vector is zero.\n"); exit(0); } centerOfMass = getCenterOfMassSeperate(bodyId); for(int i = start; i < stop; i++) { //Creating a vector from the center of mass to the point r.x = Pos[i].x - centerOfMass.x; r.y = Pos[i].y - centerOfMass.y; r.z = Pos[i].z - centerOfMass.z; float magsquared = r.x*r.x + r.y*r.y + r.z*r.z; float spinDota = spinVector.x*r.x + spinVector.y*r.y + spinVector.z*r.z; float perpendicularDistance = sqrt(magsquared - spinDota*spinDota); float perpendicularVelocity = spinVector.w*2.0*Pi*perpendicularDistance; //finding unit vector perpendicular to both the position vector and the spin vector n.x = (spinVector.y*r.z - spinVector.z*r.y); n.y = -(spinVector.x*r.z - spinVector.z*r.x); n.z = (spinVector.x*r.y - spinVector.y*r.x); mag = sqrt(n.x*n.x + n.y*n.y + n.z*n.z); if(mag != 0.0) { n.x /= mag; n.y /= mag; n.z /= mag; //Spining the element Vel[i].x += perpendicularVelocity*n.x; Vel[i].y += perpendicularVelocity*n.y; Vel[i].z += perpendicularVelocity*n.z; } } } double vectorMagnitude(float3 v) { return(sqrt(v.x*v.x + v.y*v.y + v.z*v.z)); } void recordStatsOfCreatedBodies() { float radiusOfBody; float massOfBody; float3 r; double mag, d; float3 centerOfMass; float3 linearVelocity; float3 angularMomentum; double lengthConvertion = UnitLength; double massConvertion = UnitMass; double velocityConvertion = UnitLength/UnitTime; double AngularMomentumConvertion = (UnitMass*UnitLength*UnitLength)/(UnitTime); fprintf(RunStatsFile, "\n\n\n*****************************************************************************************************\n"); fprintf(RunStatsFile, "\nThe follow are the statistics of the system right before they are released to collide in real world units\n"); fprintf(RunStatsFile, "\n\n***** Stats for the univeral system *****\n"); centerOfMass = getCenterOfMassSeperate(0); fprintf(RunStatsFile, "\nThe center of mass = (%f, %f, %f) Kilometers from (0, 0, 0)\n", centerOfMass.x*lengthConvertion, centerOfMass.y*lengthConvertion, centerOfMass.z*lengthConvertion); linearVelocity = getLinearVelocitySeperate(0); fprintf(RunStatsFile, "\nThe average linear velocity = (%f, %f, %f)", linearVelocity.x*velocityConvertion, linearVelocity.y*velocityConvertion, linearVelocity.z*velocityConvertion); mag = vectorMagnitude(linearVelocity); fprintf(RunStatsFile, "\nThe magitude of the avergae linear velocity = %f Kilometers/second\n", mag*velocityConvertion); angularMomentum = getAngularMomentumSeperate(0, getCenterOfMassSeperate(0), getLinearVelocitySeperate(0)); fprintf(RunStatsFile, "\nThe angular momentum = (%e, %e, %e)", angularMomentum.x*AngularMomentumConvertion, angularMomentum.y*AngularMomentumConvertion, angularMomentum.z*AngularMomentumConvertion); mag = vectorMagnitude(angularMomentum); fprintf(RunStatsFile, "\nThe magitude of the angular momentum = %e Kilograms*kilometers*kilometers/second\n", mag*AngularMomentumConvertion); fprintf(RunStatsFile, "\n\n***** Stats for Body1 *****\n"); centerOfMass = getCenterOfMassSeperate(1); radiusOfBody = 0.0; massOfBody = 0.0; for(int i = 0; i < NFe1; i++) { r.x = Pos[i].x - centerOfMass.x; r.y = Pos[i].y - centerOfMass.y; r.z = Pos[i].z - centerOfMass.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); if(d > radiusOfBody) radiusOfBody = d; massOfBody += MassFe; } for(int i = NFe1; i < NSi1; i++) { r.x = Pos[i].x - centerOfMass.x; r.y = Pos[i].y - centerOfMass.y; r.z = Pos[i].z - centerOfMass.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); if(d > radiusOfBody) radiusOfBody = d; massOfBody += MassSi; } fprintf(RunStatsFile, "\nMass 
= %e Kilograms\n", massOfBody*massConvertion); fprintf(RunStatsFile, "\nRadius = %f Kilometers\n", radiusOfBody*lengthConvertion); fprintf(RunStatsFile, "\nThe center of mass = (%f, %f, %f) Kilometers from (0, 0, 0)\n", centerOfMass.x*lengthConvertion, centerOfMass.y*lengthConvertion, centerOfMass.z*lengthConvertion); linearVelocity = getLinearVelocitySeperate(1); fprintf(RunStatsFile, "\nThe average linear velocity = (%f, %f, %f)", linearVelocity.x*velocityConvertion, linearVelocity.y*velocityConvertion, linearVelocity.z*velocityConvertion); mag = vectorMagnitude(linearVelocity); fprintf(RunStatsFile, "\nThe magitude of the avergae linear velocity = %f Kilometers/second\n", mag*velocityConvertion); angularMomentum = getAngularMomentumSeperate(1, getCenterOfMassSeperate(1), getLinearVelocitySeperate(1)); fprintf(RunStatsFile, "\nThe angular momentum = (%e, %e, %e)", angularMomentum.x*AngularMomentumConvertion, angularMomentum.y*AngularMomentumConvertion, angularMomentum.z*AngularMomentumConvertion); mag = vectorMagnitude(angularMomentum); fprintf(RunStatsFile, "\nThe magitude of the angular momentum = %e Kilograms*kilometers*kilometers/second\n", mag*AngularMomentumConvertion); fprintf(RunStatsFile, "\n\n***** Stats for Body2 *****\n"); centerOfMass = getCenterOfMassSeperate(2); radiusOfBody = 0.0; massOfBody = 0.0; for(int i = NFe1 + NSi1; i < NFe1 + NSi1 + NFe2; i++) { r.x = Pos[i].x - centerOfMass.x; r.y = Pos[i].y - centerOfMass.y; r.z = Pos[i].z - centerOfMass.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); if(d > radiusOfBody) radiusOfBody = d; massOfBody += MassFe; } for(int i = NFe1 + NSi1 + NFe2; i < N; i++) { r.x = Pos[i].x - centerOfMass.x; r.y = Pos[i].y - centerOfMass.y; r.z = Pos[i].z - centerOfMass.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); if(d > radiusOfBody) radiusOfBody = d; massOfBody += MassSi; } fprintf(RunStatsFile, "\nMass = %e Kilograms\n", massOfBody*massConvertion); fprintf(RunStatsFile, "\nRadius = %f Kilometers\n", radiusOfBody*lengthConvertion); fprintf(RunStatsFile, "\nThe center of mass = (%f, %f, %f) Kilometers from (0, 0, 0)\n", centerOfMass.x*lengthConvertion, centerOfMass.y*lengthConvertion, centerOfMass.z*lengthConvertion); linearVelocity = getLinearVelocitySeperate(2); fprintf(RunStatsFile, "\nThe average linear velocity = (%f, %f, %f)", linearVelocity.x*velocityConvertion, linearVelocity.y*velocityConvertion, linearVelocity.z*velocityConvertion); mag = vectorMagnitude(linearVelocity); fprintf(RunStatsFile, "\nThe magitude of the avergae linear velocity = %f Kilometers/second\n", mag*velocityConvertion); angularMomentum = getAngularMomentumSeperate(2, getCenterOfMassSeperate(2), getLinearVelocitySeperate(2)); fprintf(RunStatsFile, "\nThe angular momentum = (%e, %e, %e)", angularMomentum.x*AngularMomentumConvertion, angularMomentum.y*AngularMomentumConvertion, angularMomentum.z*AngularMomentumConvertion); mag = vectorMagnitude(angularMomentum); fprintf(RunStatsFile, "\nThe magitude of the angular momentum = %e Kilograms*kilometers*kilometers/second\n", mag*AngularMomentumConvertion); } void recordStartPosVelOfCreatedBodiesSeperate() { hipMemcpy( Pos, Pos_DEV0, N *sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpy Pos1"); hipMemcpy( Vel, Vel_DEV0, N *sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpy Vel"); fwrite(Pos, sizeof(float4), N, StartPosAndVelFile); fwrite(Vel, sizeof(float4), N, StartPosAndVelFile); } int findEarthAndMoon() { int groupId[N], used[N]; float mag, dx, dy, dz; float touch = Diameter*1.5; int groupNumber, 
numberOfGroups; int k; for(int i = 0; i < N; i++) { groupId[i] = -1; used[i] = 0; } groupNumber = 0; for(int i = 0; i < N; i++) { if(groupId[i] == -1) { groupId[i] = groupNumber; //find all from this group k = i; while(k < N) { if(groupId[k] == groupNumber && used[k] == 0) { for(int j = i; j < N; j++) { dx = Pos[k].x - Pos[j].x; dy = Pos[k].y - Pos[j].y; dz = Pos[k].z - Pos[j].z; mag = sqrt(dx*dx + dy*dy + dz*dz); if(mag < touch) { groupId[j] = groupNumber; } } used[k] = 1; k = i; } else k++; } } groupNumber++; } numberOfGroups = groupNumber; if(numberOfGroups == 1) { printf("\n No Moon found\n"); } int count; int *groupSize = (int *)malloc(numberOfGroups*sizeof(int)); for(int i = 0; i < numberOfGroups; i++) { count = 0; for(int j = 0; j < N; j++) { if(i == groupId[j]) count++; } groupSize[i] = count; } int earthGroupId = -1; NumberOfEarthElements = 0; for(int i = 0; i < numberOfGroups; i++) { if(groupSize[i] > NumberOfEarthElements) { NumberOfEarthElements = groupSize[i]; earthGroupId = i; } } int moonGroupId = -1; NumberOfMoonElements = 0; for(int i = 0; i < numberOfGroups; i++) { if(groupSize[i] > NumberOfMoonElements && i != earthGroupId) { NumberOfMoonElements = groupSize[i]; moonGroupId = i; } } free(groupSize); EarthIndex = (int *)malloc(NumberOfEarthElements*sizeof(int)); MoonIndex = (int *)malloc(NumberOfMoonElements*sizeof(int)); int earthCount = 0; int moonCount = 0; for(int j = 0; j < N; j++) { if(groupId[j] == earthGroupId) { EarthIndex[earthCount] = j; earthCount++; } else if(groupId[j] == moonGroupId) { MoonIndex[moonCount] = j; moonCount++; } } return(1); } float getMassCollision(int scope) { float mass = 0.0; if(scope == 0) // entire system { for(int i = 0; i < N; i++) { if(i < NFe) mass += MassFe; else mass += MassSi; } } else if(scope == 1) // earth-moon syatem { for(int i = 0; i < NumberOfEarthElements; i++) { if(EarthIndex[i] < NFe) mass += MassFe; else mass += MassSi; } for(int i = 0; i < NumberOfMoonElements; i++) { if(MoonIndex[i] < NFe) mass += MassFe; else mass += MassSi; } } else if(scope == 2) // earth { for(int i = 0; i < NumberOfEarthElements; i++) { if(EarthIndex[i] < NFe) mass += MassFe; else mass += MassSi; } } else if(scope == 3) // moon { for(int i = 0; i < NumberOfMoonElements; i++) { if(MoonIndex[i] < NFe) mass += MassFe; else mass += MassSi; } } else { printf("\nTSU Error: In getMassCollision function bodyId invalid\n"); exit(0); } return(mass); } float3 getCenterOfMassCollision(int scope) { float totalMass; float3 centerOfMass; centerOfMass.x = 0.0; centerOfMass.y = 0.0; centerOfMass.z = 0.0; if(scope == 0) // Entire System { for(int i = 0; i < N; i++) { if(i < NFe) { centerOfMass.x += Pos[i].x*MassFe; centerOfMass.y += Pos[i].y*MassFe; centerOfMass.z += Pos[i].z*MassFe; } else { centerOfMass.x += Pos[i].x*MassSi; centerOfMass.y += Pos[i].y*MassSi; centerOfMass.z += Pos[i].z*MassSi; } } totalMass = getMassCollision(0); centerOfMass.x /= totalMass; centerOfMass.y /= totalMass; centerOfMass.z /= totalMass; } else if(scope == 1) // Earth-Moon System { for(int i = 0; i < NumberOfEarthElements; i++) { if(EarthIndex[i] < NFe) { centerOfMass.x += Pos[EarthIndex[i]].x*MassFe; centerOfMass.y += Pos[EarthIndex[i]].y*MassFe; centerOfMass.z += Pos[EarthIndex[i]].z*MassFe; } else { centerOfMass.x += Pos[EarthIndex[i]].x*MassSi; centerOfMass.y += Pos[EarthIndex[i]].y*MassSi; centerOfMass.z += Pos[EarthIndex[i]].z*MassSi; } } for(int i = 0; i < NumberOfMoonElements; i++) { if(MoonIndex[i] < NFe) { centerOfMass.x += Pos[MoonIndex[i]].x*MassFe; centerOfMass.y += 
Pos[MoonIndex[i]].y*MassFe; centerOfMass.z += Pos[MoonIndex[i]].z*MassFe; } else { centerOfMass.x += Pos[MoonIndex[i]].x*MassSi; centerOfMass.y += Pos[MoonIndex[i]].y*MassSi; centerOfMass.z += Pos[MoonIndex[i]].z*MassSi; } } totalMass = getMassCollision(1); centerOfMass.x /= totalMass; centerOfMass.y /= totalMass; centerOfMass.z /= totalMass; } else if(scope == 2) // Earth { for(int i = 0; i < NumberOfEarthElements; i++) { if(EarthIndex[i] < NFe) { centerOfMass.x += Pos[EarthIndex[i]].x*MassFe; centerOfMass.y += Pos[EarthIndex[i]].y*MassFe; centerOfMass.z += Pos[EarthIndex[i]].z*MassFe; } else { centerOfMass.x += Pos[EarthIndex[i]].x*MassSi; centerOfMass.y += Pos[EarthIndex[i]].y*MassSi; centerOfMass.z += Pos[EarthIndex[i]].z*MassSi; } } totalMass = getMassCollision(2); centerOfMass.x /= totalMass; centerOfMass.y /= totalMass; centerOfMass.z /= totalMass; } else if(scope == 3) // Moon { for(int i = 0; i < NumberOfMoonElements; i++) { if(MoonIndex[i] < NFe) { centerOfMass.x += Pos[MoonIndex[i]].x*MassFe; centerOfMass.y += Pos[MoonIndex[i]].y*MassFe; centerOfMass.z += Pos[MoonIndex[i]].z*MassFe; } else { centerOfMass.x += Pos[MoonIndex[i]].x*MassSi; centerOfMass.y += Pos[MoonIndex[i]].y*MassSi; centerOfMass.z += Pos[MoonIndex[i]].z*MassSi; } } totalMass = getMassCollision(3); centerOfMass.x /= totalMass; centerOfMass.y /= totalMass; centerOfMass.z /= totalMass; } else { printf("\nTSU Error: In getCenterOfMassCollision function scope invalid\n"); exit(0); } return(centerOfMass); } float3 getLinearVelocityCollision(int scope) { float totalMass; float3 linearVelocity; linearVelocity.x = 0.0; linearVelocity.y = 0.0; linearVelocity.z = 0.0; if(scope == 0) // entire system { for(int i = 0; i < N; i++) { if(i < NFe) { linearVelocity.x += Vel[i].x*MassFe; linearVelocity.y += Vel[i].y*MassFe; linearVelocity.z += Vel[i].z*MassFe; } else { linearVelocity.x += Vel[i].x*MassSi; linearVelocity.y += Vel[i].y*MassSi; linearVelocity.z += Vel[i].z*MassSi; } } totalMass = getMassCollision(0); linearVelocity.x /= totalMass; linearVelocity.y /= totalMass; linearVelocity.z /= totalMass; } else if(scope == 1) // earth-moon system { for(int i = 0; i < NumberOfEarthElements; i++) { if(EarthIndex[i] < NFe) { linearVelocity.x += Vel[EarthIndex[i]].x*MassFe; linearVelocity.y += Vel[EarthIndex[i]].y*MassFe; linearVelocity.z += Vel[EarthIndex[i]].z*MassFe; } else { linearVelocity.x += Vel[EarthIndex[i]].x*MassSi; linearVelocity.y += Vel[EarthIndex[i]].y*MassSi; linearVelocity.z += Vel[EarthIndex[i]].z*MassSi; } } for(int i = 0; i < NumberOfMoonElements; i++) { if(MoonIndex[i] < NFe) { linearVelocity.x += Vel[MoonIndex[i]].x*MassFe; linearVelocity.y += Vel[MoonIndex[i]].y*MassFe; linearVelocity.z += Vel[MoonIndex[i]].z*MassFe; } else { linearVelocity.x += Vel[MoonIndex[i]].x*MassSi; linearVelocity.y += Vel[MoonIndex[i]].y*MassSi; linearVelocity.z += Vel[MoonIndex[i]].z*MassSi; } } totalMass = getMassCollision(1); linearVelocity.x /= totalMass; linearVelocity.y /= totalMass; linearVelocity.z /= totalMass; } else if(scope == 2) //earth { for(int i = 0; i < NumberOfEarthElements; i++) { if(EarthIndex[i] < NFe) { linearVelocity.x += Vel[EarthIndex[i]].x*MassFe; linearVelocity.y += Vel[EarthIndex[i]].y*MassFe; linearVelocity.z += Vel[EarthIndex[i]].z*MassFe; } else { linearVelocity.x += Vel[EarthIndex[i]].x*MassSi; linearVelocity.y += Vel[EarthIndex[i]].y*MassSi; linearVelocity.z += Vel[EarthIndex[i]].z*MassSi; } } totalMass = getMassCollision(2); linearVelocity.x /= totalMass; linearVelocity.y /= totalMass; linearVelocity.z 
/= totalMass; } else if(scope == 3) //moon { for(int i = 0; i < NumberOfMoonElements; i++) { if(MoonIndex[i] < NFe) { linearVelocity.x += Vel[MoonIndex[i]].x*MassFe; linearVelocity.y += Vel[MoonIndex[i]].y*MassFe; linearVelocity.z += Vel[MoonIndex[i]].z*MassFe; } else { linearVelocity.x += Vel[MoonIndex[i]].x*MassSi; linearVelocity.y += Vel[MoonIndex[i]].y*MassSi; linearVelocity.z += Vel[MoonIndex[i]].z*MassSi; } } totalMass = getMassCollision(3); linearVelocity.x /= totalMass; linearVelocity.y /= totalMass; linearVelocity.z /= totalMass; } else { printf("\nTSU Error: in getlinearVelocityEarthMoonSystem function scope invalid\n"); exit(0); } return(linearVelocity); } float3 getAngularMomentumCollision(int scope) { float3 centerOfMass, linearVelocity, angularMomentum; float3 r; float3 v; angularMomentum.x = 0.0; angularMomentum.y = 0.0; angularMomentum.z = 0.0; if(scope == 0) //Entire system { centerOfMass = getCenterOfMassCollision(0); linearVelocity = getLinearVelocityCollision(0); for(int i = 0; i < N; i++) { r.x = Pos[i].x - centerOfMass.x; r.y = Pos[i].y - centerOfMass.y; r.z = Pos[i].z - centerOfMass.z; v.x = Vel[i].x - linearVelocity.x; v.y = Vel[i].y - linearVelocity.y; v.z = Vel[i].z - linearVelocity.z; if(i < NFe) { angularMomentum.x += (r.y*v.z - r.z*v.y)*MassFe; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassFe; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassFe; } else { angularMomentum.x += (r.y*v.z - r.z*v.y)*MassSi; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassSi; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassSi; } } } else if(scope == 1) //Earth-Moon system { centerOfMass = getCenterOfMassCollision(1); linearVelocity = getLinearVelocityCollision(1); for(int i = 0; i < NumberOfEarthElements; i++) { r.x = Pos[EarthIndex[i]].x - centerOfMass.x; r.y = Pos[EarthIndex[i]].y - centerOfMass.y; r.z = Pos[EarthIndex[i]].z - centerOfMass.z; v.x = Vel[EarthIndex[i]].x - linearVelocity.x; v.y = Vel[EarthIndex[i]].y - linearVelocity.y; v.z = Vel[EarthIndex[i]].z - linearVelocity.z; if(EarthIndex[i] < NFe) { angularMomentum.x += (r.y*v.z - r.z*v.y)*MassFe; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassFe; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassFe; } else { angularMomentum.x += (r.y*v.z - r.z*v.y)*MassSi; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassSi; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassSi; } } for(int i = 0; i < NumberOfMoonElements; i++) { r.x = Pos[MoonIndex[i]].x - centerOfMass.x; r.y = Pos[MoonIndex[i]].y - centerOfMass.y; r.z = Pos[MoonIndex[i]].z - centerOfMass.z; v.x = Vel[MoonIndex[i]].x - linearVelocity.x; v.y = Vel[MoonIndex[i]].y - linearVelocity.y; v.z = Vel[MoonIndex[i]].z - linearVelocity.z; if(MoonIndex[i] < NFe) { angularMomentum.x += (r.y*v.z - r.z*v.y)*MassFe; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassFe; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassFe; } else { angularMomentum.x += (r.y*v.z - r.z*v.y)*MassSi; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassSi; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassSi; } } } else if(scope == 2) //Earth { centerOfMass = getCenterOfMassCollision(2); linearVelocity = getLinearVelocityCollision(2); for(int i = 0; i < NumberOfEarthElements; i++) { r.x = Pos[EarthIndex[i]].x - centerOfMass.x; r.y = Pos[EarthIndex[i]].y - centerOfMass.y; r.z = Pos[EarthIndex[i]].z - centerOfMass.z; v.x = Vel[EarthIndex[i]].x - linearVelocity.x; v.y = Vel[EarthIndex[i]].y - linearVelocity.y; v.z = Vel[EarthIndex[i]].z - linearVelocity.z; if(EarthIndex[i] < NFe) { angularMomentum.x += (r.y*v.z - r.z*v.y)*MassFe; angularMomentum.y += 
-(r.x*v.z - r.z*v.x)*MassFe; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassFe; } else { angularMomentum.x += (r.y*v.z - r.z*v.y)*MassSi; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassSi; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassSi; } } } else if(scope == 3) //Moon { centerOfMass = getCenterOfMassCollision(3); linearVelocity = getLinearVelocityCollision(3); for(int i = 0; i < NumberOfMoonElements; i++) { r.x = Pos[MoonIndex[i]].x - centerOfMass.x; r.y = Pos[MoonIndex[i]].y - centerOfMass.y; r.z = Pos[MoonIndex[i]].z - centerOfMass.z; v.x = Vel[MoonIndex[i]].x - linearVelocity.x; v.y = Vel[MoonIndex[i]].y - linearVelocity.y; v.z = Vel[MoonIndex[i]].z - linearVelocity.z; if(MoonIndex[i] < NFe) { angularMomentum.x += (r.y*v.z - r.z*v.y)*MassFe; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassFe; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassFe; } else { angularMomentum.x += (r.y*v.z - r.z*v.y)*MassSi; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassSi; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassSi; } } } else { printf("\nTSU Error: in getAngularMomentumCollision function scope invalid\n"); exit(0); } return(angularMomentum); } void printContinueStatsToScreen(double time) { double timeConverter = UnitTime; double lengthConverter = UnitLength; double massConverter = UnitMass; //double velocityConverter = UnitLength/UnitTime; double momentumConverter = UnitMass*UnitLength*UnitLength/UnitTime; float3 r, v; double d, mass, mag, size, angle, x, y, z; float massEarth; float3 centerOfMassEarth; float3 linearVelocityEarth; float3 centerOfMassEarthMoonMaterial; float3 averageVelocityEarthMoonMaterial; int earthMaterialFeCountBody1 = 0; int earthMaterialFeCountBody2 = 0; int earthMaterialSiCountBody1 = 0; int earthMaterialSiCountBody2 = 0; float earthMaterialMass = 0.0; int moonMaterialFeCountBody1 = 0; int moonMaterialFeCountBody2 = 0; int moonMaterialSiCountBody1 = 0; int moonMaterialSiCountBody2 = 0; float moonMaterialMass = 0.0; int escapeMaterialFeCountBody1 = 0; int escapeMaterialFeCountBody2 = 0; int escapeMaterialSiCountBody1 = 0; int escapeMaterialSiCountBody2 = 0; float escapeMaterialMass = 0.0; int unusedMaterialFeCountBody1 = 0; int unusedMaterialFeCountBody2 = 0; int unusedMaterialSiCountBody1 = 0; int unusedMaterialSiCountBody2 = 0; float unusedMaterialMass = 0.0; float3 angularMomentumHolder; float3 angularMomentumEarthMoonMaterial; float3 angularMomentumEarthMaterial; float3 angularMomentumMoonMaterial; //Finding radius of what the current Earth is findEarthAndMoon(); centerOfMassEarth = getCenterOfMassCollision(2); massEarth = getMassCollision(2); float radiusOfEarth = 0.0; for(int i = 0; i < NumberOfEarthElements; i++) { r.x = Pos[EarthIndex[i]].x - centerOfMassEarth.x; r.y = Pos[EarthIndex[i]].y - centerOfMassEarth.y; r.z = Pos[EarthIndex[i]].z - centerOfMassEarth.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); if(d > radiusOfEarth) radiusOfEarth = d; } // Finding Roche limit and setting sphere to create Earth and sphere to create the Moon float densityEarth = massEarth/((Pi*4.0/3.0)*radiusOfEarth*radiusOfEarth*radiusOfEarth); float densitySi = MassSi/((Pi*4.0/3.0)*(Diameter/2.0)*(Diameter/2.0)*(Diameter/2.0)); float rocheLimit = 2.44*radiusOfEarth*pow((densityEarth/densitySi),1.0/3.0); float radiusEarthMaterial = rocheLimit; float radiusMoonMaterial = NUMBEROFEARTHRADIFORMOONMATERIAL*radiusOfEarth; // Finding mass of Earth material, Moon Material // Finding the center of mass and average velocity of the material we estimating will make the Earth-Moon system // Finding Moon mix and Earth mix 
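	// Every element is binned by its distance d from the current Earth's center of mass:
	//   d < rocheLimit                          -> "Earth material"
	//   rocheLimit <= d < radiusMoonMaterial    -> "Moon material"
	//      (radiusMoonMaterial = NUMBEROFEARTHRADIFORMOONMATERIAL Earth radii)
	//   d >= radiusMoonMaterial                 -> compared against the escape speed
	//      v_esc = sqrt(2*G*(M_earthMaterial + M_moonMaterial)/d), with d and the element's
	//      velocity taken relative to the Earth-Moon material's center of mass and mean velocity,
	//      and counted as "escape" or "unused" material accordingly.
	// The per-body Fe/Si counts collected along the way feed the Body1/Body2 mix ratios printed
	// further down.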
	earthMaterialMass = 0.0;
	moonMaterialMass = 0.0;
	centerOfMassEarthMoonMaterial.x = 0.0;
	centerOfMassEarthMoonMaterial.y = 0.0;
	centerOfMassEarthMoonMaterial.z = 0.0;
	averageVelocityEarthMoonMaterial.x = 0.0;
	averageVelocityEarthMoonMaterial.y = 0.0;
	averageVelocityEarthMoonMaterial.z = 0.0;
	for(int i = 0; i < N; i++)
	{
		r.x = Pos[i].x - centerOfMassEarth.x;
		r.y = Pos[i].y - centerOfMassEarth.y;
		r.z = Pos[i].z - centerOfMassEarth.z;
		d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z);
		if(d < radiusEarthMaterial)
		{
			if(i < NFe) mass = MassFe;
			else mass = MassSi;
			earthMaterialMass += mass;
			// Accumulate the mass-weighted position and velocity of element i.
			centerOfMassEarthMoonMaterial.x += mass*Pos[i].x;
			centerOfMassEarthMoonMaterial.y += mass*Pos[i].y;
			centerOfMassEarthMoonMaterial.z += mass*Pos[i].z;
			averageVelocityEarthMoonMaterial.x += mass*Vel[i].x;
			averageVelocityEarthMoonMaterial.y += mass*Vel[i].y;
			averageVelocityEarthMoonMaterial.z += mass*Vel[i].z;
			if(i < NFe1) earthMaterialFeCountBody1++;
			else if(i < NFe1 + NFe2) earthMaterialFeCountBody2++;
			else if(i < NFe1 + NFe2 + NSi1) earthMaterialSiCountBody1++;
			else earthMaterialSiCountBody2++;
		}
		else if(d < radiusMoonMaterial)
		{
			if(i < NFe) mass = MassFe;
			else mass = MassSi;
			moonMaterialMass += mass;
			centerOfMassEarthMoonMaterial.x += mass*Pos[i].x;
			centerOfMassEarthMoonMaterial.y += mass*Pos[i].y;
			centerOfMassEarthMoonMaterial.z += mass*Pos[i].z;
			averageVelocityEarthMoonMaterial.x += mass*Vel[i].x;
			averageVelocityEarthMoonMaterial.y += mass*Vel[i].y;
			averageVelocityEarthMoonMaterial.z += mass*Vel[i].z;
			if(i < NFe1) moonMaterialFeCountBody1++;
			else if(i < NFe1 + NFe2) moonMaterialFeCountBody2++;
			else if(i < NFe1 + NFe2 + NSi1) moonMaterialSiCountBody1++;
			else moonMaterialSiCountBody2++;
		}
	}
	centerOfMassEarthMoonMaterial.x /= (earthMaterialMass + moonMaterialMass);
	centerOfMassEarthMoonMaterial.y /= (earthMaterialMass + moonMaterialMass);
	centerOfMassEarthMoonMaterial.z /= (earthMaterialMass + moonMaterialMass);
	averageVelocityEarthMoonMaterial.x /= (earthMaterialMass + moonMaterialMass);
	averageVelocityEarthMoonMaterial.y /= (earthMaterialMass + moonMaterialMass);
	averageVelocityEarthMoonMaterial.z /= (earthMaterialMass + moonMaterialMass);
	
	// Getting a rough estimate of how much of the extra material has escape velocity from what we
	// are considering will make the Earth-Moon system
	float velocity;
	float escapeVelocity;
	escapeMaterialMass = 0.0;
	unusedMaterialMass = 0.0;
	for(int i = 0; i < N; i++)
	{
		r.x = Pos[i].x - centerOfMassEarth.x;
		r.y = Pos[i].y - centerOfMassEarth.y;
		r.z = Pos[i].z - centerOfMassEarth.z;
		d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z);
		if(radiusMoonMaterial <= d)
		{
			r.x = Pos[i].x - centerOfMassEarthMoonMaterial.x;
			r.y = Pos[i].y - centerOfMassEarthMoonMaterial.y;
			r.z = Pos[i].z - centerOfMassEarthMoonMaterial.z;
			d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z);
			v.x = Vel[i].x - averageVelocityEarthMoonMaterial.x;
			v.y = Vel[i].y - averageVelocityEarthMoonMaterial.y;
			v.z = Vel[i].z - averageVelocityEarthMoonMaterial.z;
			velocity = sqrt(v.x*v.x + v.y*v.y + v.z*v.z);
			escapeVelocity = sqrt(2.0*Gravity*(earthMaterialMass + moonMaterialMass)/d);
			if(velocity >= escapeVelocity)
			{
				if(i < NFe) mass = MassFe;
				else mass = MassSi;
				escapeMaterialMass += mass;
				if(i < NFe1) escapeMaterialFeCountBody1++;
				else if(i < NFe1 + NFe2) escapeMaterialFeCountBody2++;
				else if(i < NFe1 + NFe2 + NSi1) escapeMaterialSiCountBody1++;
				else escapeMaterialSiCountBody2++;
			}
			else
			{
				if(i < NFe) mass = MassFe;
				else mass = MassSi;
				unusedMaterialMass += mass;
				if(i < NFe1) unusedMaterialFeCountBody1++;
				else if(i < NFe1 + NFe2) unusedMaterialFeCountBody2++;
				else if(i < NFe1 + NFe2 + NSi1)
unusedMaterialSiCountBody1++; else unusedMaterialSiCountBody2++; } } } // Finding the angular momentum of the Earth-Moon material // Finding the angular momentum of the Earth material // Finding the angular momentum of the Moon material linearVelocityEarth = getLinearVelocityCollision(2); angularMomentumEarthMoonMaterial.x = 0.0; angularMomentumEarthMoonMaterial.y = 0.0; angularMomentumEarthMoonMaterial.z = 0.0; angularMomentumEarthMaterial.x = 0.0; angularMomentumEarthMaterial.y = 0.0; angularMomentumEarthMaterial.z = 0.0; angularMomentumMoonMaterial.x = 0.0; angularMomentumMoonMaterial.y = 0.0; angularMomentumMoonMaterial.z = 0.0; for(int i = 0; i < N; i++) { r.x = Pos[i].x - centerOfMassEarth.x; r.y = Pos[i].y - centerOfMassEarth.y; r.z = Pos[i].z - centerOfMassEarth.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); if(d < radiusMoonMaterial) { v.x = Vel[i].x - linearVelocityEarth.x; v.y = Vel[i].y - linearVelocityEarth.y; v.z = Vel[i].z - linearVelocityEarth.z; if(i < NFe) { angularMomentumHolder.x = (r.y*v.z - r.z*v.y)*MassFe; angularMomentumHolder.y = -(r.x*v.z - r.z*v.x)*MassFe; angularMomentumHolder.z = (r.x*v.y - r.y*v.x)*MassFe; } else { angularMomentumHolder.x = (r.y*v.z - r.z*v.y)*MassSi; angularMomentumHolder.y = -(r.x*v.z - r.z*v.x)*MassSi; angularMomentumHolder.z = (r.x*v.y - r.y*v.x)*MassSi; } angularMomentumEarthMoonMaterial.x += angularMomentumHolder.x; angularMomentumEarthMoonMaterial.y += angularMomentumHolder.y; angularMomentumEarthMoonMaterial.z += angularMomentumHolder.z; if(d < radiusEarthMaterial) { angularMomentumEarthMaterial.x += angularMomentumHolder.x; angularMomentumEarthMaterial.y += angularMomentumHolder.y; angularMomentumEarthMaterial.z += angularMomentumHolder.z; } else { angularMomentumMoonMaterial.x += angularMomentumHolder.x; angularMomentumMoonMaterial.y += angularMomentumHolder.y; angularMomentumMoonMaterial.z += angularMomentumHolder.z; } } } printf("\n\n\n*************************************************************************\n"); printf("\nThe following are the three stats to feed to the search program\n"); x = angularMomentumEarthMoonMaterial.x*momentumConverter; y = angularMomentumEarthMoonMaterial.y*momentumConverter; z = angularMomentumEarthMoonMaterial.z*momentumConverter; mag = sqrt(x*x + y*y + z*z); printf("\nAngular momentum of the Earth-Moon system = %e", mag); printf("\nRatio Earth mass to Moon mass = %f", earthMaterialMass/moonMaterialMass); printf("\nMoon compotition ratio = %f", (float)(moonMaterialFeCountBody1 + moonMaterialSiCountBody1)/(float)(moonMaterialFeCountBody2 + moonMaterialSiCountBody2)); printf("\n\n\n*************************************************************************\n"); printf("\nThe following are all the continuation stats of the run when time = %f hours\n", time*timeConverter/3600.0); printf("\nDistance is measured in Kilometers"); printf("\nMass is measured in Kilograms"); printf("\nTime is measured in seconds"); printf("\nVelocity is measured in Kilometers/second"); printf("\nAngular momentun is measured in Kilograms*Kilometers*Kilometers/seconds\n"); printf("\nThe radius of Earth = %f", radiusOfEarth*lengthConverter); printf("\nRoche limit = %f", rocheLimit*lengthConverter); printf("\nRoche limit/radius of Earth = %f \n", rocheLimit/radiusOfEarth); x = angularMomentumEarthMoonMaterial.x*momentumConverter; y = angularMomentumEarthMoonMaterial.y*momentumConverter; z = angularMomentumEarthMoonMaterial.z*momentumConverter; printf("\nAngular momentum of the Earth-Moon material = (%e, %e, %e)", x, y, z); mag = sqrt(x*x 
+ y*y + z*z); printf("\nMagnitude of the angular momentum of the Earth-Moon material = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); printf("\nAngle off ecliptic plane of the Earth-Moon's material rotation = %f\n", 90.0 - angle*180.0/Pi); x = angularMomentumEarthMaterial.x*momentumConverter; y = angularMomentumEarthMaterial.y*momentumConverter; z = angularMomentumEarthMaterial.z*momentumConverter; printf("\nAngular momentum of the Earth material = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); printf("\nMagnitude of the angular momentum of the Earth material = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); printf("\nAngle off ecliptic plane of the Earth's material rotation = %f\n", 90.0 - angle*180.0/Pi); x = angularMomentumMoonMaterial.x*momentumConverter; y = angularMomentumMoonMaterial.y*momentumConverter; z = angularMomentumMoonMaterial.z*momentumConverter; printf("\nAngular momentum of the Moon material = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); printf("\nMagnitude of the angular momentum of the Moon material = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); printf("\nAngle off ecliptic plane of the Moon's material rotation = %f\n", 90.0 - angle*180.0/Pi); printf("\nThe mass of Earth material = %e", earthMaterialMass*massConverter); printf("\nThe Earth material count Fe body 1 = %d", earthMaterialFeCountBody1); printf("\nThe Earth material count Fe body 2 = %d", earthMaterialFeCountBody2); printf("\nThe Earth material count Si body 1 = %d", earthMaterialSiCountBody1); printf("\nThe Earth material count Si body 2 = %d", earthMaterialSiCountBody2); printf("\nThe Earth material Body1/Body2 ratio = %f\n", (float)(earthMaterialFeCountBody1 + earthMaterialSiCountBody1)/(float)(earthMaterialFeCountBody2 + earthMaterialSiCountBody2)); printf("\nThe mass of Moon material = %e", moonMaterialMass*massConverter); printf("\nThe Moon material count Fe body 1 = %d", moonMaterialFeCountBody1); printf("\nThe Moon material count Fe body 2 = %d", moonMaterialFeCountBody2); printf("\nThe Moon material count Si body 1 = %d", moonMaterialSiCountBody1); printf("\nThe Moon material count Si body 2 = %d", moonMaterialSiCountBody2); printf("\nThe Moon material Body1/Body2 ratio = %f\n", (float)(moonMaterialFeCountBody1 + moonMaterialSiCountBody1)/(float)(moonMaterialFeCountBody2 + moonMaterialSiCountBody2)); printf("\nThe mass of escape material = %e", escapeMaterialMass*massConverter); printf("\nThe escape material count Fe body 1 = %d", escapeMaterialFeCountBody1); printf("\nThe escape material count Fe body 2 = %d", escapeMaterialFeCountBody2); printf("\nThe escape material count Si body 1 = %d", escapeMaterialSiCountBody1); printf("\nThe escape material count Si body 2 = %d", escapeMaterialSiCountBody2); printf("\nThe escape material Body1/Body2 ratio = %f\n", (float)(escapeMaterialFeCountBody1 + escapeMaterialSiCountBody1)/(float)(escapeMaterialFeCountBody2 + escapeMaterialSiCountBody2)); printf("\nThe mass of unused material = %e", unusedMaterialMass*massConverter); printf("\nThe unused material count Fe body 1 = %d", unusedMaterialFeCountBody1); printf("\nThe unused material count Fe body 2 = %d", unusedMaterialFeCountBody2); printf("\nThe unused material count Si body 1 = %d", unusedMaterialSiCountBody1); printf("\nThe unused material count Si body 2 = %d", unusedMaterialSiCountBody2); printf("\nThe unused material Body1/Body2 ratio = %f\n", 
(float)(unusedMaterialFeCountBody1 + unusedMaterialSiCountBody1)/(float)(unusedMaterialFeCountBody2 + unusedMaterialSiCountBody2)); printf("\n*************************************************************************\n\n\n"); } void printContinueStatsToFile(double time) { double timeConverter = UnitTime; double lengthConverter = UnitLength; double massConverter = UnitMass; //double velocityConverter = UnitLength/UnitTime; double momentumConverter = UnitMass*UnitLength*UnitLength/UnitTime; float3 r, v; double d, mass, mag, size, angle, x, y, z; float massEarth; float3 centerOfMassEarth; float3 linearVelocityEarth; float3 centerOfMassEarthMoonMaterial; float3 averageVelocityEarthMoonMaterial; int earthMaterialFeCountBody1 = 0; int earthMaterialFeCountBody2 = 0; int earthMaterialSiCountBody1 = 0; int earthMaterialSiCountBody2 = 0; float earthMaterialMass = 0.0; int moonMaterialFeCountBody1 = 0; int moonMaterialFeCountBody2 = 0; int moonMaterialSiCountBody1 = 0; int moonMaterialSiCountBody2 = 0; float moonMaterialMass = 0.0; int escapeMaterialFeCountBody1 = 0; int escapeMaterialFeCountBody2 = 0; int escapeMaterialSiCountBody1 = 0; int escapeMaterialSiCountBody2 = 0; float escapeMaterialMass = 0.0; int unusedMaterialFeCountBody1 = 0; int unusedMaterialFeCountBody2 = 0; int unusedMaterialSiCountBody1 = 0; int unusedMaterialSiCountBody2 = 0; float unusedMaterialMass = 0.0; float3 angularMomentumEarthMoonMaterial; float3 angularMomentumEarthMaterial; float3 angularMomentumMoonMaterial; //Finding radius of what the current Earth is findEarthAndMoon(); centerOfMassEarth = getCenterOfMassCollision(2); massEarth = getMassCollision(2); float radiusOfEarth = 0.0; for(int i = 0; i < NumberOfEarthElements; i++) { r.x = Pos[EarthIndex[i]].x - centerOfMassEarth.x; r.y = Pos[EarthIndex[i]].y - centerOfMassEarth.y; r.z = Pos[EarthIndex[i]].z - centerOfMassEarth.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); if(d > radiusOfEarth) radiusOfEarth = d; } // Finding Roche limit and setting sphere to create Earth and sphere to create the Moon float densityEarth = massEarth/((Pi*4.0/3.0)*radiusOfEarth*radiusOfEarth*radiusOfEarth); float densitySi = MassSi/((Pi*4.0/3.0)*(Diameter/2.0)*(Diameter/2.0)*(Diameter/2.0)); float rocheLimit = 2.44*radiusOfEarth*pow((densityEarth/densitySi),1.0/3.0); float radiusEarthMaterial = rocheLimit; float radiusMoonMaterial = NUMBEROFEARTHRADIFORMOONMATERIAL*radiusOfEarth; // Finding mass of Earth material, Moon Material // Finding the center of mass and average velocity of the material we estimating will make the Earth-Moon system // Finding Moon mix and Earth mix earthMaterialMass = 0.0; moonMaterialMass = 0.0; centerOfMassEarthMoonMaterial.x = 0.0; centerOfMassEarthMoonMaterial.y = 0.0; centerOfMassEarthMoonMaterial.z = 0.0; averageVelocityEarthMoonMaterial.x = 0.0; averageVelocityEarthMoonMaterial.y = 0.0; averageVelocityEarthMoonMaterial.z = 0.0; for(int i = 0; i < N; i++) { r.x = Pos[i].x - centerOfMassEarth.x; r.y = Pos[i].y - centerOfMassEarth.y; r.z = Pos[i].z - centerOfMassEarth.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); if(d < radiusEarthMaterial) { if(i < NFe) mass = MassFe; else mass = MassSi; earthMaterialMass += mass; centerOfMassEarthMoonMaterial.x += mass*Pos->x; centerOfMassEarthMoonMaterial.y += mass*Pos->y; centerOfMassEarthMoonMaterial.z += mass*Pos->z; averageVelocityEarthMoonMaterial.x += mass*Vel->x; averageVelocityEarthMoonMaterial.y += mass*Vel->y; averageVelocityEarthMoonMaterial.z += mass*Vel->z; if(i < NFe1) earthMaterialFeCountBody1++; else if(i < NFe1 + NFe2) 
earthMaterialFeCountBody2++; else if(i < NFe1 + NFe2 + NSi1) earthMaterialSiCountBody1++; else earthMaterialSiCountBody2++; } else if(d < radiusMoonMaterial) { if(i < NFe) mass = MassFe; else mass = MassSi; moonMaterialMass += mass; centerOfMassEarthMoonMaterial.x += mass*Pos->x; centerOfMassEarthMoonMaterial.y += mass*Pos->y; centerOfMassEarthMoonMaterial.z += mass*Pos->z; averageVelocityEarthMoonMaterial.x += mass*Vel->x; averageVelocityEarthMoonMaterial.y += mass*Vel->y; averageVelocityEarthMoonMaterial.z += mass*Vel->z; if(i < NFe1) moonMaterialFeCountBody1++; else if(i < NFe1 + NFe2) moonMaterialFeCountBody2++; else if(i < NFe1 + NFe2 + NSi1) moonMaterialSiCountBody1++; else moonMaterialSiCountBody2++; } } centerOfMassEarthMoonMaterial.x /= (earthMaterialMass + moonMaterialMass); centerOfMassEarthMoonMaterial.y /= (earthMaterialMass + moonMaterialMass); centerOfMassEarthMoonMaterial.z /= (earthMaterialMass + moonMaterialMass); averageVelocityEarthMoonMaterial.x /= (earthMaterialMass + moonMaterialMass); averageVelocityEarthMoonMaterial.y /= (earthMaterialMass + moonMaterialMass); averageVelocityEarthMoonMaterial.z /= (earthMaterialMass + moonMaterialMass); // Getting a rough estimate of how much of the extra material has escape velocity from what we // considering will make the Earth-Moon system float velocity; float escapeVelocity; escapeMaterialMass = 0.0; unusedMaterialMass = 0.0; for(int i = 0; i < N; i++) { r.x = Pos[i].x - centerOfMassEarth.x; r.y = Pos[i].y - centerOfMassEarth.y; r.z = Pos[i].z - centerOfMassEarth.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); if(radiusMoonMaterial <= d) { r.x = Pos[i].x - centerOfMassEarthMoonMaterial.x; r.y = Pos[i].y - centerOfMassEarthMoonMaterial.y; r.z = Pos[i].z - centerOfMassEarthMoonMaterial.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); v.x = Vel[i].x - averageVelocityEarthMoonMaterial.x; v.y = Vel[i].y - averageVelocityEarthMoonMaterial.y; v.z = Vel[i].z - averageVelocityEarthMoonMaterial.z; velocity = sqrt(v.x*v.x + v.y*v.y + v.z*v.z); escapeVelocity = sqrt(2.0*Gravity*(earthMaterialMass + moonMaterialMass)/d); if(velocity >= escapeVelocity) { if(i < NFe) mass = MassFe; else mass = MassSi; escapeMaterialMass += mass; if(i < NFe1) escapeMaterialFeCountBody1++; else if(i < NFe1 + NFe2) escapeMaterialFeCountBody2++; else if(i < NFe1 + NFe2 + NSi1) escapeMaterialSiCountBody1++; else escapeMaterialSiCountBody2++; } else { if(i < NFe) mass = MassFe; else mass = MassSi; unusedMaterialMass += mass; if(i < NFe1) unusedMaterialFeCountBody1++; else if(i < NFe1 + NFe2) unusedMaterialFeCountBody2++; else if(i < NFe1 + NFe2 + NSi1) unusedMaterialSiCountBody1++; else unusedMaterialSiCountBody2++; } } } // Finding the angular momentum of the Earth-Moon material // Finding the angular momentum of the Earth material // Finding the angular momentum of the Moon material linearVelocityEarth = getLinearVelocityCollision(2); angularMomentumEarthMoonMaterial.x = 0.0; angularMomentumEarthMoonMaterial.y = 0.0; angularMomentumEarthMoonMaterial.z = 0.0; angularMomentumEarthMaterial.x = 0.0; angularMomentumEarthMaterial.y = 0.0; angularMomentumEarthMaterial.z = 0.0; angularMomentumMoonMaterial.x = 0.0; angularMomentumMoonMaterial.y = 0.0; angularMomentumMoonMaterial.z = 0.0; for(int i = 0; i < N; i++) { r.x = Pos[i].x - centerOfMassEarth.x; r.y = Pos[i].y - centerOfMassEarth.y; r.z = Pos[i].z - centerOfMassEarth.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); if(d < radiusMoonMaterial) { v.x = Vel[i].x - linearVelocityEarth.x; v.y = Vel[i].y - linearVelocityEarth.y; v.z = 
			Vel[i].z - linearVelocityEarth.z;
			if(i < NFe) mass = MassFe;
			else mass = MassSi;
			//Angular momentum contribution of this single element about the Earth's center of mass.
			double lx =  (r.y*v.z - r.z*v.y)*mass;
			double ly = -(r.x*v.z - r.z*v.x)*mass;
			double lz =  (r.x*v.y - r.y*v.x)*mass;
			angularMomentumEarthMoonMaterial.x += lx;
			angularMomentumEarthMoonMaterial.y += ly;
			angularMomentumEarthMoonMaterial.z += lz;
			if(d < radiusEarthMaterial)
			{
				angularMomentumEarthMaterial.x += lx;
				angularMomentumEarthMaterial.y += ly;
				angularMomentumEarthMaterial.z += lz;
			}
			else
			{
				angularMomentumMoonMaterial.x += lx;
				angularMomentumMoonMaterial.y += ly;
				angularMomentumMoonMaterial.z += lz;
			}
		}
	}
	
	fprintf(ContinueRunStatsFile, "\n\n\n*************************************************************************\n");
	fprintf(ContinueRunStatsFile, "\nThe following are the three stats to feed to the search program\n");
	x = angularMomentumEarthMoonMaterial.x*momentumConverter;
	y = angularMomentumEarthMoonMaterial.y*momentumConverter;
	z = angularMomentumEarthMoonMaterial.z*momentumConverter;
	mag = sqrt(x*x + y*y + z*z);
	fprintf(ContinueRunStatsFile, "\nAngular momentum of the Earth-Moon system = %e", mag);
	fprintf(ContinueRunStatsFile, "\nRatio of Earth mass to Moon mass = %f", earthMaterialMass/moonMaterialMass);
	fprintf(ContinueRunStatsFile, "\nMoon composition ratio = %f", (float)(moonMaterialFeCountBody1 + moonMaterialSiCountBody1)/(float)(moonMaterialFeCountBody2 + moonMaterialSiCountBody2));
	fprintf(ContinueRunStatsFile, "\n\n\n*************************************************************************\n");
	fprintf(ContinueRunStatsFile, "\nThe following are all the continuation stats of the run when time = %f hours\n", time*timeConverter/3600.0);
	fprintf(ContinueRunStatsFile, "\nDistance is measured in Kilometers");
	fprintf(ContinueRunStatsFile, "\nMass is measured in Kilograms");
	fprintf(ContinueRunStatsFile, "\nTime is measured in seconds");
	fprintf(ContinueRunStatsFile, "\nVelocity is measured in Kilometers/second");
	fprintf(ContinueRunStatsFile, "\nAngular momentum is measured in Kilograms*Kilometers*Kilometers/seconds\n");
	fprintf(ContinueRunStatsFile, "\nThe radius of Earth = %f", radiusOfEarth*lengthConverter);
	fprintf(ContinueRunStatsFile, "\nRoche limit = %f", rocheLimit*lengthConverter);
	fprintf(ContinueRunStatsFile, "\nRoche limit/radius of Earth = %f \n", rocheLimit/radiusOfEarth);
	x = angularMomentumEarthMoonMaterial.x*momentumConverter;
	y = angularMomentumEarthMoonMaterial.y*momentumConverter;
	z = angularMomentumEarthMoonMaterial.z*momentumConverter;
	fprintf(ContinueRunStatsFile, "\nAngular momentum of the Earth-Moon material = (%e, %e, %e)", x, y, z);
	mag = sqrt(x*x + y*y + z*z);
	fprintf(ContinueRunStatsFile, "\nMagnitude of the angular momentum of the Earth-Moon material = %e", mag);
	size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z);
	angle = acos((x*x + z*z)/size);
	fprintf(ContinueRunStatsFile, "\nAngle off ecliptic plane of the Earth-Moon material's rotation = %f\n", 90.0 - angle*180.0/Pi);
	x = angularMomentumEarthMaterial.x*momentumConverter;
	y = angularMomentumEarthMaterial.y*momentumConverter;
	z = angularMomentumEarthMaterial.z*momentumConverter;
	fprintf(ContinueRunStatsFile, "\nAngular momentum of the Earth material = (%e, %e, %e)", x, y, z);
	mag = sqrt(x*x + y*y + z*z);
fprintf(ContinueRunStatsFile, "\nMagnitude of the angular momentum of the Earth material = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); fprintf(ContinueRunStatsFile, "\nAngle off ecliptic plane of the Earth's material rotation = %f\n", 90.0 - angle*180.0/Pi); x = angularMomentumMoonMaterial.x*momentumConverter; y = angularMomentumMoonMaterial.y*momentumConverter; z = angularMomentumMoonMaterial.z*momentumConverter; fprintf(ContinueRunStatsFile, "\nAngular momentum of the Moon material = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); fprintf(ContinueRunStatsFile, "\nMagnitude of the angular momentum of the Moon material = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); fprintf(ContinueRunStatsFile, "\nAngle off ecliptic plane of the Moon's material rotation = %f\n", 90.0 - angle*180.0/Pi); fprintf(ContinueRunStatsFile, "\nThe mass of Earth material = %e", earthMaterialMass*massConverter); fprintf(ContinueRunStatsFile, "\nThe Earth material count Fe body 1 = %d", earthMaterialFeCountBody1); fprintf(ContinueRunStatsFile, "\nThe Earth material count Fe body 2 = %d", earthMaterialFeCountBody2); fprintf(ContinueRunStatsFile, "\nThe Earth material count Si body 1 = %d", earthMaterialSiCountBody1); fprintf(ContinueRunStatsFile, "\nThe Earth material count Si body 2 = %d", earthMaterialSiCountBody2); fprintf(ContinueRunStatsFile, "\nThe Earth material Body1/Body2 ratio = %f\n", (float)(earthMaterialFeCountBody1 + earthMaterialSiCountBody1)/(float)(earthMaterialFeCountBody2 + earthMaterialSiCountBody2)); fprintf(ContinueRunStatsFile, "\nThe mass of Moon material = %e", moonMaterialMass*massConverter); fprintf(ContinueRunStatsFile, "\nThe Moon material count Fe body 1 = %d", moonMaterialFeCountBody1); fprintf(ContinueRunStatsFile, "\nThe Moon material count Fe body 2 = %d", moonMaterialFeCountBody2); fprintf(ContinueRunStatsFile, "\nThe Moon material count Si body 1 = %d", moonMaterialSiCountBody1); fprintf(ContinueRunStatsFile, "\nThe Moon material count Si body 2 = %d", moonMaterialSiCountBody2); fprintf(ContinueRunStatsFile, "\nThe Moon material Body1/Body2 ratio = %f\n", (float)(moonMaterialFeCountBody1 + moonMaterialSiCountBody1)/(float)(moonMaterialFeCountBody2 + moonMaterialSiCountBody2)); fprintf(ContinueRunStatsFile, "\nThe mass of escape material = %e", escapeMaterialMass*massConverter); fprintf(ContinueRunStatsFile, "\nThe escape material count Fe body 1 = %d", escapeMaterialFeCountBody1); fprintf(ContinueRunStatsFile, "\nThe escape material count Fe body 2 = %d", escapeMaterialFeCountBody2); fprintf(ContinueRunStatsFile, "\nThe escape material count Si body 1 = %d", escapeMaterialSiCountBody1); fprintf(ContinueRunStatsFile, "\nThe escape material count Si body 2 = %d", escapeMaterialSiCountBody2); fprintf(ContinueRunStatsFile, "\nThe escape material Body1/Body2 ratio = %f\n", (float)(escapeMaterialFeCountBody1 + escapeMaterialSiCountBody1)/(float)(escapeMaterialFeCountBody2 + escapeMaterialSiCountBody2)); fprintf(ContinueRunStatsFile, "\nThe mass of unused material = %e", unusedMaterialMass*massConverter); fprintf(ContinueRunStatsFile, "\nThe unused material count Fe body 1 = %d", unusedMaterialFeCountBody1); fprintf(ContinueRunStatsFile, "\nThe unused material count Fe body 2 = %d", unusedMaterialFeCountBody2); fprintf(ContinueRunStatsFile, "\nThe unused material count Si body 1 = %d", unusedMaterialSiCountBody1); fprintf(ContinueRunStatsFile, "\nThe unused material count Si body 2 = %d", 
unusedMaterialSiCountBody2); fprintf(ContinueRunStatsFile, "\nThe unused material Body1/Body2 ratio = %f\n", (float)(unusedMaterialFeCountBody1 + unusedMaterialSiCountBody1)/(float)(unusedMaterialFeCountBody2 + unusedMaterialSiCountBody2)); fprintf(ContinueRunStatsFile, "\n*************************************************************************\n\n\n"); } void printCollisionStatsToScreen(double time) { double mag, size, angle, x, y, z; double timeConverter = UnitTime; double lengthConverter = UnitLength; double massConverter = UnitMass; double velocityConverter = UnitLength/UnitTime; double momentumConverter = UnitMass*UnitLength*UnitLength/UnitTime; findEarthAndMoon(); int earthFeCountBody1 = 0; int earthFeCountBody2 = 0; int earthSiCountBody1 = 0; int earthSiCountBody2 = 0; int moonFeCountBody1 = 0; int moonFeCountBody2 = 0; int moonSiCountBody1 = 0; int moonSiCountBody2 = 0; float massUniversalSystem = getMassCollision(0); float massEarthMoonSystem = getMassCollision(1); float massEarth = getMassCollision(2); float massMoon = getMassCollision(3); float3 centerOfMassUniversalSystem = getCenterOfMassCollision(0); float3 centerOfMassEarthMoonSystem = getCenterOfMassCollision(1); float3 centerOfMassEarth = getCenterOfMassCollision(2); float3 centerOfMassMoon = getCenterOfMassCollision(3); float3 linearVelocityUniversalSystem = getLinearVelocityCollision(0); float3 linearVelocityEarthMoonSystem = getLinearVelocityCollision(1); float3 linearVelocityEarth = getLinearVelocityCollision(2); float3 linearVelocityMoon = getLinearVelocityCollision(3); float3 angularMomentumUniversalSystem = getAngularMomentumCollision(0); float3 angularMomentumEarthMoonSystem = getAngularMomentumCollision(1); float3 angularMomentumEarth = getAngularMomentumCollision(2); float3 angularMomentumMoon = getAngularMomentumCollision(3); for(int i = 0; i < NumberOfEarthElements; i++) { if(EarthIndex[i] < NFe1) earthFeCountBody1++; else if(EarthIndex[i] < NFe1 + NFe2) earthFeCountBody2++; else if(EarthIndex[i] < NFe1 + NFe2 + NSi1) earthSiCountBody1++; else earthSiCountBody2++; } for(int i = 0; i < NumberOfMoonElements; i++) { if(MoonIndex[i] < NFe1) moonFeCountBody1++; else if(MoonIndex[i] < NFe1 + NFe2) moonFeCountBody2++; else if(MoonIndex[i] < NFe1 + NFe2 + NSi1) moonSiCountBody1++; else moonSiCountBody2++; } printf("\n\n\n*************************************************************************\n\n\n"); printf("\nThe following are the stats of the run when time = %f hours\n", time*timeConverter/3600.0); printf("\nDistance is measured in Kilometers"); printf("\nMass is measured in Kilograms"); printf("\nTime is measured in seconds"); printf("\nVelocity is measured in Kilometers/second"); printf("\nAngular momentun is measured in Kilograms*Kilometers*Kilometers/seconds\n"); printf("\nThe mass of Earth = %e", massEarth*massConverter); printf("\nThe mass of Moon = %e", massMoon*massConverter); if(massMoon != 0.0) printf("\nThe mass ratio Earth/Moon = %f\n", massEarth/massMoon); printf("\nMoon iron from body 1 = %d", moonFeCountBody1); printf("\nMoon silicate from body 1 = %d", moonSiCountBody1); printf("\nMoon iron from body 2 = %d", moonFeCountBody2); printf("\nMoon silicate from body 2 = %d", moonSiCountBody2); if((moonFeCountBody2 + moonSiCountBody2) == 0) { printf("\nThe Moon is only composed of elements from body 1\n"); } else if((moonFeCountBody1 + moonSiCountBody1) == 0) { printf("\nThe Moon is only composed of elements from body 2\n"); } else { printf("\nMoon ratio body1/body2 = %f\n", (float)(moonFeCountBody1 + 
moonSiCountBody1)/(float)(moonFeCountBody2 + moonSiCountBody2)); } printf("\nEarth iron from body 1 = %d", earthFeCountBody1); printf("\nEarth silicate from body 1 = %d", earthSiCountBody1); printf("\nEarth iron from body 2 = %d", earthFeCountBody2); printf("\nEarth silicate from body 2 = %d", earthSiCountBody2); if((earthFeCountBody2 + earthSiCountBody2) == 0) { printf("\nThe Earth is only composed of elements from body 1\n"); } else if((earthFeCountBody1 + earthSiCountBody1) == 0) { printf("\nThe Earth is only composed of elements from body 2\n"); } else { printf("\nEarth ratio body1/body2 = %f\n", (float)(earthFeCountBody1 + earthSiCountBody1)/(float)(earthFeCountBody2 + earthSiCountBody2)); } //It is always assumed that the ecliptic plane is the xz-plane. x = angularMomentumEarthMoonSystem.x*momentumConverter; y = angularMomentumEarthMoonSystem.y*momentumConverter; z = angularMomentumEarthMoonSystem.z*momentumConverter; printf("\nAngular momentum of the Earth Moon system = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); printf("\nMagnitude of the angular momentum of the system = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); printf("\nAngle off ecliptic plane of the system's rotation = %f\n", 90.0 - angle*180.0/Pi); x = angularMomentumEarth.x*momentumConverter; y = angularMomentumEarth.y*momentumConverter; z = angularMomentumEarth.z*momentumConverter; printf("\nAngular momentum of the Earth = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); printf("\nMagnitude of the angular momentum of the Earth = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); printf("\nAngle off ecliptic plane of the Earth's rotation = %f\n", 90.0 - angle*180.0/Pi); x = angularMomentumMoon.x*momentumConverter; y = angularMomentumMoon.y*momentumConverter; z = angularMomentumMoon.z*momentumConverter; printf("\nAngular momentum of the Moon = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); printf("\nMagnitude of the angular momentum of the Moon = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); printf("\nAngle off ecliptic plane of the Moon's rotation = %f\n", 90.0 - angle*180.0/Pi); x = centerOfMassEarthMoonSystem.x*lengthConverter; y = centerOfMassEarthMoonSystem.y*lengthConverter; z = centerOfMassEarthMoonSystem.z*lengthConverter; printf("\nCenter of mass of the Earth-Moon system = (%f, %f, %f)", x, y, z); x = centerOfMassEarth.x*lengthConverter; y = centerOfMassEarth.y*lengthConverter; z = centerOfMassEarth.z*lengthConverter; printf("\nCenter of mass of the Earth system = (%f, %f, %f)", x, y, z); x = centerOfMassMoon.x*lengthConverter; y = centerOfMassMoon.y*lengthConverter; z = centerOfMassMoon.z*lengthConverter; printf("\nCenter of mass of the Moon system = (%f, %f, %f)\n", x, y, z); x = linearVelocityEarthMoonSystem.x*velocityConverter; y = linearVelocityEarthMoonSystem.y*velocityConverter; z = linearVelocityEarthMoonSystem.z*velocityConverter; printf("\nLinear Velocity of the Earth-Moon system = (%f, %f, %f)", x, y, z); x = linearVelocityEarth.x*velocityConverter; y = linearVelocityEarth.y*velocityConverter; z = linearVelocityEarth.z*velocityConverter; printf("\nLinear Velocity of the Earth system = (%f, %f, %f)", x, y, z); x = linearVelocityMoon.x*velocityConverter; y = linearVelocityMoon.y*velocityConverter; z = linearVelocityMoon.z*velocityConverter; printf("\nLinear Velocity of the Moon system = (%f, %f, %f)\n", x, y, z); printf("\n*****Stats of the entire system 
to check the numerical scheme's validity*****\n"); x = centerOfMassUniversalSystem.x*lengthConverter; y = centerOfMassUniversalSystem.y*lengthConverter; z = centerOfMassUniversalSystem.z*lengthConverter; printf("\nCenter of mass of the entire system = (%f, %f, %f)\n", x, y, z); x = linearVelocityUniversalSystem.x*velocityConverter; y = linearVelocityUniversalSystem.y*velocityConverter; z = linearVelocityUniversalSystem.z*velocityConverter; printf("\nLinear velocity of the entire system system = (%f, %f, %f)", x, y, z); mag = sqrt(x*x + y*y + z*z); printf("\nMagnitude of the linear velocity of the entire system = %f\n", mag); x = angularMomentumUniversalSystem.x*momentumConverter; y = angularMomentumUniversalSystem.y*momentumConverter; z = angularMomentumUniversalSystem.z*momentumConverter; printf("\nAngular momentum of the entire system system = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); printf("\nMagnitude of the angular momentum of the entire system = %e\n", mag); printf("\n*************************************************************************\n"); printf("\n******************* Just the good stuff *********************************\n"); printf("\n percent off correct Earth mass = %f ", 100.0*(massEarth*massConverter/(MassOfEarth))); printf("\n percent off correct Moon mass = %f ", 100.0*(massMoon*massConverter/(MassOfMoon))); printf("\n\n Earth mass percent iron = %f mass percent silicate = %f", float(earthFeCountBody1*MassFe + earthFeCountBody2*MassFe)/massEarth, float(earthSiCountBody1*MassSi + earthSiCountBody2*MassSi)/massEarth); printf("\n Moon mass percent iron = %f mass percent silicate = %f", float(moonFeCountBody1*MassFe + moonFeCountBody2*MassFe)/massMoon, float(moonSiCountBody1*MassSi + moonSiCountBody2*MassSi)/massMoon); if((moonFeCountBody2 + moonSiCountBody2) != 0) { printf("\n\n Moon body1/body2 ratio = %f ", float(moonFeCountBody1*MassFe + moonSiCountBody1*MassSi)/float(moonFeCountBody2*MassFe + moonSiCountBody2*MassSi)); } x = angularMomentumEarthMoonSystem.x*momentumConverter; y = angularMomentumEarthMoonSystem.y*momentumConverter; z = angularMomentumEarthMoonSystem.z*momentumConverter; mag = sqrt(x*x + y*y + z*z); printf("\n Percent off correct angular momentum of the Earth-Moon System = %f ", 100.0*(1.0 - mag/AngularMomentumEarthMoonSystem)); x = angularMomentumEarth.x*momentumConverter; y = angularMomentumEarth.y*momentumConverter; z = angularMomentumEarth.z*momentumConverter; mag = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/mag); printf("\n Percent off correct axial tilt of the Earth = %f ", 100.0*(1.0 - angle/EarthAxialTilt)); printf("\n\n*************************************************************************\n\n\n"); } void recordFinalCollisionStat(double time) { double mag, size, angle, x, y, z; double timeConverter = UnitTime; double lengthConverter = UnitLength; double massConverter = UnitMass; double velocityConverter = UnitLength/UnitTime; double momentumConverter = UnitMass*UnitLength*UnitLength/UnitTime; findEarthAndMoon(); int earthFeCountBody1 = 0; int earthFeCountBody2 = 0; int earthSiCountBody1 = 0; int earthSiCountBody2 = 0; int moonFeCountBody1 = 0; int moonFeCountBody2 = 0; int moonSiCountBody1 = 0; int moonSiCountBody2 = 0; float massUniversalSystem = getMassCollision(0); float massEarthMoonSystem = getMassCollision(1); float massEarth = getMassCollision(2); float massMoon = getMassCollision(3); float3 centerOfMassUniversalSystem = getCenterOfMassCollision(0); float3 centerOfMassEarthMoonSystem = 
getCenterOfMassCollision(1); float3 centerOfMassEarth = getCenterOfMassCollision(2); float3 centerOfMassMoon = getCenterOfMassCollision(3); float3 linearVelocityUniversalSystem = getLinearVelocityCollision(0); float3 linearVelocityEarthMoonSystem = getLinearVelocityCollision(1); float3 linearVelocityEarth = getLinearVelocityCollision(2); float3 linearVelocityMoon = getLinearVelocityCollision(3); float3 angularMomentumUniversalSystem = getAngularMomentumCollision(0); float3 angularMomentumEarthMoonSystem = getAngularMomentumCollision(1); float3 angularMomentumEarth = getAngularMomentumCollision(2); float3 angularMomentumMoon = getAngularMomentumCollision(3); for(int i = 0; i < NumberOfEarthElements; i++) { if(EarthIndex[i] < NFe1) earthFeCountBody1++; else if(EarthIndex[i] < NFe1 + NFe2) earthFeCountBody2++; else if(EarthIndex[i] < NFe1 + NFe2 + NSi1) earthSiCountBody1++; else earthSiCountBody2++; } for(int i = 0; i < NumberOfMoonElements; i++) { if(MoonIndex[i] < NFe1) moonFeCountBody1++; else if(MoonIndex[i] < NFe1 + NFe2) moonFeCountBody2++; else if(MoonIndex[i] < NFe1 + NFe2 + NSi1) moonSiCountBody1++; else moonSiCountBody2++; } fprintf(RunStatsFile,"\n\n\n*************************************************************************\n\n"); fprintf(RunStatsFile,"\nThe following are the final stats of the run when time = %f hours\n", time*timeConverter/3600.0); fprintf(RunStatsFile,"\nDistance is measured in Kilometers"); fprintf(RunStatsFile,"\nMass is measured in Kilograms"); fprintf(RunStatsFile,"\nTime is measured in seconds"); fprintf(RunStatsFile,"\nVelocity is measured in Kilometers/second"); fprintf(RunStatsFile,"\nAngular momentun is measured in Kilograms*Kilometers*Kilometers/seconds\n"); fprintf(RunStatsFile,"\nThe mass of Earth = %e", massEarth*massConverter); fprintf(RunStatsFile,"\nThe mass of Moon = %e", massMoon*massConverter); if(massMoon != 0.0) fprintf(RunStatsFile,"\nThe mass ratio Earth/Moon = %f\n", massEarth/massMoon); fprintf(RunStatsFile,"\nMoon iron from body 1 = %d", moonFeCountBody1); fprintf(RunStatsFile,"\nMoon silicate from body 1 = %d", moonSiCountBody1); fprintf(RunStatsFile,"\nMoon iron from body 2 = %d", moonFeCountBody2); fprintf(RunStatsFile,"\nMoon silicate from body 2 = %d", moonSiCountBody2); if((moonFeCountBody2 + moonSiCountBody2) == 0) { fprintf(RunStatsFile,"\nThe Moon is only composed of elements from body 1\n"); } else if((moonFeCountBody1 + moonSiCountBody1) == 0) { fprintf(RunStatsFile,"\nThe Moon is only composed of elements from body 2\n"); } else { fprintf(RunStatsFile,"\nMoon ratio body1/body2 = %f\n", (float)(moonFeCountBody1 + moonSiCountBody1)/(float)(moonFeCountBody2 + moonSiCountBody2)); } fprintf(RunStatsFile,"\nEarth iron from body 1 = %d", earthFeCountBody1); fprintf(RunStatsFile,"\nEarth silicate from body 1 = %d", earthSiCountBody1); fprintf(RunStatsFile,"\nEarth iron from body 2 = %d", earthFeCountBody2); fprintf(RunStatsFile,"\nEarth silicate from body 2 = %d", earthSiCountBody2); if((earthFeCountBody2 + earthSiCountBody2) == 0) { fprintf(RunStatsFile,"\nThe Earth is only composed of elements from body 1\n"); } else if((earthFeCountBody1 + earthSiCountBody1) == 0) { fprintf(RunStatsFile,"\nThe Earth is only composed of elements from body 2\n"); } else { fprintf(RunStatsFile,"\nEarth ratio body1/body2 = %f\n", (float)(earthFeCountBody1 + earthSiCountBody1)/(float)(earthFeCountBody2 + earthSiCountBody2)); } //It is always assumed that the ecliptic plane is the xz-plane. 
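//The tilt reported below comes from the angular momentum vector L = (x, y, z):
//cos(angle) = (x*x + z*z)/(|L|*|L_xz|) = |L_xz|/|L|, where L_xz = (x, 0, z) is the projection of L onto the ecliptic (xz) plane,
//so angle is the angle between L and the ecliptic plane, and 90 degrees minus that angle is the tilt of the rotation axis away from the ecliptic normal (+y).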
x = angularMomentumEarthMoonSystem.x*momentumConverter; y = angularMomentumEarthMoonSystem.y*momentumConverter; z = angularMomentumEarthMoonSystem.z*momentumConverter; fprintf(RunStatsFile,"\nAngular momentum of the Earth Moon system = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); fprintf(RunStatsFile,"\nMagnitude of the angular momentum of the system = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); fprintf(RunStatsFile,"\nAngle off ecliptic plane of the system's rotation = %f\n", 90.0 - angle*180.0/Pi); x = angularMomentumEarth.x*momentumConverter; y = angularMomentumEarth.y*momentumConverter; z = angularMomentumEarth.z*momentumConverter; fprintf(RunStatsFile,"\nAngular momentum of the Earth = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); fprintf(RunStatsFile,"\nMagnitude of the angular momentum of the Earth = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); fprintf(RunStatsFile,"\nAngle off ecliptic plane of the Earth's rotation = %f\n", 90.0 - angle*180.0/Pi); x = angularMomentumMoon.x*momentumConverter; y = angularMomentumMoon.y*momentumConverter; z = angularMomentumMoon.z*momentumConverter; fprintf(RunStatsFile,"\nAngular momentum of the Moon = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); fprintf(RunStatsFile,"\nMagnitude of the angular momentum of the Moon = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); fprintf(RunStatsFile,"\nAngle off ecliptic plane of the Moon's rotation = %f\n", 90.0 - angle*180.0/Pi); x = centerOfMassEarthMoonSystem.x*lengthConverter; y = centerOfMassEarthMoonSystem.y*lengthConverter; z = centerOfMassEarthMoonSystem.z*lengthConverter; fprintf(RunStatsFile,"\nCenter of mass of the Earth-Moon system = (%f, %f, %f)", x, y, z); x = centerOfMassEarth.x*lengthConverter; y = centerOfMassEarth.y*lengthConverter; z = centerOfMassEarth.z*lengthConverter; fprintf(RunStatsFile,"\nCenter of mass of the Earth system = (%f, %f, %f)", x, y, z); x = centerOfMassMoon.x*lengthConverter; y = centerOfMassMoon.y*lengthConverter; z = centerOfMassMoon.z*lengthConverter; fprintf(RunStatsFile,"\nCenter of mass of the Moon system = (%f, %f, %f)\n", x, y, z); x = linearVelocityEarthMoonSystem.x*velocityConverter; y = linearVelocityEarthMoonSystem.y*velocityConverter; z = linearVelocityEarthMoonSystem.z*velocityConverter; fprintf(RunStatsFile,"\nLinear Velocity of the Earth-Moon system = (%f, %f, %f)", x, y, z); x = linearVelocityEarth.x*velocityConverter; y = linearVelocityEarth.y*velocityConverter; z = linearVelocityEarth.z*velocityConverter; fprintf(RunStatsFile,"\nLinear Velocity of the Earth system = (%f, %f, %f)", x, y, z); x = linearVelocityMoon.x*velocityConverter; y = linearVelocityMoon.y*velocityConverter; z = linearVelocityMoon.z*velocityConverter; fprintf(RunStatsFile,"\nLinear Velocity of the Moon system = (%f, %f, %f)\n", x, y, z); fprintf(RunStatsFile,"\n*****Stats of the entire system to check the numerical scheme's validity*****\n"); x = centerOfMassUniversalSystem.x*lengthConverter; y = centerOfMassUniversalSystem.y*lengthConverter; z = centerOfMassUniversalSystem.z*lengthConverter; fprintf(RunStatsFile,"\nCenter of mass of the entire system = (%f, %f, %f)\n", x, y, z); x = linearVelocityUniversalSystem.x*velocityConverter; y = linearVelocityUniversalSystem.y*velocityConverter; z = linearVelocityUniversalSystem.z*velocityConverter; fprintf(RunStatsFile,"\nLinear velocity of the entire system system = (%f, %f, %f)", x, 
y, z); mag = sqrt(x*x + y*y + z*z); fprintf(RunStatsFile,"\nMagnitude of the linear velocity of the entire system = %f\n", mag); x = angularMomentumUniversalSystem.x*momentumConverter; y = angularMomentumUniversalSystem.y*momentumConverter; z = angularMomentumUniversalSystem.z*momentumConverter; fprintf(RunStatsFile,"\nAngular momentum of the entire system system = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); fprintf(RunStatsFile,"\nMagnitude of the angular momentum of the entire system = %e\n", mag); fprintf(RunStatsFile,"\n*************************************************************************\n"); fprintf(RunStatsFile,"\n******************* Just the good stuff *********************************\n"); fprintf(RunStatsFile,"\n percent off correct Earth mass = %f ", 100.0*(massEarth*massConverter/(MassOfEarth))); fprintf(RunStatsFile,"\n percent off correct Moon mass = %f ", 100.0*(massMoon*massConverter/(MassOfMoon))); fprintf(RunStatsFile,"\n\n Earth mass percent iron = %f mass percent silicate = %f", float(earthFeCountBody1*MassFe + earthFeCountBody2*MassFe)/massEarth, float(earthSiCountBody1*MassSi + earthSiCountBody2*MassSi)/massEarth); fprintf(RunStatsFile,"\n Moon mass percent iron = %f mass percent silicate = %f", float(moonFeCountBody1*MassFe + moonFeCountBody2*MassFe)/massMoon, float(moonSiCountBody1*MassSi + moonSiCountBody2*MassSi)/massMoon); if((moonFeCountBody2 + moonSiCountBody2) != 0) { fprintf(RunStatsFile,"\n\n Moon body1/body2 ratio = %f ", float(moonFeCountBody1*MassFe + moonSiCountBody1*MassSi)/float(moonFeCountBody2*MassFe + moonSiCountBody2*MassSi)); } x = angularMomentumEarthMoonSystem.x*momentumConverter; y = angularMomentumEarthMoonSystem.y*momentumConverter; z = angularMomentumEarthMoonSystem.z*momentumConverter; mag = sqrt(x*x + y*y + z*z); fprintf(RunStatsFile,"\n Percent off correct angular momentum of the Earth-Moon System = %f ", 100.0*(1.0 - mag/AngularMomentumEarthMoonSystem)); x = angularMomentumEarth.x*momentumConverter; y = angularMomentumEarth.y*momentumConverter; z = angularMomentumEarth.z*momentumConverter; mag = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/mag); fprintf(RunStatsFile,"\n Percent off correct axial tilt of the Earth = %f ", 100.0*(1.0 - angle/EarthAxialTilt)); fprintf(RunStatsFile,"\n\n*************************************************************************\n\n\n"); } void recordPosAndVel() { fwrite(Pos, sizeof(float4), N, PosAndVelFile); fwrite(Vel, sizeof(float4), N, PosAndVelFile); } void recordContinuePosAndVel(double time) { fwrite(&time, sizeof(double), 1, ContinueRunPosAndVelFile); fwrite(Pos, sizeof(float4), N, ContinueRunPosAndVelFile); fwrite(Vel, sizeof(float4), N, ContinueRunPosAndVelFile); } void drawSimplePictureSeperate() { float3 centerOfMass1 = getCenterOfMassSeperate(1); float3 centerOfMass2 = getCenterOfMassSeperate(2); float3 linearVelocity1 = getLinearVelocitySeperate(1); float3 linearVelocity2 = getLinearVelocitySeperate(2); float3 angularMomentum1 = getAngularMomentumSeperate(1, centerOfMass1, linearVelocity1); float3 angularMomentum2 = getAngularMomentumSeperate(2, centerOfMass2, linearVelocity2); float Stretch; glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); //Coloring all the elements glBegin(GL_POINTS); for(int i=0; i<N; i++) { if(i < NFe1) { glColor3d(1.0,0.0,0.0); } else if(i < NFe1 + NSi1) { glColor3d(1.0,1.0,0.5); } else if(i < NFe1 + NSi1 + NFe2) { glColor3d(1.0,0.0,1.0); } else { glColor3d(0.0,0.5,0.0); } glVertex3f(Pos[i].x, Pos[i].y, Pos[i].z); } glEnd(); 
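	//Color legend for the point loop above (setup-stage element ordering: body 1 iron, body 1 silicate, body 2 iron, body 2 silicate):
	//red = body 1 iron, pale yellow = body 1 silicate, magenta = body 2 iron, dark green = body 2 silicate.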
glLineWidth(1.0); //Placing a green vector in the direction of the disired linear motion of each body glColor3f(0.0,1.0,0.0); Stretch = 1.0; glBegin(GL_LINE_LOOP); glVertex3f(centerOfMass1.x, centerOfMass1.y, centerOfMass1.z); glVertex3f(centerOfMass1.x + InitialVelocity1.x*Stretch, centerOfMass1.y + InitialVelocity1.y*Stretch, centerOfMass1.z + InitialVelocity1.z*Stretch); glEnd(); glBegin(GL_LINE_LOOP); glVertex3f(centerOfMass2.x, centerOfMass2.y, centerOfMass2.z); glVertex3f(centerOfMass2.x + InitialVelocity2.x*Stretch, centerOfMass2.y + InitialVelocity2.y*Stretch, centerOfMass2.z + InitialVelocity2.z*Stretch); glEnd(); //Placing a yellow vector in the direction of the actual linear motion of each body glColor3f(1.0,1.0,0.0); Stretch = 30.0; glBegin(GL_LINE_LOOP); glVertex3f(centerOfMass1.x, centerOfMass1.y, centerOfMass1.z); glVertex3f(centerOfMass1.x + linearVelocity1.x*Stretch, centerOfMass1.y + linearVelocity1.y*Stretch, centerOfMass1.z + linearVelocity1.z*Stretch); glEnd(); glBegin(GL_LINE_LOOP); glVertex3f(centerOfMass2.x, centerOfMass2.y, centerOfMass2.z); glVertex3f(centerOfMass2.x + linearVelocity2.x*Stretch, centerOfMass2.y + linearVelocity2.y*Stretch, centerOfMass2.z + linearVelocity2.z*Stretch); glEnd(); //Placing a blue vector in the direction of the disired angular momentum glColor3f(0.0,0.0,1.0); Stretch = 50.0; glBegin(GL_LINE_LOOP); glVertex3f(centerOfMass1.x, centerOfMass1.y, centerOfMass1.z); glVertex3f(centerOfMass1.x + InitialSpin1.x*Stretch, centerOfMass1.y + InitialSpin1.y*Stretch, centerOfMass1.z + InitialSpin1.z*Stretch); glEnd(); glBegin(GL_LINE_LOOP); glVertex3f(centerOfMass2.x, centerOfMass2.y, centerOfMass2.z); glVertex3f(centerOfMass2.x + InitialSpin2.x*Stretch, centerOfMass2.y + InitialSpin2.y*Stretch, centerOfMass2.z + InitialSpin2.z*Stretch); glEnd(); //Placing a red vector in the direction of the actual angular momentum glColor3f(1.0,0.0,0.0); Stretch = 50.0; glBegin(GL_LINE_LOOP); glVertex3f(centerOfMass1.x, centerOfMass1.y, centerOfMass1.z); glVertex3f(centerOfMass1.x + angularMomentum1.x*Stretch, centerOfMass1.y + angularMomentum1.y*Stretch, centerOfMass1.z + angularMomentum1.z*Stretch); glEnd(); glBegin(GL_LINE_LOOP); glVertex3f(centerOfMass2.x, centerOfMass2.y, centerOfMass2.z); glVertex3f(centerOfMass2.x + angularMomentum2.x*Stretch, centerOfMass2.y + angularMomentum2.y*Stretch, centerOfMass2.z + angularMomentum2.z*Stretch); glEnd(); glutSwapBuffers(); } void drawPictureCollision() { glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glBegin(GL_POINTS); for(int i=0; i<N; i++) { if(i < NFe1) { glColor3d(1.0,0.0,0.0); } else if(i < NFe1 + NFe2) { glColor3d(1.0,0.0,1.0); } else if(i < NFe1 + NFe2 + NSi1) { glColor3d(1.0,1.0,0.5); } else { glColor3d(0.0,0.5,0.0); } glVertex3f(Pos[i].x, Pos[i].y, Pos[i].z); } glEnd(); glutSwapBuffers(); } void drawAnalysisPictureCollision() { int i; findEarthAndMoon(); float massSystem = getMassCollision(0); float massEarth = getMassCollision(1); float massMoon = getMassCollision(2); float3 centerOfMassSystem = getCenterOfMassCollision(0); float3 centerOfMassEarth = getCenterOfMassCollision(1); float3 centerOfMassMoon = getCenterOfMassCollision(2); float3 linearVelocitySystem = getLinearVelocityCollision(0); float3 linearVelocityEarth = getLinearVelocityCollision(1); float3 linearVelocityMoon = getLinearVelocityCollision(2); float3 angularMomentumSystem = getAngularMomentumCollision(0); float3 angularMomentumEarth = getAngularMomentumCollision(1); float3 angularMomentumMoon = getAngularMomentumCollision(2); float 
Stretch; glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); //Coloring all the elements glPointSize(1.0); glBegin(GL_POINTS); for(i=0; i<N; i++) { if(i < NFe1) { glColor3d(1.0,0.0,0.0); } else if(i < NFe1 + NFe2) { glColor3d(1.0,0.0,1.0); } else if(i < NFe1 + NFe2 + NSi1) { glColor3d(1.0,1.0,0.5); } else { glColor3d(0.0,0.5,0.0); } glVertex3f(Pos[i].x, Pos[i].y, Pos[i].z); } glEnd(); glPointSize(1.0); //Recoloring the Earth elements blue glColor3d(0.0,0.0,1.0); glBegin(GL_POINTS); for(i = 0; i < NumberOfEarthElements; i++) { glVertex3f(Pos[EarthIndex[i]].x, Pos[EarthIndex[i]].y, Pos[EarthIndex[i]].z); } glEnd(); //Recoloring the Moon elements red glColor3d(1.0,0.0,0.0); glBegin(GL_POINTS); for(i = 0; i < NumberOfMoonElements; i++) { glVertex3f(Pos[MoonIndex[i]].x, Pos[MoonIndex[i]].y, Pos[MoonIndex[i]].z); } glEnd(); glLineWidth(1.0); //Placing green vectors in the direction of linear velocity of the Moon Stretch = 1.0; glColor3f(0.0,1.0,0.0); glBegin(GL_LINE_LOOP); glVertex3f(centerOfMassMoon.x, centerOfMassMoon.y, centerOfMassMoon.z); glVertex3f( centerOfMassMoon.x + linearVelocityMoon.x*Stretch, centerOfMassMoon.y + linearVelocityMoon.y*Stretch, centerOfMassMoon.z + linearVelocityMoon.z*Stretch); glEnd(); //Place a white point at the center of mass of the Earth-Moon system glColor3d(1.0,1.0,1.0); glPointSize(10.0); glBegin(GL_POINTS); glVertex3f(centerOfMassSystem.x, centerOfMassSystem.y, centerOfMassSystem.z); glEnd(); //Place a yellow point at the center of mass of the Earth glColor3d(1.0,1.0,0.0); glPointSize(5.0); glBegin(GL_POINTS); glVertex3f(centerOfMassEarth.x, centerOfMassEarth.y, centerOfMassEarth.z); glEnd(); //Place a yellow point at the center of mass of the Moon glColor3d(1.0,1.0,0.0); glPointSize(5.0); glBegin(GL_POINTS); glVertex3f(centerOfMassMoon.x, centerOfMassMoon.y, centerOfMassMoon.z); glEnd(); //Placing white vectors in the direction of the angular momentum of the Earth-Moon system glColor3f(1.0,1.0,1.0); Stretch = 1.0; glBegin(GL_LINE_LOOP); glVertex3f(centerOfMassSystem.x, centerOfMassSystem.y, centerOfMassSystem.z); glVertex3f( centerOfMassSystem.x + angularMomentumSystem.x*Stretch/massSystem, centerOfMassSystem.y + angularMomentumSystem.y*Stretch/massSystem, centerOfMassSystem.z + angularMomentumSystem.z*Stretch/massSystem); glEnd(); //Placing blue vectors in the direction of the angular momentum of the Earth Stretch = 1.0; glBegin(GL_LINE_LOOP); glColor3f(0.0,0.0,1.0); glVertex3f(centerOfMassEarth.x, centerOfMassEarth.y, centerOfMassEarth.z); glVertex3f( centerOfMassEarth.x + angularMomentumEarth.x*Stretch/massEarth, centerOfMassEarth.y + angularMomentumEarth.y*Stretch/massEarth, centerOfMassEarth.z + angularMomentumEarth.z*Stretch/massEarth); glEnd(); //Placing red vectors in the direction of the angular momentum of the Moon Stretch = 1.0; glColor3f(1.0,0.0,0.0); glBegin(GL_LINE_LOOP); glVertex3f(centerOfMassMoon.x, centerOfMassMoon.y, centerOfMassMoon.z); glVertex3f( centerOfMassMoon.x + angularMomentumMoon.x*Stretch/massMoon, centerOfMassMoon.y + angularMomentumMoon.y*Stretch/massMoon, centerOfMassMoon.z + angularMomentumMoon.z*Stretch/massMoon); glEnd(); glutSwapBuffers(); free(EarthIndex); free(MoonIndex); } void transformInitialConditionsFromSeperateToCollision() { int k; hipMemcpy( PlaceHolder, Pos_DEV0, N *sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpy Pos2"); k = 0; for(int i = 0; i < NFe1; i++) { Pos[k] = PlaceHolder[i]; k++; } for(int i = NFe1 + NSi1; i < NFe1 + NSi1 + NFe2; i++) { Pos[k] = PlaceHolder[i]; k++; } for(int i = NFe1; 
i < NFe1 + NSi1; i++) { Pos[k] = PlaceHolder[i]; k++; } for(int i = NFe1 + NSi1 + NFe2; i < N; i++) { Pos[k] = PlaceHolder[i]; k++; } hipMemcpy( PlaceHolder, Vel_DEV0, N *sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpy Vel"); k = 0; for(int i = 0; i < NFe1; i++) { Vel[k] = PlaceHolder[i]; k++; } for(int i = NFe1 + NSi1; i < NFe1 + NSi1 + NFe2; i++) { Vel[k] = PlaceHolder[i]; k++; } for(int i = NFe1; i < NFe1 + NSi1; i++) { Vel[k] = PlaceHolder[i]; k++; } for(int i = NFe1 + NSi1 + NFe2; i < N; i++) { Vel[k] = PlaceHolder[i]; k++; } } void nBodySeperate() { float time = 0.0; int tdraw = 1; int dampCheck = 0; int rest1Check = 0; int spinCheck = 0; hipMemcpy( Pos_DEV0, Pos, N *sizeof(float4), hipMemcpyHostToDevice ); errorCheck("hipMemcpy Pos3"); hipMemcpy( Vel_DEV0, Vel, N *sizeof(float4), hipMemcpyHostToDevice ); errorCheck("hipMemcpy Vel"); while(time < SetupTime) { hipLaunchKernelGGL(( getForcesSeperate), dim3(GridConfig), dim3(BlockConfig), 0, 0, Pos_DEV0, Vel_DEV0, Force_DEV0, ForceSeperateConstant); if(time < DampTime) { if(dampCheck == 0) { printf("\n************************************************** Damping is on\n"); dampCheck = 1; tdraw = 0; } hipLaunchKernelGGL(( moveBodiesDampedSeperate), dim3(GridConfig), dim3(BlockConfig), 0, 0, Pos_DEV0, Vel_DEV0, Force_DEV0, MoveSeperateConstant, DampRateBody1, DampRateBody2); } else if(time < DampTime + DampRestTime) { if(rest1Check == 0) { printf("\n************************************************** Damp rest stage is on\n"); rest1Check = 1; tdraw = 0; } hipLaunchKernelGGL(( moveBodiesSeperate), dim3(GridConfig), dim3(BlockConfig), 0, 0, Pos_DEV0, Vel_DEV0, Force_DEV0, MoveSeperateConstant); } else { if(spinCheck == 0) { hipMemcpy( Pos, Pos_DEV0, N *sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpy Pos4"); hipMemcpy( Vel, Vel_DEV0, N *sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpy Vel"); spinBodySeperate(1, InitialSpin1); spinBodySeperate(2, InitialSpin2); hipMemcpy( Pos_DEV0, Pos, N *sizeof(float4), hipMemcpyHostToDevice ); errorCheck("hipMemcpy Pos5"); hipMemcpy( Vel_DEV0, Vel, N *sizeof(float4), hipMemcpyHostToDevice ); errorCheck("hipMemcpy Vel"); printf("\n************************************************** bodies have been spun\n"); printf("\n************************************************** spin rest stage is on\n"); spinCheck = 1; } hipLaunchKernelGGL(( moveBodiesSeperate), dim3(GridConfig), dim3(BlockConfig), 0, 0, Pos_DEV0, Vel_DEV0, Force_DEV0, MoveSeperateConstant); } if(tdraw == DrawRate) { hipMemcpy( Pos, Pos_DEV0, N *sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpy Pos6"); hipMemcpy( Vel, Vel_DEV0, N *sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpy Vel"); drawSimplePictureSeperate(); //drawPictureSeperate(); printf("\nSetup time in hours = %f\n", time*UnitTime/3600.0); tdraw = 0; } tdraw++; time += Dt; } } void resetInitialConditions() { hipMemcpy( Pos, Pos_DEV0, N *sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpy Pos7"); hipMemcpy( Vel, Vel_DEV0, N *sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpy Vel"); setBodyPositionSeperate(1, InitialPosition1.x, InitialPosition1.y, InitialPosition1.z); setBodyVelocitySeperate(1, InitialVelocity1.x, InitialVelocity1.y, InitialVelocity1.z); setBodyPositionSeperate(2, InitialPosition2.x, InitialPosition2.y, InitialPosition2.z); setBodyVelocitySeperate(2, InitialVelocity2.x, InitialVelocity2.y, InitialVelocity2.z); printf("\n************************************************** Initial 
velocities have been given\n"); hipMemcpy( Pos_DEV0, Pos, N *sizeof(float4), hipMemcpyHostToDevice ); errorCheck("hipMemcpy Pos8"); hipMemcpy( Vel_DEV0, Vel, N *sizeof(float4), hipMemcpyHostToDevice ); errorCheck("hipMemcpy Vel"); printf("\n************************************************** The bodies have been created and intialized\n"); } void copyCreatedBodiesUpToDevice() { if(NumberOfGpus == 1 || UseMultipleGPU == 0) { hipMemcpy( Pos_DEV0, Pos, N *sizeof(float4), hipMemcpyHostToDevice ); errorCheck("hipMemcpy Pos9"); hipMemcpy( Vel_DEV0, Vel, N *sizeof(float4), hipMemcpyHostToDevice ); errorCheck("hipMemcpy Vel"); } else { hipSetDevice(0); errorCheck("hipSetDevice 0"); hipMemcpyAsync( PosFstHalf_0, Pos, (N/2)*sizeof(float4), hipMemcpyHostToDevice ); errorCheck("hipMemcpyAsync PosFstHalf 0"); hipMemcpyAsync( PosSndHalf_0, Pos+(N/2), (N/2)*sizeof(float4), hipMemcpyHostToDevice ); errorCheck("hipMemcpyAsync PosSndHalf 0"); hipMemcpyAsync( VelFstHalf_0, Vel, (N/2)*sizeof(float4), hipMemcpyHostToDevice ); errorCheck("hipMemcpyAsync VelFstHalf 0"); hipMemcpyAsync( VelSndHalf_0, Vel+(N/2), (N/2)*sizeof(float4), hipMemcpyHostToDevice ); errorCheck("hipMemcpyAsync VelSndHalf 0"); hipSetDevice(1); errorCheck("hipSetDevice 0"); hipMemcpyAsync( PosFstHalf_1, Pos, (N/2)*sizeof(float4), hipMemcpyHostToDevice ); errorCheck("hipMemcpyAsync PosFstHalf 0"); hipMemcpyAsync( PosSndHalf_1, Pos+(N/2), (N/2)*sizeof(float4), hipMemcpyHostToDevice ); errorCheck("hipMemcpyAsync PosSndHalf 0"); hipMemcpyAsync( VelFstHalf_1, Vel, (N/2)*sizeof(float4), hipMemcpyHostToDevice ); errorCheck("hipMemcpyAsync VelFstHalf 0"); hipMemcpyAsync( VelSndHalf_1, Vel+(N/2), (N/2)*sizeof(float4), hipMemcpyHostToDevice ); errorCheck("hipMemcpyAsync VelSndHalf 0"); } } double nBodyCollisionSingleGPU() { int tDraw = 1; int tRecord = 1; while(RunTime <= TotalRunTime) { hipLaunchKernelGGL(( getForcesCollisionSingleGPU), dim3(GridConfig), dim3(BlockConfig), 0, 0, Pos_DEV0, Vel_DEV0, Force_DEV0, ForceCollisionConstant); hipLaunchKernelGGL(( moveBodiesCollisionSingleGPU), dim3(GridConfig), dim3(BlockConfig), 0, 0, Pos_DEV0, Vel_DEV0, Force_DEV0, MoveCollisionConstant); if(tDraw == DrawRate) { hipMemcpy( Pos, Pos_DEV0, N *sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Pos"); hipMemcpy( Vel, Vel_DEV0, N *sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Vel"); if (DrawQuality == 1) drawAnalysisPictureCollision(); else if (DrawQuality == 2) drawPictureCollision(); else { printf("\nTSU Error: Invalid draw quality\n"); exit(0); } tDraw = 0; printf("\nCollision run time = %f hours\n", RunTime*UnitTime/3600.0); } tDraw++; if(PrintCollisionStats == 1) { hipMemcpy( Pos, Pos_DEV0, N *sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Pos"); hipMemcpy( Vel, Vel_DEV0, N *sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Vel"); printCollisionStatsToScreen(RunTime); PrintCollisionStats = 0; } if(PrintContinueStats == 1) { hipMemcpy( Pos, Pos_DEV0, N *sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Pos"); hipMemcpy( Vel, Vel_DEV0, N *sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Vel"); printContinueStatsToScreen(RunTime); PrintContinueStats = 0; } if(WriteToFile == 1 && tRecord == RecordRate) { hipMemcpy( Pos, Pos_DEV0, N *sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Pos"); hipMemcpy( Vel, Vel_DEV0, N *sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Vel"); recordPosAndVel(); tRecord = 0; } 
tRecord++; RunTime += Dt; } RunTime = RunTime - Dt; hipMemcpy( Pos, Pos_DEV0, N *sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Pos"); hipMemcpy( Vel, Vel_DEV0, N *sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Vel"); return(RunTime); } double nBodyCollisionDoubleGPU() { int tDraw = 1; int tRecord = 1; cout << "\nCollision run time start = " << RunTime*UnitTime/3600.0 << " hours." << endl; while(RunTime <= TotalRunTime) { hipSetDevice(0); errorCheck("hipSetDevice 0"); hipLaunchKernelGGL(( getForcesCollisionDoubleGPU0), dim3(GridConfig), dim3(BlockConfig), 0, 0, PosFstHalf_0, PosSndHalf_0, VelFstHalf_0, VelSndHalf_0, ForceFstHalf_0, N, ForceCollisionConstant); errorCheck("getForcesCollisionDoubleGPU 0"); hipLaunchKernelGGL(( moveBodiesCollisionDoubleGPU0), dim3(GridConfig), dim3(BlockConfig), 0, 0, PosFstHalf_0, VelFstHalf_0, ForceFstHalf_0, N, MoveCollisionConstant); errorCheck("moveBodiesCollisionDoubleGPU 0"); hipSetDevice(1); errorCheck("hipSetDevice 1"); hipLaunchKernelGGL(( getForcesCollisionDoubleGPU1), dim3(GridConfig), dim3(BlockConfig), 0, 0, PosFstHalf_1, PosSndHalf_1, VelFstHalf_1, VelSndHalf_1, ForceSndHalf_1, N, ForceCollisionConstant); errorCheck("getForcesCollisionDoubleGPU 1"); hipLaunchKernelGGL(( moveBodiesCollisionDoubleGPU1), dim3(GridConfig), dim3(BlockConfig), 0, 0, PosSndHalf_1, VelSndHalf_1, ForceSndHalf_1, N, MoveCollisionConstant); errorCheck("moveBodiesCollisionDoubleGPU 1"); hipDeviceSynchronize(); errorCheck("hipDeviceSynchronize 1"); hipSetDevice(0); errorCheck("hipSetDevice 0"); hipMemcpyPeerAsync(PosFstHalf_1,1,PosFstHalf_0,0,(N/2)*sizeof(float4)); errorCheck("hipMemcpyPeerAsync 0 - Pos"); hipMemcpyPeerAsync(VelFstHalf_1,1,VelFstHalf_0,0,(N/2)*sizeof(float4)); errorCheck("hipMemcpyPeerAsync 0 - Vel"); hipDeviceSynchronize(); errorCheck("hipDeviceSynchronize 2"); hipSetDevice(1); errorCheck("hipSetDevice 1"); hipMemcpyPeerAsync(PosSndHalf_0,0,PosSndHalf_1,1,(N/2)*sizeof(float4)); errorCheck("hipMemcpyPeerAsync 1 - Pos"); hipMemcpyPeerAsync(VelSndHalf_0,0,VelSndHalf_1,1,(N/2)*sizeof(float4)); errorCheck("hipMemcpyPeerAsync 1 - Vel"); hipDeviceSynchronize(); errorCheck("hipDeviceSynchronize 3"); if(tDraw == DrawRate) { hipSetDevice(0); errorCheck("hipSetDevice 0"); hipMemcpyAsync(Pos, PosFstHalf_0, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Pos"); hipMemcpyAsync(Pos+(N/2), PosSndHalf_0, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Pos"); hipSetDevice(1); errorCheck("hipSetDevice 1"); hipMemcpyAsync(Vel, VelFstHalf_1, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Vel"); hipMemcpyAsync(Vel+(N/2), VelSndHalf_1, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Vel"); if (DrawQuality == 1) drawAnalysisPictureCollision(); else if (DrawQuality == 2) drawPictureCollision(); else { printf("\nTSU Error: Invalid draw quality\n"); exit(0); } tDraw = 0; cout << "\nCollision run time = " << RunTime*UnitTime/3600.0 << " hours." 
<< endl; } tDraw++; if(PrintCollisionStats == 1) { hipSetDevice(0); errorCheck("hipSetDevice 0"); hipMemcpyAsync(Pos, PosFstHalf_0, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Pos"); hipMemcpyAsync(Pos+(N/2), PosSndHalf_0, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Pos"); hipSetDevice(1); errorCheck("hipSetDevice 1"); hipMemcpyAsync(Vel, VelFstHalf_1, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Vel"); hipMemcpyAsync(Vel+(N/2), VelSndHalf_1, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Vel"); printCollisionStatsToScreen(RunTime); PrintCollisionStats = 0; } if(PrintContinueStats == 1) { hipSetDevice(0); errorCheck("hipSetDevice 0"); hipMemcpyAsync(Pos, PosFstHalf_0, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Pos"); hipMemcpyAsync(Pos+(N/2), PosSndHalf_0, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Pos"); hipSetDevice(1); errorCheck("hipSetDevice 1"); hipMemcpyAsync(Vel, VelFstHalf_1, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Vel"); hipMemcpyAsync(Vel+(N/2), VelSndHalf_1, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Vel"); printContinueStatsToScreen(RunTime); PrintContinueStats = 0; } if(WriteToFile == 1 && tRecord == RecordRate) { hipSetDevice(0); errorCheck("hipSetDevice 0"); hipMemcpyAsync(Pos, PosFstHalf_0, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Pos"); hipMemcpyAsync(Pos+(N/2), PosSndHalf_0, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Pos"); hipSetDevice(1); errorCheck("hipSetDevice 1"); hipMemcpyAsync(Vel, VelFstHalf_1, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Vel"); hipMemcpyAsync(Vel+(N/2), VelSndHalf_1, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Vel"); recordPosAndVel(); tRecord = 0; } tRecord++; RunTime += Dt; } RunTime = RunTime -Dt; cout << "\nCollision run time end = " << RunTime*UnitTime/3600.0 << " hours." 
<< endl; hipSetDevice(0); errorCheck("hipSetDevice 0"); hipMemcpyAsync(Pos, PosFstHalf_0, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Pos"); hipMemcpyAsync(Pos+(N/2), PosSndHalf_0, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Pos"); hipSetDevice(1); errorCheck("hipSetDevice 1"); hipMemcpyAsync(Vel, VelFstHalf_1, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Vel"); hipMemcpyAsync(Vel+(N/2), VelSndHalf_1, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Vel"); return(RunTime); } void cleanKill(double time) { if(NumberOfGpus == 1 || UseMultipleGPU == 0) { hipMemcpy( Pos, Pos_DEV0, N *sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Pos"); hipMemcpy( Vel, Vel_DEV0, N *sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Vel"); } else { hipSetDevice(0); errorCheck("hipSetDevice 0"); hipMemcpyAsync(Pos, PosFstHalf_0, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Pos"); hipMemcpyAsync(Pos+(N/2), PosSndHalf_0, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Pos"); hipSetDevice(1); errorCheck("hipSetDevice 1"); hipMemcpyAsync(Vel, VelFstHalf_1, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Vel"); hipMemcpyAsync(Vel+(N/2), VelSndHalf_1, (N/2)*sizeof(float4), hipMemcpyDeviceToHost ); errorCheck("hipMemcpyAsync Vel"); } recordFinalCollisionStat(time); recordContinuePosAndVel(time); printContinueStatsToFile(time); cleanUpCollision(); exit(0); } static void signalHandler(int signum) { int command; cout << "\n\n******************************************************" << endl; cout << "Enter:666 to kill the run." << endl; cout << "Enter:1 to cleanly terminate the run.\t(not valid in the setup stage)." << endl; cout << "Enter:2 to change the draw rate." << endl; cout << "Enter:3 to change the draw quality.\t(not valid in the setup stage)." << endl; cout << "Enter:4 to set your eye location." << endl; cout << "Enter:5 to set the Center of Mass as your center." << endl; cout << "Enter:6 to print the run stats.\t(not valid in the setup stage)." << endl; cout << "Enter:7 to print the continue stats.\t(not valid in the setup stage)." << endl; cout << "Enter:8 to change the total run time." << endl; cout << "Enter:9 to continue the run." << endl; cout << "******************************************************\n\nCommand: "; cin >> command; if(command == 666) { cout << "\n\n******************************************************" << endl; cout << "Are you sure you want to terminate the run?" << endl; cout << "Enter:666 again if you are sure. Enter anything else to continue the run." << endl; cout << "******************************************************\n\nCommand: "; cin >> command; if(command == 666) { cleanUpCollision(); exit(0); } } else if(command == 1) { cleanKill(RunTime); } else if(command == 2) { cout << "\nEnter the desired draw rate: "; cin >> DrawRate; cout << "\nDrawRate: " << DrawRate << endl; } else if(command == 3) { cout << "\nEnter the desired draw quality.\n1 for analysis.\n2 for standard." 
<< endl; cin >> DrawQuality; cout << "\nDrawQuality: " << DrawQuality << endl; } else if (command == 4) { cout << "******************************************************" << endl; cout << "Here is where your current Eye is at: " << endl; cout << "EyeX: " << EyeX << endl; cout << "EyeY: " << EyeY << endl; cout << "EyeZ: " << EyeZ << endl; cout << "Changing this will determine how close/far you are." << endl; cout << "******************************************************" << endl; cout << "\nEnter the desired x location of your eye (double): "; cin >> EyeX; cout << "Enter the desired y location of your eye (double): "; cin >> EyeY; cout << "Enter the desired z location of your eye (double): "; cin >> EyeZ; glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glLoadIdentity(); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glFrustum(-0.2, 0.2, -0.2, 0.2, Near, Far); glMatrixMode(GL_MODELVIEW); gluLookAt(EyeX, EyeY, EyeZ, CenterX, CenterY, CenterZ, UpX, UpY, UpZ); //glutPostRedisplay(); //Display(); } else if (command == 5) { float3 temp = getCenterOfMassCollision(0); cout << "******************************************************" << endl; cout << "Center of Mass in the X-direction: " << temp.x << endl; cout << "Center of Mass in the Y-direction: " << temp.y << endl; cout << "Center of Mass in the Z-direction: " << temp.z << endl; cout << "This is the Center of Mass of the System" << endl; cout << "******************************************************" << endl; CenterX = temp.x; CenterY = temp.y; CenterZ = temp.z; glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glLoadIdentity(); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glFrustum(-0.2, 0.2, -0.2, 0.2, Near, Far); glMatrixMode(GL_MODELVIEW); gluLookAt(EyeX, EyeY, EyeZ, CenterX, CenterY, CenterZ, UpX, UpY, UpZ); //glutPostRedisplay(); //Display(); } else if (command == 6) { PrintCollisionStats = 1; } else if (command == 7) { PrintContinueStats = 1; } else if (command == 8) { cout << "\nEnter the desired TotalRunTime (float): "; cin >> TotalRunTime; TotalRunTime *= 3600.0/UnitTime; } else if (command == 9) { cout << "\nRun continued." 
<< endl; } else { cout <<"\n\n Invalid Command\n" << endl; } } void typeOfRunCheck() { cout << "\nEnter 0 to create a new Run.\nEnter 1 to create a branch Run.\nEnter 2 to continue an existing Run.\n\n"; cin >> TypeOfRun; } void readRootStartPosAndVelFile() { FILE *temp = fopen("RootStartPosAndVel","rb"); fread(Pos, sizeof(float4), N, temp); fread(Vel, sizeof(float4), N, temp); fclose(temp); fseek(PosAndVelFile,0,SEEK_END); } void readContinuePosAndVel() { ContinueRunPosAndVelFile = fopen("ContinueRunPosAndVel","rb"); fread(&RunTime, sizeof(double), 1, ContinueRunPosAndVelFile); fread(Pos, sizeof(float4), N, ContinueRunPosAndVelFile); fread(Vel, sizeof(float4), N, ContinueRunPosAndVelFile); //ContinueRunPosAndVelFile.clear(); fclose(ContinueRunPosAndVelFile); } void control() { double time; struct sigaction sa; sa.sa_handler = signalHandler; sigemptyset(&sa.sa_mask); sa.sa_flags = SA_RESTART; // Restart functions if interrupted by handler if (sigaction(SIGINT, &sa, NULL) == -1) { printf("\nTSU Error: sigaction error\n"); } //Setup run if (TypeOfRun == 0) { createFolderForNewRun(); readRunParameters(); setRunParameters(); openNewRunFiles(); recordSetupStats(); loadKernalConstantStructures(); allocateCPUMemory(); checkSetupForErrors(); //Create and initialize bodies deviceSetupSeperate(); createBodies(); nBodySeperate(); resetInitialConditions(); recordStatsOfCreatedBodies(); recordStartPosVelOfCreatedBodiesSeperate(); transformInitialConditionsFromSeperateToCollision(); cleanUpSeperate(); //Collide bodies deviceSetupCollision(); copyCreatedBodiesUpToDevice(); if(NumberOfGpus == 1 || UseMultipleGPU == 0) time = nBodyCollisionSingleGPU(); else time = nBodyCollisionDoubleGPU(); recordFinalCollisionStat(time); recordContinuePosAndVel(time); printContinueStatsToFile(time); cleanUpCollision(); printf("\n DONE \n"); exit(0); } else if (TypeOfRun == 1) { createFolderForBranchRun(RootFolderName); readRunParameters(); setRunParameters(); readBranchParameters(); setBranchParameters(); openBranchRunFiles(); allocateCPUMemory(); readRootStartPosAndVelFile(); InitialPosition1.x += BranchPosition1.x; InitialPosition1.y += BranchPosition1.y; InitialPosition1.z += BranchPosition1.z; InitialPosition2.x += BranchPosition2.x; InitialPosition2.y += BranchPosition2.y; InitialPosition2.z += BranchPosition2.z; InitialVelocity1.x += BranchVelocity1.x; InitialVelocity1.y += BranchVelocity1.y; InitialVelocity1.z += BranchVelocity1.z; InitialVelocity2.x += BranchVelocity2.x; InitialVelocity2.y += BranchVelocity2.y; InitialVelocity2.z += BranchVelocity2.z; InitialSpin1.x += BranchSpin1.x; InitialSpin1.y += BranchSpin1.y; InitialSpin1.z += BranchSpin1.z; InitialSpin1.w += BranchSpin1.w; InitialSpin2.x += BranchSpin2.x; InitialSpin2.y += BranchSpin2.y; InitialSpin2.z += BranchSpin2.z; InitialSpin2.w += BranchSpin2.w; recordSetupStats(); loadKernalConstantStructures(); checkSetupForErrors(); deviceSetupSeperate(); //From here down to nBodySeperate is like the create bodies above but all that needs to be done is move and spin setBodyPositionSeperate(1, InitialPosition1.x, InitialPosition1.y, InitialPosition1.z); //setBodyVelocitySeperate(1, InitialVelocity1.x, InitialVelocity1.y, InitialVelocity1.z); setBodyPositionSeperate(2, InitialPosition2.x, InitialPosition2.y, InitialPosition2.z); //setBodyVelocitySeperate(2, InitialVelocity2.x, InitialVelocity2.y, InitialVelocity2.z); //This is really the added spin but must be put in initail to fool nBodySeperate because the original spin is already done InitialSpin1 = BranchSpin1; 
InitialSpin2 = BranchSpin2; DampTime = -1.0; DampRestTime = -1.0; SetupTime = BranchSpinRestTime; nBodySeperate(); resetInitialConditions(); recordStatsOfCreatedBodies(); recordStartPosVelOfCreatedBodiesSeperate(); transformInitialConditionsFromSeperateToCollision(); cleanUpSeperate(); //Collide bodies TotalRunTime = BranchRunTime; deviceSetupCollision(); copyCreatedBodiesUpToDevice(); if(NumberOfGpus == 1 || UseMultipleGPU == 0) time = nBodyCollisionSingleGPU(); else time = nBodyCollisionDoubleGPU(); recordFinalCollisionStat(time); recordContinuePosAndVel(time); printContinueStatsToFile(time); cleanUpCollision(); printf("\n DONE \n"); exit(0); } else if (TypeOfRun == 2) { chdir(RootFolderName); readRunParameters(); setRunParameters(); loadKernalConstantStructures(); allocateCPUMemory(); checkSetupForErrors(); readContinuePosAndVel(); openContinueRunFiles(); TotalRunTime = AddedRunTime*3600.0/UnitTime + RunTime; //Collide bodies deviceSetupCollision(); copyCreatedBodiesUpToDevice(); if(NumberOfGpus == 1 || UseMultipleGPU == 0) time = nBodyCollisionSingleGPU(); else time = nBodyCollisionDoubleGPU(); recordFinalCollisionStat(time); recordContinuePosAndVel(time); printContinueStatsToFile(time); cleanUpCollision(); printf("\n DONE \n"); exit(0); } else { printf("\n Bad TypeOfRun value \n"); exit(0); } } //https://www.opengl.org/archives/resources/faq/technical/viewing.htm void Display(void) { glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glLoadIdentity(); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glOrtho(Left, Right, Bottom, Top, Front, Back); glMatrixMode(GL_MODELVIEW); gluLookAt(EyeX, EyeY, EyeZ, CenterX, CenterY, CenterZ, UpX, UpY, UpZ); } void reshape(GLint w, GLint h) { glViewport(0, 0, w, h); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glOrtho(Left, Right, Bottom, Top, Front, Back); glMatrixMode(GL_MODELVIEW); } void init() { glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glLoadIdentity(); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glOrtho(Left, Right, Bottom, Top, Front, Back); glMatrixMode(GL_MODELVIEW); gluLookAt(EyeX, EyeY, EyeZ, CenterX, CenterY, CenterZ, UpX, UpY, UpZ); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); } int main(int argc, char** argv) { if( argc < 1) { printf("\n You need to intire the run type (int 0 new run, 1 branch run, or 2 continue run) on the comand line\n"); exit(0); } else { TypeOfRun = atoi(argv[1]); } if( TypeOfRun == 1) { if(argc < 2) { printf("\n You need to intire a root folder to work from on the comand line\n"); exit(0); } else { strcat(RootFolderName, argv[2]); } } if( TypeOfRun == 2) { if(argc < 2) { printf("\n You need to intire a root folder to work from on the comand line\n"); exit(0); } else { strcat(RootFolderName, argv[2]); } if(argc < 3) { printf("\n You need to intire the extra run time for the continuation\n"); exit(0); } else { AddedRunTime = atof(argv[3]); } } glutInit(&argc,argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGB); glutInitWindowSize(XWindowSize,YWindowSize); glutInitWindowPosition(0,0); glutCreateWindow("Giant Impact Hypothesis Simulation"); glutReshapeFunc(reshape); init(); glShadeModel(GL_SMOOTH); glClearColor(0.0, 0.0, 0.0, 0.0); glutDisplayFunc(Display); glutReshapeFunc(reshape); glutIdleFunc(control); glutMainLoop(); return 0; }
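//Illustrative sketch only (these helpers are an addition for clarity and are not called anywhere in the run;
//the function names are assumptions). They restate the two physical estimates used in printContinueStatsToFile:
//the rigid-body Roche limit d = 2.44*R*(rhoPrimary/rhoSatellite)^(1/3) and the escape speed v = sqrt(2*G*M/d),
//both expressed in the program's internal units (Gravity is the unit-scaled gravitational constant above).
static double rocheLimitSketch(double primaryRadius, double primaryDensity, double satelliteDensity)
{
	//Distance inside of which a fluid satellite of the given density would be tidally disrupted.
	return(2.44*primaryRadius*pow(primaryDensity/satelliteDensity, 1.0/3.0));
}

static double escapeSpeedSketch(double totalMass, double distance)
{
	//Speed needed at the given distance to escape a point mass totalMass.
	return(sqrt(2.0*Gravity*totalMass/distance));
}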
8a6bb09f981f98ed518cc23d7261a9c74b1cda53.cu
/* nvcc collider31.cu -o collider31 -lglut -lm -lGLU -lGL --use_fast_math -O3 -Xptxas "-warn-lmem-usage -warn-spills" -arch=sm_52 nvcc collider31.cu -o collider31 -lglut -lm -lGLU -lGL -prec-div=false -prec-sqrt=false -ftz=true -O3 nvcc collider31.cu -o collider31nofast -lglut -lm -lGLU -lGL -O3 */ #include <GL/glut.h> #include <GL/glu.h> #include <GL/gl.h> #include <math.h> #include <stdio.h> #include "stdio.h" #include <stdlib.h> #include <cuda.h> #include <string.h> #include <dirent.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> #include <curand.h> #include <curand_kernel.h> #include <signal.h> #include <iostream> #include <fstream> #include <sstream> #include <time.h> #include <iostream> #include <fstream> #include <sstream> #include <stdio.h> #include <stdlib.h> using namespace std; #define BLOCKSIZE 256 #define NUMBEROFEARTHRADIFORMOONMATERIAL 20.0 //Global to hold the time of the collision double RunTime = 0.0; //Continue and branch run globals int TypeOfRun = 0; char RootFolderName[256] = ""; double AddedRunTime = 0; //Globals for files FILE *RunStatsFile; FILE *PosAndVelFile; FILE *StartPosAndVelFile; FILE *ContinueRunStatsFile; FILE *ContinueRunPosAndVelFile; //Globals to hold positions, velocities, and forces on both the GPU and CPU float4 *PlaceHolder; //needs to be hard defined for cuda float4 *Pos, *Vel, *Force; float4 *Pos_DEV0, *Vel_DEV0, *Force_DEV0; float4 *PosFstHalf_0, *VelFstHalf_0, *ForceFstHalf_0; float4 *PosSndHalf_0, *VelSndHalf_0; float4 *PosFstHalf_1, *VelFstHalf_1; float4 *PosSndHalf_1, *VelSndHalf_1, *ForceSndHalf_1; //Globals to setup the kernals dim3 BlockConfig, GridConfig; int NumberOfGpus, Gpu0Access, Gpu1Access; //Globals to be set by the setRunParameters function double UnitLength = -1.0; double Diameter = -1.0; double UnitMass = -1.0; double MassSi = -1.0; double MassFe = -1.0; double MassOfBody1 = -1.0; double MassOfBody2 = -1.0; double UnitTime = -1.0; double Gravity = -1.0; int NSi = -1; int NSi1 = -1; int NSi2 = -1; int NFe = -1; int NFe1 = -1; int NFe2 = -1; //Globals to be set by the findEarthAndMoon function int NumberOfEarthElements = -1; int NumberOfMoonElements = -1; int *EarthIndex; int *MoonIndex; //Global to trigger printing collision stats to the screen int PrintCollisionStats = 0; //Global to trigger printing continue stats to the screen int PrintContinueStats = 0; //Globals for the run to be read in from the runSetup file float3 InitialPosition1; float3 InitialPosition2; float3 InitialVelocity1; float3 InitialVelocity2; float4 InitialSpin1; float4 InitialSpin2; float3 BranchPosition1; float3 BranchPosition2; float3 BranchVelocity1; float3 BranchVelocity2; float4 BranchSpin1; float4 BranchSpin2; double FractionEarthMassOfBody1; //Mass of body 1 as a proportion of the Earth's mass double FractionEarthMassOfBody2; //Mass of body 2 as a proportion of the Earth's mass double FractionFeBody1; //Percent by mass of iron in body 1 double FractionSiBody1; //Percent by mass of silicate in body 1 double FractionFeBody2; //Percent by mass of iron in body 2 double FractionSiBody2; //Percent by mass of silicate in body 2 float DampRateBody1; float DampRateBody2; float EnergyTargetBody1; float EnergyTargetBody2; int N; float TotalRunTime; float BranchRunTime; float DampTime; float DampRestTime; float EnergyAdjustmentTime; float EnergyAdjustmentRestTime; float SpinRestTime; float BranchSpinRestTime; float SetupTime; float Dt; int WriteToFile; int RecordRate; double DensityFe; //Density of iron in kilograms meterE-3 (Canup science 
2012) double DensitySi; //Density of silcate in kilograms meterE-3 (Canup science 2012) double KFe; double KSi; double KRFe; double KRSi; double SDFe; double SDSi; int DrawRate; int DrawQuality; int UseMultipleGPU; double UniversalGravity; //Universal gravitational constant in kilometersE3 kilogramsE-1 and secondsE-2 (??? source) double MassOfEarth; double MassOfMoon; double AngularMomentumEarthMoonSystem; double EarthAxialTilt; double MoonAxialTilt; double Pi; void readRunParameters() { ifstream data; string name; if(TypeOfRun == 0) { data.open("RunSetup"); } else if(TypeOfRun == 1) { data.open("RootSetup"); } else if(TypeOfRun == 2) { data.open("RunSetup"); if(data.is_open() != 1) data.open("RootSetup"); } else { printf("\nTSU Error bad TypeOfRun selected\n"); exit(0); } if(data.is_open() == 1) { getline(data,name,'='); data >> InitialPosition1.x; getline(data,name,'='); data >> InitialPosition1.y; getline(data,name,'='); data >> InitialPosition1.z; getline(data,name,'='); data >> InitialPosition2.x; getline(data,name,'='); data >> InitialPosition2.y; getline(data,name,'='); data >> InitialPosition2.z; getline(data,name,'='); data >> InitialVelocity1.x; getline(data,name,'='); data >> InitialVelocity1.y; getline(data,name,'='); data >> InitialVelocity1.z; getline(data,name,'='); data >> InitialVelocity2.x; getline(data,name,'='); data >> InitialVelocity2.y; getline(data,name,'='); data >> InitialVelocity2.z; getline(data,name,'='); data >> InitialSpin1.x; getline(data,name,'='); data >> InitialSpin1.y; getline(data,name,'='); data >> InitialSpin1.z; getline(data,name,'='); data >> InitialSpin1.w; getline(data,name,'='); data >> InitialSpin2.x; getline(data,name,'='); data >> InitialSpin2.y; getline(data,name,'='); data >> InitialSpin2.z; getline(data,name,'='); data >> InitialSpin2.w; getline(data,name,'='); data >> FractionEarthMassOfBody1; getline(data,name,'='); data >> FractionEarthMassOfBody2; getline(data,name,'='); data >> FractionFeBody1; getline(data,name,'='); data >> FractionSiBody1; getline(data,name,'='); data >> FractionFeBody2; getline(data,name,'='); data >> FractionSiBody2; getline(data,name,'='); data >> DampRateBody1; getline(data,name,'='); data >> DampRateBody2; getline(data,name,'='); data >> EnergyTargetBody1; getline(data,name,'='); data >> EnergyTargetBody2; getline(data,name,'='); data >> N; getline(data,name,'='); data >> TotalRunTime; getline(data,name,'='); data >> DampTime; getline(data,name,'='); data >> DampRestTime; getline(data,name,'='); data >> EnergyAdjustmentTime; getline(data,name,'='); data >> EnergyAdjustmentRestTime; getline(data,name,'='); data >> SpinRestTime; getline(data,name,'='); data >> Dt; getline(data,name,'='); data >> WriteToFile; getline(data,name,'='); data >> RecordRate; getline(data,name,'='); data >> DensityFe; getline(data,name,'='); data >> DensitySi; getline(data,name,'='); data >> KFe; getline(data,name,'='); data >> KSi; getline(data,name,'='); data >> KRFe; getline(data,name,'='); data >> KRSi; getline(data,name,'='); data >> SDFe; getline(data,name,'='); data >> SDSi; getline(data,name,'='); data >> DrawRate; getline(data,name,'='); data >> DrawQuality; getline(data,name,'='); data >> UseMultipleGPU; getline(data,name,'='); data >> UniversalGravity; getline(data,name,'='); data >> MassOfEarth; getline(data,name,'='); data >> MassOfMoon; getline(data,name,'='); data >> AngularMomentumEarthMoonSystem; getline(data,name,'='); data >> EarthAxialTilt; getline(data,name,'='); data >> MoonAxialTilt; getline(data,name,'='); data >> 
Pi; } else { printf("\nTSU Error could not open run or root Setup file\n"); exit(0); } data.close(); } void readBranchParameters() { ifstream data; string name; data.open("BranchSetup"); if(data.is_open() == 1) { getline(data,name,'='); data >> BranchPosition1.x; getline(data,name,'='); data >> BranchPosition1.y; getline(data,name,'='); data >> BranchPosition1.z; getline(data,name,'='); data >> BranchPosition2.x; getline(data,name,'='); data >> BranchPosition2.y; getline(data,name,'='); data >> BranchPosition2.z; getline(data,name,'='); data >> BranchVelocity1.x; getline(data,name,'='); data >> BranchVelocity1.y; getline(data,name,'='); data >> BranchVelocity1.z; getline(data,name,'='); data >> BranchVelocity2.x; getline(data,name,'='); data >> BranchVelocity2.y; getline(data,name,'='); data >> BranchVelocity2.z; getline(data,name,'='); data >> BranchSpin1.x; getline(data,name,'='); data >> BranchSpin1.y; getline(data,name,'='); data >> BranchSpin1.z; getline(data,name,'='); data >> BranchSpin1.w; getline(data,name,'='); data >> BranchSpin2.x; getline(data,name,'='); data >> BranchSpin2.y; getline(data,name,'='); data >> BranchSpin2.z; getline(data,name,'='); data >> BranchSpin2.w; getline(data,name,'='); data >> BranchSpinRestTime; getline(data,name,'='); data >> BranchRunTime; } else { printf("\nTSU Error could not open Branch Setup file\n"); exit(0); } data.close(); } void setRunParameters() { double massBody1 = MassOfEarth*FractionEarthMassOfBody1; double massBody2 = MassOfEarth*FractionEarthMassOfBody2; if(FractionFeBody1 + FractionSiBody1 != 1.0) { printf("\nTSU Error: body1 fraction don't add to 1\n"); exit(0); } if(FractionFeBody2 + FractionSiBody2 != 1.0) { printf("\nTSU Error: body2 fraction don't add to 1\n"); exit(0); } double totalMassOfFeBody1 = FractionFeBody1*massBody1; double totalMassOfSiBody1 = FractionSiBody1*massBody1; double totalMassOfFeBody2 = FractionFeBody2*massBody2; double totalMassOfSiBody2 = FractionSiBody2*massBody2; double totalMassOfFe = totalMassOfFeBody1 + totalMassOfFeBody2; double totalMassOfSi = totalMassOfSiBody1 + totalMassOfSiBody2; double massFe; double massSi; double diameterOfElement; if(totalMassOfFe != 0.0) NFe = (double)N*(DensitySi/DensityFe)/(totalMassOfSi/totalMassOfFe + DensitySi/DensityFe); else NFe = 0; NSi = N - NFe; if(totalMassOfFe != 0.0) NFe1 = NFe*totalMassOfFeBody1/totalMassOfFe; else NFe1 = 0; NFe2 = NFe - NFe1; if(totalMassOfSi != 0.0) NSi1 = NSi*totalMassOfSiBody1/totalMassOfSi; else NSi1 = 0; NSi2 = NSi - NSi1; if(NFe != 0) massFe = totalMassOfFe/NFe; else massFe = 0.0; if(NSi != 0) massSi = totalMassOfSi/NSi; else massSi = 0.0; if(NSi != 0) diameterOfElement = pow((6.0*massSi)/(Pi*DensitySi), (1.0/3.0)); else diameterOfElement = pow((6.0*massFe)/(Pi*DensityFe), (1.0/3.0)); UnitLength = diameterOfElement; if(NSi != 0) UnitMass = massSi; else UnitMass = massFe; if(NSi != 0) UnitTime = sqrt((6.0*massSi*(double)NSi)/(UniversalGravity*Pi*DensitySi*totalMassOfSi)); else if(NFe != 0) UnitTime = sqrt((6.0*massFe*(double)NFe)/(UniversalGravity*Pi*DensityFe*totalMassOfFe)); else { printf("TSU Error: No mass, function setRunParameters\n"); exit(0); } //In this system this is what sets the length unit, the time unit, and the mass unit. 
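/*
   Reasoning note for the element-count split computed above. The setup file is a list of
   "Name = value" lines, parsed by getline(...,'=') followed by a stream extraction, and
   setRunParameters() converts those physical values into code units. NFe is chosen so that
   iron and silicate elements end up with the same diameter: requiring
       massFe/DensityFe = massSi/DensitySi,  massFe = totalMassOfFe/NFe,
       massSi = totalMassOfSi/NSi,           NFe + NSi = N
   gives  NFe = N*(DensitySi/DensityFe)/(totalMassOfSi/totalMassOfFe + DensitySi/DensityFe),
   exactly the expression used above. Diameter = 1, Gravity = 1 and MassSi = 1 then define the
   code units, with MassFe = DensityFe/DensitySi > 1 because iron is denser at equal size.
   Illustrative numbers only: with N = 30720, 70% silicate and 30% iron by mass, and
   DensitySi/DensityFe ~ 0.42, roughly NFe ~ 30720*0.42/(0.7/0.3 + 0.42) ~ 4700 iron elements.
*/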
Diameter = 1.0; Gravity = 1.0; if(NSi != 0) { MassSi = 1.0; MassFe = DensityFe/DensitySi; } else if(NFe != 0) { MassFe = 1.0; } else { printf("TSU Error: No mass, function setRunParameters\n"); exit(0); } //Setting mass of bodies in our units MassOfBody1 = massBody1/UnitMass; MassOfBody2 = massBody2/UnitMass; //Putting Initial positions into our units InitialPosition1.x /= UnitLength; InitialPosition1.y /= UnitLength; InitialPosition1.z /= UnitLength; InitialPosition2.x /= UnitLength; InitialPosition2.y /= UnitLength; InitialPosition2.z /= UnitLength; //Putting Initial Velocities into our units InitialVelocity1.x *= UnitTime/UnitLength; InitialVelocity1.y *= UnitTime/UnitLength; InitialVelocity1.z *= UnitTime/UnitLength; InitialVelocity2.x *= UnitTime/UnitLength; InitialVelocity2.y *= UnitTime/UnitLength; InitialVelocity2.z *= UnitTime/UnitLength; //Putting Initial Angule Velocities into our units InitialSpin1.w *= UnitTime/3600.0; InitialSpin2.w *= UnitTime/3600.0; //Putting Run times into our units TotalRunTime *= 3600.0/UnitTime; DampTime *= 3600.0/UnitTime; DampRestTime *= 3600.0/UnitTime; EnergyAdjustmentTime *= 3600.0/UnitTime; EnergyAdjustmentRestTime *= 3600.0/UnitTime; SpinRestTime *= 3600.0/UnitTime; SetupTime = (DampTime + DampRestTime + EnergyAdjustmentTime + EnergyAdjustmentRestTime + SpinRestTime); KFe *= UnitTime*UnitTime*UnitLength/UnitMass; KSi *= UnitTime*UnitTime*UnitLength/UnitMass; } void setBranchParameters() { //Putting Branch positions into our units BranchPosition1.x /= UnitLength; BranchPosition1.y /= UnitLength; BranchPosition1.z /= UnitLength; BranchPosition2.x /= UnitLength; BranchPosition2.y /= UnitLength; BranchPosition2.z /= UnitLength; //Putting Branch Velocities into our units BranchVelocity1.x *= UnitTime/UnitLength; BranchVelocity1.y *= UnitTime/UnitLength; BranchVelocity1.z *= UnitTime/UnitLength; BranchVelocity2.x *= UnitTime/UnitLength; BranchVelocity2.y *= UnitTime/UnitLength; BranchVelocity2.z *= UnitTime/UnitLength; //Putting Branch Angule Velocities into our units BranchSpin1.w *= UnitTime/3600.0; BranchSpin2.w *= UnitTime/3600.0; //Putting Branch Run times into our units BranchSpinRestTime *= 3600.0/UnitTime; BranchRunTime *= 3600.0/UnitTime; } //Globals for setting up the viewing window int XWindowSize = 2500; int YWindowSize = 2500; double Near = 0.2; double Far = 600.0; double ViewBoxSize = 300.0; GLdouble Left = -ViewBoxSize; GLdouble Right = ViewBoxSize; GLdouble Bottom = -ViewBoxSize; GLdouble Top = ViewBoxSize; GLdouble Front = ViewBoxSize; GLdouble Back = -ViewBoxSize; //Direction here your eye is located location double EyeX = 100.0; double EyeY = 100.0; double EyeZ = 100.0; //Where you are looking double CenterX = 0.0; double CenterY = 0.0; double CenterZ = 0.0; //Up vector for viewing double UpX = 0.0; double UpY = 1.0; double UpZ = 0.0; void createFolderForNewRun() { //Create output folder to store run parameters and run positions and velocities time_t t = time(0); struct tm * now = localtime( & t ); int month = now->tm_mon + 1, day = now->tm_mday, curTimeHour = now->tm_hour, curTimeMin = now->tm_min; stringstream smonth, sday, stimeHour, stimeMin; smonth << month; sday << day; stimeHour << curTimeHour; stimeMin << curTimeMin; string monthday; if (curTimeMin <= 9) monthday = smonth.str() + "-" + sday.str() + "-" + stimeHour.str() + ":0" + stimeMin.str(); else monthday = smonth.str() + "-" + sday.str() + "-" + stimeHour.str() + ":" + stimeMin.str(); string foldernametemp = "Run:" + monthday; const char *foldername = 
foldernametemp.c_str(); mkdir(foldername , S_IRWXU|S_IRWXG|S_IRWXO); chdir(foldername); //Copying the RunSetup file into the run folder FILE *runSetupIn; FILE *runSetupOut; long sizeOfFile; char * buffer; runSetupIn = fopen("../RunSetup", "rb"); fseek (runSetupIn , 0 , SEEK_END); sizeOfFile = ftell (runSetupIn); rewind (runSetupIn); buffer = (char*) malloc (sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, runSetupIn); runSetupOut = fopen("RunSetup", "wb"); fwrite (buffer, 1, sizeOfFile, runSetupOut); fclose(runSetupIn); fclose(runSetupOut); free (buffer); } void createFolderForBranchRun(const char* rootFolder) { //Create output folder to store run parameters and run positions and velocities time_t t = time(0); struct tm * now = localtime( & t ); int month = now->tm_mon + 1, day = now->tm_mday, curTimeHour = now->tm_hour, curTimeMin = now->tm_min; stringstream smonth, sday, stimeHour, stimeMin; smonth << month; sday << day; stimeHour << curTimeHour; stimeMin << curTimeMin; string monthday; if (curTimeMin <= 9) monthday = smonth.str() + "-" + sday.str() + "-" + stimeHour.str() + ":0" + stimeMin.str(); else monthday = smonth.str() + "-" + sday.str() + "-" + stimeHour.str() + ":" + stimeMin.str(); string foldernametemp = "BranchRun:" + monthday; const char *foldername = foldernametemp.c_str(); mkdir(foldername , S_IRWXU|S_IRWXG|S_IRWXO); chdir(foldername); FILE *fileIn; FILE *fileOut; long sizeOfFile; char * buffer; char path[256]; //Copying the RunSetup file into the branch run folder strcpy(path, "../"); strcat(path, rootFolder); strcat(path,"/RunSetup"); fileIn = fopen(path, "rb"); if(fileIn == NULL) { printf("\n\n The RunSetup file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell (fileIn); rewind (fileIn); buffer = (char*) malloc (sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("RootSetup", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileOut); fileOut = fopen("RunSetup", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); free (buffer); //Copying the RunStatsFile file into the branch run folder strcpy(path, "../"); strcat(path, rootFolder); strcat(path,"/RunStats"); fileIn = fopen(path, "rb"); if(fileIn == NULL) { printf("\n\n The RunStats file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell (fileIn); rewind (fileIn); buffer = (char*) malloc (sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("RootRunStats", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); free (buffer); //Copying the Branch Positions and Velocities file into the branch run folder strcpy(path, "../"); strcat(path, rootFolder); strcat(path,"/StartPosAndVel"); fileIn = fopen(path, "rb"); if(fileIn == NULL) { printf("\n\n The StartPosAndVel file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell (fileIn); rewind (fileIn); buffer = (char*) malloc (sizeof(char)*sizeOfFile); fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("RootStartPosAndVel", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); free (buffer); //Copying the Branch setup file into the branch run folder strcpy(path, "../"); strcat(path,"BranchSetup"); fileIn = fopen(path, "rb"); if(fileIn == NULL) { printf("\n\n The BranchSetup file does not exist\n\n"); exit(0); } fseek (fileIn , 0 , SEEK_END); sizeOfFile = ftell (fileIn); rewind (fileIn); buffer = (char*) malloc (sizeof(char)*sizeOfFile); 
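/*
   Refactoring sketch (hypothetical helper, not used by the original code): every file copied
   during folder setup repeats the same open / fseek / malloc / fread / fwrite sequence. A
   helper like the one below captures that pattern once and documents what each repeated block does.

   static void copyWholeFile(const char *srcPath, const char *dstPath)
   {
       FILE *in = fopen(srcPath, "rb");
       if(in == NULL) { printf("\n\n %s does not exist\n\n", srcPath); exit(0); }
       fseek(in, 0, SEEK_END);
       long size = ftell(in);          // size of the source file in bytes
       rewind(in);
       char *buf = (char*)malloc(size);
       fread(buf, 1, size, in);        // read the whole file into memory
       FILE *out = fopen(dstPath, "wb");
       fwrite(buf, 1, size, out);      // write it back out verbatim
       fclose(in); fclose(out); free(buf);
   }
*/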
fread (buffer, 1, sizeOfFile, fileIn); fileOut = fopen("BranchSetup", "wb"); fwrite (buffer, 1, sizeOfFile, fileOut); fclose(fileIn); fclose(fileOut); free (buffer); } void openNewRunFiles() { RunStatsFile = fopen("RunStats", "wb"); PosAndVelFile = fopen("PosAndVel", "wb"); StartPosAndVelFile = fopen("StartPosAndVel", "wb"); ContinueRunStatsFile = fopen("ContinueRunStats", "wb"); ContinueRunPosAndVelFile = fopen("ContinueRunPosAndVel", "wb"); } void openBranchRunFiles() { RunStatsFile = fopen("RunStats", "wb"); PosAndVelFile = fopen("PosAndVel", "wb"); StartPosAndVelFile = fopen("StartPosAndVel", "wb"); ContinueRunStatsFile = fopen("ContinueRunStats", "wb"); ContinueRunPosAndVelFile = fopen("ContinueRunPosAndVel", "wb"); } void openContinueRunFiles() { RunStatsFile = fopen("RunStats", "wb"); PosAndVelFile = fopen("PosAndVel", "ab"); //fseek(PosAndVelFile,0,SEEK_END); ContinueRunStatsFile = fopen("ContinueRunStats", "wb"); ContinueRunPosAndVelFile = fopen("ContinueRunPosAndVel", "wb"); } void recordSetupStats() { float mag; fprintf(RunStatsFile, "The conversion parameters to take you to and from our units to the real world units follow\n"); fprintf(RunStatsFile, "\nOur length unit is this many kilometers: UnitLength = %f", UnitLength); fprintf(RunStatsFile, "\nOur mass unit is this many kilograms: UnitMass = %e", UnitMass); fprintf(RunStatsFile, "\nOur time unit is this many seconds: UnitTime = %f\n", UnitTime); fprintf(RunStatsFile, "\nThe initail statistics for this run in our units follow\n"); fprintf(RunStatsFile, "\nDiameter of an element: Diameter = %f", Diameter); fprintf(RunStatsFile, "\nGravity in our units: Gravity = %f", Gravity); fprintf(RunStatsFile, "\nThe mass of a silicate element: MassSi = %f", MassSi); fprintf(RunStatsFile, "\nThe mass of an iron element: MassFe = %f\n", MassFe); fprintf(RunStatsFile, "\nThe push back strength of iron: KFe = %f", KFe); fprintf(RunStatsFile, "\nThe push back strength of silicate: KSi = %f\n", KSi); fprintf(RunStatsFile, "\nThe mass of body one: MassOfBody1 = %f", MassOfBody1); fprintf(RunStatsFile, "\nThe mass of body two: MassOfBody2 = %f\n", MassOfBody2); fprintf(RunStatsFile, "\nThe initial position of body one: (%f, %f, %f)", InitialPosition1.x, InitialPosition1.y, InitialPosition1.z); fprintf(RunStatsFile, "\nThe initial position of body two: (%f, %f, %f)\n", InitialPosition2.x, InitialPosition2.y, InitialPosition2.z); fprintf(RunStatsFile, "\nThe initial velocity of body one: (%f, %f, %f)", InitialVelocity1.x, InitialVelocity1.y, InitialVelocity1.z); fprintf(RunStatsFile, "\nThe initial velocity of body two: (%f, %f, %f)\n", InitialVelocity2.x, InitialVelocity2.y, InitialVelocity2.z); mag = sqrt(InitialSpin1.x*InitialSpin1.x + InitialSpin1.y*InitialSpin1.y + InitialSpin1.z*InitialSpin1.z); fprintf(RunStatsFile, "\nThe initial spin in revolutions per time unit of body one: (%f, %f, %f, %f)", InitialSpin1.x/mag, InitialSpin1.y/mag, InitialSpin1.z/mag, InitialSpin1.w); mag = sqrt(InitialSpin2.x*InitialSpin2.x + InitialSpin2.y*InitialSpin2.y + InitialSpin2.z*InitialSpin2.z); fprintf(RunStatsFile, "\nThe initial spin in revolutions per time unit of body two: (%f, %f, %f, %f)\n", InitialSpin2.x/mag, InitialSpin2.y/mag, InitialSpin2.z/mag, InitialSpin2.w); fprintf(RunStatsFile, "\nTotal number of elements: N = %d", N); fprintf(RunStatsFile, "\nTotal number of iron elements: NFe = %d", NFe); fprintf(RunStatsFile, "\nTotal number of silicate elements: NSi = %d", NSi); fprintf(RunStatsFile, "\nTotal number of iron elements in body1: NFe1 = %d", 
NFe1); fprintf(RunStatsFile, "\nTotal number of silicate elements in body1: NSi1 = %d", NSi1); fprintf(RunStatsFile, "\nTotal number of iron elements in body2 NFe2: = %d", NFe2); fprintf(RunStatsFile, "\nTotal number of silicate elements in body2: NSi2 = %d\n", NSi2); fprintf(RunStatsFile, "\nTime step in our units: Dt = %f", Dt); fprintf(RunStatsFile, "\nRecord rate: RecordRate = %d", RecordRate); fprintf(RunStatsFile, "\nTotal run time in our units: TotalRunTime = %f\n", TotalRunTime); fprintf(RunStatsFile, "\nDamp time in our units: DampTime = %f", DampTime); fprintf(RunStatsFile, "\nDamp rest time in our units: DampRestTime = %f", DampRestTime); fprintf(RunStatsFile, "\nEnergy adjustment time in our units: EnergyAdjustmentTime = %f", EnergyAdjustmentTime); fprintf(RunStatsFile, "\nEnergy adjustment rest time in our units: EnergyAdjustmentRestTime = %f", EnergyAdjustmentRestTime); fprintf(RunStatsFile, "\nSpin rest time in our units: SpinRestTime = %f", SpinRestTime); fprintf(RunStatsFile, "\nTotal setup time in our units: SetupTime = %f\n", SetupTime); } //Creating structures to hold constants needed in the kernals struct forceSeperateKernalConstantsStruct { float GMassFeFe; float GMassFeSi; float KFeFe; float KSiSi; float KFeSi; float KRFeFe; float KRSiSi; float KRFeSi; float KRMix; float ShellBreakFe; float ShellBreakSi; float ShellBreakFeSi1; float ShellBreakFeSi2; int boarder1; int boarder2; int boarder3; }; struct forceCollisionKernalConstantsStruct { float GMassFeFe; float GMassFeSi; float KFeFe; float KSiSi; float KFeSi; float KRFeFe; float KRSiSi; float KRFeSi; float KRMix; float ShellBreakFe; float ShellBreakSi; float ShellBreakFeSi1; float ShellBreakFeSi2; int NFe; }; struct moveSeperateKernalConstantsStruct { float Dt; float DtOverMassFe; float DtOverMassSi; int boarder1; int boarder2; int boarder3; }; struct moveCollisionKernalConstantsStruct { float Dt; float DtOverMassFe; float DtOverMassSi; int NFe; }; //Globals to hold kernal constants forceSeperateKernalConstantsStruct ForceSeperateConstant; forceCollisionKernalConstantsStruct ForceCollisionConstant; moveSeperateKernalConstantsStruct MoveSeperateConstant; moveCollisionKernalConstantsStruct MoveCollisionConstant; void loadKernalConstantStructures() { //Force kernal seperate ForceSeperateConstant.GMassFeFe = Gravity*MassFe*MassFe; ForceSeperateConstant.GMassFeSi = Gravity*MassFe*MassSi; ForceSeperateConstant.KFeFe = 2.0*KFe; ForceSeperateConstant.KSiSi = 2.0*KSi; ForceSeperateConstant.KFeSi = KFe + KSi; ForceSeperateConstant.KRFeFe = 2.0*KFe*KRFe; ForceSeperateConstant.KRSiSi = 2.0*KSi*KRSi; ForceSeperateConstant.KRFeSi = KFe*KRFe + KSi*KRSi; if(SDFe >= SDSi) ForceSeperateConstant.KRMix = KFe + KSi*KRSi; else ForceSeperateConstant.KRMix = KFe*KRFe + KSi; ForceSeperateConstant.ShellBreakFe = Diameter - Diameter*SDFe; ForceSeperateConstant.ShellBreakSi = Diameter - Diameter*SDSi; if(SDFe >= SDSi) { ForceSeperateConstant.ShellBreakFeSi1 = Diameter - Diameter*SDSi; ForceSeperateConstant.ShellBreakFeSi2 = Diameter - Diameter*SDFe; } else { ForceSeperateConstant.ShellBreakFeSi1 = Diameter - Diameter*SDFe; ForceSeperateConstant.ShellBreakFeSi2 = Diameter - Diameter*SDSi; } ForceSeperateConstant.boarder1 = NFe1; ForceSeperateConstant.boarder2 = NFe1 + NSi1; ForceSeperateConstant.boarder3 = NFe1 + NSi1 + NFe2; //Force kernal Earth Moon System ForceCollisionConstant.GMassFeFe = Gravity*MassFe*MassFe; ForceCollisionConstant.GMassFeSi = Gravity*MassFe*MassSi; ForceCollisionConstant.KFeFe = 2.0*KFe; ForceCollisionConstant.KSiSi = 
2.0*KSi; ForceCollisionConstant.KFeSi = KFe + KSi; ForceCollisionConstant.KRFeFe = 2.0*KFe*KRFe; ForceCollisionConstant.KRSiSi = 2.0*KSi*KRSi; ForceCollisionConstant.KRFeSi = KFe*KRFe + KSi*KRSi; if(SDFe >= SDSi) ForceCollisionConstant.KRMix = KFe + KSi*KRSi; else ForceCollisionConstant.KRMix = KFe*KRFe + KSi; ForceCollisionConstant.ShellBreakFe = Diameter - Diameter*SDFe; ForceCollisionConstant.ShellBreakSi = Diameter - Diameter*SDSi; if(SDFe >= SDSi) { ForceCollisionConstant.ShellBreakFeSi1 = Diameter - Diameter*SDSi; ForceCollisionConstant.ShellBreakFeSi2 = Diameter - Diameter*SDFe; } else { ForceCollisionConstant.ShellBreakFeSi1 = Diameter - Diameter*SDFe; ForceCollisionConstant.ShellBreakFeSi2 = Diameter - Diameter*SDSi; } ForceCollisionConstant.NFe = NFe; //Move kernal seperate MoveSeperateConstant.Dt = Dt; MoveSeperateConstant.DtOverMassFe = Dt/MassFe; MoveSeperateConstant.DtOverMassSi = Dt/MassSi; MoveSeperateConstant.boarder1 = NFe1; MoveSeperateConstant.boarder2 = NSi1 + NFe1; MoveSeperateConstant.boarder3 = NFe1 + NSi1 + NFe2; //Move kernal Earth Moon System MoveCollisionConstant.Dt = Dt; MoveCollisionConstant.DtOverMassSi = Dt/MassSi; MoveCollisionConstant.DtOverMassFe = Dt/MassFe; MoveCollisionConstant.NFe = NFe; } void errorCheck(const char *message) { cudaError_t error; error = cudaGetLastError(); if(error != cudaSuccess) { printf("\n CUDA ERROR: %s = %s\n", message, cudaGetErrorString(error)); exit(0); } } void allocateCPUMemory() { PlaceHolder = (float4*)malloc(N*sizeof(float4)); Pos = (float4*)malloc(N*sizeof(float4)); Vel = (float4*)malloc(N*sizeof(float4)); Force = (float4*)malloc(N*sizeof(float4)); } void checkSetupForErrors() { if(N%BLOCKSIZE != 0) { printf("\nTSU Error: Number of Particles is not a multiple of the block size \n\n"); exit(0); } } void deviceSetupSeperate() { BlockConfig.x = BLOCKSIZE; BlockConfig.y = 1; BlockConfig.z = 1; GridConfig.x = (N-1)/BlockConfig.x + 1; GridConfig.y = 1; GridConfig.z = 1; cudaMalloc((void**)&Pos_DEV0, N *sizeof(float4)); errorCheck("cudaMalloc Pos"); cudaMalloc((void**)&Vel_DEV0, N *sizeof(float4)); errorCheck("cudaMalloc Vel"); cudaMalloc((void**)&Force_DEV0, N *sizeof(float4)); errorCheck("cudaMalloc Force"); } void deviceSetupCollision() { cudaGetDeviceCount(&NumberOfGpus); printf("\n***** You have %d GPUs available\n", NumberOfGpus); errorCheck("cudaGetDeviceCount"); cudaDeviceCanAccessPeer(&Gpu0Access,0,1); errorCheck("cudaDeviceCanAccessPeer0"); cudaDeviceCanAccessPeer(&Gpu1Access,1,0); errorCheck("cudaDeviceCanAccessPeer1"); if(1 < NumberOfGpus && UseMultipleGPU == 1) { printf("\n***** You will be using %d GPUs\n", NumberOfGpus); if(Gpu0Access == 0) { printf("\nTSU Error: Device0 can not do peer to peer\n"); } if(Gpu1Access == 0) { printf("\nTSU Error: Device1 can not do peer to peer\n"); } cudaDeviceEnablePeerAccess(1,0); errorCheck("cudaDeviceEnablePeerAccess"); BlockConfig.x = BLOCKSIZE; BlockConfig.y = 1; BlockConfig.z = 1; GridConfig.x = ((N/2)-1)/BlockConfig.x + 1; GridConfig.y = 1; GridConfig.z = 1; cudaSetDevice(0); errorCheck("cudaSetDevice0"); cudaMalloc( (void**)&PosFstHalf_0, (N/2)*sizeof(float4) ); errorCheck("cudaMalloc PFH0"); cudaMalloc( (void**)&PosSndHalf_0, (N/2)*sizeof(float4) ); errorCheck("cudaMalloc PSH0"); cudaMalloc( (void**)&VelFstHalf_0, (N/2)*sizeof(float4) ); errorCheck("cudaMalloc VFH0"); cudaMalloc( (void**)&VelSndHalf_0, (N/2)*sizeof(float4) ); errorCheck("cudaMalloc VSH0"); cudaMalloc( (void**)&ForceFstHalf_0, (N/2)*sizeof(float4) ); errorCheck("cudaMalloc FFH0"); cudaSetDevice(1); 
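/*
   Worked example for the launch configuration above (numbers illustrative only): with two
   usable GPUs each device integrates N/2 elements, and GridConfig.x = ((N/2)-1)/BLOCKSIZE + 1
   is the usual integer ceiling of (N/2)/BLOCKSIZE. For N = 4096 and BLOCKSIZE = 256 each GPU
   runs 2048 threads in 8 full blocks. checkSetupForErrors() requires N to be a multiple of
   BLOCKSIZE; picking N so that N/2 is also a multiple (as in this example) leaves no partial
   tail block on either device.
*/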
errorCheck("cudaSetDevice1"); cudaMalloc( (void**)&PosFstHalf_1, (N/2)*sizeof(float4) ); errorCheck("cudaMalloc PFH1"); cudaMalloc( (void**)&PosSndHalf_1, (N/2)*sizeof(float4) ); errorCheck("cudaMalloc PSH1"); cudaMalloc( (void**)&VelFstHalf_1, (N/2)*sizeof(float4) ); errorCheck("cudaMalloc VFH1"); cudaMalloc( (void**)&VelSndHalf_1, (N/2)*sizeof(float4) ); errorCheck("cudaMalloc VSH1"); cudaMalloc( (void**)&ForceSndHalf_1, (N/2)*sizeof(float4) ); errorCheck("cudaMalloc FSH1"); } else { BlockConfig.x = BLOCKSIZE; BlockConfig.y = 1; BlockConfig.z = 1; GridConfig.x = (N-1)/BlockConfig.x + 1; GridConfig.y = 1; GridConfig.z = 1; cudaMalloc((void**)&Pos_DEV0, N *sizeof(float4)); errorCheck("cudaMalloc P0"); cudaMalloc((void**)&Vel_DEV0, N *sizeof(float4)); errorCheck("cudaMalloc V0"); cudaMalloc((void**)&Force_DEV0, N *sizeof(float4)); errorCheck("cudaMalloc F0"); } } void cleanUpSeperate() { cudaFree(Pos_DEV0); cudaFree(Vel_DEV0); cudaFree(Force_DEV0); fclose(StartPosAndVelFile); } void cleanUpCollision() { fclose(RunStatsFile); fclose(PosAndVelFile); fclose(ContinueRunStatsFile); fclose(ContinueRunPosAndVelFile); if(1 < NumberOfGpus && UseMultipleGPU == 1) { cudaSetDevice(0); errorCheck("cudaSetDevice 0"); cudaFree(PosFstHalf_0); cudaFree(VelFstHalf_0); cudaFree(ForceFstHalf_0); cudaFree(PosSndHalf_0); cudaFree(VelSndHalf_0); cudaSetDevice(1); errorCheck("cudaSetDevice 0"); cudaFree(PosFstHalf_1); cudaFree(VelFstHalf_1); cudaFree(ForceSndHalf_1); cudaFree(PosSndHalf_1); cudaFree(VelSndHalf_1); } else { cudaFree(Pos_DEV0); cudaFree(Vel_DEV0); cudaFree(Force_DEV0); } } void createBodies() { float radius1, radius2, stretch; float volume, mag, radius, seperation; int test, repeatCount; time_t t; printf("\nCreating the raw bodies\n"); //Creating body one //This assumes a 68% packing ratio of a shpere with shperes and then stretches it by strecth //to safely fit all the balls in. 
stretch = 2.0; volume = ((4.0/3.0)*Pi*pow(Diameter,3)*(float)NFe1/0.68)*stretch; radius1 = pow(volume/((4.0/3.0)*Pi),(1.0/3.0)); volume = ((4.0/3.0)*Pi*pow(Diameter,3)*(float)(NFe1 + NSi1)/0.68)*stretch; radius2 = pow(volume/((4.0/3.0)*Pi),(1.0/3.0)); srand((unsigned) time(&t)); repeatCount = 0; for(int i=0; i<NFe1; i++) { test = 0; while(test == 0) { Pos[i].x = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; Pos[i].y = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; Pos[i].z = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; mag = sqrt(Pos[i].x*Pos[i].x + Pos[i].y*Pos[i].y + Pos[i].z*Pos[i].z); radius = ((float)rand()/(float)RAND_MAX)*radius1; Pos[i].x *= radius/mag; Pos[i].y *= radius/mag; Pos[i].z *= radius/mag; test = 1; for(int j = 0; j < i; j++) { seperation = mag = sqrt((Pos[i].x-Pos[j].x)*(Pos[i].x-Pos[j].x) + (Pos[i].y-Pos[j].y)*(Pos[i].y-Pos[j].y) + (Pos[i].z-Pos[j].z)*(Pos[i].z-Pos[j].z)); if(seperation < Diameter) { test = 0; repeatCount++; break; } } } Pos[i].w = 0.0; Vel[i].x = 0.0; Vel[i].y = 0.0; Vel[i].z = 0.0; Vel[i].w = MassFe; } for(int i = NFe1; i < (NFe1 + NSi1); i++) { test = 0; while(test == 0) { Pos[i].x = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; Pos[i].y = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; Pos[i].z = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; mag = sqrt(Pos[i].x*Pos[i].x + Pos[i].y*Pos[i].y + Pos[i].z*Pos[i].z); radius = ((float)rand()/(float)RAND_MAX)*(radius2-radius1) + radius1 + Diameter; Pos[i].x *= radius/mag; Pos[i].y *= radius/mag; Pos[i].z *= radius/mag; test = 1; for(int j = NFe1; j < i; j++) { seperation = mag = sqrt((Pos[i].x-Pos[j].x)*(Pos[i].x-Pos[j].x) + (Pos[i].y-Pos[j].y)*(Pos[i].y-Pos[j].y) + (Pos[i].z-Pos[j].z)*(Pos[i].z-Pos[j].z)); if(seperation < Diameter) { test = 0; repeatCount++; break; } } } Pos[i].w = 1.0; Vel[i].x = 0.0; Vel[i].y = 0.0; Vel[i].z = 0.0; Vel[i].w = MassSi; } printf("\nrepeat count body one= %d", repeatCount); //Setting the body one's center of mass location for(int i=0; i<(NFe1 + NSi1); i++) { Pos[i].x += InitialPosition1.x; Pos[i].y += InitialPosition1.y; Pos[i].z += InitialPosition1.z; } //Creating body two //This assumes a 68% packing ratio of a shpere with shperes and then stretches it by strecth //to safely fit all the balls in. 
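/*
   Note on the placement loop above (repeated below for body two): each element is drawn with a
   random direction and radius and accepted only if it lies at least one Diameter from every
   element already placed; repeatCount tallies the rejected draws. This is plain rejection
   sampling with an O(n^2) pairwise overlap check, cheap enough at setup time for the particle
   counts used here, but it is the slow part of createBodies() if N grows.
*/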
stretch = 2.0; volume = ((4.0/3.0)*Pi*pow(Diameter,3)*(float)NFe2/0.68)*stretch; radius1 = pow(volume/((4.0/3.0)*Pi),(1.0/3.0)); volume = ((4.0/3.0)*Pi*pow(Diameter,3)*(float)(NFe2 + NSi2)/0.68)*stretch; radius2 = pow(volume/((4.0/3.0)*Pi),(1.0/3.0)); srand((unsigned) time(&t)); repeatCount = 0; for(int i = (NFe1 + NSi1); i < (NFe1 + NSi1 + NFe2); i++) { test = 0; while(test == 0) { Pos[i].x = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; Pos[i].y = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; Pos[i].z = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; mag = sqrt(Pos[i].x*Pos[i].x + Pos[i].y*Pos[i].y + Pos[i].z*Pos[i].z); radius = ((float)rand()/(float)RAND_MAX)*radius1; Pos[i].x *= radius/mag; Pos[i].y *= radius/mag; Pos[i].z *= radius/mag; test = 1; for(int j = (NFe1 + NSi1); j < i; j++) { seperation = mag = sqrt((Pos[i].x-Pos[j].x)*(Pos[i].x-Pos[j].x) + (Pos[i].y-Pos[j].y)*(Pos[i].y-Pos[j].y) + (Pos[i].z-Pos[j].z)*(Pos[i].z-Pos[j].z)); if(seperation < Diameter) { test = 0; repeatCount++; break; } } } Pos[i].w = 2.0; Vel[i].x = 0.0; Vel[i].y = 0.0; Vel[i].z = 0.0; Vel[i].w = MassFe; } for(int i = (NFe1 + NSi1 + NFe2); i < N; i++) { test = 0; while(test == 0) { Pos[i].x = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; Pos[i].y = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; Pos[i].z = ((float)rand()/(float)RAND_MAX)*2.0 - 1.0; mag = sqrt(Pos[i].x*Pos[i].x + Pos[i].y*Pos[i].y + Pos[i].z*Pos[i].z); radius = ((float)rand()/(float)RAND_MAX)*(radius2-radius1) + radius1 + Diameter; Pos[i].x *= radius/mag; Pos[i].y *= radius/mag; Pos[i].z *= radius/mag; test = 1; for(int j = (NFe1 + NSi1 + NFe2); j < i; j++) { seperation = mag = sqrt((Pos[i].x-Pos[j].x)*(Pos[i].x-Pos[j].x) + (Pos[i].y-Pos[j].y)*(Pos[i].y-Pos[j].y) + (Pos[i].z-Pos[j].z)*(Pos[i].z-Pos[j].z)); if(seperation < Diameter) { test = 0; repeatCount++; break; } } } Pos[i].w = 3.0; Vel[i].x = 0.0; Vel[i].y = 0.0; Vel[i].z = 0.0; Vel[i].w = MassSi; } printf("\nrepeat count body two = %d", repeatCount); //Setting the body one's center of mass location for(int i = (NFe1 + NSi1); i < N; i++) { Pos[i].x += InitialPosition2.x; Pos[i].y += InitialPosition2.y; Pos[i].z += InitialPosition2.z; } printf("\n************************************************** Initial bodies have been formed\n"); } __global__ void getForcesSeperate(float4 *pos, float4 *vel, float4 *force, forceSeperateKernalConstantsStruct constant) { int id, ids; int i,j; int inout; float4 forceSum; float4 posMe; float4 velMe; int test; int materialSwitch; float force_mag; float4 dp; float4 dv; float r2; float r; float invr; __shared__ float4 shPos[BLOCKSIZE]; __shared__ float4 shVel[BLOCKSIZE]; id = threadIdx.x + blockDim.x*blockIdx.x; forceSum.x = 0.0f; forceSum.y = 0.0f; forceSum.z = 0.0f; posMe.x = pos[id].x; posMe.y = pos[id].y; posMe.z = pos[id].z; velMe.x = vel[id].x; velMe.y = vel[id].y; velMe.z = vel[id].z; for(j = 0; j < gridDim.x; j++) { shPos[threadIdx.x] = pos[threadIdx.x + blockDim.x*j]; shVel[threadIdx.x] = vel[threadIdx.x + blockDim.x*j]; __syncthreads(); for(i = 0; i < blockDim.x; i++) { ids = i + blockDim.x*j; if((id < constant.boarder2 && ids < constant.boarder2) || (constant.boarder2 <= id && constant.boarder2 <= ids)) { if((id < constant.boarder2) && (ids < constant.boarder2)) materialSwitch = constant.boarder1; if((constant.boarder2 <= id) && (constant.boarder2 <= ids)) materialSwitch = constant.boarder3; dp.x = shPos[i].x - posMe.x; dp.y = shPos[i].y - posMe.y; dp.z = shPos[i].z - posMe.z; r2 = dp.x*dp.x + dp.y*dp.y + dp.z*dp.z; r = sqrt(r2); if(id == ids) invr = 0; else invr = 
1.0f/r; test = 0; if(id < materialSwitch) test = 1; if(ids < materialSwitch) test++; if(test == 0) //silicate silicate force { if(1.0 <= r) { force_mag = 1.0/r2; // G = 1 and mass of silicate elemnet =1 } else if(constant.ShellBreakSi <= r) { force_mag = 1.0 - constant.KSiSi*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = 1.0 - constant.KSiSi*(1.0 - r2); else force_mag = 1.0 - constant.KRSiSi*(1.0 - r2); } } else if(test == 1) //Silicate iron force { if(1.0 <= r) { force_mag = constant.GMassFeSi/r2; } else if(constant.ShellBreakFeSi1 <= r) { force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); } else if(constant.ShellBreakFeSi2 <= r) { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRMix*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRFeSi*(1.0 - r2); } } else //Iron iron force { if(1.0 <= r) { force_mag = constant.GMassFeFe/r2; } else if(constant.ShellBreakFe <= r) { force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); else force_mag = constant.GMassFeFe - constant.KRFeFe*(1.0 - r2); } } forceSum.x += force_mag*dp.x*invr; forceSum.y += force_mag*dp.y*invr; forceSum.z += force_mag*dp.z*invr; } } force[id].x = forceSum.x; force[id].y = forceSum.y; force[id].z = forceSum.z; __syncthreads(); } } __global__ void moveBodiesSeperate(float4 *pos, float4 *vel, float4 * force, moveSeperateKernalConstantsStruct constant) { float temp; int id; id = threadIdx.x + blockDim.x*blockIdx.x; if(constant.boarder3 <= id) temp = constant.DtOverMassSi; else if(constant.boarder2 <= id) temp = constant.DtOverMassFe; else if(constant.boarder1 <= id) temp = constant.DtOverMassSi; else temp = constant.DtOverMassFe; vel[id].x += (force[id].x)*temp; vel[id].y += (force[id].y)*temp; vel[id].z += (force[id].z)*temp; pos[id].x += vel[id].x*constant.Dt; pos[id].y += vel[id].y*constant.Dt; pos[id].z += vel[id].z*constant.Dt; } __global__ void moveBodiesDampedSeperate(float4 *pos, float4 *vel, float4 * force, moveSeperateKernalConstantsStruct constant, float DampRateBody1, float DampRateBody2) { float temp; float damp; int id; id = threadIdx.x + blockDim.x*blockIdx.x; if(constant.boarder3 <= id) { temp = constant.DtOverMassSi; damp = DampRateBody2; } else if(constant.boarder2 <= id) { temp = constant.DtOverMassFe; damp = DampRateBody2; } else if(constant.boarder1 <= id) { temp = constant.DtOverMassSi; damp = DampRateBody1; } else { temp = constant.DtOverMassFe; damp = DampRateBody1; } vel[id].x += (force[id].x-damp*vel[id].x)*temp; vel[id].y += (force[id].y-damp*vel[id].y)*temp; vel[id].z += (force[id].z-damp*vel[id].z)*temp; pos[id].x += vel[id].x*constant.Dt; pos[id].y += vel[id].y*constant.Dt; pos[id].z += vel[id].z*constant.Dt; } __global__ void getForcesCollisionSingleGPU(float4 *pos, float4 *vel, float4 *force, 
forceCollisionKernalConstantsStruct constant) { int id, ids; int inout; float4 forceSum; float4 posMe; float4 velMe; int test; float force_mag; float4 dp; float4 dv; float r2; float r; float invr; __shared__ float4 shPos[BLOCKSIZE]; __shared__ float4 shVel[BLOCKSIZE]; id = threadIdx.x + blockDim.x*blockIdx.x; forceSum.x = 0.0f; forceSum.y = 0.0f; forceSum.z = 0.0f; posMe.x = pos[id].x; posMe.y = pos[id].y; posMe.z = pos[id].z; velMe.x = vel[id].x; velMe.y = vel[id].y; velMe.z = vel[id].z; for(int j=0; j < gridDim.x; j++) { shPos[threadIdx.x] = pos[threadIdx.x + blockDim.x*j]; shVel[threadIdx.x] = vel[threadIdx.x + blockDim.x*j]; __syncthreads(); for(int i=0; i < blockDim.x; i++) { ids = i + blockDim.x*j; dp.x = shPos[i].x - posMe.x; dp.y = shPos[i].y - posMe.y; dp.z = shPos[i].z - posMe.z; r2 = dp.x*dp.x + dp.y*dp.y + dp.z*dp.z; r = sqrt(r2); if(id == ids) invr = 0; else invr = 1.0f/r; test = 0; if(id < constant.NFe) test = 1; if(ids < constant.NFe) test++; if(test == 0) //Silicate silicate force { if(1.0 <= r) { force_mag = 1.0/r2; // G = 1 and mass of silicate elemnet =1 } else if(constant.ShellBreakSi <= r) { force_mag = 1.0 - constant.KSiSi*(1.0 - r2); // because D = 1 G = 1 and mass of silicate = 1 } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = 1.0 - constant.KSiSi*(1.0 - r2); else force_mag = 1.0 - constant.KRSiSi*(1.0 - r2); } } else if(test == 1) //Silicate iron force { if(1.0 <= r) { force_mag = constant.GMassFeSi/r2; } else if(constant.ShellBreakFeSi1 <= r) { force_mag = constant.GMassFeSi -constant.KFeSi*(1.0 - r2); } else if(constant.ShellBreakFeSi2 <= r) { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRMix*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRFeSi*(1.0 - r2); } } else //Iron iron force { if(1.0 <= r) { force_mag = constant.GMassFeFe/r2; } else if(constant.ShellBreakFe <= r) { force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); else force_mag = constant.GMassFeFe - constant.KRFeFe*(1.0 - r2); } } forceSum.x += force_mag*dp.x*invr; forceSum.y += force_mag*dp.y*invr; forceSum.z += force_mag*dp.z*invr; } __syncthreads(); } force[id].x = forceSum.x; force[id].y = forceSum.y; force[id].z = forceSum.z; } __global__ void moveBodiesCollisionSingleGPU(float4 *pos, float4 *vel, float4 * force, moveCollisionKernalConstantsStruct MoveCollisionConstant) { float temp; int id; id = threadIdx.x + blockDim.x*blockIdx.x; if(id < MoveCollisionConstant.NFe) temp = MoveCollisionConstant.DtOverMassFe; else temp = MoveCollisionConstant.DtOverMassSi; vel[id].x += (force[id].x)*temp; vel[id].y += (force[id].y)*temp; vel[id].z += (force[id].z)*temp; pos[id].x += vel[id].x*MoveCollisionConstant.Dt; pos[id].y += vel[id].y*MoveCollisionConstant.Dt; pos[id].z += vel[id].z*MoveCollisionConstant.Dt; } __global__ void 
getForcesCollisionDoubleGPU0(float4 *posFstHalf, float4 *posSndHalf, float4 *velFstHalf, float4 *velSndHalf, float4 *forceFstHalf, int N, forceCollisionKernalConstantsStruct constant) { int id, ids; int i,j; int inout; float4 forceSum; float4 posMe; float4 velMe; int test; float force_mag; float4 dp; float4 dv; float r2; float r; float invr; __shared__ float4 shPos[BLOCKSIZE]; __shared__ float4 shVel[BLOCKSIZE]; id = threadIdx.x + blockDim.x*blockIdx.x; forceSum.x = 0.0f; forceSum.y = 0.0f; forceSum.z = 0.0f; posMe.x = posFstHalf[id].x; posMe.y = posFstHalf[id].y; posMe.z = posFstHalf[id].z; velMe.x = velFstHalf[id].x; velMe.y = velFstHalf[id].y; velMe.z = velFstHalf[id].z; for(j=0; j < gridDim.x; j++) { shPos[threadIdx.x] = posFstHalf[threadIdx.x + blockDim.x*j]; shVel[threadIdx.x] = velFstHalf[threadIdx.x + blockDim.x*j]; __syncthreads(); for(i=0; i < blockDim.x; i++) { ids = i + blockDim.x*j; dp.x = shPos[i].x - posMe.x; dp.y = shPos[i].y - posMe.y; dp.z = shPos[i].z - posMe.z; r2 = dp.x*dp.x + dp.y*dp.y + dp.z*dp.z; r = sqrt(r2); if(id == ids) invr = 0; else invr = 1.0f/r; test = 0; if(id < constant.NFe) test = 1; if(ids < constant.NFe) test++; if(test == 0) //Silicate silicate force { if(1.0 <= r) { force_mag = 1.0/r2; // G = 1 and mass of silicate elemnet =1 } else if(constant.ShellBreakSi <= r) { force_mag = 1.0 - constant.KSiSi*(1.0 - r2); // because D = 1 G = 1 and mass of silicate = 1 } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = 1.0 - constant.KSiSi*(1.0 - r2); else force_mag = 1.0 - constant.KRSiSi*(1.0 - r2); } } else if(test == 1) //Silicate iron force { if(1.0 <= r) { force_mag = constant.GMassFeSi/r2; } else if(constant.ShellBreakFeSi1 <= r) { force_mag = constant.GMassFeSi -constant.KFeSi*(1.0 - r2); } else if(constant.ShellBreakFeSi2 <= r) { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRMix*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRFeSi*(1.0 - r2); } } else //Iron iron force { if(1.0 <= r) { force_mag = constant.GMassFeFe/r2; } else if(constant.ShellBreakFe <= r) { force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); else force_mag = constant.GMassFeFe - constant.KRFeFe*(1.0 - r2); } } forceSum.x += force_mag*dp.x*invr; forceSum.y += force_mag*dp.y*invr; forceSum.z += force_mag*dp.z*invr; } __syncthreads(); } for(j=0; j < gridDim.x; j++) { shPos[threadIdx.x] = posSndHalf[threadIdx.x + blockDim.x*j]; shVel[threadIdx.x] = velSndHalf[threadIdx.x + blockDim.x*j]; __syncthreads(); for(i=0; i < blockDim.x; i++) { ids = i + blockDim.x*j; dp.x = shPos[i].x - posMe.x; dp.y = shPos[i].y - posMe.y; dp.z = shPos[i].z - posMe.z; r2 = dp.x*dp.x + dp.y*dp.y + dp.z*dp.z; r = sqrt(r2); invr = 1.0f/r; test = 0; if(id < constant.NFe) test = 1; if(ids+(N/2) < constant.NFe) test++; if(test == 0) //Silicate silicate force { 
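/*
   Summary of the pair force used by every getForces* kernel in this file, in code units where
   Diameter = 1, Gravity = 1 and MassSi = 1. For a pair at separation r the magnitude is piecewise:
       r >= 1              : G*m_i*m_j/r^2                   (no contact, pure gravity)
       ShellBreak <= r < 1 : G*m_i*m_j - K*(1 - r^2)         (overlapping, linear spring in r^2)
       r <  ShellBreak     : the same K while the pair is still approaching (inout = dp.dv <= 0),
                             but the rebound constant K*KR once it recedes, so with KR < 1 a deep
                             overlap loses energy and the elements tend to stick together.
   The host-side constant structs simply pre-bake the material-dependent m, K, KR and shell-break
   depths (SDFe vs SDSi) into per-pair-type numbers such as GMassFeSi, KFeSi, KRMix and
   ShellBreakFeSi1/2.
*/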
if(1.0 <= r) { force_mag = 1.0/r2; // G = 1 and mass of silicate elemnet =1 } else if(constant.ShellBreakSi <= r) { force_mag = 1.0 - constant.KSiSi*(1.0 - r2); // because D = 1 G = 1 and mass of silicate = 1 } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = 1.0 - constant.KSiSi*(1.0 - r2); else force_mag = 1.0 - constant.KRSiSi*(1.0 - r2); } } else if(test == 1) //Silicate iron force { if(1.0 <= r) { force_mag = constant.GMassFeSi/r2; } else if(constant.ShellBreakFeSi1 <= r) { force_mag = constant.GMassFeSi -constant.KFeSi*(1.0 - r2); } else if(constant.ShellBreakFeSi2 <= r) { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRMix*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRFeSi*(1.0 - r2); } } else //Iron iron force { if(1.0 <= r) { force_mag = constant.GMassFeFe/r2; } else if(constant.ShellBreakFe <= r) { force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); else force_mag = constant.GMassFeFe - constant.KRFeFe*(1.0 - r2); } } forceSum.x += force_mag*dp.x*invr; forceSum.y += force_mag*dp.y*invr; forceSum.z += force_mag*dp.z*invr; } __syncthreads(); } forceFstHalf[id].x = forceSum.x; forceFstHalf[id].y = forceSum.y; forceFstHalf[id].z = forceSum.z; } __global__ void getForcesCollisionDoubleGPU1(float4 *posFstHalf, float4 *posSndHalf, float4 *velFstHalf, float4 *velSndHalf, float4 *forceSndHalf, int N, forceCollisionKernalConstantsStruct constant) { int id, ids; int i,j; int inout; float4 forceSum; float4 posMe; float4 velMe; int test; float force_mag; float4 dp; float4 dv; float r2; float r; float invr; __shared__ float4 shPos[BLOCKSIZE]; __shared__ float4 shVel[BLOCKSIZE]; id = threadIdx.x + blockDim.x*blockIdx.x; forceSum.x = 0.0f; forceSum.y = 0.0f; forceSum.z = 0.0f; posMe.x = posSndHalf[id].x; posMe.y = posSndHalf[id].y; posMe.z = posSndHalf[id].z; velMe.x = velSndHalf[id].x; velMe.y = velSndHalf[id].y; velMe.z = velSndHalf[id].z; for(j=0; j < gridDim.x; j++) { shPos[threadIdx.x] = posFstHalf[threadIdx.x + blockDim.x*j]; shVel[threadIdx.x] = velFstHalf[threadIdx.x + blockDim.x*j]; __syncthreads(); for(i=0; i < blockDim.x; i++) { ids = i + blockDim.x*j; dp.x = shPos[i].x - posMe.x; dp.y = shPos[i].y - posMe.y; dp.z = shPos[i].z - posMe.z; r2 = dp.x*dp.x + dp.y*dp.y + dp.z*dp.z; r = sqrt(r2); invr = 1.0f/r; test = 0; if(id + (N/2) < constant.NFe) test = 1; if(ids < constant.NFe) test++; if(test == 0) //Silicate silicate force { if(1.0 <= r) { force_mag = 1.0/r2; // G = 1 and mass of silicate elemnet =1 } else if(constant.ShellBreakSi <= r) { force_mag = 1.0 - constant.KSiSi*(1.0 - r2); // because D = 1 G = 1 and mass of silicate = 1 } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = 1.0 - constant.KSiSi*(1.0 - r2); 
else force_mag = 1.0 - constant.KRSiSi*(1.0 - r2); } } else if(test == 1) //Silicate iron force { if(1.0 <= r) { force_mag = constant.GMassFeSi/r2; } else if(constant.ShellBreakFeSi1 <= r) { force_mag = constant.GMassFeSi -constant.KFeSi*(1.0 - r2); } else if(constant.ShellBreakFeSi2 <= r) { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRMix*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRFeSi*(1.0 - r2); } } else //Iron iron force { if(1.0 <= r) { force_mag = constant.GMassFeFe/r2; } else if(constant.ShellBreakFe <= r) { force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); else force_mag = constant.GMassFeFe - constant.KRFeFe*(1.0 - r2); } } forceSum.x += force_mag*dp.x*invr; forceSum.y += force_mag*dp.y*invr; forceSum.z += force_mag*dp.z*invr; } __syncthreads(); } for(j=0; j < gridDim.x; j++) { shPos[threadIdx.x] = posSndHalf[threadIdx.x + blockDim.x*j]; shVel[threadIdx.x] = velSndHalf[threadIdx.x + blockDim.x*j]; __syncthreads(); for(i=0; i < blockDim.x; i++) { ids = i + blockDim.x*j ; dp.x = shPos[i].x - posMe.x; dp.y = shPos[i].y - posMe.y; dp.z = shPos[i].z - posMe.z; r2 = dp.x*dp.x + dp.y*dp.y + dp.z*dp.z; r = sqrt(r2); if(id == ids) invr = 0; else invr = 1.0f/r; test = 0; if(id + (N/2) < constant.NFe) test = 1; if(ids+(N/2) < constant.NFe) test++; if(test == 0) //Silicate silicate force { if(1.0 <= r) { force_mag = 1.0/r2; // G = 1 and mass of silicate elemnet =1 } else if(constant.ShellBreakSi <= r) { force_mag = 1.0 - constant.KSiSi*(1.0 - r2); // because D = 1 G = 1 and mass of silicate = 1 } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = 1.0 - constant.KSiSi*(1.0 - r2); else force_mag = 1.0 - constant.KRSiSi*(1.0 - r2); } } else if(test == 1) //Silicate iron force { if(1.0 <= r) { force_mag = constant.GMassFeSi/r2; } else if(constant.ShellBreakFeSi1 <= r) { force_mag = constant.GMassFeSi -constant.KFeSi*(1.0 - r2); } else if(constant.ShellBreakFeSi2 <= r) { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRMix*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 0) force_mag = constant.GMassFeSi - constant.KFeSi*(1.0 - r2); else force_mag = constant.GMassFeSi - constant.KRFeSi*(1.0 - r2); } } else //Iron iron force { if(1.0 <= r) { force_mag = constant.GMassFeFe/r2; } else if(constant.ShellBreakFe <= r) { force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); } else { dv.x = shVel[i].x - velMe.x; dv.y = shVel[i].y - velMe.y; dv.z = shVel[i].z - velMe.z; inout = dp.x*dv.x + dp.y*dv.y + dp.z*dv.z; if(inout <= 
0) force_mag = constant.GMassFeFe - constant.KFeFe*(1.0 - r2); else force_mag = constant.GMassFeFe - constant.KRFeFe*(1.0 - r2); } } forceSum.x += force_mag*dp.x*invr; forceSum.y += force_mag*dp.y*invr; forceSum.z += force_mag*dp.z*invr; } __syncthreads(); } forceSndHalf[id].x = forceSum.x; forceSndHalf[id].y = forceSum.y; forceSndHalf[id].z = forceSum.z; } __global__ void moveBodiesCollisionDoubleGPU0(float4 *posFstHalf, float4 *velFstHalf, float4 * forceFstHalf, int N, moveCollisionKernalConstantsStruct constant) { float temp; int id; id = threadIdx.x + blockDim.x*blockIdx.x; if(id < constant.NFe) temp = constant.DtOverMassFe; else temp = constant.DtOverMassSi; velFstHalf[id].x += (forceFstHalf[id].x)*temp; velFstHalf[id].y += (forceFstHalf[id].y)*temp; velFstHalf[id].z += (forceFstHalf[id].z)*temp; posFstHalf[id].x += velFstHalf[id].x*constant.Dt; posFstHalf[id].y += velFstHalf[id].y*constant.Dt; posFstHalf[id].z += velFstHalf[id].z*constant.Dt; } __global__ void moveBodiesCollisionDoubleGPU1(float4 *posSndHalf, float4 *velSndHalf, float4 * forceSndHalf, int N, moveCollisionKernalConstantsStruct constant) { float temp; int id; id = threadIdx.x + blockDim.x*blockIdx.x; if(id + (N/2) < constant.NFe) temp = constant.DtOverMassFe; else temp = constant.DtOverMassSi; velSndHalf[id].x += (forceSndHalf[id].x)*temp; velSndHalf[id].y += (forceSndHalf[id].y)*temp; velSndHalf[id].z += (forceSndHalf[id].z)*temp; posSndHalf[id].x += velSndHalf[id].x*constant.Dt; posSndHalf[id].y += velSndHalf[id].y*constant.Dt; posSndHalf[id].z += velSndHalf[id].z*constant.Dt; } float3 getCenterOfMassSeperate(int scope) { float totalMass; float assumeZero = 0.0000001; float3 centerOfMass; centerOfMass.x = 0.0f; centerOfMass.y = 0.0f; centerOfMass.z = 0.0f; if(scope == 0) //entire system { totalMass = MassOfBody1 + MassOfBody2; if(totalMass < assumeZero) return(centerOfMass); for(int i = 0; i < NFe1; i++) { centerOfMass.x += Pos[i].x*MassFe; centerOfMass.y += Pos[i].y*MassFe; centerOfMass.z += Pos[i].z*MassFe; } for(int i = NFe1; i < NFe1 + NSi1; i++) { centerOfMass.x += Pos[i].x*MassSi; centerOfMass.y += Pos[i].y*MassSi; centerOfMass.z += Pos[i].z*MassSi; } for(int i = NFe1 + NSi1; i < NFe1 + NSi1 + NFe2; i++) { centerOfMass.x += Pos[i].x*MassFe; centerOfMass.y += Pos[i].y*MassFe; centerOfMass.z += Pos[i].z*MassFe; } for(int i = NFe1 + NSi1 + NFe2; i < N; i++) { centerOfMass.x += Pos[i].x*MassSi; centerOfMass.y += Pos[i].y*MassSi; centerOfMass.z += Pos[i].z*MassSi; } } else if(scope == 1) //body1 { totalMass = MassOfBody1; if(totalMass < assumeZero) return(centerOfMass); for(int i = 0; i < NFe1; i++) { centerOfMass.x += Pos[i].x*MassFe; centerOfMass.y += Pos[i].y*MassFe; centerOfMass.z += Pos[i].z*MassFe; } for(int i = NFe1; i < NFe1 + NSi1; i++) { centerOfMass.x += Pos[i].x*MassSi; centerOfMass.y += Pos[i].y*MassSi; centerOfMass.z += Pos[i].z*MassSi; } } else if(scope == 2) //body2 { totalMass = MassOfBody2; if(totalMass < assumeZero) return(centerOfMass); for(int i = NFe1 + NSi1; i < NFe1 + NSi1 + NFe2; i++) { centerOfMass.x += Pos[i].x*MassFe; centerOfMass.y += Pos[i].y*MassFe; centerOfMass.z += Pos[i].z*MassFe; } for(int i = NFe1 + NSi1 + NFe2; i < N; i++) { centerOfMass.x += Pos[i].x*MassSi; centerOfMass.y += Pos[i].y*MassSi; centerOfMass.z += Pos[i].z*MassSi; } } else { printf("\nTSU Error: In getCenterOfMassSeperate function scope invalid\n"); exit(0); } centerOfMass.x /= totalMass; centerOfMass.y /= totalMass; centerOfMass.z /= totalMass; return(centerOfMass); } float3 getLinearVelocitySeperate(int scope) { 
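/*
   What the bookkeeping helpers in this part of the file compute: scope 0/1/2 selects the whole
   system, body one (indices 0 .. NFe1+NSi1-1) or body two (the remaining indices), and each
   helper forms a mass-weighted sum over that range with m_i = MassFe or MassSi:
       center of mass    R = (1/M) * sum_i m_i*r_i
       linear velocity   V = (1/M) * sum_i m_i*v_i      (total momentum over total mass)
       angular momentum  L = sum_i m_i*(r_i - R) x (v_i - V)
   The componentwise sums in getAngularMomentumSeperate are the usual cross product, with the
   minus sign written out explicitly on the y component.
*/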
double totalMass; float assumeZero = 0.0000001; float3 linearVelocity; linearVelocity.x = 0.0f; linearVelocity.y = 0.0f; linearVelocity.z = 0.0f; if(scope == 0) //Entire system { totalMass = MassOfBody1 + MassOfBody2; if(totalMass < assumeZero) return(linearVelocity); for(int i = 0; i < NFe1; i++) { linearVelocity.x += Vel[i].x*MassFe; linearVelocity.y += Vel[i].y*MassFe; linearVelocity.z += Vel[i].z*MassFe; } for(int i = NFe1; i < NFe1 + NSi1; i++) { linearVelocity.x += Vel[i].x*MassSi; linearVelocity.y += Vel[i].y*MassSi; linearVelocity.z += Vel[i].z*MassSi; } for(int i = NFe1 + NSi1; i < NFe1 + NSi1 + NFe2; i++) { linearVelocity.x += Vel[i].x*MassFe; linearVelocity.y += Vel[i].y*MassFe; linearVelocity.z += Vel[i].z*MassFe; } for(int i = NFe1 + NSi1 + NFe2; i < N; i++) { linearVelocity.x += Vel[i].x*MassSi; linearVelocity.y += Vel[i].y*MassSi; linearVelocity.z += Vel[i].z*MassSi; } } else if(scope == 1) //body1 { totalMass = MassOfBody1; if(totalMass < assumeZero) return(linearVelocity); for(int i = 0; i < NFe1; i++) { linearVelocity.x += Vel[i].x*MassFe; linearVelocity.y += Vel[i].y*MassFe; linearVelocity.z += Vel[i].z*MassFe; } for(int i = NFe1; i < NFe1 + NSi1; i++) { linearVelocity.x += Vel[i].x*MassSi; linearVelocity.y += Vel[i].y*MassSi; linearVelocity.z += Vel[i].z*MassSi; } } else if (scope == 2) //body2 { totalMass = MassOfBody2; if(totalMass < assumeZero) return(linearVelocity); for(int i = NFe1 + NSi1; i < NFe1 + NSi1 + NFe2; i++) { linearVelocity.x += Vel[i].x*MassFe; linearVelocity.y += Vel[i].y*MassFe; linearVelocity.z += Vel[i].z*MassFe; } for(int i = NFe1 + NSi1 + NFe2; i < N; i++) { linearVelocity.x += Vel[i].x*MassSi; linearVelocity.y += Vel[i].y*MassSi; linearVelocity.z += Vel[i].z*MassSi; } } else { printf("\nTSU Error: In getLinearVelocitySeperate function scope invalid\n"); exit(0); } linearVelocity.x /= totalMass; linearVelocity.y /= totalMass; linearVelocity.z /= totalMass; return(linearVelocity); } float3 getAngularMomentumSeperate(int scope, float3 center, float3 velocity) { float3 angularMomentum; float3 r; float3 v; angularMomentum.x = 0.0f; angularMomentum.y = 0.0f; angularMomentum.z = 0.0f; if(scope == 0) //entire system { for(int i = 0; i < NFe1; i++) { r.x = Pos[i].x - center.x; r.y = Pos[i].y - center.y; r.z = Pos[i].z - center.z; v.x = Vel[i].x - velocity.x; v.y = Vel[i].y - velocity.y; v.z = Vel[i].z - velocity.z; angularMomentum.x += (r.y*v.z - r.z*v.y)*MassFe; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassFe; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassFe; } for(int i = NFe1; i < NFe1 + NSi1; i++) { r.x = Pos[i].x - center.x; r.y = Pos[i].y - center.y; r.z = Pos[i].z - center.z; v.x = Vel[i].x - velocity.x; v.y = Vel[i].y - velocity.y; v.z = Vel[i].z - velocity.z; angularMomentum.x += (r.y*v.z - r.z*v.y)*MassSi; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassSi; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassSi; } for(int i = NFe1 + NSi1; i < NFe1 + NSi1 + NFe2; i++) { r.x = Pos[i].x - center.x; r.y = Pos[i].y - center.y; r.z = Pos[i].z - center.z; v.x = Vel[i].x - velocity.x; v.y = Vel[i].y - velocity.y; v.z = Vel[i].z - velocity.z; angularMomentum.x += (r.y*v.z - r.z*v.y)*MassFe; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassFe; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassFe; } for(int i = NFe1 + NSi1 + NFe2; i < N; i++) { r.x = Pos[i].x - center.x; r.y = Pos[i].y - center.y; r.z = Pos[i].z - center.z; v.x = Vel[i].x - velocity.x; v.y = Vel[i].y - velocity.y; v.z = Vel[i].z - velocity.z; angularMomentum.x += (r.y*v.z - r.z*v.y)*MassSi; 
angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassSi; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassSi; } } else if(scope == 1) //body1 { for(int i = 0; i < NFe1; i++) { r.x = Pos[i].x - center.x; r.y = Pos[i].y - center.y; r.z = Pos[i].z - center.z; v.x = Vel[i].x - velocity.x; v.y = Vel[i].y - velocity.y; v.z = Vel[i].z - velocity.z; angularMomentum.x += (r.y*v.z - r.z*v.y)*MassFe; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassFe; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassFe; } for(int i = NFe1; i < NFe1 + NSi1; i++) { r.x = Pos[i].x - center.x; r.y = Pos[i].y - center.y; r.z = Pos[i].z - center.z; v.x = Vel[i].x - velocity.x; v.y = Vel[i].y - velocity.y; v.z = Vel[i].z - velocity.z; angularMomentum.x += (r.y*v.z - r.z*v.y)*MassSi; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassSi; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassSi; } } else if(scope == 2) //body2 { for(int i = NFe1 + NSi1; i < NFe1 + NSi1 + NFe2; i++) { r.x = Pos[i].x - center.x; r.y = Pos[i].y - center.y; r.z = Pos[i].z - center.z; v.x = Vel[i].x - velocity.x; v.y = Vel[i].y - velocity.y; v.z = Vel[i].z - velocity.z; angularMomentum.x += (r.y*v.z - r.z*v.y)*MassFe; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassFe; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassFe; } for(int i = NFe1 + NSi1 + NFe2; i < N; i++) { r.x = Pos[i].x - center.x; r.y = Pos[i].y - center.y; r.z = Pos[i].z - center.z; v.x = Vel[i].x - velocity.x; v.y = Vel[i].y - velocity.y; v.z = Vel[i].z - velocity.z; angularMomentum.x += (r.y*v.z - r.z*v.y)*MassSi; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassSi; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassSi; } } else { printf("\nTSU Error: In getAngularMomentumSeperate function scope invalid\n"); exit(0); } return(angularMomentum); } void setBodyPositionSeperate(int bodyId, float x, float y, float z) { int start, stop; if(bodyId == 1) { start = 0; stop = NFe1 + NSi1; } else if(bodyId == 2) { start = NFe1 + NSi1; stop = N; } else { printf("\nTSU Error: in setBodyPositionSeperate function bodyId invalid\n"); exit(0); } float3 centerOfMass = getCenterOfMassSeperate(bodyId); for(int i = start; i < stop; i++) { Pos[i].x += x - centerOfMass.x; Pos[i].y += y - centerOfMass.y; Pos[i].z += z - centerOfMass.z; } } void setBodyVelocitySeperate(int bodyId, float vx, float vy, float vz) { int start, stop; if(bodyId == 1) { start = 0; stop = NFe1 + NSi1; } else if(bodyId == 2) { start = NFe1 + NSi1; stop = N; } else { printf("\nTSU Error: in setBodyVelocitySeperate invalid bodyId\n"); exit(0); } float3 RandomlinearVelocity = getLinearVelocitySeperate(bodyId); for(int i = start; i < stop; i++) { Vel[i].x += vx - RandomlinearVelocity.x; Vel[i].y += vy - RandomlinearVelocity.y; Vel[i].z += vz - RandomlinearVelocity.z; } } void spinBodySeperate(int bodyId, float4 spinVector) { float3 r; //vector from center of mass to the position vector float3 centerOfMass; float3 n; //Unit vector perpendicular to the plane of spin float mag; float assumeZero = 0.0000001; int start, stop; if(bodyId == 1) { start = 0; stop = NFe1 + NSi1; } else { start = NFe1 + NSi1; stop = N; } //Making sure the spin vector is a unit vector mag = sqrt(spinVector.x*spinVector.x + spinVector.y*spinVector.y + spinVector.z*spinVector.z); if(assumeZero < mag) { spinVector.x /= mag; spinVector.y /= mag; spinVector.z /= mag; } else { printf("\nTSU Error: In spinBodySeperate. 
The spin direction vector is zero.\n"); exit(0); } centerOfMass = getCenterOfMassSeperate(bodyId); for(int i = start; i < stop; i++) { //Creating a vector from the center of mass to the point
r.x = Pos[i].x - centerOfMass.x; r.y = Pos[i].y - centerOfMass.y; r.z = Pos[i].z - centerOfMass.z; float magsquared = r.x*r.x + r.y*r.y + r.z*r.z; float spinDota = spinVector.x*r.x + spinVector.y*r.y + spinVector.z*r.z; float perpendicularDistance = sqrt(magsquared - spinDota*spinDota); float perpendicularVelocity = spinVector.w*2.0*Pi*perpendicularDistance; //finding unit vector perpendicular to both the position vector and the spin vector
n.x = (spinVector.y*r.z - spinVector.z*r.y); n.y = -(spinVector.x*r.z - spinVector.z*r.x); n.z = (spinVector.x*r.y - spinVector.y*r.x); mag = sqrt(n.x*n.x + n.y*n.y + n.z*n.z); if(mag != 0.0) { n.x /= mag; n.y /= mag; n.z /= mag; //Spinning the element
Vel[i].x += perpendicularVelocity*n.x; Vel[i].y += perpendicularVelocity*n.y; Vel[i].z += perpendicularVelocity*n.z; } } }
double vectorMagnitude(float3 v) { return(sqrt(v.x*v.x + v.y*v.y + v.z*v.z)); }
void recordStatsOfCreatedBodies() { float radiusOfBody; float massOfBody; float3 r; double mag, d; float3 centerOfMass; float3 linearVelocity; float3 angularMomentum; double lengthConvertion = UnitLength; double massConvertion = UnitMass; double velocityConvertion = UnitLength/UnitTime; double AngularMomentumConvertion = (UnitMass*UnitLength*UnitLength)/(UnitTime); fprintf(RunStatsFile, "\n\n\n*****************************************************************************************************\n"); fprintf(RunStatsFile, "\nThe following are the statistics of the system right before they are released to collide in real world units\n"); fprintf(RunStatsFile, "\n\n***** Stats for the universal system *****\n"); centerOfMass = getCenterOfMassSeperate(0); fprintf(RunStatsFile, "\nThe center of mass = (%f, %f, %f) Kilometers from (0, 0, 0)\n", centerOfMass.x*lengthConvertion, centerOfMass.y*lengthConvertion, centerOfMass.z*lengthConvertion); linearVelocity = getLinearVelocitySeperate(0); fprintf(RunStatsFile, "\nThe average linear velocity = (%f, %f, %f)", linearVelocity.x*velocityConvertion, linearVelocity.y*velocityConvertion, linearVelocity.z*velocityConvertion); mag = vectorMagnitude(linearVelocity); fprintf(RunStatsFile, "\nThe magnitude of the average linear velocity = %f Kilometers/second\n", mag*velocityConvertion); angularMomentum = getAngularMomentumSeperate(0, getCenterOfMassSeperate(0), getLinearVelocitySeperate(0)); fprintf(RunStatsFile, "\nThe angular momentum = (%e, %e, %e)", angularMomentum.x*AngularMomentumConvertion, angularMomentum.y*AngularMomentumConvertion, angularMomentum.z*AngularMomentumConvertion); mag = vectorMagnitude(angularMomentum); fprintf(RunStatsFile, "\nThe magnitude of the angular momentum = %e Kilograms*kilometers*kilometers/second\n", mag*AngularMomentumConvertion); fprintf(RunStatsFile, "\n\n***** Stats for Body1 *****\n"); centerOfMass = getCenterOfMassSeperate(1); radiusOfBody = 0.0; massOfBody = 0.0; for(int i = 0; i < NFe1; i++) { r.x = Pos[i].x - centerOfMass.x; r.y = Pos[i].y - centerOfMass.y; r.z = Pos[i].z - centerOfMass.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); if(d > radiusOfBody) radiusOfBody = d; massOfBody += MassFe; } for(int i = NFe1; i < NFe1 + NSi1; i++) { r.x = Pos[i].x - centerOfMass.x; r.y = Pos[i].y - centerOfMass.y; r.z = Pos[i].z - centerOfMass.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); if(d > radiusOfBody) radiusOfBody = d; massOfBody += MassSi; } fprintf(RunStatsFile, "\nMass 
= %e Kilograms\n", massOfBody*massConvertion); fprintf(RunStatsFile, "\nRadius = %f Kilometers\n", radiusOfBody*lengthConvertion); fprintf(RunStatsFile, "\nThe center of mass = (%f, %f, %f) Kilometers from (0, 0, 0)\n", centerOfMass.x*lengthConvertion, centerOfMass.y*lengthConvertion, centerOfMass.z*lengthConvertion); linearVelocity = getLinearVelocitySeperate(1); fprintf(RunStatsFile, "\nThe average linear velocity = (%f, %f, %f)", linearVelocity.x*velocityConvertion, linearVelocity.y*velocityConvertion, linearVelocity.z*velocityConvertion); mag = vectorMagnitude(linearVelocity); fprintf(RunStatsFile, "\nThe magitude of the avergae linear velocity = %f Kilometers/second\n", mag*velocityConvertion); angularMomentum = getAngularMomentumSeperate(1, getCenterOfMassSeperate(1), getLinearVelocitySeperate(1)); fprintf(RunStatsFile, "\nThe angular momentum = (%e, %e, %e)", angularMomentum.x*AngularMomentumConvertion, angularMomentum.y*AngularMomentumConvertion, angularMomentum.z*AngularMomentumConvertion); mag = vectorMagnitude(angularMomentum); fprintf(RunStatsFile, "\nThe magitude of the angular momentum = %e Kilograms*kilometers*kilometers/second\n", mag*AngularMomentumConvertion); fprintf(RunStatsFile, "\n\n***** Stats for Body2 *****\n"); centerOfMass = getCenterOfMassSeperate(2); radiusOfBody = 0.0; massOfBody = 0.0; for(int i = NFe1 + NSi1; i < NFe1 + NSi1 + NFe2; i++) { r.x = Pos[i].x - centerOfMass.x; r.y = Pos[i].y - centerOfMass.y; r.z = Pos[i].z - centerOfMass.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); if(d > radiusOfBody) radiusOfBody = d; massOfBody += MassFe; } for(int i = NFe1 + NSi1 + NFe2; i < N; i++) { r.x = Pos[i].x - centerOfMass.x; r.y = Pos[i].y - centerOfMass.y; r.z = Pos[i].z - centerOfMass.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); if(d > radiusOfBody) radiusOfBody = d; massOfBody += MassSi; } fprintf(RunStatsFile, "\nMass = %e Kilograms\n", massOfBody*massConvertion); fprintf(RunStatsFile, "\nRadius = %f Kilometers\n", radiusOfBody*lengthConvertion); fprintf(RunStatsFile, "\nThe center of mass = (%f, %f, %f) Kilometers from (0, 0, 0)\n", centerOfMass.x*lengthConvertion, centerOfMass.y*lengthConvertion, centerOfMass.z*lengthConvertion); linearVelocity = getLinearVelocitySeperate(2); fprintf(RunStatsFile, "\nThe average linear velocity = (%f, %f, %f)", linearVelocity.x*velocityConvertion, linearVelocity.y*velocityConvertion, linearVelocity.z*velocityConvertion); mag = vectorMagnitude(linearVelocity); fprintf(RunStatsFile, "\nThe magitude of the avergae linear velocity = %f Kilometers/second\n", mag*velocityConvertion); angularMomentum = getAngularMomentumSeperate(2, getCenterOfMassSeperate(2), getLinearVelocitySeperate(2)); fprintf(RunStatsFile, "\nThe angular momentum = (%e, %e, %e)", angularMomentum.x*AngularMomentumConvertion, angularMomentum.y*AngularMomentumConvertion, angularMomentum.z*AngularMomentumConvertion); mag = vectorMagnitude(angularMomentum); fprintf(RunStatsFile, "\nThe magitude of the angular momentum = %e Kilograms*kilometers*kilometers/second\n", mag*AngularMomentumConvertion); } void recordStartPosVelOfCreatedBodiesSeperate() { cudaMemcpy( Pos, Pos_DEV0, N *sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpy Pos1"); cudaMemcpy( Vel, Vel_DEV0, N *sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpy Vel"); fwrite(Pos, sizeof(float4), N, StartPosAndVelFile); fwrite(Vel, sizeof(float4), N, StartPosAndVelFile); } int findEarthAndMoon() { int groupId[N], used[N]; float mag, dx, dy, dz; float touch = Diameter*1.5; int groupNumber, 
numberOfGroups; int k; for(int i = 0; i < N; i++) { groupId[i] = -1; used[i] = 0; } groupNumber = 0; for(int i = 0; i < N; i++) { if(groupId[i] == -1) { groupId[i] = groupNumber; //find all from this group k = i; while(k < N) { if(groupId[k] == groupNumber && used[k] == 0) { for(int j = i; j < N; j++) { dx = Pos[k].x - Pos[j].x; dy = Pos[k].y - Pos[j].y; dz = Pos[k].z - Pos[j].z; mag = sqrt(dx*dx + dy*dy + dz*dz); if(mag < touch) { groupId[j] = groupNumber; } } used[k] = 1; k = i; } else k++; } } groupNumber++; } numberOfGroups = groupNumber; if(numberOfGroups == 1) { printf("\n No Moon found\n"); } int count; int *groupSize = (int *)malloc(numberOfGroups*sizeof(int)); for(int i = 0; i < numberOfGroups; i++) { count = 0; for(int j = 0; j < N; j++) { if(i == groupId[j]) count++; } groupSize[i] = count; } int earthGroupId = -1; NumberOfEarthElements = 0; for(int i = 0; i < numberOfGroups; i++) { if(groupSize[i] > NumberOfEarthElements) { NumberOfEarthElements = groupSize[i]; earthGroupId = i; } } int moonGroupId = -1; NumberOfMoonElements = 0; for(int i = 0; i < numberOfGroups; i++) { if(groupSize[i] > NumberOfMoonElements && i != earthGroupId) { NumberOfMoonElements = groupSize[i]; moonGroupId = i; } } free(groupSize); EarthIndex = (int *)malloc(NumberOfEarthElements*sizeof(int)); MoonIndex = (int *)malloc(NumberOfMoonElements*sizeof(int)); int earthCount = 0; int moonCount = 0; for(int j = 0; j < N; j++) { if(groupId[j] == earthGroupId) { EarthIndex[earthCount] = j; earthCount++; } else if(groupId[j] == moonGroupId) { MoonIndex[moonCount] = j; moonCount++; } } return(1); } float getMassCollision(int scope) { float mass = 0.0; if(scope == 0) // entire system { for(int i = 0; i < N; i++) { if(i < NFe) mass += MassFe; else mass += MassSi; } } else if(scope == 1) // earth-moon syatem { for(int i = 0; i < NumberOfEarthElements; i++) { if(EarthIndex[i] < NFe) mass += MassFe; else mass += MassSi; } for(int i = 0; i < NumberOfMoonElements; i++) { if(MoonIndex[i] < NFe) mass += MassFe; else mass += MassSi; } } else if(scope == 2) // earth { for(int i = 0; i < NumberOfEarthElements; i++) { if(EarthIndex[i] < NFe) mass += MassFe; else mass += MassSi; } } else if(scope == 3) // moon { for(int i = 0; i < NumberOfMoonElements; i++) { if(MoonIndex[i] < NFe) mass += MassFe; else mass += MassSi; } } else { printf("\nTSU Error: In getMassCollision function bodyId invalid\n"); exit(0); } return(mass); } float3 getCenterOfMassCollision(int scope) { float totalMass; float3 centerOfMass; centerOfMass.x = 0.0; centerOfMass.y = 0.0; centerOfMass.z = 0.0; if(scope == 0) // Entire System { for(int i = 0; i < N; i++) { if(i < NFe) { centerOfMass.x += Pos[i].x*MassFe; centerOfMass.y += Pos[i].y*MassFe; centerOfMass.z += Pos[i].z*MassFe; } else { centerOfMass.x += Pos[i].x*MassSi; centerOfMass.y += Pos[i].y*MassSi; centerOfMass.z += Pos[i].z*MassSi; } } totalMass = getMassCollision(0); centerOfMass.x /= totalMass; centerOfMass.y /= totalMass; centerOfMass.z /= totalMass; } else if(scope == 1) // Earth-Moon System { for(int i = 0; i < NumberOfEarthElements; i++) { if(EarthIndex[i] < NFe) { centerOfMass.x += Pos[EarthIndex[i]].x*MassFe; centerOfMass.y += Pos[EarthIndex[i]].y*MassFe; centerOfMass.z += Pos[EarthIndex[i]].z*MassFe; } else { centerOfMass.x += Pos[EarthIndex[i]].x*MassSi; centerOfMass.y += Pos[EarthIndex[i]].y*MassSi; centerOfMass.z += Pos[EarthIndex[i]].z*MassSi; } } for(int i = 0; i < NumberOfMoonElements; i++) { if(MoonIndex[i] < NFe) { centerOfMass.x += Pos[MoonIndex[i]].x*MassFe; centerOfMass.y += 
Pos[MoonIndex[i]].y*MassFe; centerOfMass.z += Pos[MoonIndex[i]].z*MassFe; } else { centerOfMass.x += Pos[MoonIndex[i]].x*MassSi; centerOfMass.y += Pos[MoonIndex[i]].y*MassSi; centerOfMass.z += Pos[MoonIndex[i]].z*MassSi; } } totalMass = getMassCollision(1); centerOfMass.x /= totalMass; centerOfMass.y /= totalMass; centerOfMass.z /= totalMass; } else if(scope == 2) // Earth { for(int i = 0; i < NumberOfEarthElements; i++) { if(EarthIndex[i] < NFe) { centerOfMass.x += Pos[EarthIndex[i]].x*MassFe; centerOfMass.y += Pos[EarthIndex[i]].y*MassFe; centerOfMass.z += Pos[EarthIndex[i]].z*MassFe; } else { centerOfMass.x += Pos[EarthIndex[i]].x*MassSi; centerOfMass.y += Pos[EarthIndex[i]].y*MassSi; centerOfMass.z += Pos[EarthIndex[i]].z*MassSi; } } totalMass = getMassCollision(2); centerOfMass.x /= totalMass; centerOfMass.y /= totalMass; centerOfMass.z /= totalMass; } else if(scope == 3) // Moon { for(int i = 0; i < NumberOfMoonElements; i++) { if(MoonIndex[i] < NFe) { centerOfMass.x += Pos[MoonIndex[i]].x*MassFe; centerOfMass.y += Pos[MoonIndex[i]].y*MassFe; centerOfMass.z += Pos[MoonIndex[i]].z*MassFe; } else { centerOfMass.x += Pos[MoonIndex[i]].x*MassSi; centerOfMass.y += Pos[MoonIndex[i]].y*MassSi; centerOfMass.z += Pos[MoonIndex[i]].z*MassSi; } } totalMass = getMassCollision(3); centerOfMass.x /= totalMass; centerOfMass.y /= totalMass; centerOfMass.z /= totalMass; } else { printf("\nTSU Error: In getCenterOfMassCollision function scope invalid\n"); exit(0); } return(centerOfMass); } float3 getLinearVelocityCollision(int scope) { float totalMass; float3 linearVelocity; linearVelocity.x = 0.0; linearVelocity.y = 0.0; linearVelocity.z = 0.0; if(scope == 0) // entire system { for(int i = 0; i < N; i++) { if(i < NFe) { linearVelocity.x += Vel[i].x*MassFe; linearVelocity.y += Vel[i].y*MassFe; linearVelocity.z += Vel[i].z*MassFe; } else { linearVelocity.x += Vel[i].x*MassSi; linearVelocity.y += Vel[i].y*MassSi; linearVelocity.z += Vel[i].z*MassSi; } } totalMass = getMassCollision(0); linearVelocity.x /= totalMass; linearVelocity.y /= totalMass; linearVelocity.z /= totalMass; } else if(scope == 1) // earth-moon system { for(int i = 0; i < NumberOfEarthElements; i++) { if(EarthIndex[i] < NFe) { linearVelocity.x += Vel[EarthIndex[i]].x*MassFe; linearVelocity.y += Vel[EarthIndex[i]].y*MassFe; linearVelocity.z += Vel[EarthIndex[i]].z*MassFe; } else { linearVelocity.x += Vel[EarthIndex[i]].x*MassSi; linearVelocity.y += Vel[EarthIndex[i]].y*MassSi; linearVelocity.z += Vel[EarthIndex[i]].z*MassSi; } } for(int i = 0; i < NumberOfMoonElements; i++) { if(MoonIndex[i] < NFe) { linearVelocity.x += Vel[MoonIndex[i]].x*MassFe; linearVelocity.y += Vel[MoonIndex[i]].y*MassFe; linearVelocity.z += Vel[MoonIndex[i]].z*MassFe; } else { linearVelocity.x += Vel[MoonIndex[i]].x*MassSi; linearVelocity.y += Vel[MoonIndex[i]].y*MassSi; linearVelocity.z += Vel[MoonIndex[i]].z*MassSi; } } totalMass = getMassCollision(1); linearVelocity.x /= totalMass; linearVelocity.y /= totalMass; linearVelocity.z /= totalMass; } else if(scope == 2) //earth { for(int i = 0; i < NumberOfEarthElements; i++) { if(EarthIndex[i] < NFe) { linearVelocity.x += Vel[EarthIndex[i]].x*MassFe; linearVelocity.y += Vel[EarthIndex[i]].y*MassFe; linearVelocity.z += Vel[EarthIndex[i]].z*MassFe; } else { linearVelocity.x += Vel[EarthIndex[i]].x*MassSi; linearVelocity.y += Vel[EarthIndex[i]].y*MassSi; linearVelocity.z += Vel[EarthIndex[i]].z*MassSi; } } totalMass = getMassCollision(2); linearVelocity.x /= totalMass; linearVelocity.y /= totalMass; linearVelocity.z 
/= totalMass; } else if(scope == 3) //moon { for(int i = 0; i < NumberOfMoonElements; i++) { if(MoonIndex[i] < NFe) { linearVelocity.x += Vel[MoonIndex[i]].x*MassFe; linearVelocity.y += Vel[MoonIndex[i]].y*MassFe; linearVelocity.z += Vel[MoonIndex[i]].z*MassFe; } else { linearVelocity.x += Vel[MoonIndex[i]].x*MassSi; linearVelocity.y += Vel[MoonIndex[i]].y*MassSi; linearVelocity.z += Vel[MoonIndex[i]].z*MassSi; } } totalMass = getMassCollision(3); linearVelocity.x /= totalMass; linearVelocity.y /= totalMass; linearVelocity.z /= totalMass; } else { printf("\nTSU Error: in getlinearVelocityEarthMoonSystem function scope invalid\n"); exit(0); } return(linearVelocity); } float3 getAngularMomentumCollision(int scope) { float3 centerOfMass, linearVelocity, angularMomentum; float3 r; float3 v; angularMomentum.x = 0.0; angularMomentum.y = 0.0; angularMomentum.z = 0.0; if(scope == 0) //Entire system { centerOfMass = getCenterOfMassCollision(0); linearVelocity = getLinearVelocityCollision(0); for(int i = 0; i < N; i++) { r.x = Pos[i].x - centerOfMass.x; r.y = Pos[i].y - centerOfMass.y; r.z = Pos[i].z - centerOfMass.z; v.x = Vel[i].x - linearVelocity.x; v.y = Vel[i].y - linearVelocity.y; v.z = Vel[i].z - linearVelocity.z; if(i < NFe) { angularMomentum.x += (r.y*v.z - r.z*v.y)*MassFe; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassFe; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassFe; } else { angularMomentum.x += (r.y*v.z - r.z*v.y)*MassSi; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassSi; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassSi; } } } else if(scope == 1) //Earth-Moon system { centerOfMass = getCenterOfMassCollision(1); linearVelocity = getLinearVelocityCollision(1); for(int i = 0; i < NumberOfEarthElements; i++) { r.x = Pos[EarthIndex[i]].x - centerOfMass.x; r.y = Pos[EarthIndex[i]].y - centerOfMass.y; r.z = Pos[EarthIndex[i]].z - centerOfMass.z; v.x = Vel[EarthIndex[i]].x - linearVelocity.x; v.y = Vel[EarthIndex[i]].y - linearVelocity.y; v.z = Vel[EarthIndex[i]].z - linearVelocity.z; if(EarthIndex[i] < NFe) { angularMomentum.x += (r.y*v.z - r.z*v.y)*MassFe; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassFe; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassFe; } else { angularMomentum.x += (r.y*v.z - r.z*v.y)*MassSi; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassSi; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassSi; } } for(int i = 0; i < NumberOfMoonElements; i++) { r.x = Pos[MoonIndex[i]].x - centerOfMass.x; r.y = Pos[MoonIndex[i]].y - centerOfMass.y; r.z = Pos[MoonIndex[i]].z - centerOfMass.z; v.x = Vel[MoonIndex[i]].x - linearVelocity.x; v.y = Vel[MoonIndex[i]].y - linearVelocity.y; v.z = Vel[MoonIndex[i]].z - linearVelocity.z; if(MoonIndex[i] < NFe) { angularMomentum.x += (r.y*v.z - r.z*v.y)*MassFe; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassFe; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassFe; } else { angularMomentum.x += (r.y*v.z - r.z*v.y)*MassSi; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassSi; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassSi; } } } else if(scope == 2) //Earth { centerOfMass = getCenterOfMassCollision(2); linearVelocity = getLinearVelocityCollision(2); for(int i = 0; i < NumberOfEarthElements; i++) { r.x = Pos[EarthIndex[i]].x - centerOfMass.x; r.y = Pos[EarthIndex[i]].y - centerOfMass.y; r.z = Pos[EarthIndex[i]].z - centerOfMass.z; v.x = Vel[EarthIndex[i]].x - linearVelocity.x; v.y = Vel[EarthIndex[i]].y - linearVelocity.y; v.z = Vel[EarthIndex[i]].z - linearVelocity.z; if(EarthIndex[i] < NFe) { angularMomentum.x += (r.y*v.z - r.z*v.y)*MassFe; angularMomentum.y += 
-(r.x*v.z - r.z*v.x)*MassFe; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassFe; } else { angularMomentum.x += (r.y*v.z - r.z*v.y)*MassSi; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassSi; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassSi; } } } else if(scope == 3) //Moon { centerOfMass = getCenterOfMassCollision(3); linearVelocity = getLinearVelocityCollision(3); for(int i = 0; i < NumberOfMoonElements; i++) { r.x = Pos[MoonIndex[i]].x - centerOfMass.x; r.y = Pos[MoonIndex[i]].y - centerOfMass.y; r.z = Pos[MoonIndex[i]].z - centerOfMass.z; v.x = Vel[MoonIndex[i]].x - linearVelocity.x; v.y = Vel[MoonIndex[i]].y - linearVelocity.y; v.z = Vel[MoonIndex[i]].z - linearVelocity.z; if(MoonIndex[i] < NFe) { angularMomentum.x += (r.y*v.z - r.z*v.y)*MassFe; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassFe; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassFe; } else { angularMomentum.x += (r.y*v.z - r.z*v.y)*MassSi; angularMomentum.y += -(r.x*v.z - r.z*v.x)*MassSi; angularMomentum.z += (r.x*v.y - r.y*v.x)*MassSi; } } } else { printf("\nTSU Error: in getAngularMomentumCollision function scope invalid\n"); exit(0); } return(angularMomentum); } void printContinueStatsToScreen(double time) { double timeConverter = UnitTime; double lengthConverter = UnitLength; double massConverter = UnitMass; //double velocityConverter = UnitLength/UnitTime; double momentumConverter = UnitMass*UnitLength*UnitLength/UnitTime; float3 r, v; double d, mass, mag, size, angle, x, y, z; float massEarth; float3 centerOfMassEarth; float3 linearVelocityEarth; float3 centerOfMassEarthMoonMaterial; float3 averageVelocityEarthMoonMaterial; int earthMaterialFeCountBody1 = 0; int earthMaterialFeCountBody2 = 0; int earthMaterialSiCountBody1 = 0; int earthMaterialSiCountBody2 = 0; float earthMaterialMass = 0.0; int moonMaterialFeCountBody1 = 0; int moonMaterialFeCountBody2 = 0; int moonMaterialSiCountBody1 = 0; int moonMaterialSiCountBody2 = 0; float moonMaterialMass = 0.0; int escapeMaterialFeCountBody1 = 0; int escapeMaterialFeCountBody2 = 0; int escapeMaterialSiCountBody1 = 0; int escapeMaterialSiCountBody2 = 0; float escapeMaterialMass = 0.0; int unusedMaterialFeCountBody1 = 0; int unusedMaterialFeCountBody2 = 0; int unusedMaterialSiCountBody1 = 0; int unusedMaterialSiCountBody2 = 0; float unusedMaterialMass = 0.0; float3 angularMomentumHolder; float3 angularMomentumEarthMoonMaterial; float3 angularMomentumEarthMaterial; float3 angularMomentumMoonMaterial; //Finding radius of what the current Earth is findEarthAndMoon(); centerOfMassEarth = getCenterOfMassCollision(2); massEarth = getMassCollision(2); float radiusOfEarth = 0.0; for(int i = 0; i < NumberOfEarthElements; i++) { r.x = Pos[EarthIndex[i]].x - centerOfMassEarth.x; r.y = Pos[EarthIndex[i]].y - centerOfMassEarth.y; r.z = Pos[EarthIndex[i]].z - centerOfMassEarth.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); if(d > radiusOfEarth) radiusOfEarth = d; } // Finding Roche limit and setting sphere to create Earth and sphere to create the Moon float densityEarth = massEarth/((Pi*4.0/3.0)*radiusOfEarth*radiusOfEarth*radiusOfEarth); float densitySi = MassSi/((Pi*4.0/3.0)*(Diameter/2.0)*(Diameter/2.0)*(Diameter/2.0)); float rocheLimit = 2.44*radiusOfEarth*pow((densityEarth/densitySi),1.0/3.0); float radiusEarthMaterial = rocheLimit; float radiusMoonMaterial = NUMBEROFEARTHRADIFORMOONMATERIAL*radiusOfEarth; // Finding mass of Earth material, Moon Material // Finding the center of mass and average velocity of the material we estimating will make the Earth-Moon system // Finding Moon mix and Earth mix 
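// Classification used below: elements within the Roche limit of the proto-Earth's center
// (d < radiusEarthMaterial) are counted as Earth material, and elements between the Roche limit and
// NUMBEROFEARTHRADIFORMOONMATERIAL Earth radii (d < radiusMoonMaterial) are counted as Moon material,
// using the Roche limit 2.44*radiusOfEarth*(densityEarth/densitySi)^(1/3) computed above.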
earthMaterialMass = 0.0; moonMaterialMass = 0.0; centerOfMassEarthMoonMaterial.x = 0.0; centerOfMassEarthMoonMaterial.y = 0.0; centerOfMassEarthMoonMaterial.z = 0.0; averageVelocityEarthMoonMaterial.x = 0.0; averageVelocityEarthMoonMaterial.y = 0.0; averageVelocityEarthMoonMaterial.z = 0.0; for(int i = 0; i < N; i++) { r.x = Pos[i].x - centerOfMassEarth.x; r.y = Pos[i].y - centerOfMassEarth.y; r.z = Pos[i].z - centerOfMassEarth.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); if(d < radiusEarthMaterial) { if(i < NFe) mass = MassFe; else mass = MassSi; earthMaterialMass += mass; centerOfMassEarthMoonMaterial.x += mass*Pos[i].x; centerOfMassEarthMoonMaterial.y += mass*Pos[i].y; centerOfMassEarthMoonMaterial.z += mass*Pos[i].z; averageVelocityEarthMoonMaterial.x += mass*Vel[i].x; averageVelocityEarthMoonMaterial.y += mass*Vel[i].y; averageVelocityEarthMoonMaterial.z += mass*Vel[i].z; if(i < NFe1) earthMaterialFeCountBody1++; else if(i < NFe1 + NFe2) earthMaterialFeCountBody2++; else if(i < NFe1 + NFe2 + NSi1) earthMaterialSiCountBody1++; else earthMaterialSiCountBody2++; } else if(d < radiusMoonMaterial) { if(i < NFe) mass = MassFe; else mass = MassSi; moonMaterialMass += mass; centerOfMassEarthMoonMaterial.x += mass*Pos[i].x; centerOfMassEarthMoonMaterial.y += mass*Pos[i].y; centerOfMassEarthMoonMaterial.z += mass*Pos[i].z; averageVelocityEarthMoonMaterial.x += mass*Vel[i].x; averageVelocityEarthMoonMaterial.y += mass*Vel[i].y; averageVelocityEarthMoonMaterial.z += mass*Vel[i].z; if(i < NFe1) moonMaterialFeCountBody1++; else if(i < NFe1 + NFe2) moonMaterialFeCountBody2++; else if(i < NFe1 + NFe2 + NSi1) moonMaterialSiCountBody1++; else moonMaterialSiCountBody2++; } } centerOfMassEarthMoonMaterial.x /= (earthMaterialMass + moonMaterialMass); centerOfMassEarthMoonMaterial.y /= (earthMaterialMass + moonMaterialMass); centerOfMassEarthMoonMaterial.z /= (earthMaterialMass + moonMaterialMass); averageVelocityEarthMoonMaterial.x /= (earthMaterialMass + moonMaterialMass); averageVelocityEarthMoonMaterial.y /= (earthMaterialMass + moonMaterialMass); averageVelocityEarthMoonMaterial.z /= (earthMaterialMass + moonMaterialMass); // Getting a rough estimate of how much of the extra material has escape velocity from what we consider will make the Earth-Moon system
float velocity; float escapeVelocity; escapeMaterialMass = 0.0; unusedMaterialMass = 0.0; for(int i = 0; i < N; i++) { r.x = Pos[i].x - centerOfMassEarth.x; r.y = Pos[i].y - centerOfMassEarth.y; r.z = Pos[i].z - centerOfMassEarth.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); if(radiusMoonMaterial <= d) { r.x = Pos[i].x - centerOfMassEarthMoonMaterial.x; r.y = Pos[i].y - centerOfMassEarthMoonMaterial.y; r.z = Pos[i].z - centerOfMassEarthMoonMaterial.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); v.x = Vel[i].x - averageVelocityEarthMoonMaterial.x; v.y = Vel[i].y - averageVelocityEarthMoonMaterial.y; v.z = Vel[i].z - averageVelocityEarthMoonMaterial.z; velocity = sqrt(v.x*v.x + v.y*v.y + v.z*v.z); escapeVelocity = sqrt(2.0*Gravity*(earthMaterialMass + moonMaterialMass)/d); if(velocity >= escapeVelocity) { if(i < NFe) mass = MassFe; else mass = MassSi; escapeMaterialMass += mass; if(i < NFe1) escapeMaterialFeCountBody1++; else if(i < NFe1 + NFe2) escapeMaterialFeCountBody2++; else if(i < NFe1 + NFe2 + NSi1) escapeMaterialSiCountBody1++; else escapeMaterialSiCountBody2++; } else { if(i < NFe) mass = MassFe; else mass = MassSi; unusedMaterialMass += mass; if(i < NFe1) unusedMaterialFeCountBody1++; else if(i < NFe1 + NFe2) unusedMaterialFeCountBody2++; else if(i < NFe1 + NFe2 + NSi1) 
unusedMaterialSiCountBody1++; else unusedMaterialSiCountBody2++; } } } // Finding the angular momentum of the Earth-Moon material // Finding the angular momentum of the Earth material // Finding the angular momentum of the Moon material linearVelocityEarth = getLinearVelocityCollision(2); angularMomentumEarthMoonMaterial.x = 0.0; angularMomentumEarthMoonMaterial.y = 0.0; angularMomentumEarthMoonMaterial.z = 0.0; angularMomentumEarthMaterial.x = 0.0; angularMomentumEarthMaterial.y = 0.0; angularMomentumEarthMaterial.z = 0.0; angularMomentumMoonMaterial.x = 0.0; angularMomentumMoonMaterial.y = 0.0; angularMomentumMoonMaterial.z = 0.0; for(int i = 0; i < N; i++) { r.x = Pos[i].x - centerOfMassEarth.x; r.y = Pos[i].y - centerOfMassEarth.y; r.z = Pos[i].z - centerOfMassEarth.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); if(d < radiusMoonMaterial) { v.x = Vel[i].x - linearVelocityEarth.x; v.y = Vel[i].y - linearVelocityEarth.y; v.z = Vel[i].z - linearVelocityEarth.z; if(i < NFe) { angularMomentumHolder.x = (r.y*v.z - r.z*v.y)*MassFe; angularMomentumHolder.y = -(r.x*v.z - r.z*v.x)*MassFe; angularMomentumHolder.z = (r.x*v.y - r.y*v.x)*MassFe; } else { angularMomentumHolder.x = (r.y*v.z - r.z*v.y)*MassSi; angularMomentumHolder.y = -(r.x*v.z - r.z*v.x)*MassSi; angularMomentumHolder.z = (r.x*v.y - r.y*v.x)*MassSi; } angularMomentumEarthMoonMaterial.x += angularMomentumHolder.x; angularMomentumEarthMoonMaterial.y += angularMomentumHolder.y; angularMomentumEarthMoonMaterial.z += angularMomentumHolder.z; if(d < radiusEarthMaterial) { angularMomentumEarthMaterial.x += angularMomentumHolder.x; angularMomentumEarthMaterial.y += angularMomentumHolder.y; angularMomentumEarthMaterial.z += angularMomentumHolder.z; } else { angularMomentumMoonMaterial.x += angularMomentumHolder.x; angularMomentumMoonMaterial.y += angularMomentumHolder.y; angularMomentumMoonMaterial.z += angularMomentumHolder.z; } } } printf("\n\n\n*************************************************************************\n"); printf("\nThe following are the three stats to feed to the search program\n"); x = angularMomentumEarthMoonMaterial.x*momentumConverter; y = angularMomentumEarthMoonMaterial.y*momentumConverter; z = angularMomentumEarthMoonMaterial.z*momentumConverter; mag = sqrt(x*x + y*y + z*z); printf("\nAngular momentum of the Earth-Moon system = %e", mag); printf("\nRatio Earth mass to Moon mass = %f", earthMaterialMass/moonMaterialMass); printf("\nMoon compotition ratio = %f", (float)(moonMaterialFeCountBody1 + moonMaterialSiCountBody1)/(float)(moonMaterialFeCountBody2 + moonMaterialSiCountBody2)); printf("\n\n\n*************************************************************************\n"); printf("\nThe following are all the continuation stats of the run when time = %f hours\n", time*timeConverter/3600.0); printf("\nDistance is measured in Kilometers"); printf("\nMass is measured in Kilograms"); printf("\nTime is measured in seconds"); printf("\nVelocity is measured in Kilometers/second"); printf("\nAngular momentun is measured in Kilograms*Kilometers*Kilometers/seconds\n"); printf("\nThe radius of Earth = %f", radiusOfEarth*lengthConverter); printf("\nRoche limit = %f", rocheLimit*lengthConverter); printf("\nRoche limit/radius of Earth = %f \n", rocheLimit/radiusOfEarth); x = angularMomentumEarthMoonMaterial.x*momentumConverter; y = angularMomentumEarthMoonMaterial.y*momentumConverter; z = angularMomentumEarthMoonMaterial.z*momentumConverter; printf("\nAngular momentum of the Earth-Moon material = (%e, %e, %e)", x, y, z); mag = sqrt(x*x 
+ y*y + z*z); printf("\nMagnitude of the angular momentum of the Earth-Moon material = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); printf("\nAngle off ecliptic plane of the Earth-Moon's material rotation = %f\n", 90.0 - angle*180.0/Pi); x = angularMomentumEarthMaterial.x*momentumConverter; y = angularMomentumEarthMaterial.y*momentumConverter; z = angularMomentumEarthMaterial.z*momentumConverter; printf("\nAngular momentum of the Earth material = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); printf("\nMagnitude of the angular momentum of the Earth material = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); printf("\nAngle off ecliptic plane of the Earth's material rotation = %f\n", 90.0 - angle*180.0/Pi); x = angularMomentumMoonMaterial.x*momentumConverter; y = angularMomentumMoonMaterial.y*momentumConverter; z = angularMomentumMoonMaterial.z*momentumConverter; printf("\nAngular momentum of the Moon material = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); printf("\nMagnitude of the angular momentum of the Moon material = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); printf("\nAngle off ecliptic plane of the Moon's material rotation = %f\n", 90.0 - angle*180.0/Pi); printf("\nThe mass of Earth material = %e", earthMaterialMass*massConverter); printf("\nThe Earth material count Fe body 1 = %d", earthMaterialFeCountBody1); printf("\nThe Earth material count Fe body 2 = %d", earthMaterialFeCountBody2); printf("\nThe Earth material count Si body 1 = %d", earthMaterialSiCountBody1); printf("\nThe Earth material count Si body 2 = %d", earthMaterialSiCountBody2); printf("\nThe Earth material Body1/Body2 ratio = %f\n", (float)(earthMaterialFeCountBody1 + earthMaterialSiCountBody1)/(float)(earthMaterialFeCountBody2 + earthMaterialSiCountBody2)); printf("\nThe mass of Moon material = %e", moonMaterialMass*massConverter); printf("\nThe Moon material count Fe body 1 = %d", moonMaterialFeCountBody1); printf("\nThe Moon material count Fe body 2 = %d", moonMaterialFeCountBody2); printf("\nThe Moon material count Si body 1 = %d", moonMaterialSiCountBody1); printf("\nThe Moon material count Si body 2 = %d", moonMaterialSiCountBody2); printf("\nThe Moon material Body1/Body2 ratio = %f\n", (float)(moonMaterialFeCountBody1 + moonMaterialSiCountBody1)/(float)(moonMaterialFeCountBody2 + moonMaterialSiCountBody2)); printf("\nThe mass of escape material = %e", escapeMaterialMass*massConverter); printf("\nThe escape material count Fe body 1 = %d", escapeMaterialFeCountBody1); printf("\nThe escape material count Fe body 2 = %d", escapeMaterialFeCountBody2); printf("\nThe escape material count Si body 1 = %d", escapeMaterialSiCountBody1); printf("\nThe escape material count Si body 2 = %d", escapeMaterialSiCountBody2); printf("\nThe escape material Body1/Body2 ratio = %f\n", (float)(escapeMaterialFeCountBody1 + escapeMaterialSiCountBody1)/(float)(escapeMaterialFeCountBody2 + escapeMaterialSiCountBody2)); printf("\nThe mass of unused material = %e", unusedMaterialMass*massConverter); printf("\nThe unused material count Fe body 1 = %d", unusedMaterialFeCountBody1); printf("\nThe unused material count Fe body 2 = %d", unusedMaterialFeCountBody2); printf("\nThe unused material count Si body 1 = %d", unusedMaterialSiCountBody1); printf("\nThe unused material count Si body 2 = %d", unusedMaterialSiCountBody2); printf("\nThe unused material Body1/Body2 ratio = %f\n", 
(float)(unusedMaterialFeCountBody1 + unusedMaterialSiCountBody1)/(float)(unusedMaterialFeCountBody2 + unusedMaterialSiCountBody2)); printf("\n*************************************************************************\n\n\n"); }
void printContinueStatsToFile(double time) { double timeConverter = UnitTime; double lengthConverter = UnitLength; double massConverter = UnitMass; //double velocityConverter = UnitLength/UnitTime;
double momentumConverter = UnitMass*UnitLength*UnitLength/UnitTime; float3 r, v; double d, mass, mag, size, angle, x, y, z; float massEarth; float3 centerOfMassEarth; float3 linearVelocityEarth; float3 centerOfMassEarthMoonMaterial; float3 averageVelocityEarthMoonMaterial; int earthMaterialFeCountBody1 = 0; int earthMaterialFeCountBody2 = 0; int earthMaterialSiCountBody1 = 0; int earthMaterialSiCountBody2 = 0; float earthMaterialMass = 0.0; int moonMaterialFeCountBody1 = 0; int moonMaterialFeCountBody2 = 0; int moonMaterialSiCountBody1 = 0; int moonMaterialSiCountBody2 = 0; float moonMaterialMass = 0.0; int escapeMaterialFeCountBody1 = 0; int escapeMaterialFeCountBody2 = 0; int escapeMaterialSiCountBody1 = 0; int escapeMaterialSiCountBody2 = 0; float escapeMaterialMass = 0.0; int unusedMaterialFeCountBody1 = 0; int unusedMaterialFeCountBody2 = 0; int unusedMaterialSiCountBody1 = 0; int unusedMaterialSiCountBody2 = 0; float unusedMaterialMass = 0.0; float3 angularMomentumEarthMoonMaterial; float3 angularMomentumEarthMaterial; float3 angularMomentumMoonMaterial; //Finding radius of what the current Earth is
findEarthAndMoon(); centerOfMassEarth = getCenterOfMassCollision(2); massEarth = getMassCollision(2); float radiusOfEarth = 0.0; for(int i = 0; i < NumberOfEarthElements; i++) { r.x = Pos[EarthIndex[i]].x - centerOfMassEarth.x; r.y = Pos[EarthIndex[i]].y - centerOfMassEarth.y; r.z = Pos[EarthIndex[i]].z - centerOfMassEarth.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); if(d > radiusOfEarth) radiusOfEarth = d; } // Finding Roche limit and setting sphere to create Earth and sphere to create the Moon
float densityEarth = massEarth/((Pi*4.0/3.0)*radiusOfEarth*radiusOfEarth*radiusOfEarth); float densitySi = MassSi/((Pi*4.0/3.0)*(Diameter/2.0)*(Diameter/2.0)*(Diameter/2.0)); float rocheLimit = 2.44*radiusOfEarth*pow((densityEarth/densitySi),1.0/3.0); float radiusEarthMaterial = rocheLimit; float radiusMoonMaterial = NUMBEROFEARTHRADIFORMOONMATERIAL*radiusOfEarth; // Finding mass of Earth material, Moon Material
// Finding the center of mass and average velocity of the material we estimate will make the Earth-Moon system
// Finding Moon mix and Earth mix
earthMaterialMass = 0.0; moonMaterialMass = 0.0; centerOfMassEarthMoonMaterial.x = 0.0; centerOfMassEarthMoonMaterial.y = 0.0; centerOfMassEarthMoonMaterial.z = 0.0; averageVelocityEarthMoonMaterial.x = 0.0; averageVelocityEarthMoonMaterial.y = 0.0; averageVelocityEarthMoonMaterial.z = 0.0; for(int i = 0; i < N; i++) { r.x = Pos[i].x - centerOfMassEarth.x; r.y = Pos[i].y - centerOfMassEarth.y; r.z = Pos[i].z - centerOfMassEarth.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); if(d < radiusEarthMaterial) { if(i < NFe) mass = MassFe; else mass = MassSi; earthMaterialMass += mass; centerOfMassEarthMoonMaterial.x += mass*Pos[i].x; centerOfMassEarthMoonMaterial.y += mass*Pos[i].y; centerOfMassEarthMoonMaterial.z += mass*Pos[i].z; averageVelocityEarthMoonMaterial.x += mass*Vel[i].x; averageVelocityEarthMoonMaterial.y += mass*Vel[i].y; averageVelocityEarthMoonMaterial.z += mass*Vel[i].z; if(i < NFe1) earthMaterialFeCountBody1++; else if(i < NFe1 + NFe2) 
earthMaterialFeCountBody2++; else if(i < NFe1 + NFe2 + NSi1) earthMaterialSiCountBody1++; else earthMaterialSiCountBody2++; } else if(d < radiusMoonMaterial) { if(i < NFe) mass = MassFe; else mass = MassSi; moonMaterialMass += mass; centerOfMassEarthMoonMaterial.x += mass*Pos[i].x; centerOfMassEarthMoonMaterial.y += mass*Pos[i].y; centerOfMassEarthMoonMaterial.z += mass*Pos[i].z; averageVelocityEarthMoonMaterial.x += mass*Vel[i].x; averageVelocityEarthMoonMaterial.y += mass*Vel[i].y; averageVelocityEarthMoonMaterial.z += mass*Vel[i].z; if(i < NFe1) moonMaterialFeCountBody1++; else if(i < NFe1 + NFe2) moonMaterialFeCountBody2++; else if(i < NFe1 + NFe2 + NSi1) moonMaterialSiCountBody1++; else moonMaterialSiCountBody2++; } } centerOfMassEarthMoonMaterial.x /= (earthMaterialMass + moonMaterialMass); centerOfMassEarthMoonMaterial.y /= (earthMaterialMass + moonMaterialMass); centerOfMassEarthMoonMaterial.z /= (earthMaterialMass + moonMaterialMass); averageVelocityEarthMoonMaterial.x /= (earthMaterialMass + moonMaterialMass); averageVelocityEarthMoonMaterial.y /= (earthMaterialMass + moonMaterialMass); averageVelocityEarthMoonMaterial.z /= (earthMaterialMass + moonMaterialMass); // Getting a rough estimate of how much of the extra material has escape velocity from what we consider will make the Earth-Moon system
float velocity; float escapeVelocity; escapeMaterialMass = 0.0; unusedMaterialMass = 0.0; for(int i = 0; i < N; i++) { r.x = Pos[i].x - centerOfMassEarth.x; r.y = Pos[i].y - centerOfMassEarth.y; r.z = Pos[i].z - centerOfMassEarth.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); if(radiusMoonMaterial <= d) { r.x = Pos[i].x - centerOfMassEarthMoonMaterial.x; r.y = Pos[i].y - centerOfMassEarthMoonMaterial.y; r.z = Pos[i].z - centerOfMassEarthMoonMaterial.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); v.x = Vel[i].x - averageVelocityEarthMoonMaterial.x; v.y = Vel[i].y - averageVelocityEarthMoonMaterial.y; v.z = Vel[i].z - averageVelocityEarthMoonMaterial.z; velocity = sqrt(v.x*v.x + v.y*v.y + v.z*v.z); escapeVelocity = sqrt(2.0*Gravity*(earthMaterialMass + moonMaterialMass)/d); if(velocity >= escapeVelocity) { if(i < NFe) mass = MassFe; else mass = MassSi; escapeMaterialMass += mass; if(i < NFe1) escapeMaterialFeCountBody1++; else if(i < NFe1 + NFe2) escapeMaterialFeCountBody2++; else if(i < NFe1 + NFe2 + NSi1) escapeMaterialSiCountBody1++; else escapeMaterialSiCountBody2++; } else { if(i < NFe) mass = MassFe; else mass = MassSi; unusedMaterialMass += mass; if(i < NFe1) unusedMaterialFeCountBody1++; else if(i < NFe1 + NFe2) unusedMaterialFeCountBody2++; else if(i < NFe1 + NFe2 + NSi1) unusedMaterialSiCountBody1++; else unusedMaterialSiCountBody2++; } } } // Finding the angular momentum of the Earth-Moon material
// Finding the angular momentum of the Earth material
// Finding the angular momentum of the Moon material
linearVelocityEarth = getLinearVelocityCollision(2); angularMomentumEarthMoonMaterial.x = 0.0; angularMomentumEarthMoonMaterial.y = 0.0; angularMomentumEarthMoonMaterial.z = 0.0; angularMomentumEarthMaterial.x = 0.0; angularMomentumEarthMaterial.y = 0.0; angularMomentumEarthMaterial.z = 0.0; angularMomentumMoonMaterial.x = 0.0; angularMomentumMoonMaterial.y = 0.0; angularMomentumMoonMaterial.z = 0.0; for(int i = 0; i < N; i++) { r.x = Pos[i].x - centerOfMassEarth.x; r.y = Pos[i].y - centerOfMassEarth.y; r.z = Pos[i].z - centerOfMassEarth.z; d = sqrt(r.x*r.x + r.y*r.y + r.z*r.z); if(d < radiusMoonMaterial) { v.x = Vel[i].x - linearVelocityEarth.x; v.y = Vel[i].y - linearVelocityEarth.y; v.z = 
Vel[i].z - linearVelocityEarth.z; float3 angularMomentumHolder; if(i < NFe) { angularMomentumHolder.x = (r.y*v.z - r.z*v.y)*MassFe; angularMomentumHolder.y = -(r.x*v.z - r.z*v.x)*MassFe; angularMomentumHolder.z = (r.x*v.y - r.y*v.x)*MassFe; } else { angularMomentumHolder.x = (r.y*v.z - r.z*v.y)*MassSi; angularMomentumHolder.y = -(r.x*v.z - r.z*v.x)*MassSi; angularMomentumHolder.z = (r.x*v.y - r.y*v.x)*MassSi; } angularMomentumEarthMoonMaterial.x += angularMomentumHolder.x; angularMomentumEarthMoonMaterial.y += angularMomentumHolder.y; angularMomentumEarthMoonMaterial.z += angularMomentumHolder.z; if(d < radiusEarthMaterial) { angularMomentumEarthMaterial.x += angularMomentumHolder.x; angularMomentumEarthMaterial.y += angularMomentumHolder.y; angularMomentumEarthMaterial.z += angularMomentumHolder.z; } else { angularMomentumMoonMaterial.x += angularMomentumHolder.x; angularMomentumMoonMaterial.y += angularMomentumHolder.y; angularMomentumMoonMaterial.z += angularMomentumHolder.z; } } } fprintf(ContinueRunStatsFile, "\n\n\n*************************************************************************\n"); fprintf(ContinueRunStatsFile, "\nThe following are the three stats to feed to the search program\n"); x = angularMomentumEarthMoonMaterial.x*momentumConverter; y = angularMomentumEarthMoonMaterial.y*momentumConverter; z = angularMomentumEarthMoonMaterial.z*momentumConverter; mag = sqrt(x*x + y*y + z*z); fprintf(ContinueRunStatsFile, "\nAngular momentum of the Earth-Moon system = %e", mag); fprintf(ContinueRunStatsFile, "\nRatio Earth mass to Moon mass = %f", earthMaterialMass/moonMaterialMass); fprintf(ContinueRunStatsFile, "\nMoon composition ratio = %f", (float)(moonMaterialFeCountBody1 + moonMaterialSiCountBody1)/(float)(moonMaterialFeCountBody2 + moonMaterialSiCountBody2)); fprintf(ContinueRunStatsFile, "\n\n\n*************************************************************************\n"); fprintf(ContinueRunStatsFile, "\nThe following are all the continuation stats of the run when time = %f hours\n", time*timeConverter/3600.0); fprintf(ContinueRunStatsFile, "\nDistance is measured in Kilometers"); fprintf(ContinueRunStatsFile, "\nMass is measured in Kilograms"); fprintf(ContinueRunStatsFile, "\nTime is measured in seconds"); fprintf(ContinueRunStatsFile, "\nVelocity is measured in Kilometers/second"); fprintf(ContinueRunStatsFile, "\nAngular momentum is measured in Kilograms*Kilometers*Kilometers/seconds\n"); fprintf(ContinueRunStatsFile, "\nThe radius of Earth = %f", radiusOfEarth*lengthConverter); fprintf(ContinueRunStatsFile, "\nRoche limit = %f", rocheLimit*lengthConverter); fprintf(ContinueRunStatsFile, "\nRoche limit/radius of Earth = %f \n", rocheLimit/radiusOfEarth); x = angularMomentumEarthMoonMaterial.x*momentumConverter; y = angularMomentumEarthMoonMaterial.y*momentumConverter; z = angularMomentumEarthMoonMaterial.z*momentumConverter; fprintf(ContinueRunStatsFile, "\nAngular momentum of the Earth-Moon material = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); fprintf(ContinueRunStatsFile, "\nMagnitude of the angular momentum of the Earth-Moon material = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); fprintf(ContinueRunStatsFile, "\nAngle off ecliptic plane of the Earth-Moon's material rotation = %f\n", 90.0 - angle*180.0/Pi); x = angularMomentumEarthMaterial.x*momentumConverter; y = angularMomentumEarthMaterial.y*momentumConverter; z = angularMomentumEarthMaterial.z*momentumConverter; fprintf(ContinueRunStatsFile, "\nAngular momentum of the Earth material = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); 
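// Tilt convention for the angles reported here: with size = |L|*sqrt(Lx^2 + Lz^2),
// acos((Lx^2 + Lz^2)/size) is the angle between the angular momentum vector L and the xz (ecliptic)
// plane, so 90 degrees minus that angle is the tilt of the rotation plane away from the ecliptic.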
fprintf(ContinueRunStatsFile, "\nMagnitude of the angular momentum of the Earth material = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); fprintf(ContinueRunStatsFile, "\nAngle off ecliptic plane of the Earth's material rotation = %f\n", 90.0 - angle*180.0/Pi); x = angularMomentumMoonMaterial.x*momentumConverter; y = angularMomentumMoonMaterial.y*momentumConverter; z = angularMomentumMoonMaterial.z*momentumConverter; fprintf(ContinueRunStatsFile, "\nAngular momentum of the Moon material = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); fprintf(ContinueRunStatsFile, "\nMagnitude of the angular momentum of the Moon material = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); fprintf(ContinueRunStatsFile, "\nAngle off ecliptic plane of the Moon's material rotation = %f\n", 90.0 - angle*180.0/Pi); fprintf(ContinueRunStatsFile, "\nThe mass of Earth material = %e", earthMaterialMass*massConverter); fprintf(ContinueRunStatsFile, "\nThe Earth material count Fe body 1 = %d", earthMaterialFeCountBody1); fprintf(ContinueRunStatsFile, "\nThe Earth material count Fe body 2 = %d", earthMaterialFeCountBody2); fprintf(ContinueRunStatsFile, "\nThe Earth material count Si body 1 = %d", earthMaterialSiCountBody1); fprintf(ContinueRunStatsFile, "\nThe Earth material count Si body 2 = %d", earthMaterialSiCountBody2); fprintf(ContinueRunStatsFile, "\nThe Earth material Body1/Body2 ratio = %f\n", (float)(earthMaterialFeCountBody1 + earthMaterialSiCountBody1)/(float)(earthMaterialFeCountBody2 + earthMaterialSiCountBody2)); fprintf(ContinueRunStatsFile, "\nThe mass of Moon material = %e", moonMaterialMass*massConverter); fprintf(ContinueRunStatsFile, "\nThe Moon material count Fe body 1 = %d", moonMaterialFeCountBody1); fprintf(ContinueRunStatsFile, "\nThe Moon material count Fe body 2 = %d", moonMaterialFeCountBody2); fprintf(ContinueRunStatsFile, "\nThe Moon material count Si body 1 = %d", moonMaterialSiCountBody1); fprintf(ContinueRunStatsFile, "\nThe Moon material count Si body 2 = %d", moonMaterialSiCountBody2); fprintf(ContinueRunStatsFile, "\nThe Moon material Body1/Body2 ratio = %f\n", (float)(moonMaterialFeCountBody1 + moonMaterialSiCountBody1)/(float)(moonMaterialFeCountBody2 + moonMaterialSiCountBody2)); fprintf(ContinueRunStatsFile, "\nThe mass of escape material = %e", escapeMaterialMass*massConverter); fprintf(ContinueRunStatsFile, "\nThe escape material count Fe body 1 = %d", escapeMaterialFeCountBody1); fprintf(ContinueRunStatsFile, "\nThe escape material count Fe body 2 = %d", escapeMaterialFeCountBody2); fprintf(ContinueRunStatsFile, "\nThe escape material count Si body 1 = %d", escapeMaterialSiCountBody1); fprintf(ContinueRunStatsFile, "\nThe escape material count Si body 2 = %d", escapeMaterialSiCountBody2); fprintf(ContinueRunStatsFile, "\nThe escape material Body1/Body2 ratio = %f\n", (float)(escapeMaterialFeCountBody1 + escapeMaterialSiCountBody1)/(float)(escapeMaterialFeCountBody2 + escapeMaterialSiCountBody2)); fprintf(ContinueRunStatsFile, "\nThe mass of unused material = %e", unusedMaterialMass*massConverter); fprintf(ContinueRunStatsFile, "\nThe unused material count Fe body 1 = %d", unusedMaterialFeCountBody1); fprintf(ContinueRunStatsFile, "\nThe unused material count Fe body 2 = %d", unusedMaterialFeCountBody2); fprintf(ContinueRunStatsFile, "\nThe unused material count Si body 1 = %d", unusedMaterialSiCountBody1); fprintf(ContinueRunStatsFile, "\nThe unused material count Si body 2 = %d", 
unusedMaterialSiCountBody2); fprintf(ContinueRunStatsFile, "\nThe unused material Body1/Body2 ratio = %f\n", (float)(unusedMaterialFeCountBody1 + unusedMaterialSiCountBody1)/(float)(unusedMaterialFeCountBody2 + unusedMaterialSiCountBody2)); fprintf(ContinueRunStatsFile, "\n*************************************************************************\n\n\n"); } void printCollisionStatsToScreen(double time) { double mag, size, angle, x, y, z; double timeConverter = UnitTime; double lengthConverter = UnitLength; double massConverter = UnitMass; double velocityConverter = UnitLength/UnitTime; double momentumConverter = UnitMass*UnitLength*UnitLength/UnitTime; findEarthAndMoon(); int earthFeCountBody1 = 0; int earthFeCountBody2 = 0; int earthSiCountBody1 = 0; int earthSiCountBody2 = 0; int moonFeCountBody1 = 0; int moonFeCountBody2 = 0; int moonSiCountBody1 = 0; int moonSiCountBody2 = 0; float massUniversalSystem = getMassCollision(0); float massEarthMoonSystem = getMassCollision(1); float massEarth = getMassCollision(2); float massMoon = getMassCollision(3); float3 centerOfMassUniversalSystem = getCenterOfMassCollision(0); float3 centerOfMassEarthMoonSystem = getCenterOfMassCollision(1); float3 centerOfMassEarth = getCenterOfMassCollision(2); float3 centerOfMassMoon = getCenterOfMassCollision(3); float3 linearVelocityUniversalSystem = getLinearVelocityCollision(0); float3 linearVelocityEarthMoonSystem = getLinearVelocityCollision(1); float3 linearVelocityEarth = getLinearVelocityCollision(2); float3 linearVelocityMoon = getLinearVelocityCollision(3); float3 angularMomentumUniversalSystem = getAngularMomentumCollision(0); float3 angularMomentumEarthMoonSystem = getAngularMomentumCollision(1); float3 angularMomentumEarth = getAngularMomentumCollision(2); float3 angularMomentumMoon = getAngularMomentumCollision(3); for(int i = 0; i < NumberOfEarthElements; i++) { if(EarthIndex[i] < NFe1) earthFeCountBody1++; else if(EarthIndex[i] < NFe1 + NFe2) earthFeCountBody2++; else if(EarthIndex[i] < NFe1 + NFe2 + NSi1) earthSiCountBody1++; else earthSiCountBody2++; } for(int i = 0; i < NumberOfMoonElements; i++) { if(MoonIndex[i] < NFe1) moonFeCountBody1++; else if(MoonIndex[i] < NFe1 + NFe2) moonFeCountBody2++; else if(MoonIndex[i] < NFe1 + NFe2 + NSi1) moonSiCountBody1++; else moonSiCountBody2++; } printf("\n\n\n*************************************************************************\n\n\n"); printf("\nThe following are the stats of the run when time = %f hours\n", time*timeConverter/3600.0); printf("\nDistance is measured in Kilometers"); printf("\nMass is measured in Kilograms"); printf("\nTime is measured in seconds"); printf("\nVelocity is measured in Kilometers/second"); printf("\nAngular momentun is measured in Kilograms*Kilometers*Kilometers/seconds\n"); printf("\nThe mass of Earth = %e", massEarth*massConverter); printf("\nThe mass of Moon = %e", massMoon*massConverter); if(massMoon != 0.0) printf("\nThe mass ratio Earth/Moon = %f\n", massEarth/massMoon); printf("\nMoon iron from body 1 = %d", moonFeCountBody1); printf("\nMoon silicate from body 1 = %d", moonSiCountBody1); printf("\nMoon iron from body 2 = %d", moonFeCountBody2); printf("\nMoon silicate from body 2 = %d", moonSiCountBody2); if((moonFeCountBody2 + moonSiCountBody2) == 0) { printf("\nThe Moon is only composed of elements from body 1\n"); } else if((moonFeCountBody1 + moonSiCountBody1) == 0) { printf("\nThe Moon is only composed of elements from body 2\n"); } else { printf("\nMoon ratio body1/body2 = %f\n", (float)(moonFeCountBody1 + 
moonSiCountBody1)/(float)(moonFeCountBody2 + moonSiCountBody2)); } printf("\nEarth iron from body 1 = %d", earthFeCountBody1); printf("\nEarth silicate from body 1 = %d", earthSiCountBody1); printf("\nEarth iron from body 2 = %d", earthFeCountBody2); printf("\nEarth silicate from body 2 = %d", earthSiCountBody2); if((earthFeCountBody2 + earthSiCountBody2) == 0) { printf("\nThe Earth is only composed of elements from body 1\n"); } else if((earthFeCountBody1 + earthSiCountBody1) == 0) { printf("\nThe Earth is only composed of elements from body 2\n"); } else { printf("\nEarth ratio body1/body2 = %f\n", (float)(earthFeCountBody1 + earthSiCountBody1)/(float)(earthFeCountBody2 + earthSiCountBody2)); } //It is always assumed that the ecliptic plane is the xz-plane. x = angularMomentumEarthMoonSystem.x*momentumConverter; y = angularMomentumEarthMoonSystem.y*momentumConverter; z = angularMomentumEarthMoonSystem.z*momentumConverter; printf("\nAngular momentum of the Earth Moon system = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); printf("\nMagnitude of the angular momentum of the system = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); printf("\nAngle off ecliptic plane of the system's rotation = %f\n", 90.0 - angle*180.0/Pi); x = angularMomentumEarth.x*momentumConverter; y = angularMomentumEarth.y*momentumConverter; z = angularMomentumEarth.z*momentumConverter; printf("\nAngular momentum of the Earth = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); printf("\nMagnitude of the angular momentum of the Earth = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); printf("\nAngle off ecliptic plane of the Earth's rotation = %f\n", 90.0 - angle*180.0/Pi); x = angularMomentumMoon.x*momentumConverter; y = angularMomentumMoon.y*momentumConverter; z = angularMomentumMoon.z*momentumConverter; printf("\nAngular momentum of the Moon = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); printf("\nMagnitude of the angular momentum of the Moon = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); printf("\nAngle off ecliptic plane of the Moon's rotation = %f\n", 90.0 - angle*180.0/Pi); x = centerOfMassEarthMoonSystem.x*lengthConverter; y = centerOfMassEarthMoonSystem.y*lengthConverter; z = centerOfMassEarthMoonSystem.z*lengthConverter; printf("\nCenter of mass of the Earth-Moon system = (%f, %f, %f)", x, y, z); x = centerOfMassEarth.x*lengthConverter; y = centerOfMassEarth.y*lengthConverter; z = centerOfMassEarth.z*lengthConverter; printf("\nCenter of mass of the Earth system = (%f, %f, %f)", x, y, z); x = centerOfMassMoon.x*lengthConverter; y = centerOfMassMoon.y*lengthConverter; z = centerOfMassMoon.z*lengthConverter; printf("\nCenter of mass of the Moon system = (%f, %f, %f)\n", x, y, z); x = linearVelocityEarthMoonSystem.x*velocityConverter; y = linearVelocityEarthMoonSystem.y*velocityConverter; z = linearVelocityEarthMoonSystem.z*velocityConverter; printf("\nLinear Velocity of the Earth-Moon system = (%f, %f, %f)", x, y, z); x = linearVelocityEarth.x*velocityConverter; y = linearVelocityEarth.y*velocityConverter; z = linearVelocityEarth.z*velocityConverter; printf("\nLinear Velocity of the Earth system = (%f, %f, %f)", x, y, z); x = linearVelocityMoon.x*velocityConverter; y = linearVelocityMoon.y*velocityConverter; z = linearVelocityMoon.z*velocityConverter; printf("\nLinear Velocity of the Moon system = (%f, %f, %f)\n", x, y, z); printf("\n*****Stats of the entire system 
to check the numerical scheme's validity*****\n"); x = centerOfMassUniversalSystem.x*lengthConverter; y = centerOfMassUniversalSystem.y*lengthConverter; z = centerOfMassUniversalSystem.z*lengthConverter; printf("\nCenter of mass of the entire system = (%f, %f, %f)\n", x, y, z); x = linearVelocityUniversalSystem.x*velocityConverter; y = linearVelocityUniversalSystem.y*velocityConverter; z = linearVelocityUniversalSystem.z*velocityConverter; printf("\nLinear velocity of the entire system system = (%f, %f, %f)", x, y, z); mag = sqrt(x*x + y*y + z*z); printf("\nMagnitude of the linear velocity of the entire system = %f\n", mag); x = angularMomentumUniversalSystem.x*momentumConverter; y = angularMomentumUniversalSystem.y*momentumConverter; z = angularMomentumUniversalSystem.z*momentumConverter; printf("\nAngular momentum of the entire system system = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); printf("\nMagnitude of the angular momentum of the entire system = %e\n", mag); printf("\n*************************************************************************\n"); printf("\n******************* Just the good stuff *********************************\n"); printf("\n percent off correct Earth mass = %f ", 100.0*(massEarth*massConverter/(MassOfEarth))); printf("\n percent off correct Moon mass = %f ", 100.0*(massMoon*massConverter/(MassOfMoon))); printf("\n\n Earth mass percent iron = %f mass percent silicate = %f", float(earthFeCountBody1*MassFe + earthFeCountBody2*MassFe)/massEarth, float(earthSiCountBody1*MassSi + earthSiCountBody2*MassSi)/massEarth); printf("\n Moon mass percent iron = %f mass percent silicate = %f", float(moonFeCountBody1*MassFe + moonFeCountBody2*MassFe)/massMoon, float(moonSiCountBody1*MassSi + moonSiCountBody2*MassSi)/massMoon); if((moonFeCountBody2 + moonSiCountBody2) != 0) { printf("\n\n Moon body1/body2 ratio = %f ", float(moonFeCountBody1*MassFe + moonSiCountBody1*MassSi)/float(moonFeCountBody2*MassFe + moonSiCountBody2*MassSi)); } x = angularMomentumEarthMoonSystem.x*momentumConverter; y = angularMomentumEarthMoonSystem.y*momentumConverter; z = angularMomentumEarthMoonSystem.z*momentumConverter; mag = sqrt(x*x + y*y + z*z); printf("\n Percent off correct angular momentum of the Earth-Moon System = %f ", 100.0*(1.0 - mag/AngularMomentumEarthMoonSystem)); x = angularMomentumEarth.x*momentumConverter; y = angularMomentumEarth.y*momentumConverter; z = angularMomentumEarth.z*momentumConverter; mag = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/mag); printf("\n Percent off correct axial tilt of the Earth = %f ", 100.0*(1.0 - angle/EarthAxialTilt)); printf("\n\n*************************************************************************\n\n\n"); } void recordFinalCollisionStat(double time) { double mag, size, angle, x, y, z; double timeConverter = UnitTime; double lengthConverter = UnitLength; double massConverter = UnitMass; double velocityConverter = UnitLength/UnitTime; double momentumConverter = UnitMass*UnitLength*UnitLength/UnitTime; findEarthAndMoon(); int earthFeCountBody1 = 0; int earthFeCountBody2 = 0; int earthSiCountBody1 = 0; int earthSiCountBody2 = 0; int moonFeCountBody1 = 0; int moonFeCountBody2 = 0; int moonSiCountBody1 = 0; int moonSiCountBody2 = 0; float massUniversalSystem = getMassCollision(0); float massEarthMoonSystem = getMassCollision(1); float massEarth = getMassCollision(2); float massMoon = getMassCollision(3); float3 centerOfMassUniversalSystem = getCenterOfMassCollision(0); float3 centerOfMassEarthMoonSystem = 
getCenterOfMassCollision(1); float3 centerOfMassEarth = getCenterOfMassCollision(2); float3 centerOfMassMoon = getCenterOfMassCollision(3); float3 linearVelocityUniversalSystem = getLinearVelocityCollision(0); float3 linearVelocityEarthMoonSystem = getLinearVelocityCollision(1); float3 linearVelocityEarth = getLinearVelocityCollision(2); float3 linearVelocityMoon = getLinearVelocityCollision(3); float3 angularMomentumUniversalSystem = getAngularMomentumCollision(0); float3 angularMomentumEarthMoonSystem = getAngularMomentumCollision(1); float3 angularMomentumEarth = getAngularMomentumCollision(2); float3 angularMomentumMoon = getAngularMomentumCollision(3); for(int i = 0; i < NumberOfEarthElements; i++) { if(EarthIndex[i] < NFe1) earthFeCountBody1++; else if(EarthIndex[i] < NFe1 + NFe2) earthFeCountBody2++; else if(EarthIndex[i] < NFe1 + NFe2 + NSi1) earthSiCountBody1++; else earthSiCountBody2++; } for(int i = 0; i < NumberOfMoonElements; i++) { if(MoonIndex[i] < NFe1) moonFeCountBody1++; else if(MoonIndex[i] < NFe1 + NFe2) moonFeCountBody2++; else if(MoonIndex[i] < NFe1 + NFe2 + NSi1) moonSiCountBody1++; else moonSiCountBody2++; } fprintf(RunStatsFile,"\n\n\n*************************************************************************\n\n"); fprintf(RunStatsFile,"\nThe following are the final stats of the run when time = %f hours\n", time*timeConverter/3600.0); fprintf(RunStatsFile,"\nDistance is measured in Kilometers"); fprintf(RunStatsFile,"\nMass is measured in Kilograms"); fprintf(RunStatsFile,"\nTime is measured in seconds"); fprintf(RunStatsFile,"\nVelocity is measured in Kilometers/second"); fprintf(RunStatsFile,"\nAngular momentun is measured in Kilograms*Kilometers*Kilometers/seconds\n"); fprintf(RunStatsFile,"\nThe mass of Earth = %e", massEarth*massConverter); fprintf(RunStatsFile,"\nThe mass of Moon = %e", massMoon*massConverter); if(massMoon != 0.0) fprintf(RunStatsFile,"\nThe mass ratio Earth/Moon = %f\n", massEarth/massMoon); fprintf(RunStatsFile,"\nMoon iron from body 1 = %d", moonFeCountBody1); fprintf(RunStatsFile,"\nMoon silicate from body 1 = %d", moonSiCountBody1); fprintf(RunStatsFile,"\nMoon iron from body 2 = %d", moonFeCountBody2); fprintf(RunStatsFile,"\nMoon silicate from body 2 = %d", moonSiCountBody2); if((moonFeCountBody2 + moonSiCountBody2) == 0) { fprintf(RunStatsFile,"\nThe Moon is only composed of elements from body 1\n"); } else if((moonFeCountBody1 + moonSiCountBody1) == 0) { fprintf(RunStatsFile,"\nThe Moon is only composed of elements from body 2\n"); } else { fprintf(RunStatsFile,"\nMoon ratio body1/body2 = %f\n", (float)(moonFeCountBody1 + moonSiCountBody1)/(float)(moonFeCountBody2 + moonSiCountBody2)); } fprintf(RunStatsFile,"\nEarth iron from body 1 = %d", earthFeCountBody1); fprintf(RunStatsFile,"\nEarth silicate from body 1 = %d", earthSiCountBody1); fprintf(RunStatsFile,"\nEarth iron from body 2 = %d", earthFeCountBody2); fprintf(RunStatsFile,"\nEarth silicate from body 2 = %d", earthSiCountBody2); if((earthFeCountBody2 + earthSiCountBody2) == 0) { fprintf(RunStatsFile,"\nThe Earth is only composed of elements from body 1\n"); } else if((earthFeCountBody1 + earthSiCountBody1) == 0) { fprintf(RunStatsFile,"\nThe Earth is only composed of elements from body 2\n"); } else { fprintf(RunStatsFile,"\nEarth ratio body1/body2 = %f\n", (float)(earthFeCountBody1 + earthSiCountBody1)/(float)(earthFeCountBody2 + earthSiCountBody2)); } //It is always assumed that the ecliptic plane is the xz-plane. 
x = angularMomentumEarthMoonSystem.x*momentumConverter; y = angularMomentumEarthMoonSystem.y*momentumConverter; z = angularMomentumEarthMoonSystem.z*momentumConverter; fprintf(RunStatsFile,"\nAngular momentum of the Earth Moon system = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); fprintf(RunStatsFile,"\nMagnitude of the angular momentum of the system = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); fprintf(RunStatsFile,"\nAngle off ecliptic plane of the system's rotation = %f\n", 90.0 - angle*180.0/Pi); x = angularMomentumEarth.x*momentumConverter; y = angularMomentumEarth.y*momentumConverter; z = angularMomentumEarth.z*momentumConverter; fprintf(RunStatsFile,"\nAngular momentum of the Earth = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); fprintf(RunStatsFile,"\nMagnitude of the angular momentum of the Earth = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); fprintf(RunStatsFile,"\nAngle off ecliptic plane of the Earth's rotation = %f\n", 90.0 - angle*180.0/Pi); x = angularMomentumMoon.x*momentumConverter; y = angularMomentumMoon.y*momentumConverter; z = angularMomentumMoon.z*momentumConverter; fprintf(RunStatsFile,"\nAngular momentum of the Moon = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); fprintf(RunStatsFile,"\nMagnitude of the angular momentum of the Moon = %e", mag); size = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/size); fprintf(RunStatsFile,"\nAngle off ecliptic plane of the Moon's rotation = %f\n", 90.0 - angle*180.0/Pi); x = centerOfMassEarthMoonSystem.x*lengthConverter; y = centerOfMassEarthMoonSystem.y*lengthConverter; z = centerOfMassEarthMoonSystem.z*lengthConverter; fprintf(RunStatsFile,"\nCenter of mass of the Earth-Moon system = (%f, %f, %f)", x, y, z); x = centerOfMassEarth.x*lengthConverter; y = centerOfMassEarth.y*lengthConverter; z = centerOfMassEarth.z*lengthConverter; fprintf(RunStatsFile,"\nCenter of mass of the Earth system = (%f, %f, %f)", x, y, z); x = centerOfMassMoon.x*lengthConverter; y = centerOfMassMoon.y*lengthConverter; z = centerOfMassMoon.z*lengthConverter; fprintf(RunStatsFile,"\nCenter of mass of the Moon system = (%f, %f, %f)\n", x, y, z); x = linearVelocityEarthMoonSystem.x*velocityConverter; y = linearVelocityEarthMoonSystem.y*velocityConverter; z = linearVelocityEarthMoonSystem.z*velocityConverter; fprintf(RunStatsFile,"\nLinear Velocity of the Earth-Moon system = (%f, %f, %f)", x, y, z); x = linearVelocityEarth.x*velocityConverter; y = linearVelocityEarth.y*velocityConverter; z = linearVelocityEarth.z*velocityConverter; fprintf(RunStatsFile,"\nLinear Velocity of the Earth system = (%f, %f, %f)", x, y, z); x = linearVelocityMoon.x*velocityConverter; y = linearVelocityMoon.y*velocityConverter; z = linearVelocityMoon.z*velocityConverter; fprintf(RunStatsFile,"\nLinear Velocity of the Moon system = (%f, %f, %f)\n", x, y, z); fprintf(RunStatsFile,"\n*****Stats of the entire system to check the numerical scheme's validity*****\n"); x = centerOfMassUniversalSystem.x*lengthConverter; y = centerOfMassUniversalSystem.y*lengthConverter; z = centerOfMassUniversalSystem.z*lengthConverter; fprintf(RunStatsFile,"\nCenter of mass of the entire system = (%f, %f, %f)\n", x, y, z); x = linearVelocityUniversalSystem.x*velocityConverter; y = linearVelocityUniversalSystem.y*velocityConverter; z = linearVelocityUniversalSystem.z*velocityConverter; fprintf(RunStatsFile,"\nLinear velocity of the entire system system = (%f, %f, %f)", x, 
y, z); mag = sqrt(x*x + y*y + z*z); fprintf(RunStatsFile,"\nMagnitude of the linear velocity of the entire system = %f\n", mag); x = angularMomentumUniversalSystem.x*momentumConverter; y = angularMomentumUniversalSystem.y*momentumConverter; z = angularMomentumUniversalSystem.z*momentumConverter; fprintf(RunStatsFile,"\nAngular momentum of the entire system system = (%e, %e, %e)", x, y, z); mag = sqrt(x*x + y*y + z*z); fprintf(RunStatsFile,"\nMagnitude of the angular momentum of the entire system = %e\n", mag); fprintf(RunStatsFile,"\n*************************************************************************\n"); fprintf(RunStatsFile,"\n******************* Just the good stuff *********************************\n"); fprintf(RunStatsFile,"\n percent off correct Earth mass = %f ", 100.0*(massEarth*massConverter/(MassOfEarth))); fprintf(RunStatsFile,"\n percent off correct Moon mass = %f ", 100.0*(massMoon*massConverter/(MassOfMoon))); fprintf(RunStatsFile,"\n\n Earth mass percent iron = %f mass percent silicate = %f", float(earthFeCountBody1*MassFe + earthFeCountBody2*MassFe)/massEarth, float(earthSiCountBody1*MassSi + earthSiCountBody2*MassSi)/massEarth); fprintf(RunStatsFile,"\n Moon mass percent iron = %f mass percent silicate = %f", float(moonFeCountBody1*MassFe + moonFeCountBody2*MassFe)/massMoon, float(moonSiCountBody1*MassSi + moonSiCountBody2*MassSi)/massMoon); if((moonFeCountBody2 + moonSiCountBody2) != 0) { fprintf(RunStatsFile,"\n\n Moon body1/body2 ratio = %f ", float(moonFeCountBody1*MassFe + moonSiCountBody1*MassSi)/float(moonFeCountBody2*MassFe + moonSiCountBody2*MassSi)); } x = angularMomentumEarthMoonSystem.x*momentumConverter; y = angularMomentumEarthMoonSystem.y*momentumConverter; z = angularMomentumEarthMoonSystem.z*momentumConverter; mag = sqrt(x*x + y*y + z*z); fprintf(RunStatsFile,"\n Percent off correct angular momentum of the Earth-Moon System = %f ", 100.0*(1.0 - mag/AngularMomentumEarthMoonSystem)); x = angularMomentumEarth.x*momentumConverter; y = angularMomentumEarth.y*momentumConverter; z = angularMomentumEarth.z*momentumConverter; mag = sqrt(x*x + y*y + z*z) * sqrt(x*x + z*z); angle = acos((x*x + z*z)/mag); fprintf(RunStatsFile,"\n Percent off correct axial tilt of the Earth = %f ", 100.0*(1.0 - angle/EarthAxialTilt)); fprintf(RunStatsFile,"\n\n*************************************************************************\n\n\n"); } void recordPosAndVel() { fwrite(Pos, sizeof(float4), N, PosAndVelFile); fwrite(Vel, sizeof(float4), N, PosAndVelFile); } void recordContinuePosAndVel(double time) { fwrite(&time, sizeof(double), 1, ContinueRunPosAndVelFile); fwrite(Pos, sizeof(float4), N, ContinueRunPosAndVelFile); fwrite(Vel, sizeof(float4), N, ContinueRunPosAndVelFile); } void drawSimplePictureSeperate() { float3 centerOfMass1 = getCenterOfMassSeperate(1); float3 centerOfMass2 = getCenterOfMassSeperate(2); float3 linearVelocity1 = getLinearVelocitySeperate(1); float3 linearVelocity2 = getLinearVelocitySeperate(2); float3 angularMomentum1 = getAngularMomentumSeperate(1, centerOfMass1, linearVelocity1); float3 angularMomentum2 = getAngularMomentumSeperate(2, centerOfMass2, linearVelocity2); float Stretch; glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); //Coloring all the elements glBegin(GL_POINTS); for(int i=0; i<N; i++) { if(i < NFe1) { glColor3d(1.0,0.0,0.0); } else if(i < NFe1 + NSi1) { glColor3d(1.0,1.0,0.5); } else if(i < NFe1 + NSi1 + NFe2) { glColor3d(1.0,0.0,1.0); } else { glColor3d(0.0,0.5,0.0); } glVertex3f(Pos[i].x, Pos[i].y, Pos[i].z); } glEnd(); 
glLineWidth(1.0); //Placing a green vector in the direction of the disired linear motion of each body glColor3f(0.0,1.0,0.0); Stretch = 1.0; glBegin(GL_LINE_LOOP); glVertex3f(centerOfMass1.x, centerOfMass1.y, centerOfMass1.z); glVertex3f(centerOfMass1.x + InitialVelocity1.x*Stretch, centerOfMass1.y + InitialVelocity1.y*Stretch, centerOfMass1.z + InitialVelocity1.z*Stretch); glEnd(); glBegin(GL_LINE_LOOP); glVertex3f(centerOfMass2.x, centerOfMass2.y, centerOfMass2.z); glVertex3f(centerOfMass2.x + InitialVelocity2.x*Stretch, centerOfMass2.y + InitialVelocity2.y*Stretch, centerOfMass2.z + InitialVelocity2.z*Stretch); glEnd(); //Placing a yellow vector in the direction of the actual linear motion of each body glColor3f(1.0,1.0,0.0); Stretch = 30.0; glBegin(GL_LINE_LOOP); glVertex3f(centerOfMass1.x, centerOfMass1.y, centerOfMass1.z); glVertex3f(centerOfMass1.x + linearVelocity1.x*Stretch, centerOfMass1.y + linearVelocity1.y*Stretch, centerOfMass1.z + linearVelocity1.z*Stretch); glEnd(); glBegin(GL_LINE_LOOP); glVertex3f(centerOfMass2.x, centerOfMass2.y, centerOfMass2.z); glVertex3f(centerOfMass2.x + linearVelocity2.x*Stretch, centerOfMass2.y + linearVelocity2.y*Stretch, centerOfMass2.z + linearVelocity2.z*Stretch); glEnd(); //Placing a blue vector in the direction of the disired angular momentum glColor3f(0.0,0.0,1.0); Stretch = 50.0; glBegin(GL_LINE_LOOP); glVertex3f(centerOfMass1.x, centerOfMass1.y, centerOfMass1.z); glVertex3f(centerOfMass1.x + InitialSpin1.x*Stretch, centerOfMass1.y + InitialSpin1.y*Stretch, centerOfMass1.z + InitialSpin1.z*Stretch); glEnd(); glBegin(GL_LINE_LOOP); glVertex3f(centerOfMass2.x, centerOfMass2.y, centerOfMass2.z); glVertex3f(centerOfMass2.x + InitialSpin2.x*Stretch, centerOfMass2.y + InitialSpin2.y*Stretch, centerOfMass2.z + InitialSpin2.z*Stretch); glEnd(); //Placing a red vector in the direction of the actual angular momentum glColor3f(1.0,0.0,0.0); Stretch = 50.0; glBegin(GL_LINE_LOOP); glVertex3f(centerOfMass1.x, centerOfMass1.y, centerOfMass1.z); glVertex3f(centerOfMass1.x + angularMomentum1.x*Stretch, centerOfMass1.y + angularMomentum1.y*Stretch, centerOfMass1.z + angularMomentum1.z*Stretch); glEnd(); glBegin(GL_LINE_LOOP); glVertex3f(centerOfMass2.x, centerOfMass2.y, centerOfMass2.z); glVertex3f(centerOfMass2.x + angularMomentum2.x*Stretch, centerOfMass2.y + angularMomentum2.y*Stretch, centerOfMass2.z + angularMomentum2.z*Stretch); glEnd(); glutSwapBuffers(); } void drawPictureCollision() { glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glBegin(GL_POINTS); for(int i=0; i<N; i++) { if(i < NFe1) { glColor3d(1.0,0.0,0.0); } else if(i < NFe1 + NFe2) { glColor3d(1.0,0.0,1.0); } else if(i < NFe1 + NFe2 + NSi1) { glColor3d(1.0,1.0,0.5); } else { glColor3d(0.0,0.5,0.0); } glVertex3f(Pos[i].x, Pos[i].y, Pos[i].z); } glEnd(); glutSwapBuffers(); } void drawAnalysisPictureCollision() { int i; findEarthAndMoon(); float massSystem = getMassCollision(0); float massEarth = getMassCollision(1); float massMoon = getMassCollision(2); float3 centerOfMassSystem = getCenterOfMassCollision(0); float3 centerOfMassEarth = getCenterOfMassCollision(1); float3 centerOfMassMoon = getCenterOfMassCollision(2); float3 linearVelocitySystem = getLinearVelocityCollision(0); float3 linearVelocityEarth = getLinearVelocityCollision(1); float3 linearVelocityMoon = getLinearVelocityCollision(2); float3 angularMomentumSystem = getAngularMomentumCollision(0); float3 angularMomentumEarth = getAngularMomentumCollision(1); float3 angularMomentumMoon = getAngularMomentumCollision(2); float 
Stretch; glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); //Coloring all the elements glPointSize(1.0); glBegin(GL_POINTS); for(i=0; i<N; i++) { if(i < NFe1) { glColor3d(1.0,0.0,0.0); } else if(i < NFe1 + NFe2) { glColor3d(1.0,0.0,1.0); } else if(i < NFe1 + NFe2 + NSi1) { glColor3d(1.0,1.0,0.5); } else { glColor3d(0.0,0.5,0.0); } glVertex3f(Pos[i].x, Pos[i].y, Pos[i].z); } glEnd(); glPointSize(1.0); //Recoloring the Earth elements blue glColor3d(0.0,0.0,1.0); glBegin(GL_POINTS); for(i = 0; i < NumberOfEarthElements; i++) { glVertex3f(Pos[EarthIndex[i]].x, Pos[EarthIndex[i]].y, Pos[EarthIndex[i]].z); } glEnd(); //Recoloring the Moon elements red glColor3d(1.0,0.0,0.0); glBegin(GL_POINTS); for(i = 0; i < NumberOfMoonElements; i++) { glVertex3f(Pos[MoonIndex[i]].x, Pos[MoonIndex[i]].y, Pos[MoonIndex[i]].z); } glEnd(); glLineWidth(1.0); //Placing green vectors in the direction of linear velocity of the Moon Stretch = 1.0; glColor3f(0.0,1.0,0.0); glBegin(GL_LINE_LOOP); glVertex3f(centerOfMassMoon.x, centerOfMassMoon.y, centerOfMassMoon.z); glVertex3f( centerOfMassMoon.x + linearVelocityMoon.x*Stretch, centerOfMassMoon.y + linearVelocityMoon.y*Stretch, centerOfMassMoon.z + linearVelocityMoon.z*Stretch); glEnd(); //Place a white point at the center of mass of the Earth-Moon system glColor3d(1.0,1.0,1.0); glPointSize(10.0); glBegin(GL_POINTS); glVertex3f(centerOfMassSystem.x, centerOfMassSystem.y, centerOfMassSystem.z); glEnd(); //Place a yellow point at the center of mass of the Earth glColor3d(1.0,1.0,0.0); glPointSize(5.0); glBegin(GL_POINTS); glVertex3f(centerOfMassEarth.x, centerOfMassEarth.y, centerOfMassEarth.z); glEnd(); //Place a yellow point at the center of mass of the Moon glColor3d(1.0,1.0,0.0); glPointSize(5.0); glBegin(GL_POINTS); glVertex3f(centerOfMassMoon.x, centerOfMassMoon.y, centerOfMassMoon.z); glEnd(); //Placing white vectors in the direction of the angular momentum of the Earth-Moon system glColor3f(1.0,1.0,1.0); Stretch = 1.0; glBegin(GL_LINE_LOOP); glVertex3f(centerOfMassSystem.x, centerOfMassSystem.y, centerOfMassSystem.z); glVertex3f( centerOfMassSystem.x + angularMomentumSystem.x*Stretch/massSystem, centerOfMassSystem.y + angularMomentumSystem.y*Stretch/massSystem, centerOfMassSystem.z + angularMomentumSystem.z*Stretch/massSystem); glEnd(); //Placing blue vectors in the direction of the angular momentum of the Earth Stretch = 1.0; glBegin(GL_LINE_LOOP); glColor3f(0.0,0.0,1.0); glVertex3f(centerOfMassEarth.x, centerOfMassEarth.y, centerOfMassEarth.z); glVertex3f( centerOfMassEarth.x + angularMomentumEarth.x*Stretch/massEarth, centerOfMassEarth.y + angularMomentumEarth.y*Stretch/massEarth, centerOfMassEarth.z + angularMomentumEarth.z*Stretch/massEarth); glEnd(); //Placing red vectors in the direction of the angular momentum of the Moon Stretch = 1.0; glColor3f(1.0,0.0,0.0); glBegin(GL_LINE_LOOP); glVertex3f(centerOfMassMoon.x, centerOfMassMoon.y, centerOfMassMoon.z); glVertex3f( centerOfMassMoon.x + angularMomentumMoon.x*Stretch/massMoon, centerOfMassMoon.y + angularMomentumMoon.y*Stretch/massMoon, centerOfMassMoon.z + angularMomentumMoon.z*Stretch/massMoon); glEnd(); glutSwapBuffers(); free(EarthIndex); free(MoonIndex); } void transformInitialConditionsFromSeperateToCollision() { int k; cudaMemcpy( PlaceHolder, Pos_DEV0, N *sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpy Pos2"); k = 0; for(int i = 0; i < NFe1; i++) { Pos[k] = PlaceHolder[i]; k++; } for(int i = NFe1 + NSi1; i < NFe1 + NSi1 + NFe2; i++) { Pos[k] = PlaceHolder[i]; k++; } for(int i = 
NFe1; i < NFe1 + NSi1; i++) { Pos[k] = PlaceHolder[i]; k++; } for(int i = NFe1 + NSi1 + NFe2; i < N; i++) { Pos[k] = PlaceHolder[i]; k++; } cudaMemcpy( PlaceHolder, Vel_DEV0, N *sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpy Vel"); k = 0; for(int i = 0; i < NFe1; i++) { Vel[k] = PlaceHolder[i]; k++; } for(int i = NFe1 + NSi1; i < NFe1 + NSi1 + NFe2; i++) { Vel[k] = PlaceHolder[i]; k++; } for(int i = NFe1; i < NFe1 + NSi1; i++) { Vel[k] = PlaceHolder[i]; k++; } for(int i = NFe1 + NSi1 + NFe2; i < N; i++) { Vel[k] = PlaceHolder[i]; k++; } } void nBodySeperate() { float time = 0.0; int tdraw = 1; int dampCheck = 0; int rest1Check = 0; int spinCheck = 0; cudaMemcpy( Pos_DEV0, Pos, N *sizeof(float4), cudaMemcpyHostToDevice ); errorCheck("cudaMemcpy Pos3"); cudaMemcpy( Vel_DEV0, Vel, N *sizeof(float4), cudaMemcpyHostToDevice ); errorCheck("cudaMemcpy Vel"); while(time < SetupTime) { getForcesSeperate<<<GridConfig, BlockConfig>>>(Pos_DEV0, Vel_DEV0, Force_DEV0, ForceSeperateConstant); if(time < DampTime) { if(dampCheck == 0) { printf("\n************************************************** Damping is on\n"); dampCheck = 1; tdraw = 0; } moveBodiesDampedSeperate<<<GridConfig, BlockConfig>>>(Pos_DEV0, Vel_DEV0, Force_DEV0, MoveSeperateConstant, DampRateBody1, DampRateBody2); } else if(time < DampTime + DampRestTime) { if(rest1Check == 0) { printf("\n************************************************** Damp rest stage is on\n"); rest1Check = 1; tdraw = 0; } moveBodiesSeperate<<<GridConfig, BlockConfig>>>(Pos_DEV0, Vel_DEV0, Force_DEV0, MoveSeperateConstant); } else { if(spinCheck == 0) { cudaMemcpy( Pos, Pos_DEV0, N *sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpy Pos4"); cudaMemcpy( Vel, Vel_DEV0, N *sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpy Vel"); spinBodySeperate(1, InitialSpin1); spinBodySeperate(2, InitialSpin2); cudaMemcpy( Pos_DEV0, Pos, N *sizeof(float4), cudaMemcpyHostToDevice ); errorCheck("cudaMemcpy Pos5"); cudaMemcpy( Vel_DEV0, Vel, N *sizeof(float4), cudaMemcpyHostToDevice ); errorCheck("cudaMemcpy Vel"); printf("\n************************************************** bodies have been spun\n"); printf("\n************************************************** spin rest stage is on\n"); spinCheck = 1; } moveBodiesSeperate<<<GridConfig, BlockConfig>>>(Pos_DEV0, Vel_DEV0, Force_DEV0, MoveSeperateConstant); } if(tdraw == DrawRate) { cudaMemcpy( Pos, Pos_DEV0, N *sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpy Pos6"); cudaMemcpy( Vel, Vel_DEV0, N *sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpy Vel"); drawSimplePictureSeperate(); //drawPictureSeperate(); printf("\nSetup time in hours = %f\n", time*UnitTime/3600.0); tdraw = 0; } tdraw++; time += Dt; } } void resetInitialConditions() { cudaMemcpy( Pos, Pos_DEV0, N *sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpy Pos7"); cudaMemcpy( Vel, Vel_DEV0, N *sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpy Vel"); setBodyPositionSeperate(1, InitialPosition1.x, InitialPosition1.y, InitialPosition1.z); setBodyVelocitySeperate(1, InitialVelocity1.x, InitialVelocity1.y, InitialVelocity1.z); setBodyPositionSeperate(2, InitialPosition2.x, InitialPosition2.y, InitialPosition2.z); setBodyVelocitySeperate(2, InitialVelocity2.x, InitialVelocity2.y, InitialVelocity2.z); printf("\n************************************************** Initial velocities have been given\n"); cudaMemcpy( Pos_DEV0, Pos, N *sizeof(float4), cudaMemcpyHostToDevice ); 
errorCheck("cudaMemcpy Pos8"); cudaMemcpy( Vel_DEV0, Vel, N *sizeof(float4), cudaMemcpyHostToDevice ); errorCheck("cudaMemcpy Vel"); printf("\n************************************************** The bodies have been created and intialized\n"); } void copyCreatedBodiesUpToDevice() { if(NumberOfGpus == 1 || UseMultipleGPU == 0) { cudaMemcpy( Pos_DEV0, Pos, N *sizeof(float4), cudaMemcpyHostToDevice ); errorCheck("cudaMemcpy Pos9"); cudaMemcpy( Vel_DEV0, Vel, N *sizeof(float4), cudaMemcpyHostToDevice ); errorCheck("cudaMemcpy Vel"); } else { cudaSetDevice(0); errorCheck("cudaSetDevice 0"); cudaMemcpyAsync( PosFstHalf_0, Pos, (N/2)*sizeof(float4), cudaMemcpyHostToDevice ); errorCheck("cudaMemcpyAsync PosFstHalf 0"); cudaMemcpyAsync( PosSndHalf_0, Pos+(N/2), (N/2)*sizeof(float4), cudaMemcpyHostToDevice ); errorCheck("cudaMemcpyAsync PosSndHalf 0"); cudaMemcpyAsync( VelFstHalf_0, Vel, (N/2)*sizeof(float4), cudaMemcpyHostToDevice ); errorCheck("cudaMemcpyAsync VelFstHalf 0"); cudaMemcpyAsync( VelSndHalf_0, Vel+(N/2), (N/2)*sizeof(float4), cudaMemcpyHostToDevice ); errorCheck("cudaMemcpyAsync VelSndHalf 0"); cudaSetDevice(1); errorCheck("cudaSetDevice 0"); cudaMemcpyAsync( PosFstHalf_1, Pos, (N/2)*sizeof(float4), cudaMemcpyHostToDevice ); errorCheck("cudaMemcpyAsync PosFstHalf 0"); cudaMemcpyAsync( PosSndHalf_1, Pos+(N/2), (N/2)*sizeof(float4), cudaMemcpyHostToDevice ); errorCheck("cudaMemcpyAsync PosSndHalf 0"); cudaMemcpyAsync( VelFstHalf_1, Vel, (N/2)*sizeof(float4), cudaMemcpyHostToDevice ); errorCheck("cudaMemcpyAsync VelFstHalf 0"); cudaMemcpyAsync( VelSndHalf_1, Vel+(N/2), (N/2)*sizeof(float4), cudaMemcpyHostToDevice ); errorCheck("cudaMemcpyAsync VelSndHalf 0"); } } double nBodyCollisionSingleGPU() { int tDraw = 1; int tRecord = 1; while(RunTime <= TotalRunTime) { getForcesCollisionSingleGPU<<<GridConfig, BlockConfig>>>(Pos_DEV0, Vel_DEV0, Force_DEV0, ForceCollisionConstant); moveBodiesCollisionSingleGPU<<<GridConfig, BlockConfig>>>(Pos_DEV0, Vel_DEV0, Force_DEV0, MoveCollisionConstant); if(tDraw == DrawRate) { cudaMemcpy( Pos, Pos_DEV0, N *sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Pos"); cudaMemcpy( Vel, Vel_DEV0, N *sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Vel"); if (DrawQuality == 1) drawAnalysisPictureCollision(); else if (DrawQuality == 2) drawPictureCollision(); else { printf("\nTSU Error: Invalid draw quality\n"); exit(0); } tDraw = 0; printf("\nCollision run time = %f hours\n", RunTime*UnitTime/3600.0); } tDraw++; if(PrintCollisionStats == 1) { cudaMemcpy( Pos, Pos_DEV0, N *sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Pos"); cudaMemcpy( Vel, Vel_DEV0, N *sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Vel"); printCollisionStatsToScreen(RunTime); PrintCollisionStats = 0; } if(PrintContinueStats == 1) { cudaMemcpy( Pos, Pos_DEV0, N *sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Pos"); cudaMemcpy( Vel, Vel_DEV0, N *sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Vel"); printContinueStatsToScreen(RunTime); PrintContinueStats = 0; } if(WriteToFile == 1 && tRecord == RecordRate) { cudaMemcpy( Pos, Pos_DEV0, N *sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Pos"); cudaMemcpy( Vel, Vel_DEV0, N *sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Vel"); recordPosAndVel(); tRecord = 0; } tRecord++; RunTime += Dt; } RunTime = RunTime - Dt; cudaMemcpy( Pos, Pos_DEV0, N *sizeof(float4), cudaMemcpyDeviceToHost 
); errorCheck("cudaMemcpyAsync Pos"); cudaMemcpy( Vel, Vel_DEV0, N *sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Vel"); return(RunTime); } double nBodyCollisionDoubleGPU() { int tDraw = 1; int tRecord = 1; cout << "\nCollision run time start = " << RunTime*UnitTime/3600.0 << " hours." << endl; while(RunTime <= TotalRunTime) { cudaSetDevice(0); errorCheck("cudaSetDevice 0"); getForcesCollisionDoubleGPU0<<<GridConfig, BlockConfig>>>(PosFstHalf_0, PosSndHalf_0, VelFstHalf_0, VelSndHalf_0, ForceFstHalf_0, N, ForceCollisionConstant); errorCheck("getForcesCollisionDoubleGPU 0"); moveBodiesCollisionDoubleGPU0<<<GridConfig, BlockConfig>>>(PosFstHalf_0, VelFstHalf_0, ForceFstHalf_0, N, MoveCollisionConstant); errorCheck("moveBodiesCollisionDoubleGPU 0"); cudaSetDevice(1); errorCheck("cudaSetDevice 1"); getForcesCollisionDoubleGPU1<<<GridConfig, BlockConfig>>>(PosFstHalf_1, PosSndHalf_1, VelFstHalf_1, VelSndHalf_1, ForceSndHalf_1, N, ForceCollisionConstant); errorCheck("getForcesCollisionDoubleGPU 1"); moveBodiesCollisionDoubleGPU1<<<GridConfig, BlockConfig>>>(PosSndHalf_1, VelSndHalf_1, ForceSndHalf_1, N, MoveCollisionConstant); errorCheck("moveBodiesCollisionDoubleGPU 1"); cudaDeviceSynchronize(); errorCheck("cudaDeviceSynchronize 1"); cudaSetDevice(0); errorCheck("cudaSetDevice 0"); cudaMemcpyPeerAsync(PosFstHalf_1,1,PosFstHalf_0,0,(N/2)*sizeof(float4)); errorCheck("cudaMemcpyPeerAsync 0 - Pos"); cudaMemcpyPeerAsync(VelFstHalf_1,1,VelFstHalf_0,0,(N/2)*sizeof(float4)); errorCheck("cudaMemcpyPeerAsync 0 - Vel"); cudaDeviceSynchronize(); errorCheck("cudaDeviceSynchronize 2"); cudaSetDevice(1); errorCheck("cudaSetDevice 1"); cudaMemcpyPeerAsync(PosSndHalf_0,0,PosSndHalf_1,1,(N/2)*sizeof(float4)); errorCheck("cudaMemcpyPeerAsync 1 - Pos"); cudaMemcpyPeerAsync(VelSndHalf_0,0,VelSndHalf_1,1,(N/2)*sizeof(float4)); errorCheck("cudaMemcpyPeerAsync 1 - Vel"); cudaDeviceSynchronize(); errorCheck("cudaDeviceSynchronize 3"); if(tDraw == DrawRate) { cudaSetDevice(0); errorCheck("cudaSetDevice 0"); cudaMemcpyAsync(Pos, PosFstHalf_0, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Pos"); cudaMemcpyAsync(Pos+(N/2), PosSndHalf_0, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Pos"); cudaSetDevice(1); errorCheck("cudaSetDevice 1"); cudaMemcpyAsync(Vel, VelFstHalf_1, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Vel"); cudaMemcpyAsync(Vel+(N/2), VelSndHalf_1, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Vel"); if (DrawQuality == 1) drawAnalysisPictureCollision(); else if (DrawQuality == 2) drawPictureCollision(); else { printf("\nTSU Error: Invalid draw quality\n"); exit(0); } tDraw = 0; cout << "\nCollision run time = " << RunTime*UnitTime/3600.0 << " hours." 
<< endl; } tDraw++; if(PrintCollisionStats == 1) { cudaSetDevice(0); errorCheck("cudaSetDevice 0"); cudaMemcpyAsync(Pos, PosFstHalf_0, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Pos"); cudaMemcpyAsync(Pos+(N/2), PosSndHalf_0, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Pos"); cudaSetDevice(1); errorCheck("cudaSetDevice 1"); cudaMemcpyAsync(Vel, VelFstHalf_1, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Vel"); cudaMemcpyAsync(Vel+(N/2), VelSndHalf_1, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Vel"); printCollisionStatsToScreen(RunTime); PrintCollisionStats = 0; } if(PrintContinueStats == 1) { cudaSetDevice(0); errorCheck("cudaSetDevice 0"); cudaMemcpyAsync(Pos, PosFstHalf_0, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Pos"); cudaMemcpyAsync(Pos+(N/2), PosSndHalf_0, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Pos"); cudaSetDevice(1); errorCheck("cudaSetDevice 1"); cudaMemcpyAsync(Vel, VelFstHalf_1, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Vel"); cudaMemcpyAsync(Vel+(N/2), VelSndHalf_1, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Vel"); printContinueStatsToScreen(RunTime); PrintContinueStats = 0; } if(WriteToFile == 1 && tRecord == RecordRate) { cudaSetDevice(0); errorCheck("cudaSetDevice 0"); cudaMemcpyAsync(Pos, PosFstHalf_0, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Pos"); cudaMemcpyAsync(Pos+(N/2), PosSndHalf_0, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Pos"); cudaSetDevice(1); errorCheck("cudaSetDevice 1"); cudaMemcpyAsync(Vel, VelFstHalf_1, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Vel"); cudaMemcpyAsync(Vel+(N/2), VelSndHalf_1, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Vel"); recordPosAndVel(); tRecord = 0; } tRecord++; RunTime += Dt; } RunTime = RunTime -Dt; cout << "\nCollision run time end = " << RunTime*UnitTime/3600.0 << " hours." 
<< endl; cudaSetDevice(0); errorCheck("cudaSetDevice 0"); cudaMemcpyAsync(Pos, PosFstHalf_0, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Pos"); cudaMemcpyAsync(Pos+(N/2), PosSndHalf_0, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Pos"); cudaSetDevice(1); errorCheck("cudaSetDevice 1"); cudaMemcpyAsync(Vel, VelFstHalf_1, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Vel"); cudaMemcpyAsync(Vel+(N/2), VelSndHalf_1, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Vel"); return(RunTime); } void cleanKill(double time) { if(NumberOfGpus == 1 || UseMultipleGPU == 0) { cudaMemcpy( Pos, Pos_DEV0, N *sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Pos"); cudaMemcpy( Vel, Vel_DEV0, N *sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Vel"); } else { cudaSetDevice(0); errorCheck("cudaSetDevice 0"); cudaMemcpyAsync(Pos, PosFstHalf_0, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Pos"); cudaMemcpyAsync(Pos+(N/2), PosSndHalf_0, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Pos"); cudaSetDevice(1); errorCheck("cudaSetDevice 1"); cudaMemcpyAsync(Vel, VelFstHalf_1, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Vel"); cudaMemcpyAsync(Vel+(N/2), VelSndHalf_1, (N/2)*sizeof(float4), cudaMemcpyDeviceToHost ); errorCheck("cudaMemcpyAsync Vel"); } recordFinalCollisionStat(time); recordContinuePosAndVel(time); printContinueStatsToFile(time); cleanUpCollision(); exit(0); } static void signalHandler(int signum) { int command; cout << "\n\n******************************************************" << endl; cout << "Enter:666 to kill the run." << endl; cout << "Enter:1 to cleanly terminate the run.\t(not valid in the setup stage)." << endl; cout << "Enter:2 to change the draw rate." << endl; cout << "Enter:3 to change the draw quality.\t(not valid in the setup stage)." << endl; cout << "Enter:4 to set your eye location." << endl; cout << "Enter:5 to set the Center of Mass as your center." << endl; cout << "Enter:6 to print the run stats.\t(not valid in the setup stage)." << endl; cout << "Enter:7 to print the continue stats.\t(not valid in the setup stage)." << endl; cout << "Enter:8 to change the total run time." << endl; cout << "Enter:9 to continue the run." << endl; cout << "******************************************************\n\nCommand: "; cin >> command; if(command == 666) { cout << "\n\n******************************************************" << endl; cout << "Are you sure you want to terminate the run?" << endl; cout << "Enter:666 again if you are sure. Enter anything else to continue the run." << endl; cout << "******************************************************\n\nCommand: "; cin >> command; if(command == 666) { cleanUpCollision(); exit(0); } } else if(command == 1) { cleanKill(RunTime); } else if(command == 2) { cout << "\nEnter the desired draw rate: "; cin >> DrawRate; cout << "\nDrawRate: " << DrawRate << endl; } else if(command == 3) { cout << "\nEnter the desired draw quality.\n1 for analysis.\n2 for standard." 
<< endl; cin >> DrawQuality; cout << "\nDrawQuality: " << DrawQuality << endl; } else if (command == 4) { cout << "******************************************************" << endl; cout << "Here is where your current Eye is at: " << endl; cout << "EyeX: " << EyeX << endl; cout << "EyeY: " << EyeY << endl; cout << "EyeZ: " << EyeZ << endl; cout << "Changing this will determine how close/far you are." << endl; cout << "******************************************************" << endl; cout << "\nEnter the desired x location of your eye (double): "; cin >> EyeX; cout << "Enter the desired y location of your eye (double): "; cin >> EyeY; cout << "Enter the desired z location of your eye (double): "; cin >> EyeZ; glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glLoadIdentity(); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glFrustum(-0.2, 0.2, -0.2, 0.2, Near, Far); glMatrixMode(GL_MODELVIEW); gluLookAt(EyeX, EyeY, EyeZ, CenterX, CenterY, CenterZ, UpX, UpY, UpZ); //glutPostRedisplay(); //Display(); } else if (command == 5) { float3 temp = getCenterOfMassCollision(0); cout << "******************************************************" << endl; cout << "Center of Mass in the X-direction: " << temp.x << endl; cout << "Center of Mass in the Y-direction: " << temp.y << endl; cout << "Center of Mass in the Z-direction: " << temp.z << endl; cout << "This is the Center of Mass of the System" << endl; cout << "******************************************************" << endl; CenterX = temp.x; CenterY = temp.y; CenterZ = temp.z; glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glLoadIdentity(); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glFrustum(-0.2, 0.2, -0.2, 0.2, Near, Far); glMatrixMode(GL_MODELVIEW); gluLookAt(EyeX, EyeY, EyeZ, CenterX, CenterY, CenterZ, UpX, UpY, UpZ); //glutPostRedisplay(); //Display(); } else if (command == 6) { PrintCollisionStats = 1; } else if (command == 7) { PrintContinueStats = 1; } else if (command == 8) { cout << "\nEnter the desired TotalRunTime (float): "; cin >> TotalRunTime; TotalRunTime *= 3600.0/UnitTime; } else if (command == 9) { cout << "\nRun continued." 
<< endl; } else { cout <<"\n\n Invalid Command\n" << endl; } } void typeOfRunCheck() { cout << "\nEnter 0 to create a new Run.\nEnter 1 to create a branch Run.\nEnter 2 to continue an existing Run.\n\n"; cin >> TypeOfRun; } void readRootStartPosAndVelFile() { FILE *temp = fopen("RootStartPosAndVel","rb"); fread(Pos, sizeof(float4), N, temp); fread(Vel, sizeof(float4), N, temp); fclose(temp); fseek(PosAndVelFile,0,SEEK_END); } void readContinuePosAndVel() { ContinueRunPosAndVelFile = fopen("ContinueRunPosAndVel","rb"); fread(&RunTime, sizeof(double), 1, ContinueRunPosAndVelFile); fread(Pos, sizeof(float4), N, ContinueRunPosAndVelFile); fread(Vel, sizeof(float4), N, ContinueRunPosAndVelFile); //ContinueRunPosAndVelFile.clear(); fclose(ContinueRunPosAndVelFile); } void control() { double time; struct sigaction sa; sa.sa_handler = signalHandler; sigemptyset(&sa.sa_mask); sa.sa_flags = SA_RESTART; // Restart functions if interrupted by handler if (sigaction(SIGINT, &sa, NULL) == -1) { printf("\nTSU Error: sigaction error\n"); } //Setup run if (TypeOfRun == 0) { createFolderForNewRun(); readRunParameters(); setRunParameters(); openNewRunFiles(); recordSetupStats(); loadKernalConstantStructures(); allocateCPUMemory(); checkSetupForErrors(); //Create and initialize bodies deviceSetupSeperate(); createBodies(); nBodySeperate(); resetInitialConditions(); recordStatsOfCreatedBodies(); recordStartPosVelOfCreatedBodiesSeperate(); transformInitialConditionsFromSeperateToCollision(); cleanUpSeperate(); //Collide bodies deviceSetupCollision(); copyCreatedBodiesUpToDevice(); if(NumberOfGpus == 1 || UseMultipleGPU == 0) time = nBodyCollisionSingleGPU(); else time = nBodyCollisionDoubleGPU(); recordFinalCollisionStat(time); recordContinuePosAndVel(time); printContinueStatsToFile(time); cleanUpCollision(); printf("\n DONE \n"); exit(0); } else if (TypeOfRun == 1) { createFolderForBranchRun(RootFolderName); readRunParameters(); setRunParameters(); readBranchParameters(); setBranchParameters(); openBranchRunFiles(); allocateCPUMemory(); readRootStartPosAndVelFile(); InitialPosition1.x += BranchPosition1.x; InitialPosition1.y += BranchPosition1.y; InitialPosition1.z += BranchPosition1.z; InitialPosition2.x += BranchPosition2.x; InitialPosition2.y += BranchPosition2.y; InitialPosition2.z += BranchPosition2.z; InitialVelocity1.x += BranchVelocity1.x; InitialVelocity1.y += BranchVelocity1.y; InitialVelocity1.z += BranchVelocity1.z; InitialVelocity2.x += BranchVelocity2.x; InitialVelocity2.y += BranchVelocity2.y; InitialVelocity2.z += BranchVelocity2.z; InitialSpin1.x += BranchSpin1.x; InitialSpin1.y += BranchSpin1.y; InitialSpin1.z += BranchSpin1.z; InitialSpin1.w += BranchSpin1.w; InitialSpin2.x += BranchSpin2.x; InitialSpin2.y += BranchSpin2.y; InitialSpin2.z += BranchSpin2.z; InitialSpin2.w += BranchSpin2.w; recordSetupStats(); loadKernalConstantStructures(); checkSetupForErrors(); deviceSetupSeperate(); //From here down to nBodySeperate is like the create bodies above but all that needs to be done is move and spin setBodyPositionSeperate(1, InitialPosition1.x, InitialPosition1.y, InitialPosition1.z); //setBodyVelocitySeperate(1, InitialVelocity1.x, InitialVelocity1.y, InitialVelocity1.z); setBodyPositionSeperate(2, InitialPosition2.x, InitialPosition2.y, InitialPosition2.z); //setBodyVelocitySeperate(2, InitialVelocity2.x, InitialVelocity2.y, InitialVelocity2.z); //This is really the added spin but must be put in initail to fool nBodySeperate because the original spin is already done InitialSpin1 = BranchSpin1; 
InitialSpin2 = BranchSpin2; DampTime = -1.0; DampRestTime = -1.0; SetupTime = BranchSpinRestTime; nBodySeperate(); resetInitialConditions(); recordStatsOfCreatedBodies(); recordStartPosVelOfCreatedBodiesSeperate(); transformInitialConditionsFromSeperateToCollision(); cleanUpSeperate(); //Collide bodies TotalRunTime = BranchRunTime; deviceSetupCollision(); copyCreatedBodiesUpToDevice(); if(NumberOfGpus == 1 || UseMultipleGPU == 0) time = nBodyCollisionSingleGPU(); else time = nBodyCollisionDoubleGPU(); recordFinalCollisionStat(time); recordContinuePosAndVel(time); printContinueStatsToFile(time); cleanUpCollision(); printf("\n DONE \n"); exit(0); } else if (TypeOfRun == 2) { chdir(RootFolderName); readRunParameters(); setRunParameters(); loadKernalConstantStructures(); allocateCPUMemory(); checkSetupForErrors(); readContinuePosAndVel(); openContinueRunFiles(); TotalRunTime = AddedRunTime*3600.0/UnitTime + RunTime; //Collide bodies deviceSetupCollision(); copyCreatedBodiesUpToDevice(); if(NumberOfGpus == 1 || UseMultipleGPU == 0) time = nBodyCollisionSingleGPU(); else time = nBodyCollisionDoubleGPU(); recordFinalCollisionStat(time); recordContinuePosAndVel(time); printContinueStatsToFile(time); cleanUpCollision(); printf("\n DONE \n"); exit(0); } else { printf("\n Bad TypeOfRun value \n"); exit(0); } } //https://www.opengl.org/archives/resources/faq/technical/viewing.htm void Display(void) { glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glLoadIdentity(); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glOrtho(Left, Right, Bottom, Top, Front, Back); glMatrixMode(GL_MODELVIEW); gluLookAt(EyeX, EyeY, EyeZ, CenterX, CenterY, CenterZ, UpX, UpY, UpZ); } void reshape(GLint w, GLint h) { glViewport(0, 0, w, h); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glOrtho(Left, Right, Bottom, Top, Front, Back); glMatrixMode(GL_MODELVIEW); } void init() { glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glLoadIdentity(); glMatrixMode(GL_PROJECTION); glLoadIdentity(); glOrtho(Left, Right, Bottom, Top, Front, Back); glMatrixMode(GL_MODELVIEW); gluLookAt(EyeX, EyeY, EyeZ, CenterX, CenterY, CenterZ, UpX, UpY, UpZ); glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); } int main(int argc, char** argv) { if( argc < 2) { printf("\n You need to enter the run type (int 0 new run, 1 branch run, or 2 continue run) on the command line\n"); exit(0); } else { TypeOfRun = atoi(argv[1]); } if( TypeOfRun == 1) { if(argc < 3) { printf("\n You need to enter a root folder to work from on the command line\n"); exit(0); } else { strcat(RootFolderName, argv[2]); } } if( TypeOfRun == 2) { if(argc < 3) { printf("\n You need to enter a root folder to work from on the command line\n"); exit(0); } else { strcat(RootFolderName, argv[2]); } if(argc < 4) { printf("\n You need to enter the extra run time for the continuation\n"); exit(0); } else { AddedRunTime = atof(argv[3]); } } glutInit(&argc,argv); glutInitDisplayMode(GLUT_DOUBLE | GLUT_DEPTH | GLUT_RGB); glutInitWindowSize(XWindowSize,YWindowSize); glutInitWindowPosition(0,0); glutCreateWindow("Giant Impact Hypothesis Simulation"); glutReshapeFunc(reshape); init(); glShadeModel(GL_SMOOTH); glClearColor(0.0, 0.0, 0.0, 0.0); glutDisplayFunc(Display); glutReshapeFunc(reshape); glutIdleFunc(control); glutMainLoop(); return 0; }
549e778ee11be5da8e7d79034132f5402d31a365.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void rotateCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, float inXStart, float inYStart, uint32_t width, uint32_t height, float cosAngle, float sinAngle ) { uint32_t outX = blockDim.x * blockIdx.x + threadIdx.x; uint32_t outY = blockDim.y * blockIdx.y + threadIdx.y; // Only do something if this thread is for a valid pixel in the output if ( outX < width && outY < height ) { // Both input coordinates are shifted using the cosAngle, sinAngle, outX, and outY. The shift // comes from inverse rotating the horizontal and vertical iterations over the output. // Note that inverse rotation by X axis is [cos(angle), -sin(angle)], // and the inverse rotation by Y axis is [sin(angle), cos(angle)]. const float exactInX = inXStart + cosAngle * outX + sinAngle * outY; const float exactInY = inYStart - sinAngle * outX + cosAngle * outY; const int32_t inX = static_cast<int32_t>(exactInX); const int32_t inY = static_cast<int32_t>(exactInY); // Shift to the output pixel out = out + outY * rowSizeOut + outX; // Note that we will be taking an average with next pixels, so next pixels need to be in the image too if ( inX < 0 || inX >= width - 1 || inY < 0 || inY >= height - 1 ) { *out = 0; // We do not actually know what is beyond the image, so set value to 0 } else { // Shift to the input pixel in = in + inY * rowSizeIn + inX; // Now we use a bilinear approximation to find the pixel intensity value. That is, we take an // average of pixels (inX, inY), (inX + 1, inY), (inX, inY + 1), and (inX + 1, inY + 1). // We add an offset of 0.5 so that conversion to integer is done using rounding. const float probX = exactInX - inX; const float probY = exactInY - inY; const float mean = *in * (1 - probX) * (1 - probY) + *(in + 1) * probX * (1 - probY) + *(in + rowSizeIn) * (1 - probX) * probY + *(in + rowSizeIn + 1) * probX * probY + 0.5f; *out = static_cast<uint8_t>(mean); } } }
549e778ee11be5da8e7d79034132f5402d31a365.cu
#include "includes.h" __global__ void rotateCuda( const uint8_t * in, uint32_t rowSizeIn, uint8_t * out, uint32_t rowSizeOut, float inXStart, float inYStart, uint32_t width, uint32_t height, float cosAngle, float sinAngle ) { uint32_t outX = blockDim.x * blockIdx.x + threadIdx.x; uint32_t outY = blockDim.y * blockIdx.y + threadIdx.y; // Only do something if this thread is for a valid pixel in the output if ( outX < width && outY < height ) { // Both input coordinates are shifted using the cosAngle, sinAngle, outX, and outY. The shift // comes from inverse rotating the horizontal and vertical iterations over the output. // Note that inverse rotation by X axis is [cos(angle), -sin(angle)], // and the inverse rotation by Y axis is [sin(angle), cos(angle)]. const float exactInX = inXStart + cosAngle * outX + sinAngle * outY; const float exactInY = inYStart - sinAngle * outX + cosAngle * outY; const int32_t inX = static_cast<int32_t>(exactInX); const int32_t inY = static_cast<int32_t>(exactInY); // Shift to the output pixel out = out + outY * rowSizeOut + outX; // Note that we will be taking an average with next pixels, so next pixels need to be in the image too if ( inX < 0 || inX >= width - 1 || inY < 0 || inY >= height - 1 ) { *out = 0; // We do not actually know what is beyond the image, so set value to 0 } else { // Shift to the input pixel in = in + inY * rowSizeIn + inX; // Now we use a bilinear approximation to find the pixel intensity value. That is, we take an // average of pixels (inX, inY), (inX + 1, inY), (inX, inY + 1), and (inX + 1, inY + 1). // We add an offset of 0.5 so that conversion to integer is done using rounding. const float probX = exactInX - inX; const float probY = exactInY - inY; const float mean = *in * (1 - probX) * (1 - probY) + *(in + 1) * probX * (1 - probY) + *(in + rowSizeIn) * (1 - probX) * probY + *(in + rowSizeIn + 1) * probX * probY + 0.5f; *out = static_cast<uint8_t>(mean); } } }
4bffbcaed0b37227c1ea802e85537e559a4f7dda.hip
// !!! This is a file automatically generated by hipify!!! /* ============================================================================ Name : sorting_segments.cu Author : Rafael Schmid Version : Copyright : Your copyright notice Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU ============================================================================ */ #include <hipcub/hipcub.hpp> #include <hipcub/hipcub.hpp> #include <stdio.h> #include <stdlib.h> #include <algorithm> #include <utility> #include <iostream> #include <bitset> #include <math.h> #include <time.h> #include <chrono> #include <hip/hip_runtime.h> #include <iostream> #include <chrono> #ifndef ELAPSED_TIME #define ELAPSED_TIME 0 #endif void cudaTest(hipError_t error) { if (error != hipSuccess) { printf("cuda returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); exit (EXIT_FAILURE); } } void print(uint* host_data, uint n) { std::cout << "\n"; for (uint i = 0; i < n; i++) { std::cout << host_data[i] << " "; } std::cout << "\n"; } int main(void) { uint num_of_segments; uint num_of_elements; uint i; scanf("%d", &num_of_segments); uint mem_size_seg = sizeof(uint) * (num_of_segments + 1); uint *h_seg = (uint *) malloc(mem_size_seg); for (i = 0; i < num_of_segments + 1; i++) scanf("%d", &h_seg[i]); scanf("%d", &num_of_elements); uint mem_size_vec = sizeof(uint) * num_of_elements; uint *h_vec_aux = (uint *) malloc(mem_size_vec); uint *h_value = (uint *) malloc(mem_size_vec); for (i = 0; i < num_of_elements; i++) { scanf("%d", &h_vec_aux[i]); h_value[i] = i; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); uint *d_value, *d_value_out, *d_vec, *d_vec_out; void *d_temp = NULL; size_t temp_bytes = 0; cudaTest(hipMalloc((void **) &d_vec, mem_size_vec)); cudaTest(hipMalloc((void **) &d_value, mem_size_vec)); cudaTest(hipMalloc((void **) &d_vec_out, mem_size_vec)); cudaTest(hipMalloc((void **) &d_value_out, mem_size_vec)); uint *h_vec = (uint *) malloc(mem_size_vec); uint *h_norm = (uint *) malloc(mem_size_seg); for (uint k = 0; k < EXECUTIONS; k++) { for(uint j = 0; j < num_of_elements; j++) h_vec[j] = h_vec_aux[j]; std::chrono::high_resolution_clock::time_point start1 = std::chrono::high_resolution_clock::now(); uint previousMax = 0; for (i = 0; i < num_of_segments; i++) { uint currentMin = h_vec[h_seg[i]]; uint currentMax = h_vec[h_seg[i]]; for (uint j = h_seg[i] + 1; j < h_seg[i + 1]; j++) { if (h_vec[j] < currentMin) currentMin = h_vec[j]; else if (h_vec[j] > currentMax) currentMax = h_vec[j]; } int normalize = previousMax - currentMin; if(normalize > 0) { h_norm[i] = ++normalize; for (uint j = h_seg[i]; j < h_seg[i + 1]; j++) { h_vec[j] += normalize; } } else { h_norm[i] = 0; normalize = 0; } previousMax = currentMax + normalize; } std::chrono::high_resolution_clock::time_point stop1 = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> time_span = std::chrono::duration_cast< std::chrono::duration<double>>(stop1 - start1); cudaTest(hipMemcpy(d_vec, h_vec, mem_size_vec, hipMemcpyHostToDevice)); cudaTest(hipMemcpy(d_value, h_value, mem_size_vec, hipMemcpyHostToDevice)); if(temp_bytes == 0) { hipcub::DeviceRadixSort::SortPairs(d_temp, temp_bytes, d_vec, d_vec_out, d_value, d_value_out, num_of_elements); hipMalloc((void **) &d_temp, temp_bytes); } hipcub::DeviceRadixSort::SortPairs(d_temp, temp_bytes, d_vec, d_vec_out, d_value, d_value_out, num_of_elements); hipError_t errSync = hipGetLastError(); hipError_t errAsync = 
hipDeviceSynchronize(); if (errSync != hipSuccess) printf("Sync kernel error: %s\n", hipGetErrorString(errSync)); if (errAsync != hipSuccess) printf("Async kernel error: %s\n", hipGetErrorString(errAsync)); cudaTest(hipMemcpy(h_vec, d_vec_out, mem_size_vec, hipMemcpyDeviceToHost)); start1 = std::chrono::high_resolution_clock::now(); for (i = 0; i < num_of_segments; i++) { for (uint j = h_seg[i]; j < h_seg[i + 1]; j++) { h_vec[j] -= h_norm[i]; } } stop1 = std::chrono::high_resolution_clock::now(); time_span += std::chrono::duration_cast<std::chrono::duration<double>>( stop1 - start1); if (ELAPSED_TIME == 1) { std::cout << time_span.count()*1000 << "\n"; } hipDeviceSynchronize(); } hipFree (d_vec); hipFree (d_vec_out); hipFree (d_value); hipFree (d_value_out); hipFree (d_temp); if (ELAPSED_TIME != 1) { print(h_vec, num_of_elements); } free(h_seg); free(h_vec); free(h_norm); free(h_vec_aux); free(h_value); return 0; }
4bffbcaed0b37227c1ea802e85537e559a4f7dda.cu
/* ============================================================================ Name : sorting_segments.cu Author : Rafael Schmid Version : Copyright : Your copyright notice Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU ============================================================================ */ #include <cub/util_allocator.cuh> #include <cub/device/device_radix_sort.cuh> #include <stdio.h> #include <stdlib.h> #include <algorithm> #include <utility> #include <iostream> #include <bitset> #include <math.h> #include <time.h> #include <chrono> #include <cuda.h> #include <iostream> #include <chrono> #ifndef ELAPSED_TIME #define ELAPSED_TIME 0 #endif void cudaTest(cudaError_t error) { if (error != cudaSuccess) { printf("cuda returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); exit (EXIT_FAILURE); } } void print(uint* host_data, uint n) { std::cout << "\n"; for (uint i = 0; i < n; i++) { std::cout << host_data[i] << " "; } std::cout << "\n"; } int main(void) { uint num_of_segments; uint num_of_elements; uint i; scanf("%d", &num_of_segments); uint mem_size_seg = sizeof(uint) * (num_of_segments + 1); uint *h_seg = (uint *) malloc(mem_size_seg); for (i = 0; i < num_of_segments + 1; i++) scanf("%d", &h_seg[i]); scanf("%d", &num_of_elements); uint mem_size_vec = sizeof(uint) * num_of_elements; uint *h_vec_aux = (uint *) malloc(mem_size_vec); uint *h_value = (uint *) malloc(mem_size_vec); for (i = 0; i < num_of_elements; i++) { scanf("%d", &h_vec_aux[i]); h_value[i] = i; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); uint *d_value, *d_value_out, *d_vec, *d_vec_out; void *d_temp = NULL; size_t temp_bytes = 0; cudaTest(cudaMalloc((void **) &d_vec, mem_size_vec)); cudaTest(cudaMalloc((void **) &d_value, mem_size_vec)); cudaTest(cudaMalloc((void **) &d_vec_out, mem_size_vec)); cudaTest(cudaMalloc((void **) &d_value_out, mem_size_vec)); uint *h_vec = (uint *) malloc(mem_size_vec); uint *h_norm = (uint *) malloc(mem_size_seg); for (uint k = 0; k < EXECUTIONS; k++) { for(uint j = 0; j < num_of_elements; j++) h_vec[j] = h_vec_aux[j]; std::chrono::high_resolution_clock::time_point start1 = std::chrono::high_resolution_clock::now(); uint previousMax = 0; for (i = 0; i < num_of_segments; i++) { uint currentMin = h_vec[h_seg[i]]; uint currentMax = h_vec[h_seg[i]]; for (uint j = h_seg[i] + 1; j < h_seg[i + 1]; j++) { if (h_vec[j] < currentMin) currentMin = h_vec[j]; else if (h_vec[j] > currentMax) currentMax = h_vec[j]; } int normalize = previousMax - currentMin; if(normalize > 0) { h_norm[i] = ++normalize; for (uint j = h_seg[i]; j < h_seg[i + 1]; j++) { h_vec[j] += normalize; } } else { h_norm[i] = 0; normalize = 0; } previousMax = currentMax + normalize; } std::chrono::high_resolution_clock::time_point stop1 = std::chrono::high_resolution_clock::now(); std::chrono::duration<double> time_span = std::chrono::duration_cast< std::chrono::duration<double>>(stop1 - start1); cudaTest(cudaMemcpy(d_vec, h_vec, mem_size_vec, cudaMemcpyHostToDevice)); cudaTest(cudaMemcpy(d_value, h_value, mem_size_vec, cudaMemcpyHostToDevice)); if(temp_bytes == 0) { cub::DeviceRadixSort::SortPairs(d_temp, temp_bytes, d_vec, d_vec_out, d_value, d_value_out, num_of_elements); cudaMalloc((void **) &d_temp, temp_bytes); } cub::DeviceRadixSort::SortPairs(d_temp, temp_bytes, d_vec, d_vec_out, d_value, d_value_out, num_of_elements); cudaError_t errSync = cudaGetLastError(); cudaError_t errAsync = cudaDeviceSynchronize(); if (errSync != cudaSuccess) 
printf("Sync kernel error: %s\n", cudaGetErrorString(errSync)); if (errAsync != cudaSuccess) printf("Async kernel error: %s\n", cudaGetErrorString(errAsync)); cudaTest(cudaMemcpy(h_vec, d_vec_out, mem_size_vec, cudaMemcpyDeviceToHost)); start1 = std::chrono::high_resolution_clock::now(); for (i = 0; i < num_of_segments; i++) { for (uint j = h_seg[i]; j < h_seg[i + 1]; j++) { h_vec[j] -= h_norm[i]; } } stop1 = std::chrono::high_resolution_clock::now(); time_span += std::chrono::duration_cast<std::chrono::duration<double>>( stop1 - start1); if (ELAPSED_TIME == 1) { std::cout << time_span.count()*1000 << "\n"; } cudaDeviceSynchronize(); } cudaFree (d_vec); cudaFree (d_vec_out); cudaFree (d_value); cudaFree (d_value_out); cudaFree (d_temp); if (ELAPSED_TIME != 1) { print(h_vec, num_of_elements); } free(h_seg); free(h_vec); free(h_norm); free(h_vec_aux); free(h_value); return 0; }
056e634276a5a7b4bebea3f3db39e26243c717c3.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2019, ByteDance CORPORATION. All rights reserved. #include <hip/hip_runtime.h> #include <unistd.h> #include <string> #include "../model/decoder.h" #include "../model/encoder.h" #include "model_config.pb.h" #include "../proto/transformer_weight.h" #include "../server/custom.h" #include "../server/model_config.h" #include "../server/model_config_cuda.h" #include "../tools/util.h" #include "../kernels/embKernels.h" /** @file Transformer server based on tensorrt inference server. */ #define LOG_ERROR std::cerr #define LOG_INFO std::cout #ifdef FP16_MODE const lightseq::cuda::OperationType OPTYPE = lightseq::cuda::OperationType::FP16; #else const lightseq::cuda::OperationType OPTYPE = lightseq::cuda::OperationType::FP32; #endif namespace nvidia { namespace inferenceserver { namespace custom { namespace transformer { // Integer error codes. TRTIS requires that success must be 0. All // other codes are interpreted by TRTIS as failures. enum ErrorCodes { kSuccess, kUnknown, kInvalidModelConfig, kGpuNotSupported, kInputOutputShape, kInputName, kOutputName, kInputOutputDataType, kInputContents, kInputSize, kOutputBuffer, kCudaDevice, kCudaMalloc, kCudaMemcpy, kCudaExecute, kCudaStream, kCublas, kCpuExecute, kWeightLoad, kModelSize }; // Context object. All state must be kept in this object. class Context { public: Context(const std::string& instance_name, const ModelConfig& config, const int gpu_device); ~Context(); // Initialize the context. Validate that the model configuration, // etc. is something that we can handle. int Init(); // Perform custom execution on the payloads. int Execute(const uint32_t payload_cnt, CustomPayload* payloads, CustomGetNextInputFn_t input_fn, CustomGetOutputFn_t output_fn); private: typedef lightseq::cuda::OperationTypeTraits<OPTYPE> _optraits; int FreeCudaBuffers(); int AllocateCudaBuffers(void** pdata, size_t byte_size); int GetInputTensorGPU(CustomGetNextInputFn_t input_fn, void* input_context, const char* name, const size_t expected_byte_size, void* input); int ExecuteGPU(const uint32_t payload_cnt, CustomPayload* payloads, CustomGetNextInputFn_t input_fn, CustomGetOutputFn_t output_fn); // The name of this instance of the backend. const std::string instance_name_; // The model configuration. const ModelConfig model_config_; // The GPU device ID to execute on or CUSTOM_NO_GPU_DEVICE if should // execute on CPU. const int gpu_device_; // The data-type of the input and output tensors. Must be either // INT32 or FP32. DataType datatype_; int datatype_bytesize_; // CUDA memory buffers for input and output tensors. void* d_input_; void* d_input_copy_; void* d_padding_mask_; void* d_encoder_output_; void* d_buf_; void* d_output_; void* d_src_lang_id_; void* d_trg_lang_id_; // The contexts executing on a GPU, the CUDA stream to use for the // execution. 
hipStream_t stream_; hipblasHandle_t hd_; lightseq::cuda::TransformerWeight<OPTYPE> tw_; std::shared_ptr<lightseq::cuda::Decoder<OPTYPE>> decoder_; std::shared_ptr<lightseq::cuda::Encoder<OPTYPE>> encoder_; }; Context::Context(const std::string& instance_name, const ModelConfig& model_config, const int gpu_device) : instance_name_(instance_name), model_config_(model_config), gpu_device_(gpu_device), datatype_(DataType::TYPE_INVALID), d_input_(nullptr), d_input_copy_(nullptr), d_padding_mask_(nullptr), d_encoder_output_(nullptr), d_buf_(nullptr), d_output_(nullptr), d_src_lang_id_(nullptr), d_trg_lang_id_(nullptr), stream_(nullptr), hd_(nullptr) {} Context::~Context() { FreeCudaBuffers(); if (hd_ != nullptr) { hipblasStatus_t cuerr = hipblasDestroy(hd_); if (cuerr != HIPBLAS_STATUS_SUCCESS) { LOG_ERROR << "Failed to destroy cublas handle."; } hd_ = nullptr; } if (stream_ != nullptr) { hipError_t cuerr = hipStreamDestroy(stream_); if (cuerr != hipSuccess) { LOG_ERROR << "Failed to destroy cuda stream: " << hipGetErrorString(cuerr); } stream_ = nullptr; } } int Context::FreeCudaBuffers() { if (d_input_ != nullptr) { hipError_t cuerr = hipFree(d_input_); if (cuerr != hipSuccess) { LOG_ERROR << "Failed to free cuda memory: " << hipGetErrorString(cuerr); } d_input_ = nullptr; } if (d_input_copy_ != nullptr) { hipError_t cuerr = hipFree(d_input_copy_); if (cuerr != hipSuccess) { LOG_ERROR << "Failed to free cuda memory: " << hipGetErrorString(cuerr); } d_input_copy_ = nullptr; } if (d_padding_mask_ != nullptr) { hipError_t cuerr = hipFree(d_padding_mask_); if (cuerr != hipSuccess) { LOG_ERROR << "Failed to free cuda memory: " << hipGetErrorString(cuerr); } d_padding_mask_ = nullptr; } if (d_encoder_output_ != nullptr) { hipError_t cuerr = hipFree(d_encoder_output_); if (cuerr != hipSuccess) { LOG_ERROR << "Failed to free cuda memory: " << hipGetErrorString(cuerr); } d_encoder_output_ = nullptr; } if (d_buf_ != nullptr) { hipError_t cuerr = hipFree(d_buf_); if (cuerr != hipSuccess) { LOG_ERROR << "Failed to free cuda memory: " << hipGetErrorString(cuerr); } d_buf_ = nullptr; } if (d_output_ != nullptr) { hipError_t cuerr = hipFree(d_output_); if (cuerr != hipSuccess) { LOG_ERROR << "Failed to free cuda memory: " << hipGetErrorString(cuerr); } d_output_ = nullptr; } if (d_src_lang_id_ != nullptr) { hipError_t cuerr = hipFree(d_src_lang_id_); if (cuerr != hipSuccess) { LOG_ERROR << "Failed to free cuda memory: " << hipGetErrorString(cuerr); } d_src_lang_id_ = nullptr; } if (d_trg_lang_id_ != nullptr) { hipError_t cuerr = hipFree(d_trg_lang_id_); if (cuerr != hipSuccess) { LOG_ERROR << "Failed to free cuda memory: " << hipGetErrorString(cuerr); } d_trg_lang_id_ = nullptr; } return kSuccess; } int Context::AllocateCudaBuffers(void** pdata, size_t byte_size) { // Allocate GPU memory buffers large enough for each input and // output. For performance we allocate once during initialization // instead of doing it each time we execute. 
if (*pdata != nullptr) { LOG_ERROR << "given pointer own gpu memory before allocate" << std::endl; return kCudaMalloc; } hipError_t cuerr = hipMalloc(pdata, byte_size); if (cuerr != hipSuccess) { LOG_ERROR << "unable to allocate memory in function AllocateCudaBuffers" << hipGetErrorString(cuerr); return kCudaMalloc; } cuerr = hipStreamSynchronize(stream_); if (cuerr != hipSuccess) { LOG_ERROR << "Stream synchronize failed after hipMalloc" << hipGetErrorString(cuerr) << std::endl; return kCudaMalloc; } return kSuccess; } int Context::Init() { // Very important to set the CUDA device before performing any // CUDA API calls. The device is maintained per-CPU-thread, and // the same CPU thread will always be used with this instance of // the backend, so only need to set the device once. LOG_INFO << "Trtis instance init start" << std::endl; hipError_t cuerr = hipSetDevice(gpu_device_); if (cuerr != hipSuccess) { LOG_ERROR << "Failed to set CUDA device to " << gpu_device_ << ": " << hipGetErrorString(cuerr); return kCudaDevice; } const int cuda_stream_priority = GetCudaStreamPriority(model_config_.optimization().priority()); cuerr = hipStreamCreateWithPriority(&stream_, hipStreamDefault, cuda_stream_priority); if (cuerr != hipSuccess) { LOG_ERROR << "Unable to create stream" << hipGetErrorString(cuerr); return kCudaStream; } hipblasStatus_t cublaserr = hipblasCreate(&hd_); if (cublaserr != HIPBLAS_STATUS_SUCCESS) { LOG_ERROR << "Failed to creat cublas handle"; return kCublas; } cublaserr = hipblasSetStream(hd_, stream_); if (cublaserr != HIPBLAS_STATUS_SUCCESS) { LOG_ERROR << "Failed to set stream for cublas handle"; return kCublas; } if (model_config_.input_size() != 1) { return kInputOutputShape; } datatype_ = model_config_.input(0).data_type(); if (datatype_ != DataType::TYPE_INT32) { return kInputOutputDataType; } datatype_bytesize_ = GetDataTypeByteSize(datatype_); if (model_config_.input(0).name() != "src_ids:0") { return kInputName; } if (model_config_.output_size() != 1) { return kInputOutputShape; } if (model_config_.output(0).data_type() != datatype_) { return kInputOutputDataType; } if (model_config_.output(0).name() != "trg_ids:0") { return kOutputName; } char* mz = getenv("MODEL_ZOO"); if (mz == NULL) { LOG_ERROR << "plz set environment variable MODEL_ZOO !" 
<< std::endl; return kWeightLoad; } std::string model_path = mz; model_path += "/" + model_config_.name(); std::string weight_path = model_path + "/" + "transformer.pb"; std::ifstream fproto(weight_path.c_str()); if (!fproto.good()) { weight_path = model_path + "/" + "transformer.hdf5"; std::ifstream fhdf5(weight_path.c_str()); if (!fhdf5.good()) { LOG_ERROR << "Neither transformer.pb nor transformer.hdf5 " << "exists under " << model_path << std::endl; return kWeightLoad; } } LOG_INFO << "Load model weight from " << weight_path << std::endl; std::string res = tw_.initializing(weight_path); if (!res.empty()) { LOG_ERROR << res << std::endl; return kWeightLoad; } tw_.print_model_config(); int max_batch_size = model_config_.max_batch_size(); int err; err = AllocateCudaBuffers( &d_input_, max_batch_size * tw_._max_step * datatype_bytesize_); if (err != kSuccess) { return err; } err = AllocateCudaBuffers( &d_input_copy_, max_batch_size * tw_._max_step * datatype_bytesize_); if (err != kSuccess) { return err; } err = AllocateCudaBuffers( &d_padding_mask_, max_batch_size * tw_._max_step * datatype_bytesize_); if (err != kSuccess) { return err; } // FIXME err = AllocateCudaBuffers( &d_encoder_output_, max_batch_size * tw_._max_step * tw_._hidden_size * datatype_bytesize_); if (err != kSuccess) { return err; } err = AllocateCudaBuffers( &d_output_, max_batch_size * tw_._max_step * datatype_bytesize_); if (err != kSuccess) { return err; } err = AllocateCudaBuffers(&d_src_lang_id_, max_batch_size * datatype_bytesize_); if (err != kSuccess) { return err; } err = AllocateCudaBuffers(&d_trg_lang_id_, max_batch_size * datatype_bytesize_); if (err != kSuccess) { return err; } encoder_ = std::make_shared<lightseq::cuda::Encoder<OPTYPE>>( max_batch_size, reinterpret_cast<int*>(d_input_), reinterpret_cast<int*>(d_padding_mask_), reinterpret_cast<_optraits::DataType*>(d_encoder_output_), tw_, stream_, hd_, reinterpret_cast<int*>(d_src_lang_id_)); res = encoder_->check(); if (!res.empty()) { LOG_ERROR << res << std::endl; return kModelSize; } decoder_ = std::make_shared<lightseq::cuda::Decoder<OPTYPE>>( max_batch_size, reinterpret_cast<int*>(d_padding_mask_), reinterpret_cast<_optraits::DataType*>(d_encoder_output_), reinterpret_cast<int*>(d_output_), tw_, stream_, hd_, false, reinterpret_cast<int*>(d_trg_lang_id_)); res = decoder_->check(); if (!res.empty()) { LOG_ERROR << res << std::endl; return kModelSize; } long buf_bytesize = max(encoder_->compute_buffer_bytesize(), decoder_->compute_buffer_bytesize()); err = AllocateCudaBuffers(&d_buf_, buf_bytesize); if (err != kSuccess) { return err; } // encoder and decoder use the same buffer to save gpu memory useage encoder_->init_buffer(d_buf_); decoder_->init_buffer(d_buf_); // Wait for all init finish. cuerr = hipStreamSynchronize(stream_); if (cuerr != hipSuccess) { LOG_ERROR << "failed to init GPU for transformer: " << hipGetErrorString(cuerr) << std::endl; return kCudaExecute; } LOG_INFO << "transformer, release-version[" << __DATE__ << " " << __TIME__ << "], Trtis instance init succeed!" << std::endl; return kSuccess; } int Context::GetInputTensorGPU(CustomGetNextInputFn_t input_fn, void* input_context, const char* name, const size_t expected_byte_size, void* input) { // The values for an input tensor are not necessarily in one // contiguous chunk, so we copy the chunks into 'input', which // points to CUDA memory. 
uint64_t total_content_byte_size = 0; while (true) { const void* content; uint64_t content_byte_size = expected_byte_size; if (!input_fn(input_context, name, &content, &content_byte_size)) { return kInputContents; } // If 'content' returns nullptr we have all the input. if (content == nullptr) { break; } // If the total amount of content received exceeds what we expect // then something is wrong. if ((total_content_byte_size + content_byte_size) > expected_byte_size) { return kInputSize; } hipError_t cuerr = hipMemcpyAsync( reinterpret_cast<char*>(input) + total_content_byte_size, content, content_byte_size, hipMemcpyHostToDevice, stream_); if (cuerr != hipSuccess) { LOG_ERROR << "failed to copy input values to GPU for transformer: " << hipGetErrorString(cuerr) << std::endl; LOG_ERROR << "try to copy " << total_content_byte_size + content_byte_size << " bytes from input" << std::endl; return kCudaMemcpy; } total_content_byte_size += content_byte_size; } // Make sure we end up with exactly the amount of input we expect. if (total_content_byte_size != expected_byte_size) { return kInputSize; } return kSuccess; } int Context::ExecuteGPU(const uint32_t payload_cnt, CustomPayload* payloads, CustomGetNextInputFn_t input_fn, CustomGetOutputFn_t output_fn) { // Each payload represents a related set of inputs and required // outputs. Each payload may have a different batch size. The total // batch-size of all payloads will not exceed the max-batch-size // specified in the model configuration. if (payload_cnt == 0) { return kSuccess; } std::vector<int64_t> shape( payloads[0].input_shape_dims[0], payloads[0].input_shape_dims[0] + payloads[0].input_shape_dim_cnts[0]); int err; for (uint32_t pidx = 0; pidx < payload_cnt; ++pidx) { CustomPayload& payload = payloads[pidx]; // For this payload the expected size of the input and output // tensors is determined by the batch-size of this payload. uint64_t batch_seq_len = payload.input_shape_dims[0][0]; if (batch_seq_len > tw_._max_step) { LOG_ERROR << "too long seq_len: " << batch_seq_len << ", skip this request" << std::endl; return kInputSize; } const uint64_t batchn_element_count = payload.batch_size * batch_seq_len; const uint64_t batchn_byte_size = batchn_element_count * datatype_bytesize_; // Copy the input tensors into the appropriate CUDA memory buffer. err = GetInputTensorGPU(input_fn, payload.input_context, "src_ids:0", batchn_byte_size, tw_._multilg_type == 0 ? d_input_ : d_input_copy_); if (err != kSuccess) { payload.error_code = err; continue; } // for multilg if (tw_._multilg_type != 0) { // multilg request: src_lang_id, trg_lang_id, src_token0, src_token1... lightseq::cuda::launch_split_multilg_request( (int*)d_input_copy_, (int*)d_src_lang_id_, (int*)d_trg_lang_id_, (int*)d_input_, payload.batch_size, batch_seq_len, stream_); } if (tw_._multilg_type == 1) { batch_seq_len -= 2; } if (tw_._multilg_type == 2) { batch_seq_len -= 1; } encoder_->run_one_infer(payload.batch_size, batch_seq_len); decoder_->run_one_infer(payload.batch_size, batch_seq_len); // The output shape is [payload-batch-size, shape] if the model // configuration supports batching, or just [shape] if the // model configuration does not support batching. 
std::vector<int64_t> output_shape = {payload.batch_size, decoder_->_cur_step + 1}; int64_t output_bytesize = output_shape[0] * output_shape[1] * datatype_bytesize_; const char* output_name = "trg_ids:0"; void* obuffer; if (!output_fn(payload.output_context, output_name, output_shape.size(), &output_shape[0], output_bytesize, &obuffer)) { payload.error_code = kOutputBuffer; break; } // If no error but the 'obuffer' is returned as nullptr, then // skip writing this output. if (obuffer == nullptr) { continue; } hipError_t cuerr = hipGetLastError(); if (cuerr != hipSuccess) { LOG_ERROR << "failed to launch kernel: " << hipGetErrorString(cuerr) << std::endl; payload.error_code = kCudaExecute; break; } cuerr = hipMemcpyAsync(obuffer, d_output_, output_bytesize, hipMemcpyDeviceToHost, stream_); if (cuerr != hipSuccess) { LOG_ERROR << "failed to copy output values from GPU for transformer: " << hipGetErrorString(cuerr) << std::endl; payload.error_code = kCudaMemcpy; break; } } // Wait for all compute and memcpy to complete. hipError_t cuerr = hipStreamSynchronize(stream_); if (cuerr != hipSuccess) { LOG_ERROR << "failed to synchronize GPU for transformer: " << hipGetErrorString(cuerr) << std::endl; return kCudaExecute; } return kSuccess; } int Context::Execute(const uint32_t payload_cnt, CustomPayload* payloads, CustomGetNextInputFn_t input_fn, CustomGetOutputFn_t output_fn) { if (gpu_device_ == CUSTOM_NO_GPU_DEVICE) { return kCpuExecute; } else { return ExecuteGPU(payload_cnt, payloads, input_fn, output_fn); } } ///////////// extern "C" { int CustomInitialize(const CustomInitializeData* data, void** custom_context) { // Convert the serialized model config to a ModelConfig object. ModelConfig model_config; if (!model_config.ParseFromString(std::string( data->serialized_model_config, data->serialized_model_config_size))) { return kInvalidModelConfig; } // Create the context and validate that the model configuration is // something that we can handle. 
Context* context = new Context(std::string(data->instance_name), model_config, data->gpu_device_id); int err = context->Init(); if (err != kSuccess) { return err; } *custom_context = static_cast<void*>(context); return kSuccess; } int CustomFinalize(void* custom_context) { if (custom_context != nullptr) { Context* context = static_cast<Context*>(custom_context); delete context; } return kSuccess; } const char* CustomErrorString(void* custom_context, int errcode) { switch (errcode) { case kSuccess: return "success"; case kInvalidModelConfig: return "invalid model configuration"; case kGpuNotSupported: return "execution on GPU not supported"; case kInputOutputShape: return "model must have two inputs and two outputs with the same shape"; case kInputName: return "model inputs must be named 'src_ids:0' and 'INPUT1'"; case kOutputName: return "model outputs must be named 'trg_ids:0' and 'OUTPUT1'"; case kInputOutputDataType: return "model inputs and outputs must have TYPE_INT32 or TYPE_FP32 " "data-type"; case kInputContents: return "unable to get input tensor values"; case kInputSize: return "unexpected size for input tensor"; case kOutputBuffer: return "unable to get buffer for output tensor values"; case kCudaDevice: return "hipSetDevice failed"; case kCudaMalloc: return "hipMalloc failed"; case kCudaMemcpy: return "hipMemcpy failed"; case kCudaExecute: return "cuda execution failed"; case kCudaStream: return "failed to create CUDA stream"; case kCublas: return "failed to create Cublas handle"; case kCpuExecute: return "cpu execution failed"; case kWeightLoad: return "load transformer weight in .pb failed"; case kModelSize: return "inappropriate transformer model size"; default: break; } return "unknown error"; } int CustomExecute(void* custom_context, const uint32_t payload_cnt, CustomPayload* payloads, CustomGetNextInputFn_t input_fn, CustomGetOutputFn_t output_fn) { if (custom_context == nullptr) { return kUnknown; } Context* context = static_cast<Context*>(custom_context); return context->Execute(payload_cnt, payloads, input_fn, output_fn); } } // extern "C" } // namespace transformer } // namespace custom } // namespace inferenceserver } // namespace nvidia
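The .hip source above is the hipified counterpart of the .cu file that follows; apart from the hip*/hipblas renames and the generated header comment the two are essentially identical. One pattern worth calling out is the allocate-once guard in Context::AllocateCudaBuffers, which refuses to allocate into a pointer that already owns device memory, so every buffer sized from max_batch_size and tw_._max_step is created exactly once during Init(). A minimal sketch of that guard in HIP (the helper name allocate_once is hypothetical):

#include <hip/hip_runtime.h>
#include <cstddef>
#include <iostream>

// Allocate-once guard: fail instead of silently leaking an existing buffer.
static int allocate_once(void** pdata, size_t byte_size) {
  if (*pdata != nullptr) {
    std::cerr << "pointer already owns device memory, refusing to reallocate\n";
    return 1;  // corresponds to kCudaMalloc in the backend above
  }
  hipError_t err = hipMalloc(pdata, byte_size);
  if (err != hipSuccess) {
    std::cerr << "hipMalloc failed: " << hipGetErrorString(err) << "\n";
    return 1;
  }
  return 0;  // corresponds to kSuccess
}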
056e634276a5a7b4bebea3f3db39e26243c717c3.cu
// Copyright (c) 2019, ByteDance CORPORATION. All rights reserved. #include <cuda.h> #include <unistd.h> #include <string> #include "../model/decoder.h" #include "../model/encoder.h" #include "model_config.pb.h" #include "../proto/transformer_weight.h" #include "../server/custom.h" #include "../server/model_config.h" #include "../server/model_config_cuda.h" #include "../tools/util.h" #include "../kernels/embKernels.h" /** @file Transformer server based on tensorrt inference server. */ #define LOG_ERROR std::cerr #define LOG_INFO std::cout #ifdef FP16_MODE const lightseq::cuda::OperationType OPTYPE = lightseq::cuda::OperationType::FP16; #else const lightseq::cuda::OperationType OPTYPE = lightseq::cuda::OperationType::FP32; #endif namespace nvidia { namespace inferenceserver { namespace custom { namespace transformer { // Integer error codes. TRTIS requires that success must be 0. All // other codes are interpreted by TRTIS as failures. enum ErrorCodes { kSuccess, kUnknown, kInvalidModelConfig, kGpuNotSupported, kInputOutputShape, kInputName, kOutputName, kInputOutputDataType, kInputContents, kInputSize, kOutputBuffer, kCudaDevice, kCudaMalloc, kCudaMemcpy, kCudaExecute, kCudaStream, kCublas, kCpuExecute, kWeightLoad, kModelSize }; // Context object. All state must be kept in this object. class Context { public: Context(const std::string& instance_name, const ModelConfig& config, const int gpu_device); ~Context(); // Initialize the context. Validate that the model configuration, // etc. is something that we can handle. int Init(); // Perform custom execution on the payloads. int Execute(const uint32_t payload_cnt, CustomPayload* payloads, CustomGetNextInputFn_t input_fn, CustomGetOutputFn_t output_fn); private: typedef lightseq::cuda::OperationTypeTraits<OPTYPE> _optraits; int FreeCudaBuffers(); int AllocateCudaBuffers(void** pdata, size_t byte_size); int GetInputTensorGPU(CustomGetNextInputFn_t input_fn, void* input_context, const char* name, const size_t expected_byte_size, void* input); int ExecuteGPU(const uint32_t payload_cnt, CustomPayload* payloads, CustomGetNextInputFn_t input_fn, CustomGetOutputFn_t output_fn); // The name of this instance of the backend. const std::string instance_name_; // The model configuration. const ModelConfig model_config_; // The GPU device ID to execute on or CUSTOM_NO_GPU_DEVICE if should // execute on CPU. const int gpu_device_; // The data-type of the input and output tensors. Must be either // INT32 or FP32. DataType datatype_; int datatype_bytesize_; // CUDA memory buffers for input and output tensors. void* d_input_; void* d_input_copy_; void* d_padding_mask_; void* d_encoder_output_; void* d_buf_; void* d_output_; void* d_src_lang_id_; void* d_trg_lang_id_; // The contexts executing on a GPU, the CUDA stream to use for the // execution. 
cudaStream_t stream_; cublasHandle_t hd_; lightseq::cuda::TransformerWeight<OPTYPE> tw_; std::shared_ptr<lightseq::cuda::Decoder<OPTYPE>> decoder_; std::shared_ptr<lightseq::cuda::Encoder<OPTYPE>> encoder_; }; Context::Context(const std::string& instance_name, const ModelConfig& model_config, const int gpu_device) : instance_name_(instance_name), model_config_(model_config), gpu_device_(gpu_device), datatype_(DataType::TYPE_INVALID), d_input_(nullptr), d_input_copy_(nullptr), d_padding_mask_(nullptr), d_encoder_output_(nullptr), d_buf_(nullptr), d_output_(nullptr), d_src_lang_id_(nullptr), d_trg_lang_id_(nullptr), stream_(nullptr), hd_(nullptr) {} Context::~Context() { FreeCudaBuffers(); if (hd_ != nullptr) { cublasStatus_t cuerr = cublasDestroy(hd_); if (cuerr != CUBLAS_STATUS_SUCCESS) { LOG_ERROR << "Failed to destroy cublas handle."; } hd_ = nullptr; } if (stream_ != nullptr) { cudaError_t cuerr = cudaStreamDestroy(stream_); if (cuerr != cudaSuccess) { LOG_ERROR << "Failed to destroy cuda stream: " << cudaGetErrorString(cuerr); } stream_ = nullptr; } } int Context::FreeCudaBuffers() { if (d_input_ != nullptr) { cudaError_t cuerr = cudaFree(d_input_); if (cuerr != cudaSuccess) { LOG_ERROR << "Failed to free cuda memory: " << cudaGetErrorString(cuerr); } d_input_ = nullptr; } if (d_input_copy_ != nullptr) { cudaError_t cuerr = cudaFree(d_input_copy_); if (cuerr != cudaSuccess) { LOG_ERROR << "Failed to free cuda memory: " << cudaGetErrorString(cuerr); } d_input_copy_ = nullptr; } if (d_padding_mask_ != nullptr) { cudaError_t cuerr = cudaFree(d_padding_mask_); if (cuerr != cudaSuccess) { LOG_ERROR << "Failed to free cuda memory: " << cudaGetErrorString(cuerr); } d_padding_mask_ = nullptr; } if (d_encoder_output_ != nullptr) { cudaError_t cuerr = cudaFree(d_encoder_output_); if (cuerr != cudaSuccess) { LOG_ERROR << "Failed to free cuda memory: " << cudaGetErrorString(cuerr); } d_encoder_output_ = nullptr; } if (d_buf_ != nullptr) { cudaError_t cuerr = cudaFree(d_buf_); if (cuerr != cudaSuccess) { LOG_ERROR << "Failed to free cuda memory: " << cudaGetErrorString(cuerr); } d_buf_ = nullptr; } if (d_output_ != nullptr) { cudaError_t cuerr = cudaFree(d_output_); if (cuerr != cudaSuccess) { LOG_ERROR << "Failed to free cuda memory: " << cudaGetErrorString(cuerr); } d_output_ = nullptr; } if (d_src_lang_id_ != nullptr) { cudaError_t cuerr = cudaFree(d_src_lang_id_); if (cuerr != cudaSuccess) { LOG_ERROR << "Failed to free cuda memory: " << cudaGetErrorString(cuerr); } d_src_lang_id_ = nullptr; } if (d_trg_lang_id_ != nullptr) { cudaError_t cuerr = cudaFree(d_trg_lang_id_); if (cuerr != cudaSuccess) { LOG_ERROR << "Failed to free cuda memory: " << cudaGetErrorString(cuerr); } d_trg_lang_id_ = nullptr; } return kSuccess; } int Context::AllocateCudaBuffers(void** pdata, size_t byte_size) { // Allocate GPU memory buffers large enough for each input and // output. For performance we allocate once during initialization // instead of doing it each time we execute. 
if (*pdata != nullptr) { LOG_ERROR << "given pointer own gpu memory before allocate" << std::endl; return kCudaMalloc; } cudaError_t cuerr = cudaMalloc(pdata, byte_size); if (cuerr != cudaSuccess) { LOG_ERROR << "unable to allocate memory in function AllocateCudaBuffers" << cudaGetErrorString(cuerr); return kCudaMalloc; } cuerr = cudaStreamSynchronize(stream_); if (cuerr != cudaSuccess) { LOG_ERROR << "Stream synchronize failed after cudaMalloc" << cudaGetErrorString(cuerr) << std::endl; return kCudaMalloc; } return kSuccess; } int Context::Init() { // Very important to set the CUDA device before performing any // CUDA API calls. The device is maintained per-CPU-thread, and // the same CPU thread will always be used with this instance of // the backend, so only need to set the device once. LOG_INFO << "Trtis instance init start" << std::endl; cudaError_t cuerr = cudaSetDevice(gpu_device_); if (cuerr != cudaSuccess) { LOG_ERROR << "Failed to set CUDA device to " << gpu_device_ << ": " << cudaGetErrorString(cuerr); return kCudaDevice; } const int cuda_stream_priority = GetCudaStreamPriority(model_config_.optimization().priority()); cuerr = cudaStreamCreateWithPriority(&stream_, cudaStreamDefault, cuda_stream_priority); if (cuerr != cudaSuccess) { LOG_ERROR << "Unable to create stream" << cudaGetErrorString(cuerr); return kCudaStream; } cublasStatus_t cublaserr = cublasCreate(&hd_); if (cublaserr != CUBLAS_STATUS_SUCCESS) { LOG_ERROR << "Failed to creat cublas handle"; return kCublas; } cublaserr = cublasSetStream(hd_, stream_); if (cublaserr != CUBLAS_STATUS_SUCCESS) { LOG_ERROR << "Failed to set stream for cublas handle"; return kCublas; } if (model_config_.input_size() != 1) { return kInputOutputShape; } datatype_ = model_config_.input(0).data_type(); if (datatype_ != DataType::TYPE_INT32) { return kInputOutputDataType; } datatype_bytesize_ = GetDataTypeByteSize(datatype_); if (model_config_.input(0).name() != "src_ids:0") { return kInputName; } if (model_config_.output_size() != 1) { return kInputOutputShape; } if (model_config_.output(0).data_type() != datatype_) { return kInputOutputDataType; } if (model_config_.output(0).name() != "trg_ids:0") { return kOutputName; } char* mz = getenv("MODEL_ZOO"); if (mz == NULL) { LOG_ERROR << "plz set environment variable MODEL_ZOO !" 
<< std::endl; return kWeightLoad; } std::string model_path = mz; model_path += "/" + model_config_.name(); std::string weight_path = model_path + "/" + "transformer.pb"; std::ifstream fproto(weight_path.c_str()); if (!fproto.good()) { weight_path = model_path + "/" + "transformer.hdf5"; std::ifstream fhdf5(weight_path.c_str()); if (!fhdf5.good()) { LOG_ERROR << "Neither transformer.pb nor transformer.hdf5 " << "exists under " << model_path << std::endl; return kWeightLoad; } } LOG_INFO << "Load model weight from " << weight_path << std::endl; std::string res = tw_.initializing(weight_path); if (!res.empty()) { LOG_ERROR << res << std::endl; return kWeightLoad; } tw_.print_model_config(); int max_batch_size = model_config_.max_batch_size(); int err; err = AllocateCudaBuffers( &d_input_, max_batch_size * tw_._max_step * datatype_bytesize_); if (err != kSuccess) { return err; } err = AllocateCudaBuffers( &d_input_copy_, max_batch_size * tw_._max_step * datatype_bytesize_); if (err != kSuccess) { return err; } err = AllocateCudaBuffers( &d_padding_mask_, max_batch_size * tw_._max_step * datatype_bytesize_); if (err != kSuccess) { return err; } // FIXME err = AllocateCudaBuffers( &d_encoder_output_, max_batch_size * tw_._max_step * tw_._hidden_size * datatype_bytesize_); if (err != kSuccess) { return err; } err = AllocateCudaBuffers( &d_output_, max_batch_size * tw_._max_step * datatype_bytesize_); if (err != kSuccess) { return err; } err = AllocateCudaBuffers(&d_src_lang_id_, max_batch_size * datatype_bytesize_); if (err != kSuccess) { return err; } err = AllocateCudaBuffers(&d_trg_lang_id_, max_batch_size * datatype_bytesize_); if (err != kSuccess) { return err; } encoder_ = std::make_shared<lightseq::cuda::Encoder<OPTYPE>>( max_batch_size, reinterpret_cast<int*>(d_input_), reinterpret_cast<int*>(d_padding_mask_), reinterpret_cast<_optraits::DataType*>(d_encoder_output_), tw_, stream_, hd_, reinterpret_cast<int*>(d_src_lang_id_)); res = encoder_->check(); if (!res.empty()) { LOG_ERROR << res << std::endl; return kModelSize; } decoder_ = std::make_shared<lightseq::cuda::Decoder<OPTYPE>>( max_batch_size, reinterpret_cast<int*>(d_padding_mask_), reinterpret_cast<_optraits::DataType*>(d_encoder_output_), reinterpret_cast<int*>(d_output_), tw_, stream_, hd_, false, reinterpret_cast<int*>(d_trg_lang_id_)); res = decoder_->check(); if (!res.empty()) { LOG_ERROR << res << std::endl; return kModelSize; } long buf_bytesize = max(encoder_->compute_buffer_bytesize(), decoder_->compute_buffer_bytesize()); err = AllocateCudaBuffers(&d_buf_, buf_bytesize); if (err != kSuccess) { return err; } // encoder and decoder use the same buffer to save gpu memory useage encoder_->init_buffer(d_buf_); decoder_->init_buffer(d_buf_); // Wait for all init finish. cuerr = cudaStreamSynchronize(stream_); if (cuerr != cudaSuccess) { LOG_ERROR << "failed to init GPU for transformer: " << cudaGetErrorString(cuerr) << std::endl; return kCudaExecute; } LOG_INFO << "transformer, release-version[" << __DATE__ << " " << __TIME__ << "], Trtis instance init succeed!" << std::endl; return kSuccess; } int Context::GetInputTensorGPU(CustomGetNextInputFn_t input_fn, void* input_context, const char* name, const size_t expected_byte_size, void* input) { // The values for an input tensor are not necessarily in one // contiguous chunk, so we copy the chunks into 'input', which // points to CUDA memory. 
uint64_t total_content_byte_size = 0; while (true) { const void* content; uint64_t content_byte_size = expected_byte_size; if (!input_fn(input_context, name, &content, &content_byte_size)) { return kInputContents; } // If 'content' returns nullptr we have all the input. if (content == nullptr) { break; } // If the total amount of content received exceeds what we expect // then something is wrong. if ((total_content_byte_size + content_byte_size) > expected_byte_size) { return kInputSize; } cudaError_t cuerr = cudaMemcpyAsync( reinterpret_cast<char*>(input) + total_content_byte_size, content, content_byte_size, cudaMemcpyHostToDevice, stream_); if (cuerr != cudaSuccess) { LOG_ERROR << "failed to copy input values to GPU for transformer: " << cudaGetErrorString(cuerr) << std::endl; LOG_ERROR << "try to copy " << total_content_byte_size + content_byte_size << " bytes from input" << std::endl; return kCudaMemcpy; } total_content_byte_size += content_byte_size; } // Make sure we end up with exactly the amount of input we expect. if (total_content_byte_size != expected_byte_size) { return kInputSize; } return kSuccess; } int Context::ExecuteGPU(const uint32_t payload_cnt, CustomPayload* payloads, CustomGetNextInputFn_t input_fn, CustomGetOutputFn_t output_fn) { // Each payload represents a related set of inputs and required // outputs. Each payload may have a different batch size. The total // batch-size of all payloads will not exceed the max-batch-size // specified in the model configuration. if (payload_cnt == 0) { return kSuccess; } std::vector<int64_t> shape( payloads[0].input_shape_dims[0], payloads[0].input_shape_dims[0] + payloads[0].input_shape_dim_cnts[0]); int err; for (uint32_t pidx = 0; pidx < payload_cnt; ++pidx) { CustomPayload& payload = payloads[pidx]; // For this payload the expected size of the input and output // tensors is determined by the batch-size of this payload. uint64_t batch_seq_len = payload.input_shape_dims[0][0]; if (batch_seq_len > tw_._max_step) { LOG_ERROR << "too long seq_len: " << batch_seq_len << ", skip this request" << std::endl; return kInputSize; } const uint64_t batchn_element_count = payload.batch_size * batch_seq_len; const uint64_t batchn_byte_size = batchn_element_count * datatype_bytesize_; // Copy the input tensors into the appropriate CUDA memory buffer. err = GetInputTensorGPU(input_fn, payload.input_context, "src_ids:0", batchn_byte_size, tw_._multilg_type == 0 ? d_input_ : d_input_copy_); if (err != kSuccess) { payload.error_code = err; continue; } // for multilg if (tw_._multilg_type != 0) { // multilg request: src_lang_id, trg_lang_id, src_token0, src_token1... lightseq::cuda::launch_split_multilg_request( (int*)d_input_copy_, (int*)d_src_lang_id_, (int*)d_trg_lang_id_, (int*)d_input_, payload.batch_size, batch_seq_len, stream_); } if (tw_._multilg_type == 1) { batch_seq_len -= 2; } if (tw_._multilg_type == 2) { batch_seq_len -= 1; } encoder_->run_one_infer(payload.batch_size, batch_seq_len); decoder_->run_one_infer(payload.batch_size, batch_seq_len); // The output shape is [payload-batch-size, shape] if the model // configuration supports batching, or just [shape] if the // model configuration does not support batching. 
std::vector<int64_t> output_shape = {payload.batch_size, decoder_->_cur_step + 1}; int64_t output_bytesize = output_shape[0] * output_shape[1] * datatype_bytesize_; const char* output_name = "trg_ids:0"; void* obuffer; if (!output_fn(payload.output_context, output_name, output_shape.size(), &output_shape[0], output_bytesize, &obuffer)) { payload.error_code = kOutputBuffer; break; } // If no error but the 'obuffer' is returned as nullptr, then // skip writing this output. if (obuffer == nullptr) { continue; } cudaError_t cuerr = cudaGetLastError(); if (cuerr != cudaSuccess) { LOG_ERROR << "failed to launch kernel: " << cudaGetErrorString(cuerr) << std::endl; payload.error_code = kCudaExecute; break; } cuerr = cudaMemcpyAsync(obuffer, d_output_, output_bytesize, cudaMemcpyDeviceToHost, stream_); if (cuerr != cudaSuccess) { LOG_ERROR << "failed to copy output values from GPU for transformer: " << cudaGetErrorString(cuerr) << std::endl; payload.error_code = kCudaMemcpy; break; } } // Wait for all compute and memcpy to complete. cudaError_t cuerr = cudaStreamSynchronize(stream_); if (cuerr != cudaSuccess) { LOG_ERROR << "failed to synchronize GPU for transformer: " << cudaGetErrorString(cuerr) << std::endl; return kCudaExecute; } return kSuccess; } int Context::Execute(const uint32_t payload_cnt, CustomPayload* payloads, CustomGetNextInputFn_t input_fn, CustomGetOutputFn_t output_fn) { if (gpu_device_ == CUSTOM_NO_GPU_DEVICE) { return kCpuExecute; } else { return ExecuteGPU(payload_cnt, payloads, input_fn, output_fn); } } ///////////// extern "C" { int CustomInitialize(const CustomInitializeData* data, void** custom_context) { // Convert the serialized model config to a ModelConfig object. ModelConfig model_config; if (!model_config.ParseFromString(std::string( data->serialized_model_config, data->serialized_model_config_size))) { return kInvalidModelConfig; } // Create the context and validate that the model configuration is // something that we can handle. 
Context* context = new Context(std::string(data->instance_name), model_config, data->gpu_device_id); int err = context->Init(); if (err != kSuccess) { return err; } *custom_context = static_cast<void*>(context); return kSuccess; } int CustomFinalize(void* custom_context) { if (custom_context != nullptr) { Context* context = static_cast<Context*>(custom_context); delete context; } return kSuccess; } const char* CustomErrorString(void* custom_context, int errcode) { switch (errcode) { case kSuccess: return "success"; case kInvalidModelConfig: return "invalid model configuration"; case kGpuNotSupported: return "execution on GPU not supported"; case kInputOutputShape: return "model must have two inputs and two outputs with the same shape"; case kInputName: return "model inputs must be named 'src_ids:0' and 'INPUT1'"; case kOutputName: return "model outputs must be named 'trg_ids:0' and 'OUTPUT1'"; case kInputOutputDataType: return "model inputs and outputs must have TYPE_INT32 or TYPE_FP32 " "data-type"; case kInputContents: return "unable to get input tensor values"; case kInputSize: return "unexpected size for input tensor"; case kOutputBuffer: return "unable to get buffer for output tensor values"; case kCudaDevice: return "cudaSetDevice failed"; case kCudaMalloc: return "cudaMalloc failed"; case kCudaMemcpy: return "cudaMemcpy failed"; case kCudaExecute: return "cuda execution failed"; case kCudaStream: return "failed to create CUDA stream"; case kCublas: return "failed to create Cublas handle"; case kCpuExecute: return "cpu execution failed"; case kWeightLoad: return "load transformer weight in .pb failed"; case kModelSize: return "inappropriate transformer model size"; default: break; } return "unknown error"; } int CustomExecute(void* custom_context, const uint32_t payload_cnt, CustomPayload* payloads, CustomGetNextInputFn_t input_fn, CustomGetOutputFn_t output_fn) { if (custom_context == nullptr) { return kUnknown; } Context* context = static_cast<Context*>(custom_context); return context->Execute(payload_cnt, payloads, input_fn, output_fn); } } // extern "C" } // namespace transformer } // namespace custom } // namespace inferenceserver } // namespace nvidia
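Another pattern shared by both versions is the chunked input gather in Context::GetInputTensorGPU: the inference server callback may hand back a tensor in several non-contiguous host chunks, so each chunk is copied onto the stream with an async memcpy and the running byte count is checked against the expected size. A simplified sketch in CUDA, with next_chunk standing in for the real CustomGetNextInputFn_t callback:

#include <cuda_runtime.h>
#include <cstdint>

// Gather a tensor that arrives in host-side chunks into one device buffer.
// Returns 0 on success, 1 on a size mismatch or copy failure.
static int gather_input(const void* (*next_chunk)(uint64_t* bytes),
                        void* d_input, uint64_t expected_bytes,
                        cudaStream_t stream) {
  uint64_t total = 0;
  for (;;) {
    uint64_t chunk_bytes = 0;
    const void* chunk = next_chunk(&chunk_bytes);
    if (chunk == nullptr) break;                         // no more chunks
    if (total + chunk_bytes > expected_bytes) return 1;  // received more than expected
    cudaError_t err = cudaMemcpyAsync(static_cast<char*>(d_input) + total, chunk,
                                      chunk_bytes, cudaMemcpyHostToDevice, stream);
    if (err != cudaSuccess) return 1;
    total += chunk_bytes;
  }
  return (total == expected_bytes) ? 0 : 1;  // must end up with exactly the expected size
}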
98b81066f9c7d8c127b1722a3cdf424ee89a85f7.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> template <typename scalar_t> __device__ __forceinline__ scalar_t sigmoid(scalar_t const &z) { return 1.0 / (1.0 + exp(-z)); } template <typename scalar_t> __device__ __forceinline__ scalar_t tanh(scalar_t const &z) { const auto exp_n2z = exp(-2 * z); return (1.0 - exp_n2z) / (1.0 + exp_n2z); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_sigmoid_with_output(scalar_t const &a) { return a * (1.0 - a); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_tanh_with_output(scalar_t const &a) { return 1.0 - (a * a); } template <typename scalar_t> __global__ void forward_part_0( const scalar_t* __restrict__ hidden, const scalar_t* __restrict__ cell, scalar_t* __restrict__ hiddens_storage, scalar_t* __restrict__ cells_storage, scalar_t* __restrict__ current_gate, const scalar_t* __restrict__ mean_fig, const scalar_t* __restrict__ var_fig, const scalar_t epsilon, scalar_t* __restrict__ stds_storage, scalar_t* __restrict__ normalized_storage, const scalar_t* __restrict__ gamma_fig, const scalar_t* __restrict__ bias_fig, scalar_t* __restrict__ activated_storage, scalar_t* __restrict__ forgotten_cell, const scalar_t* __restrict__ dropout_candidate_cell, const int64_t batch_size, const int64_t state_size, const int64_t state_size_3) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < state_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < batch_size) { const int process_idx = blockIdx.z; if (process_idx < 4) { if (process_idx < 3) //Normalizations and stuff { const int mean_var_std_idx = batch * 3 + process_idx; const scalar_t std = sqrt(var_fig[mean_var_std_idx] + epsilon); if (column == 0) { stds_storage[mean_var_std_idx] = std; } const int gate_val_storage_idx = batch * state_size_3 + process_idx * state_size + column; const int gate_val_local_idx = gate_val_storage_idx + batch * state_size; scalar_t gate_val = (current_gate[gate_val_local_idx] - mean_fig[mean_var_std_idx]) / std; normalized_storage[gate_val_storage_idx] = gate_val; const int gamma_bias_idx = process_idx * state_size + column; if (process_idx < 2) //forget gate & input gate { gate_val = sigmoid((gate_val * gamma_fig[gamma_bias_idx]) + bias_fig[gamma_bias_idx]); if (process_idx == 0) //forget cell memory { const int local_state_idx = batch * state_size + column; forgotten_cell[local_state_idx] = gate_val * cell[local_state_idx]; } activated_storage[gate_val_storage_idx] = gate_val; current_gate[gate_val_local_idx] = gate_val; } else //candidate cell { gate_val = tanh((gate_val * gamma_fig[gamma_bias_idx]) + bias_fig[gamma_bias_idx]); activated_storage[gate_val_storage_idx] = gate_val; current_gate[gate_val_local_idx] = gate_val * dropout_candidate_cell[batch * state_size + column]; } } else //Hidden, Cell Storage { const int local_state_idx = batch * state_size + column; hiddens_storage[local_state_idx] = hidden[local_state_idx]; cells_storage[local_state_idx] = cell[local_state_idx]; } } } } } template <typename scalar_t> __global__ void forward_part_1( const scalar_t* __restrict__ forgotten_cell, const scalar_t* __restrict__ current_gate, scalar_t* __restrict__ cell, const int64_t batch_size, const int64_t state_size, const int64_t gate_size) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < state_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if 
(batch < batch_size) { const int local_state_idx = batch * state_size + column; const int local_input_gate_idx = batch * gate_size + state_size + column; cell[local_state_idx] = forgotten_cell[local_state_idx] + current_gate[local_input_gate_idx] * current_gate[local_input_gate_idx + state_size]; } } } template <typename scalar_t> __global__ void forward_part_2( scalar_t* __restrict__ cell, const scalar_t* __restrict__ mean, const scalar_t* __restrict__ var, const scalar_t epsilon, scalar_t* __restrict__ new_cell_stds_storage, scalar_t* __restrict__ new_cell_normalized_storage, const scalar_t* __restrict__ gamma_new_cell, const scalar_t* __restrict__ beta_new_cell, scalar_t* __restrict__ hc, scalar_t* __restrict__ current_gate, const scalar_t* __restrict__ weight_co, const int64_t batch_size, const int64_t state_size, const int64_t state_size_2) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < state_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < batch_size) { const int cell_idx = batch * state_size + column; const scalar_t std = sqrt(var[batch] + epsilon); if (column == 0) { new_cell_stds_storage[batch] = std; } scalar_t cell_val = (cell[cell_idx] - mean[batch]) / std; new_cell_normalized_storage[cell_idx] = cell_val; cell_val = (cell_val * gamma_new_cell[column]) + beta_new_cell[column]; cell[cell_idx] = cell_val; const int hc_idx = cell_idx + (batch + 1) * state_size; hc[hc_idx] = cell_val; current_gate[hc_idx + (batch + 1) * state_size_2] += cell_val * weight_co[column]; } } } template <typename scalar_t> __global__ void forward_part_3( const scalar_t* __restrict__ current_gate, const scalar_t* __restrict__ mean, const scalar_t* __restrict__ var, const scalar_t epsilon, scalar_t* __restrict__ output_gate_stds_storage, scalar_t* __restrict__ output_gate_normalized_storage, const scalar_t* __restrict__ gamma_o, const scalar_t* __restrict__ bias_o, scalar_t* __restrict__ output_gate_activated_storage, const scalar_t* __restrict__ cell, scalar_t* __restrict__ tanh_new_cell_storage, scalar_t* __restrict__ hidden, scalar_t* __restrict__ hc, scalar_t* __restrict__ outputs, const scalar_t* __restrict__ dropout_output, const int64_t batch_size, const int64_t state_size, const int64_t state_size_3) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < state_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < batch_size) { const int state_and_output_gate_storage_idx = batch * state_size + column; const int hc_idx = state_and_output_gate_storage_idx + batch * state_size; const int output_gate_idx = state_and_output_gate_storage_idx + (batch + 1) * state_size_3; const scalar_t std = sqrt(var[batch] + epsilon); if (column == 0) { output_gate_stds_storage[batch] = std; } scalar_t output_gate_val = (current_gate[output_gate_idx] - mean[batch]) / std; output_gate_normalized_storage[state_and_output_gate_storage_idx] = output_gate_val; output_gate_val = sigmoid((output_gate_val * gamma_o[column]) + bias_o[column]); output_gate_activated_storage[state_and_output_gate_storage_idx] = output_gate_val; const scalar_t tanh_cell = tanh(cell[state_and_output_gate_storage_idx]); tanh_new_cell_storage[state_and_output_gate_storage_idx] = tanh_cell; const scalar_t hidden_val = output_gate_val * tanh_cell; hidden[state_and_output_gate_storage_idx] = hidden_val; hc[hc_idx] = hidden_val; outputs[state_and_output_gate_storage_idx] = hidden_val * dropout_output[state_and_output_gate_storage_idx]; } } } 
std::vector<at::Tensor> ln_peephole_lstm_layer_cuda_forward( at::Tensor const &input, at::Tensor const &weight_ih, at::Tensor const &weight_hh, at::Tensor const &weight_ch, at::Tensor const &bias, at::Tensor const &gamma_f, at::Tensor const &gamma_i, at::Tensor const &gamma_g, at::Tensor const &gamma_o, at::Tensor const &gamma_new_cell, at::Tensor const &beta_new_cell, at::Tensor &hidden, at::Tensor &cell, double const &epsilon, double const &dropout_p, bool const &dropout_on_output, bool const &training, int64_t const &sequence_length, int64_t const &batch_size, int64_t const &input_size, int64_t const &state_size, int64_t const &state_size_2, int64_t const &state_size_3, int64_t const &gate_size) { const auto options = weight_ih.options(); auto hiddens = at::empty({ sequence_length, batch_size, state_size }, options); auto cells = at::empty({ sequence_length + 1, batch_size, state_size }, options); auto gates_fig_stds = at::empty({ sequence_length, batch_size, 3, 1 }, options); auto gates_fig_normalized = at::empty({ sequence_length, batch_size, 3, state_size }, options); auto gates_fig = at::empty({ sequence_length, batch_size, 3, state_size }, options); auto gates_o_stds = at::empty({ sequence_length, batch_size, 1 }, options); auto gates_o_normalized = at::empty({ sequence_length, batch_size, state_size }, options); auto gates_o = at::empty({ sequence_length, batch_size, state_size }, options); auto new_cells_stds = at::empty({ sequence_length, batch_size, 1 }, options); auto new_cells_normalized = at::empty({ sequence_length, batch_size, state_size }, options); auto tanh_new_cells = at::empty({ sequence_length, batch_size, state_size }, options); auto outputs = at::empty({ sequence_length, batch_size, state_size }, options); at::Tensor dropout; if (dropout_p <= 0. || !training) { dropout = at::ones({ 2, sequence_length, batch_size, state_size }, options); } else { if (dropout_p >= 1.) 
{ dropout = at::zeros({ 2, sequence_length, batch_size, state_size }, options); } else { dropout = at::bernoulli(at::zeros({ 2, sequence_length, batch_size, state_size }, options), (1 - dropout_p)).div(1 - dropout_p); } if (!dropout_on_output) { dropout[1] = 1; } } const auto dropout_candidate_cell = dropout[0]; const auto dropout_output = dropout[1]; const auto ih = at::matmul(input, weight_ih.t()); hidden = hidden.clone(); cell = cell.clone(); auto hc = at::cat({ hidden, cell }, 1); const auto weight_hc_h = at::cat({ weight_hh.t(), at::cat({ weight_ch.slice(0, 0, state_size).diag(), weight_ch.slice(0, state_size, state_size_2).diag(), at::zeros({ state_size_2, state_size }, options) }).t() }); const auto weight_co = weight_ch.slice(0, state_size_2); const auto gamma_fig = at::stack({ gamma_f, gamma_i, gamma_g }); const auto bias_fig = bias.slice(0, 0, state_size_3).view({ 3, state_size }); const auto bias_o = bias.slice(0, state_size_3); at::Tensor current_gate; auto forgotten_cell = at::empty_like(cell); at::Tensor mean; at::Tensor var; const dim3 threads(32, 8); const dim3 blocks_0((state_size + threads.x - 1) / threads.x, (batch_size + threads.y - 1) / threads.y, 4); const dim3 blocks_1((state_size + threads.x - 1) / threads.x, (batch_size + threads.y - 1) / threads.y); AT_DISPATCH_FLOATING_TYPES(ih.type(), "ln_peephole_lstm_layer_cuda_forward", ([&] { for (int i = 0; i < sequence_length; i++) { current_gate = at::addmm(ih[i], hc, weight_hc_h).view({ batch_size, 4, state_size }); mean = current_gate.slice(1, 0, 3).mean(/*dim=*/2, /*keepdim=*/false); var = current_gate.slice(1, 0, 3).var(/*dim=*/2, /*unbiased=*/false, /*keepdim=*/false); hipLaunchKernelGGL(( forward_part_0<scalar_t>) , dim3(blocks_0), dim3(threads), 0, 0, hidden.data<scalar_t>(), cell.data<scalar_t>(), hiddens[i].data<scalar_t>(), cells[i].data<scalar_t>(), current_gate.data<scalar_t>(), mean.data<scalar_t>(), var.data<scalar_t>(), epsilon, gates_fig_stds[i].data<scalar_t>(), gates_fig_normalized[i].data<scalar_t>(), gamma_fig.data<scalar_t>(), bias_fig.data<scalar_t>(), gates_fig[i].data<scalar_t>(), forgotten_cell.data<scalar_t>(), dropout_candidate_cell[i].data<scalar_t>(), batch_size, state_size, state_size_3); hipLaunchKernelGGL(( forward_part_1<scalar_t>) , dim3(blocks_1), dim3(threads), 0, 0, forgotten_cell.data<scalar_t>(), current_gate.data<scalar_t>(), cell.data<scalar_t>(), batch_size, state_size, gate_size); mean = cell.mean(/*dim=*/1, /*keepdim=*/false); var = cell.var(/*dim=*/1, /*unbiased=*/false, /*keepdim=*/false); hipLaunchKernelGGL(( forward_part_2<scalar_t>) , dim3(blocks_1), dim3(threads), 0, 0, cell.data<scalar_t>(), mean.data<scalar_t>(), var.data<scalar_t>(), epsilon, new_cells_stds[i].data<scalar_t>(), new_cells_normalized[i].data<scalar_t>(), gamma_new_cell.data<scalar_t>(), beta_new_cell.data<scalar_t>(), hc.data<scalar_t>(), current_gate.data<scalar_t>(), weight_co.data<scalar_t>(), batch_size, state_size, state_size_2); mean = current_gate.select(1, 3).mean(/*dim=*/1, /*keepdim=*/false); var = current_gate.select(1, 3).var(/*dim=*/1, /*unbiased=*/false, /*keepdim=*/false); hipLaunchKernelGGL(( forward_part_3<scalar_t>) , dim3(blocks_1), dim3(threads), 0, 0, current_gate.data<scalar_t>(), mean.data<scalar_t>(), var.data<scalar_t>(), epsilon, gates_o_stds[i].data<scalar_t>(), gates_o_normalized[i].data<scalar_t>(), gamma_o.data<scalar_t>(), bias_o.data<scalar_t>(), gates_o[i].data<scalar_t>(), cell.data<scalar_t>(), tanh_new_cells[i].data<scalar_t>(), hidden.data<scalar_t>(), 
hc.data<scalar_t>(), outputs[i].data<scalar_t>(), dropout_output[i].data<scalar_t>(), batch_size, state_size, state_size_3); } })); cells[sequence_length] = cell; return { outputs, hc.slice(1, 0, state_size).contiguous(), hc.slice(1, state_size).contiguous(), input, hiddens, cells, gates_fig, gates_fig_normalized, gates_fig_stds, gates_o, gates_o_normalized, gates_o_stds, new_cells_normalized, new_cells_stds, tanh_new_cells, dropout }; } //////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename scalar_t> __global__ void backward_preparation( scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ dropout_output, const scalar_t* __restrict__ dropout_candidate_cell, const scalar_t* __restrict__ cells, const scalar_t* __restrict__ gates_fig, const scalar_t* __restrict__ gates_o, scalar_t* __restrict__ grad_gates_layer_normalized, scalar_t* __restrict__ gates_fig_stds, scalar_t* __restrict__ gates_o_stds, scalar_t* __restrict__ new_cells_stds, const scalar_t* __restrict__ tanh_new_cells, scalar_t* __restrict__ grad_new_cells_wrt_tanh_new_cell, const int64_t n_total_batches, const int64_t state_size, const int64_t state_size_2, const int64_t state_size_3, const int64_t gate_size) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < state_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < n_total_batches) { const int process_idx = blockIdx.z; if (process_idx < 6) { if (process_idx == 0) { grad_gates_layer_normalized[batch * gate_size + column] = cells[batch * state_size + column] * d_sigmoid_with_output(gates_fig[batch * state_size_3 + column]); if (column == 0) { gates_fig_stds[batch * 3] *= state_size; } } else{if (process_idx == 1) { const int dropout_idx = batch * state_size + column; const int candidate_cell_idx = dropout_idx + (batch + 1) * state_size_2; const int input_gate_idx = candidate_cell_idx - state_size; const int store_idx = input_gate_idx + batch * state_size; grad_gates_layer_normalized[store_idx] = gates_fig[candidate_cell_idx] * d_sigmoid_with_output(gates_fig[input_gate_idx]) * dropout_candidate_cell[dropout_idx]; if (column == 0) { gates_fig_stds[batch * 3 + 1] *= state_size; } } else{if (process_idx == 2) { const int dropout_idx = batch * state_size + column; const int candidate_cell_idx = dropout_idx + (batch + 1) * state_size_2; const int input_gate_idx = candidate_cell_idx - state_size; const int store_idx = candidate_cell_idx + batch * state_size; grad_gates_layer_normalized[store_idx] = gates_fig[input_gate_idx] * d_tanh_with_output(gates_fig[candidate_cell_idx]) * dropout_candidate_cell[dropout_idx]; if (column == 0) { gates_fig_stds[batch * 3 + 2] *= state_size; } } else{if (process_idx == 3) { const int tanh_and_output_idx = batch * state_size + column; grad_gates_layer_normalized[batch * gate_size + column + state_size_3] = tanh_new_cells[tanh_and_output_idx] * d_sigmoid_with_output(gates_o[tanh_and_output_idx]); if (column == 0) { gates_o_stds[batch] *= state_size; } } else{if (process_idx == 4) { const int index = batch * state_size + column; grad_output[index] *= dropout_output[index]; if (column == 0) { new_cells_stds[batch] *= state_size; } } else{if (process_idx == 5) { const int index = batch * state_size + column; grad_new_cells_wrt_tanh_new_cell[index] = d_tanh_with_output(tanh_new_cells[index]) * gates_o[index]; }}}}}} } } } } template <typename scalar_t> __global__ void backward_loop_part_0( const scalar_t* __restrict__ grad_hidden, 
scalar_t* __restrict__ grad_new_cell_wrt_tanh_new_cell, const scalar_t* __restrict__ grad_output, scalar_t* __restrict__ grad_gate_layer_normalized, const scalar_t* __restrict__ gamma_o, scalar_t* __restrict__ grad_output_gate_normalized, const scalar_t* __restrict__ output_gate_normalized, const int64_t batch_size, const int64_t state_size, const int64_t state_size_3) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < state_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < batch_size) { const int local_state_idx = batch * state_size + column; const int gate_idx = local_state_idx + (batch + 1) * state_size_3; scalar_t grad_val = grad_hidden[local_state_idx] + grad_output[local_state_idx]; grad_new_cell_wrt_tanh_new_cell[local_state_idx] *= grad_val; grad_val *= grad_gate_layer_normalized[gate_idx]; grad_gate_layer_normalized[gate_idx] = grad_val; grad_val *= gamma_o[column]; grad_output_gate_normalized[local_state_idx] = grad_val; grad_output_gate_normalized[batch_size * state_size + local_state_idx] = grad_val * output_gate_normalized[local_state_idx]; } } } template <typename scalar_t> __global__ void backward_loop_part_1( const scalar_t* __restrict__ grad_output_gate_normalized, const scalar_t* __restrict__ grad_output_gate_normalized_sums, const scalar_t* __restrict__ output_gate_normalized, const scalar_t* __restrict__ output_gate_std, scalar_t* __restrict__ grad_gate_raw, const scalar_t* __restrict__ weight_co, const scalar_t* __restrict__ grad_new_cell_wrt_tanh_new_cell, scalar_t* __restrict__ grad_cell, scalar_t* __restrict__ grad_new_cell, scalar_t* __restrict__ grad_new_cell_normalized, const scalar_t* __restrict__ new_cell_normalized, const scalar_t* __restrict__ gamma_new_cell, const int64_t batch_size, const int64_t state_size, const int64_t state_size_3) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < state_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < batch_size) { const int local_state_idx = batch * state_size + column; const int gate_idx = local_state_idx + (batch + 1) * state_size_3; scalar_t grad_val = (state_size * grad_output_gate_normalized[local_state_idx] - grad_output_gate_normalized_sums[batch] - output_gate_normalized[local_state_idx] * grad_output_gate_normalized_sums[batch_size + batch]) / output_gate_std[batch]; grad_gate_raw[gate_idx] = grad_val; grad_val = grad_val * weight_co[column] + grad_new_cell_wrt_tanh_new_cell[local_state_idx] + grad_cell[local_state_idx]; grad_new_cell[local_state_idx] = grad_val; grad_val *= gamma_new_cell[column]; grad_cell[local_state_idx] = grad_val; grad_new_cell_normalized[local_state_idx] = grad_val; grad_new_cell_normalized[batch_size * state_size + local_state_idx] = grad_val * new_cell_normalized[local_state_idx]; } } } template <typename scalar_t> __global__ void backward_loop_part_2( scalar_t* __restrict__ grad_cell, const scalar_t* __restrict__ grad_cell_sums, const scalar_t* __restrict__ new_cell_normalized, const scalar_t* __restrict__ new_cell_std, const int64_t batch_size, const int64_t state_size) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < state_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < batch_size) { const int local_state_idx = batch * state_size + column; grad_cell[local_state_idx] = (state_size * grad_cell[local_state_idx] - grad_cell_sums[batch] - new_cell_normalized[local_state_idx] * grad_cell_sums[batch_size + batch]) / new_cell_std[batch]; } 
} } template <typename scalar_t> __global__ void backward_loop_part_3( const scalar_t* __restrict__ grad_cell, scalar_t* __restrict__ grad_gate_layer_normalized, const scalar_t* __restrict__ gamma_f, const scalar_t* __restrict__ gamma_i, const scalar_t* __restrict__ gamma_g, scalar_t* __restrict__ grad_fig_gate_normalized, const scalar_t* __restrict__ fig_gate_normalized, const int64_t batch_size, const int64_t state_size, const int64_t state_size_3) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < state_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < batch_size) { const int process_idx = blockIdx.z; if (process_idx < 3) { const int local_state_idx = batch * state_size + column; const int fig_idx = batch * state_size_3 + process_idx * state_size + column; const int gate_idx = fig_idx + batch * state_size; scalar_t grad_val = grad_cell[local_state_idx] * grad_gate_layer_normalized[gate_idx]; grad_gate_layer_normalized[gate_idx] = grad_val; if (process_idx == 0) { grad_val *= gamma_f[column]; } else{if (process_idx == 1) { grad_val *= gamma_i[column]; } else { grad_val *= gamma_g[column]; }} grad_fig_gate_normalized[fig_idx] = grad_val; grad_fig_gate_normalized[batch_size * state_size_3 + fig_idx] = grad_val * fig_gate_normalized[fig_idx]; } } } } template <typename scalar_t> __global__ void backward_loop_part_4( const scalar_t* __restrict__ grad_fig_gate_normalized, const scalar_t* __restrict__ grad_fig_gate_normalized_sums, const scalar_t* __restrict__ gate_fig_normalized, const scalar_t* __restrict__ gate_fig_std, scalar_t* __restrict__ grad_gate_raw, const int64_t batch_size, const int64_t state_size, const int64_t state_size_3) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < state_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < batch_size) { const int process_idx = blockIdx.z; if (process_idx < 3) { const int fig_idx = batch * state_size_3 + process_idx * state_size + column; const int reduced_fig_idx = batch * 3 + process_idx; scalar_t grad_val = (state_size * grad_fig_gate_normalized[fig_idx] - grad_fig_gate_normalized_sums[reduced_fig_idx] - gate_fig_normalized[fig_idx] * grad_fig_gate_normalized_sums[batch_size * 3 + reduced_fig_idx]) / gate_fig_std[reduced_fig_idx]; grad_gate_raw[fig_idx + batch * state_size] = grad_val; } } } } template <typename scalar_t> __global__ void backward_loop_part_5( const scalar_t* __restrict__ grad_hci, const scalar_t* __restrict__ forget_gate, scalar_t* __restrict__ grad_hidden, scalar_t* __restrict__ grad_cell, scalar_t* __restrict__ grad_input, const int64_t batch_size, const int64_t input_size, const int64_t state_size, const int64_t state_size_2, const int64_t X_size) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < X_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < batch_size) { const int grad_idx = batch * X_size + column; if (column < state_size) { grad_hidden[batch * state_size + column] = grad_hci[grad_idx]; } else{if (column < state_size_2) { const int local_state_idx = (batch - 1) * state_size + column; grad_cell[local_state_idx] = grad_hci[grad_idx] + grad_cell[local_state_idx] * forget_gate[local_state_idx]; } else { grad_input[batch * input_size + column - state_size_2] = grad_hci[grad_idx]; }} } } } template <typename scalar_t> __global__ void backward_final( scalar_t* __restrict__ sum_to_get_grads, const scalar_t* __restrict__ grad_gates_raw, const scalar_t* __restrict__ cells, 
const scalar_t* __restrict__ grad_gates_layer_normalized, const scalar_t* __restrict__ gates_fig_normalized, const scalar_t* __restrict__ gates_o_normalized, const scalar_t* __restrict__ grad_new_cells, const scalar_t* __restrict__ new_cells_normalized, const int64_t n_total_batches, const int64_t batch_size, const int64_t state_size, const int64_t state_size_2, const int64_t state_size_3, const int64_t gate_size) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < state_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < n_total_batches) { const int process_idx = blockIdx.z; if (process_idx < 13) { const int store_idx = batch * (gate_size * 3 + state_size) + process_idx * state_size + column; /*if (process_idx == 0) { sum_to_get_grads[store_idx] = grad_gates_raw[batch * gate_size + column] * cells[batch * state_size + column]; } else{ if (process_idx == 1) { sum_to_get_grads[store_idx] = grad_gates_raw[batch * gate_size + column + state_size] * cells[batch * state_size + column]; } else{ if (process_idx == 2) { sum_to_get_grads[store_idx] = grad_gates_raw[batch * gate_size + column + state_size_3] * cells[(batch + batch_size) * state_size + column]; } else{ if (process_idx == 3) { sum_to_get_grads[store_idx] = grad_gates_layer_normalized[batch * gate_size + column]; } else{ if (process_idx == 4) { sum_to_get_grads[store_idx] = grad_gates_layer_normalized[batch * gate_size + column + state_size]; } else{ if (process_idx == 5) { sum_to_get_grads[store_idx] = grad_gates_layer_normalized[batch * gate_size + column + state_size_2]; } else{ if (process_idx == 6) { sum_to_get_grads[store_idx] = grad_gates_layer_normalized[batch * gate_size + column + state_size_3]; } else{ if (process_idx == 7) { sum_to_get_grads[store_idx] = grad_gates_layer_normalized[batch * gate_size + column] * gates_fig_normalized[batch * state_size_3 + column]; } else{ if (process_idx == 8) { const int norm_idx = batch * state_size_3 + column + state_size; sum_to_get_grads[store_idx] = grad_gates_layer_normalized[norm_idx + batch * state_size] * gates_fig_normalized[norm_idx]; } else{ if (process_idx == 9) { const int norm_idx = batch * state_size_3 + column + state_size_2; sum_to_get_grads[store_idx] = grad_gates_layer_normalized[norm_idx + batch * state_size] * gates_fig_normalized[norm_idx]; } else{ if (process_idx == 10) { const int norm_idx = batch * state_size + column; sum_to_get_grads[store_idx] = grad_gates_layer_normalized[norm_idx + (batch + 1) * state_size_3] * gates_o_normalized[norm_idx]; } else{ if (process_idx == 11) { sum_to_get_grads[store_idx] = grad_new_cells[batch * state_size + column] * new_cells_normalized[batch * state_size + column]; } else { sum_to_get_grads[store_idx] = grad_new_cells[batch * state_size + column]; }}}}}}}}}}}}*/ switch (process_idx) { case 0: { sum_to_get_grads[store_idx] = grad_gates_raw[batch * gate_size + column] * cells[batch * state_size + column]; break; } case 1: { sum_to_get_grads[store_idx] = grad_gates_raw[batch * gate_size + column + state_size] * cells[batch * state_size + column]; break; } case 2: { sum_to_get_grads[store_idx] = grad_gates_raw[batch * gate_size + column + state_size_3] * cells[(batch + batch_size) * state_size + column]; break; } case 3: { sum_to_get_grads[store_idx] = grad_gates_layer_normalized[batch * gate_size + column]; break; } case 4: { sum_to_get_grads[store_idx] = grad_gates_layer_normalized[batch * gate_size + column + state_size]; break; } case 5: { sum_to_get_grads[store_idx] = 
grad_gates_layer_normalized[batch * gate_size + column + state_size_2]; break; } case 6: { sum_to_get_grads[store_idx] = grad_gates_layer_normalized[batch * gate_size + column + state_size_3]; break; } case 7: { sum_to_get_grads[store_idx] = grad_gates_layer_normalized[batch * gate_size + column] * gates_fig_normalized[batch * state_size_3 + column]; break; } case 8: { const int norm_idx = batch * state_size_3 + column + state_size; sum_to_get_grads[store_idx] = grad_gates_layer_normalized[norm_idx + batch * state_size] * gates_fig_normalized[norm_idx]; break; } case 9: { const int norm_idx = batch * state_size_3 + column + state_size_2; sum_to_get_grads[store_idx] = grad_gates_layer_normalized[norm_idx + batch * state_size] * gates_fig_normalized[norm_idx]; break; } case 10: { const int norm_idx = batch * state_size + column; sum_to_get_grads[store_idx] = grad_gates_layer_normalized[norm_idx + (batch + 1) * state_size_3] * gates_o_normalized[norm_idx]; break; } case 11: { sum_to_get_grads[store_idx] = grad_new_cells[batch * state_size + column] * new_cells_normalized[batch * state_size + column]; break; } case 12: { sum_to_get_grads[store_idx] = grad_new_cells[batch * state_size + column]; break; } } } } } } std::vector<at::Tensor> ln_peephole_lstm_layer_cuda_backward( at::Tensor &grad_output, at::Tensor &grad_hidden, at::Tensor &grad_cell, at::Tensor const &input, at::Tensor const &hiddens, at::Tensor const &cells, at::Tensor const &gates_fig, at::Tensor const &gates_fig_normalized, at::Tensor &gates_fig_stds, at::Tensor const &gates_o, at::Tensor const &gates_o_normalized, at::Tensor &gates_o_stds, at::Tensor const &new_cells_normalized, at::Tensor &new_cells_stds, at::Tensor &tanh_new_cells, at::Tensor const &dropout, at::Tensor const &weight_ih, at::Tensor const &weight_hh, at::Tensor const &weight_ch, at::Tensor const &gamma_f, at::Tensor const &gamma_i, at::Tensor const &gamma_g, at::Tensor const &gamma_o, at::Tensor const &gamma_new_cell) { grad_output = grad_output.clone(); grad_hidden = grad_hidden.clone(); grad_cell = grad_cell.clone(); const auto sequence_length = input.size(0); const auto batch_size = input.size(1); const auto state_size = hiddens.size(2); const auto state_size_2 = state_size + state_size; const auto state_size_3 = state_size_2 + state_size; const auto gate_size = state_size_3 + state_size; const auto input_size = input.size(2); const auto X_size = input_size + state_size_2; const auto n_total_batches = batch_size * sequence_length; const auto dropout_candidate_cell = dropout[0]; const auto dropout_output = dropout[1]; const auto forget_gates = gates_fig.select(2, 0).clone(); const auto weights = at::cat({ weight_hh, at::cat({ weight_ch.slice(0, 0, state_size).diag(), weight_ch.slice(0, state_size, state_size_2).diag(), at::zeros({ state_size_2, state_size }, weight_ch.options()) }), weight_ih }, 1); const auto weight_co = weight_ch.slice(0, state_size_2); auto grad_input = at::empty_like(input); auto grad_gates_layer_normalized = at::empty({ sequence_length, batch_size, gate_size }, gates_fig.options()); auto grad_gates_raw = at::empty_like(grad_gates_layer_normalized); auto grad_new_cells = at::empty_like(tanh_new_cells); auto grad_new_cells_wrt_tanh_new_cell = at::empty_like(tanh_new_cells); auto grad_output_gate_normalized = at::empty({ batch_size * 2, state_size }, grad_gates_raw.options()); auto grad_new_cell_normalized = at::empty({ batch_size * 2, state_size }, grad_cell.options()); auto grad_fig_gate_normalized = at::empty({ batch_size * 2, 3, 
state_size }, grad_gates_raw.options()); at::Tensor sum_to_get_grads; const dim3 threads(64, 8); const dim3 blocks_0((state_size + threads.x - 1) / threads.x, (n_total_batches + threads.y - 1) / threads.y, 6); const dim3 blocks_1((state_size + threads.x - 1) / threads.x, (batch_size + threads.y - 1) / threads.y); const dim3 blocks_2((state_size + threads.x - 1) / threads.x, (batch_size + threads.y - 1) / threads.y, 3); const dim3 blocks_3((X_size + threads.x - 1) / threads.x, (batch_size + threads.y - 1) / threads.y); const dim3 blocks_4((state_size + threads.x - 1) / threads.x, (n_total_batches + threads.y - 1) / threads.y, 13); AT_DISPATCH_FLOATING_TYPES(gates_fig.type(), "ln_peephole_lstm_layer_cuda_backward", ([&] { hipLaunchKernelGGL(( backward_preparation<scalar_t>) , dim3(blocks_0), dim3(threads), 0, 0, grad_output.data<scalar_t>(), dropout_output.data<scalar_t>(), dropout_candidate_cell.data<scalar_t>(), cells.data<scalar_t>(), gates_fig.data<scalar_t>(), gates_o.data<scalar_t>(), grad_gates_layer_normalized.data<scalar_t>(), gates_fig_stds.data<scalar_t>(), gates_o_stds.data<scalar_t>(), new_cells_stds.data<scalar_t>(), tanh_new_cells.data<scalar_t>(), grad_new_cells_wrt_tanh_new_cell.data<scalar_t>(), n_total_batches, state_size, state_size_2, state_size_3, gate_size); for (int i = sequence_length - 1; i >= 0; i--) { hipLaunchKernelGGL(( backward_loop_part_0<scalar_t>) , dim3(blocks_1), dim3(threads), 0, 0, grad_hidden.data<scalar_t>(), grad_new_cells_wrt_tanh_new_cell[i].data<scalar_t>(), grad_output[i].data<scalar_t>(), grad_gates_layer_normalized[i].data<scalar_t>(), gamma_o.data<scalar_t>(), grad_output_gate_normalized.data<scalar_t>(), gates_o_normalized[i].data<scalar_t>(), batch_size, state_size, state_size_3); hipLaunchKernelGGL(( backward_loop_part_1<scalar_t>) , dim3(blocks_1), dim3(threads), 0, 0, grad_output_gate_normalized.data<scalar_t>(), grad_output_gate_normalized.sum(/*dim=*/1, /*keepdim=*/false).data<scalar_t>(), gates_o_normalized[i].data<scalar_t>(), gates_o_stds[i].data<scalar_t>(), grad_gates_raw[i].data<scalar_t>(), weight_co.data<scalar_t>(), grad_new_cells_wrt_tanh_new_cell[i].data<scalar_t>(), grad_cell.data<scalar_t>(), grad_new_cells[i].data<scalar_t>(), grad_new_cell_normalized.data<scalar_t>(), new_cells_normalized[i].data<scalar_t>(), gamma_new_cell.data<scalar_t>(), batch_size, state_size, state_size_3); hipLaunchKernelGGL(( backward_loop_part_2<scalar_t>) , dim3(blocks_1), dim3(threads), 0, 0, grad_cell.data<scalar_t>(), grad_new_cell_normalized.sum(/*dim=*/1, /*keepdim=*/false).data<scalar_t>(), new_cells_normalized[i].data<scalar_t>(), new_cells_stds[i].data<scalar_t>(), batch_size, state_size); hipLaunchKernelGGL(( backward_loop_part_3<scalar_t>) , dim3(blocks_2), dim3(threads), 0, 0, grad_cell.data<scalar_t>(), grad_gates_layer_normalized[i].data<scalar_t>(), gamma_f.data<scalar_t>(), gamma_i.data<scalar_t>(), gamma_g.data<scalar_t>(), grad_fig_gate_normalized.data<scalar_t>(), gates_fig_normalized[i].data<scalar_t>(), batch_size, state_size, state_size_3); hipLaunchKernelGGL(( backward_loop_part_4<scalar_t>) , dim3(blocks_2), dim3(threads), 0, 0, grad_fig_gate_normalized.data<scalar_t>(), grad_fig_gate_normalized.sum(/*dim=*/2, /*keepdim=*/false).data<scalar_t>(), gates_fig_normalized[i].data<scalar_t>(), gates_fig_stds[i].data<scalar_t>(), grad_gates_raw[i].data<scalar_t>(), batch_size, state_size, state_size_3); hipLaunchKernelGGL(( backward_loop_part_5<scalar_t>) , dim3(blocks_3), dim3(threads), 0, 0, 
grad_gates_raw[i].mm(weights).data<scalar_t>(), forget_gates[i].data<scalar_t>(), grad_hidden.data<scalar_t>(), grad_cell.data<scalar_t>(), grad_input[i].data<scalar_t>(), batch_size, input_size, state_size, state_size_2, X_size); } sum_to_get_grads = at::empty({ sequence_length * batch_size, gate_size * 3 + state_size }, weights.options()); hipLaunchKernelGGL(( backward_final<scalar_t>) , dim3(blocks_4), dim3(threads), 0, 0, sum_to_get_grads.data<scalar_t>(), grad_gates_raw.data<scalar_t>(), cells.data<scalar_t>(), grad_gates_layer_normalized.data<scalar_t>(), gates_fig_normalized.data<scalar_t>(), gates_o_normalized.data<scalar_t>(), grad_new_cells.data<scalar_t>(), new_cells_normalized.data<scalar_t>(), n_total_batches, batch_size, state_size, state_size_2, state_size_3, gate_size); })); const auto grad_weight_ih_hh = grad_gates_raw.view({ sequence_length * batch_size, gate_size }).t().mm(at::cat({ input, hiddens }, 2).view({ sequence_length * batch_size, input_size + state_size })); const auto bunch_of_grads = sum_to_get_grads.sum(/*dim=*/0, /*keepdim=*/false); return { grad_input, grad_weight_ih_hh.slice(1, 0, input_size).contiguous(), grad_weight_ih_hh.slice(1, input_size).contiguous(), bunch_of_grads.slice(0, 0, state_size_3), bunch_of_grads.slice(0, state_size_3, gate_size + state_size_3), bunch_of_grads.slice(0, gate_size + state_size_3, gate_size * 2), bunch_of_grads.slice(0, gate_size * 2, gate_size * 2 + state_size), bunch_of_grads.slice(0, gate_size * 2 + state_size, gate_size * 2 + state_size_2), bunch_of_grads.slice(0, gate_size * 2 + state_size_2, gate_size * 2 + state_size_3), bunch_of_grads.slice(0, gate_size * 2 + state_size_3, gate_size * 3), bunch_of_grads.slice(0, gate_size * 3), grad_hidden, grad_cell }; }
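// ---------------------------------------------------------------------------
// Editor's addition (not part of the original file): backward_loop_part_1,
// backward_loop_part_2 and backward_loop_part_4 above all evaluate the usual
// layer-normalization input gradient
//     dx = (N*dy_hat - sum(dy_hat) - x_hat*sum(dy_hat*x_hat)) / (N*std),
// with gamma already folded into dy_hat and with the factor N folded into the
// saved std (backward_preparation multiplies each stored std by state_size).
// The helper below is only an illustrative, stand-alone restatement of that
// element-wise formula; its name and argument list are assumptions made for
// this sketch and do not appear in the original source.
template <typename scalar_t>
__device__ __forceinline__ scalar_t layer_norm_backward_element(
    const scalar_t n,               // size of the normalized dimension (state_size)
    const scalar_t grad_normalized, // gamma * upstream gradient for this element
    const scalar_t normalized,      // saved (x - mean) / std for this element
    const scalar_t sum_grad,        // sum of grad_normalized over the dimension
    const scalar_t sum_grad_norm,   // sum of grad_normalized * normalized over the dimension
    const scalar_t n_times_std)     // state_size * std, as pre-scaled by backward_preparation
{
    return (n * grad_normalized - sum_grad - normalized * sum_grad_norm) / n_times_std;
}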
98b81066f9c7d8c127b1722a3cdf424ee89a85f7.cu
#include <ATen/ATen.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> template <typename scalar_t> __device__ __forceinline__ scalar_t sigmoid(scalar_t const &z) { return 1.0 / (1.0 + exp(-z)); } template <typename scalar_t> __device__ __forceinline__ scalar_t tanh(scalar_t const &z) { const auto exp_n2z = exp(-2 * z); return (1.0 - exp_n2z) / (1.0 + exp_n2z); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_sigmoid_with_output(scalar_t const &a) { return a * (1.0 - a); } template <typename scalar_t> __device__ __forceinline__ scalar_t d_tanh_with_output(scalar_t const &a) { return 1.0 - (a * a); } template <typename scalar_t> __global__ void forward_part_0( const scalar_t* __restrict__ hidden, const scalar_t* __restrict__ cell, scalar_t* __restrict__ hiddens_storage, scalar_t* __restrict__ cells_storage, scalar_t* __restrict__ current_gate, const scalar_t* __restrict__ mean_fig, const scalar_t* __restrict__ var_fig, const scalar_t epsilon, scalar_t* __restrict__ stds_storage, scalar_t* __restrict__ normalized_storage, const scalar_t* __restrict__ gamma_fig, const scalar_t* __restrict__ bias_fig, scalar_t* __restrict__ activated_storage, scalar_t* __restrict__ forgotten_cell, const scalar_t* __restrict__ dropout_candidate_cell, const int64_t batch_size, const int64_t state_size, const int64_t state_size_3) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < state_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < batch_size) { const int process_idx = blockIdx.z; if (process_idx < 4) { if (process_idx < 3) //Normalizations and stuff { const int mean_var_std_idx = batch * 3 + process_idx; const scalar_t std = sqrt(var_fig[mean_var_std_idx] + epsilon); if (column == 0) { stds_storage[mean_var_std_idx] = std; } const int gate_val_storage_idx = batch * state_size_3 + process_idx * state_size + column; const int gate_val_local_idx = gate_val_storage_idx + batch * state_size; scalar_t gate_val = (current_gate[gate_val_local_idx] - mean_fig[mean_var_std_idx]) / std; normalized_storage[gate_val_storage_idx] = gate_val; const int gamma_bias_idx = process_idx * state_size + column; if (process_idx < 2) //forget gate & input gate { gate_val = sigmoid((gate_val * gamma_fig[gamma_bias_idx]) + bias_fig[gamma_bias_idx]); if (process_idx == 0) //forget cell memory { const int local_state_idx = batch * state_size + column; forgotten_cell[local_state_idx] = gate_val * cell[local_state_idx]; } activated_storage[gate_val_storage_idx] = gate_val; current_gate[gate_val_local_idx] = gate_val; } else //candidate cell { gate_val = tanh((gate_val * gamma_fig[gamma_bias_idx]) + bias_fig[gamma_bias_idx]); activated_storage[gate_val_storage_idx] = gate_val; current_gate[gate_val_local_idx] = gate_val * dropout_candidate_cell[batch * state_size + column]; } } else //Hidden, Cell Storage { const int local_state_idx = batch * state_size + column; hiddens_storage[local_state_idx] = hidden[local_state_idx]; cells_storage[local_state_idx] = cell[local_state_idx]; } } } } } template <typename scalar_t> __global__ void forward_part_1( const scalar_t* __restrict__ forgotten_cell, const scalar_t* __restrict__ current_gate, scalar_t* __restrict__ cell, const int64_t batch_size, const int64_t state_size, const int64_t gate_size) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < state_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < batch_size) { const int local_state_idx = batch * state_size + 
column; const int local_input_gate_idx = batch * gate_size + state_size + column; cell[local_state_idx] = forgotten_cell[local_state_idx] + current_gate[local_input_gate_idx] * current_gate[local_input_gate_idx + state_size]; } } } template <typename scalar_t> __global__ void forward_part_2( scalar_t* __restrict__ cell, const scalar_t* __restrict__ mean, const scalar_t* __restrict__ var, const scalar_t epsilon, scalar_t* __restrict__ new_cell_stds_storage, scalar_t* __restrict__ new_cell_normalized_storage, const scalar_t* __restrict__ gamma_new_cell, const scalar_t* __restrict__ beta_new_cell, scalar_t* __restrict__ hc, scalar_t* __restrict__ current_gate, const scalar_t* __restrict__ weight_co, const int64_t batch_size, const int64_t state_size, const int64_t state_size_2) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < state_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < batch_size) { const int cell_idx = batch * state_size + column; const scalar_t std = sqrt(var[batch] + epsilon); if (column == 0) { new_cell_stds_storage[batch] = std; } scalar_t cell_val = (cell[cell_idx] - mean[batch]) / std; new_cell_normalized_storage[cell_idx] = cell_val; cell_val = (cell_val * gamma_new_cell[column]) + beta_new_cell[column]; cell[cell_idx] = cell_val; const int hc_idx = cell_idx + (batch + 1) * state_size; hc[hc_idx] = cell_val; current_gate[hc_idx + (batch + 1) * state_size_2] += cell_val * weight_co[column]; } } } template <typename scalar_t> __global__ void forward_part_3( const scalar_t* __restrict__ current_gate, const scalar_t* __restrict__ mean, const scalar_t* __restrict__ var, const scalar_t epsilon, scalar_t* __restrict__ output_gate_stds_storage, scalar_t* __restrict__ output_gate_normalized_storage, const scalar_t* __restrict__ gamma_o, const scalar_t* __restrict__ bias_o, scalar_t* __restrict__ output_gate_activated_storage, const scalar_t* __restrict__ cell, scalar_t* __restrict__ tanh_new_cell_storage, scalar_t* __restrict__ hidden, scalar_t* __restrict__ hc, scalar_t* __restrict__ outputs, const scalar_t* __restrict__ dropout_output, const int64_t batch_size, const int64_t state_size, const int64_t state_size_3) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < state_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < batch_size) { const int state_and_output_gate_storage_idx = batch * state_size + column; const int hc_idx = state_and_output_gate_storage_idx + batch * state_size; const int output_gate_idx = state_and_output_gate_storage_idx + (batch + 1) * state_size_3; const scalar_t std = sqrt(var[batch] + epsilon); if (column == 0) { output_gate_stds_storage[batch] = std; } scalar_t output_gate_val = (current_gate[output_gate_idx] - mean[batch]) / std; output_gate_normalized_storage[state_and_output_gate_storage_idx] = output_gate_val; output_gate_val = sigmoid((output_gate_val * gamma_o[column]) + bias_o[column]); output_gate_activated_storage[state_and_output_gate_storage_idx] = output_gate_val; const scalar_t tanh_cell = tanh(cell[state_and_output_gate_storage_idx]); tanh_new_cell_storage[state_and_output_gate_storage_idx] = tanh_cell; const scalar_t hidden_val = output_gate_val * tanh_cell; hidden[state_and_output_gate_storage_idx] = hidden_val; hc[hc_idx] = hidden_val; outputs[state_and_output_gate_storage_idx] = hidden_val * dropout_output[state_and_output_gate_storage_idx]; } } } std::vector<at::Tensor> ln_peephole_lstm_layer_cuda_forward( at::Tensor const &input, 
at::Tensor const &weight_ih, at::Tensor const &weight_hh, at::Tensor const &weight_ch, at::Tensor const &bias, at::Tensor const &gamma_f, at::Tensor const &gamma_i, at::Tensor const &gamma_g, at::Tensor const &gamma_o, at::Tensor const &gamma_new_cell, at::Tensor const &beta_new_cell, at::Tensor &hidden, at::Tensor &cell, double const &epsilon, double const &dropout_p, bool const &dropout_on_output, bool const &training, int64_t const &sequence_length, int64_t const &batch_size, int64_t const &input_size, int64_t const &state_size, int64_t const &state_size_2, int64_t const &state_size_3, int64_t const &gate_size) { const auto options = weight_ih.options(); auto hiddens = at::empty({ sequence_length, batch_size, state_size }, options); auto cells = at::empty({ sequence_length + 1, batch_size, state_size }, options); auto gates_fig_stds = at::empty({ sequence_length, batch_size, 3, 1 }, options); auto gates_fig_normalized = at::empty({ sequence_length, batch_size, 3, state_size }, options); auto gates_fig = at::empty({ sequence_length, batch_size, 3, state_size }, options); auto gates_o_stds = at::empty({ sequence_length, batch_size, 1 }, options); auto gates_o_normalized = at::empty({ sequence_length, batch_size, state_size }, options); auto gates_o = at::empty({ sequence_length, batch_size, state_size }, options); auto new_cells_stds = at::empty({ sequence_length, batch_size, 1 }, options); auto new_cells_normalized = at::empty({ sequence_length, batch_size, state_size }, options); auto tanh_new_cells = at::empty({ sequence_length, batch_size, state_size }, options); auto outputs = at::empty({ sequence_length, batch_size, state_size }, options); at::Tensor dropout; if (dropout_p <= 0. || !training) { dropout = at::ones({ 2, sequence_length, batch_size, state_size }, options); } else { if (dropout_p >= 1.) 
{ dropout = at::zeros({ 2, sequence_length, batch_size, state_size }, options); } else { dropout = at::bernoulli(at::zeros({ 2, sequence_length, batch_size, state_size }, options), (1 - dropout_p)).div(1 - dropout_p); } if (!dropout_on_output) { dropout[1] = 1; } } const auto dropout_candidate_cell = dropout[0]; const auto dropout_output = dropout[1]; const auto ih = at::matmul(input, weight_ih.t()); hidden = hidden.clone(); cell = cell.clone(); auto hc = at::cat({ hidden, cell }, 1); const auto weight_hc_h = at::cat({ weight_hh.t(), at::cat({ weight_ch.slice(0, 0, state_size).diag(), weight_ch.slice(0, state_size, state_size_2).diag(), at::zeros({ state_size_2, state_size }, options) }).t() }); const auto weight_co = weight_ch.slice(0, state_size_2); const auto gamma_fig = at::stack({ gamma_f, gamma_i, gamma_g }); const auto bias_fig = bias.slice(0, 0, state_size_3).view({ 3, state_size }); const auto bias_o = bias.slice(0, state_size_3); at::Tensor current_gate; auto forgotten_cell = at::empty_like(cell); at::Tensor mean; at::Tensor var; const dim3 threads(32, 8); const dim3 blocks_0((state_size + threads.x - 1) / threads.x, (batch_size + threads.y - 1) / threads.y, 4); const dim3 blocks_1((state_size + threads.x - 1) / threads.x, (batch_size + threads.y - 1) / threads.y); AT_DISPATCH_FLOATING_TYPES(ih.type(), "ln_peephole_lstm_layer_cuda_forward", ([&] { for (int i = 0; i < sequence_length; i++) { current_gate = at::addmm(ih[i], hc, weight_hc_h).view({ batch_size, 4, state_size }); mean = current_gate.slice(1, 0, 3).mean(/*dim=*/2, /*keepdim=*/false); var = current_gate.slice(1, 0, 3).var(/*dim=*/2, /*unbiased=*/false, /*keepdim=*/false); forward_part_0<scalar_t> <<<blocks_0, threads>>> ( hidden.data<scalar_t>(), cell.data<scalar_t>(), hiddens[i].data<scalar_t>(), cells[i].data<scalar_t>(), current_gate.data<scalar_t>(), mean.data<scalar_t>(), var.data<scalar_t>(), epsilon, gates_fig_stds[i].data<scalar_t>(), gates_fig_normalized[i].data<scalar_t>(), gamma_fig.data<scalar_t>(), bias_fig.data<scalar_t>(), gates_fig[i].data<scalar_t>(), forgotten_cell.data<scalar_t>(), dropout_candidate_cell[i].data<scalar_t>(), batch_size, state_size, state_size_3); forward_part_1<scalar_t> <<<blocks_1, threads>>> ( forgotten_cell.data<scalar_t>(), current_gate.data<scalar_t>(), cell.data<scalar_t>(), batch_size, state_size, gate_size); mean = cell.mean(/*dim=*/1, /*keepdim=*/false); var = cell.var(/*dim=*/1, /*unbiased=*/false, /*keepdim=*/false); forward_part_2<scalar_t> <<<blocks_1, threads>>> ( cell.data<scalar_t>(), mean.data<scalar_t>(), var.data<scalar_t>(), epsilon, new_cells_stds[i].data<scalar_t>(), new_cells_normalized[i].data<scalar_t>(), gamma_new_cell.data<scalar_t>(), beta_new_cell.data<scalar_t>(), hc.data<scalar_t>(), current_gate.data<scalar_t>(), weight_co.data<scalar_t>(), batch_size, state_size, state_size_2); mean = current_gate.select(1, 3).mean(/*dim=*/1, /*keepdim=*/false); var = current_gate.select(1, 3).var(/*dim=*/1, /*unbiased=*/false, /*keepdim=*/false); forward_part_3<scalar_t> <<<blocks_1, threads>>> ( current_gate.data<scalar_t>(), mean.data<scalar_t>(), var.data<scalar_t>(), epsilon, gates_o_stds[i].data<scalar_t>(), gates_o_normalized[i].data<scalar_t>(), gamma_o.data<scalar_t>(), bias_o.data<scalar_t>(), gates_o[i].data<scalar_t>(), cell.data<scalar_t>(), tanh_new_cells[i].data<scalar_t>(), hidden.data<scalar_t>(), hc.data<scalar_t>(), outputs[i].data<scalar_t>(), dropout_output[i].data<scalar_t>(), batch_size, state_size, state_size_3); } })); cells[sequence_length] = 
cell; return { outputs, hc.slice(1, 0, state_size).contiguous(), hc.slice(1, state_size).contiguous(), input, hiddens, cells, gates_fig, gates_fig_normalized, gates_fig_stds, gates_o, gates_o_normalized, gates_o_stds, new_cells_normalized, new_cells_stds, tanh_new_cells, dropout }; } //////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename scalar_t> __global__ void backward_preparation( scalar_t* __restrict__ grad_output, const scalar_t* __restrict__ dropout_output, const scalar_t* __restrict__ dropout_candidate_cell, const scalar_t* __restrict__ cells, const scalar_t* __restrict__ gates_fig, const scalar_t* __restrict__ gates_o, scalar_t* __restrict__ grad_gates_layer_normalized, scalar_t* __restrict__ gates_fig_stds, scalar_t* __restrict__ gates_o_stds, scalar_t* __restrict__ new_cells_stds, const scalar_t* __restrict__ tanh_new_cells, scalar_t* __restrict__ grad_new_cells_wrt_tanh_new_cell, const int64_t n_total_batches, const int64_t state_size, const int64_t state_size_2, const int64_t state_size_3, const int64_t gate_size) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < state_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < n_total_batches) { const int process_idx = blockIdx.z; if (process_idx < 6) { if (process_idx == 0) { grad_gates_layer_normalized[batch * gate_size + column] = cells[batch * state_size + column] * d_sigmoid_with_output(gates_fig[batch * state_size_3 + column]); if (column == 0) { gates_fig_stds[batch * 3] *= state_size; } } else{if (process_idx == 1) { const int dropout_idx = batch * state_size + column; const int candidate_cell_idx = dropout_idx + (batch + 1) * state_size_2; const int input_gate_idx = candidate_cell_idx - state_size; const int store_idx = input_gate_idx + batch * state_size; grad_gates_layer_normalized[store_idx] = gates_fig[candidate_cell_idx] * d_sigmoid_with_output(gates_fig[input_gate_idx]) * dropout_candidate_cell[dropout_idx]; if (column == 0) { gates_fig_stds[batch * 3 + 1] *= state_size; } } else{if (process_idx == 2) { const int dropout_idx = batch * state_size + column; const int candidate_cell_idx = dropout_idx + (batch + 1) * state_size_2; const int input_gate_idx = candidate_cell_idx - state_size; const int store_idx = candidate_cell_idx + batch * state_size; grad_gates_layer_normalized[store_idx] = gates_fig[input_gate_idx] * d_tanh_with_output(gates_fig[candidate_cell_idx]) * dropout_candidate_cell[dropout_idx]; if (column == 0) { gates_fig_stds[batch * 3 + 2] *= state_size; } } else{if (process_idx == 3) { const int tanh_and_output_idx = batch * state_size + column; grad_gates_layer_normalized[batch * gate_size + column + state_size_3] = tanh_new_cells[tanh_and_output_idx] * d_sigmoid_with_output(gates_o[tanh_and_output_idx]); if (column == 0) { gates_o_stds[batch] *= state_size; } } else{if (process_idx == 4) { const int index = batch * state_size + column; grad_output[index] *= dropout_output[index]; if (column == 0) { new_cells_stds[batch] *= state_size; } } else{if (process_idx == 5) { const int index = batch * state_size + column; grad_new_cells_wrt_tanh_new_cell[index] = d_tanh_with_output(tanh_new_cells[index]) * gates_o[index]; }}}}}} } } } } template <typename scalar_t> __global__ void backward_loop_part_0( const scalar_t* __restrict__ grad_hidden, scalar_t* __restrict__ grad_new_cell_wrt_tanh_new_cell, const scalar_t* __restrict__ grad_output, scalar_t* __restrict__ grad_gate_layer_normalized, const 
scalar_t* __restrict__ gamma_o, scalar_t* __restrict__ grad_output_gate_normalized, const scalar_t* __restrict__ output_gate_normalized, const int64_t batch_size, const int64_t state_size, const int64_t state_size_3) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < state_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < batch_size) { const int local_state_idx = batch * state_size + column; const int gate_idx = local_state_idx + (batch + 1) * state_size_3; scalar_t grad_val = grad_hidden[local_state_idx] + grad_output[local_state_idx]; grad_new_cell_wrt_tanh_new_cell[local_state_idx] *= grad_val; grad_val *= grad_gate_layer_normalized[gate_idx]; grad_gate_layer_normalized[gate_idx] = grad_val; grad_val *= gamma_o[column]; grad_output_gate_normalized[local_state_idx] = grad_val; grad_output_gate_normalized[batch_size * state_size + local_state_idx] = grad_val * output_gate_normalized[local_state_idx]; } } } template <typename scalar_t> __global__ void backward_loop_part_1( const scalar_t* __restrict__ grad_output_gate_normalized, const scalar_t* __restrict__ grad_output_gate_normalized_sums, const scalar_t* __restrict__ output_gate_normalized, const scalar_t* __restrict__ output_gate_std, scalar_t* __restrict__ grad_gate_raw, const scalar_t* __restrict__ weight_co, const scalar_t* __restrict__ grad_new_cell_wrt_tanh_new_cell, scalar_t* __restrict__ grad_cell, scalar_t* __restrict__ grad_new_cell, scalar_t* __restrict__ grad_new_cell_normalized, const scalar_t* __restrict__ new_cell_normalized, const scalar_t* __restrict__ gamma_new_cell, const int64_t batch_size, const int64_t state_size, const int64_t state_size_3) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < state_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < batch_size) { const int local_state_idx = batch * state_size + column; const int gate_idx = local_state_idx + (batch + 1) * state_size_3; scalar_t grad_val = (state_size * grad_output_gate_normalized[local_state_idx] - grad_output_gate_normalized_sums[batch] - output_gate_normalized[local_state_idx] * grad_output_gate_normalized_sums[batch_size + batch]) / output_gate_std[batch]; grad_gate_raw[gate_idx] = grad_val; grad_val = grad_val * weight_co[column] + grad_new_cell_wrt_tanh_new_cell[local_state_idx] + grad_cell[local_state_idx]; grad_new_cell[local_state_idx] = grad_val; grad_val *= gamma_new_cell[column]; grad_cell[local_state_idx] = grad_val; grad_new_cell_normalized[local_state_idx] = grad_val; grad_new_cell_normalized[batch_size * state_size + local_state_idx] = grad_val * new_cell_normalized[local_state_idx]; } } } template <typename scalar_t> __global__ void backward_loop_part_2( scalar_t* __restrict__ grad_cell, const scalar_t* __restrict__ grad_cell_sums, const scalar_t* __restrict__ new_cell_normalized, const scalar_t* __restrict__ new_cell_std, const int64_t batch_size, const int64_t state_size) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < state_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < batch_size) { const int local_state_idx = batch * state_size + column; grad_cell[local_state_idx] = (state_size * grad_cell[local_state_idx] - grad_cell_sums[batch] - new_cell_normalized[local_state_idx] * grad_cell_sums[batch_size + batch]) / new_cell_std[batch]; } } } template <typename scalar_t> __global__ void backward_loop_part_3( const scalar_t* __restrict__ grad_cell, scalar_t* __restrict__ 
grad_gate_layer_normalized, const scalar_t* __restrict__ gamma_f, const scalar_t* __restrict__ gamma_i, const scalar_t* __restrict__ gamma_g, scalar_t* __restrict__ grad_fig_gate_normalized, const scalar_t* __restrict__ fig_gate_normalized, const int64_t batch_size, const int64_t state_size, const int64_t state_size_3) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < state_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < batch_size) { const int process_idx = blockIdx.z; if (process_idx < 3) { const int local_state_idx = batch * state_size + column; const int fig_idx = batch * state_size_3 + process_idx * state_size + column; const int gate_idx = fig_idx + batch * state_size; scalar_t grad_val = grad_cell[local_state_idx] * grad_gate_layer_normalized[gate_idx]; grad_gate_layer_normalized[gate_idx] = grad_val; if (process_idx == 0) { grad_val *= gamma_f[column]; } else{if (process_idx == 1) { grad_val *= gamma_i[column]; } else { grad_val *= gamma_g[column]; }} grad_fig_gate_normalized[fig_idx] = grad_val; grad_fig_gate_normalized[batch_size * state_size_3 + fig_idx] = grad_val * fig_gate_normalized[fig_idx]; } } } } template <typename scalar_t> __global__ void backward_loop_part_4( const scalar_t* __restrict__ grad_fig_gate_normalized, const scalar_t* __restrict__ grad_fig_gate_normalized_sums, const scalar_t* __restrict__ gate_fig_normalized, const scalar_t* __restrict__ gate_fig_std, scalar_t* __restrict__ grad_gate_raw, const int64_t batch_size, const int64_t state_size, const int64_t state_size_3) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < state_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < batch_size) { const int process_idx = blockIdx.z; if (process_idx < 3) { const int fig_idx = batch * state_size_3 + process_idx * state_size + column; const int reduced_fig_idx = batch * 3 + process_idx; scalar_t grad_val = (state_size * grad_fig_gate_normalized[fig_idx] - grad_fig_gate_normalized_sums[reduced_fig_idx] - gate_fig_normalized[fig_idx] * grad_fig_gate_normalized_sums[batch_size * 3 + reduced_fig_idx]) / gate_fig_std[reduced_fig_idx]; grad_gate_raw[fig_idx + batch * state_size] = grad_val; } } } } template <typename scalar_t> __global__ void backward_loop_part_5( const scalar_t* __restrict__ grad_hci, const scalar_t* __restrict__ forget_gate, scalar_t* __restrict__ grad_hidden, scalar_t* __restrict__ grad_cell, scalar_t* __restrict__ grad_input, const int64_t batch_size, const int64_t input_size, const int64_t state_size, const int64_t state_size_2, const int64_t X_size) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < X_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < batch_size) { const int grad_idx = batch * X_size + column; if (column < state_size) { grad_hidden[batch * state_size + column] = grad_hci[grad_idx]; } else{if (column < state_size_2) { const int local_state_idx = (batch - 1) * state_size + column; grad_cell[local_state_idx] = grad_hci[grad_idx] + grad_cell[local_state_idx] * forget_gate[local_state_idx]; } else { grad_input[batch * input_size + column - state_size_2] = grad_hci[grad_idx]; }} } } } template <typename scalar_t> __global__ void backward_final( scalar_t* __restrict__ sum_to_get_grads, const scalar_t* __restrict__ grad_gates_raw, const scalar_t* __restrict__ cells, const scalar_t* __restrict__ grad_gates_layer_normalized, const scalar_t* __restrict__ gates_fig_normalized, const scalar_t* 
__restrict__ gates_o_normalized, const scalar_t* __restrict__ grad_new_cells, const scalar_t* __restrict__ new_cells_normalized, const int64_t n_total_batches, const int64_t batch_size, const int64_t state_size, const int64_t state_size_2, const int64_t state_size_3, const int64_t gate_size) { const int column = blockIdx.x * blockDim.x + threadIdx.x; if (column < state_size) { const int batch = blockIdx.y * blockDim.y + threadIdx.y; if (batch < n_total_batches) { const int process_idx = blockIdx.z; if (process_idx < 13) { const int store_idx = batch * (gate_size * 3 + state_size) + process_idx * state_size + column; /*if (process_idx == 0) { sum_to_get_grads[store_idx] = grad_gates_raw[batch * gate_size + column] * cells[batch * state_size + column]; } else{ if (process_idx == 1) { sum_to_get_grads[store_idx] = grad_gates_raw[batch * gate_size + column + state_size] * cells[batch * state_size + column]; } else{ if (process_idx == 2) { sum_to_get_grads[store_idx] = grad_gates_raw[batch * gate_size + column + state_size_3] * cells[(batch + batch_size) * state_size + column]; } else{ if (process_idx == 3) { sum_to_get_grads[store_idx] = grad_gates_layer_normalized[batch * gate_size + column]; } else{ if (process_idx == 4) { sum_to_get_grads[store_idx] = grad_gates_layer_normalized[batch * gate_size + column + state_size]; } else{ if (process_idx == 5) { sum_to_get_grads[store_idx] = grad_gates_layer_normalized[batch * gate_size + column + state_size_2]; } else{ if (process_idx == 6) { sum_to_get_grads[store_idx] = grad_gates_layer_normalized[batch * gate_size + column + state_size_3]; } else{ if (process_idx == 7) { sum_to_get_grads[store_idx] = grad_gates_layer_normalized[batch * gate_size + column] * gates_fig_normalized[batch * state_size_3 + column]; } else{ if (process_idx == 8) { const int norm_idx = batch * state_size_3 + column + state_size; sum_to_get_grads[store_idx] = grad_gates_layer_normalized[norm_idx + batch * state_size] * gates_fig_normalized[norm_idx]; } else{ if (process_idx == 9) { const int norm_idx = batch * state_size_3 + column + state_size_2; sum_to_get_grads[store_idx] = grad_gates_layer_normalized[norm_idx + batch * state_size] * gates_fig_normalized[norm_idx]; } else{ if (process_idx == 10) { const int norm_idx = batch * state_size + column; sum_to_get_grads[store_idx] = grad_gates_layer_normalized[norm_idx + (batch + 1) * state_size_3] * gates_o_normalized[norm_idx]; } else{ if (process_idx == 11) { sum_to_get_grads[store_idx] = grad_new_cells[batch * state_size + column] * new_cells_normalized[batch * state_size + column]; } else { sum_to_get_grads[store_idx] = grad_new_cells[batch * state_size + column]; }}}}}}}}}}}}*/ switch (process_idx) { case 0: { sum_to_get_grads[store_idx] = grad_gates_raw[batch * gate_size + column] * cells[batch * state_size + column]; break; } case 1: { sum_to_get_grads[store_idx] = grad_gates_raw[batch * gate_size + column + state_size] * cells[batch * state_size + column]; break; } case 2: { sum_to_get_grads[store_idx] = grad_gates_raw[batch * gate_size + column + state_size_3] * cells[(batch + batch_size) * state_size + column]; break; } case 3: { sum_to_get_grads[store_idx] = grad_gates_layer_normalized[batch * gate_size + column]; break; } case 4: { sum_to_get_grads[store_idx] = grad_gates_layer_normalized[batch * gate_size + column + state_size]; break; } case 5: { sum_to_get_grads[store_idx] = grad_gates_layer_normalized[batch * gate_size + column + state_size_2]; break; } case 6: { sum_to_get_grads[store_idx] = 
grad_gates_layer_normalized[batch * gate_size + column + state_size_3]; break; } case 7: { sum_to_get_grads[store_idx] = grad_gates_layer_normalized[batch * gate_size + column] * gates_fig_normalized[batch * state_size_3 + column]; break; } case 8: { const int norm_idx = batch * state_size_3 + column + state_size; sum_to_get_grads[store_idx] = grad_gates_layer_normalized[norm_idx + batch * state_size] * gates_fig_normalized[norm_idx]; break; } case 9: { const int norm_idx = batch * state_size_3 + column + state_size_2; sum_to_get_grads[store_idx] = grad_gates_layer_normalized[norm_idx + batch * state_size] * gates_fig_normalized[norm_idx]; break; } case 10: { const int norm_idx = batch * state_size + column; sum_to_get_grads[store_idx] = grad_gates_layer_normalized[norm_idx + (batch + 1) * state_size_3] * gates_o_normalized[norm_idx]; break; } case 11: { sum_to_get_grads[store_idx] = grad_new_cells[batch * state_size + column] * new_cells_normalized[batch * state_size + column]; break; } case 12: { sum_to_get_grads[store_idx] = grad_new_cells[batch * state_size + column]; break; } } } } } } std::vector<at::Tensor> ln_peephole_lstm_layer_cuda_backward( at::Tensor &grad_output, at::Tensor &grad_hidden, at::Tensor &grad_cell, at::Tensor const &input, at::Tensor const &hiddens, at::Tensor const &cells, at::Tensor const &gates_fig, at::Tensor const &gates_fig_normalized, at::Tensor &gates_fig_stds, at::Tensor const &gates_o, at::Tensor const &gates_o_normalized, at::Tensor &gates_o_stds, at::Tensor const &new_cells_normalized, at::Tensor &new_cells_stds, at::Tensor &tanh_new_cells, at::Tensor const &dropout, at::Tensor const &weight_ih, at::Tensor const &weight_hh, at::Tensor const &weight_ch, at::Tensor const &gamma_f, at::Tensor const &gamma_i, at::Tensor const &gamma_g, at::Tensor const &gamma_o, at::Tensor const &gamma_new_cell) { grad_output = grad_output.clone(); grad_hidden = grad_hidden.clone(); grad_cell = grad_cell.clone(); const auto sequence_length = input.size(0); const auto batch_size = input.size(1); const auto state_size = hiddens.size(2); const auto state_size_2 = state_size + state_size; const auto state_size_3 = state_size_2 + state_size; const auto gate_size = state_size_3 + state_size; const auto input_size = input.size(2); const auto X_size = input_size + state_size_2; const auto n_total_batches = batch_size * sequence_length; const auto dropout_candidate_cell = dropout[0]; const auto dropout_output = dropout[1]; const auto forget_gates = gates_fig.select(2, 0).clone(); const auto weights = at::cat({ weight_hh, at::cat({ weight_ch.slice(0, 0, state_size).diag(), weight_ch.slice(0, state_size, state_size_2).diag(), at::zeros({ state_size_2, state_size }, weight_ch.options()) }), weight_ih }, 1); const auto weight_co = weight_ch.slice(0, state_size_2); auto grad_input = at::empty_like(input); auto grad_gates_layer_normalized = at::empty({ sequence_length, batch_size, gate_size }, gates_fig.options()); auto grad_gates_raw = at::empty_like(grad_gates_layer_normalized); auto grad_new_cells = at::empty_like(tanh_new_cells); auto grad_new_cells_wrt_tanh_new_cell = at::empty_like(tanh_new_cells); auto grad_output_gate_normalized = at::empty({ batch_size * 2, state_size }, grad_gates_raw.options()); auto grad_new_cell_normalized = at::empty({ batch_size * 2, state_size }, grad_cell.options()); auto grad_fig_gate_normalized = at::empty({ batch_size * 2, 3, state_size }, grad_gates_raw.options()); at::Tensor sum_to_get_grads; const dim3 threads(64, 8); const dim3 blocks_0((state_size 
+ threads.x - 1) / threads.x, (n_total_batches + threads.y - 1) / threads.y, 6); const dim3 blocks_1((state_size + threads.x - 1) / threads.x, (batch_size + threads.y - 1) / threads.y); const dim3 blocks_2((state_size + threads.x - 1) / threads.x, (batch_size + threads.y - 1) / threads.y, 3); const dim3 blocks_3((X_size + threads.x - 1) / threads.x, (batch_size + threads.y - 1) / threads.y); const dim3 blocks_4((state_size + threads.x - 1) / threads.x, (n_total_batches + threads.y - 1) / threads.y, 13); AT_DISPATCH_FLOATING_TYPES(gates_fig.type(), "ln_peephole_lstm_layer_cuda_backward", ([&] { backward_preparation<scalar_t> <<<blocks_0, threads>>> ( grad_output.data<scalar_t>(), dropout_output.data<scalar_t>(), dropout_candidate_cell.data<scalar_t>(), cells.data<scalar_t>(), gates_fig.data<scalar_t>(), gates_o.data<scalar_t>(), grad_gates_layer_normalized.data<scalar_t>(), gates_fig_stds.data<scalar_t>(), gates_o_stds.data<scalar_t>(), new_cells_stds.data<scalar_t>(), tanh_new_cells.data<scalar_t>(), grad_new_cells_wrt_tanh_new_cell.data<scalar_t>(), n_total_batches, state_size, state_size_2, state_size_3, gate_size); for (int i = sequence_length - 1; i >= 0; i--) { backward_loop_part_0<scalar_t> <<<blocks_1, threads>>> ( grad_hidden.data<scalar_t>(), grad_new_cells_wrt_tanh_new_cell[i].data<scalar_t>(), grad_output[i].data<scalar_t>(), grad_gates_layer_normalized[i].data<scalar_t>(), gamma_o.data<scalar_t>(), grad_output_gate_normalized.data<scalar_t>(), gates_o_normalized[i].data<scalar_t>(), batch_size, state_size, state_size_3); backward_loop_part_1<scalar_t> <<<blocks_1, threads>>> ( grad_output_gate_normalized.data<scalar_t>(), grad_output_gate_normalized.sum(/*dim=*/1, /*keepdim=*/false).data<scalar_t>(), gates_o_normalized[i].data<scalar_t>(), gates_o_stds[i].data<scalar_t>(), grad_gates_raw[i].data<scalar_t>(), weight_co.data<scalar_t>(), grad_new_cells_wrt_tanh_new_cell[i].data<scalar_t>(), grad_cell.data<scalar_t>(), grad_new_cells[i].data<scalar_t>(), grad_new_cell_normalized.data<scalar_t>(), new_cells_normalized[i].data<scalar_t>(), gamma_new_cell.data<scalar_t>(), batch_size, state_size, state_size_3); backward_loop_part_2<scalar_t> <<<blocks_1, threads>>> ( grad_cell.data<scalar_t>(), grad_new_cell_normalized.sum(/*dim=*/1, /*keepdim=*/false).data<scalar_t>(), new_cells_normalized[i].data<scalar_t>(), new_cells_stds[i].data<scalar_t>(), batch_size, state_size); backward_loop_part_3<scalar_t> <<<blocks_2, threads>>> ( grad_cell.data<scalar_t>(), grad_gates_layer_normalized[i].data<scalar_t>(), gamma_f.data<scalar_t>(), gamma_i.data<scalar_t>(), gamma_g.data<scalar_t>(), grad_fig_gate_normalized.data<scalar_t>(), gates_fig_normalized[i].data<scalar_t>(), batch_size, state_size, state_size_3); backward_loop_part_4<scalar_t> <<<blocks_2, threads>>> ( grad_fig_gate_normalized.data<scalar_t>(), grad_fig_gate_normalized.sum(/*dim=*/2, /*keepdim=*/false).data<scalar_t>(), gates_fig_normalized[i].data<scalar_t>(), gates_fig_stds[i].data<scalar_t>(), grad_gates_raw[i].data<scalar_t>(), batch_size, state_size, state_size_3); backward_loop_part_5<scalar_t> <<<blocks_3, threads>>> ( grad_gates_raw[i].mm(weights).data<scalar_t>(), forget_gates[i].data<scalar_t>(), grad_hidden.data<scalar_t>(), grad_cell.data<scalar_t>(), grad_input[i].data<scalar_t>(), batch_size, input_size, state_size, state_size_2, X_size); } sum_to_get_grads = at::empty({ sequence_length * batch_size, gate_size * 3 + state_size }, weights.options()); backward_final<scalar_t> <<<blocks_4, threads>>> ( 
sum_to_get_grads.data<scalar_t>(), grad_gates_raw.data<scalar_t>(), cells.data<scalar_t>(), grad_gates_layer_normalized.data<scalar_t>(), gates_fig_normalized.data<scalar_t>(), gates_o_normalized.data<scalar_t>(), grad_new_cells.data<scalar_t>(), new_cells_normalized.data<scalar_t>(), n_total_batches, batch_size, state_size, state_size_2, state_size_3, gate_size); })); const auto grad_weight_ih_hh = grad_gates_raw.view({ sequence_length * batch_size, gate_size }).t().mm(at::cat({ input, hiddens }, 2).view({ sequence_length * batch_size, input_size + state_size })); const auto bunch_of_grads = sum_to_get_grads.sum(/*dim=*/0, /*keepdim=*/false); return { grad_input, grad_weight_ih_hh.slice(1, 0, input_size).contiguous(), grad_weight_ih_hh.slice(1, input_size).contiguous(), bunch_of_grads.slice(0, 0, state_size_3), bunch_of_grads.slice(0, state_size_3, gate_size + state_size_3), bunch_of_grads.slice(0, gate_size + state_size_3, gate_size * 2), bunch_of_grads.slice(0, gate_size * 2, gate_size * 2 + state_size), bunch_of_grads.slice(0, gate_size * 2 + state_size, gate_size * 2 + state_size_2), bunch_of_grads.slice(0, gate_size * 2 + state_size_2, gate_size * 2 + state_size_3), bunch_of_grads.slice(0, gate_size * 2 + state_size_3, gate_size * 3), bunch_of_grads.slice(0, gate_size * 3), grad_hidden, grad_cell }; }
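// ---------------------------------------------------------------------------
// Editor's addition (not part of the original file): the two entry points
// ln_peephole_lstm_layer_cuda_forward / ln_peephole_lstm_layer_cuda_backward
// are written in the style of a PyTorch C++/CUDA extension.  The project's own
// C++ dispatch / Python binding code is not included in this excerpt, so the
// commented snippet below is only a hypothetical sketch of how such functions
// are commonly exposed with pybind11; the module layout and doc strings are
// assumptions, not the author's actual binding code.
//
//   #include <torch/extension.h>
//
//   PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
//     m.def("forward",  &ln_peephole_lstm_layer_cuda_forward,
//           "LN peephole LSTM layer forward (CUDA)");
//     m.def("backward", &ln_peephole_lstm_layer_cuda_backward,
//           "LN peephole LSTM layer backward (CUDA)");
//   }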
474b1ad6fdc283322e6c6a56b91ca4ad6bde7b5e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <stdarg.h> #include <math.h> #include <hdf5.h> #define TPBx 16 // Number of threads per block #define TPBy 16 __host__ void updateTimer(time_t t0, int tstep, char str[]) { int elapsedTime=(int)(time(0)-t0); sprintf(str, "%02d:%02d:%02d", elapsedTime/3600, elapsedTime%3600/60, elapsedTime%60); } __host__ void exec(char *format, ...) { char str[1024]; va_list ap; va_start(ap, format); vsprintf(str, format, ap); system(str); } __host__ void dumpToH5(int Ni, int Nj, int Nk, int is, int js, int ks, int ie, int je, int ke, float ***f, char *format, ...) { char filename[1024]; va_list ap; va_start(ap, format); vsprintf(filename, format, ap); hid_t file, dataset, filespace, memspace; hsize_t dimsm[3] = { Ni, Nj, Nk }; hsize_t start[3] = { is, js, ks }; hsize_t count[3] = { 1-is+ie, 1-js+je, 1-ks+ke }; memspace = H5Screate_simple(3, dimsm, 0); filespace = H5Screate_simple(3, count, 0); file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); dataset = H5Dcreate(file, "Data", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT); H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, 0, count, 0); H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace, filespace, H5P_DEFAULT, f[0][0]); H5Dclose(dataset); H5Sclose(filespace); H5Sclose(memspace); H5Fclose(file); } __host__ void print_array(int Nx, int Ny, int Nz, float ***a) { int j,k; for (j=0; j<Ny; j++) { for (k=0; k<Nz; k++) { printf("%1.4f\t", a[Nx/2][j][k]); } printf("\n"); } printf("\n"); } __host__ float ***makeArray(int Nx, int Ny, int Nz) { float ***f; f = (float ***) calloc (Nx, sizeof(float **)); f[0] = (float **) calloc (Ny*Nx, sizeof(float *)); f[0][0] = (float *) calloc (Nz*Ny*Nx, sizeof(float)); for (int i=0; i<Nx; i++) f[i] = f[0] + i*Ny; for (int i=0; i<Ny*Nx; i++) f[0][i] = f[0][0] + i*Nz; return f; } __host__ void set_geometry(int Nx, int Ny, int Nz, float ***CEx, float ***CEy, float ***CEz) { int i,j,k; for (i=0; i<Nx; i++) { for (j=0; j<Ny; j++) { for (k=0; k<Nz; k++) { CEx[i][j][k] = 0.5; CEy[i][j][k] = 0.5; CEz[i][j][k] = 0.5; } } } } __global__ void initArrays(int Nx, int Ny, int Nzpit, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz) { int idx; idx = blockIdx.x*blockDim.x + threadIdx.x; //printf("gridDim.x=%d\n",gridDim.x); //printf("blockIdx.x=%d, blockDim.x=%d, threadIdx.x=%d\n", blockIdx.x, blockDim.x, threadIdx.x); if ( idx < Nx*Ny*Nzpit ) { Ex[idx] = 0; Ey[idx] = 0; Ez[idx] = 0; Hx[idx] = 0; Hy[idx] = 0; Hz[idx] = 0; } } __global__ void updateE(int Nx, int Ny, int Nz, int Nzpit, int BPGy, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz, float *CEx, float *CEy, float *CEz) { int tk, tj; int i, j, k; tk = threadIdx.x; tj = threadIdx.y; k = TPBx*blockIdx.x + tk; j = TPBy*(blockIdx.y % BPGy ) + tj; i = blockIdx.y / BPGy; //printf("[%d, %d] [%d, %d, %d]\n", tk, tj, k, j, i); //printf("gridDim.x=%d, gridDim.y=%d\n", gridDim.x, gridDim.y ); printf("blockIdx.x=%d, blockIdx.y=%d\n", blockIdx.x, blockIdx.y ); //printf("BPGy=%d, blockIdx.y=%d, %BPGy=%d, /BPGy=%d\n", BPGy, blockIdx.y, blockIdx.y%BPGy, blockIdx.y/BPGy ); if ( i<Nx && j<Ny && k<Nz ) { int Nyzpit = Ny*Nzpit; int idx = k + Nzpit*j + Nyzpit*i; //printf("idx=%d, [%d, %d] [%d, %d, %d]\n", idx, tk, tj, k, j, i); //printf("idx=%d\n", idx); __shared__ float hx[TPBy+1][TPBx+1], hy[TPBy][TPBx+1], hz[TPBy+1][TPBx]; hx[tj][tk] = Hx[idx]; hy[tj][tk] = Hy[idx]; hz[tj][tk] = Hz[idx]; if ( tk==TPBx-1 && k<Nz-1 ) { hx[tj][tk+1] = 
Hx[idx+1]; hy[tj][tk+1] = Hy[idx+1]; } if ( tj==TPBy-1 && j<Ny-1 ) { hx[tj+1][tk] = Hx[idx+Nzpit]; hz[tj+1][tk] = Hz[idx+Nzpit]; } __syncthreads(); if ( k < Nz ) { if ( j<Ny-1 && k<Nz-1 ) Ex[idx] += CEx[idx]*( hz[tj+1][tk] - hz[tj][tk] - hy[tj][tk+1] + hy[tj][tk] ); if ( i<Nx-1 && k<Nz-1 ) Ey[idx] += CEy[idx]*( hx[tj][tk+1] - hx[tj][tk] - Hz[idx+Nyzpit] + hz[tj][tk] ); if ( i<Nx-1 && j<Ny-1 ) Ez[idx] += CEz[idx]*( Hy[idx+Nyzpit] - hy[tj][tk] - hx[tj+1][tk] + hx[tj][tk] ); } } } __global__ void updateSrc(int Nx, int Ny, int Nz, int Nzpit, float *Ex, int tstep) { int idx, ijk; idx = blockIdx.x*blockDim.x + threadIdx.x; ijk = idx*(Ny)*(Nzpit) + (Ny/2)*(Nzpit) + (Nz/2); //printf("idx=%d, ijk=%d\n", idx, ijk); //Ex[ijk] += __sinf(0.1*tstep); if ( idx < Nx ) { Ex[ijk] += sin(0.1*tstep); } } __global__ void updateH(int Nx, int Ny, int Nz, int Nzpit, int BPGy, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz) { int tk, tj; int i, j, k; tk = threadIdx.x; tj = threadIdx.y; k = TPBx*blockIdx.x + tk; j = TPBy*(blockIdx.y % BPGy ) + tj; i = blockIdx.y / BPGy; if ( i<Nx && j<Ny && k<Nz ) { int Nyzpit = Ny*Nzpit; int idx = k + Nzpit*j + Nyzpit*k; __shared__ float ex[TPBy+1][TPBx+1], ey[TPBy][TPBx+1], ez[TPBy+1][TPBx]; ex[tj+1][tk+1] = Ex[idx]; ey[tj][tk+1] = Ey[idx]; ez[tj+1][tk] = Ez[idx]; if ( tk==0 && k>0 ) { ex[tj][0] = Ex[idx-1]; ey[tj][0] = Ey[idx-1]; } if ( tj==0 && j>0 ) { ex[0][tk] = Ex[idx-Nzpit]; ez[0][tk] = Ez[idx-Nzpit]; } __syncthreads(); if ( k < Nz ) { if ( j>0 && k>0 ) Hx[idx] -= 0.5*( ez[tj+1][tk] - ez[tj][tk] - ey[tj][tk+1] + ey[tj][tk] ); if ( i>0 && k>0 ) Hy[idx] -= 0.5*( ex[tj+1][tk+1] - ex[tj+1][tk] - ez[tj+1][tk] + Ez[idx-Nyzpit] ); if ( i>0 && j>0 ) Hz[idx] -= 0.5*( ey[tj][tk+1] - Ey[idx-Nyzpit] - ex[tj+1][tk+1] + ex[tj][tk+1] ); } } } int main() { int tstep; char time_str[32]; time_t t0; // Set the parameters int Nx, Ny, Nz, TMAX; Nx = 100; Ny = 200; Nz = 500; //Ny = 16; //Nz = 20; TMAX = 100; // Allocate host memory float ***Ex; float ***CEx, ***CEy, ***CEz; Ex = makeArray(Nx, Ny, Nz); CEx = makeArray(Nx, Ny, Nz); CEy = makeArray(Nx, Ny, Nz); CEz = makeArray(Nx, Ny, Nz); // Geometry set_geometry(Nx, Ny, Nz, CEx, CEy, CEz); // Allocate device memory float *devEx, *devEy, *devEz; float *devHx, *devHy, *devHz; float *devCEx, *devCEy, *devCEz; int z_size = Nz*sizeof(float); size_t pitch; hipMallocPitch ( (void**) &devEx, &pitch, z_size, Nx*Ny ); hipMallocPitch ( (void**) &devEy, &pitch, z_size, Nx*Ny ); hipMallocPitch ( (void**) &devEz, &pitch, z_size, Nx*Ny ); hipMallocPitch ( (void**) &devCEx, &pitch, z_size, Nx*Ny ); hipMallocPitch ( (void**) &devCEy, &pitch, z_size, Nx*Ny ); CUDAmallocPitch ( (void**) &devCEz, &pitch, z_size, Nx*Ny ); hipMallocPitch ( (void**) &devHx, &pitch, z_size, Nx*Ny ); hipMallocPitch ( (void**) &devHy, &pitch, z_size, Nx*Ny ); hipMallocPitch ( (void**) &devHz, &pitch, z_size, Nx*Ny ); // Copy arrays from host to device hipMemcpy2D ( devCEx, pitch, CEx[0][0], z_size, z_size, Nx*Ny, hipMemcpyHostToDevice ); hipMemcpy2D ( devCEy, pitch, CEy[0][0], z_size, z_size, Nx*Ny, hipMemcpyHostToDevice ); hipMemcpy2D ( devCEz, pitch, CEz[0][0], z_size, z_size, Nx*Ny, hipMemcpyHostToDevice ); int Nz_pitch = pitch/4; printf("pitch= %u, Nz_pitch= %d\n", pitch, Nz_pitch); // Number of thread blocks in the grid // Number of threads per block int BPGx = Nz_pitch/TPBx; int BPGy = Ny/TPBy == 0 ? 
Ny/TPBy : Ny/TPBy + 1; int BPGz = Nx; //dim3 Dg(BPGx, BPGy*BPGz); dim3 Dg = dim3(BPGx, BPGy*BPGz); dim3 Db = dim3(TPBx, TPBy); //printf("TPBx=%d, TPBy=%d, TPBz=%d\n", TPBx, TPBy, TPBz); printf("TPBx=%d, TPBy=%d\n", TPBx, TPBy); printf("BPGx=%d, BPGy=%d, BPGz=%d\n", BPGx, BPGy, BPGz); printf("Dg(%d,%d)\n", BPGx, BPGy*BPGz); printf("Db(%d,%d)\n", TPBx, TPBy); printf("Threads per block: %d (%d,%d,%d)\n", TPBx*TPBy*1, TPBx, TPBy, 1); if ( TPBx*TPBy > 512 ) { printf("Error: An excessive number of threads per block.\n%d (%d,%d,%d)\n", TPBx*TPBy, TPBx, TPBy, 1); exit(0); } printf("Blocks per grid: %d (%d,%d,%d)\n", BPGx*BPGy*BPGz, BPGx, BPGy, BPGz); if ( BPGx*BPGy*BPGz > 65535 ) { printf("Error: An excessive number of blocks per grid.\n%d (%d,%d,%d)\n", BPGx*BPGy*BPGz, BPGx, BPGy, BPGz); exit(0); } int TPBsrc = Nx; int BPGsrc = 1; dim3 Dgsrc(BPGsrc); dim3 Dbsrc(TPBsrc); int N = Nx*Ny*Nz_pitch; int TPBinit = Nz_pitch; int BPGinit = N%TPBinit == 0 ? N/TPBinit : N/TPBinit + 1; dim3 Dginit(BPGinit); dim3 Dbinit(TPBinit); // Initialize the device arrays //initArrays <<<Dginit,Dbinit>>> ( Nx, Ny, Nz_pitch, devEx, devEy, devEz, devHx, devHy, devHz ); // Main time loop t0 = time(0); for ( tstep=1; tstep<=TMAX; tstep++) { //for ( tstep=1; tstep<=10; tstep++) { // Update on the GPU hipLaunchKernelGGL(( updateE) , dim3(Dg),dim3(Db), 0, 0, Nx, Ny, Nz, Nz_pitch, BPGy, devEx, devEy, devEz, devHx, devHy, devHz, devCEx, devCEy, devCEz ); //updateSrc <<<Dgsrc,Dbsrc>>> ( Nx, Ny, Nz, Nz_pitch, devEx, tstep ); //updateH <<<Dg,Db>>> ( Nx, Ny, Nz, Nz_pitch, BPGy, devEx, devEy, devEz, devHx, devHy, devHz ); //if ( tstep/10*10 == tstep ) { // Copy arrays from device to host hipMemcpy2D( Ex[0][0], z_size, devEx, pitch, z_size, Nx*Ny, hipMemcpyDeviceToHost ); //print_array(Nx, Ny, Nz, Ex); //dumpToH5(Nx, Ny, Nz, Nx/2, 0, 0, Nx/2, Ny-1, Nz-1, Ex, "gpu_png/Ex-%05d.h5", tstep); //exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ex-%05d.h5", tstep); updateTimer(t0, tstep, time_str); printf("tstep=%d\t%s\n", tstep, time_str); //} } updateTimer(t0, tstep, time_str); printf("tstep=%d\t%s\n", tstep, time_str); }
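A minimal sketch (not part of the original source) of the two conventions the file above relies on: the pitched allocation is addressed as idx = k + Nzpit*j + Ny*Nzpit*i, with Nz_pitch = pitch/4, i.e. the pitch expressed in floats, and the grid is sized with a ceiling division. The helper names ceil_div and pitched_idx are illustrative only.
// Sketch only: k is the padded, fastest-varying axis, exactly as in updateE/updateH above.
static inline int ceil_div(int n, int tpb) { return (n + tpb - 1) / tpb; }  // e.g. BPGy = ceil_div(Ny, TPBy)
__host__ __device__ static inline int pitched_idx(int i, int j, int k, int Ny, int Nzpit)
{
    return k + Nzpit*j + Ny*Nzpit*i;  // same expression the kernels use (Nyzpit = Ny*Nzpit)
}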
474b1ad6fdc283322e6c6a56b91ca4ad6bde7b5e.cu
#include <stdlib.h> #include <stdio.h> #include <stdarg.h> #include <math.h> #include <hdf5.h> #define TPBx 16 // Number of threads per block #define TPBy 16 __host__ void updateTimer(time_t t0, int tstep, char str[]) { int elapsedTime=(int)(time(0)-t0); sprintf(str, "%02d:%02d:%02d", elapsedTime/3600, elapsedTime%3600/60, elapsedTime%60); } __host__ void exec(char *format, ...) { char str[1024]; va_list ap; va_start(ap, format); vsprintf(str, format, ap); system(str); } __host__ void dumpToH5(int Ni, int Nj, int Nk, int is, int js, int ks, int ie, int je, int ke, float ***f, char *format, ...) { char filename[1024]; va_list ap; va_start(ap, format); vsprintf(filename, format, ap); hid_t file, dataset, filespace, memspace; hsize_t dimsm[3] = { Ni, Nj, Nk }; hsize_t start[3] = { is, js, ks }; hsize_t count[3] = { 1-is+ie, 1-js+je, 1-ks+ke }; memspace = H5Screate_simple(3, dimsm, 0); filespace = H5Screate_simple(3, count, 0); file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); dataset = H5Dcreate(file, "Data", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT); H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, 0, count, 0); H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace, filespace, H5P_DEFAULT, f[0][0]); H5Dclose(dataset); H5Sclose(filespace); H5Sclose(memspace); H5Fclose(file); } __host__ void print_array(int Nx, int Ny, int Nz, float ***a) { int j,k; for (j=0; j<Ny; j++) { for (k=0; k<Nz; k++) { printf("%1.4f\t", a[Nx/2][j][k]); } printf("\n"); } printf("\n"); } __host__ float ***makeArray(int Nx, int Ny, int Nz) { float ***f; f = (float ***) calloc (Nx, sizeof(float **)); f[0] = (float **) calloc (Ny*Nx, sizeof(float *)); f[0][0] = (float *) calloc (Nz*Ny*Nx, sizeof(float)); for (int i=0; i<Nx; i++) f[i] = f[0] + i*Ny; for (int i=0; i<Ny*Nx; i++) f[0][i] = f[0][0] + i*Nz; return f; } __host__ void set_geometry(int Nx, int Ny, int Nz, float ***CEx, float ***CEy, float ***CEz) { int i,j,k; for (i=0; i<Nx; i++) { for (j=0; j<Ny; j++) { for (k=0; k<Nz; k++) { CEx[i][j][k] = 0.5; CEy[i][j][k] = 0.5; CEz[i][j][k] = 0.5; } } } } __global__ void initArrays(int Nx, int Ny, int Nzpit, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz) { int idx; idx = blockIdx.x*blockDim.x + threadIdx.x; //printf("gridDim.x=%d\n",gridDim.x); //printf("blockIdx.x=%d, blockDim.x=%d, threadIdx.x=%d\n", blockIdx.x, blockDim.x, threadIdx.x); if ( idx < Nx*Ny*Nzpit ) { Ex[idx] = 0; Ey[idx] = 0; Ez[idx] = 0; Hx[idx] = 0; Hy[idx] = 0; Hz[idx] = 0; } } __global__ void updateE(int Nx, int Ny, int Nz, int Nzpit, int BPGy, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz, float *CEx, float *CEy, float *CEz) { int tk, tj; int i, j, k; tk = threadIdx.x; tj = threadIdx.y; k = TPBx*blockIdx.x + tk; j = TPBy*(blockIdx.y % BPGy ) + tj; i = blockIdx.y / BPGy; //printf("[%d, %d] [%d, %d, %d]\n", tk, tj, k, j, i); //printf("gridDim.x=%d, gridDim.y=%d\n", gridDim.x, gridDim.y ); printf("blockIdx.x=%d, blockIdx.y=%d\n", blockIdx.x, blockIdx.y ); //printf("BPGy=%d, blockIdx.y=%d, %BPGy=%d, /BPGy=%d\n", BPGy, blockIdx.y, blockIdx.y%BPGy, blockIdx.y/BPGy ); if ( i<Nx && j<Ny && k<Nz ) { int Nyzpit = Ny*Nzpit; int idx = k + Nzpit*j + Nyzpit*i; //printf("idx=%d, [%d, %d] [%d, %d, %d]\n", idx, tk, tj, k, j, i); //printf("idx=%d\n", idx); __shared__ float hx[TPBy+1][TPBx+1], hy[TPBy][TPBx+1], hz[TPBy+1][TPBx]; hx[tj][tk] = Hx[idx]; hy[tj][tk] = Hy[idx]; hz[tj][tk] = Hz[idx]; if ( tk==TPBx-1 && k<Nz-1 ) { hx[tj][tk+1] = Hx[idx+1]; hy[tj][tk+1] = Hy[idx+1]; } if ( tj==TPBy-1 && j<Ny-1 ) { hx[tj+1][tk] = 
Hx[idx+Nzpit]; hz[tj+1][tk] = Hz[idx+Nzpit]; } __syncthreads(); if ( k < Nz ) { if ( j<Ny-1 && k<Nz-1 ) Ex[idx] += CEx[idx]*( hz[tj+1][tk] - hz[tj][tk] - hy[tj][tk+1] + hy[tj][tk] ); if ( i<Nx-1 && k<Nz-1 ) Ey[idx] += CEy[idx]*( hx[tj][tk+1] - hx[tj][tk] - Hz[idx+Nyzpit] + hz[tj][tk] ); if ( i<Nx-1 && j<Ny-1 ) Ez[idx] += CEz[idx]*( Hy[idx+Nyzpit] - hy[tj][tk] - hx[tj+1][tk] + hx[tj][tk] ); } } } __global__ void updateSrc(int Nx, int Ny, int Nz, int Nzpit, float *Ex, int tstep) { int idx, ijk; idx = blockIdx.x*blockDim.x + threadIdx.x; ijk = idx*(Ny)*(Nzpit) + (Ny/2)*(Nzpit) + (Nz/2); //printf("idx=%d, ijk=%d\n", idx, ijk); //Ex[ijk] += __sinf(0.1*tstep); if ( idx < Nx ) { Ex[ijk] += sin(0.1*tstep); } } __global__ void updateH(int Nx, int Ny, int Nz, int Nzpit, int BPGy, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz) { int tk, tj; int i, j, k; tk = threadIdx.x; tj = threadIdx.y; k = TPBx*blockIdx.x + tk; j = TPBy*(blockIdx.y % BPGy ) + tj; i = blockIdx.y / BPGy; if ( i<Nx && j<Ny && k<Nz ) { int Nyzpit = Ny*Nzpit; int idx = k + Nzpit*j + Nyzpit*i; __shared__ float ex[TPBy+1][TPBx+1], ey[TPBy][TPBx+1], ez[TPBy+1][TPBx]; ex[tj+1][tk+1] = Ex[idx]; ey[tj][tk+1] = Ey[idx]; ez[tj+1][tk] = Ez[idx]; if ( tk==0 && k>0 ) { ex[tj][0] = Ex[idx-1]; ey[tj][0] = Ey[idx-1]; } if ( tj==0 && j>0 ) { ex[0][tk] = Ex[idx-Nzpit]; ez[0][tk] = Ez[idx-Nzpit]; } __syncthreads(); if ( k < Nz ) { if ( j>0 && k>0 ) Hx[idx] -= 0.5*( ez[tj+1][tk] - ez[tj][tk] - ey[tj][tk+1] + ey[tj][tk] ); if ( i>0 && k>0 ) Hy[idx] -= 0.5*( ex[tj+1][tk+1] - ex[tj+1][tk] - ez[tj+1][tk] + Ez[idx-Nyzpit] ); if ( i>0 && j>0 ) Hz[idx] -= 0.5*( ey[tj][tk+1] - Ey[idx-Nyzpit] - ex[tj+1][tk+1] + ex[tj][tk+1] ); } } } int main() { int tstep; char time_str[32]; time_t t0; // Set the parameters int Nx, Ny, Nz, TMAX; Nx = 100; Ny = 200; Nz = 500; //Ny = 16; //Nz = 20; TMAX = 100; // Allocate host memory float ***Ex; float ***CEx, ***CEy, ***CEz; Ex = makeArray(Nx, Ny, Nz); CEx = makeArray(Nx, Ny, Nz); CEy = makeArray(Nx, Ny, Nz); CEz = makeArray(Nx, Ny, Nz); // Geometry set_geometry(Nx, Ny, Nz, CEx, CEy, CEz); // Allocate device memory float *devEx, *devEy, *devEz; float *devHx, *devHy, *devHz; float *devCEx, *devCEy, *devCEz; int z_size = Nz*sizeof(float); size_t pitch; cudaMallocPitch ( (void**) &devEx, &pitch, z_size, Nx*Ny ); cudaMallocPitch ( (void**) &devEy, &pitch, z_size, Nx*Ny ); cudaMallocPitch ( (void**) &devEz, &pitch, z_size, Nx*Ny ); cudaMallocPitch ( (void**) &devCEx, &pitch, z_size, Nx*Ny ); cudaMallocPitch ( (void**) &devCEy, &pitch, z_size, Nx*Ny ); cudaMallocPitch ( (void**) &devCEz, &pitch, z_size, Nx*Ny ); cudaMallocPitch ( (void**) &devHx, &pitch, z_size, Nx*Ny ); cudaMallocPitch ( (void**) &devHy, &pitch, z_size, Nx*Ny ); cudaMallocPitch ( (void**) &devHz, &pitch, z_size, Nx*Ny ); // Copy arrays from host to device cudaMemcpy2D ( devCEx, pitch, CEx[0][0], z_size, z_size, Nx*Ny, cudaMemcpyHostToDevice ); cudaMemcpy2D ( devCEy, pitch, CEy[0][0], z_size, z_size, Nx*Ny, cudaMemcpyHostToDevice ); cudaMemcpy2D ( devCEz, pitch, CEz[0][0], z_size, z_size, Nx*Ny, cudaMemcpyHostToDevice ); int Nz_pitch = pitch/4; printf("pitch= %u, Nz_pitch= %d\n", pitch, Nz_pitch); // Number of thread blocks in the grid // Number of threads per block int BPGx = Nz_pitch/TPBx; int BPGy = Ny%TPBy == 0 ?
Ny/TPBy : Ny/TPBy + 1; int BPGz = Nx; //dim3 Dg(BPGx, BPGy*BPGz); dim3 Dg = dim3(BPGx, BPGy*BPGz); dim3 Db = dim3(TPBx, TPBy); //printf("TPBx=%d, TPBy=%d, TPBz=%d\n", TPBx, TPBy, TPBz); printf("TPBx=%d, TPBy=%d\n", TPBx, TPBy); printf("BPGx=%d, BPGy=%d, BPGz=%d\n", BPGx, BPGy, BPGz); printf("Dg(%d,%d)\n", BPGx, BPGy*BPGz); printf("Db(%d,%d)\n", TPBx, TPBy); printf("Threads per block: %d (%d,%d,%d)\n", TPBx*TPBy*1, TPBx, TPBy, 1); if ( TPBx*TPBy > 512 ) { printf("Error: An excessive number of threads per block.\n%d (%d,%d,%d)\n", TPBx*TPBy, TPBx, TPBy, 1); exit(0); } printf("Blocks per grid: %d (%d,%d,%d)\n", BPGx*BPGy*BPGz, BPGx, BPGy, BPGz); if ( BPGx*BPGy*BPGz > 65535 ) { printf("Error: An excessive number of blocks per grid.\n%d (%d,%d,%d)\n", BPGx*BPGy*BPGz, BPGx, BPGy, BPGz); exit(0); } int TPBsrc = Nx; int BPGsrc = 1; dim3 Dgsrc(BPGsrc); dim3 Dbsrc(TPBsrc); int N = Nx*Ny*Nz_pitch; int TPBinit = Nz_pitch; int BPGinit = N%TPBinit == 0 ? N/TPBinit : N/TPBinit + 1; dim3 Dginit(BPGinit); dim3 Dbinit(TPBinit); // Initialize the device arrays //initArrays <<<Dginit,Dbinit>>> ( Nx, Ny, Nz_pitch, devEx, devEy, devEz, devHx, devHy, devHz ); // Main time loop t0 = time(0); for ( tstep=1; tstep<=TMAX; tstep++) { //for ( tstep=1; tstep<=10; tstep++) { // Update on the GPU updateE <<<Dg,Db>>> ( Nx, Ny, Nz, Nz_pitch, BPGy, devEx, devEy, devEz, devHx, devHy, devHz, devCEx, devCEy, devCEz ); //updateSrc <<<Dgsrc,Dbsrc>>> ( Nx, Ny, Nz, Nz_pitch, devEx, tstep ); //updateH <<<Dg,Db>>> ( Nx, Ny, Nz, Nz_pitch, BPGy, devEx, devEy, devEz, devHx, devHy, devHz ); //if ( tstep/10*10 == tstep ) { // Copy arrays from device to host cudaMemcpy2D( Ex[0][0], z_size, devEx, pitch, z_size, Nx*Ny, cudaMemcpyDeviceToHost ); //print_array(Nx, Ny, Nz, Ex); //dumpToH5(Nx, Ny, Nz, Nx/2, 0, 0, Nx/2, Ny-1, Nz-1, Ex, "gpu_png/Ex-%05d.h5", tstep); //exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ex-%05d.h5", tstep); updateTimer(t0, tstep, time_str); printf("tstep=%d\t%s\n", tstep, time_str); //} } updateTimer(t0, tstep, time_str); printf("tstep=%d\t%s\n", tstep, time_str); }
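The runtime calls in the program above are never checked for errors. A minimal, hedged sketch of an error-checking wrapper that could surround them; CHECK_CUDA is an illustrative name and not part of the original program.
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// Print the failing call's location and abort.
#define CHECK_CUDA(call)                                                     \
    do {                                                                     \
        cudaError_t err__ = (call);                                          \
        if (err__ != cudaSuccess) {                                          \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,               \
                    cudaGetErrorString(err__));                              \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)
// Example use, mirroring the allocations and launch above:
//   CHECK_CUDA( cudaMallocPitch((void**)&devEx, &pitch, z_size, Nx*Ny) );
//   updateE<<<Dg,Db>>>( ... );
//   CHECK_CUDA( cudaGetLastError() );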
8e40240356829fdf7d7a456e40c8f707dc6ce727.hip
// !!! This is a file automatically generated by hipify!!! /* * This file is part of the Score-P software (http://www.score-p.org) * * Copyright (c) 2009-2013, * RWTH Aachen University, Germany * * Copyright (c) 2009-2013, * Gesellschaft fuer numerische Simulation mbH Braunschweig, Germany * * Copyright (c) 2009-2013, 2015, * Technische Universitaet Dresden, Germany * * Copyright (c) 2009-2013, * University of Oregon, Eugene, USA * * Copyright (c) 2009-2013, * Forschungszentrum Juelich GmbH, Germany * * Copyright (c) 2009-2013, * German Research School for Simulation Sciences GmbH, Juelich/Aachen, Germany * * Copyright (c) 2009-2013, * Technische Universitaet Muenchen, Germany * * This software may be modified and distributed under the terms of * a BSD-style license. See the COPYING file in the package base * directory for details. * */ /** * @file * * @brief Test program for the CUDA adapter. Several parts of this program have * been extracted from the NVIDIA computing samples 'simpleStreams' and * 'concurrentKernels' * * The basic test runs one kernel in (1+num_streams) streams. * * This advanced test runs (1+nreps*num_streams) instances of kernel 'init_array' and * (num_streams) instances of kernel 'clock_block'. */ #include <stdio.h> #include <unistd.h> #include <stdint.h> #include <SCOREP_User.h> // CUDA utilities and system includes #include <hip/hip_runtime.h> #define CUDART_CALL( _err, _msg ) \ if ( hipSuccess != _err ) \ __checkCUDACall( _err, _msg, __FILE__, __LINE__ ) static uint32_t kernel_workload = 20; static uint64_t cpu_usleeptime = 10000; static uint32_t num_streams = 3; static int basic_mode = 0; /* function declarations */ static void __checkCUDACall( hipError_t ecode, const char* msg, const char* file, const int line ); static void runBasicTest( int nstreams ); static void runCopyComputeOverlap( int nstreams ); static void runConcurrentKernels( int nstreams ); static void show_help( void ); static char getopt( char* argument ); static void setArguments( int argc, char* argv[] ); __global__ void init_array( int* g_data, int* factor, int num_iterations ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; for ( int i = 0; i < num_iterations; i++ ) { g_data[ idx ] += *factor; // non-coalesced on purpose, to burn time } } // This is a kernel that does no real work but runs at least for a specified number of clocks __global__ void clock_block( clock_t* d_o, clock_t clock_count ) { unsigned int start_clock = ( unsigned int )clock(); clock_t clock_offset = 0; while ( clock_offset < clock_count ) { unsigned int end_clock = ( unsigned int )clock(); // The code below should work like // this (thanks to modular arithmetics): // // clock_offset = (clock_t) (end_clock > start_clock ? // end_clock - start_clock : // end_clock + (0xffffffffu - start_clock)); // // Indeed, let m = 2^32 then // end - start = end + m - start (mod m). 
clock_offset = ( clock_t )( end_clock - start_clock ); } d_o[ 0 ] = clock_offset; } int main( int argc, char** argv ) { SCOREP_USER_FUNC_BEGIN() // check the compute capability of the device int num_devices = 0; setArguments( argc, argv ); CUDART_CALL( hipGetDeviceCount( &num_devices ), "hipGetDeviceCount" ); if ( 0 == num_devices ) { printf( "your system does not have a CUDA capable device, waiving test...\n" ); SCOREP_USER_FUNC_END() exit( 77 ); /* denote the test as skipped */ } /* check if the command-line chosen device ID is within range, exit if not if( cuda_device >= num_devices ){ printf("cuda_device=%d is invalid, must choose device ID between 0 and %d\n", cuda_device, num_devices-1); SCOREP_USER_FUNC_END() exit(-1); }*/ if ( basic_mode ) { runBasicTest( num_streams ); } else { runCopyComputeOverlap( num_streams ); runConcurrentKernels( num_streams ); } hipDeviceReset(); SCOREP_USER_FUNC_END() } static void runBasicTest( int nstreams ) { SCOREP_USER_FUNC_BEGIN() int n = 512 * 1024; // number of integers in the data set int nbytes = n * sizeof( int ); // number of data bytes dim3 threads, blocks; // kernel launch configuration int niterations = kernel_workload; // number of iterations for the loop inside the kernel_time // allocate host memory int c = 5; // value to which the array will be initialized int* h_a = 0; // pointer to the array data in host memory printf( "Starting basic test\n" ); h_a = ( int* )malloc( nbytes ); // allocate device memory int* d_a = 0, * d_c = 0; // pointers to data and init value in the device memory CUDART_CALL( hipMalloc( ( void** )&d_a, nbytes ), "hipMalloc" ); CUDART_CALL( hipMalloc( ( void** )&d_c, sizeof( int ) ), "hipMalloc" ); CUDART_CALL( hipMemset( d_a, 0, nbytes ), "hipMemset" ); CUDART_CALL( hipMemcpy( d_c, &c, sizeof( int ), hipMemcpyHostToDevice ), "hipMemcpy" ); threads = dim3( 512, 1 ); blocks = dim3( n / threads.x, 1 ); hipStream_t* streams = ( hipStream_t* )malloc( nstreams * sizeof( hipStream_t ) ); for ( int i = 0; i < nstreams; i++ ) { CUDART_CALL( hipStreamCreate( &( streams[ i ] ) ), "hipStreamCreate" ); } SCOREP_USER_REGION_BY_NAME_BEGIN( "init_array", SCOREP_USER_REGION_TYPE_COMMON ) hipLaunchKernelGGL(( init_array), dim3(blocks), dim3(threads), 0, 0, d_a, d_c, niterations ); SCOREP_USER_REGION_BY_NAME_END( "init_array" ) CUDART_CALL( hipDeviceSynchronize(), "hipDeviceSynchronize" ); for ( int i = 0; i < nstreams; i++ ) { SCOREP_USER_REGION_BY_NAME_BEGIN( "init_array", SCOREP_USER_REGION_TYPE_COMMON ) hipLaunchKernelGGL(( init_array), dim3(blocks), dim3(threads), 0, streams[ i ], d_a, d_c, niterations ); SCOREP_USER_REGION_BY_NAME_END( "init_array" ) CUDART_CALL( hipDeviceSynchronize(), "hipDeviceSynchronize" ); } // cleanup for ( int i = 0; i < nstreams; i++ ) { hipStreamDestroy( streams[ i ] ); } free( streams ); hipFree( d_a ); hipFree( d_c ); free( h_a ); SCOREP_USER_FUNC_END() } static void runCopyComputeOverlap( int nstreams ) { SCOREP_USER_FUNC_BEGIN() int nreps = 3; // number of times each experiment is repeated int n = 512 * 1024; // number of integers in the data set int nbytes = n * sizeof( int ); // number of data bytes dim3 threads, blocks; // kernel launch configuration int niterations = kernel_workload; // number of iterations for the loop inside the kernel_time // allocate host memory int c = 5; // value to which the array will be initialized int* h_a = 0; // pointer to the array data in host memory int* hAligned_a = 0; // pointer to the array data in host memory (aligned to MEMORY_ALIGNMENT) // allocate host 
memory (pinned is required for achieve asynchronicity) CUDART_CALL( hipHostMalloc( ( void** )&h_a, nbytes ), "hipHostMalloc" ); hAligned_a = h_a; // allocate device memory int* d_a = 0, * d_c = 0; // pointers to data and init value in the device memory CUDART_CALL( hipMalloc( ( void** )&d_a, nbytes ), "hipMalloc" ); CUDART_CALL( hipMalloc( ( void** )&d_c, sizeof( int ) ), "hipMalloc" ); CUDART_CALL( hipMemcpy( d_c, &c, sizeof( int ), hipMemcpyHostToDevice ), "hipMemcpy" ); threads = dim3( 512, 1 ); blocks = dim3( n / threads.x, 1 ); SCOREP_USER_REGION_BY_NAME_BEGIN( "init_array", SCOREP_USER_REGION_TYPE_COMMON ) hipLaunchKernelGGL(( init_array), dim3(blocks), dim3(threads), 0, 0, d_a, d_c, niterations ); SCOREP_USER_REGION_BY_NAME_END( "init_array" ) usleep( cpu_usleeptime ); hipMemcpyAsync( hAligned_a, d_a, nbytes, hipMemcpyDeviceToHost ); // allocate and initialize an array of stream handles hipStream_t* streams = ( hipStream_t* )malloc( nstreams * sizeof( hipStream_t ) ); for ( int i = 0; i < nstreams; i++ ) { CUDART_CALL( hipStreamCreate( &( streams[ i ] ) ), "hipStreamCreate" ); } niterations = kernel_workload; printf( "Starting Copy/Compute overlap test\n" ); threads = dim3( 512, 1 ); blocks = dim3( n / ( nstreams * threads.x ), 1 ); memset( hAligned_a, 255, nbytes ); // set host memory bits to all 1s, for testing correctness hipMemset( d_a, 0, nbytes ); // set device memory to all 0s, for testing correctness for ( int k = 0; k < nreps; k++ ) { // asynchronously launch nstreams kernels, each operating on its own portion of data for ( int i = 0; i < nstreams; i++ ) { SCOREP_USER_REGION_BY_NAME_BEGIN( "init_array", SCOREP_USER_REGION_TYPE_COMMON ) hipLaunchKernelGGL(( init_array), dim3(blocks), dim3(threads), 0, streams[ i ], d_a + i * n / nstreams, d_c, niterations ); SCOREP_USER_REGION_BY_NAME_END( "init_array" ) } // asynchronously launch nstreams memcopies. 
Note that memcopy in stream x will only // commence executing when all previous CUDA calls in stream x have completed for ( int i = 0; i < nstreams; i++ ) { hipMemcpyAsync( hAligned_a + i * n / nstreams, d_a + i * n / nstreams, nbytes / nstreams, hipMemcpyDeviceToHost, streams[ i ] ); } } CUDART_CALL( hipDeviceSynchronize(), "hipDeviceSynchronize" ); // release resources for ( int i = 0; i < nstreams; i++ ) { hipStreamDestroy( streams[ i ] ); } free( streams ); hipHostFree( h_a ); hipFree( d_a ); hipFree( d_c ); SCOREP_USER_FUNC_END() } static void runConcurrentKernels( int nstreams ) { SCOREP_USER_FUNC_BEGIN() float kernel_time = 10; // time the kernel should run in ms hipDeviceProp_t deviceProp; clock_t* a = NULL; // pointer to the array data in host memory int nbytes = nstreams * sizeof( clock_t ); // number of data bytes int cuda_device = 0; CUDART_CALL( hipGetDevice( &cuda_device ), "hipGetDevice" ); CUDART_CALL( hipGetDeviceProperties( &deviceProp, cuda_device ), "hipGetDeviceProperties" ); if ( ( deviceProp.concurrentKernels == 0 ) ) { printf( "> GPU does not support concurrent kernel execution\n" ); printf( " CUDA kernel runs will be serialized\n" ); } // allocate host memory CUDART_CALL( hipHostMalloc( ( void** )&a, nbytes ), "hipHostMalloc" ); // allocate device memory clock_t* d_ac = 0; // pointers to data and init value in the device memory CUDART_CALL( hipMalloc( ( void** )&d_ac, nbytes ), "hipMalloc" ); // allocate and initialize an array of stream handles hipStream_t* streams = ( hipStream_t* )malloc( nstreams * sizeof( hipStream_t ) ); for ( int i = 0; i < nstreams; i++ ) { CUDART_CALL( hipStreamCreate( &( streams[ i ] ) ), "hipStreamCreate" ); } // time execution with nkernels streams clock_t total_clocks = 0; clock_t time_clocks = kernel_time * deviceProp.clockRate; printf( "Starting concurrent kernel test\n" ); // queue nkernels in separate streams and record when they are done for ( int i = 0; i < nstreams; ++i ) { SCOREP_USER_REGION_BY_NAME_BEGIN( "clock_block", SCOREP_USER_REGION_TYPE_COMMON ) hipLaunchKernelGGL(( clock_block), dim3(1), dim3(1), 0, streams[ i ], &d_ac[ i ], time_clocks ); SCOREP_USER_REGION_BY_NAME_END( "clock_block" ) total_clocks += time_clocks; } CUDART_CALL( hipDeviceSynchronize(), "hipDeviceSynchronize" ); // release resources for ( int i = 0; i < nstreams; i++ ) { hipStreamDestroy( streams[ i ] ); } free( streams ); hipHostFree( a ); hipFree( d_ac ); SCOREP_USER_FUNC_END() } /* * Checks if a CUDA runtime API call returns successful and respectively prints * the error. 
* * @param ecode the CUDA error code * @param msg a message to get more detailed information about the error * @param the corresponding file * @param the line the error occurred */ static void __checkCUDACall( hipError_t ecode, const char* msg, const char* file, const int line ) { if ( msg != NULL ) { printf( "[CUDART] %s", msg ); } printf( "[CUDA Error <%s>:%i] %s", file, line, hipGetErrorString( ecode ) ); } static void show_help( void ) { printf( "\ncuda_test [OPTION]\n" "\t-g kernel workload as number of loop iterations (positive integer)\n" "\t-c sleep time of host after first kernel launch in seconds (positive integer)\n" "\t-s number of CUDA streams (positive integer)\n" "\t-b run only basic test (one kernel)\n\n" ); } static char getopt( char* argument ) { if ( argument[ 0 ] == '-' ) { return argument[ 1 ]; } return 'f'; } static void setArguments( int argc, char* argv[] ) { int j = 1; while ( j < argc ) { switch ( getopt( argv[ j ] ) ) { case 'g': // number of loop iterations inside GPU kernel kernel_workload = atoi( argv[ ++j ] ); break; case 'c': // seconds to sleep after launch of first kernel cpu_usleeptime = atoi( argv[ ++j ] ) * 1000; break; case 's': // number of CUDA streams to use num_streams = atoi( argv[ ++j ] ); break; case 'b': // test only basic CUDA features basic_mode = 1; break; default: show_help(); exit( 1 ); } j++; } }
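For reference, clock_block receives its duration as a tick count that the caller computes as time_clocks = kernel_time * deviceProp.clockRate; this works because clockRate is reported in kHz, i.e. ticks per millisecond. A small worked sketch, where the 1.5 GHz clock is only an assumed example value:
// kernel_time is in ms and clockRate in kHz (ticks per ms), so ticks = kernel_time * clockRate.
clock_t time_clocks = ( clock_t )( 10.0f /* ms */ * 1500000 /* kHz, i.e. an assumed 1.5 GHz clock */ );  // = 15,000,000 ticks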
8e40240356829fdf7d7a456e40c8f707dc6ce727.cu
/* * This file is part of the Score-P software (http://www.score-p.org) * * Copyright (c) 2009-2013, * RWTH Aachen University, Germany * * Copyright (c) 2009-2013, * Gesellschaft fuer numerische Simulation mbH Braunschweig, Germany * * Copyright (c) 2009-2013, 2015, * Technische Universitaet Dresden, Germany * * Copyright (c) 2009-2013, * University of Oregon, Eugene, USA * * Copyright (c) 2009-2013, * Forschungszentrum Juelich GmbH, Germany * * Copyright (c) 2009-2013, * German Research School for Simulation Sciences GmbH, Juelich/Aachen, Germany * * Copyright (c) 2009-2013, * Technische Universitaet Muenchen, Germany * * This software may be modified and distributed under the terms of * a BSD-style license. See the COPYING file in the package base * directory for details. * */ /** * @file * * @brief Test program for the CUDA adapter. Several parts of this program have * been extracted from the NVIDIA computing samples 'simpleStreams' and * 'concurrentKernels' * * The basic test runs one kernel in (1+num_streams) streams. * * This advanced test runs (1+nreps*num_streams) instances of kernel 'init_array' and * (num_streams) instances of kernel 'clock_block'. */ #include <stdio.h> #include <unistd.h> #include <stdint.h> #include <SCOREP_User.h> // CUDA utilities and system includes #include <cuda_runtime.h> #define CUDART_CALL( _err, _msg ) \ if ( cudaSuccess != _err ) \ __checkCUDACall( _err, _msg, __FILE__, __LINE__ ) static uint32_t kernel_workload = 20; static uint64_t cpu_usleeptime = 10000; static uint32_t num_streams = 3; static int basic_mode = 0; /* function declarations */ static void __checkCUDACall( cudaError_t ecode, const char* msg, const char* file, const int line ); static void runBasicTest( int nstreams ); static void runCopyComputeOverlap( int nstreams ); static void runConcurrentKernels( int nstreams ); static void show_help( void ); static char getopt( char* argument ); static void setArguments( int argc, char* argv[] ); __global__ void init_array( int* g_data, int* factor, int num_iterations ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; for ( int i = 0; i < num_iterations; i++ ) { g_data[ idx ] += *factor; // non-coalesced on purpose, to burn time } } // This is a kernel that does no real work but runs at least for a specified number of clocks __global__ void clock_block( clock_t* d_o, clock_t clock_count ) { unsigned int start_clock = ( unsigned int )clock(); clock_t clock_offset = 0; while ( clock_offset < clock_count ) { unsigned int end_clock = ( unsigned int )clock(); // The code below should work like // this (thanks to modular arithmetics): // // clock_offset = (clock_t) (end_clock > start_clock ? // end_clock - start_clock : // end_clock + (0xffffffffu - start_clock)); // // Indeed, let m = 2^32 then // end - start = end + m - start (mod m). 
clock_offset = ( clock_t )( end_clock - start_clock ); } d_o[ 0 ] = clock_offset; } int main( int argc, char** argv ) { SCOREP_USER_FUNC_BEGIN() // check the compute capability of the device int num_devices = 0; setArguments( argc, argv ); CUDART_CALL( cudaGetDeviceCount( &num_devices ), "cudaGetDeviceCount" ); if ( 0 == num_devices ) { printf( "your system does not have a CUDA capable device, waiving test...\n" ); SCOREP_USER_FUNC_END() exit( 77 ); /* denote the test as skipped */ } /* check if the command-line chosen device ID is within range, exit if not if( cuda_device >= num_devices ){ printf("cuda_device=%d is invalid, must choose device ID between 0 and %d\n", cuda_device, num_devices-1); SCOREP_USER_FUNC_END() exit(-1); }*/ if ( basic_mode ) { runBasicTest( num_streams ); } else { runCopyComputeOverlap( num_streams ); runConcurrentKernels( num_streams ); } cudaDeviceReset(); SCOREP_USER_FUNC_END() } static void runBasicTest( int nstreams ) { SCOREP_USER_FUNC_BEGIN() int n = 512 * 1024; // number of integers in the data set int nbytes = n * sizeof( int ); // number of data bytes dim3 threads, blocks; // kernel launch configuration int niterations = kernel_workload; // number of iterations for the loop inside the kernel_time // allocate host memory int c = 5; // value to which the array will be initialized int* h_a = 0; // pointer to the array data in host memory printf( "Starting basic test\n" ); h_a = ( int* )malloc( nbytes ); // allocate device memory int* d_a = 0, * d_c = 0; // pointers to data and init value in the device memory CUDART_CALL( cudaMalloc( ( void** )&d_a, nbytes ), "cudaMalloc" ); CUDART_CALL( cudaMalloc( ( void** )&d_c, sizeof( int ) ), "cudaMalloc" ); CUDART_CALL( cudaMemset( d_a, 0, nbytes ), "cudaMemset" ); CUDART_CALL( cudaMemcpy( d_c, &c, sizeof( int ), cudaMemcpyHostToDevice ), "cudaMemcpy" ); threads = dim3( 512, 1 ); blocks = dim3( n / threads.x, 1 ); cudaStream_t* streams = ( cudaStream_t* )malloc( nstreams * sizeof( cudaStream_t ) ); for ( int i = 0; i < nstreams; i++ ) { CUDART_CALL( cudaStreamCreate( &( streams[ i ] ) ), "cudaStreamCreate" ); } SCOREP_USER_REGION_BY_NAME_BEGIN( "init_array", SCOREP_USER_REGION_TYPE_COMMON ) init_array<<<blocks, threads>>>( d_a, d_c, niterations ); SCOREP_USER_REGION_BY_NAME_END( "init_array" ) CUDART_CALL( cudaDeviceSynchronize(), "cudaDeviceSynchronize" ); for ( int i = 0; i < nstreams; i++ ) { SCOREP_USER_REGION_BY_NAME_BEGIN( "init_array", SCOREP_USER_REGION_TYPE_COMMON ) init_array<<<blocks, threads, 0, streams[ i ]>>>( d_a, d_c, niterations ); SCOREP_USER_REGION_BY_NAME_END( "init_array" ) CUDART_CALL( cudaDeviceSynchronize(), "cudaDeviceSynchronize" ); } // cleanup for ( int i = 0; i < nstreams; i++ ) { cudaStreamDestroy( streams[ i ] ); } free( streams ); cudaFree( d_a ); cudaFree( d_c ); free( h_a ); SCOREP_USER_FUNC_END() } static void runCopyComputeOverlap( int nstreams ) { SCOREP_USER_FUNC_BEGIN() int nreps = 3; // number of times each experiment is repeated int n = 512 * 1024; // number of integers in the data set int nbytes = n * sizeof( int ); // number of data bytes dim3 threads, blocks; // kernel launch configuration int niterations = kernel_workload; // number of iterations for the loop inside the kernel_time // allocate host memory int c = 5; // value to which the array will be initialized int* h_a = 0; // pointer to the array data in host memory int* hAligned_a = 0; // pointer to the array data in host memory (aligned to MEMORY_ALIGNMENT) // allocate host memory (pinned is required for achieve 
asynchronicity) CUDART_CALL( cudaMallocHost( ( void** )&h_a, nbytes ), "cudaMallocHost" ); hAligned_a = h_a; // allocate device memory int* d_a = 0, * d_c = 0; // pointers to data and init value in the device memory CUDART_CALL( cudaMalloc( ( void** )&d_a, nbytes ), "cudaMalloc" ); CUDART_CALL( cudaMalloc( ( void** )&d_c, sizeof( int ) ), "cudaMalloc" ); CUDART_CALL( cudaMemcpy( d_c, &c, sizeof( int ), cudaMemcpyHostToDevice ), "cudaMemcpy" ); threads = dim3( 512, 1 ); blocks = dim3( n / threads.x, 1 ); SCOREP_USER_REGION_BY_NAME_BEGIN( "init_array", SCOREP_USER_REGION_TYPE_COMMON ) init_array<<<blocks, threads>>>( d_a, d_c, niterations ); SCOREP_USER_REGION_BY_NAME_END( "init_array" ) usleep( cpu_usleeptime ); cudaMemcpyAsync( hAligned_a, d_a, nbytes, cudaMemcpyDeviceToHost ); // allocate and initialize an array of stream handles cudaStream_t* streams = ( cudaStream_t* )malloc( nstreams * sizeof( cudaStream_t ) ); for ( int i = 0; i < nstreams; i++ ) { CUDART_CALL( cudaStreamCreate( &( streams[ i ] ) ), "cudaStreamCreate" ); } niterations = kernel_workload; printf( "Starting Copy/Compute overlap test\n" ); threads = dim3( 512, 1 ); blocks = dim3( n / ( nstreams * threads.x ), 1 ); memset( hAligned_a, 255, nbytes ); // set host memory bits to all 1s, for testing correctness cudaMemset( d_a, 0, nbytes ); // set device memory to all 0s, for testing correctness for ( int k = 0; k < nreps; k++ ) { // asynchronously launch nstreams kernels, each operating on its own portion of data for ( int i = 0; i < nstreams; i++ ) { SCOREP_USER_REGION_BY_NAME_BEGIN( "init_array", SCOREP_USER_REGION_TYPE_COMMON ) init_array<<<blocks, threads, 0, streams[ i ]>>>( d_a + i * n / nstreams, d_c, niterations ); SCOREP_USER_REGION_BY_NAME_END( "init_array" ) } // asynchronously launch nstreams memcopies. 
Note that memcopy in stream x will only // commence executing when all previous CUDA calls in stream x have completed for ( int i = 0; i < nstreams; i++ ) { cudaMemcpyAsync( hAligned_a + i * n / nstreams, d_a + i * n / nstreams, nbytes / nstreams, cudaMemcpyDeviceToHost, streams[ i ] ); } } CUDART_CALL( cudaDeviceSynchronize(), "cudaDeviceSynchronize" ); // release resources for ( int i = 0; i < nstreams; i++ ) { cudaStreamDestroy( streams[ i ] ); } free( streams ); cudaFreeHost( h_a ); cudaFree( d_a ); cudaFree( d_c ); SCOREP_USER_FUNC_END() } static void runConcurrentKernels( int nstreams ) { SCOREP_USER_FUNC_BEGIN() float kernel_time = 10; // time the kernel should run in ms cudaDeviceProp deviceProp; clock_t* a = NULL; // pointer to the array data in host memory int nbytes = nstreams * sizeof( clock_t ); // number of data bytes int cuda_device = 0; CUDART_CALL( cudaGetDevice( &cuda_device ), "cudaGetDevice" ); CUDART_CALL( cudaGetDeviceProperties( &deviceProp, cuda_device ), "cudaGetDeviceProperties" ); if ( ( deviceProp.concurrentKernels == 0 ) ) { printf( "> GPU does not support concurrent kernel execution\n" ); printf( " CUDA kernel runs will be serialized\n" ); } // allocate host memory CUDART_CALL( cudaMallocHost( ( void** )&a, nbytes ), "cudaMallocHost" ); // allocate device memory clock_t* d_ac = 0; // pointers to data and init value in the device memory CUDART_CALL( cudaMalloc( ( void** )&d_ac, nbytes ), "cudaMalloc" ); // allocate and initialize an array of stream handles cudaStream_t* streams = ( cudaStream_t* )malloc( nstreams * sizeof( cudaStream_t ) ); for ( int i = 0; i < nstreams; i++ ) { CUDART_CALL( cudaStreamCreate( &( streams[ i ] ) ), "cudaStreamCreate" ); } // time execution with nkernels streams clock_t total_clocks = 0; clock_t time_clocks = kernel_time * deviceProp.clockRate; printf( "Starting concurrent kernel test\n" ); // queue nkernels in separate streams and record when they are done for ( int i = 0; i < nstreams; ++i ) { SCOREP_USER_REGION_BY_NAME_BEGIN( "clock_block", SCOREP_USER_REGION_TYPE_COMMON ) clock_block<<<1, 1, 0, streams[ i ]>>>( &d_ac[ i ], time_clocks ); SCOREP_USER_REGION_BY_NAME_END( "clock_block" ) total_clocks += time_clocks; } CUDART_CALL( cudaDeviceSynchronize(), "cudaDeviceSynchronize" ); // release resources for ( int i = 0; i < nstreams; i++ ) { cudaStreamDestroy( streams[ i ] ); } free( streams ); cudaFreeHost( a ); cudaFree( d_ac ); SCOREP_USER_FUNC_END() } /* * Checks if a CUDA runtime API call returns successful and respectively prints * the error. 
* * @param ecode the CUDA error code * @param msg a message to get more detailed information about the error * @param the corresponding file * @param the line the error occurred */ static void __checkCUDACall( cudaError_t ecode, const char* msg, const char* file, const int line ) { if ( msg != NULL ) { printf( "[CUDART] %s", msg ); } printf( "[CUDA Error <%s>:%i] %s", file, line, cudaGetErrorString( ecode ) ); } static void show_help( void ) { printf( "\ncuda_test [OPTION]\n" "\t-g kernel workload as number of loop iterations (positive integer)\n" "\t-c sleep time of host after first kernel launch in seconds (positive integer)\n" "\t-s number of CUDA streams (positive integer)\n" "\t-b run only basic test (one kernel)\n\n" ); } static char getopt( char* argument ) { if ( argument[ 0 ] == '-' ) { return argument[ 1 ]; } return 'f'; } static void setArguments( int argc, char* argv[] ) { int j = 1; while ( j < argc ) { switch ( getopt( argv[ j ] ) ) { case 'g': // number of loop iterations inside GPU kernel kernel_workload = atoi( argv[ ++j ] ); break; case 'c': // seconds to sleep after launch of first kernel cpu_usleeptime = atoi( argv[ ++j ] ) * 1000; break; case 's': // number of CUDA streams to use num_streams = atoi( argv[ ++j ] ); break; case 'b': // test only basic CUDA features basic_mode = 1; break; default: show_help(); exit( 1 ); } j++; } }
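In runCopyComputeOverlap each stream i starts at element i*n/nstreams and the matching async copy moves nbytes/nstreams bytes; plain integer division is used, so with the default nstreams = 3 the slices do not tile n = 512*1024 exactly and a small tail of the array is left untouched. A hedged sketch of that partitioning; stream_slice is an illustrative helper, not part of the test.
// Slice of the array handled by stream i, using the same integer arithmetic as the test above.
static void stream_slice( int n, int nstreams, int i, int* first_elem, int* num_bytes )
{
    *first_elem = i * n / nstreams;                     // offset passed to the kernel and the async copy
    *num_bytes  = n * ( int )sizeof( int ) / nstreams;  // nbytes / nstreams
}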
21609601cff10183c45dbc18e54d4b88c2046b79.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #define VERTICES 600 extern "C" { __global__ void cn_pnpoly(int *bitmap, float2 *points, float2 *vertices, int n); __global__ void cn_pnpoly_reference_kernel(int *bitmap, float2 *points, float2 *vertices, int n); } /* * This file contains the implementation of a CUDA Kernel for the * point-in-polygon problem using the crossing number algorithm * * Simplified for use in the NLeSC GPU Course * * The algorithm used here is adapted from: * 'Inclusion of a Point in a Polygon', Dan Sunday, 2001 * (http://geomalgorithms.com/a03-_inclusion.html) * * Author: Ben van Werkhoven <[email protected]> */ __global__ void cn_pnpoly(int *bitmap, float2 *points, float2 *vertices, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { int c = 0; float2 p = points[i]; int k = VERTICES-1; for (int j=0; j<VERTICES; k = j++) { // edge from vk to vj float2 vj = vertices[j]; float2 vk = vertices[k]; float slope = (vk.x-vj.x) / (vk.y-vj.y); if ( ( (vj.y>p.y) != (vk.y>p.y)) && //if p is between vj and vk vertically (p.x < slope * (p.y-vj.y) + vj.x) ) { //if p.x crosses the line vk-vj when moved in positive x-direction c = !c; } } bitmap[i] = c; // 0 if even (out), and 1 if odd (in) } } int main() { hipSetDeviceFlags(hipDeviceMapHost); hipSetDevice(0); hipDeviceSynchronize(); hipError_t err; int num_points = (int)2e7; float2 *h_vertices; float2 *d_vertices; float2 *h_points; int *h_bitmap; int *h_reference; //Allocate pinned and aligned host memory and copy input data err = hipHostMalloc((void **)&h_vertices, VERTICES*sizeof(float2), hipHostMallocMapped); if (err != hipSuccess) { fprintf(stderr, "Error in hipHostMalloc: %s\n", hipGetErrorString(err)); } err = hipHostMalloc((void **)&h_points, num_points *sizeof(float2), hipHostMallocMapped); if (err != hipSuccess) { fprintf(stderr, "Error in hipHostMalloc: %s\n", hipGetErrorString(err)); } err = hipHostMalloc((void **)&h_bitmap, num_points *sizeof(int), hipHostMallocMapped); if (err != hipSuccess) { fprintf(stderr, "Error in hipHostMalloc: %s\n", hipGetErrorString(err)); } err = hipHostMalloc((void **)&h_reference, num_points *sizeof(int), hipHostMallocMapped); if (err != hipSuccess) { fprintf(stderr, "Error in hipHostMalloc: %s\n", hipGetErrorString(err)); } // generate random input for (int i=0; i< num_points; i++) { h_points[i].x = 50.0 / (rand() % 1000); h_points[i].y = 50.0 / (rand() % 1000); } // read vertices from disk FILE *file = fopen("vertices.dat", "rb"); fread(h_vertices, sizeof(float), 2*VERTICES, file); // allocate device memory for storing the vertices err = hipMalloc((void **)&d_vertices, VERTICES*sizeof(float2)); if (err != hipSuccess) { fprintf(stderr, "Error in hipMalloc: %s\n", hipGetErrorString( err )); } // transfer vertices to d_vertices err = hipMemcpy(d_vertices, h_vertices, VERTICES*sizeof(float2), hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Error in hipMemcpy: %s\n", hipGetErrorString(err)); } // create CUDA streams and events hipStream_t stream[1]; err = hipStreamCreate(&stream[0]); if (err != hipSuccess) { fprintf(stderr, "Error in hipStreamCreate: %s\n", hipGetErrorString(err)); } hipEvent_t start; err = hipEventCreate(&start); if (err != hipSuccess) { fprintf(stderr, "Error in hipEventCreate: %s\n", hipGetErrorString(err)); } hipEvent_t stop; err = hipEventCreate(&stop); if (err != hipSuccess) { fprintf(stderr, "Error in hipEventCreate: %s\n", hipGetErrorString(err)); } 
hipDeviceSynchronize(); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Error after memory setup: %s\n", hipGetErrorString(err)); } //kernel parameters dim3 threads(256, 1, 1); dim3 grid((int)ceil(num_points / (float)threads.x), 1); //run the kernel a few times to warmup the device for (int i=0; i<5; i++) { hipLaunchKernelGGL(( cn_pnpoly_reference_kernel), dim3(grid), dim3(threads), 0, stream[0], h_reference, h_points, d_vertices, num_points); } memset(h_bitmap, 0, num_points*sizeof(int)); //start measuring time hipDeviceSynchronize(); hipEventRecord(start, stream[0]); //call the kernel hipLaunchKernelGGL(( cn_pnpoly), dim3(grid), dim3(threads), 0, stream[0], h_bitmap, h_points, d_vertices, num_points); //stop time measurement hipEventRecord(stop, stream[0]); hipDeviceSynchronize(); float time = 0.0; hipEventElapsedTime(&time, start, stop); printf("cn_pnpoly kernel took: %f (ms)\n", time); //compute reference answer and measure time hipDeviceSynchronize(); hipEventRecord(start, stream[0]); hipLaunchKernelGGL(( cn_pnpoly_reference_kernel), dim3(grid), dim3(threads), 0, stream[0], h_reference, h_points, d_vertices, num_points); hipEventRecord(stop, stream[0]); hipDeviceSynchronize(); hipEventElapsedTime(&time, start, stop); printf("reference kernel took: %f (ms)\n", time); //cleanup hipStreamDestroy(stream[0]); hipEventDestroy(start); hipEventDestroy(stop); hipFree(d_vertices); hipHostFree(h_vertices); hipHostFree(h_points); //final check for errors hipDeviceSynchronize(); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Error after CUDA kernel: %s\n", hipGetErrorString(err)); exit(1); } else { int errors = 0; int print = 0; for (int i=0; i<num_points; i++) { if (h_bitmap[i] != h_reference[i]) { errors++; if (print++ < 10) { fprintf(stderr, "error at %d, reference=%d, answer=%d\n", i, h_reference[i], h_bitmap[i]); } } } if (errors == 0) { printf("ok!\n"); } else { printf("there were %d errors\n", errors); } } hipHostFree(h_bitmap); hipHostFree(h_reference); return 0; } /* * Reference kernel * * This kernel is kept for checking the output of the above kernel, DO NOT MODIFY THIS KERNEL */ __global__ void cn_pnpoly_reference_kernel(int *bitmap, float2 *points, float2 *vertices, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { int c = 0; float2 p = points[i]; // DO NOT MODIFY THIS KERNEL int k = VERTICES-1; for (int j=0; j<VERTICES; k = j++) { float2 vj = vertices[j]; // DO NOT MODIFY THIS KERNEL float2 vk = vertices[k]; float slope = (vk.x-vj.x) / (vk.y-vj.y); if ( ( (vj.y>p.y) != (vk.y>p.y)) && (p.x < slope * (p.y-vj.y) + vj.x) ) { c = !c; } } bitmap[i] = c; // DO NOT MODIFY THIS KERNEL } }
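The host buffers above come from hipHostMalloc with hipHostMallocMapped and are passed directly to the kernels, which relies on unified addressing making the host pointer valid on the device. On configurations without that guarantee the device-visible alias would be fetched explicitly; a minimal sketch, assuming h_ptr was allocated exactly as in the program above (the flags argument is reserved and passed as 0):
// Sketch: look up the device alias of a mapped host allocation.
static int *device_alias(int *h_ptr) {
    int *d_ptr = NULL;
    hipError_t e = hipHostGetDevicePointer((void **)&d_ptr, h_ptr, 0);
    if (e != hipSuccess) {
        fprintf(stderr, "Error in hipHostGetDevicePointer: %s\n", hipGetErrorString(e));
        return NULL;
    }
    return d_ptr;  // pass this to the kernel instead of h_ptr
}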
21609601cff10183c45dbc18e54d4b88c2046b79.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #define VERTICES 600 extern "C" { __global__ void cn_pnpoly(int *bitmap, float2 *points, float2 *vertices, int n); __global__ void cn_pnpoly_reference_kernel(int *bitmap, float2 *points, float2 *vertices, int n); } /* * This file contains the implementation of a CUDA Kernel for the * point-in-polygon problem using the crossing number algorithm * * Simplified for use in the NLeSC GPU Course * * The algorithm used here is adapted from: * 'Inclusion of a Point in a Polygon', Dan Sunday, 2001 * (http://geomalgorithms.com/a03-_inclusion.html) * * Author: Ben van Werkhoven <[email protected]> */ __global__ void cn_pnpoly(int *bitmap, float2 *points, float2 *vertices, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { int c = 0; float2 p = points[i]; int k = VERTICES-1; for (int j=0; j<VERTICES; k = j++) { // edge from vk to vj float2 vj = vertices[j]; float2 vk = vertices[k]; float slope = (vk.x-vj.x) / (vk.y-vj.y); if ( ( (vj.y>p.y) != (vk.y>p.y)) && //if p is between vj and vk vertically (p.x < slope * (p.y-vj.y) + vj.x) ) { //if p.x crosses the line vk-vj when moved in positive x-direction c = !c; } } bitmap[i] = c; // 0 if even (out), and 1 if odd (in) } } int main() { cudaSetDeviceFlags(cudaDeviceMapHost); cudaSetDevice(0); cudaDeviceSynchronize(); cudaError_t err; int num_points = (int)2e7; float2 *h_vertices; float2 *d_vertices; float2 *h_points; int *h_bitmap; int *h_reference; //Allocate pinned and aligned host memory and copy input data err = cudaHostAlloc((void **)&h_vertices, VERTICES*sizeof(float2), cudaHostAllocMapped); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString(err)); } err = cudaHostAlloc((void **)&h_points, num_points *sizeof(float2), cudaHostAllocMapped); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString(err)); } err = cudaHostAlloc((void **)&h_bitmap, num_points *sizeof(int), cudaHostAllocMapped); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString(err)); } err = cudaHostAlloc((void **)&h_reference, num_points *sizeof(int), cudaHostAllocMapped); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString(err)); } // generate random input for (int i=0; i< num_points; i++) { h_points[i].x = 50.0 / (rand() % 1000); h_points[i].y = 50.0 / (rand() % 1000); } // read vertices from disk FILE *file = fopen("vertices.dat", "rb"); fread(h_vertices, sizeof(float), 2*VERTICES, file); // allocate device memory for storing the vertices err = cudaMalloc((void **)&d_vertices, VERTICES*sizeof(float2)); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaMalloc: %s\n", cudaGetErrorString( err )); } // transfer vertices to d_vertices err = cudaMemcpy(d_vertices, h_vertices, VERTICES*sizeof(float2), cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaMemcpy: %s\n", cudaGetErrorString(err)); } // create CUDA streams and events cudaStream_t stream[1]; err = cudaStreamCreate(&stream[0]); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaStreamCreate: %s\n", cudaGetErrorString(err)); } cudaEvent_t start; err = cudaEventCreate(&start); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaEventCreate: %s\n", cudaGetErrorString(err)); } cudaEvent_t stop; err = cudaEventCreate(&stop); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaEventCreate: %s\n", cudaGetErrorString(err)); } cudaDeviceSynchronize(); err = 
cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Error after memory setup: %s\n", cudaGetErrorString(err)); } //kernel parameters dim3 threads(256, 1, 1); dim3 grid((int)ceil(num_points / (float)threads.x), 1); //run the kernel a few times to warmup the device for (int i=0; i<5; i++) { cn_pnpoly_reference_kernel<<<grid, threads, 0, stream[0]>>>(h_reference, h_points, d_vertices, num_points); } memset(h_bitmap, 0, num_points*sizeof(int)); //start measuring time cudaDeviceSynchronize(); cudaEventRecord(start, stream[0]); //call the kernel cn_pnpoly<<<grid, threads, 0, stream[0]>>>(h_bitmap, h_points, d_vertices, num_points); //stop time measurement cudaEventRecord(stop, stream[0]); cudaDeviceSynchronize(); float time = 0.0; cudaEventElapsedTime(&time, start, stop); printf("cn_pnpoly kernel took: %f (ms)\n", time); //compute reference answer and measure time cudaDeviceSynchronize(); cudaEventRecord(start, stream[0]); cn_pnpoly_reference_kernel<<<grid, threads, 0, stream[0]>>>(h_reference, h_points, d_vertices, num_points); cudaEventRecord(stop, stream[0]); cudaDeviceSynchronize(); cudaEventElapsedTime(&time, start, stop); printf("reference kernel took: %f (ms)\n", time); //cleanup cudaStreamDestroy(stream[0]); cudaEventDestroy(start); cudaEventDestroy(stop); cudaFree(d_vertices); cudaFreeHost(h_vertices); cudaFreeHost(h_points); //final check for errors cudaDeviceSynchronize(); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Error after CUDA kernel: %s\n", cudaGetErrorString(err)); exit(1); } else { int errors = 0; int print = 0; for (int i=0; i<num_points; i++) { if (h_bitmap[i] != h_reference[i]) { errors++; if (print++ < 10) { fprintf(stderr, "error at %d, reference=%d, answer=%d\n", i, h_reference[i], h_bitmap[i]); } } } if (errors == 0) { printf("ok!\n"); } else { printf("there were %d errors\n", errors); } } cudaFreeHost(h_bitmap); cudaFreeHost(h_reference); return 0; } /* * Reference kernel * * This kernel is kept for checking the output of the above kernel, DO NOT MODIFY THIS KERNEL */ __global__ void cn_pnpoly_reference_kernel(int *bitmap, float2 *points, float2 *vertices, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { int c = 0; float2 p = points[i]; // DO NOT MODIFY THIS KERNEL int k = VERTICES-1; for (int j=0; j<VERTICES; k = j++) { float2 vj = vertices[j]; // DO NOT MODIFY THIS KERNEL float2 vk = vertices[k]; float slope = (vk.x-vj.x) / (vk.y-vj.y); if ( ( (vj.y>p.y) != (vk.y>p.y)) && (p.x < slope * (p.y-vj.y) + vj.x) ) { c = !c; } } bitmap[i] = c; // DO NOT MODIFY THIS KERNEL } }
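A plain host-side version of the same crossing-number test is convenient for spot-checking individual points against the GPU bitmap. A minimal sketch mirroring the kernel arithmetic above; pnpoly_host is an illustrative name and not part of the sample (float2 comes from the CUDA headers):
// Host reference for a single point p against the nvert polygon vertices in v.
static int pnpoly_host(float2 p, const float2 *v, int nvert) {
    int c = 0;
    int k = nvert-1;
    for (int j=0; j<nvert; k = j++) {
        float slope = (v[k].x-v[j].x) / (v[k].y-v[j].y);
        if ( ((v[j].y>p.y) != (v[k].y>p.y)) && (p.x < slope * (p.y-v[j].y) + v[j].x) ) {
            c = !c;
        }
    }
    return c;  // 1 if inside (odd number of crossings), 0 if outside
}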
9e01e5ff04fc92c6cd646bc78848473a72326572.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "kMultByRowVectorScale.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *mat = NULL; hipMalloc(&mat, XSIZE*YSIZE); float *vec = NULL; hipMalloc(&vec, XSIZE*YSIZE); float *tgtMat = NULL; hipMalloc(&tgtMat, XSIZE*YSIZE); unsigned int width = 1; unsigned int height = 1; float scale_targets = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( kMultByRowVectorScale), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,vec,tgtMat,width,height,scale_targets); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( kMultByRowVectorScale), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,vec,tgtMat,width,height,scale_targets); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( kMultByRowVectorScale), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,vec,tgtMat,width,height,scale_targets); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
9e01e5ff04fc92c6cd646bc78848473a72326572.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "kMultByRowVectorScale.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *mat = NULL; cudaMalloc(&mat, XSIZE*YSIZE); float *vec = NULL; cudaMalloc(&vec, XSIZE*YSIZE); float *tgtMat = NULL; cudaMalloc(&tgtMat, XSIZE*YSIZE); unsigned int width = 1; unsigned int height = 1; float scale_targets = 2; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); kMultByRowVectorScale<<<gridBlock,threadBlock>>>(mat,vec,tgtMat,width,height,scale_targets); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { kMultByRowVectorScale<<<gridBlock,threadBlock>>>(mat,vec,tgtMat,width,height,scale_targets); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { kMultByRowVectorScale<<<gridBlock,threadBlock>>>(mat,vec,tgtMat,width,height,scale_targets); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
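Kernel launches are asynchronous, so the steady_clock interval in the harness above closes before the queued kernels have necessarily executed. A hedged sketch of the timed loop with an explicit synchronization before the second timestamp, reusing the harness's own variables (shown for the CUDA variant; the HIP twin would call hipDeviceSynchronize):
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    kMultByRowVectorScale<<<gridBlock,threadBlock>>>(mat,vec,tgtMat,width,height,scale_targets);
}
cudaDeviceSynchronize();  // drain the queued launches before reading the clock
auto end = steady_clock::now();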
5b9e03ba571ed63066cfef4f31faf8a0f3928aac.hip
// !!! This is a file automatically generated by hipify!!! /** * Derived from the nVIDIA CUDA 8.0 samples by * * Eyal Rozenberg <[email protected]> * * The derivation is specifically permitted in the nVIDIA CUDA Samples EULA * and the deriver is the owner of this code according to the EULA. * * Use this reasonably. If you want to discuss licensing formalities, please * contact the deriving author. * * * This sample illustrates the usage of CUDA streams for overlapping * kernel execution with device/host memcopies. The kernel is used to * initialize an array to a specific value, after which the array is * copied to the host (CPU) memory. To increase performance, multiple * kernel/memcopy pairs are launched asynchronously, each pair in its * own stream. Devices with Compute Capability 1.1 can overlap a kernel * and a memcopy as long as they are issued in different streams. Kernels * are serialized. Thus, if n pairs are launched, streamed approach * can reduce the memcopy cost to the (1/n)th of a single copy of the entire * data set. * * Additionally, this sample uses CUDA events to measure elapsed time for * CUDA calls. Events are a part of CUDA API and provide a system independent * way to measure execution times on CUDA devices with approximately 0.5 * microsecond precision. * * Elapsed times are averaged over nreps repetitions (10 by default). * */ #ifndef EXIT_WAIVED #define EXIT_WAIVED 2 #endif const char *sSDKsample = "simpleStreams"; const char *sEventSyncMethod[] = { "hipEventDefault", "hipEventBlockingSync", "hipEventDisableTiming", NULL }; const char *sDeviceSyncMethod[] = { "hipDeviceScheduleAuto", "hipDeviceScheduleSpin", "hipDeviceScheduleYield", "INVALID", "hipDeviceScheduleBlockingSync", NULL }; // System includes // CUDA runtime #include "hip/hip_runtime.h" // helper functions and utilities to work with CUDA #include "../helper_cuda.h" #include <cuda/api_wrappers.hpp> #ifndef WIN32 #include <sys/mman.h> // for mmap() / munmap() #endif #include <cstdlib> #include <fstream> #include <vector> #include <iostream> #include <algorithm> // Macro to aligned up to the memory size in question #define MEMORY_ALIGNMENT 4096 #define ALIGN_UP(x,size) ( ((size_t)x+(size-1))&(~(size-1)) ) __global__ void init_array(int *g_data, int *factor, int num_iterations) { int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int i=0; i<num_iterations; i++) { g_data[idx] += *factor; // non-coalesced on purpose, to burn time } } bool correct_data(int *a, const int n, const int c) { for (int i = 0; i < n; i++) { if (a[i] != c) { std::cout << i << ": " << a[i] << " " << c << "\n"; return false; } } return true; } static const char *sSyncMethod[] = { "0 (Automatic Blocking)", "1 (Spin Blocking)", "2 (Yield Blocking)", "3 (Undefined Blocking Method)", "4 (Blocking Sync Event) = low CPU utilization", NULL }; void printHelp() { std::cout << "Usage: " << sSDKsample << " [options below]\n" << "\t--sync_method=n for CPU/GPU synchronization\n" << "\t n=" << sSyncMethod[0] << "\n" << "\t n=" << sSyncMethod[1] << "\n" << "\t n=" << sSyncMethod[2] << "\n" << "\t <Default> n=" << sSyncMethod[4] << "\n" << "\t--use_generic_memory (default) use generic page-aligned for system memory\n" << "\t--use_cuda_malloc_host (optional) use hipHostMalloc to allocate system memory\n"; } int main(int argc, char **argv) { int nstreams = 4; // number of streams for CUDA calls int nreps = 10; // number of times each experiment is repeated int n = 16 * 1024 * 1024; // number of ints in the data set int nbytes = n * sizeof(int); // 
number of data bytes dim3 threads, blocks; // kernel launch configuration float scale_factor = 1.0f; // allocate generic memory and pin it laster instead of using hipHostMalloc() int device_sync_method = hipDeviceScheduleBlockingSync; // by default we use BlockingSync int niterations; // number of iterations for the loop inside the kernel if (checkCmdLineFlag(argc, (const char **)argv, "help")) { printHelp(); return EXIT_SUCCESS; } if ((device_sync_method = getCmdLineArgumentInt(argc, (const char **)argv, "sync_method")) >= 0) { if (device_sync_method == 0 || device_sync_method == 1 || device_sync_method == 2 || device_sync_method == 4) { std::cout << "Device synchronization method set to = " << sSyncMethod[device_sync_method] << "\n"; std::cout << "Setting reps to 100 to demonstrate steady state\n"; nreps = 100; } else { std::cout << "Invalid command line option sync_method=\"" << device_sync_method << "\"\n"; return EXIT_FAILURE; } } else { printHelp(); return EXIT_SUCCESS; } if (checkCmdLineFlag(argc, (const char **)argv, "use_cuda_malloc_host")) { std::cout << "To simplify this example, support for using cuda_malloc_host instead of " << "pinned memory has been dropped.\n"; return EXIT_FAILURE; } std::cout << "\n> "; chooseCudaDevice(argc, (const char **)argv); auto current_device = cuda::device::current::get(); // Checking for compute capabilities auto properties = current_device.properties(); auto compute_capability = properties.compute_capability(); if (compute_capability < cuda::device::compute_capability_t({1, 1}) ) { std::cout << properties.name << " does not have Compute Capability 1.1 or newer. Reducing workload.\n"; } if (compute_capability.major() >= 2) { niterations = 5; } else { if (compute_capability.minor() > 1) { niterations = 5; } else { niterations = 1; // reduced workload for compute capability 1.0 and 1.1 } } // Check if GPU can map host memory (Generic Method), if not then we override bPinGenericMemory to be false std::cout << "Device: <" << properties.name << "> canMapHostMemory: " << (properties.canMapHostMemory ? "Yes" : "No") << "\n"; if (not properties.can_map_host_memory()) { std::cout << "Cannot allocate pinned memory (and map GPU device memory to it); aborting.\n"; return EXIT_FAILURE; } // Anything that is less than 32 Cores will have scaled down workload auto faux_cores_per_sm = compute_capability.max_in_flight_threads_per_processor(); auto faux_cores_overall = properties.max_in_flight_threads_on_device(); scale_factor = max((32.0f / faux_cores_overall), 1.0f); n = (int)rint((float)n / scale_factor); std::cout << "> CUDA Capable: SM " << compute_capability.major() << "." 
<< compute_capability.minor() << " hardware\n"; std::cout << "> " << properties.multiProcessorCount << " Multiprocessor(s)" << " x " << faux_cores_per_sm << " (Cores/Multiprocessor) = " << faux_cores_overall << " (Cores)\n"; std::cout << "> scale_factor = " << 1.0f/scale_factor << "\n"; std::cout << "> array_size = " << n << "\n\n"; // enable use of blocking sync, to reduce CPU usage std::cout << "> Using CPU/GPU Device Synchronization method " << sDeviceSyncMethod[device_sync_method] << "\n"; cuda::host_thread_synch_scheduling_policy_t policy; switch(device_sync_method) { case 0: policy = cuda::heuristic; break; case 1: policy = cuda::spin; break; case 2: policy = cuda::yield; break; case 4: policy = cuda::block; break; default: // should not be able to get here exit(EXIT_FAILURE); } current_device.set_synch_scheduling_policy(policy); current_device.enable_mapping_host_memory(); // allocate host memory int c = 5; // value to which the array will be initialized // Allocate Host memory auto h_a = cuda::memory::host::make_unique<int[]>(n); // allocate device memory // pointers to data and init value in the device memory auto d_a = cuda::memory::device::make_unique<int[]>(current_device, n); auto d_c = cuda::memory::device::make_unique<int>(current_device); cuda::memory::copy_single(d_c.get(), &c); std::cout << "\nStarting Test\n"; // allocate and initialize an array of stream handles std::vector<cuda::stream_t> streams; std::generate_n( std::back_inserter(streams), nstreams, [&current_device]() { // Note: we could omit the specific requirement of synchronization // with the default stream, since that's the CUDA default - but I // think it's important to state that's the case return current_device.create_stream( cuda::stream::implicitly_synchronizes_with_default_stream); } ); // create CUDA event handles // use blocking sync auto use_blocking_sync = (device_sync_method == hipDeviceScheduleBlockingSync); auto start_event = cuda::event::create(current_device, use_blocking_sync); auto stop_event = cuda::event::create(current_device, use_blocking_sync); // time memcopy from device start_event.record(); // record on the default stream, to ensure that all previous CUDA calls have completed cuda::memory::async::copy(h_a.get(), d_a.get(), nbytes, streams[0]); stop_event.record(); stop_event.synchronize(); // block until the event is actually recorded auto time_memcpy = cuda::event::time_elapsed_between(start_event, stop_event); std::cout << "memcopy:\t" << time_memcpy.count() << "\n"; // time kernel threads=dim3(512, 1); blocks=dim3(n / threads.x, 1); start_event.record(); hipLaunchKernelGGL(( init_array), dim3(blocks), dim3(threads), 0, streams[0].id(), d_a.get(), d_c.get(), niterations); stop_event.record(); stop_event.synchronize(); auto time_kernel = cuda::event::time_elapsed_between(start_event, stop_event); std::cout << "kernel:\t\t" << time_kernel.count() << "\n"; ////////////////////////////////////////////////////////////////////// // time non-streamed execution for reference threads=dim3(512, 1); blocks=dim3(n / threads.x, 1); start_event.record(); for (int k = 0; k < nreps; k++) { hipLaunchKernelGGL(( init_array), dim3(blocks), dim3(threads), 0, 0, d_a.get(), d_c.get(), niterations); cuda::memory::copy(h_a.get(), d_a.get(), nbytes); } stop_event.record(); stop_event.synchronize(); auto elapsed_time = cuda::event::time_elapsed_between(start_event, stop_event); std::cout << "non-streamed:\t" << elapsed_time.count() / nreps << "\n"; 
////////////////////////////////////////////////////////////////////// // time execution with nstreams streams threads=dim3(512,1); blocks=dim3(n/(nstreams*threads.x),1); memset(h_a.get(), 255, nbytes); // set host memory bits to all 1s, for testing correctness cuda::memory::device::zero(d_a.get(), nbytes); // set device memory to all 0s, for testing correctness start_event.record(); for (int k = 0; k < nreps; k++) { // asynchronously launch nstreams kernels, each operating on its own portion of data for (int i = 0; i < nstreams; i++) { hipLaunchKernelGGL(( init_array), dim3(blocks), dim3(threads), 0, streams[i].id(), d_a.get() + i *n / nstreams, d_c.get(), niterations); } // asynchronously launch nstreams memcopies. Note that memcopy in stream x will only // commence executing when all previous CUDA calls in stream x have completed for (int i = 0; i < nstreams; i++) { cuda::memory::async::copy( h_a.get() + i * n / nstreams, d_a.get() + i * n / nstreams, nbytes / nstreams, streams[i]); } } stop_event.record(); stop_event.synchronize(); elapsed_time = cuda::event::time_elapsed_between(start_event, stop_event); std::cout << nstreams <<" streams:\t" << elapsed_time.count() / nreps << "\n"; // check whether the output is correct std::cout << "-------------------------------\n"; bool bResults = correct_data(h_a.get(), n, c*nreps*niterations); std::cout << (bResults ? "SUCCESS" : "FAILURE") << "\n"; return bResults ? EXIT_SUCCESS : EXIT_FAILURE; }
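A minimal sketch of the kernel/memcpy overlap pattern that the sample above describes, written directly against the HIP runtime API rather than the cuda-api-wrappers types the sample uses; the set_value kernel, the element count, and the stream count are illustrative assumptions, not part of the original sample.

#include <hip/hip_runtime.h>
#include <cstdio>
#include <vector>

// Illustrative kernel: write a constant into each element of its chunk.
__global__ void set_value(int *data, int value)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    data[idx] = value;
}

int main()
{
    const int nstreams = 4;               // assumed stream count
    const int n = 1 << 22;                // assumed element count, divisible by nstreams and by 256
    const int chunk = n / nstreams;
    const size_t nbytes = size_t(n) * sizeof(int);

    int *h_a = nullptr;
    int *d_a = nullptr;
    hipHostMalloc((void **)&h_a, nbytes, hipHostMallocDefault); // pinned, so the copy-backs can be truly asynchronous
    hipMalloc((void **)&d_a, nbytes);

    std::vector<hipStream_t> streams(nstreams);
    for (auto &s : streams) hipStreamCreate(&s);

    // Each stream gets its own kernel launch followed by its own async copy-back;
    // work issued to different streams may overlap on devices that support it.
    for (int i = 0; i < nstreams; i++) {
        hipLaunchKernelGGL(set_value, dim3(chunk / 256), dim3(256), 0, streams[i],
                           d_a + i * chunk, 5);
        hipMemcpyAsync(h_a + i * chunk, d_a + i * chunk, chunk * sizeof(int),
                       hipMemcpyDeviceToHost, streams[i]);
    }
    hipDeviceSynchronize();

    printf("h_a[0] = %d, h_a[n-1] = %d\n", h_a[0], h_a[n - 1]);

    for (auto &s : streams) hipStreamDestroy(s);
    hipHostFree(h_a);
    hipFree(d_a);
    return 0;
}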
5b9e03ba571ed63066cfef4f31faf8a0f3928aac.cu
/** * Derived from the nVIDIA CUDA 8.0 samples by * * Eyal Rozenberg <[email protected]> * * The derivation is specifically permitted in the nVIDIA CUDA Samples EULA * and the deriver is the owner of this code according to the EULA. * * Use this reasonably. If you want to discuss licensing formalities, please * contact the deriving author. * * * This sample illustrates the usage of CUDA streams for overlapping * kernel execution with device/host memcopies. The kernel is used to * initialize an array to a specific value, after which the array is * copied to the host (CPU) memory. To increase performance, multiple * kernel/memcopy pairs are launched asynchronously, each pair in its * own stream. Devices with Compute Capability 1.1 can overlap a kernel * and a memcopy as long as they are issued in different streams. Kernels * are serialized. Thus, if n pairs are launched, streamed approach * can reduce the memcopy cost to the (1/n)th of a single copy of the entire * data set. * * Additionally, this sample uses CUDA events to measure elapsed time for * CUDA calls. Events are a part of CUDA API and provide a system independent * way to measure execution times on CUDA devices with approximately 0.5 * microsecond precision. * * Elapsed times are averaged over nreps repetitions (10 by default). * */ #ifndef EXIT_WAIVED #define EXIT_WAIVED 2 #endif const char *sSDKsample = "simpleStreams"; const char *sEventSyncMethod[] = { "cudaEventDefault", "cudaEventBlockingSync", "cudaEventDisableTiming", NULL }; const char *sDeviceSyncMethod[] = { "cudaDeviceScheduleAuto", "cudaDeviceScheduleSpin", "cudaDeviceScheduleYield", "INVALID", "cudaDeviceScheduleBlockingSync", NULL }; // System includes // CUDA runtime #include "cuda_runtime.h" // helper functions and utilities to work with CUDA #include "../helper_cuda.h" #include <cuda/api_wrappers.hpp> #ifndef WIN32 #include <sys/mman.h> // for mmap() / munmap() #endif #include <cstdlib> #include <fstream> #include <vector> #include <iostream> #include <algorithm> // Macro to aligned up to the memory size in question #define MEMORY_ALIGNMENT 4096 #define ALIGN_UP(x,size) ( ((size_t)x+(size-1))&(~(size-1)) ) __global__ void init_array(int *g_data, int *factor, int num_iterations) { int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int i=0; i<num_iterations; i++) { g_data[idx] += *factor; // non-coalesced on purpose, to burn time } } bool correct_data(int *a, const int n, const int c) { for (int i = 0; i < n; i++) { if (a[i] != c) { std::cout << i << ": " << a[i] << " " << c << "\n"; return false; } } return true; } static const char *sSyncMethod[] = { "0 (Automatic Blocking)", "1 (Spin Blocking)", "2 (Yield Blocking)", "3 (Undefined Blocking Method)", "4 (Blocking Sync Event) = low CPU utilization", NULL }; void printHelp() { std::cout << "Usage: " << sSDKsample << " [options below]\n" << "\t--sync_method=n for CPU/GPU synchronization\n" << "\t n=" << sSyncMethod[0] << "\n" << "\t n=" << sSyncMethod[1] << "\n" << "\t n=" << sSyncMethod[2] << "\n" << "\t <Default> n=" << sSyncMethod[4] << "\n" << "\t--use_generic_memory (default) use generic page-aligned for system memory\n" << "\t--use_cuda_malloc_host (optional) use cudaMallocHost to allocate system memory\n"; } int main(int argc, char **argv) { int nstreams = 4; // number of streams for CUDA calls int nreps = 10; // number of times each experiment is repeated int n = 16 * 1024 * 1024; // number of ints in the data set int nbytes = n * sizeof(int); // number of data bytes dim3 threads, blocks; // kernel launch 
configuration float scale_factor = 1.0f; // allocate generic memory and pin it laster instead of using cudaHostAlloc() int device_sync_method = cudaDeviceBlockingSync; // by default we use BlockingSync int niterations; // number of iterations for the loop inside the kernel if (checkCmdLineFlag(argc, (const char **)argv, "help")) { printHelp(); return EXIT_SUCCESS; } if ((device_sync_method = getCmdLineArgumentInt(argc, (const char **)argv, "sync_method")) >= 0) { if (device_sync_method == 0 || device_sync_method == 1 || device_sync_method == 2 || device_sync_method == 4) { std::cout << "Device synchronization method set to = " << sSyncMethod[device_sync_method] << "\n"; std::cout << "Setting reps to 100 to demonstrate steady state\n"; nreps = 100; } else { std::cout << "Invalid command line option sync_method=\"" << device_sync_method << "\"\n"; return EXIT_FAILURE; } } else { printHelp(); return EXIT_SUCCESS; } if (checkCmdLineFlag(argc, (const char **)argv, "use_cuda_malloc_host")) { std::cout << "To simplify this example, support for using cuda_malloc_host instead of " << "pinned memory has been dropped.\n"; return EXIT_FAILURE; } std::cout << "\n> "; chooseCudaDevice(argc, (const char **)argv); auto current_device = cuda::device::current::get(); // Checking for compute capabilities auto properties = current_device.properties(); auto compute_capability = properties.compute_capability(); if (compute_capability < cuda::device::compute_capability_t({1, 1}) ) { std::cout << properties.name << " does not have Compute Capability 1.1 or newer. Reducing workload.\n"; } if (compute_capability.major() >= 2) { niterations = 5; } else { if (compute_capability.minor() > 1) { niterations = 5; } else { niterations = 1; // reduced workload for compute capability 1.0 and 1.1 } } // Check if GPU can map host memory (Generic Method), if not then we override bPinGenericMemory to be false std::cout << "Device: <" << properties.name << "> canMapHostMemory: " << (properties.canMapHostMemory ? "Yes" : "No") << "\n"; if (not properties.can_map_host_memory()) { std::cout << "Cannot allocate pinned memory (and map GPU device memory to it); aborting.\n"; return EXIT_FAILURE; } // Anything that is less than 32 Cores will have scaled down workload auto faux_cores_per_sm = compute_capability.max_in_flight_threads_per_processor(); auto faux_cores_overall = properties.max_in_flight_threads_on_device(); scale_factor = max((32.0f / faux_cores_overall), 1.0f); n = (int)rint((float)n / scale_factor); std::cout << "> CUDA Capable: SM " << compute_capability.major() << "." 
<< compute_capability.minor() << " hardware\n"; std::cout << "> " << properties.multiProcessorCount << " Multiprocessor(s)" << " x " << faux_cores_per_sm << " (Cores/Multiprocessor) = " << faux_cores_overall << " (Cores)\n"; std::cout << "> scale_factor = " << 1.0f/scale_factor << "\n"; std::cout << "> array_size = " << n << "\n\n"; // enable use of blocking sync, to reduce CPU usage std::cout << "> Using CPU/GPU Device Synchronization method " << sDeviceSyncMethod[device_sync_method] << "\n"; cuda::host_thread_synch_scheduling_policy_t policy; switch(device_sync_method) { case 0: policy = cuda::heuristic; break; case 1: policy = cuda::spin; break; case 2: policy = cuda::yield; break; case 4: policy = cuda::block; break; default: // should not be able to get here exit(EXIT_FAILURE); } current_device.set_synch_scheduling_policy(policy); current_device.enable_mapping_host_memory(); // allocate host memory int c = 5; // value to which the array will be initialized // Allocate Host memory auto h_a = cuda::memory::host::make_unique<int[]>(n); // allocate device memory // pointers to data and init value in the device memory auto d_a = cuda::memory::device::make_unique<int[]>(current_device, n); auto d_c = cuda::memory::device::make_unique<int>(current_device); cuda::memory::copy_single(d_c.get(), &c); std::cout << "\nStarting Test\n"; // allocate and initialize an array of stream handles std::vector<cuda::stream_t> streams; std::generate_n( std::back_inserter(streams), nstreams, [&current_device]() { // Note: we could omit the specific requirement of synchronization // with the default stream, since that's the CUDA default - but I // think it's important to state that's the case return current_device.create_stream( cuda::stream::implicitly_synchronizes_with_default_stream); } ); // create CUDA event handles // use blocking sync auto use_blocking_sync = (device_sync_method == cudaDeviceBlockingSync); auto start_event = cuda::event::create(current_device, use_blocking_sync); auto stop_event = cuda::event::create(current_device, use_blocking_sync); // time memcopy from device start_event.record(); // record on the default stream, to ensure that all previous CUDA calls have completed cuda::memory::async::copy(h_a.get(), d_a.get(), nbytes, streams[0]); stop_event.record(); stop_event.synchronize(); // block until the event is actually recorded auto time_memcpy = cuda::event::time_elapsed_between(start_event, stop_event); std::cout << "memcopy:\t" << time_memcpy.count() << "\n"; // time kernel threads=dim3(512, 1); blocks=dim3(n / threads.x, 1); start_event.record(); init_array<<<blocks, threads, 0, streams[0].id()>>>(d_a.get(), d_c.get(), niterations); stop_event.record(); stop_event.synchronize(); auto time_kernel = cuda::event::time_elapsed_between(start_event, stop_event); std::cout << "kernel:\t\t" << time_kernel.count() << "\n"; ////////////////////////////////////////////////////////////////////// // time non-streamed execution for reference threads=dim3(512, 1); blocks=dim3(n / threads.x, 1); start_event.record(); for (int k = 0; k < nreps; k++) { init_array<<<blocks, threads>>>(d_a.get(), d_c.get(), niterations); cuda::memory::copy(h_a.get(), d_a.get(), nbytes); } stop_event.record(); stop_event.synchronize(); auto elapsed_time = cuda::event::time_elapsed_between(start_event, stop_event); std::cout << "non-streamed:\t" << elapsed_time.count() / nreps << "\n"; ////////////////////////////////////////////////////////////////////// // time execution with nstreams streams threads=dim3(512,1); 
blocks=dim3(n/(nstreams*threads.x),1); memset(h_a.get(), 255, nbytes); // set host memory bits to all 1s, for testing correctness cuda::memory::device::zero(d_a.get(), nbytes); // set device memory to all 0s, for testing correctness start_event.record(); for (int k = 0; k < nreps; k++) { // asynchronously launch nstreams kernels, each operating on its own portion of data for (int i = 0; i < nstreams; i++) { init_array<<<blocks, threads, 0, streams[i].id()>>>(d_a.get() + i *n / nstreams, d_c.get(), niterations); } // asynchronously launch nstreams memcopies. Note that memcopy in stream x will only // commence executing when all previous CUDA calls in stream x have completed for (int i = 0; i < nstreams; i++) { cuda::memory::async::copy( h_a.get() + i * n / nstreams, d_a.get() + i * n / nstreams, nbytes / nstreams, streams[i]); } } stop_event.record(); stop_event.synchronize(); elapsed_time = cuda::event::time_elapsed_between(start_event, stop_event); std::cout << nstreams <<" streams:\t" << elapsed_time.count() / nreps << "\n"; // check whether the output is correct std::cout << "-------------------------------\n"; bool bResults = correct_data(h_a.get(), n, c*nreps*niterations); std::cout << (bResults ? "SUCCESS" : "FAILURE") << "\n"; return bResults ? EXIT_SUCCESS : EXIT_FAILURE; }
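The differences between the .cu source above and its .hip counterpart earlier in this pair are mechanical: hipify renames the runtime symbols (cudaEvent* to hipEvent*, cudaDeviceScheduleBlockingSync to hipDeviceScheduleBlockingSync, cudaMallocHost to hipHostMalloc, and so on) and rewrites triple-chevron kernel launches into the hipLaunchKernelGGL macro. The fragment below juxtaposes the same launch from this sample in both spellings; blocks, threads, and streams[0] are the variables defined in the sample itself.

// CUDA form, as it appears in the .cu file:
init_array<<<blocks, threads, 0, streams[0].id()>>>(d_a.get(), d_c.get(), niterations);

// HIP form produced by hipify, as it appears in the .hip file:
hipLaunchKernelGGL((init_array), dim3(blocks), dim3(threads), 0, streams[0].id(),
                   d_a.get(), d_c.get(), niterations);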
c93ca4c3f2fe43666d360ae61bb15176cf3baab8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from magmablas/ztrtri_upper.cu, normal z -> s, Sun Nov 20 20:20:31 2016 @author Peng Du @author Tingxing Dong @author Mark Gates @author Azzam Haidar @author Ahmad Abdelfattah This file implements upper case, and is called by strtri_kernel.cu. It's convenient to have separate files for lower & upper, to diff the sources. */ #include "magma_internal.h" #define TRTRI_NONBATCHED #include "strtri.cuh" #include "strtri_upper_device.cuh" /******************************************************************************/ __global__ void strtri_diag_upper_kernel( magma_diag_t diag, int n, const float *A, int lda, float *d_dinvA) { strtri_diag_upper_device(diag, n, A, lda, d_dinvA); } /******************************************************************************/ __global__ void triple_sgemm16_part1_upper_kernel( int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages) { triple_sgemm16_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_sgemm16_part2_upper_kernel( int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages) { triple_sgemm16_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_sgemm32_part1_upper_kernel( int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages) { triple_sgemm32_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_sgemm32_part2_upper_kernel( int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages) { triple_sgemm32_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_sgemm64_part1_upper_kernel( int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages) { triple_sgemm64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_sgemm64_part2_upper_kernel( int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages) { triple_sgemm64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_sgemm_above64_part1_upper_kernel( int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages) { triple_sgemm_above64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_sgemm_above64_part2_upper_kernel( int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages) { triple_sgemm_above64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_sgemm_above64_part3_upper_kernel( int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages) { triple_sgemm_above64_part3_upper_device( n, Ain, lda, d_dinvA, jb, npages); }
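For orientation only: a hedged sketch of how a wrapper kernel such as strtri_diag_upper_kernel above is typically launched after hipify. The inversion block size IB, the stream, and the grid shape are assumptions made for illustration; the real launch code and constants live in strtri.cuh and the strtri driver, which are not part of this file.

// Hypothetical fragment, not taken from the MAGMA sources.
// diag, n, dA, ldda and d_dinvA have the types shown in the kernel signature above.
const int IB = 16;                        // assumed diagonal inversion block size
int nblocks = (n + IB - 1) / IB;          // one thread block per diagonal block
hipStream_t stream = 0;                   // assumed stream; MAGMA obtains it from its queue

hipLaunchKernelGGL(strtri_diag_upper_kernel,
                   dim3(nblocks), dim3(IB), 0, stream,
                   diag, n, dA, ldda, d_dinvA);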
c93ca4c3f2fe43666d360ae61bb15176cf3baab8.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from magmablas/ztrtri_upper.cu, normal z -> s, Sun Nov 20 20:20:31 2016 @author Peng Du @author Tingxing Dong @author Mark Gates @author Azzam Haidar @author Ahmad Abdelfattah This file implements upper case, and is called by strtri_kernel.cu. It's convenient to have separate files for lower & upper, to diff the sources. */ #include "magma_internal.h" #define TRTRI_NONBATCHED #include "strtri.cuh" #include "strtri_upper_device.cuh" /******************************************************************************/ __global__ void strtri_diag_upper_kernel( magma_diag_t diag, int n, const float *A, int lda, float *d_dinvA) { strtri_diag_upper_device(diag, n, A, lda, d_dinvA); } /******************************************************************************/ __global__ void triple_sgemm16_part1_upper_kernel( int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages) { triple_sgemm16_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_sgemm16_part2_upper_kernel( int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages) { triple_sgemm16_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_sgemm32_part1_upper_kernel( int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages) { triple_sgemm32_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_sgemm32_part2_upper_kernel( int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages) { triple_sgemm32_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_sgemm64_part1_upper_kernel( int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages) { triple_sgemm64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_sgemm64_part2_upper_kernel( int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages) { triple_sgemm64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_sgemm_above64_part1_upper_kernel( int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages) { triple_sgemm_above64_part1_upper_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_sgemm_above64_part2_upper_kernel( int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages) { triple_sgemm_above64_part2_upper_device( n, Ain, lda, d_dinvA, jb, npages); } /******************************************************************************/ __global__ void triple_sgemm_above64_part3_upper_kernel( int n, const float *Ain, int lda, float *d_dinvA, int jb, int npages) { triple_sgemm_above64_part3_upper_device( n, Ain, lda, d_dinvA, jb, npages); }
8648c8c39adc8f4a07f7929979e8b445c047b2ea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /****************************************************************************** * Test of DeviceReduce utilities ******************************************************************************/ // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <stdio.h> #include <limits> #include <typeinfo> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <hipcub/hipcub.hpp> #include <hipcub/hipcub.hpp> #include <cub/device/device_segmented_reduce.cuh> #include <cub/iterator/constant_input_iterator.cuh> #include <cub/iterator/discard_output_iterator.cuh> #include <cub/iterator/transform_input_iterator.cuh> #include "test_util.h" using namespace cub; //--------------------------------------------------------------------- // Globals, constants and typedefs //--------------------------------------------------------------------- int g_ptx_version; int g_sm_count; double g_device_giga_bandwidth; bool g_verbose = false; bool g_verbose_input = false; int g_timing_iterations = 0; int g_repeat = 0; CachingDeviceAllocator g_allocator(true); // Dispatch types enum Backend { CUB, // CUB method CUB_SEGMENTED, // CUB segmented method CUB_CDP, // GPU-based (dynamic parallelism) dispatch to CUB method THRUST, // Thrust method }; // Custom max functor struct CustomMax { /// Boolean max operator, returns <tt>(a > b) ? 
a : b</tt> template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT &a, const OutputT &b) { return CUB_MAX(a, b); } }; //--------------------------------------------------------------------- // Dispatch to different CUB DeviceReduce entrypoints //--------------------------------------------------------------------- /** * Dispatch to reduce entrypoint (custom-max) */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT> CUB_RUNTIME_FUNCTION __forceinline__ hipError_t Dispatch( Int2Type<CUB> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, hipError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int num_items, int /*max_segments*/, OffsetIteratorT /*d_segment_offsets*/, ReductionOpT reduction_op, hipStream_t stream, bool debug_synchronous) { typedef typename std::iterator_traits<InputIteratorT>::value_type InputT; // The output value type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type // Max-identity OutputT identity = Traits<InputT>::Lowest(); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent // Invoke kernel to device reduction directly hipError_t error = hipSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, reduction_op, identity, stream, debug_synchronous); } return error; } /** * Dispatch to sum entrypoint */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ hipError_t Dispatch( Int2Type<CUB> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, hipError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int num_items, int /*max_segments*/, OffsetIteratorT /*d_segment_offsets*/, hipcub::Sum /*reduction_op*/, hipStream_t stream, bool debug_synchronous) { // Invoke kernel to device reduction directly hipError_t error = hipSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); } return error; } /** * Dispatch to min entrypoint */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ hipError_t Dispatch( Int2Type<CUB> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, hipError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int num_items, int /*max_segments*/, OffsetIteratorT /*d_segment_offsets*/, hipcub::Min /*reduction_op*/, hipStream_t stream, bool debug_synchronous) { // Invoke kernel to device reduction directly hipError_t error = hipSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); } return error; } /** * Dispatch to max entrypoint */ template <typename InputIteratorT, typename OutputIteratorT, 
typename OffsetIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ hipError_t Dispatch( Int2Type<CUB> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, hipError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int num_items, int /*max_segments*/, OffsetIteratorT /*d_segment_offsets*/, hipcub::Max /*reduction_op*/, hipStream_t stream, bool debug_synchronous) { // Invoke kernel to device reduction directly hipError_t error = hipSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); } return error; } /** * Dispatch to argmin entrypoint */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ hipError_t Dispatch( Int2Type<CUB> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, hipError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int num_items, int /*max_segments*/, OffsetIteratorT /*d_segment_offsets*/, hipcub::ArgMin /*reduction_op*/, hipStream_t stream, bool debug_synchronous) { // Invoke kernel to device reduction directly hipError_t error = hipSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); } return error; } /** * Dispatch to argmax entrypoint */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ hipError_t Dispatch( Int2Type<CUB> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, hipError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int num_items, int /*max_segments*/, OffsetIteratorT /*d_segment_offsets*/, hipcub::ArgMax /*reduction_op*/, hipStream_t stream, bool debug_synchronous) { // Invoke kernel to device reduction directly hipError_t error = hipSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); } return error; } //--------------------------------------------------------------------- // Dispatch to different CUB DeviceSegmentedReduce entrypoints //--------------------------------------------------------------------- /** * Dispatch to reduce entrypoint (custom-max) */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT> CUB_RUNTIME_FUNCTION __forceinline__ hipError_t Dispatch( Int2Type<CUB_SEGMENTED> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, hipError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int /*num_items*/, int max_segments, OffsetIteratorT d_segment_offsets, ReductionOpT reduction_op, hipStream_t stream, bool debug_synchronous) { // The input value type typedef typename std::iterator_traits<InputIteratorT>::value_type InputT; // The output value type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... 
then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type // Max-identity OutputT identity = Traits<InputT>::Lowest(); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent // Invoke kernel to device reduction directly hipError_t error = hipSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceSegmentedReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1, reduction_op, identity, stream, debug_synchronous); } return error; } /** * Dispatch to sum entrypoint */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ hipError_t Dispatch( Int2Type<CUB_SEGMENTED> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, hipError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int /*num_items*/, int max_segments, OffsetIteratorT d_segment_offsets, hipcub::Sum /*reduction_op*/, hipStream_t stream, bool debug_synchronous) { // Invoke kernel to device reduction directly hipError_t error = hipSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1, stream, debug_synchronous); } return error; } /** * Dispatch to min entrypoint */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ hipError_t Dispatch( Int2Type<CUB_SEGMENTED> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, hipError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int /*num_items*/, int max_segments, OffsetIteratorT d_segment_offsets, hipcub::Min /*reduction_op*/, hipStream_t stream, bool debug_synchronous) { // Invoke kernel to device reduction directly hipError_t error = hipSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceSegmentedReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1, stream, debug_synchronous); } return error; } /** * Dispatch to max entrypoint */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ hipError_t Dispatch( Int2Type<CUB_SEGMENTED> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, hipError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int /*num_items*/, int max_segments, OffsetIteratorT d_segment_offsets, hipcub::Max /*reduction_op*/, hipStream_t stream, bool debug_synchronous) { // Invoke kernel to device reduction directly hipError_t error = hipSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceSegmentedReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1, stream, debug_synchronous); } return error; } /** * Dispatch to argmin entrypoint */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ hipError_t Dispatch( Int2Type<CUB_SEGMENTED> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, hipError_t */*d_cdp_error*/, void* d_temp_storage, 
size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int /*num_items*/, int max_segments, OffsetIteratorT d_segment_offsets, hipcub::ArgMin /*reduction_op*/, hipStream_t stream, bool debug_synchronous) { // Invoke kernel to device reduction directly hipError_t error = hipSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceSegmentedReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1, stream, debug_synchronous); } return error; } /** * Dispatch to argmax entrypoint */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ hipError_t Dispatch( Int2Type<CUB_SEGMENTED> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, hipError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int /*num_items*/, int max_segments, OffsetIteratorT d_segment_offsets, hipcub::ArgMax /*reduction_op*/, hipStream_t stream, bool debug_synchronous) { // Invoke kernel to device reduction directly hipError_t error = hipSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceSegmentedReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1, stream, debug_synchronous); } return error; } //--------------------------------------------------------------------- // Dispatch to different Thrust entrypoints //--------------------------------------------------------------------- /** * Dispatch to reduction entrypoint (min or max specialization) */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT> hipError_t Dispatch( Int2Type<THRUST> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, hipError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int num_items, int /*max_segments*/, OffsetIteratorT /*d_segment_offsets*/, ReductionOpT reduction_op, hipStream_t /*stream*/, bool /*debug_synchronous*/) { // The output value type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... 
else the output iterator's value type if (d_temp_storage == 0) { temp_storage_bytes = 1; } else { OutputT init; CubDebugExit(hipMemcpy(&init, d_in + 0, sizeof(OutputT), hipMemcpyDeviceToHost)); thrust::device_ptr<OutputT> d_in_wrapper(d_in); OutputT retval; for (int i = 0; i < timing_iterations; ++i) { retval = thrust::reduce(d_in_wrapper, d_in_wrapper + num_items, init, reduction_op); } if (!Equals<OutputIteratorT, DiscardOutputIterator<int> >::VALUE) CubDebugExit(hipMemcpy(d_out, &retval, sizeof(OutputT), hipMemcpyHostToDevice)); } return hipSuccess; } /** * Dispatch to reduction entrypoint (sum specialization) */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> hipError_t Dispatch( Int2Type<THRUST> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, hipError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int num_items, int /*max_segments*/, OffsetIteratorT /*d_segment_offsets*/, Sum /*reduction_op*/, hipStream_t /*stream*/, bool /*debug_synchronous*/) { // The output value type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type if (d_temp_storage == 0) { temp_storage_bytes = 1; } else { thrust::device_ptr<OutputT> d_in_wrapper(d_in); OutputT retval; for (int i = 0; i < timing_iterations; ++i) { retval = thrust::reduce(d_in_wrapper, d_in_wrapper + num_items); } if (!Equals<OutputIteratorT, DiscardOutputIterator<int> >::VALUE) CubDebugExit(hipMemcpy(d_out, &retval, sizeof(OutputT), hipMemcpyHostToDevice)); } return hipSuccess; } //--------------------------------------------------------------------- // CUDA nested-parallelism test kernel //--------------------------------------------------------------------- /** * Simple wrapper kernel to invoke DeviceReduce */ template < typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT> __global__ void CnpDispatchKernel( int timing_iterations, size_t *d_temp_storage_bytes, hipError_t *d_cdp_error, void* d_temp_storage, size_t temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int num_items, int max_segments, OffsetIteratorT d_segment_offsets, ReductionOpT reduction_op, bool debug_synchronous) { #ifndef CUB_CDP (void)timing_iterations; (void)d_temp_storage_bytes; (void)d_cdp_error; (void)d_temp_storage; (void)temp_storage_bytes; (void)d_in; (void)d_out; (void)num_items; (void)max_segments; (void)d_segment_offsets; (void)reduction_op; (void)debug_synchronous; *d_cdp_error = hipErrorNotSupported; #else *d_cdp_error = Dispatch(Int2Type<CUB>(), timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, max_segments, d_segment_offsets, reduction_op, 0, debug_synchronous); *d_temp_storage_bytes = temp_storage_bytes; #endif } /** * Dispatch to CUB_CDP kernel */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT> CUB_RUNTIME_FUNCTION __forceinline__ hipError_t Dispatch( Int2Type<CUB_CDP> dispatch_to, int timing_iterations, size_t *d_temp_storage_bytes, hipError_t *d_cdp_error, void* d_temp_storage, size_t& temp_storage_bytes, 
InputIteratorT d_in, OutputIteratorT d_out, int num_items, int max_segments, OffsetIteratorT d_segment_offsets, ReductionOpT reduction_op, hipStream_t stream, bool debug_synchronous) { // Invoke kernel to invoke device-side dispatch hipLaunchKernelGGL(( CnpDispatchKernel), dim3(1),dim3(1), 0, 0, timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, max_segments, d_segment_offsets, reduction_op, debug_synchronous); // Copy out temp_storage_bytes CubDebugExit(hipMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, hipMemcpyDeviceToHost)); // Copy out error hipError_t retval; CubDebugExit(hipMemcpy(&retval, d_cdp_error, sizeof(hipError_t) * 1, hipMemcpyDeviceToHost)); return retval; } //--------------------------------------------------------------------- // Problem generation //--------------------------------------------------------------------- /// Initialize problem template <typename InputT> void Initialize( GenMode gen_mode, InputT *h_in, int num_items) { for (int i = 0; i < num_items; ++i) { InitValue(gen_mode, h_in[i], i); } if (g_verbose_input) { printf("Input:\n"); DisplayResults(h_in, num_items); printf("\n\n"); } } /// Solve problem (max/custom-max functor) template <typename ReductionOpT, typename InputT, typename _OutputT> struct Solution { typedef _OutputT OutputT; template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT> static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets, ReductionOpT reduction_op) { for (int i = 0; i < num_segments; ++i) { OutputT aggregate = Traits<InputT>::Lowest(); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j) aggregate = reduction_op(aggregate, OutputT(h_in[j])); h_reference[i] = aggregate; } } }; /// Solve problem (min functor) template <typename InputT, typename _OutputT> struct Solution<hipcub::Min, InputT, _OutputT> { typedef _OutputT OutputT; template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT> static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets, hipcub::Min reduction_op) { for (int i = 0; i < num_segments; ++i) { OutputT aggregate = Traits<InputT>::Max(); // replace with std::numeric_limits<OutputT>::max() when C++ support is more prevalent for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j) aggregate = reduction_op(aggregate, OutputT(h_in[j])); h_reference[i] = aggregate; } } }; /// Solve problem (sum functor) template <typename InputT, typename _OutputT> struct Solution<hipcub::Sum, InputT, _OutputT> { typedef _OutputT OutputT; template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT> static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets, hipcub::Sum reduction_op) { for (int i = 0; i < num_segments; ++i) { OutputT aggregate; InitValue(INTEGER_SEED, aggregate, 0); for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j) aggregate = reduction_op(aggregate, OutputT(h_in[j])); h_reference[i] = aggregate; } } }; /// Solve problem (argmin functor) template <typename InputValueT, typename OutputValueT> struct Solution<hipcub::ArgMin, InputValueT, OutputValueT> { typedef KeyValuePair<int, OutputValueT> OutputT; template <typename HostInputIteratorT, 
typename OffsetT, typename OffsetIteratorT> static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets, hipcub::ArgMin reduction_op) { for (int i = 0; i < num_segments; ++i) { OutputT aggregate(1, Traits<InputValueT>::Max()); // replace with std::numeric_limits<OutputT>::max() when C++ support is more prevalent for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j) { OutputT item(j - h_segment_offsets[i], OutputValueT(h_in[j])); aggregate = reduction_op(aggregate, item); } h_reference[i] = aggregate; } } }; /// Solve problem (argmax functor) template <typename InputValueT, typename OutputValueT> struct Solution<hipcub::ArgMax, InputValueT, OutputValueT> { typedef KeyValuePair<int, OutputValueT> OutputT; template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT> static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets, hipcub::ArgMax reduction_op) { for (int i = 0; i < num_segments; ++i) { OutputT aggregate(1, Traits<InputValueT>::Lowest()); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j) { OutputT item(j - h_segment_offsets[i], OutputValueT(h_in[j])); aggregate = reduction_op(aggregate, item); } h_reference[i] = aggregate; } } }; //--------------------------------------------------------------------- // Problem generation //--------------------------------------------------------------------- /// Test DeviceReduce for a given problem input template < typename BackendT, typename DeviceInputIteratorT, typename DeviceOutputIteratorT, typename HostReferenceIteratorT, typename OffsetT, typename OffsetIteratorT, typename ReductionOpT> void Test( BackendT backend, DeviceInputIteratorT d_in, DeviceOutputIteratorT d_out, OffsetT num_items, OffsetT num_segments, OffsetIteratorT d_segment_offsets, ReductionOpT reduction_op, HostReferenceIteratorT h_reference) { // Input data types typedef typename std::iterator_traits<DeviceInputIteratorT>::value_type InputT; // Allocate CUB_CDP device arrays for temp storage size and error size_t *d_temp_storage_bytes = NULL; hipError_t *d_cdp_error = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(hipError_t) * 1)); // Inquire temp device storage void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; CubDebugExit(Dispatch(backend, 1, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, num_segments, d_segment_offsets, reduction_op, 0, true)); // Allocate temp device storage CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); // Run warmup/correctness iteration CubDebugExit(Dispatch(backend, 1, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, num_segments, d_segment_offsets, reduction_op, 0, true)); // Check for correctness (and display results, if specified) int compare = CompareDeviceResults(h_reference, d_out, num_segments, g_verbose, g_verbose); printf("\t%s", compare ? 
"FAIL" : "PASS"); // Flush any stdout/stderr fflush(stdout); fflush(stderr); // Performance if (g_timing_iterations > 0) { GpuTimer gpu_timer; gpu_timer.Start(); CubDebugExit(Dispatch(backend, g_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, num_segments, d_segment_offsets, reduction_op, 0, false)); gpu_timer.Stop(); float elapsed_millis = gpu_timer.ElapsedMillis(); // Display performance float avg_millis = elapsed_millis / g_timing_iterations; float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f; float giga_bandwidth = giga_rate * sizeof(InputT); printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s, %.1f%% peak", avg_millis, giga_rate, giga_bandwidth, giga_bandwidth / g_device_giga_bandwidth * 100.0); } if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes)); if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error)); if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); // Correctness asserts AssertEquals(0, compare); } /// Test DeviceReduce template < Backend BACKEND, typename OutputValueT, typename HostInputIteratorT, typename DeviceInputIteratorT, typename OffsetT, typename OffsetIteratorT, typename ReductionOpT> void SolveAndTest( HostInputIteratorT h_in, DeviceInputIteratorT d_in, OffsetT num_items, OffsetT num_segments, OffsetIteratorT h_segment_offsets, OffsetIteratorT d_segment_offsets, ReductionOpT reduction_op) { typedef typename std::iterator_traits<DeviceInputIteratorT>::value_type InputValueT; typedef Solution<ReductionOpT, InputValueT, OutputValueT> SolutionT; typedef typename SolutionT::OutputT OutputT; printf("\n\n%s hipcub::DeviceReduce<%s> %d items (%s), %d segments\n", (BACKEND == CUB_CDP) ? "CUB_CDP" : (BACKEND == THRUST) ? "Thrust" : (BACKEND == CUB_SEGMENTED) ? "CUB_SEGMENTED" : "CUB", typeid(ReductionOpT).name(), num_items, typeid(HostInputIteratorT).name(), num_segments); fflush(stdout); // Allocate and solve solution OutputT *h_reference = new OutputT[num_segments]; SolutionT::Solve(h_in, h_reference, num_segments, h_segment_offsets, reduction_op); // // Run with discard iterator // DiscardOutputIterator<OffsetT> discard_itr; // Test(Int2Type<BACKEND>(), d_in, discard_itr, num_items, num_segments, d_segment_offsets, reduction_op, h_reference); // Run with output data (cleared for sanity-check) OutputT *d_out = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(OutputT) * num_segments)); CubDebugExit(hipMemset(d_out, 0, sizeof(OutputT) * num_segments)); Test(Int2Type<BACKEND>(), d_in, d_out, num_items, num_segments, d_segment_offsets, reduction_op, h_reference); // Cleanup if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); if (h_reference) delete[] h_reference; } /// Test specific problem type template < Backend BACKEND, typename InputT, typename OutputT, typename OffsetT, typename ReductionOpT> void TestProblem( OffsetT num_items, OffsetT num_segments, GenMode gen_mode, ReductionOpT reduction_op) { printf("\n\nInitializing %d %s->%s (gen mode %d)... 
", num_items, typeid(InputT).name(), typeid(OutputT).name(), gen_mode); fflush(stdout); fflush(stdout); // Initialize value data InputT* h_in = new InputT[num_items]; Initialize(gen_mode, h_in, num_items); // Initialize segment data OffsetT *h_segment_offsets = new OffsetT[num_segments + 1]; InitializeSegments(num_items, num_segments, h_segment_offsets, g_verbose_input); // Initialize device data OffsetT *d_segment_offsets = NULL; InputT *d_in = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(InputT) * num_items)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_segment_offsets, sizeof(OffsetT) * (num_segments + 1))); CubDebugExit(hipMemcpy(d_in, h_in, sizeof(InputT) * num_items, hipMemcpyHostToDevice)); CubDebugExit(hipMemcpy(d_segment_offsets, h_segment_offsets, sizeof(OffsetT) * (num_segments + 1), hipMemcpyHostToDevice)); SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, reduction_op); if (h_segment_offsets) delete[] h_segment_offsets; if (d_segment_offsets) CubDebugExit(g_allocator.DeviceFree(d_segment_offsets)); if (h_in) delete[] h_in; if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); } /// Test different operators template < Backend BACKEND, typename OutputT, typename HostInputIteratorT, typename DeviceInputIteratorT, typename OffsetT, typename OffsetIteratorT> void TestByOp( HostInputIteratorT h_in, DeviceInputIteratorT d_in, OffsetT num_items, OffsetT num_segments, OffsetIteratorT h_segment_offsets, OffsetIteratorT d_segment_offsets) { SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, CustomMax()); SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, Sum()); SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, Min()); SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, ArgMin()); SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, Max()); SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, ArgMax()); } /// Test different backends template < typename InputT, typename OutputT, typename OffsetT> void TestByBackend( OffsetT num_items, OffsetT max_segments, GenMode gen_mode) { // Initialize host data printf("\n\nInitializing %d %s -> %s (gen mode %d)... 
", num_items, typeid(InputT).name(), typeid(OutputT).name(), gen_mode); fflush(stdout); InputT *h_in = new InputT[num_items]; OffsetT *h_segment_offsets = new OffsetT[max_segments + 1]; Initialize(gen_mode, h_in, num_items); // Initialize device data InputT *d_in = NULL; OffsetT *d_segment_offsets = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(InputT) * num_items)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_segment_offsets, sizeof(OffsetT) * (max_segments + 1))); CubDebugExit(hipMemcpy(d_in, h_in, sizeof(InputT) * num_items, hipMemcpyHostToDevice)); // // Test single-segment implementations // InitializeSegments(num_items, 1, h_segment_offsets, g_verbose_input); // Page-aligned-input tests TestByOp<CUB, OutputT>(h_in, d_in, num_items, 1, h_segment_offsets, (OffsetT*) NULL); // Host-dispatch #ifdef CUB_CDP TestByOp<CUB_CDP, OutputT>(h_in, d_in, num_items, 1, h_segment_offsets, (OffsetT*) NULL); // Device-dispatch #endif // Non-page-aligned-input tests if (num_items > 1) { InitializeSegments(num_items - 1, 1, h_segment_offsets, g_verbose_input); TestByOp<CUB, OutputT>(h_in + 1, d_in + 1, num_items - 1, 1, h_segment_offsets, (OffsetT*) NULL); } // // Test segmented implementation // // Right now we assign a single thread block to each segment, so lets keep it to under 128K items per segment int max_items_per_segment = 128000; for (int num_segments = (num_items + max_items_per_segment - 1) / max_items_per_segment; num_segments < max_segments; num_segments = (num_segments * 32) + 1) { // Test with segment pointer InitializeSegments(num_items, num_segments, h_segment_offsets, g_verbose_input); CubDebugExit(hipMemcpy(d_segment_offsets, h_segment_offsets, sizeof(OffsetT) * (num_segments + 1), hipMemcpyHostToDevice)); TestByOp<CUB_SEGMENTED, OutputT>( h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets); // Test with segment iterator typedef CastOp<OffsetT> IdentityOpT; IdentityOpT identity_op; TransformInputIterator<OffsetT, IdentityOpT, OffsetT*, OffsetT> h_segment_offsets_itr( h_segment_offsets, identity_op); TransformInputIterator<OffsetT, IdentityOpT, OffsetT*, OffsetT> d_segment_offsets_itr( d_segment_offsets, identity_op); TestByOp<CUB_SEGMENTED, OutputT>( h_in, d_in, num_items, num_segments, h_segment_offsets_itr, d_segment_offsets_itr); } if (h_in) delete[] h_in; if (h_segment_offsets) delete[] h_segment_offsets; if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); if (d_segment_offsets) CubDebugExit(g_allocator.DeviceFree(d_segment_offsets)); } /// Test different input-generation modes template < typename InputT, typename OutputT, typename OffsetT> void TestByGenMode( OffsetT num_items, OffsetT max_segments) { // // Test pointer support using different input-generation modes // TestByBackend<InputT, OutputT>(num_items, max_segments, UNIFORM); TestByBackend<InputT, OutputT>(num_items, max_segments, INTEGER_SEED); TestByBackend<InputT, OutputT>(num_items, max_segments, RANDOM); // // Test iterator support using a constant-iterator and SUM // InputT val; InitValue(UNIFORM, val, 0); ConstantInputIterator<InputT, OffsetT> h_in(val); OffsetT *h_segment_offsets = new OffsetT[1 + 1]; InitializeSegments(num_items, 1, h_segment_offsets, g_verbose_input); SolveAndTest<CUB, OutputT>(h_in, h_in, num_items, 1, h_segment_offsets, (OffsetT*) NULL, Sum()); #ifdef CUB_CDP SolveAndTest<CUB_CDP, OutputT>(h_in, h_in, num_items, 1, h_segment_offsets, (OffsetT*) NULL, Sum()); #endif if (h_segment_offsets) delete[] h_segment_offsets; } /// Test different 
problem sizes template < typename InputT, typename OutputT, typename OffsetT> struct TestBySize { OffsetT max_items; OffsetT max_segments; TestBySize(OffsetT max_items, OffsetT max_segments) : max_items(max_items), max_segments(max_segments) {} template <typename ActivePolicyT> hipError_t Invoke() { // // Black-box testing on all backends // // Test 0, 1, many TestByGenMode<InputT, OutputT>(0, max_segments); TestByGenMode<InputT, OutputT>(1, max_segments); TestByGenMode<InputT, OutputT>(max_items, max_segments); // Test random problem sizes from a log-distribution [8, max_items-ish) int num_iterations = 8; double max_exp = log(double(max_items)) / log(double(2.0)); for (int i = 0; i < num_iterations; ++i) { OffsetT num_items = (OffsetT) pow(2.0, RandomValue(max_exp - 3.0) + 3.0); TestByGenMode<InputT, OutputT>(num_items, max_segments); } // // White-box testing of single-segment problems around specific sizes // // Tile-boundaries: multiple blocks, one tile per block OffsetT tile_size = ActivePolicyT::ReducePolicy::BLOCK_THREADS * ActivePolicyT::ReducePolicy::ITEMS_PER_THREAD; TestProblem<CUB, InputT, OutputT>(tile_size * 4, 1, RANDOM, Sum()); TestProblem<CUB, InputT, OutputT>(tile_size * 4 + 1, 1, RANDOM, Sum()); TestProblem<CUB, InputT, OutputT>(tile_size * 4 - 1, 1, RANDOM, Sum()); // Tile-boundaries: multiple blocks, multiple tiles per block OffsetT sm_occupancy = 32; OffsetT occupancy = tile_size * sm_occupancy * g_sm_count; TestProblem<CUB, InputT, OutputT>(occupancy, 1, RANDOM, Sum()); TestProblem<CUB, InputT, OutputT>(occupancy + 1, 1, RANDOM, Sum()); TestProblem<CUB, InputT, OutputT>(occupancy - 1, 1, RANDOM, Sum()); return hipSuccess; } }; /// Test problem type template < typename InputT, typename OutputT, typename OffsetT> void TestType( OffsetT max_items, OffsetT max_segments) { typedef typename DeviceReducePolicy<InputT, OutputT, OffsetT, hipcub::Sum>::MaxPolicy MaxPolicyT; TestBySize<InputT, OutputT, OffsetT> dispatch(max_items, max_segments); MaxPolicyT::Invoke(g_ptx_version, dispatch); } //--------------------------------------------------------------------- // Main //--------------------------------------------------------------------- /** * Main */ int main(int argc, char** argv) { typedef int OffsetT; OffsetT max_items = 27000000; OffsetT max_segments = 34000; // Initialize command line CommandLineArgs args(argc, argv); g_verbose = args.CheckCmdLineFlag("v"); g_verbose_input = args.CheckCmdLineFlag("v2"); args.GetCmdLineArgument("n", max_items); args.GetCmdLineArgument("s", max_segments); args.GetCmdLineArgument("i", g_timing_iterations); args.GetCmdLineArgument("repeat", g_repeat); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s " "[--n=<input items> " "[--s=<num segments> " "[--i=<timing iterations> " "[--device=<device-id>] " "[--repeat=<repetitions of entire test suite>]" "[--v] " "[--cdp]" "\n", argv[0]); exit(0); } // Initialize device CubDebugExit(args.DeviceInit()); g_device_giga_bandwidth = args.device_giga_bandwidth; // Get ptx version CubDebugExit(PtxVersion(g_ptx_version)); // Get SM count g_sm_count = args.deviceProp.multiProcessorCount; #ifdef QUICKER_TEST // Compile/run basic test TestProblem<CUB, char, int>( max_items, 1, RANDOM_BIT, Sum()); TestProblem<CUB, short, int>( max_items, 1, RANDOM_BIT, Sum()); printf("\n-------------------------------\n"); TestProblem<CUB, int, int>( max_items, 1, RANDOM_BIT, Sum()); TestProblem<CUB, long long, long long>( max_items, 1, RANDOM_BIT, Sum()); printf("\n-------------------------------\n"); 
TestProblem<CUB, float, float>( max_items, 1, RANDOM_BIT, Sum()); TestProblem<CUB, double, double>( max_items, 1, RANDOM_BIT, Sum()); printf("\n-------------------------------\n"); TestProblem<CUB_SEGMENTED, int, int>(max_items, max_segments, RANDOM_BIT, Sum()); #elif defined(QUICK_TEST) // Compile/run quick comparison tests TestProblem<CUB, char, char>( max_items * 4, 1, UNIFORM, Sum()); TestProblem<THRUST, char, char>( max_items * 4, 1, UNIFORM, Sum()); printf("\n----------------------------\n"); TestProblem<CUB, short, short>( max_items * 2, 1, UNIFORM, Sum()); TestProblem<THRUST, short, short>( max_items * 2, 1, UNIFORM, Sum()); printf("\n----------------------------\n"); TestProblem<CUB, int, int>( max_items, 1, UNIFORM, Sum()); TestProblem<THRUST, int, int>( max_items, 1, UNIFORM, Sum()); printf("\n----------------------------\n"); TestProblem<CUB, long long, long long>( max_items / 2, 1, UNIFORM, Sum()); TestProblem<THRUST, long long, long long>( max_items / 2, 1, UNIFORM, Sum()); printf("\n----------------------------\n"); TestProblem<CUB, TestFoo, TestFoo>( max_items / 4, 1, UNIFORM, Max()); TestProblem<THRUST, TestFoo, TestFoo>( max_items / 4, 1, UNIFORM, Max()); #else // Compile/run thorough tests for (int i = 0; i <= g_repeat; ++i) { // Test different input types TestType<char, char>(max_items, max_segments); TestType<unsigned char, unsigned char>(max_items, max_segments); TestType<char, int>(max_items, max_segments); TestType<short, short>(max_items, max_segments); TestType<int, int>(max_items, max_segments); TestType<long, long>(max_items, max_segments); TestType<long long, long long>(max_items, max_segments); TestType<uchar2, uchar2>(max_items, max_segments); TestType<uint2, uint2>(max_items, max_segments); TestType<ulonglong2, ulonglong2>(max_items, max_segments); TestType<ulonglong4, ulonglong4>(max_items, max_segments); TestType<TestFoo, TestFoo>(max_items, max_segments); TestType<TestBar, TestBar>(max_items, max_segments); } #endif printf("\n"); return 0; }
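Every backend exercised by the test above follows the same two-phase workflow: a first Dispatch call with a NULL workspace only sizes temp_storage_bytes, and a second call with real storage performs the reduction. Below is a minimal standalone sketch of that pattern against hipcub::DeviceReduce::Sum, the entry point this HIP port ultimately calls; the function name and the int value type are illustrative only, not part of the test.

#include <hipcub/hipcub.hpp>

// Minimal sketch: device-wide sum of num_items ints already resident on the GPU.
hipError_t example_device_sum(const int* d_in, int* d_out, int num_items)
{
    void*  d_temp_storage     = NULL;
    size_t temp_storage_bytes = 0;

    // Pass 1: d_temp_storage == NULL, so this call only reports the workspace size.
    hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);

    hipError_t error = hipMalloc(&d_temp_storage, temp_storage_bytes);
    if (error != hipSuccess) return error;

    // Pass 2: the same call with real storage runs the reduction into d_out[0].
    error = hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items);

    hipFree(d_temp_storage);
    return error;
}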
8648c8c39adc8f4a07f7929979e8b445c047b2ea.cu
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /****************************************************************************** * Test of DeviceReduce utilities ******************************************************************************/ // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <stdio.h> #include <limits> #include <typeinfo> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <cub/util_allocator.cuh> #include <cub/device/device_reduce.cuh> #include <cub/device/device_segmented_reduce.cuh> #include <cub/iterator/constant_input_iterator.cuh> #include <cub/iterator/discard_output_iterator.cuh> #include <cub/iterator/transform_input_iterator.cuh> #include "test_util.h" using namespace cub; //--------------------------------------------------------------------- // Globals, constants and typedefs //--------------------------------------------------------------------- int g_ptx_version; int g_sm_count; double g_device_giga_bandwidth; bool g_verbose = false; bool g_verbose_input = false; int g_timing_iterations = 0; int g_repeat = 0; CachingDeviceAllocator g_allocator(true); // Dispatch types enum Backend { CUB, // CUB method CUB_SEGMENTED, // CUB segmented method CUB_CDP, // GPU-based (dynamic parallelism) dispatch to CUB method THRUST, // Thrust method }; // Custom max functor struct CustomMax { /// Boolean max operator, returns <tt>(a > b) ? 
a : b</tt> template <typename OutputT> __host__ __device__ __forceinline__ OutputT operator()(const OutputT &a, const OutputT &b) { return CUB_MAX(a, b); } }; //--------------------------------------------------------------------- // Dispatch to different CUB DeviceReduce entrypoints //--------------------------------------------------------------------- /** * Dispatch to reduce entrypoint (custom-max) */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Dispatch( Int2Type<CUB> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, cudaError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int num_items, int /*max_segments*/, OffsetIteratorT /*d_segment_offsets*/, ReductionOpT reduction_op, cudaStream_t stream, bool debug_synchronous) { typedef typename std::iterator_traits<InputIteratorT>::value_type InputT; // The output value type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type // Max-identity OutputT identity = Traits<InputT>::Lowest(); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent // Invoke kernel to device reduction directly cudaError_t error = cudaSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, reduction_op, identity, stream, debug_synchronous); } return error; } /** * Dispatch to sum entrypoint */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Dispatch( Int2Type<CUB> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, cudaError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int num_items, int /*max_segments*/, OffsetIteratorT /*d_segment_offsets*/, cub::Sum /*reduction_op*/, cudaStream_t stream, bool debug_synchronous) { // Invoke kernel to device reduction directly cudaError_t error = cudaSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); } return error; } /** * Dispatch to min entrypoint */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Dispatch( Int2Type<CUB> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, cudaError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int num_items, int /*max_segments*/, OffsetIteratorT /*d_segment_offsets*/, cub::Min /*reduction_op*/, cudaStream_t stream, bool debug_synchronous) { // Invoke kernel to device reduction directly cudaError_t error = cudaSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); } return error; } /** * Dispatch to max entrypoint */ template <typename InputIteratorT, typename 
OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Dispatch( Int2Type<CUB> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, cudaError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int num_items, int /*max_segments*/, OffsetIteratorT /*d_segment_offsets*/, cub::Max /*reduction_op*/, cudaStream_t stream, bool debug_synchronous) { // Invoke kernel to device reduction directly cudaError_t error = cudaSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); } return error; } /** * Dispatch to argmin entrypoint */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Dispatch( Int2Type<CUB> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, cudaError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int num_items, int /*max_segments*/, OffsetIteratorT /*d_segment_offsets*/, cub::ArgMin /*reduction_op*/, cudaStream_t stream, bool debug_synchronous) { // Invoke kernel to device reduction directly cudaError_t error = cudaSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); } return error; } /** * Dispatch to argmax entrypoint */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Dispatch( Int2Type<CUB> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, cudaError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int num_items, int /*max_segments*/, OffsetIteratorT /*d_segment_offsets*/, cub::ArgMax /*reduction_op*/, cudaStream_t stream, bool debug_synchronous) { // Invoke kernel to device reduction directly cudaError_t error = cudaSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, stream, debug_synchronous); } return error; } //--------------------------------------------------------------------- // Dispatch to different CUB DeviceSegmentedReduce entrypoints //--------------------------------------------------------------------- /** * Dispatch to reduce entrypoint (custom-max) */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Dispatch( Int2Type<CUB_SEGMENTED> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, cudaError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int /*num_items*/, int max_segments, OffsetIteratorT d_segment_offsets, ReductionOpT reduction_op, cudaStream_t stream, bool debug_synchronous) { // The input value type typedef typename std::iterator_traits<InputIteratorT>::value_type InputT; // The output value type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... 
then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type // Max-identity OutputT identity = Traits<InputT>::Lowest(); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent // Invoke kernel to device reduction directly cudaError_t error = cudaSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceSegmentedReduce::Reduce(d_temp_storage, temp_storage_bytes, d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1, reduction_op, identity, stream, debug_synchronous); } return error; } /** * Dispatch to sum entrypoint */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Dispatch( Int2Type<CUB_SEGMENTED> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, cudaError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int /*num_items*/, int max_segments, OffsetIteratorT d_segment_offsets, cub::Sum /*reduction_op*/, cudaStream_t stream, bool debug_synchronous) { // Invoke kernel to device reduction directly cudaError_t error = cudaSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1, stream, debug_synchronous); } return error; } /** * Dispatch to min entrypoint */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Dispatch( Int2Type<CUB_SEGMENTED> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, cudaError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int /*num_items*/, int max_segments, OffsetIteratorT d_segment_offsets, cub::Min /*reduction_op*/, cudaStream_t stream, bool debug_synchronous) { // Invoke kernel to device reduction directly cudaError_t error = cudaSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceSegmentedReduce::Min(d_temp_storage, temp_storage_bytes, d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1, stream, debug_synchronous); } return error; } /** * Dispatch to max entrypoint */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Dispatch( Int2Type<CUB_SEGMENTED> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, cudaError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int /*num_items*/, int max_segments, OffsetIteratorT d_segment_offsets, cub::Max /*reduction_op*/, cudaStream_t stream, bool debug_synchronous) { // Invoke kernel to device reduction directly cudaError_t error = cudaSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceSegmentedReduce::Max(d_temp_storage, temp_storage_bytes, d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1, stream, debug_synchronous); } return error; } /** * Dispatch to argmin entrypoint */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Dispatch( Int2Type<CUB_SEGMENTED> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, cudaError_t */*d_cdp_error*/, void* 
d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int /*num_items*/, int max_segments, OffsetIteratorT d_segment_offsets, cub::ArgMin /*reduction_op*/, cudaStream_t stream, bool debug_synchronous) { // Invoke kernel to device reduction directly cudaError_t error = cudaSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceSegmentedReduce::ArgMin(d_temp_storage, temp_storage_bytes, d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1, stream, debug_synchronous); } return error; } /** * Dispatch to argmax entrypoint */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Dispatch( Int2Type<CUB_SEGMENTED> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, cudaError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int /*num_items*/, int max_segments, OffsetIteratorT d_segment_offsets, cub::ArgMax /*reduction_op*/, cudaStream_t stream, bool debug_synchronous) { // Invoke kernel to device reduction directly cudaError_t error = cudaSuccess; for (int i = 0; i < timing_iterations; ++i) { error = DeviceSegmentedReduce::ArgMax(d_temp_storage, temp_storage_bytes, d_in, d_out, max_segments, d_segment_offsets, d_segment_offsets + 1, stream, debug_synchronous); } return error; } //--------------------------------------------------------------------- // Dispatch to different Thrust entrypoints //--------------------------------------------------------------------- /** * Dispatch to reduction entrypoint (min or max specialization) */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT> cudaError_t Dispatch( Int2Type<THRUST> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, cudaError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int num_items, int /*max_segments*/, OffsetIteratorT /*d_segment_offsets*/, ReductionOpT reduction_op, cudaStream_t /*stream*/, bool /*debug_synchronous*/) { // The output value type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... 
else the output iterator's value type if (d_temp_storage == 0) { temp_storage_bytes = 1; } else { OutputT init; CubDebugExit(cudaMemcpy(&init, d_in + 0, sizeof(OutputT), cudaMemcpyDeviceToHost)); thrust::device_ptr<OutputT> d_in_wrapper(d_in); OutputT retval; for (int i = 0; i < timing_iterations; ++i) { retval = thrust::reduce(d_in_wrapper, d_in_wrapper + num_items, init, reduction_op); } if (!Equals<OutputIteratorT, DiscardOutputIterator<int> >::VALUE) CubDebugExit(cudaMemcpy(d_out, &retval, sizeof(OutputT), cudaMemcpyHostToDevice)); } return cudaSuccess; } /** * Dispatch to reduction entrypoint (sum specialization) */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT> cudaError_t Dispatch( Int2Type<THRUST> /*dispatch_to*/, int timing_iterations, size_t */*d_temp_storage_bytes*/, cudaError_t */*d_cdp_error*/, void* d_temp_storage, size_t& temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int num_items, int /*max_segments*/, OffsetIteratorT /*d_segment_offsets*/, Sum /*reduction_op*/, cudaStream_t /*stream*/, bool /*debug_synchronous*/) { // The output value type typedef typename If<(Equals<typename std::iterator_traits<OutputIteratorT>::value_type, void>::VALUE), // OutputT = (if output iterator's value type is void) ? typename std::iterator_traits<InputIteratorT>::value_type, // ... then the input iterator's value type, typename std::iterator_traits<OutputIteratorT>::value_type>::Type OutputT; // ... else the output iterator's value type if (d_temp_storage == 0) { temp_storage_bytes = 1; } else { thrust::device_ptr<OutputT> d_in_wrapper(d_in); OutputT retval; for (int i = 0; i < timing_iterations; ++i) { retval = thrust::reduce(d_in_wrapper, d_in_wrapper + num_items); } if (!Equals<OutputIteratorT, DiscardOutputIterator<int> >::VALUE) CubDebugExit(cudaMemcpy(d_out, &retval, sizeof(OutputT), cudaMemcpyHostToDevice)); } return cudaSuccess; } //--------------------------------------------------------------------- // CUDA nested-parallelism test kernel //--------------------------------------------------------------------- /** * Simple wrapper kernel to invoke DeviceReduce */ template < typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT> __global__ void CnpDispatchKernel( int timing_iterations, size_t *d_temp_storage_bytes, cudaError_t *d_cdp_error, void* d_temp_storage, size_t temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int num_items, int max_segments, OffsetIteratorT d_segment_offsets, ReductionOpT reduction_op, bool debug_synchronous) { #ifndef CUB_CDP (void)timing_iterations; (void)d_temp_storage_bytes; (void)d_cdp_error; (void)d_temp_storage; (void)temp_storage_bytes; (void)d_in; (void)d_out; (void)num_items; (void)max_segments; (void)d_segment_offsets; (void)reduction_op; (void)debug_synchronous; *d_cdp_error = cudaErrorNotSupported; #else *d_cdp_error = Dispatch(Int2Type<CUB>(), timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, max_segments, d_segment_offsets, reduction_op, 0, debug_synchronous); *d_temp_storage_bytes = temp_storage_bytes; #endif } /** * Dispatch to CUB_CDP kernel */ template <typename InputIteratorT, typename OutputIteratorT, typename OffsetIteratorT, typename ReductionOpT> CUB_RUNTIME_FUNCTION __forceinline__ cudaError_t Dispatch( Int2Type<CUB_CDP> dispatch_to, int timing_iterations, size_t *d_temp_storage_bytes, cudaError_t *d_cdp_error, void* d_temp_storage, size_t& 
temp_storage_bytes, InputIteratorT d_in, OutputIteratorT d_out, int num_items, int max_segments, OffsetIteratorT d_segment_offsets, ReductionOpT reduction_op, cudaStream_t stream, bool debug_synchronous) { // Invoke kernel to invoke device-side dispatch CnpDispatchKernel<<<1,1>>>(timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, max_segments, d_segment_offsets, reduction_op, debug_synchronous); // Copy out temp_storage_bytes CubDebugExit(cudaMemcpy(&temp_storage_bytes, d_temp_storage_bytes, sizeof(size_t) * 1, cudaMemcpyDeviceToHost)); // Copy out error cudaError_t retval; CubDebugExit(cudaMemcpy(&retval, d_cdp_error, sizeof(cudaError_t) * 1, cudaMemcpyDeviceToHost)); return retval; } //--------------------------------------------------------------------- // Problem generation //--------------------------------------------------------------------- /// Initialize problem template <typename InputT> void Initialize( GenMode gen_mode, InputT *h_in, int num_items) { for (int i = 0; i < num_items; ++i) { InitValue(gen_mode, h_in[i], i); } if (g_verbose_input) { printf("Input:\n"); DisplayResults(h_in, num_items); printf("\n\n"); } } /// Solve problem (max/custom-max functor) template <typename ReductionOpT, typename InputT, typename _OutputT> struct Solution { typedef _OutputT OutputT; template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT> static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets, ReductionOpT reduction_op) { for (int i = 0; i < num_segments; ++i) { OutputT aggregate = Traits<InputT>::Lowest(); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j) aggregate = reduction_op(aggregate, OutputT(h_in[j])); h_reference[i] = aggregate; } } }; /// Solve problem (min functor) template <typename InputT, typename _OutputT> struct Solution<cub::Min, InputT, _OutputT> { typedef _OutputT OutputT; template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT> static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets, cub::Min reduction_op) { for (int i = 0; i < num_segments; ++i) { OutputT aggregate = Traits<InputT>::Max(); // replace with std::numeric_limits<OutputT>::max() when C++ support is more prevalent for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j) aggregate = reduction_op(aggregate, OutputT(h_in[j])); h_reference[i] = aggregate; } } }; /// Solve problem (sum functor) template <typename InputT, typename _OutputT> struct Solution<cub::Sum, InputT, _OutputT> { typedef _OutputT OutputT; template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT> static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets, cub::Sum reduction_op) { for (int i = 0; i < num_segments; ++i) { OutputT aggregate; InitValue(INTEGER_SEED, aggregate, 0); for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j) aggregate = reduction_op(aggregate, OutputT(h_in[j])); h_reference[i] = aggregate; } } }; /// Solve problem (argmin functor) template <typename InputValueT, typename OutputValueT> struct Solution<cub::ArgMin, InputValueT, OutputValueT> { typedef KeyValuePair<int, OutputValueT> OutputT; template <typename HostInputIteratorT, typename OffsetT, typename 
OffsetIteratorT> static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets, cub::ArgMin reduction_op) { for (int i = 0; i < num_segments; ++i) { OutputT aggregate(1, Traits<InputValueT>::Max()); // replace with std::numeric_limits<OutputT>::max() when C++ support is more prevalent for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j) { OutputT item(j - h_segment_offsets[i], OutputValueT(h_in[j])); aggregate = reduction_op(aggregate, item); } h_reference[i] = aggregate; } } }; /// Solve problem (argmax functor) template <typename InputValueT, typename OutputValueT> struct Solution<cub::ArgMax, InputValueT, OutputValueT> { typedef KeyValuePair<int, OutputValueT> OutputT; template <typename HostInputIteratorT, typename OffsetT, typename OffsetIteratorT> static void Solve(HostInputIteratorT h_in, OutputT *h_reference, OffsetT num_segments, OffsetIteratorT h_segment_offsets, cub::ArgMax reduction_op) { for (int i = 0; i < num_segments; ++i) { OutputT aggregate(1, Traits<InputValueT>::Lowest()); // replace with std::numeric_limits<OutputT>::lowest() when C++ support is more prevalent for (int j = h_segment_offsets[i]; j < h_segment_offsets[i + 1]; ++j) { OutputT item(j - h_segment_offsets[i], OutputValueT(h_in[j])); aggregate = reduction_op(aggregate, item); } h_reference[i] = aggregate; } } }; //--------------------------------------------------------------------- // Problem generation //--------------------------------------------------------------------- /// Test DeviceReduce for a given problem input template < typename BackendT, typename DeviceInputIteratorT, typename DeviceOutputIteratorT, typename HostReferenceIteratorT, typename OffsetT, typename OffsetIteratorT, typename ReductionOpT> void Test( BackendT backend, DeviceInputIteratorT d_in, DeviceOutputIteratorT d_out, OffsetT num_items, OffsetT num_segments, OffsetIteratorT d_segment_offsets, ReductionOpT reduction_op, HostReferenceIteratorT h_reference) { // Input data types typedef typename std::iterator_traits<DeviceInputIteratorT>::value_type InputT; // Allocate CUB_CDP device arrays for temp storage size and error size_t *d_temp_storage_bytes = NULL; cudaError_t *d_cdp_error = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_temp_storage_bytes, sizeof(size_t) * 1)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_cdp_error, sizeof(cudaError_t) * 1)); // Inquire temp device storage void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; CubDebugExit(Dispatch(backend, 1, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, num_segments, d_segment_offsets, reduction_op, 0, true)); // Allocate temp device storage CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage, temp_storage_bytes)); // Run warmup/correctness iteration CubDebugExit(Dispatch(backend, 1, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, num_segments, d_segment_offsets, reduction_op, 0, true)); // Check for correctness (and display results, if specified) int compare = CompareDeviceResults(h_reference, d_out, num_segments, g_verbose, g_verbose); printf("\t%s", compare ? 
"FAIL" : "PASS"); // Flush any stdout/stderr fflush(stdout); fflush(stderr); // Performance if (g_timing_iterations > 0) { GpuTimer gpu_timer; gpu_timer.Start(); CubDebugExit(Dispatch(backend, g_timing_iterations, d_temp_storage_bytes, d_cdp_error, d_temp_storage, temp_storage_bytes, d_in, d_out, num_items, num_segments, d_segment_offsets, reduction_op, 0, false)); gpu_timer.Stop(); float elapsed_millis = gpu_timer.ElapsedMillis(); // Display performance float avg_millis = elapsed_millis / g_timing_iterations; float giga_rate = float(num_items) / avg_millis / 1000.0f / 1000.0f; float giga_bandwidth = giga_rate * sizeof(InputT); printf(", %.3f avg ms, %.3f billion items/s, %.3f logical GB/s, %.1f%% peak", avg_millis, giga_rate, giga_bandwidth, giga_bandwidth / g_device_giga_bandwidth * 100.0); } if (d_temp_storage_bytes) CubDebugExit(g_allocator.DeviceFree(d_temp_storage_bytes)); if (d_cdp_error) CubDebugExit(g_allocator.DeviceFree(d_cdp_error)); if (d_temp_storage) CubDebugExit(g_allocator.DeviceFree(d_temp_storage)); // Correctness asserts AssertEquals(0, compare); } /// Test DeviceReduce template < Backend BACKEND, typename OutputValueT, typename HostInputIteratorT, typename DeviceInputIteratorT, typename OffsetT, typename OffsetIteratorT, typename ReductionOpT> void SolveAndTest( HostInputIteratorT h_in, DeviceInputIteratorT d_in, OffsetT num_items, OffsetT num_segments, OffsetIteratorT h_segment_offsets, OffsetIteratorT d_segment_offsets, ReductionOpT reduction_op) { typedef typename std::iterator_traits<DeviceInputIteratorT>::value_type InputValueT; typedef Solution<ReductionOpT, InputValueT, OutputValueT> SolutionT; typedef typename SolutionT::OutputT OutputT; printf("\n\n%s cub::DeviceReduce<%s> %d items (%s), %d segments\n", (BACKEND == CUB_CDP) ? "CUB_CDP" : (BACKEND == THRUST) ? "Thrust" : (BACKEND == CUB_SEGMENTED) ? "CUB_SEGMENTED" : "CUB", typeid(ReductionOpT).name(), num_items, typeid(HostInputIteratorT).name(), num_segments); fflush(stdout); // Allocate and solve solution OutputT *h_reference = new OutputT[num_segments]; SolutionT::Solve(h_in, h_reference, num_segments, h_segment_offsets, reduction_op); // // Run with discard iterator // DiscardOutputIterator<OffsetT> discard_itr; // Test(Int2Type<BACKEND>(), d_in, discard_itr, num_items, num_segments, d_segment_offsets, reduction_op, h_reference); // Run with output data (cleared for sanity-check) OutputT *d_out = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(OutputT) * num_segments)); CubDebugExit(cudaMemset(d_out, 0, sizeof(OutputT) * num_segments)); Test(Int2Type<BACKEND>(), d_in, d_out, num_items, num_segments, d_segment_offsets, reduction_op, h_reference); // Cleanup if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); if (h_reference) delete[] h_reference; } /// Test specific problem type template < Backend BACKEND, typename InputT, typename OutputT, typename OffsetT, typename ReductionOpT> void TestProblem( OffsetT num_items, OffsetT num_segments, GenMode gen_mode, ReductionOpT reduction_op) { printf("\n\nInitializing %d %s->%s (gen mode %d)... 
", num_items, typeid(InputT).name(), typeid(OutputT).name(), gen_mode); fflush(stdout); fflush(stdout); // Initialize value data InputT* h_in = new InputT[num_items]; Initialize(gen_mode, h_in, num_items); // Initialize segment data OffsetT *h_segment_offsets = new OffsetT[num_segments + 1]; InitializeSegments(num_items, num_segments, h_segment_offsets, g_verbose_input); // Initialize device data OffsetT *d_segment_offsets = NULL; InputT *d_in = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(InputT) * num_items)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_segment_offsets, sizeof(OffsetT) * (num_segments + 1))); CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(InputT) * num_items, cudaMemcpyHostToDevice)); CubDebugExit(cudaMemcpy(d_segment_offsets, h_segment_offsets, sizeof(OffsetT) * (num_segments + 1), cudaMemcpyHostToDevice)); SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, reduction_op); if (h_segment_offsets) delete[] h_segment_offsets; if (d_segment_offsets) CubDebugExit(g_allocator.DeviceFree(d_segment_offsets)); if (h_in) delete[] h_in; if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); } /// Test different operators template < Backend BACKEND, typename OutputT, typename HostInputIteratorT, typename DeviceInputIteratorT, typename OffsetT, typename OffsetIteratorT> void TestByOp( HostInputIteratorT h_in, DeviceInputIteratorT d_in, OffsetT num_items, OffsetT num_segments, OffsetIteratorT h_segment_offsets, OffsetIteratorT d_segment_offsets) { SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, CustomMax()); SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, Sum()); SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, Min()); SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, ArgMin()); SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, Max()); SolveAndTest<BACKEND, OutputT>(h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets, ArgMax()); } /// Test different backends template < typename InputT, typename OutputT, typename OffsetT> void TestByBackend( OffsetT num_items, OffsetT max_segments, GenMode gen_mode) { // Initialize host data printf("\n\nInitializing %d %s -> %s (gen mode %d)... 
", num_items, typeid(InputT).name(), typeid(OutputT).name(), gen_mode); fflush(stdout); InputT *h_in = new InputT[num_items]; OffsetT *h_segment_offsets = new OffsetT[max_segments + 1]; Initialize(gen_mode, h_in, num_items); // Initialize device data InputT *d_in = NULL; OffsetT *d_segment_offsets = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(InputT) * num_items)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_segment_offsets, sizeof(OffsetT) * (max_segments + 1))); CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(InputT) * num_items, cudaMemcpyHostToDevice)); // // Test single-segment implementations // InitializeSegments(num_items, 1, h_segment_offsets, g_verbose_input); // Page-aligned-input tests TestByOp<CUB, OutputT>(h_in, d_in, num_items, 1, h_segment_offsets, (OffsetT*) NULL); // Host-dispatch #ifdef CUB_CDP TestByOp<CUB_CDP, OutputT>(h_in, d_in, num_items, 1, h_segment_offsets, (OffsetT*) NULL); // Device-dispatch #endif // Non-page-aligned-input tests if (num_items > 1) { InitializeSegments(num_items - 1, 1, h_segment_offsets, g_verbose_input); TestByOp<CUB, OutputT>(h_in + 1, d_in + 1, num_items - 1, 1, h_segment_offsets, (OffsetT*) NULL); } // // Test segmented implementation // // Right now we assign a single thread block to each segment, so lets keep it to under 128K items per segment int max_items_per_segment = 128000; for (int num_segments = (num_items + max_items_per_segment - 1) / max_items_per_segment; num_segments < max_segments; num_segments = (num_segments * 32) + 1) { // Test with segment pointer InitializeSegments(num_items, num_segments, h_segment_offsets, g_verbose_input); CubDebugExit(cudaMemcpy(d_segment_offsets, h_segment_offsets, sizeof(OffsetT) * (num_segments + 1), cudaMemcpyHostToDevice)); TestByOp<CUB_SEGMENTED, OutputT>( h_in, d_in, num_items, num_segments, h_segment_offsets, d_segment_offsets); // Test with segment iterator typedef CastOp<OffsetT> IdentityOpT; IdentityOpT identity_op; TransformInputIterator<OffsetT, IdentityOpT, OffsetT*, OffsetT> h_segment_offsets_itr( h_segment_offsets, identity_op); TransformInputIterator<OffsetT, IdentityOpT, OffsetT*, OffsetT> d_segment_offsets_itr( d_segment_offsets, identity_op); TestByOp<CUB_SEGMENTED, OutputT>( h_in, d_in, num_items, num_segments, h_segment_offsets_itr, d_segment_offsets_itr); } if (h_in) delete[] h_in; if (h_segment_offsets) delete[] h_segment_offsets; if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); if (d_segment_offsets) CubDebugExit(g_allocator.DeviceFree(d_segment_offsets)); } /// Test different input-generation modes template < typename InputT, typename OutputT, typename OffsetT> void TestByGenMode( OffsetT num_items, OffsetT max_segments) { // // Test pointer support using different input-generation modes // TestByBackend<InputT, OutputT>(num_items, max_segments, UNIFORM); TestByBackend<InputT, OutputT>(num_items, max_segments, INTEGER_SEED); TestByBackend<InputT, OutputT>(num_items, max_segments, RANDOM); // // Test iterator support using a constant-iterator and SUM // InputT val; InitValue(UNIFORM, val, 0); ConstantInputIterator<InputT, OffsetT> h_in(val); OffsetT *h_segment_offsets = new OffsetT[1 + 1]; InitializeSegments(num_items, 1, h_segment_offsets, g_verbose_input); SolveAndTest<CUB, OutputT>(h_in, h_in, num_items, 1, h_segment_offsets, (OffsetT*) NULL, Sum()); #ifdef CUB_CDP SolveAndTest<CUB_CDP, OutputT>(h_in, h_in, num_items, 1, h_segment_offsets, (OffsetT*) NULL, Sum()); #endif if (h_segment_offsets) delete[] h_segment_offsets; } /// Test different 
problem sizes template < typename InputT, typename OutputT, typename OffsetT> struct TestBySize { OffsetT max_items; OffsetT max_segments; TestBySize(OffsetT max_items, OffsetT max_segments) : max_items(max_items), max_segments(max_segments) {} template <typename ActivePolicyT> cudaError_t Invoke() { // // Black-box testing on all backends // // Test 0, 1, many TestByGenMode<InputT, OutputT>(0, max_segments); TestByGenMode<InputT, OutputT>(1, max_segments); TestByGenMode<InputT, OutputT>(max_items, max_segments); // Test random problem sizes from a log-distribution [8, max_items-ish) int num_iterations = 8; double max_exp = log(double(max_items)) / log(double(2.0)); for (int i = 0; i < num_iterations; ++i) { OffsetT num_items = (OffsetT) pow(2.0, RandomValue(max_exp - 3.0) + 3.0); TestByGenMode<InputT, OutputT>(num_items, max_segments); } // // White-box testing of single-segment problems around specific sizes // // Tile-boundaries: multiple blocks, one tile per block OffsetT tile_size = ActivePolicyT::ReducePolicy::BLOCK_THREADS * ActivePolicyT::ReducePolicy::ITEMS_PER_THREAD; TestProblem<CUB, InputT, OutputT>(tile_size * 4, 1, RANDOM, Sum()); TestProblem<CUB, InputT, OutputT>(tile_size * 4 + 1, 1, RANDOM, Sum()); TestProblem<CUB, InputT, OutputT>(tile_size * 4 - 1, 1, RANDOM, Sum()); // Tile-boundaries: multiple blocks, multiple tiles per block OffsetT sm_occupancy = 32; OffsetT occupancy = tile_size * sm_occupancy * g_sm_count; TestProblem<CUB, InputT, OutputT>(occupancy, 1, RANDOM, Sum()); TestProblem<CUB, InputT, OutputT>(occupancy + 1, 1, RANDOM, Sum()); TestProblem<CUB, InputT, OutputT>(occupancy - 1, 1, RANDOM, Sum()); return cudaSuccess; } }; /// Test problem type template < typename InputT, typename OutputT, typename OffsetT> void TestType( OffsetT max_items, OffsetT max_segments) { typedef typename DeviceReducePolicy<InputT, OutputT, OffsetT, cub::Sum>::MaxPolicy MaxPolicyT; TestBySize<InputT, OutputT, OffsetT> dispatch(max_items, max_segments); MaxPolicyT::Invoke(g_ptx_version, dispatch); } //--------------------------------------------------------------------- // Main //--------------------------------------------------------------------- /** * Main */ int main(int argc, char** argv) { typedef int OffsetT; OffsetT max_items = 27000000; OffsetT max_segments = 34000; // Initialize command line CommandLineArgs args(argc, argv); g_verbose = args.CheckCmdLineFlag("v"); g_verbose_input = args.CheckCmdLineFlag("v2"); args.GetCmdLineArgument("n", max_items); args.GetCmdLineArgument("s", max_segments); args.GetCmdLineArgument("i", g_timing_iterations); args.GetCmdLineArgument("repeat", g_repeat); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s " "[--n=<input items> " "[--s=<num segments> " "[--i=<timing iterations> " "[--device=<device-id>] " "[--repeat=<repetitions of entire test suite>]" "[--v] " "[--cdp]" "\n", argv[0]); exit(0); } // Initialize device CubDebugExit(args.DeviceInit()); g_device_giga_bandwidth = args.device_giga_bandwidth; // Get ptx version CubDebugExit(PtxVersion(g_ptx_version)); // Get SM count g_sm_count = args.deviceProp.multiProcessorCount; #ifdef QUICKER_TEST // Compile/run basic test TestProblem<CUB, char, int>( max_items, 1, RANDOM_BIT, Sum()); TestProblem<CUB, short, int>( max_items, 1, RANDOM_BIT, Sum()); printf("\n-------------------------------\n"); TestProblem<CUB, int, int>( max_items, 1, RANDOM_BIT, Sum()); TestProblem<CUB, long long, long long>( max_items, 1, RANDOM_BIT, Sum()); printf("\n-------------------------------\n"); 
TestProblem<CUB, float, float>( max_items, 1, RANDOM_BIT, Sum()); TestProblem<CUB, double, double>( max_items, 1, RANDOM_BIT, Sum()); printf("\n-------------------------------\n"); TestProblem<CUB_SEGMENTED, int, int>(max_items, max_segments, RANDOM_BIT, Sum()); #elif defined(QUICK_TEST) // Compile/run quick comparison tests TestProblem<CUB, char, char>( max_items * 4, 1, UNIFORM, Sum()); TestProblem<THRUST, char, char>( max_items * 4, 1, UNIFORM, Sum()); printf("\n----------------------------\n"); TestProblem<CUB, short, short>( max_items * 2, 1, UNIFORM, Sum()); TestProblem<THRUST, short, short>( max_items * 2, 1, UNIFORM, Sum()); printf("\n----------------------------\n"); TestProblem<CUB, int, int>( max_items, 1, UNIFORM, Sum()); TestProblem<THRUST, int, int>( max_items, 1, UNIFORM, Sum()); printf("\n----------------------------\n"); TestProblem<CUB, long long, long long>( max_items / 2, 1, UNIFORM, Sum()); TestProblem<THRUST, long long, long long>( max_items / 2, 1, UNIFORM, Sum()); printf("\n----------------------------\n"); TestProblem<CUB, TestFoo, TestFoo>( max_items / 4, 1, UNIFORM, Max()); TestProblem<THRUST, TestFoo, TestFoo>( max_items / 4, 1, UNIFORM, Max()); #else // Compile/run thorough tests for (int i = 0; i <= g_repeat; ++i) { // Test different input types TestType<char, char>(max_items, max_segments); TestType<unsigned char, unsigned char>(max_items, max_segments); TestType<char, int>(max_items, max_segments); TestType<short, short>(max_items, max_segments); TestType<int, int>(max_items, max_segments); TestType<long, long>(max_items, max_segments); TestType<long long, long long>(max_items, max_segments); TestType<uchar2, uchar2>(max_items, max_segments); TestType<uint2, uint2>(max_items, max_segments); TestType<ulonglong2, ulonglong2>(max_items, max_segments); TestType<ulonglong4, ulonglong4>(max_items, max_segments); TestType<TestFoo, TestFoo>(max_items, max_segments); TestType<TestBar, TestBar>(max_items, max_segments); } #endif printf("\n"); return 0; }
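For the CUB_SEGMENTED dispatches in the file above, segment i is delimited by offsets[i] and offsets[i + 1], and the same offsets array is passed twice (shifted by one element) as the begin/end offset iterators. The sketch below shows that call pattern in isolation, assuming device-resident data; the function and variable names are illustrative, not taken from the test.

#include <cub/device/device_segmented_reduce.cuh>

// Minimal sketch: sum each of num_segments segments of d_in into d_out[i],
// where segment i spans [d_offsets[i], d_offsets[i + 1]).
cudaError_t example_segmented_sum(const float* d_in, float* d_out,
                                  const int* d_offsets, int num_segments)
{
    void*  d_temp_storage     = NULL;
    size_t temp_storage_bytes = 0;

    // Size the workspace (no work is done while d_temp_storage is NULL).
    cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes,
        d_in, d_out, num_segments, d_offsets, d_offsets + 1);

    cudaError_t error = cudaMalloc(&d_temp_storage, temp_storage_bytes);
    if (error != cudaSuccess) return error;

    // Run the per-segment reductions.
    error = cub::DeviceSegmentedReduce::Sum(d_temp_storage, temp_storage_bytes,
        d_in, d_out, num_segments, d_offsets, d_offsets + 1);

    cudaFree(d_temp_storage);
    return error;
}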
813d1a6b5ccde31218eafca79988d8e571486945.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/gpu_util.cuh" #include "caffe/st_layer.hpp" #include "caffe/util/benchmark.hpp" namespace caffe { template <typename Dtype> __global__ void set_value_to_constant(const int nthreads, Dtype value, int size, int i, Dtype* dst) { CUDA_KERNEL_LOOP(index, nthreads) { dst[index * size + i] = value; } } template <typename Dtype> __global__ void copy_values(const int nthreads, int size_src, int k, const Dtype* src, int size_dst, int i, Dtype* dst) { CUDA_KERNEL_LOOP(index, nthreads) { dst[index * size_dst + i] = src[index * size_src + k]; } } template <typename Dtype> __global__ void SpatialTransformerForwardGPU(const int nthreads, int N, int C, int output_H_, int output_W_, int H, int W, const Dtype* input_grid_data, const Dtype* U, Dtype* V) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % output_W_; const int s = (index / output_W_) % output_H_; const int j = (index / (output_W_ * output_H_)) % C; const int i = index / (output_W_ * output_H_ * C); const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 2) * i; const int row_idx = output_W_ * s + t; const Dtype px = coordinates[row_idx * 2]; const Dtype py = coordinates[row_idx * 2 + 1]; const int V_offset = index; V[V_offset] = (Dtype)0.; const Dtype x = (px + 1) / 2 * H; const Dtype y = (py + 1) / 2 * W; int m, n; Dtype w; const Dtype* pic = U + i * (C * H * W) + j * (H * W); m = floor(x); n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (y - n)); V[V_offset] += w * pic[m * W + n]; } m = floor(x) + 1; n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (y - n)); V[V_offset] += w * pic[m * W + n]; } m = floor(x); n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (n - y)); V[V_offset] += w * pic[m * W + n]; } m = floor(x) + 1; n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (n - y)); V[V_offset] += w * pic[m * W + n]; } } } template <typename Dtype> void SpatialTransformerLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { string prefix = "SpatialTransformerLayer::Forward_gpu::\t"; const Dtype* U = bottom[0]->gpu_data(); const Dtype* theta = bottom[1]->gpu_data(); const Dtype* output_grid_data = output_grid.gpu_data(); Dtype* full_theta_data = full_theta.mutable_gpu_data(); Dtype* input_grid_data = input_grid.mutable_gpu_data(); Dtype* V = top[0]->mutable_gpu_data(); caffe_gpu_set(input_grid.count(), (Dtype)0, input_grid_data); caffe_gpu_set(top[0]->count(), (Dtype)0, V); // compute full_theta int k = 0; const int num_threads = N; for(int i=0; i<6; ++i) { if(is_pre_defined_theta[i]) { hipLaunchKernelGGL(( set_value_to_constant<Dtype>), dim3(CAFFE_GET_BLOCKS(num_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_threads, pre_defined_theta[i], 6, i, full_theta_data); //std::cout << "Setting value " << pre_defined_theta[i] << " to "<< i << // "/6 of full_theta_data" << std::endl; } else { hipLaunchKernelGGL(( copy_values<Dtype>), dim3(CAFFE_GET_BLOCKS(num_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_threads, 6 - pre_defined_count, k, theta, 6, i, full_theta_data); //std::cout << "Copying " << k << "/" << 6 - pre_defined_count << " of theta to " // << i << "/6 of full_theta_data" << std::endl; ++ k; } } // compute out 
input_grid_data for(int i = 0; i < N; ++i) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, output_H_ * output_W_, 2, 3, (Dtype)1., output_grid_data, full_theta_data + 6 * i, (Dtype)0., input_grid_data + (output_H_ * output_W_ * 2) * i); } const int nthreads = N * C * output_H_ * output_W_; hipLaunchKernelGGL(( SpatialTransformerForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, N, C, output_H_, output_W_, H, W, input_grid_data, U, V); } template <typename Dtype> __global__ void SpatialTransformerBackwardGPU_dTheta(const int nthreads, int C, int output_H_, int output_W_, int H, int W, const Dtype* input_grid_data, const Dtype* dV_array, const Dtype* U_array, Dtype* dTheta_tmp_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % output_W_; const int s = (index / output_W_) % output_H_; const int j = (index / (output_W_ * output_H_)) % C; const int i = index / (output_W_ * output_H_ * C); const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 2) * i; const int row_idx = output_W_ * s + t; const Dtype px = coordinates[row_idx * 2]; const Dtype py = coordinates[row_idx * 2 + 1]; Dtype delta_dpx = (Dtype)0.; Dtype delta_dpy = (Dtype)0.; const Dtype x = (px + 1) / 2 * H; const Dtype y = (py + 1) / 2 * W; const int dV_offset = index; const Dtype dV = dV_array[dV_offset]; int m, n; const Dtype* U = U_array + i * (C * H * W) + j * (H * W); // left-bottom neighbor m = floor(x); n = floor(y); if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx -= (1 - (y - n)) * U[m * W + n] * dV * H / 2; delta_dpy -= (1 - (x - m)) * U[m * W + n] * dV * W / 2; } // left-top neighbor m = floor(x); n = floor(y) + 1; if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx -= (1 - (n - y)) * U[m * W + n] * dV * H / 2; delta_dpy += (1 - (x - m)) * U[m * W + n] * dV * W / 2; } // right-bottom neighbor m = floor(x) + 1; n = floor(y); if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx += (1 - (y - n)) * U[m * W + n] * dV * H / 2; delta_dpy -= (1 - (m - x)) * U[m * W + n] * dV * W / 2; } // right-top neighbor m = floor(x) + 1; n = floor(y) + 1; if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx += (1 - (n - y)) * U[m * W + n] * dV * H / 2; delta_dpy += (1 - (m - x)) * U[m * W + n] * dV * W / 2; } int idx = j * (output_H_ * output_W_) + s * output_W_ + t; dTheta_tmp_diff[(6 * i) * (output_H_ * output_W_ * C) + idx] += delta_dpx * (s * 1.0 / output_H_ * 2 - 1); dTheta_tmp_diff[(6 * i + 1) * (output_H_ * output_W_ * C) + idx] += delta_dpx * (t * 1.0 / output_W_ * 2 - 1); dTheta_tmp_diff[(6 * i + 2) * (output_H_ * output_W_ * C) + idx] += delta_dpx; dTheta_tmp_diff[(6 * i + 3) * (output_H_ * output_W_ * C) + idx] += delta_dpy * (s * 1.0 / output_H_ * 2 - 1); dTheta_tmp_diff[(6 * i + 4) * (output_H_ * output_W_ * C) + idx] += delta_dpy * (t * 1.0 / output_W_ * 2 - 1); dTheta_tmp_diff[(6 * i + 5) * (output_H_ * output_W_ * C) + idx] += delta_dpy; } } template <typename Dtype> __global__ void SpatialTransformerBackwardGPU_dU(const int nthreads, const int C, const int W, const int H, const int output_H_, const int output_W_, const Dtype* input_grid_data, const Dtype* dV, Dtype* dU) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % output_W_; const int s = (index / output_W_) % output_H_; const int j = (index / (output_W_ * output_H_)) % C; const int i = index / (output_W_ * output_H_ * C); const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 2) * i; const int row_idx = output_W_ * s + t; const Dtype px = coordinates[row_idx * 2]; const 
Dtype py = coordinates[row_idx * 2 + 1]; const int V_offset = index; const Dtype x = (px + 1) / 2 * H; const Dtype y = (py + 1) / 2 * W; int m, n; Dtype w; Dtype* pic = dU + i * (C * H * W) + j * (H * W); m = floor(x); n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (y - n)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } m = floor(x) + 1; n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (y - n)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } m = floor(x); n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (n - y)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } m = floor(x) + 1; n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (n - y)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } } } template <typename Dtype> void SpatialTransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { string prefix = "SpatialTransformerLayer::Backward_GPU::\t"; const Dtype* dV = top[0]->gpu_diff(); const Dtype* input_grid_data = input_grid.gpu_data(); const Dtype* U = bottom[0]->gpu_data(); Dtype* dFull_theta = full_theta.mutable_gpu_diff(); Dtype* dTheta = bottom[1]->mutable_gpu_diff(); Dtype* dTheta_tmp_diff = dTheta_tmp.mutable_gpu_diff(); caffe_gpu_set(dTheta_tmp.count(), (Dtype)0., dTheta_tmp_diff); const int nthreads = N * C * output_H_ * output_W_; hipLaunchKernelGGL(( SpatialTransformerBackwardGPU_dTheta<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, C, output_H_, output_W_, H, W, input_grid_data, dV, U, dTheta_tmp_diff); Dtype* all_ones_2_data = all_ones_2.mutable_gpu_data(); caffe_gpu_set(all_ones_2.count(), (Dtype)1., all_ones_2_data); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, full_theta.count(), 1, output_H_ * output_W_ * C, (Dtype)1., dTheta_tmp_diff, all_ones_2_data, (Dtype)0., dFull_theta); /*const Dtype* db_dFull_theta = full_theta.cpu_diff(); for(int i=0; i<full_theta.count(); ++i) { std::cout << db_dFull_theta[i] << " "; } std::cout<<std::endl;*/ int k = 0; const int num_threads = N; for(int i=0; i<6; ++i) { if(!is_pre_defined_theta[i]) { hipLaunchKernelGGL(( copy_values<Dtype>), dim3(CAFFE_GET_BLOCKS(num_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_threads, 6, i, dFull_theta, 6 - pre_defined_count, k, dTheta); //std::cout << "Copying " << i << "/6 of dFull_theta to " << k << "/" << // 6 - pre_defined_count << " of dTheta" << std::endl; ++ k; } } /*const Dtype* db_dtheta = bottom[1]->cpu_diff(); for(int i=0; i<bottom[1]->count(); ++i) { std::cout << db_dtheta[i] << " "; } std::cout<<std::endl;*/ if(to_compute_dU_) { Dtype* dU = bottom[0]->mutable_gpu_diff(); caffe_gpu_set(bottom[0]->count(), (Dtype)0., dU); const int nthreads = N * C * output_H_ * output_W_; hipLaunchKernelGGL(( SpatialTransformerBackwardGPU_dU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3( CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, C, W, H, output_H_, output_W_, input_grid_data, dV, dU); } } INSTANTIATE_LAYER_GPU_FUNCS(SpatialTransformerLayer); } // namespace caffe
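Comparing this hipified .hip source with the original .cu that follows, the systematic change is the kernel-launch syntax: hipify rewrites each CUDA triple-chevron launch into hipLaunchKernelGGL with explicit dim3 grid/block arguments plus shared-memory size and stream (both 0 here). The sketch below uses a hypothetical scale_kernel, not part of this layer, to make the mapping explicit; CUDA_KERNEL_LOOP, CAFFE_GET_BLOCKS, and CAFFE_CUDA_NUM_THREADS are the Caffe macros already used throughout the layer above.

// Hypothetical Caffe-style kernel, only to illustrate the launch rewrite.
template <typename Dtype>
__global__ void scale_kernel(const int n, const Dtype alpha, Dtype* data) {
  CUDA_KERNEL_LOOP(index, n) {
    data[index] *= alpha;
  }
}

// CUDA launch, as written in the .cu version:
//   scale_kernel<Dtype><<<CAFFE_GET_BLOCKS(n), CAFFE_CUDA_NUM_THREADS>>>(
//       n, alpha, data);
//
// HIP launch, as produced by hipify in the .hip version:
//   hipLaunchKernelGGL((scale_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(n)),
//       dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n, alpha, data);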
813d1a6b5ccde31218eafca79988d8e571486945.cu
#include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/util/gpu_util.cuh" #include "caffe/st_layer.hpp" #include "caffe/util/benchmark.hpp" namespace caffe { template <typename Dtype> __global__ void set_value_to_constant(const int nthreads, Dtype value, int size, int i, Dtype* dst) { CUDA_KERNEL_LOOP(index, nthreads) { dst[index * size + i] = value; } } template <typename Dtype> __global__ void copy_values(const int nthreads, int size_src, int k, const Dtype* src, int size_dst, int i, Dtype* dst) { CUDA_KERNEL_LOOP(index, nthreads) { dst[index * size_dst + i] = src[index * size_src + k]; } } template <typename Dtype> __global__ void SpatialTransformerForwardGPU(const int nthreads, int N, int C, int output_H_, int output_W_, int H, int W, const Dtype* input_grid_data, const Dtype* U, Dtype* V) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % output_W_; const int s = (index / output_W_) % output_H_; const int j = (index / (output_W_ * output_H_)) % C; const int i = index / (output_W_ * output_H_ * C); const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 2) * i; const int row_idx = output_W_ * s + t; const Dtype px = coordinates[row_idx * 2]; const Dtype py = coordinates[row_idx * 2 + 1]; const int V_offset = index; V[V_offset] = (Dtype)0.; const Dtype x = (px + 1) / 2 * H; const Dtype y = (py + 1) / 2 * W; int m, n; Dtype w; const Dtype* pic = U + i * (C * H * W) + j * (H * W); m = floor(x); n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (y - n)); V[V_offset] += w * pic[m * W + n]; } m = floor(x) + 1; n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (y - n)); V[V_offset] += w * pic[m * W + n]; } m = floor(x); n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (n - y)); V[V_offset] += w * pic[m * W + n]; } m = floor(x) + 1; n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (n - y)); V[V_offset] += w * pic[m * W + n]; } } } template <typename Dtype> void SpatialTransformerLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { string prefix = "SpatialTransformerLayer::Forward_gpu::\t"; const Dtype* U = bottom[0]->gpu_data(); const Dtype* theta = bottom[1]->gpu_data(); const Dtype* output_grid_data = output_grid.gpu_data(); Dtype* full_theta_data = full_theta.mutable_gpu_data(); Dtype* input_grid_data = input_grid.mutable_gpu_data(); Dtype* V = top[0]->mutable_gpu_data(); caffe_gpu_set(input_grid.count(), (Dtype)0, input_grid_data); caffe_gpu_set(top[0]->count(), (Dtype)0, V); // compute full_theta int k = 0; const int num_threads = N; for(int i=0; i<6; ++i) { if(is_pre_defined_theta[i]) { set_value_to_constant<Dtype><<<CAFFE_GET_BLOCKS(num_threads), CAFFE_CUDA_NUM_THREADS>>>( num_threads, pre_defined_theta[i], 6, i, full_theta_data); //std::cout << "Setting value " << pre_defined_theta[i] << " to "<< i << // "/6 of full_theta_data" << std::endl; } else { copy_values<Dtype><<<CAFFE_GET_BLOCKS(num_threads), CAFFE_CUDA_NUM_THREADS>>>(num_threads, 6 - pre_defined_count, k, theta, 6, i, full_theta_data); //std::cout << "Copying " << k << "/" << 6 - pre_defined_count << " of theta to " // << i << "/6 of full_theta_data" << std::endl; ++ k; } } // compute out input_grid_data for(int i = 0; i < N; ++i) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, output_H_ * output_W_, 2, 3, (Dtype)1., output_grid_data, full_theta_data + 6 * 
i, (Dtype)0., input_grid_data + (output_H_ * output_W_ * 2) * i); } const int nthreads = N * C * output_H_ * output_W_; SpatialTransformerForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, N, C, output_H_, output_W_, H, W, input_grid_data, U, V); } template <typename Dtype> __global__ void SpatialTransformerBackwardGPU_dTheta(const int nthreads, int C, int output_H_, int output_W_, int H, int W, const Dtype* input_grid_data, const Dtype* dV_array, const Dtype* U_array, Dtype* dTheta_tmp_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % output_W_; const int s = (index / output_W_) % output_H_; const int j = (index / (output_W_ * output_H_)) % C; const int i = index / (output_W_ * output_H_ * C); const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 2) * i; const int row_idx = output_W_ * s + t; const Dtype px = coordinates[row_idx * 2]; const Dtype py = coordinates[row_idx * 2 + 1]; Dtype delta_dpx = (Dtype)0.; Dtype delta_dpy = (Dtype)0.; const Dtype x = (px + 1) / 2 * H; const Dtype y = (py + 1) / 2 * W; const int dV_offset = index; const Dtype dV = dV_array[dV_offset]; int m, n; const Dtype* U = U_array + i * (C * H * W) + j * (H * W); // left-bottom neighbor m = floor(x); n = floor(y); if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx -= (1 - (y - n)) * U[m * W + n] * dV * H / 2; delta_dpy -= (1 - (x - m)) * U[m * W + n] * dV * W / 2; } // left-top neighbor m = floor(x); n = floor(y) + 1; if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx -= (1 - (n - y)) * U[m * W + n] * dV * H / 2; delta_dpy += (1 - (x - m)) * U[m * W + n] * dV * W / 2; } // right-bottom neighbor m = floor(x) + 1; n = floor(y); if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx += (1 - (y - n)) * U[m * W + n] * dV * H / 2; delta_dpy -= (1 - (m - x)) * U[m * W + n] * dV * W / 2; } // right-top neighbor m = floor(x) + 1; n = floor(y) + 1; if(m >= 0 && m < H && n >= 0 && n < W) { delta_dpx += (1 - (n - y)) * U[m * W + n] * dV * H / 2; delta_dpy += (1 - (m - x)) * U[m * W + n] * dV * W / 2; } int idx = j * (output_H_ * output_W_) + s * output_W_ + t; dTheta_tmp_diff[(6 * i) * (output_H_ * output_W_ * C) + idx] += delta_dpx * (s * 1.0 / output_H_ * 2 - 1); dTheta_tmp_diff[(6 * i + 1) * (output_H_ * output_W_ * C) + idx] += delta_dpx * (t * 1.0 / output_W_ * 2 - 1); dTheta_tmp_diff[(6 * i + 2) * (output_H_ * output_W_ * C) + idx] += delta_dpx; dTheta_tmp_diff[(6 * i + 3) * (output_H_ * output_W_ * C) + idx] += delta_dpy * (s * 1.0 / output_H_ * 2 - 1); dTheta_tmp_diff[(6 * i + 4) * (output_H_ * output_W_ * C) + idx] += delta_dpy * (t * 1.0 / output_W_ * 2 - 1); dTheta_tmp_diff[(6 * i + 5) * (output_H_ * output_W_ * C) + idx] += delta_dpy; } } template <typename Dtype> __global__ void SpatialTransformerBackwardGPU_dU(const int nthreads, const int C, const int W, const int H, const int output_H_, const int output_W_, const Dtype* input_grid_data, const Dtype* dV, Dtype* dU) { CUDA_KERNEL_LOOP(index, nthreads) { const int t = index % output_W_; const int s = (index / output_W_) % output_H_; const int j = (index / (output_W_ * output_H_)) % C; const int i = index / (output_W_ * output_H_ * C); const Dtype* coordinates = input_grid_data + (output_H_ * output_W_ * 2) * i; const int row_idx = output_W_ * s + t; const Dtype px = coordinates[row_idx * 2]; const Dtype py = coordinates[row_idx * 2 + 1]; const int V_offset = index; const Dtype x = (px + 1) / 2 * H; const Dtype y = (py + 1) / 2 * W; int m, n; Dtype w; Dtype* pic = dU + i * (C * H * W) + j * (H * W); m = 
floor(x); n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (y - n)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } m = floor(x) + 1; n = floor(y); w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (y - n)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } m = floor(x); n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (x - m)) * (1 - (n - y)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } m = floor(x) + 1; n = floor(y) + 1; w = 0; if(m >= 0 && m < H && n >= 0 && n < W) { w = (1 - (m - x)) * (1 - (n - y)); caffe_gpu_atomic_add(w * dV[V_offset], pic + (m * W + n)); } } } template <typename Dtype> void SpatialTransformerLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { string prefix = "SpatialTransformerLayer::Backward_GPU::\t"; const Dtype* dV = top[0]->gpu_diff(); const Dtype* input_grid_data = input_grid.gpu_data(); const Dtype* U = bottom[0]->gpu_data(); Dtype* dFull_theta = full_theta.mutable_gpu_diff(); Dtype* dTheta = bottom[1]->mutable_gpu_diff(); Dtype* dTheta_tmp_diff = dTheta_tmp.mutable_gpu_diff(); caffe_gpu_set(dTheta_tmp.count(), (Dtype)0., dTheta_tmp_diff); const int nthreads = N * C * output_H_ * output_W_; SpatialTransformerBackwardGPU_dTheta<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, C, output_H_, output_W_, H, W, input_grid_data, dV, U, dTheta_tmp_diff); Dtype* all_ones_2_data = all_ones_2.mutable_gpu_data(); caffe_gpu_set(all_ones_2.count(), (Dtype)1., all_ones_2_data); caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, full_theta.count(), 1, output_H_ * output_W_ * C, (Dtype)1., dTheta_tmp_diff, all_ones_2_data, (Dtype)0., dFull_theta); /*const Dtype* db_dFull_theta = full_theta.cpu_diff(); for(int i=0; i<full_theta.count(); ++i) { std::cout << db_dFull_theta[i] << " "; } std::cout<<std::endl;*/ int k = 0; const int num_threads = N; for(int i=0; i<6; ++i) { if(!is_pre_defined_theta[i]) { copy_values<Dtype><<<CAFFE_GET_BLOCKS(num_threads), CAFFE_CUDA_NUM_THREADS>>>(num_threads, 6, i, dFull_theta, 6 - pre_defined_count, k, dTheta); //std::cout << "Copying " << i << "/6 of dFull_theta to " << k << "/" << // 6 - pre_defined_count << " of dTheta" << std::endl; ++ k; } } /*const Dtype* db_dtheta = bottom[1]->cpu_diff(); for(int i=0; i<bottom[1]->count(); ++i) { std::cout << db_dtheta[i] << " "; } std::cout<<std::endl;*/ if(to_compute_dU_) { Dtype* dU = bottom[0]->mutable_gpu_diff(); caffe_gpu_set(bottom[0]->count(), (Dtype)0., dU); const int nthreads = N * C * output_H_ * output_W_; SpatialTransformerBackwardGPU_dU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, C, W, H, output_H_, output_W_, input_grid_data, dV, dU); } } INSTANTIATE_LAYER_GPU_FUNCS(SpatialTransformerLayer); } // namespace caffe
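Note: this .cu file and its .hip counterpart above differ essentially only in headers and kernel-launch syntax; hipify rewrites the triple-chevron launch into hipLaunchKernelGGL with the grid, block, dynamic shared memory and stream passed explicitly before the kernel arguments. The correspondence, shown on a small hypothetical kernel:

```cpp
__global__ void axpy(int n, float a, const float* x, float* y) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) y[i] = a * x[i] + y[i];
}

void launch_axpy(int n, float a, const float* x, float* y) {
  dim3 block(256), grid((n + block.x - 1) / block.x);
  // CUDA source form:
  axpy<<<grid, block, 0, 0>>>(n, a, x, y);
  // Equivalent hipified form (as used throughout the .hip files in this set):
  // hipLaunchKernelGGL(axpy, grid, block, 0, 0, n, a, x, y);
}
```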
106f7d3aaba30830ea4fb795d32f177e3f3ec2a1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHUNN.h" #include "common.h" #include "THHDeviceTensor.cuh" #include "THHDeviceTensorUtils.cuh" #include "THHDeviceUtils.cuh" __global__ void cuda_VolumetricMaxPooling_updateOutput( THCDeviceTensor<float, 4> input, THCDeviceTensor<float, 4> indices, THCDeviceTensor<float, 4> output, int kT, int kH, int kW, int dT, int dH, int dW, int padT, int padH, int padW) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = blockIdx.z % output.getSize(1); // output frame/time int slice = blockIdx.z / output.getSize(1); // output slice/feature if (oRow < output.getSize(2) && oColumn < output.getSize(3)) { int iColumn = oColumn * dW - padW; int iRow = oRow * dH - padH; int iFrame = oFrame * dT - padT; int maxColumn = 0; int maxRow = 0; int maxFrame = 0; float max = -THInf; for (int frame = 0; frame < kT; ++frame) { if (iFrame + frame < input.getSize(1) && iFrame + frame >= 0) { for (int row = 0; row < kH; ++row) { if (iRow + row < input.getSize(2) && iRow + row >= 0) { for (int column = 0; column < kW; ++column) { if (iColumn + column < input.getSize(3) && iColumn + column >= 0) { float val = input[slice][iFrame + frame][iRow + row][iColumn + column]; if (max < val) { max = val; maxColumn = column; maxRow = row; maxFrame = frame; } } } } } } } output[slice][oFrame][oRow][oColumn] = max; float *idx = &indices[slice][oFrame][oRow][oColumn]; ((unsigned char*)(idx))[0] = maxFrame; ((unsigned char*)(idx))[1] = maxRow; ((unsigned char*)(idx))[2] = maxColumn; ((unsigned char*)(idx))[3] = 0; } } template <int KERNEL_WIDTH> __global__ void cuda_VolumetricMaxPooling_updateOutput( THCDeviceTensor<float, 4> input, THCDeviceTensor<float, 4> indices, THCDeviceTensor<float, 4> output, int kT, int kH, int dT, int dH, int dW, int padT, int padH, int padW) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = blockIdx.z % output.getSize(1); // output frame/time int slice = blockIdx.z / output.getSize(1); // output slice/feature if (oRow < output.getSize(2) && oColumn < output.getSize(3)) { int iColumn = oColumn * dW - padW; int iRow = oRow * dH - padH; int iFrame = oFrame * dT - padT; int maxColumn = 0; int maxRow = 0; int maxFrame; float max = -THInf; for (int frame = 0; frame < kT; ++frame) { if (iFrame + frame < input.getSize(1) && iFrame + frame >= 0) { for (int row = 0; row < kH; ++row) { if (iRow + row < input.getSize(2) && iRow + row >= 0) { for (int column = 0; column < KERNEL_WIDTH; ++column) { if (iColumn + column < input.getSize(3) && iColumn + column >= 0) { float val = input[slice][iFrame + frame][iRow + row][iColumn + column]; if (max < val) { max = val; maxColumn = column; maxRow = row; maxFrame = frame; } } } } } } } output[slice][oFrame][oRow][oColumn] = max; float *idx = &indices[slice][oFrame][oRow][oColumn]; ((unsigned char*)(idx))[0] = maxFrame; ((unsigned char*)(idx))[1] = maxRow; ((unsigned char*)(idx))[2] = maxColumn; ((unsigned char*)(idx))[3] = 0; } } #define UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \ hipLaunchKernelGGL(( cuda_VolumetricMaxPooling_updateOutput<KW>), dim3(grid), dim3(block), \ 0, THCState_getCurrentStream(state), \ cudaInput, cudaIndices, cudaOutput, kT, kH, dT, dH, dW, padT, padH, padW); \ break void THNN_CudaVolumetricMaxPooling_updateOutput( THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *indices, int kT, int kW, int kH, int dT, 
int dW, int dH, int padT, int padW, int padH, bool ceilMode) { int batchSize; int inputSlices; int inputTime; int inputHeight; int inputWidth; int outputTime; int outputHeight; int outputWidth; THCUNN_assertSameGPU(state, 3, input, indices, output); if (THCudaTensor_nDimension(state, input) == 4) { THArgCheck( THCudaTensor_size(state, input, 1) >= kT && THCudaTensor_size(state, input, 2) >= kH && THCudaTensor_size(state, input, 3) >= kW, 2, "input image smaller than kernel size" ); /* sizes */ batchSize = 1; inputSlices = THCudaTensor_size(state, input, 0); inputTime = THCudaTensor_size(state, input, 1); inputHeight = THCudaTensor_size(state, input, 2); inputWidth = THCudaTensor_size(state, input, 3); } else if (THCudaTensor_nDimension(state, input) == 5) { THArgCheck( THCudaTensor_size(state, input, 4) >= kW && THCudaTensor_size(state, input, 3) >= kH && THCudaTensor_size(state, input, 2) >= kT, 2, "input image smaller than kernel size" ); /* sizes */ batchSize = THCudaTensor_size(state, input, 0); inputSlices = THCudaTensor_size(state, input, 1); inputTime = THCudaTensor_size(state, input, 2); inputHeight = THCudaTensor_size(state, input, 3); inputWidth = THCudaTensor_size(state, input, 4); } else { THArgCheck(false, 2, "4D or 5D tensor expected"); } THArgCheck(kT/2 >= padT && kW/2 >= padW && kH/2 >= padH, 2, "pad should be smaller than half of kernel size" ); if (ceilMode) { outputTime = (int)(ceil((float)(inputTime - kT + 2*padT) / dT)) + 1; outputHeight = (int)(ceil((float)(inputHeight - kH + 2*padH) / dH)) + 1; outputWidth = (int)(ceil((float)(inputWidth - kW + 2*padW) / dW)) + 1; } else { outputTime = (int)(floor((float)(inputTime - kT + 2*padT) / dT)) + 1; outputHeight = (int)(floor((float)(inputHeight - kH + 2*padH) / dH)) + 1; outputWidth = (int)(floor((float)(inputWidth - kW + 2*padW) / dW)) + 1; } if (padT || padW || padH) { if ((outputTime - 1)*dT >= inputTime + padT) --outputTime; if ((outputHeight - 1)*dH >= inputHeight + padH) --outputHeight; if ((outputWidth - 1)*dW >= inputWidth + padW) --outputWidth; } if (input->nDimension == 4) /* 4D */ { /* resize output */ THCudaTensor_resize4d(state, output, inputSlices, outputTime, outputHeight, outputWidth); /* indices pack ti,i,j locations for each output point as uchar into each float of the tensor */ THCudaTensor_resize4d(state, indices, inputSlices, outputTime, outputHeight, outputWidth); } else { /* 5D */ THCudaTensor_resize5d(state, output, batchSize, inputSlices, outputTime, outputHeight, outputWidth); // Index tensor packs index offsets as uchars into floats THCudaTensor_resize5d(state, indices, batchSize, inputSlices, outputTime, outputHeight, outputWidth); } input = THCudaTensor_newContiguous(state, input); // Collapse batch and feature dimensions THCDeviceTensor<float, 4> cudaInput; THCDeviceTensor<float, 4> cudaOutput; if (THCudaTensor_nDimension(state, input) == 4) { cudaInput = toDeviceTensor<float, 4>(state, input); cudaOutput = toDeviceTensor<float, 4>(state, output); } else { cudaInput = toDeviceTensor<float, 5>(state, input).downcastOuter<4>(); cudaOutput = toDeviceTensor<float, 5>(state, output).downcastOuter<4>(); } THLongStorage *indicesSize = THLongStorage_newWithSize(4); long indicesSizeRaw[4] = { batchSize * inputSlices, outputTime, outputHeight, outputWidth }; THLongStorage_rawCopy(indicesSize, indicesSizeRaw); THCudaTensor *indices1 = THCudaTensor_newWithStorage( state, THCudaTensor_storage(state, indices), THCudaTensor_storageOffset(state, indices), indicesSize, NULL); THLongStorage_free(indicesSize); 
THCDeviceTensor<float, 4> cudaIndices = toDeviceTensor<float, 4>(state, indices1); dim3 block(32, 8); dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)), THCCeilDiv(outputHeight, static_cast<int>(block.y)), outputTime * inputSlices * batchSize); switch (kW) { UPDATE_OUTPUT_KERNEL_WIDTH(1); UPDATE_OUTPUT_KERNEL_WIDTH(2); UPDATE_OUTPUT_KERNEL_WIDTH(3); UPDATE_OUTPUT_KERNEL_WIDTH(4); UPDATE_OUTPUT_KERNEL_WIDTH(5); UPDATE_OUTPUT_KERNEL_WIDTH(6); UPDATE_OUTPUT_KERNEL_WIDTH(7); default: hipLaunchKernelGGL(( cuda_VolumetricMaxPooling_updateOutput), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), cudaInput, cudaIndices, cudaOutput, kT, kH, kW, dT, dH, dW, padT, padH, padW); } THCudaTensor_free(state, input); THCudaTensor_free(state, indices1); } #undef UPDATE_OUTPUT_KERNEL_WIDTH __global__ void cuda_VolumetricMaxPooling_updateGradInput( THCDeviceTensor<float, 4> gradOutput, THCDeviceTensor<float, 4> indices, THCDeviceTensor<float, 4> gradInput, int dT, int dH, int dW, int padT, int padH, int padW) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = blockIdx.z % gradOutput.getSize(1); // output frame/time int slice = blockIdx.z / gradOutput.getSize(1); // output slice/feature if (oRow < gradOutput.getSize(2) && oColumn < gradOutput.getSize(3)) { float *idx = &indices[slice][oFrame][oRow][oColumn]; int iFrame = ((unsigned char*)(idx))[0] + oFrame * dT - padT; int iRow = ((unsigned char*)(idx))[1] + oRow * dH - padH; int iColumn = ((unsigned char*)(idx))[2] + oColumn * dW - padW; atomicAdd(&gradInput[slice][iFrame][iRow][iColumn], gradOutput[slice][oFrame][oRow][oColumn]); } } void THNN_CudaVolumetricMaxPooling_updateGradInput( THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *indices, int dT, int dW, int dH, int padT, int padW, int padH) { // Resize and initialize result tensor. 
THCudaTensor_resizeAs(state, gradInput, input); THCudaTensor_zero(state, gradInput); int batchSize; int inputSlices; int outputTime; int outputHeight; int outputWidth; THCUNN_assertSameGPU(state, 4, input, indices, gradOutput, gradInput); if (THCudaTensor_nDimension(state, input) == 4) /* 4D */ { batchSize = 1; inputSlices = THCudaTensor_size(state, input, 0); outputTime = THCudaTensor_size(state, gradOutput, 1); outputHeight = THCudaTensor_size(state, gradOutput, 2); outputWidth = THCudaTensor_size(state, gradOutput, 3); } else { batchSize = THCudaTensor_size(state, input, 0); inputSlices = THCudaTensor_size(state, input, 1); outputTime = THCudaTensor_size(state, gradOutput, 2); outputHeight = THCudaTensor_size(state, gradOutput, 3); outputWidth = THCudaTensor_size(state, gradOutput, 4); } gradOutput = THCudaTensor_newContiguous(state, gradOutput); // Collapse batch and feature dimensions THCDeviceTensor<float, 4> cudaGradInput; THCDeviceTensor<float, 4> cudaGradOutput; if (THCudaTensor_nDimension(state, input) == 4) { cudaGradInput = toDeviceTensor<float, 4>(state, gradInput); cudaGradOutput = toDeviceTensor<float, 4>(state, gradOutput); } else { cudaGradInput = toDeviceTensor<float, 5>(state, gradInput).downcastOuter<4>(); cudaGradOutput = toDeviceTensor<float, 5>(state, gradOutput).downcastOuter<4>(); } THLongStorage *indicesSize = THLongStorage_newWithSize(4); long indicesSizeRaw[4] = { batchSize * inputSlices, outputTime, outputHeight, outputWidth }; THLongStorage_rawCopy(indicesSize, indicesSizeRaw); THCudaTensor *indices1 = THCudaTensor_newWithStorage( state, THCudaTensor_storage(state, indices), THCudaTensor_storageOffset(state, indices), indicesSize, NULL); THLongStorage_free(indicesSize); THCDeviceTensor<float, 4> cudaIndices = toDeviceTensor<float, 4>(state, indices1); dim3 block(32, 8); dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)), THCCeilDiv(outputHeight, static_cast<int>(block.y)), outputTime * inputSlices * batchSize); hipLaunchKernelGGL(( cuda_VolumetricMaxPooling_updateGradInput), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), cudaGradOutput, cudaIndices, cudaGradInput, dT, dH, dW, padT, padH, padW); // cleanup THCudaTensor_free(state, gradOutput); THCudaTensor_free(state, indices1); }
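Note: updateOutput sizes each pooled dimension with the usual (in − k + 2·pad)/stride + 1 rule, using ceil or floor depending on ceilMode, then drops a trailing window that would start beyond the padded input. A per-dimension host-side restatement of that arithmetic (hypothetical helper; the original applies the trailing-window check whenever any dimension is padded):

```cpp
#include <cmath>

// Illustrative per-dimension version of the size logic in
// THNN_CudaVolumetricMaxPooling_updateOutput.
static int pooled_extent(int in, int k, int pad, int stride, bool ceilMode) {
  const double span = static_cast<double>(in - k + 2 * pad) / stride;
  int out = static_cast<int>(ceilMode ? std::ceil(span) : std::floor(span)) + 1;
  // The last window must start inside the input plus padding.
  if (pad > 0 && (out - 1) * stride >= in + pad) --out;
  return out;
}
```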
106f7d3aaba30830ea4fb795d32f177e3f3ec2a1.cu
#include "THCUNN.h" #include "common.h" #include "THCDeviceTensor.cuh" #include "THCDeviceTensorUtils.cuh" #include "THCDeviceUtils.cuh" __global__ void cuda_VolumetricMaxPooling_updateOutput( THCDeviceTensor<float, 4> input, THCDeviceTensor<float, 4> indices, THCDeviceTensor<float, 4> output, int kT, int kH, int kW, int dT, int dH, int dW, int padT, int padH, int padW) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = blockIdx.z % output.getSize(1); // output frame/time int slice = blockIdx.z / output.getSize(1); // output slice/feature if (oRow < output.getSize(2) && oColumn < output.getSize(3)) { int iColumn = oColumn * dW - padW; int iRow = oRow * dH - padH; int iFrame = oFrame * dT - padT; int maxColumn = 0; int maxRow = 0; int maxFrame = 0; float max = -THInf; for (int frame = 0; frame < kT; ++frame) { if (iFrame + frame < input.getSize(1) && iFrame + frame >= 0) { for (int row = 0; row < kH; ++row) { if (iRow + row < input.getSize(2) && iRow + row >= 0) { for (int column = 0; column < kW; ++column) { if (iColumn + column < input.getSize(3) && iColumn + column >= 0) { float val = input[slice][iFrame + frame][iRow + row][iColumn + column]; if (max < val) { max = val; maxColumn = column; maxRow = row; maxFrame = frame; } } } } } } } output[slice][oFrame][oRow][oColumn] = max; float *idx = &indices[slice][oFrame][oRow][oColumn]; ((unsigned char*)(idx))[0] = maxFrame; ((unsigned char*)(idx))[1] = maxRow; ((unsigned char*)(idx))[2] = maxColumn; ((unsigned char*)(idx))[3] = 0; } } template <int KERNEL_WIDTH> __global__ void cuda_VolumetricMaxPooling_updateOutput( THCDeviceTensor<float, 4> input, THCDeviceTensor<float, 4> indices, THCDeviceTensor<float, 4> output, int kT, int kH, int dT, int dH, int dW, int padT, int padH, int padW) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = blockIdx.z % output.getSize(1); // output frame/time int slice = blockIdx.z / output.getSize(1); // output slice/feature if (oRow < output.getSize(2) && oColumn < output.getSize(3)) { int iColumn = oColumn * dW - padW; int iRow = oRow * dH - padH; int iFrame = oFrame * dT - padT; int maxColumn = 0; int maxRow = 0; int maxFrame; float max = -THInf; for (int frame = 0; frame < kT; ++frame) { if (iFrame + frame < input.getSize(1) && iFrame + frame >= 0) { for (int row = 0; row < kH; ++row) { if (iRow + row < input.getSize(2) && iRow + row >= 0) { for (int column = 0; column < KERNEL_WIDTH; ++column) { if (iColumn + column < input.getSize(3) && iColumn + column >= 0) { float val = input[slice][iFrame + frame][iRow + row][iColumn + column]; if (max < val) { max = val; maxColumn = column; maxRow = row; maxFrame = frame; } } } } } } } output[slice][oFrame][oRow][oColumn] = max; float *idx = &indices[slice][oFrame][oRow][oColumn]; ((unsigned char*)(idx))[0] = maxFrame; ((unsigned char*)(idx))[1] = maxRow; ((unsigned char*)(idx))[2] = maxColumn; ((unsigned char*)(idx))[3] = 0; } } #define UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \ cuda_VolumetricMaxPooling_updateOutput<KW><<<grid, block, \ 0, THCState_getCurrentStream(state)>>>( \ cudaInput, cudaIndices, cudaOutput, kT, kH, dT, dH, dW, padT, padH, padW); \ break void THNN_CudaVolumetricMaxPooling_updateOutput( THCState *state, THCudaTensor *input, THCudaTensor *output, THCudaTensor *indices, int kT, int kW, int kH, int dT, int dW, int dH, int padT, int padW, int padH, bool ceilMode) { int batchSize; int inputSlices; int inputTime; int 
inputHeight; int inputWidth; int outputTime; int outputHeight; int outputWidth; THCUNN_assertSameGPU(state, 3, input, indices, output); if (THCudaTensor_nDimension(state, input) == 4) { THArgCheck( THCudaTensor_size(state, input, 1) >= kT && THCudaTensor_size(state, input, 2) >= kH && THCudaTensor_size(state, input, 3) >= kW, 2, "input image smaller than kernel size" ); /* sizes */ batchSize = 1; inputSlices = THCudaTensor_size(state, input, 0); inputTime = THCudaTensor_size(state, input, 1); inputHeight = THCudaTensor_size(state, input, 2); inputWidth = THCudaTensor_size(state, input, 3); } else if (THCudaTensor_nDimension(state, input) == 5) { THArgCheck( THCudaTensor_size(state, input, 4) >= kW && THCudaTensor_size(state, input, 3) >= kH && THCudaTensor_size(state, input, 2) >= kT, 2, "input image smaller than kernel size" ); /* sizes */ batchSize = THCudaTensor_size(state, input, 0); inputSlices = THCudaTensor_size(state, input, 1); inputTime = THCudaTensor_size(state, input, 2); inputHeight = THCudaTensor_size(state, input, 3); inputWidth = THCudaTensor_size(state, input, 4); } else { THArgCheck(false, 2, "4D or 5D tensor expected"); } THArgCheck(kT/2 >= padT && kW/2 >= padW && kH/2 >= padH, 2, "pad should be smaller than half of kernel size" ); if (ceilMode) { outputTime = (int)(ceil((float)(inputTime - kT + 2*padT) / dT)) + 1; outputHeight = (int)(ceil((float)(inputHeight - kH + 2*padH) / dH)) + 1; outputWidth = (int)(ceil((float)(inputWidth - kW + 2*padW) / dW)) + 1; } else { outputTime = (int)(floor((float)(inputTime - kT + 2*padT) / dT)) + 1; outputHeight = (int)(floor((float)(inputHeight - kH + 2*padH) / dH)) + 1; outputWidth = (int)(floor((float)(inputWidth - kW + 2*padW) / dW)) + 1; } if (padT || padW || padH) { if ((outputTime - 1)*dT >= inputTime + padT) --outputTime; if ((outputHeight - 1)*dH >= inputHeight + padH) --outputHeight; if ((outputWidth - 1)*dW >= inputWidth + padW) --outputWidth; } if (input->nDimension == 4) /* 4D */ { /* resize output */ THCudaTensor_resize4d(state, output, inputSlices, outputTime, outputHeight, outputWidth); /* indices pack ti,i,j locations for each output point as uchar into each float of the tensor */ THCudaTensor_resize4d(state, indices, inputSlices, outputTime, outputHeight, outputWidth); } else { /* 5D */ THCudaTensor_resize5d(state, output, batchSize, inputSlices, outputTime, outputHeight, outputWidth); // Index tensor packs index offsets as uchars into floats THCudaTensor_resize5d(state, indices, batchSize, inputSlices, outputTime, outputHeight, outputWidth); } input = THCudaTensor_newContiguous(state, input); // Collapse batch and feature dimensions THCDeviceTensor<float, 4> cudaInput; THCDeviceTensor<float, 4> cudaOutput; if (THCudaTensor_nDimension(state, input) == 4) { cudaInput = toDeviceTensor<float, 4>(state, input); cudaOutput = toDeviceTensor<float, 4>(state, output); } else { cudaInput = toDeviceTensor<float, 5>(state, input).downcastOuter<4>(); cudaOutput = toDeviceTensor<float, 5>(state, output).downcastOuter<4>(); } THLongStorage *indicesSize = THLongStorage_newWithSize(4); long indicesSizeRaw[4] = { batchSize * inputSlices, outputTime, outputHeight, outputWidth }; THLongStorage_rawCopy(indicesSize, indicesSizeRaw); THCudaTensor *indices1 = THCudaTensor_newWithStorage( state, THCudaTensor_storage(state, indices), THCudaTensor_storageOffset(state, indices), indicesSize, NULL); THLongStorage_free(indicesSize); THCDeviceTensor<float, 4> cudaIndices = toDeviceTensor<float, 4>(state, indices1); dim3 block(32, 8); dim3 
grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)), THCCeilDiv(outputHeight, static_cast<int>(block.y)), outputTime * inputSlices * batchSize); switch (kW) { UPDATE_OUTPUT_KERNEL_WIDTH(1); UPDATE_OUTPUT_KERNEL_WIDTH(2); UPDATE_OUTPUT_KERNEL_WIDTH(3); UPDATE_OUTPUT_KERNEL_WIDTH(4); UPDATE_OUTPUT_KERNEL_WIDTH(5); UPDATE_OUTPUT_KERNEL_WIDTH(6); UPDATE_OUTPUT_KERNEL_WIDTH(7); default: cuda_VolumetricMaxPooling_updateOutput<<<grid, block, 0, THCState_getCurrentStream(state)>>>( cudaInput, cudaIndices, cudaOutput, kT, kH, kW, dT, dH, dW, padT, padH, padW); } THCudaTensor_free(state, input); THCudaTensor_free(state, indices1); } #undef UPDATE_OUTPUT_KERNEL_WIDTH __global__ void cuda_VolumetricMaxPooling_updateGradInput( THCDeviceTensor<float, 4> gradOutput, THCDeviceTensor<float, 4> indices, THCDeviceTensor<float, 4> gradInput, int dT, int dH, int dW, int padT, int padH, int padW) { int oColumn = blockIdx.x * blockDim.x + threadIdx.x; int oRow = blockIdx.y * blockDim.y + threadIdx.y; int oFrame = blockIdx.z % gradOutput.getSize(1); // output frame/time int slice = blockIdx.z / gradOutput.getSize(1); // output slice/feature if (oRow < gradOutput.getSize(2) && oColumn < gradOutput.getSize(3)) { float *idx = &indices[slice][oFrame][oRow][oColumn]; int iFrame = ((unsigned char*)(idx))[0] + oFrame * dT - padT; int iRow = ((unsigned char*)(idx))[1] + oRow * dH - padH; int iColumn = ((unsigned char*)(idx))[2] + oColumn * dW - padW; atomicAdd(&gradInput[slice][iFrame][iRow][iColumn], gradOutput[slice][oFrame][oRow][oColumn]); } } void THNN_CudaVolumetricMaxPooling_updateGradInput( THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput, THCudaTensor *indices, int dT, int dW, int dH, int padT, int padW, int padH) { // Resize and initialize result tensor. 
THCudaTensor_resizeAs(state, gradInput, input); THCudaTensor_zero(state, gradInput); int batchSize; int inputSlices; int outputTime; int outputHeight; int outputWidth; THCUNN_assertSameGPU(state, 4, input, indices, gradOutput, gradInput); if (THCudaTensor_nDimension(state, input) == 4) /* 4D */ { batchSize = 1; inputSlices = THCudaTensor_size(state, input, 0); outputTime = THCudaTensor_size(state, gradOutput, 1); outputHeight = THCudaTensor_size(state, gradOutput, 2); outputWidth = THCudaTensor_size(state, gradOutput, 3); } else { batchSize = THCudaTensor_size(state, input, 0); inputSlices = THCudaTensor_size(state, input, 1); outputTime = THCudaTensor_size(state, gradOutput, 2); outputHeight = THCudaTensor_size(state, gradOutput, 3); outputWidth = THCudaTensor_size(state, gradOutput, 4); } gradOutput = THCudaTensor_newContiguous(state, gradOutput); // Collapse batch and feature dimensions THCDeviceTensor<float, 4> cudaGradInput; THCDeviceTensor<float, 4> cudaGradOutput; if (THCudaTensor_nDimension(state, input) == 4) { cudaGradInput = toDeviceTensor<float, 4>(state, gradInput); cudaGradOutput = toDeviceTensor<float, 4>(state, gradOutput); } else { cudaGradInput = toDeviceTensor<float, 5>(state, gradInput).downcastOuter<4>(); cudaGradOutput = toDeviceTensor<float, 5>(state, gradOutput).downcastOuter<4>(); } THLongStorage *indicesSize = THLongStorage_newWithSize(4); long indicesSizeRaw[4] = { batchSize * inputSlices, outputTime, outputHeight, outputWidth }; THLongStorage_rawCopy(indicesSize, indicesSizeRaw); THCudaTensor *indices1 = THCudaTensor_newWithStorage( state, THCudaTensor_storage(state, indices), THCudaTensor_storageOffset(state, indices), indicesSize, NULL); THLongStorage_free(indicesSize); THCDeviceTensor<float, 4> cudaIndices = toDeviceTensor<float, 4>(state, indices1); dim3 block(32, 8); dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)), THCCeilDiv(outputHeight, static_cast<int>(block.y)), outputTime * inputSlices * batchSize); cuda_VolumetricMaxPooling_updateGradInput<<<grid, block, 0, THCState_getCurrentStream(state)>>>( cudaGradOutput, cudaIndices, cudaGradInput, dT, dH, dW, padT, padH, padW); // cleanup THCudaTensor_free(state, gradOutput); THCudaTensor_free(state, indices1); }
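Note: both pooling files record the argmax location by writing the (frame, row, column) offsets as unsigned chars into the four bytes of a float in the indices tensor; updateGradInput reads them back the same way. A small sketch of that round trip (illustrative only; it assumes each kernel extent fits in one byte):

```cpp
// Pack three small offsets into the storage of one float, as the forward kernels do.
__host__ __device__ inline void pack_idx(float* slot, int frame, int row, int col) {
  unsigned char* b = reinterpret_cast<unsigned char*>(slot);
  b[0] = static_cast<unsigned char>(frame);
  b[1] = static_cast<unsigned char>(row);
  b[2] = static_cast<unsigned char>(col);
  b[3] = 0;
}

// Recover the offsets, as the backward kernel does before adding stride*output - pad.
__host__ __device__ inline void unpack_idx(const float* slot, int& frame, int& row, int& col) {
  const unsigned char* b = reinterpret_cast<const unsigned char*>(slot);
  frame = b[0]; row = b[1]; col = b[2];
}
```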
a1cab043df0cc09718507a9a76245313744232bc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "counting.h"
#include <cstdio>
#include <cassert>
#include <thrust/scan.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <cmath>
#include <thrust/device_vector.h>

__device__ __host__ int CeilDiv(int a, int b) { return (a - 1) / b + 1; }
__device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; }

#define THREAD_SIZE 512

// Maps '\n' to 0 and every other character to 1 (segment keys for the scan below).
struct to01 {
  __host__ __device__ int operator()(char c) { if (c == '\n') return 0; return 1; }
};

void CountPosition1(const char *text, int *pos, int text_size)
{
  thrust::device_ptr<const char> d_text(text);
  thrust::device_ptr<int> d_pos(pos);
  thrust::fill(d_pos, d_pos + text_size, 1);
  thrust::device_vector<int> keys(text_size);
  to01 op;
  thrust::transform(d_text, d_text + text_size, keys.begin(), op);
  // Segmented inclusive scan: the running count restarts after every '\n'.
  thrust::inclusive_scan_by_key(thrust::device, keys.begin(), keys.end(), d_pos, d_pos);
  // Multiply by the keys to zero the '\n' slots themselves.
  thrust::transform(d_pos, d_pos + text_size, keys.begin(), d_pos, thrust::multiplies<int>());
}

__global__ void Count(const char* text, int* pos, int end)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx > end) return;
  if (text[idx] == '\n') return;
  // Test idx == 0 first so text[idx - 1] is never read out of bounds.
  if (idx == 0 || text[idx - 1] == '\n') {
    int count = 1;
    while (1) {
      pos[idx] = count++;
      // Advance and stop at the end of the buffer before dereferencing.
      if (++idx > end || text[idx] == '\n') return;
    }
  }
}

void CountPosition2(const char *text, int *pos, int text_size)
{
  hipLaunchKernelGGL((Count), dim3(CeilDiv(text_size, THREAD_SIZE)), dim3(THREAD_SIZE), 0, 0,
                     text, pos, text_size - 1);
}
a1cab043df0cc09718507a9a76245313744232bc.cu
#include "counting.h" #include <cstdio> #include <cassert> #include <thrust/scan.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <cmath> #include <thrust/device_vector.h> __device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; } __device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; } #define THREAD_SIZE 512 struct to01 { __host__ __device__ int operator()(char c) {if (c == '\n') return 0; return 1;} }; void CountPosition1(const char *text, int *pos, int text_size) { thrust::device_ptr<const char> d_text(text); thrust::device_ptr<int> d_pos(pos); thrust::fill(d_pos, d_pos + text_size, 1); thrust::device_vector<int> keys(text_size); to01 op; thrust::transform(d_text, d_text+text_size, keys.begin(), op); thrust::inclusive_scan_by_key(thrust::device, keys.begin(), keys.end(), d_pos, d_pos); thrust::transform(d_pos, d_pos+text_size, keys.begin(), d_pos, thrust::multiplies<int>()); } __global__ void Count(const char* text, int* pos, int end) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx > end) return; if (text[idx] == '\n') return; if (text[idx-1] == '\n' || idx == 0) { int count = 1; while(1) { pos[idx] = count++; if (text[++idx] == '\n' || idx > end) return; } } } void CountPosition2(const char *text, int *pos, int text_size) { Count <<<CeilDiv(text_size, THREAD_SIZE), THREAD_SIZE>>>(text, pos, text_size -1); }
27212bbd83dfb22a39d2a844749ce82ad04e30ab.hip
// !!! This is a file automatically generated by hipify!!! #define inf 1e9 #define eps 1e-6 #include <fstream> #include <iostream> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <limits.h> #include <ctime> #include <vector> #include <string> #include <unordered_map> #include <random> #include <cstdint> #include <algorithm> #include <hiprand/hiprand_kernel.h> #include <hiprand/hiprand.h> #include <time.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <thrust/sort.h> #include <stdio.h> #include <cmath> #define DEVICE __host__ __device__ #define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ ) void check_cuda(hipError_t result, char const* const func, const char* const file, int const line) { if (result) { std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << "' \n"; // Make sure we call CUDA Device Reset before exiting hipDeviceReset(); exit(99); } } struct Vec { public: double x, y, z; DEVICE Vec(double x0, double y0, double z0) : x(x0), y(y0), z(z0) {} DEVICE Vec() : x(0), y(0), z(0) {} DEVICE Vec(double d) : x(d), y(d), z(d) {} DEVICE Vec operator+(const Vec& b) const { return Vec(x + b.x, y + b.y, z + b.z); } DEVICE Vec operator-(const Vec& b) const { return Vec(x - b.x, y - b.y, z - b.z); } DEVICE Vec operator*(double b) const { return Vec(x * b, y * b, z * b); } DEVICE Vec operator/(double b) const { return Vec(x / b, y / b, z / b); } DEVICE Vec operator*(const Vec& b) const { return Vec(x * b.x, y * b.y, z * b.z); } DEVICE Vec& norm() { return *this = *this / length(); } DEVICE double length() { return sqrt(x * x + y * y + z * z); } DEVICE double dot(const Vec& b) const { return x * b.x + y * b.y + z * b.z; } DEVICE Vec operator%(Vec& b) const { return Vec(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); } DEVICE bool operator!=(const Vec& b) const { return (b.x != x || b.y != y || b.z != z); } }; struct Ray { Vec o, d; DEVICE Ray(Vec o0 = 0, Vec d0 = 0) { o = o0, d = d0.norm(); } }; #include <algorithm> struct AABB { DEVICE inline AABB() { min = Vec(inf, inf, inf); max = Vec(-inf, -inf, -inf); } // an empty interval DEVICE inline AABB(Vec min_, Vec max_) { min = min_; max = max_; } DEVICE inline bool unbounded() const { return min.x == -inf || min.y == -inf || min.z == -inf || max.x == inf || max.y == inf || max.z == inf; } DEVICE inline size_t largestDimension() const { double dx = abs(max.x - min.x); double dy = abs(max.y - min.y); double dz = abs(max.z - min.z); if (dx > dy && dx > dz) { return 0; } if (dy > dz) { return 1; } return 2; } // ray-slab tests, see PBRT 2nd edition, section 4.2.1 DEVICE inline bool intersect(const Ray& ray, const Vec& inverseDirection, double closestKnownT) const { bool xDirNegative = ray.d.x < 0; bool yDirNegative = ray.d.y < 0; bool zDirNegative = ray.d.z < 0; // check for ray intersection against x and y slabs float tmin = ((xDirNegative ? max.x : min.x) - ray.o.x) * inverseDirection.x; float tmax = ((xDirNegative ? min.x : max.x) - ray.o.x) * inverseDirection.x; float tymin = ((yDirNegative ? max.y : min.y) - ray.o.y) * inverseDirection.y; float tymax = ((yDirNegative ? min.y : max.y) - ray.o.y) * inverseDirection.y; if (tmin > tymax || tymin > tmax) { return false; } if (tymin > tmin) { tmin = tymin; } if (tymax < tmax) { tmax = tymax; } // check for ray intersection against z slab float tzmin = ((zDirNegative ? max.z : min.z) - ray.o.z) * inverseDirection.z; float tzmax = ((zDirNegative ? 
min.z : max.z) - ray.o.z) * inverseDirection.z; if (tmin > tzmax || tzmin > tmax) { return false; } if (tzmin > tmin) { tmin = tzmin; } if (tzmax < tmax) { tmax = tzmax; } return (tmin < closestKnownT) && (tmax > eps); } Vec min; Vec max; }; DEVICE double mini(double a, double b) { if (a < b) return a; return b; } DEVICE double maxi(double a, double b) { if (a > b) return a; return b; } DEVICE inline AABB enclose(const AABB& firstBoundingBox, const AABB& secondBoundingBox) { AABB ret; ret.min.x = mini(firstBoundingBox.min.x, secondBoundingBox.min.x); ret.min.y = mini(firstBoundingBox.min.y, secondBoundingBox.min.y); ret.min.z = mini(firstBoundingBox.min.z, secondBoundingBox.min.z); ret.max.x = maxi(firstBoundingBox.max.x, secondBoundingBox.max.x); ret.max.y = maxi(firstBoundingBox.max.y, secondBoundingBox.max.y); ret.max.z = maxi(firstBoundingBox.max.z, secondBoundingBox.max.z); return ret; } DEVICE inline AABB enclose(const AABB& boundingBox, const Vec& point) { AABB ret; ret.min.x = mini(boundingBox.min.x, point.x); ret.min.y = mini(boundingBox.min.y, point.y); ret.min.z = mini(boundingBox.min.z, point.z); ret.max.x = maxi(boundingBox.max.x, point.x); ret.max.y = maxi(boundingBox.max.y, point.y); ret.max.z = maxi(boundingBox.max.z, point.z); return ret; } # define RND2(state) (float)hiprand_uniform(&state) # define RND(state) (2.0*RND2(state))-1.0 #define PI 3.1415926536 int width, height; using namespace std; typedef unordered_map<string, double> pl; // create an orthonormal system, assuming v1 is already normalized DEVICE void ons(const Vec& v1, Vec& v2, Vec& v3) { if (std::abs(v1.x) > std::abs(v1.y)) { // project to the y = 0 plane and construct a normalized orthogonal vector in this plane float invLen = 1.f / sqrtf(v1.x * v1.x + v1.z * v1.z); v2 = Vec(-v1.z * invLen, 0.0f, v1.x * invLen); } else { // project to the x = 0 plane and construct a normalized orthogonal vector in this plane float invLen = 1.0f / sqrtf(v1.y * v1.y + v1.z * v1.z); v2 = Vec(0.0f, v1.z * invLen, -v1.y * invLen); } v3 = v1 % v2; } void render(Vec* pix, int s) { fstream myfile; std::cout << "iter " << s << std::endl; myfile.open("render.ppm", fstream::out); myfile << "P3\n" << width << " " << height << "\n255\n"; for (int col = 0; col < width; col++) { for (int row = 0; row < height; row++) { int pixel_index = row * width + col; myfile << ::min((int)(pix[pixel_index].x / (float)(s + 1)), 255) << " "; myfile << ::min((int)(pix[pixel_index].y / (float)(s + 1)), 255) << " "; myfile << ::min((int)(pix[pixel_index].z / (float)(s + 1)), 255) << std::endl; } } } // Objects have color, emission, type (diffuse, specular, refractive) // All object should be intersectable and should be able to compute their surface normals. class Obj { public: Vec cl; Vec emission; int type; DEVICE void setMat(Vec color_ = Vec(), Vec emission_ = 0, int type_ = 0) { cl = color_; emission = emission_; type = type_; } DEVICE virtual double intersect(const Ray&) const = 0; DEVICE virtual Vec normal(const Vec&) const = 0; DEVICE virtual AABB getAABB() const = 0; }; class Plane : public Obj { public: Vec n; double d; DEVICE Plane(double d_ = 0, Vec n_ = 0) { d = d_; n = n_; } DEVICE double intersect(const Ray& ray) const { double d0 = n.dot(ray.d); if (d0 != 0) { double t = -1 * (((n.dot(ray.o)) + d) / d0); return (t > eps) ? 
t : 0; } else return 0; } DEVICE Vec normal(const Vec& p0) const { return n; } DEVICE AABB getAABB() const { if (n.x == 0 && n.y == 0) return AABB(Vec(-inf, -inf, d * n.z), Vec(inf, inf, d * n.z)); if (n.x == 0 && n.z == 0) return AABB(Vec(-inf, d * n.y, -inf), Vec(inf, d * n.y, inf)); if (n.y == 0 && n.z == 0) return AABB(Vec(d * n.x, -inf, -inf), Vec(d * n.x, inf, inf)); return AABB(Vec(-inf, -inf, -inf), Vec(inf, inf, inf)); } }; class Sphere : public Obj { public: Vec c; double r; DEVICE Sphere(double r_ = 0, Vec c_ = 0) { c = c_; r = r_; } DEVICE double intersect(const Ray& ray) const { double b = ((ray.o - c) * 2).dot(ray.d); double c_ = (ray.o - c).dot((ray.o - c)) - (r * r); double disc = b * b - 4 * c_; if (disc < 0) return 0; else disc = sqrt(disc); double sol1 = -b + disc; double sol2 = -b - disc; return (sol2 > eps) ? sol2 / 2 : ((sol1 > eps) ? sol1 / 2 : 0); } DEVICE Vec normal(const Vec& p0) const { return (p0 - c).norm(); } DEVICE AABB getAABB() const { return AABB(Vec(c.x - r, c.y - r, c.z - r), Vec(c.x + r, c.y + r, c.z + r)); } }; class Intersection { public: DEVICE Intersection() { t = inf; object = nullptr; } DEVICE Intersection(double t_, Obj* object_) { t = t_; object = object_; } DEVICE operator bool() { return object != nullptr; } double t; Obj* object; }; class Scene { Obj** objects; int list_size; public: DEVICE Scene() {} DEVICE Scene(Obj** objs, int size) { objects = objs; list_size = size; } DEVICE Intersection intersect(const Ray& ray) const { Intersection closestIntersection; // intersect all objects, one after the other for (int i = 0; i < list_size; i++) { Intersection inter = Intersection(objects[i]->intersect(ray), objects[i]); if (inter.t > eps && inter.t < closestIntersection.t) { closestIntersection = inter; } } return closestIntersection; } }; class Data { public: int type; Vec clr; double cost; Vec emission; DEVICE Data(int t = 0, Vec cl = Vec(0), double co = 0, Vec em = Vec(0)) { type = t; clr = cl; cost = co, emission = em; } }; // Class for generating the Halton low-discrepancy series for Quasi // Monte Carlo integration. class Halton { double value, inv_base; public: DEVICE void number(int i, int base) { double f = inv_base = 1.0 / base; value = 0.0; while (i > 0) { value += f * (double)(i % base); i /= base; f *= inv_base; } } DEVICE void next() { double r = 1.0 - value - 0.0000001; if (inv_base < r) value += inv_base; else { double h = inv_base, hh; do { hh = h; h *= inv_base; } while (h >= r); value += hh + h - 1.0; } } DEVICE double get() { return value; } }; // Input is the pixel offset, output is the appropriate coordinate // on the image plane DEVICE Vec camcr(const double x, const double y, int width, int height) { double w = width; double h = height; float fovx = PI / 4; float fovy = (h / w) * fovx; return Vec(((2 * x - w) / w) * tan(fovx), -((2 * y - h) / h) * tan(fovy), -1.0); } // Uniform sampling on a hemisphere to produce outgoing ray directions. 
// courtesy of http://www.rorydriscoll.com/2009/01/07/better-sampling/ DEVICE Vec hemisphere(double u1, double u2) { const double r = sqrt(1.0 - u1 * u1); const double phi = 2 * PI * u2; return Vec(cos(phi) * r, sin(phi) * r, u1); } __device__ void trace(Data* clrlist, Ray& ray, Scene** scene, Vec& clr, float& refr_ind, const int bounce_max, hiprandState_t localState, int penetration_index) { // Russian roulette: starting at depth 5, each recursive step will stop with a probability of 0.1 Data dt; Vec tmp; Intersection intersection; Vec hp; Vec N; Vec rotX, rotY; Vec sampledDir; Vec rotatedDir; int iter = 0; double cost; double rrFactor = 1.0; double n; double R0; double cost1; double cost2; double Rprob; const double rrStopProbability = 0.1; for (int depth = 0; depth < bounce_max; depth++) { if (depth >= 5) { if ((RND2(localState)) <= rrStopProbability) { break; } rrFactor = 1.0 / (1.0 - rrStopProbability); } intersection = (*scene)->intersect(ray); if (!intersection) break; // Travel the ray to the hit point where the closest object lies and compute the surface normal there. hp = ray.o + ray.d * intersection.t; N = intersection.object->normal(hp); ray.o = hp; // Add the emission, the L_e(x,w) part of the rendering equation, but scale it with the Russian Roulette // probability weight. const Vec emission = intersection.object->emission; tmp = emission * rrFactor; // Diffuse BRDF - choose an outgoing direction with hemisphere sampling. if (intersection.object->type == 1) { ons(N, rotX, rotY); sampledDir = hemisphere((RND2(localState)), (RND2(localState))); rotatedDir.x = Vec(rotX.x, rotY.x, N.x).dot(sampledDir); rotatedDir.y = Vec(rotX.y, rotY.y, N.y).dot(sampledDir); rotatedDir.z = Vec(rotX.z, rotY.z, N.z).dot(sampledDir); ray.d = rotatedDir; // already normalized cost = ray.d.dot(N); dt = Data(1, intersection.object->cl, cost, tmp); } // Specular BRDF - this is a singularity in the rendering equation that follows // delta distribution, therefore we handle this case explicitly - one incoming // direction -> one outgoing direction, that is, the perfect reflection direction. if (intersection.object->type == 2) { cost = ray.d.dot(N); ray.d = (ray.d - N * (cost * 2)).norm(); dt = Data(2, intersection.object->cl, cost, tmp); } // Glass/refractive BRDF - we use the vector version of Snell's law and Fresnel's law // to compute the outgoing reflection and refraction directions and probability weights. 
if (intersection.object->type == 3) { n = refr_ind; R0 = (1.0 - n) / (1.0 + n); R0 = R0 * R0; if (N.dot(ray.d) > 0) { // we're inside the medium N = N * -1; n = 1 / n; } n = 1 / n; cost1 = (N.dot(ray.d)) * -1; // cosine of theta_1 cost2 = 1.0 - n * n * (1.0 - cost1 * cost1); // cosine of theta_2 Rprob = R0 + (1.0 - R0) * pow(1.0 - cost1, 5.0); // Schlick-approximation if (cost2 > 0 && (RND2(localState)) > Rprob) { // refraction direction ray.d = ((ray.d * n) + (N * (n * cost1 - sqrt(cost2)))).norm(); } else { // reflection direction ray.d = (ray.d + N * (cost1 * 2)).norm(); } dt = Data(3, intersection.object->cl, cost1, tmp); } clrlist[bounce_max - depth - 1] = dt; iter++; } for (int i = bounce_max - iter; i < bounce_max; i++) { if (clrlist[i].type == 1) { clr = clrlist[i].emission + (clr * clrlist[i].clr) * clrlist[i].cost * 0.1 * rrFactor; } if (clrlist[i].type == 2) { clr = clrlist[i].emission + clr * rrFactor; } if (clrlist[i].type == 3) { if (i == bounce_max - 1 || (i - 2 >= 0 && clrlist[i - 2].emission != Vec(0))) clr = clrlist[i].emission + (clr * clrlist[i].clr) * 1.15 * rrFactor; else clr = clrlist[i].emission + clr * 1.15 * rrFactor; } } } __global__ void calc_render(int spt, int bounce_max, Data* clrlist, float refr_ind, int spp, Scene** scene, Vec* pix, hiprandState_t* rand_state, int actual, int width, int height, int penetration_index) { int row = threadIdx.x + blockIdx.x * blockDim.x; int col = threadIdx.y + blockIdx.y * blockDim.y; if ((col >= width) || (row >= height)) return; int pixel_index = col * width + row; hiprand_init((double)((actual + 1) * pixel_index), pixel_index, 0, &rand_state[pixel_index]); for (int s = 0; s < spt; s++) { //hiprandState_t localState = rand_state[pixel_index]; Vec clr = Vec(0, 0, 0); Ray ray; ray.o = (Vec(0, 0, 0)); // rays start out from here Vec cam = camcr(col, row, width, height); // construct image plane coordinates cam.x = cam.x + (RND(rand_state[pixel_index])) / 700; // anti-aliasing for free cam.y = cam.y + (RND(rand_state[pixel_index])) / 700; ray.d = (cam - ray.o).norm(); // point from the origin to the camera plane trace(&(clrlist[pixel_index * bounce_max]), ray, scene, clr, refr_ind, bounce_max, rand_state[pixel_index], penetration_index); pix[pixel_index] = pix[pixel_index] + clr; } } __global__ void create_world(Obj** d_list, int size, Scene** d_scene) { d_list[0] = new Sphere(1.05, Vec(-0.75, -1.45, -4.4)); d_list[0]->setMat(Vec(4, 8, 4), Vec(0), 2); d_list[1] = new Sphere(0.5, Vec(1.5, -1.8, -3.7)); d_list[1]->setMat(Vec(1, 1, 1), Vec(0), 3); d_list[2] = new Sphere(0.6, Vec(-1.75, -1.95, -3.1)); d_list[2]->setMat(Vec(4, 4, 12), Vec(0), 1); d_list[3] = new Plane(2.5, Vec(0, 1, 0)); d_list[3]->setMat(Vec(6, 6, 6), Vec(0), 1); d_list[4] = new Plane(5.5, Vec(0, 0, 1)); d_list[4]->setMat(Vec(6, 6, 6), Vec(0), 1); d_list[5] = new Plane(2.75, Vec(1, 0, 0)); d_list[5]->setMat(Vec(10, 2, 2), Vec(0), 1); d_list[6] = new Plane(2.75, Vec(-1, 0, 0)); d_list[6]->setMat(Vec(2, 10, 2), Vec(0), 1); d_list[7] = new Plane(3.0, Vec(0, -1, 0)); d_list[7]->setMat(Vec(6, 6, 6), Vec(0), 1); d_list[8] = new Plane(0.5, Vec(0, 0, -1)); d_list[8]->setMat(Vec(6, 6, 6), Vec(0), 1); d_list[9] = new Sphere(0.5, Vec(0, 1.9, -3)); d_list[9]->setMat(Vec(2, 2, 10), Vec(254 * 30, 248 * 30, 221 * 30), 1); *d_scene = new Scene(d_list, size); } void render(int id, int size, int spp, double refr_index, int spt) { srand(time(NULL)); int tx = 16; int ty = 16; int bounce_max = 7; int obj_num = 10; int penetration_index = 1; Obj** list; hipMalloc((void**)&list, obj_num * 
sizeof(Obj*)); Scene** scene; hipMalloc((void**)&scene, sizeof(Scene*)); hipLaunchKernelGGL(( create_world), dim3(1), dim3(1), 0, 0, list, obj_num, scene); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); width = size; height = size; dim3 blocks(width / tx + 1, height / ty + 1); dim3 threads(tx, ty); Vec* d_pix; hipMalloc((void**)&d_pix, width * height * sizeof(Vec)); Vec* h_pix = (Vec*)malloc(width * height * sizeof(Vec)); hiprandState_t* d_rand_state; hipMalloc((void**)&d_rand_state, width * height * sizeof(hiprandState_t)); Data* clrlist; hipMalloc((void**)&clrlist, sizeof(Data) * bounce_max * width * height); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); // correlated Halton-sequence dimensions Halton hal, hal2; hal.number(0, 2); hal2.number(0, 2); bool running = true; for (int s = 0; s < spp; s += spt) { if (running) { hipLaunchKernelGGL(( calc_render), dim3(blocks), dim3(threads), 0, 0, spt, bounce_max, clrlist, refr_index, spp, scene, d_pix, d_rand_state, s, width, height, penetration_index); checkCudaErrors(hipGetLastError()); checkCudaErrors(hipDeviceSynchronize()); } if (!running) return; hipMemcpy(h_pix, d_pix, width * height * sizeof(Vec), hipMemcpyDeviceToHost); render(h_pix, s + spt); } hipFree(d_rand_state); } int main() { render(1, 500, 20000, 1.5, 500); }
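Note: inside trace(), the refractive branch combines the vector form of Snell's law for the transmitted direction with the Schlick approximation for the reflection probability. A compact standalone restatement of that math, reusing the Vec type defined above (a sketch mirroring the branch, not an extension of it):

```cpp
// Schlick's approximation to the Fresnel reflectance, for relative index eta and
// the cosine of the incident angle, as used in trace().
__device__ double schlick_reflectance(double eta, double cost1) {
  double r0 = (1.0 - eta) / (1.0 + eta);
  r0 = r0 * r0;
  return r0 + (1.0 - r0) * pow(1.0 - cost1, 5.0);
}

// Refraction direction via Snell's law; returns false on total internal reflection,
// in which case the caller falls back to the mirror direction, as trace() does.
__device__ bool refract_dir(const Vec& d, const Vec& N, double eta, Vec& out) {
  const double cost1 = -N.dot(d);                                  // cos(theta_1)
  const double cost2 = 1.0 - eta * eta * (1.0 - cost1 * cost1);    // cos^2(theta_2)
  if (cost2 <= 0) return false;
  out = ((d * eta) + (N * (eta * cost1 - sqrt(cost2)))).norm();
  return true;
}
```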
27212bbd83dfb22a39d2a844749ce82ad04e30ab.cu
#define inf 1e9 #define eps 1e-6 #include <fstream> #include <iostream> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <limits.h> #include <ctime> #include <vector> #include <string> #include <unordered_map> #include <random> #include <cstdint> #include <algorithm> #include <curand_kernel.h> #include <curand.h> #include <time.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <thrust/sort.h> #include <stdio.h> #include <cmath> #define DEVICE __host__ __device__ #define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ ) void check_cuda(cudaError_t result, char const* const func, const char* const file, int const line) { if (result) { std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " << file << ":" << line << " '" << func << "' \n"; // Make sure we call CUDA Device Reset before exiting cudaDeviceReset(); exit(99); } } struct Vec { public: double x, y, z; DEVICE Vec(double x0, double y0, double z0) : x(x0), y(y0), z(z0) {} DEVICE Vec() : x(0), y(0), z(0) {} DEVICE Vec(double d) : x(d), y(d), z(d) {} DEVICE Vec operator+(const Vec& b) const { return Vec(x + b.x, y + b.y, z + b.z); } DEVICE Vec operator-(const Vec& b) const { return Vec(x - b.x, y - b.y, z - b.z); } DEVICE Vec operator*(double b) const { return Vec(x * b, y * b, z * b); } DEVICE Vec operator/(double b) const { return Vec(x / b, y / b, z / b); } DEVICE Vec operator*(const Vec& b) const { return Vec(x * b.x, y * b.y, z * b.z); } DEVICE Vec& norm() { return *this = *this / length(); } DEVICE double length() { return sqrt(x * x + y * y + z * z); } DEVICE double dot(const Vec& b) const { return x * b.x + y * b.y + z * b.z; } DEVICE Vec operator%(Vec& b) const { return Vec(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); } DEVICE bool operator!=(const Vec& b) const { return (b.x != x || b.y != y || b.z != z); } }; struct Ray { Vec o, d; DEVICE Ray(Vec o0 = 0, Vec d0 = 0) { o = o0, d = d0.norm(); } }; #include <algorithm> struct AABB { DEVICE inline AABB() { min = Vec(inf, inf, inf); max = Vec(-inf, -inf, -inf); } // an empty interval DEVICE inline AABB(Vec min_, Vec max_) { min = min_; max = max_; } DEVICE inline bool unbounded() const { return min.x == -inf || min.y == -inf || min.z == -inf || max.x == inf || max.y == inf || max.z == inf; } DEVICE inline size_t largestDimension() const { double dx = abs(max.x - min.x); double dy = abs(max.y - min.y); double dz = abs(max.z - min.z); if (dx > dy && dx > dz) { return 0; } if (dy > dz) { return 1; } return 2; } // ray-slab tests, see PBRT 2nd edition, section 4.2.1 DEVICE inline bool intersect(const Ray& ray, const Vec& inverseDirection, double closestKnownT) const { bool xDirNegative = ray.d.x < 0; bool yDirNegative = ray.d.y < 0; bool zDirNegative = ray.d.z < 0; // check for ray intersection against x and y slabs float tmin = ((xDirNegative ? max.x : min.x) - ray.o.x) * inverseDirection.x; float tmax = ((xDirNegative ? min.x : max.x) - ray.o.x) * inverseDirection.x; float tymin = ((yDirNegative ? max.y : min.y) - ray.o.y) * inverseDirection.y; float tymax = ((yDirNegative ? min.y : max.y) - ray.o.y) * inverseDirection.y; if (tmin > tymax || tymin > tmax) { return false; } if (tymin > tmin) { tmin = tymin; } if (tymax < tmax) { tmax = tymax; } // check for ray intersection against z slab float tzmin = ((zDirNegative ? max.z : min.z) - ray.o.z) * inverseDirection.z; float tzmax = ((zDirNegative ? 
min.z : max.z) - ray.o.z) * inverseDirection.z; if (tmin > tzmax || tzmin > tmax) { return false; } if (tzmin > tmin) { tmin = tzmin; } if (tzmax < tmax) { tmax = tzmax; } return (tmin < closestKnownT) && (tmax > eps); } Vec min; Vec max; }; DEVICE double mini(double a, double b) { if (a < b) return a; return b; } DEVICE double maxi(double a, double b) { if (a > b) return a; return b; } DEVICE inline AABB enclose(const AABB& firstBoundingBox, const AABB& secondBoundingBox) { AABB ret; ret.min.x = mini(firstBoundingBox.min.x, secondBoundingBox.min.x); ret.min.y = mini(firstBoundingBox.min.y, secondBoundingBox.min.y); ret.min.z = mini(firstBoundingBox.min.z, secondBoundingBox.min.z); ret.max.x = maxi(firstBoundingBox.max.x, secondBoundingBox.max.x); ret.max.y = maxi(firstBoundingBox.max.y, secondBoundingBox.max.y); ret.max.z = maxi(firstBoundingBox.max.z, secondBoundingBox.max.z); return ret; } DEVICE inline AABB enclose(const AABB& boundingBox, const Vec& point) { AABB ret; ret.min.x = mini(boundingBox.min.x, point.x); ret.min.y = mini(boundingBox.min.y, point.y); ret.min.z = mini(boundingBox.min.z, point.z); ret.max.x = maxi(boundingBox.max.x, point.x); ret.max.y = maxi(boundingBox.max.y, point.y); ret.max.z = maxi(boundingBox.max.z, point.z); return ret; } # define RND2(state) (float)curand_uniform(&state) # define RND(state) (2.0*RND2(state))-1.0 #define PI 3.1415926536 int width, height; using namespace std; typedef unordered_map<string, double> pl; // create an orthonormal system, assuming v1 is already normalized DEVICE void ons(const Vec& v1, Vec& v2, Vec& v3) { if (std::abs(v1.x) > std::abs(v1.y)) { // project to the y = 0 plane and construct a normalized orthogonal vector in this plane float invLen = 1.f / sqrtf(v1.x * v1.x + v1.z * v1.z); v2 = Vec(-v1.z * invLen, 0.0f, v1.x * invLen); } else { // project to the x = 0 plane and construct a normalized orthogonal vector in this plane float invLen = 1.0f / sqrtf(v1.y * v1.y + v1.z * v1.z); v2 = Vec(0.0f, v1.z * invLen, -v1.y * invLen); } v3 = v1 % v2; } void render(Vec* pix, int s) { fstream myfile; std::cout << "iter " << s << std::endl; myfile.open("render.ppm", fstream::out); myfile << "P3\n" << width << " " << height << "\n255\n"; for (int col = 0; col < width; col++) { for (int row = 0; row < height; row++) { int pixel_index = row * width + col; myfile << std::min((int)(pix[pixel_index].x / (float)(s + 1)), 255) << " "; myfile << std::min((int)(pix[pixel_index].y / (float)(s + 1)), 255) << " "; myfile << std::min((int)(pix[pixel_index].z / (float)(s + 1)), 255) << std::endl; } } } // Objects have color, emission, type (diffuse, specular, refractive) // All object should be intersectable and should be able to compute their surface normals. class Obj { public: Vec cl; Vec emission; int type; DEVICE void setMat(Vec color_ = Vec(), Vec emission_ = 0, int type_ = 0) { cl = color_; emission = emission_; type = type_; } DEVICE virtual double intersect(const Ray&) const = 0; DEVICE virtual Vec normal(const Vec&) const = 0; DEVICE virtual AABB getAABB() const = 0; }; class Plane : public Obj { public: Vec n; double d; DEVICE Plane(double d_ = 0, Vec n_ = 0) { d = d_; n = n_; } DEVICE double intersect(const Ray& ray) const { double d0 = n.dot(ray.d); if (d0 != 0) { double t = -1 * (((n.dot(ray.o)) + d) / d0); return (t > eps) ? 
t : 0; } else return 0; } DEVICE Vec normal(const Vec& p0) const { return n; } DEVICE AABB getAABB() const { if (n.x == 0 && n.y == 0) return AABB(Vec(-inf, -inf, d * n.z), Vec(inf, inf, d * n.z)); if (n.x == 0 && n.z == 0) return AABB(Vec(-inf, d * n.y, -inf), Vec(inf, d * n.y, inf)); if (n.y == 0 && n.z == 0) return AABB(Vec(d * n.x, -inf, -inf), Vec(d * n.x, inf, inf)); return AABB(Vec(-inf, -inf, -inf), Vec(inf, inf, inf)); } }; class Sphere : public Obj { public: Vec c; double r; DEVICE Sphere(double r_ = 0, Vec c_ = 0) { c = c_; r = r_; } DEVICE double intersect(const Ray& ray) const { double b = ((ray.o - c) * 2).dot(ray.d); double c_ = (ray.o - c).dot((ray.o - c)) - (r * r); double disc = b * b - 4 * c_; if (disc < 0) return 0; else disc = sqrt(disc); double sol1 = -b + disc; double sol2 = -b - disc; return (sol2 > eps) ? sol2 / 2 : ((sol1 > eps) ? sol1 / 2 : 0); } DEVICE Vec normal(const Vec& p0) const { return (p0 - c).norm(); } DEVICE AABB getAABB() const { return AABB(Vec(c.x - r, c.y - r, c.z - r), Vec(c.x + r, c.y + r, c.z + r)); } }; class Intersection { public: DEVICE Intersection() { t = inf; object = nullptr; } DEVICE Intersection(double t_, Obj* object_) { t = t_; object = object_; } DEVICE operator bool() { return object != nullptr; } double t; Obj* object; }; class Scene { Obj** objects; int list_size; public: DEVICE Scene() {} DEVICE Scene(Obj** objs, int size) { objects = objs; list_size = size; } DEVICE Intersection intersect(const Ray& ray) const { Intersection closestIntersection; // intersect all objects, one after the other for (int i = 0; i < list_size; i++) { Intersection inter = Intersection(objects[i]->intersect(ray), objects[i]); if (inter.t > eps && inter.t < closestIntersection.t) { closestIntersection = inter; } } return closestIntersection; } }; class Data { public: int type; Vec clr; double cost; Vec emission; DEVICE Data(int t = 0, Vec cl = Vec(0), double co = 0, Vec em = Vec(0)) { type = t; clr = cl; cost = co, emission = em; } }; // Class for generating the Halton low-discrepancy series for Quasi // Monte Carlo integration. class Halton { double value, inv_base; public: DEVICE void number(int i, int base) { double f = inv_base = 1.0 / base; value = 0.0; while (i > 0) { value += f * (double)(i % base); i /= base; f *= inv_base; } } DEVICE void next() { double r = 1.0 - value - 0.0000001; if (inv_base < r) value += inv_base; else { double h = inv_base, hh; do { hh = h; h *= inv_base; } while (h >= r); value += hh + h - 1.0; } } DEVICE double get() { return value; } }; // Input is the pixel offset, output is the appropriate coordinate // on the image plane DEVICE Vec camcr(const double x, const double y, int width, int height) { double w = width; double h = height; float fovx = PI / 4; float fovy = (h / w) * fovx; return Vec(((2 * x - w) / w) * tan(fovx), -((2 * y - h) / h) * tan(fovy), -1.0); } // Uniform sampling on a hemisphere to produce outgoing ray directions. 
// courtesy of http://www.rorydriscoll.com/2009/01/07/better-sampling/ DEVICE Vec hemisphere(double u1, double u2) { const double r = sqrt(1.0 - u1 * u1); const double phi = 2 * PI * u2; return Vec(cos(phi) * r, sin(phi) * r, u1); } __device__ void trace(Data* clrlist, Ray& ray, Scene** scene, Vec& clr, float& refr_ind, const int bounce_max, curandState localState, int penetration_index) { // Russian roulette: starting at depth 5, each recursive step will stop with a probability of 0.1 Data dt; Vec tmp; Intersection intersection; Vec hp; Vec N; Vec rotX, rotY; Vec sampledDir; Vec rotatedDir; int iter = 0; double cost; double rrFactor = 1.0; double n; double R0; double cost1; double cost2; double Rprob; const double rrStopProbability = 0.1; for (int depth = 0; depth < bounce_max; depth++) { if (depth >= 5) { if ((RND2(localState)) <= rrStopProbability) { break; } rrFactor = 1.0 / (1.0 - rrStopProbability); } intersection = (*scene)->intersect(ray); if (!intersection) break; // Travel the ray to the hit point where the closest object lies and compute the surface normal there. hp = ray.o + ray.d * intersection.t; N = intersection.object->normal(hp); ray.o = hp; // Add the emission, the L_e(x,w) part of the rendering equation, but scale it with the Russian Roulette // probability weight. const Vec emission = intersection.object->emission; tmp = emission * rrFactor; // Diffuse BRDF - choose an outgoing direction with hemisphere sampling. if (intersection.object->type == 1) { ons(N, rotX, rotY); sampledDir = hemisphere((RND2(localState)), (RND2(localState))); rotatedDir.x = Vec(rotX.x, rotY.x, N.x).dot(sampledDir); rotatedDir.y = Vec(rotX.y, rotY.y, N.y).dot(sampledDir); rotatedDir.z = Vec(rotX.z, rotY.z, N.z).dot(sampledDir); ray.d = rotatedDir; // already normalized cost = ray.d.dot(N); dt = Data(1, intersection.object->cl, cost, tmp); } // Specular BRDF - this is a singularity in the rendering equation that follows // delta distribution, therefore we handle this case explicitly - one incoming // direction -> one outgoing direction, that is, the perfect reflection direction. if (intersection.object->type == 2) { cost = ray.d.dot(N); ray.d = (ray.d - N * (cost * 2)).norm(); dt = Data(2, intersection.object->cl, cost, tmp); } // Glass/refractive BRDF - we use the vector version of Snell's law and Fresnel's law // to compute the outgoing reflection and refraction directions and probability weights. 
if (intersection.object->type == 3) { n = refr_ind; R0 = (1.0 - n) / (1.0 + n); R0 = R0 * R0; if (N.dot(ray.d) > 0) { // we're inside the medium N = N * -1; n = 1 / n; } n = 1 / n; cost1 = (N.dot(ray.d)) * -1; // cosine of theta_1 cost2 = 1.0 - n * n * (1.0 - cost1 * cost1); // cosine of theta_2 Rprob = R0 + (1.0 - R0) * pow(1.0 - cost1, 5.0); // Schlick-approximation if (cost2 > 0 && (RND2(localState)) > Rprob) { // refraction direction ray.d = ((ray.d * n) + (N * (n * cost1 - sqrt(cost2)))).norm(); } else { // reflection direction ray.d = (ray.d + N * (cost1 * 2)).norm(); } dt = Data(3, intersection.object->cl, cost1, tmp); } clrlist[bounce_max - depth - 1] = dt; iter++; } for (int i = bounce_max - iter; i < bounce_max; i++) { if (clrlist[i].type == 1) { clr = clrlist[i].emission + (clr * clrlist[i].clr) * clrlist[i].cost * 0.1 * rrFactor; } if (clrlist[i].type == 2) { clr = clrlist[i].emission + clr * rrFactor; } if (clrlist[i].type == 3) { if (i == bounce_max - 1 || (i - 2 >= 0 && clrlist[i - 2].emission != Vec(0))) clr = clrlist[i].emission + (clr * clrlist[i].clr) * 1.15 * rrFactor; else clr = clrlist[i].emission + clr * 1.15 * rrFactor; } } } __global__ void calc_render(int spt, int bounce_max, Data* clrlist, float refr_ind, int spp, Scene** scene, Vec* pix, curandState* rand_state, int actual, int width, int height, int penetration_index) { int row = threadIdx.x + blockIdx.x * blockDim.x; int col = threadIdx.y + blockIdx.y * blockDim.y; if ((col >= width) || (row >= height)) return; int pixel_index = col * width + row; curand_init((double)((actual + 1) * pixel_index), pixel_index, 0, &rand_state[pixel_index]); for (int s = 0; s < spt; s++) { //curandState localState = rand_state[pixel_index]; Vec clr = Vec(0, 0, 0); Ray ray; ray.o = (Vec(0, 0, 0)); // rays start out from here Vec cam = camcr(col, row, width, height); // construct image plane coordinates cam.x = cam.x + (RND(rand_state[pixel_index])) / 700; // anti-aliasing for free cam.y = cam.y + (RND(rand_state[pixel_index])) / 700; ray.d = (cam - ray.o).norm(); // point from the origin to the camera plane trace(&(clrlist[pixel_index * bounce_max]), ray, scene, clr, refr_ind, bounce_max, rand_state[pixel_index], penetration_index); pix[pixel_index] = pix[pixel_index] + clr; } } __global__ void create_world(Obj** d_list, int size, Scene** d_scene) { d_list[0] = new Sphere(1.05, Vec(-0.75, -1.45, -4.4)); d_list[0]->setMat(Vec(4, 8, 4), Vec(0), 2); d_list[1] = new Sphere(0.5, Vec(1.5, -1.8, -3.7)); d_list[1]->setMat(Vec(1, 1, 1), Vec(0), 3); d_list[2] = new Sphere(0.6, Vec(-1.75, -1.95, -3.1)); d_list[2]->setMat(Vec(4, 4, 12), Vec(0), 1); d_list[3] = new Plane(2.5, Vec(0, 1, 0)); d_list[3]->setMat(Vec(6, 6, 6), Vec(0), 1); d_list[4] = new Plane(5.5, Vec(0, 0, 1)); d_list[4]->setMat(Vec(6, 6, 6), Vec(0), 1); d_list[5] = new Plane(2.75, Vec(1, 0, 0)); d_list[5]->setMat(Vec(10, 2, 2), Vec(0), 1); d_list[6] = new Plane(2.75, Vec(-1, 0, 0)); d_list[6]->setMat(Vec(2, 10, 2), Vec(0), 1); d_list[7] = new Plane(3.0, Vec(0, -1, 0)); d_list[7]->setMat(Vec(6, 6, 6), Vec(0), 1); d_list[8] = new Plane(0.5, Vec(0, 0, -1)); d_list[8]->setMat(Vec(6, 6, 6), Vec(0), 1); d_list[9] = new Sphere(0.5, Vec(0, 1.9, -3)); d_list[9]->setMat(Vec(2, 2, 10), Vec(254 * 30, 248 * 30, 221 * 30), 1); *d_scene = new Scene(d_list, size); } void render(int id, int size, int spp, double refr_index, int spt) { srand(time(NULL)); int tx = 16; int ty = 16; int bounce_max = 7; int obj_num = 10; int penetration_index = 1; Obj** list; cudaMalloc((void**)&list, obj_num * 
sizeof(Obj*)); Scene** scene; cudaMalloc((void**)&scene, sizeof(Scene*)); create_world<<<1, 1>>>(list, obj_num, scene); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); width = size; height = size; dim3 blocks(width / tx + 1, height / ty + 1); dim3 threads(tx, ty); Vec* d_pix; cudaMalloc((void**)&d_pix, width * height * sizeof(Vec)); Vec* h_pix = (Vec*)malloc(width * height * sizeof(Vec)); curandState* d_rand_state; cudaMalloc((void**)&d_rand_state, width * height * sizeof(curandState)); Data* clrlist; cudaMalloc((void**)&clrlist, sizeof(Data) * bounce_max * width * height); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); // correlated Halton-sequence dimensions Halton hal, hal2; hal.number(0, 2); hal2.number(0, 2); bool running = true; for (int s = 0; s < spp; s += spt) { if (running) { calc_render<<<blocks, threads>>>(spt, bounce_max, clrlist, refr_index, spp, scene, d_pix, d_rand_state, s, width, height, penetration_index); checkCudaErrors(cudaGetLastError()); checkCudaErrors(cudaDeviceSynchronize()); } if (!running) return; cudaMemcpy(h_pix, d_pix, width * height * sizeof(Vec), cudaMemcpyDeviceToHost); render(h_pix, s + spt); } cudaFree(d_rand_state); } int main() { render(1, 500, 20000, 1.5, 500); }
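The glass branch of trace() above evaluates the Fresnel term with the Schlick approximation named in its comment. A minimal standalone sketch of that term, with a hypothetical function name and double-precision signature chosen for illustration:

// Schlick's approximation: R(theta) = R0 + (1 - R0) * (1 - cos(theta))^5,
// where R0 = ((n1 - n2) / (n1 + n2))^2 is the reflectance at normal incidence.
__host__ __device__ double schlick_reflectance(double cos_theta, double n1, double n2) {
    double r0 = (n1 - n2) / (n1 + n2);
    r0 = r0 * r0;
    double m = 1.0 - cos_theta;
    return r0 + (1.0 - r0) * m * m * m * m * m;
}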
c31f6075b46a304f467df1df98832d0ae413ad86.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//----------------------------------------------------------------------------------------
/**
 * @file CudaMarch.cuh
 * @author Daniel Princ
 * @date 2012/12/13
 * @brief Prepared file for marching cubes on GPU, not implemented
 *
 * This file is preparation for marching cubes in cuda,
 * however it is not implemented.
 *
 */
//----------------------------------------------------------------------------------------
#include "cudaCommon.cuh"
#include "GPUComputation.cuh"

Voxel * device_data_pointer = 0;

__device__ unsigned long getThreadID() {
    return NULL;
}

__global__ void marchCubes(Voxel * data, float * verticies, int count) {
}

float * vertex_data;
int vertex_count;

void cudaMarchInit(Voxel * host_data) {
    device_data_pointer = cudaGetDeviceDataPointer();
    //host_data_pointer = device_data;

    CHECK_ERR(hipMalloc((void**)&vertex_data, DATA_SIZE * 3 * sizeof(float)));
    CHECK_ERR(hipMalloc((void**)&vertex_count, sizeof(int)));
}

void cudaMarchingCubes() {
}
c31f6075b46a304f467df1df98832d0ae413ad86.cu
//----------------------------------------------------------------------------------------
/**
 * @file CudaMarch.cuh
 * @author Daniel Princ
 * @date 2012/12/13
 * @brief Prepared file for marching cubes on GPU, not implemented
 *
 * This file is preparation for marching cubes in cuda,
 * however it is not implemented.
 *
 */
//----------------------------------------------------------------------------------------
#include "cudaCommon.cuh"
#include "GPUComputation.cuh"

Voxel * device_data_pointer = 0;

__device__ unsigned long getThreadID() {
    return NULL;
}

__global__ void marchCubes(Voxel * data, float * verticies, int count) {
}

float * vertex_data;
int vertex_count;

void cudaMarchInit(Voxel * host_data) {
    device_data_pointer = cudaGetDeviceDataPointer();
    //host_data_pointer = device_data;

    CHECK_ERR(cudaMalloc((void**)&vertex_data, DATA_SIZE * 3 * sizeof(float)));
    CHECK_ERR(cudaMalloc((void**)&vertex_count, sizeof(int)));
}

void cudaMarchingCubes() {
}
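The getThreadID() stub in the file above returns NULL and is left unimplemented. For a flat 1D launch the usual computation is a one-liner; this sketch uses a different name so it is not mistaken for the author's intended implementation:

// Global index of the calling thread in a 1D grid of 1D blocks.
__device__ unsigned long getGlobalThreadId1D() {
    return (unsigned long)blockIdx.x * blockDim.x + threadIdx.x;
}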
70783034c0deb3445bff7f38c0509a92af09f6a4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * \file imagemodel_k.cu * \brief Kernel declarations for image model computation. * \copyright 2015, Juan David Adarve, ANU. See AUTHORS for more details * \license 3-clause BSD, see LICENSE for more details */ #include "flowfilter/gpu/device/image_k.h" #include "flowfilter/gpu/device/imagemodel_k.h" namespace flowfilter { namespace gpu { //###################### // 5 support //###################### #define IMS_R 2 #define IMS_W 5 __constant__ float smooth_mask[] = {0.0625, 0.25, 0.375, 0.25, 0.0625}; __constant__ float diff_mask[] = {-0.2, -0.1, 0.0, 0.1, 0.2}; /** * \brief Apply a smooth mask to input image in X and Y directions. * * NOTE: reading float, either from a float image or a normalized * image is faster than reading unsigned char directly. */ __global__ void imagePrefilter_k(hipTextureObject_t inputImage, gpuimage_t<float2> imgPrefiltered) { const int height = imgPrefiltered.height; const int width = imgPrefiltered.width; // pixel coordinate const int2 pix = make_int2(blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y); if(pix.x >= width || pix.y >= height) { return; } //################################# // SMOOTHING IN X //################################# float smooth_x = 0.0f; #pragma unroll for(int c = -IMS_R; c <= IMS_R; c ++) { smooth_x += smooth_mask[c + IMS_R] * tex2D<float>(inputImage, pix.x + c, pix.y); } //################################# // SMOOTHING IN Y //################################# float smooth_y = 0.0f; #pragma unroll for(int r = -IMS_R; r <= IMS_R; r ++) { smooth_y += smooth_mask[r + IMS_R] * tex2D<float>(inputImage, pix.x, pix.y + r); } //################################# // PACK RESULTS //################################# // {smooth_y, smooth_x} *coordPitch(imgPrefiltered, pix) = make_float2(smooth_y, smooth_x); } /** * \brief Compute image gradient and constant term from XY smoothed image. */ __global__ void imageModel_k(hipTextureObject_t imgPrefiltered, gpuimage_t<float> imgConstant, gpuimage_t<float2> imgGradient) { const int height = imgConstant.height; const int width = imgConstant.width; // pixel coordinate const int2 pix = make_int2(blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y); if(pix.x >= width || pix.y >= height) { return; } // imgPrefiltered texture element float2 imElement; float diff_x = 0.0; float diff_y = 0.0; float smooth = 0.0; //################################# // DIFFERENCING IN X //################################# #pragma unroll for(int c = -IMS_R; c <= IMS_R; c ++) { // texture coordinate imElement = tex2D<float2>(imgPrefiltered, pix.x + c, pix.y); // convolution with difference kernel diff_x += diff_mask[c + IMS_R]*imElement.x; // convolution with smooth kernel smooth += smooth_mask[c + IMS_R]*imElement.x; } //################################# // DIFFERENCING IN Y //################################# #pragma unroll for(int r = -IMS_R; r <= IMS_R; r ++) { imElement = tex2D<float2>(imgPrefiltered, pix.x, pix.y + r); // convolution difference kernel diff_y += diff_mask[r + IMS_R]*imElement.y; } //################################# // PACK RESULTS //################################# // {diff_x, diff_y} *coordPitch(imgGradient, pix) = make_float2(diff_x, diff_y); *coordPitch(imgConstant, pix) = smooth; } }; // namespace gpu }; // namespace flowfilter
70783034c0deb3445bff7f38c0509a92af09f6a4.cu
/** * \file imagemodel_k.cu * \brief Kernel declarations for image model computation. * \copyright 2015, Juan David Adarve, ANU. See AUTHORS for more details * \license 3-clause BSD, see LICENSE for more details */ #include "flowfilter/gpu/device/image_k.h" #include "flowfilter/gpu/device/imagemodel_k.h" namespace flowfilter { namespace gpu { //###################### // 5 support //###################### #define IMS_R 2 #define IMS_W 5 __constant__ float smooth_mask[] = {0.0625, 0.25, 0.375, 0.25, 0.0625}; __constant__ float diff_mask[] = {-0.2, -0.1, 0.0, 0.1, 0.2}; /** * \brief Apply a smooth mask to input image in X and Y directions. * * NOTE: reading float, either from a float image or a normalized * image is faster than reading unsigned char directly. */ __global__ void imagePrefilter_k(cudaTextureObject_t inputImage, gpuimage_t<float2> imgPrefiltered) { const int height = imgPrefiltered.height; const int width = imgPrefiltered.width; // pixel coordinate const int2 pix = make_int2(blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y); if(pix.x >= width || pix.y >= height) { return; } //################################# // SMOOTHING IN X //################################# float smooth_x = 0.0f; #pragma unroll for(int c = -IMS_R; c <= IMS_R; c ++) { smooth_x += smooth_mask[c + IMS_R] * tex2D<float>(inputImage, pix.x + c, pix.y); } //################################# // SMOOTHING IN Y //################################# float smooth_y = 0.0f; #pragma unroll for(int r = -IMS_R; r <= IMS_R; r ++) { smooth_y += smooth_mask[r + IMS_R] * tex2D<float>(inputImage, pix.x, pix.y + r); } //################################# // PACK RESULTS //################################# // {smooth_y, smooth_x} *coordPitch(imgPrefiltered, pix) = make_float2(smooth_y, smooth_x); } /** * \brief Compute image gradient and constant term from XY smoothed image. */ __global__ void imageModel_k(cudaTextureObject_t imgPrefiltered, gpuimage_t<float> imgConstant, gpuimage_t<float2> imgGradient) { const int height = imgConstant.height; const int width = imgConstant.width; // pixel coordinate const int2 pix = make_int2(blockIdx.x*blockDim.x + threadIdx.x, blockIdx.y*blockDim.y + threadIdx.y); if(pix.x >= width || pix.y >= height) { return; } // imgPrefiltered texture element float2 imElement; float diff_x = 0.0; float diff_y = 0.0; float smooth = 0.0; //################################# // DIFFERENCING IN X //################################# #pragma unroll for(int c = -IMS_R; c <= IMS_R; c ++) { // texture coordinate imElement = tex2D<float2>(imgPrefiltered, pix.x + c, pix.y); // convolution with difference kernel diff_x += diff_mask[c + IMS_R]*imElement.x; // convolution with smooth kernel smooth += smooth_mask[c + IMS_R]*imElement.x; } //################################# // DIFFERENCING IN Y //################################# #pragma unroll for(int r = -IMS_R; r <= IMS_R; r ++) { imElement = tex2D<float2>(imgPrefiltered, pix.x, pix.y + r); // convolution difference kernel diff_y += diff_mask[r + IMS_R]*imElement.y; } //################################# // PACK RESULTS //################################# // {diff_x, diff_y} *coordPitch(imgGradient, pix) = make_float2(diff_x, diff_y); *coordPitch(imgConstant, pix) = smooth; } }; // namespace gpu }; // namespace flowfilter
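Both kernels above bounds-check the pixel coordinate, which pairs naturally with a ceiling-division launch configuration on the host. A hedged sketch of such a launcher; the wrapper name and the 32x8 block shape are assumptions, and it is assumed to sit in the same flowfilter::gpu namespace as the kernels:

// Hypothetical host-side launcher for imagePrefilter_k.
void launchImagePrefilter(cudaTextureObject_t inputImage,
                          gpuimage_t<float2> imgPrefiltered) {
    dim3 block(32, 8);
    dim3 grid((imgPrefiltered.width  + block.x - 1) / block.x,   // ceil(width / 32)
              (imgPrefiltered.height + block.y - 1) / block.y);  // ceil(height / 8)
    imagePrefilter_k<<<grid, block>>>(inputImage, imgPrefiltered);
}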
7af2d301819ef0684667f461ac066cf565b25fc4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @author Mark Gates @author Tingxing Dong @author Azzam Haidar @generated from magmablas/zgemv_fermi.cu normal z -> c, Tue Feb 9 16:05:35 2016 */ #include "magma_internal.h" #include "commonblas_c.h" #include "magma_templates.h" #define PRECISION_c #include "gemv_template_device.cuh" #include "gemv_config/gemvn_param.h" #include "gemv_config/gemvt_param.h" #define version(s,v) s ## _V_ ## v ////////////////////////////////////////////////////////////////////////////////////////// // NoTrans kernel template<const int DIM_X, const int DIM_Y, const int TILE_SIZE> __global__ void cgemvn_template_kernel_fermi( int m, int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) gemvn_template_device<magmaFloatComplex, DIM_X, DIM_Y, TILE_SIZE> (m, n, alpha, A, lda, x, incx, beta, y, incy); #endif /* (__CUDA_ARCH__ >= 200) */ } ////////////////////////////////////////////////////////////////////////////////////////// // Trans/ConjTans kernel template<const int DIM_X, const int DIM_Y, const int TILE_SIZE, magma_trans_t trans> __global__ void cgemvc_template_kernel_fermi( int m, int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) gemvc_template_device< magmaFloatComplex, DIM_X, DIM_Y, TILE_SIZE, trans > (m, n, alpha, A, lda, x, incx, beta, y, incy); #endif /* (__CUDA_ARCH__ >= 200) */ } ////////////////////////////////////////////////////////////////////////////////////////// // NoTrans CPU driver template<const int DIM_X, const int DIM_Y, const int TILE_SIZE> void cgemvn_template_fermi( magma_int_t m, magma_int_t n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, magma_int_t lda, const magmaFloatComplex * __restrict__ x, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, magma_int_t incy, magma_queue_t queue) { dim3 grid( magma_ceildiv(m, TILE_SIZE), 1 ); dim3 threads( DIM_X, DIM_Y ); hipLaunchKernelGGL(( cgemvn_template_kernel_fermi<DIM_X, DIM_Y, TILE_SIZE>) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, alpha, A, lda, x, incx, beta, y, incy); } ////////////////////////////////////////////////////////////////////////////////////////// // Trans/ConjTans CPU driver template<const int DIM_X, const int DIM_Y, const int TILE_SIZE> void cgemvc_template_fermi( magma_trans_t trans, magma_int_t m, magma_int_t n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, magma_int_t lda, const magmaFloatComplex * __restrict__ x, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, magma_int_t incy, magma_queue_t queue) { dim3 grid ( magma_ceildiv(n, TILE_SIZE), 1 ); dim3 threads ( DIM_X, DIM_Y ); if (trans == MagmaConjTrans) { hipLaunchKernelGGL(( cgemvc_template_kernel_fermi< DIM_X, DIM_Y, TILE_SIZE, MagmaConjTrans >) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, alpha, A, lda, x, incx, beta, y, incy); } else { hipLaunchKernelGGL(( cgemvc_template_kernel_fermi< DIM_X, DIM_Y, TILE_SIZE, MagmaTrans >) , dim3(grid), dim3(threads), 0, 
queue->cuda_stream() , m, n, alpha, A, lda, x, incx, beta, y, incy); } } ////////////////////////////////////////////////////////////////////////////////////////// /** Purpose ------- CGEMV performs one of the matrix-vector operations y := alpha*A*x + beta*y, or y := alpha*A**T*x + beta*y, or y := alpha*A**H*x + beta*y, where alpha and beta are scalars, x and y are vectors and A is an m by n matrix. Arguments ---------- @param[in] trans magma_trans_t On entry, TRANS specifies the operation to be performed as follows: - = MagmaNoTrans: y := alpha*A *x + beta*y - = MagmaTrans: y := alpha*A^T*x + beta*y - = MagmaConjTrans: y := alpha*A^H*x + beta*y @param[in] m INTEGER On entry, m specifies the number of rows of the matrix A. @param[in] n INTEGER On entry, n specifies the number of columns of the matrix A @param[in] alpha COMPLEX On entry, ALPHA specifies the scalar alpha. @param[in] dA COMPLEX array of dimension ( LDDA, n ) on the GPU. @param[in] ldda INTEGER LDDA specifies the leading dimension of A. @param[in] dx COMPLEX array of dimension n if trans == MagmaNoTrans m if trans == MagmaTrans or MagmaConjTrans @param[in] incx Specifies the increment for the elements of X. INCX must not be zero. @param[in] beta COMPLEX On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. @param[out] dy COMPLEX array of dimension m if trans == MagmaNoTrans n if trans == MagmaTrans or MagmaConjTrans @param[in] incy Specifies the increment for the elements of Y. INCY must not be zero. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_cblas2 ********************************************************************/ extern "C" void magmablas_cgemv_q( magma_trans_t trans, magma_int_t m, magma_int_t n, magmaFloatComplex alpha, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloatComplex_const_ptr dx, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex_ptr dy, magma_int_t incy, magma_queue_t queue) { magma_int_t info = 0; if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < m ) info = -6; else if ( incx == 0 ) info = -8; else if ( incy == 0 ) info = -11; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } // -------------------- // CUDA ARCH 2.x (Fermi) version if ( trans == MagmaNoTrans ) { if (m <= 256) { cgemvn_template_fermi<version(N, 137)> ( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue ); } else { cgemvn_template_fermi<version(N, 140)> ( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue ); } } else { cgemvc_template_fermi<version(T, 189)> ( trans, m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue ); } }
7af2d301819ef0684667f461ac066cf565b25fc4.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @author Mark Gates @author Tingxing Dong @author Azzam Haidar @generated from magmablas/zgemv_fermi.cu normal z -> c, Tue Feb 9 16:05:35 2016 */ #include "magma_internal.h" #include "commonblas_c.h" #include "magma_templates.h" #define PRECISION_c #include "gemv_template_device.cuh" #include "gemv_config/gemvn_param.h" #include "gemv_config/gemvt_param.h" #define version(s,v) s ## _V_ ## v ////////////////////////////////////////////////////////////////////////////////////////// // NoTrans kernel template<const int DIM_X, const int DIM_Y, const int TILE_SIZE> __global__ void cgemvn_template_kernel_fermi( int m, int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) gemvn_template_device<magmaFloatComplex, DIM_X, DIM_Y, TILE_SIZE> (m, n, alpha, A, lda, x, incx, beta, y, incy); #endif /* (__CUDA_ARCH__ >= 200) */ } ////////////////////////////////////////////////////////////////////////////////////////// // Trans/ConjTans kernel template<const int DIM_X, const int DIM_Y, const int TILE_SIZE, magma_trans_t trans> __global__ void cgemvc_template_kernel_fermi( int m, int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) gemvc_template_device< magmaFloatComplex, DIM_X, DIM_Y, TILE_SIZE, trans > (m, n, alpha, A, lda, x, incx, beta, y, incy); #endif /* (__CUDA_ARCH__ >= 200) */ } ////////////////////////////////////////////////////////////////////////////////////////// // NoTrans CPU driver template<const int DIM_X, const int DIM_Y, const int TILE_SIZE> void cgemvn_template_fermi( magma_int_t m, magma_int_t n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, magma_int_t lda, const magmaFloatComplex * __restrict__ x, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, magma_int_t incy, magma_queue_t queue) { dim3 grid( magma_ceildiv(m, TILE_SIZE), 1 ); dim3 threads( DIM_X, DIM_Y ); cgemvn_template_kernel_fermi<DIM_X, DIM_Y, TILE_SIZE> <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, alpha, A, lda, x, incx, beta, y, incy); } ////////////////////////////////////////////////////////////////////////////////////////// // Trans/ConjTans CPU driver template<const int DIM_X, const int DIM_Y, const int TILE_SIZE> void cgemvc_template_fermi( magma_trans_t trans, magma_int_t m, magma_int_t n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, magma_int_t lda, const magmaFloatComplex * __restrict__ x, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, magma_int_t incy, magma_queue_t queue) { dim3 grid ( magma_ceildiv(n, TILE_SIZE), 1 ); dim3 threads ( DIM_X, DIM_Y ); if (trans == MagmaConjTrans) { cgemvc_template_kernel_fermi< DIM_X, DIM_Y, TILE_SIZE, MagmaConjTrans > <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, alpha, A, lda, x, incx, beta, y, incy); } else { cgemvc_template_kernel_fermi< DIM_X, DIM_Y, TILE_SIZE, MagmaTrans > <<< grid, threads, 0, queue->cuda_stream() >>> (m, n, alpha, A, lda, x, incx, beta, y, incy); } } ////////////////////////////////////////////////////////////////////////////////////////// /** Purpose 
------- CGEMV performs one of the matrix-vector operations y := alpha*A*x + beta*y, or y := alpha*A**T*x + beta*y, or y := alpha*A**H*x + beta*y, where alpha and beta are scalars, x and y are vectors and A is an m by n matrix. Arguments ---------- @param[in] trans magma_trans_t On entry, TRANS specifies the operation to be performed as follows: - = MagmaNoTrans: y := alpha*A *x + beta*y - = MagmaTrans: y := alpha*A^T*x + beta*y - = MagmaConjTrans: y := alpha*A^H*x + beta*y @param[in] m INTEGER On entry, m specifies the number of rows of the matrix A. @param[in] n INTEGER On entry, n specifies the number of columns of the matrix A @param[in] alpha COMPLEX On entry, ALPHA specifies the scalar alpha. @param[in] dA COMPLEX array of dimension ( LDDA, n ) on the GPU. @param[in] ldda INTEGER LDDA specifies the leading dimension of A. @param[in] dx COMPLEX array of dimension n if trans == MagmaNoTrans m if trans == MagmaTrans or MagmaConjTrans @param[in] incx Specifies the increment for the elements of X. INCX must not be zero. @param[in] beta COMPLEX On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. @param[out] dy COMPLEX array of dimension m if trans == MagmaNoTrans n if trans == MagmaTrans or MagmaConjTrans @param[in] incy Specifies the increment for the elements of Y. INCY must not be zero. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_cblas2 ********************************************************************/ extern "C" void magmablas_cgemv_q( magma_trans_t trans, magma_int_t m, magma_int_t n, magmaFloatComplex alpha, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloatComplex_const_ptr dx, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex_ptr dy, magma_int_t incy, magma_queue_t queue) { magma_int_t info = 0; if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < m ) info = -6; else if ( incx == 0 ) info = -8; else if ( incy == 0 ) info = -11; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } // -------------------- // CUDA ARCH 2.x (Fermi) version if ( trans == MagmaNoTrans ) { if (m <= 256) { cgemvn_template_fermi<version(N, 137)> ( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue ); } else { cgemvn_template_fermi<version(N, 140)> ( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue ); } } else { cgemvc_template_fermi<version(T, 189)> ( trans, m, n, alpha, dA, ldda, dx, incx, beta, dy, incy, queue ); } }
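The documentation block above defines the operation the Fermi kernels implement. As a readability aid, here is a naive CPU reference for the NoTrans case, simplified to real float with unit strides (an illustrative sketch, not part of MAGMA):

// y[i] = alpha * sum_j A(i,j) * x[j] + beta * y[i]; column-major A with leading dimension lda.
void gemv_notrans_ref(int m, int n, float alpha, const float *A, int lda,
                      const float *x, float beta, float *y) {
    for (int i = 0; i < m; ++i) {
        float acc = 0.0f;
        for (int j = 0; j < n; ++j)
            acc += A[i + (size_t)j * lda] * x[j];
        y[i] = alpha * acc + beta * y[i];
    }
}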
f4aaccbc02598c45e1167c287d15503cd1dd063f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> const int threshold=400; const int m=100; __global__ void selection_sort(int *a, int left, int right) { int temp; for(int i=left;i<right;i++) for(int j=i+1;j<=right;j++) if(a[i]>a[j]) { temp=a[i]; a[i]=a[j]; a[j]=temp; } } __global__ void partition(int *a,int left,int right,int pivot,int *al,int *ah) { int l,h; int diff=(right-left+1)/m; int k1=threadIdx.x*diff+left; int k2=k1+diff-1; if(threadIdx.x==m-1) k2=right; l=h=k1; for(int i=k1;i<=k2;i++) { al[i]=ah[i]=-999; } for(int i=k1;i<=k2;i++) { if(a[i]<pivot) { al[l++]=a[i]; } else { if(a[i]>pivot) { ah[h++]=a[i]; } } } } void quicksort(int *a, const int left, const int right) { if (right-left <= threshold) { int *ad; hipMalloc((void **)&ad,(right-left+1)*sizeof(int)); hipMemcpy(ad,a,(right-left+1)*sizeof(int),hipMemcpyHostToDevice); hipLaunchKernelGGL(( selection_sort), dim3(1),dim3(1), 0, 0, ad, left, right); hipMemcpy(a,ad,(right-left+1)*sizeof(int),hipMemcpyDeviceToHost); return; } int pivot=a[left]; int *al,*ah; int *ad; hipMalloc((void **)&ad,(right-left+1)*sizeof(int)); hipMalloc((void **)&al,(right-left+1)*sizeof(int)); hipMalloc((void **)&ah,(right-left+1)*sizeof(int)); hipMemcpy(ad,a,(right-left+1)*sizeof(int),hipMemcpyHostToDevice); hipLaunchKernelGGL(( partition), dim3(1),dim3(m), 0, 0, ad,left,right,pivot,al,ah); int al_h[right-left+1],ah_h[right-left+1]; hipMemcpy(al_h,al,(right-left+1)*sizeof(int),hipMemcpyDeviceToHost); hipMemcpy(ah_h,ah,(right-left+1)*sizeof(int),hipMemcpyDeviceToHost); int i=0,k=0; while(i<right-left+1) { while(al_h[i]==-999 && i<right-left+1) i++; while(al_h[i]!=-999 && i<right-left+1) { al_h[k++]=al_h[i++]; } } quicksort(al_h,0,k-1); int p=left; int x=0; while(x<k) { a[p++]=al_h[x++]; } a[p]=pivot; i=0; k=0; while(i<right-left+1) { while(ah_h[i]==-999 && i<right-left+1) i++; while(ah_h[i]!=-999 && i<right-left+1) { ah_h[k++]=ah_h[i++]; } } quicksort(ah_h,0,k-1); i=0; p++; while(i<k) { a[p++]=ah_h[i++]; } } int main() { int n; printf("\nEnter the number of elements you want to sort: "); scanf("%d", &n); int a[n]; time_t t; srand((unsigned)time(&t)); float start, end, total_time_taken; int x,flag; for (unsigned i = 0 ; i < n ; i++) { x=rand()%n; flag=0; for(int j=0;j<i;j++) { if(a[j]==x) { i--; flag=1; break; } } if(flag==0) a[i]=x; } //printf("\n\n original array\n"); //for(int i=0;i<n;i++) // printf("%d\t ",a[i]); start = clock(); quicksort(a,0,n-1); end = clock(); total_time_taken = ((double) (end - start)) / CLOCKS_PER_SEC; printf("\n Time taken - %f\n",total_time_taken); //printf("\n\n after sorting\n"); //for(int i=0;i<n;i++) //printf("%d\t ",a[i]); }
f4aaccbc02598c45e1167c287d15503cd1dd063f.cu
#include <stdio.h> const int threshold=400; const int m=100; __global__ void selection_sort(int *a, int left, int right) { int temp; for(int i=left;i<right;i++) for(int j=i+1;j<=right;j++) if(a[i]>a[j]) { temp=a[i]; a[i]=a[j]; a[j]=temp; } } __global__ void partition(int *a,int left,int right,int pivot,int *al,int *ah) { int l,h; int diff=(right-left+1)/m; int k1=threadIdx.x*diff+left; int k2=k1+diff-1; if(threadIdx.x==m-1) k2=right; l=h=k1; for(int i=k1;i<=k2;i++) { al[i]=ah[i]=-999; } for(int i=k1;i<=k2;i++) { if(a[i]<pivot) { al[l++]=a[i]; } else { if(a[i]>pivot) { ah[h++]=a[i]; } } } } void quicksort(int *a, const int left, const int right) { if (right-left <= threshold) { int *ad; cudaMalloc((void **)&ad,(right-left+1)*sizeof(int)); cudaMemcpy(ad,a,(right-left+1)*sizeof(int),cudaMemcpyHostToDevice); selection_sort<<<1,1>>>(ad, left, right); cudaMemcpy(a,ad,(right-left+1)*sizeof(int),cudaMemcpyDeviceToHost); return; } int pivot=a[left]; int *al,*ah; int *ad; cudaMalloc((void **)&ad,(right-left+1)*sizeof(int)); cudaMalloc((void **)&al,(right-left+1)*sizeof(int)); cudaMalloc((void **)&ah,(right-left+1)*sizeof(int)); cudaMemcpy(ad,a,(right-left+1)*sizeof(int),cudaMemcpyHostToDevice); partition<<<1,m>>>(ad,left,right,pivot,al,ah); int al_h[right-left+1],ah_h[right-left+1]; cudaMemcpy(al_h,al,(right-left+1)*sizeof(int),cudaMemcpyDeviceToHost); cudaMemcpy(ah_h,ah,(right-left+1)*sizeof(int),cudaMemcpyDeviceToHost); int i=0,k=0; while(i<right-left+1) { while(al_h[i]==-999 && i<right-left+1) i++; while(al_h[i]!=-999 && i<right-left+1) { al_h[k++]=al_h[i++]; } } quicksort(al_h,0,k-1); int p=left; int x=0; while(x<k) { a[p++]=al_h[x++]; } a[p]=pivot; i=0; k=0; while(i<right-left+1) { while(ah_h[i]==-999 && i<right-left+1) i++; while(ah_h[i]!=-999 && i<right-left+1) { ah_h[k++]=ah_h[i++]; } } quicksort(ah_h,0,k-1); i=0; p++; while(i<k) { a[p++]=ah_h[i++]; } } int main() { int n; printf("\nEnter the number of elements you want to sort: "); scanf("%d", &n); int a[n]; time_t t; srand((unsigned)time(&t)); float start, end, total_time_taken; int x,flag; for (unsigned i = 0 ; i < n ; i++) { x=rand()%n; flag=0; for(int j=0;j<i;j++) { if(a[j]==x) { i--; flag=1; break; } } if(flag==0) a[i]=x; } //printf("\n\n original array\n"); //for(int i=0;i<n;i++) // printf("%d\t ",a[i]); start = clock(); quicksort(a,0,n-1); end = clock(); total_time_taken = ((double) (end - start)) / CLOCKS_PER_SEC; printf("\n Time taken - %f\n",total_time_taken); //printf("\n\n after sorting\n"); //for(int i=0;i<n;i++) //printf("%d\t ",a[i]); }
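The partition kernel above splits the inclusive range [left, right] into m contiguous chunks, one per thread, with the last thread absorbing the remainder. The same index arithmetic in isolation, as a small sketch:

// Chunk bounds for thread t of m over [left, right]; the last thread takes the leftover elements.
__host__ __device__ void chunk_bounds(int t, int m, int left, int right, int *k1, int *k2) {
    int diff = (right - left + 1) / m;
    *k1 = t * diff + left;
    *k2 = (t == m - 1) ? right : *k1 + diff - 1;
}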
09f1fd4152e09acbfcb2230fe5c23d69e5248698.hip
// !!! This is a file automatically generated by hipify!!! #include "tools.h" #include <stdlib.h> namespace pilar { __host__ __device__ Vector3f::Vector3f() { x = 0.0f; y = 0.0f; z = 0.0f; } __host__ __device__ Vector3f::Vector3f(float x, float y, float z) { this->x = x; this->y = y; this->z = z; } __host__ __device__ Vector3f::Vector3f(const Vector3i &v) { this->x = float(v.x); this->y = float(v.y); this->z = float(v.z); } __host__ __device__ Vector3f& Vector3f::operator= (Vector3f v) // operator= sets values of v to this Vector3f. example: v1 = v2 means that values of v2 are set onto v1 { x = v.x; y = v.y; z = v.z; return *this; } __host__ __device__ Vector3f& Vector3f::operator= (Vector3i v) { x = float(v.x); y = float(v.y); z = float(v.z); return *this; } __host__ __device__ Vector3f Vector3f::operator+ (Vector3f v) // operator+ is used to add two Vector3f's. operator+ returns a new Vector3f { return Vector3f(x + v.x, y + v.y, z + v.z); } __host__ __device__ Vector3f Vector3f::operator- (Vector3f v) // operator- is used to take difference of two Vector3f's. operator- returns a new Vector3f { return Vector3f(x - v.x, y - v.y, z - v.z); } __host__ __device__ Vector3f Vector3f::operator* (float value) // operator* is used to scale a Vector3f by a value. This value multiplies the Vector3f's x, y and z. { return Vector3f(x * value, y * value, z * value); } __host__ __device__ Vector3f Vector3f::operator/ (float value) // operator/ is used to scale a Vector3f by a value. This value divides the Vector3f's x, y and z. { return Vector3f(x / value, y / value, z / value); } __host__ __device__ Vector3f& Vector3f::operator+= (Vector3f v) // operator+= is used to add another Vector3f to this Vector3f. { x += v.x; y += v.y; z += v.z; return *this; } __host__ __device__ Vector3f& Vector3f::operator-= (Vector3f v) // operator-= is used to subtract another Vector3f from this Vector3f. { x -= v.x; y -= v.y; z -= v.z; return *this; } __host__ __device__ Vector3f& Vector3f::operator*= (float value) // operator*= is used to scale this Vector3f by a value. { x *= value; y *= value; z *= value; return *this; } __host__ __device__ Vector3f& Vector3f::operator/= (float value) // operator/= is used to scale this Vector3f by a value. { x /= value; y /= value; z /= value; return *this; } __host__ __device__ Vector3f Vector3f::operator- () // operator- is used to set this Vector3f's x, y, and z to the negative of them. { return Vector3f(-x, -y, -z); } __host__ __device__ float Vector3f::length() // length() returns the length of this Vector3f { return sqrtf(length_sqr()); } __host__ __device__ float Vector3f::length_sqr() //length_sqr()return the squared length of this Vector3f { return x*x + y*y + z*z; } __host__ __device__ float Vector3f::length_inverse() { float number = length_sqr(); if(number != 0) { float xhalf = 0.5f*number; int i = *(int*)&number; // get bits for floating value i = 0x5f375a86- (i>>1); // gives initial guess y0 number = *(float*)&i; // convert bits back to float number = number*(1.5f-xhalf*number*number); // Newton step, repeating increases accuracy } return number; } __host__ __device__ void Vector3f::unitize() // unitize() normalizes this Vector3f that its direction remains the same but its length is 1. { float length = this->length(); if (length == 0) return; x /= length; y /= length; z /= length; } __host__ __device__ Vector3f Vector3f::unit() // unit() returns a new Vector3f. The returned value is a unitized version of this Vector3f. 
{ float length = this->length(); if (length == 0) return *this; return Vector3f(x / length, y / length, z / length); } __host__ __device__ float Vector3f::dot(const Vector3f &v) { return x*v.x + y*v.y + z*v.z; } __host__ __device__ float Vector3f::dot(const Vector3i& v) { return x*v.x + y*v.y + z*v.z; } __host__ __device__ Vector3f Vector3f::cross(const Vector3f& v) { return Vector3f(y*v.z - z*v.y, z*v.x - x*v.z, x*v.y - y*v.x); } __host__ __device__ Vector3f Vector3f::cross(const Vector3i& v) { return Vector3f(y*v.z - z*v.y, z*v.x - x*v.z, x*v.y - y*v.x); } __host__ __device__ float Vector3f::determinant(const Vector3f& a, const Vector3f& b, const Vector3f& c) { float ff = a.x * b.y * c.z - a.x * b.z * c.y; float ss = a.y * b.z * c.x - a.y * b.x * c.z; float tt = a.z * b.x * c.y - a.z * b.y * c.x; return ff + ss + tt; } //Generate a random vector with component values in the starting at "low" up to "high", exclusive. //Make sure high is greater than low. __device__ Vector3f Vector3f::random(float low, float high, hiprandStatePhilox4_32_10_t* rng) { float x = low + hiprand_uniform(rng) * (high - low); float y = low + hiprand_uniform(rng) * (high - low); float z = low + hiprand_uniform(rng) * (high - low); return Vector3f(x,y,z); } __host__ Vector3f Vector3f::random(float low, float high) { float x = low + static_cast <float> (rand()) /( static_cast <float> (RAND_MAX/(high-low))); float y = low + static_cast <float> (rand()) /( static_cast <float> (RAND_MAX/(high-low))); float z = low + static_cast <float> (rand()) /( static_cast <float> (RAND_MAX/(high-low))); return Vector3f(x,y,z); } __host__ __device__ Vector3i::Vector3i() { x = 0; y = 0; z = 0; } __host__ __device__ Vector3i::Vector3i(int x, int y, int z) { this->x = x; this->y = y; this->z = z; } __host__ __device__ bool Vector3i::operator== (const Vector3i& v) const { return this->x == v.x && this->y == v.y && this->z == v.z; } __host__ __device__ bool Vector3i::operator!= (const Vector3i& v) const { return !(*this == v); } __host__ __device__ Vector3i& Vector3i::operator= (Vector3i v) // operator= sets values of v to this Vector3i. example: v1 = v2 means that values of v2 are set onto v1 { x = v.x; y = v.y; z = v.z; return *this; } __host__ __device__ Vector3i Vector3i::operator+ (Vector3i v) // operator+ is used to add two Vector3i's. operator+ returns a new Vector3i { return Vector3i(x + v.x, y + v.y, z + v.z); } __host__ __device__ int Vector3i::dot(const Vector3i& v) { return x*v.x + y*v.y + z*v.z; } __host__ __device__ float Vector3i::dot(const Vector3f& v) { return x*v.x + y*v.y + z*v.z; } __host__ __device__ Vector3i Vector3i::cross(const Vector3i& v) { return Vector3i(y*v.z - z*v.y, z*v.x - x*v.z, x*v.y - y*v.x); } __host__ __device__ Vector3f Vector3i::cross(const Vector3f& v) { return Vector3f(y*v.z - z*v.y, z*v.x - x*v.z, x*v.y - y*v.x); } __host__ __device__ int Vector3i::determinant(const Vector3i& a, const Vector3i& b, const Vector3i& c) { int ff = a.x * b.y * c.z - a.x * b.z * c.y; int ss = a.y * b.z * c.x - a.y * b.x * c.z; int tt = a.z * b.x * c.y - a.z * b.y * c.x; return ff + ss + tt; } }
09f1fd4152e09acbfcb2230fe5c23d69e5248698.cu
#include "tools.h" #include <stdlib.h> namespace pilar { __host__ __device__ Vector3f::Vector3f() { x = 0.0f; y = 0.0f; z = 0.0f; } __host__ __device__ Vector3f::Vector3f(float x, float y, float z) { this->x = x; this->y = y; this->z = z; } __host__ __device__ Vector3f::Vector3f(const Vector3i &v) { this->x = float(v.x); this->y = float(v.y); this->z = float(v.z); } __host__ __device__ Vector3f& Vector3f::operator= (Vector3f v) // operator= sets values of v to this Vector3f. example: v1 = v2 means that values of v2 are set onto v1 { x = v.x; y = v.y; z = v.z; return *this; } __host__ __device__ Vector3f& Vector3f::operator= (Vector3i v) { x = float(v.x); y = float(v.y); z = float(v.z); return *this; } __host__ __device__ Vector3f Vector3f::operator+ (Vector3f v) // operator+ is used to add two Vector3f's. operator+ returns a new Vector3f { return Vector3f(x + v.x, y + v.y, z + v.z); } __host__ __device__ Vector3f Vector3f::operator- (Vector3f v) // operator- is used to take difference of two Vector3f's. operator- returns a new Vector3f { return Vector3f(x - v.x, y - v.y, z - v.z); } __host__ __device__ Vector3f Vector3f::operator* (float value) // operator* is used to scale a Vector3f by a value. This value multiplies the Vector3f's x, y and z. { return Vector3f(x * value, y * value, z * value); } __host__ __device__ Vector3f Vector3f::operator/ (float value) // operator/ is used to scale a Vector3f by a value. This value divides the Vector3f's x, y and z. { return Vector3f(x / value, y / value, z / value); } __host__ __device__ Vector3f& Vector3f::operator+= (Vector3f v) // operator+= is used to add another Vector3f to this Vector3f. { x += v.x; y += v.y; z += v.z; return *this; } __host__ __device__ Vector3f& Vector3f::operator-= (Vector3f v) // operator-= is used to subtract another Vector3f from this Vector3f. { x -= v.x; y -= v.y; z -= v.z; return *this; } __host__ __device__ Vector3f& Vector3f::operator*= (float value) // operator*= is used to scale this Vector3f by a value. { x *= value; y *= value; z *= value; return *this; } __host__ __device__ Vector3f& Vector3f::operator/= (float value) // operator/= is used to scale this Vector3f by a value. { x /= value; y /= value; z /= value; return *this; } __host__ __device__ Vector3f Vector3f::operator- () // operator- is used to set this Vector3f's x, y, and z to the negative of them. { return Vector3f(-x, -y, -z); } __host__ __device__ float Vector3f::length() // length() returns the length of this Vector3f { return sqrtf(length_sqr()); } __host__ __device__ float Vector3f::length_sqr() //length_sqr()return the squared length of this Vector3f { return x*x + y*y + z*z; } __host__ __device__ float Vector3f::length_inverse() { float number = length_sqr(); if(number != 0) { float xhalf = 0.5f*number; int i = *(int*)&number; // get bits for floating value i = 0x5f375a86- (i>>1); // gives initial guess y0 number = *(float*)&i; // convert bits back to float number = number*(1.5f-xhalf*number*number); // Newton step, repeating increases accuracy } return number; } __host__ __device__ void Vector3f::unitize() // unitize() normalizes this Vector3f that its direction remains the same but its length is 1. { float length = this->length(); if (length == 0) return; x /= length; y /= length; z /= length; } __host__ __device__ Vector3f Vector3f::unit() // unit() returns a new Vector3f. The returned value is a unitized version of this Vector3f. 
{ float length = this->length(); if (length == 0) return *this; return Vector3f(x / length, y / length, z / length); } __host__ __device__ float Vector3f::dot(const Vector3f &v) { return x*v.x + y*v.y + z*v.z; } __host__ __device__ float Vector3f::dot(const Vector3i& v) { return x*v.x + y*v.y + z*v.z; } __host__ __device__ Vector3f Vector3f::cross(const Vector3f& v) { return Vector3f(y*v.z - z*v.y, z*v.x - x*v.z, x*v.y - y*v.x); } __host__ __device__ Vector3f Vector3f::cross(const Vector3i& v) { return Vector3f(y*v.z - z*v.y, z*v.x - x*v.z, x*v.y - y*v.x); } __host__ __device__ float Vector3f::determinant(const Vector3f& a, const Vector3f& b, const Vector3f& c) { float ff = a.x * b.y * c.z - a.x * b.z * c.y; float ss = a.y * b.z * c.x - a.y * b.x * c.z; float tt = a.z * b.x * c.y - a.z * b.y * c.x; return ff + ss + tt; } //Generate a random vector with component values in the starting at "low" up to "high", exclusive. //Make sure high is greater than low. __device__ Vector3f Vector3f::random(float low, float high, curandStatePhilox4_32_10_t* rng) { float x = low + curand_uniform(rng) * (high - low); float y = low + curand_uniform(rng) * (high - low); float z = low + curand_uniform(rng) * (high - low); return Vector3f(x,y,z); } __host__ Vector3f Vector3f::random(float low, float high) { float x = low + static_cast <float> (rand()) /( static_cast <float> (RAND_MAX/(high-low))); float y = low + static_cast <float> (rand()) /( static_cast <float> (RAND_MAX/(high-low))); float z = low + static_cast <float> (rand()) /( static_cast <float> (RAND_MAX/(high-low))); return Vector3f(x,y,z); } __host__ __device__ Vector3i::Vector3i() { x = 0; y = 0; z = 0; } __host__ __device__ Vector3i::Vector3i(int x, int y, int z) { this->x = x; this->y = y; this->z = z; } __host__ __device__ bool Vector3i::operator== (const Vector3i& v) const { return this->x == v.x && this->y == v.y && this->z == v.z; } __host__ __device__ bool Vector3i::operator!= (const Vector3i& v) const { return !(*this == v); } __host__ __device__ Vector3i& Vector3i::operator= (Vector3i v) // operator= sets values of v to this Vector3i. example: v1 = v2 means that values of v2 are set onto v1 { x = v.x; y = v.y; z = v.z; return *this; } __host__ __device__ Vector3i Vector3i::operator+ (Vector3i v) // operator+ is used to add two Vector3i's. operator+ returns a new Vector3i { return Vector3i(x + v.x, y + v.y, z + v.z); } __host__ __device__ int Vector3i::dot(const Vector3i& v) { return x*v.x + y*v.y + z*v.z; } __host__ __device__ float Vector3i::dot(const Vector3f& v) { return x*v.x + y*v.y + z*v.z; } __host__ __device__ Vector3i Vector3i::cross(const Vector3i& v) { return Vector3i(y*v.z - z*v.y, z*v.x - x*v.z, x*v.y - y*v.x); } __host__ __device__ Vector3f Vector3i::cross(const Vector3f& v) { return Vector3f(y*v.z - z*v.y, z*v.x - x*v.z, x*v.y - y*v.x); } __host__ __device__ int Vector3i::determinant(const Vector3i& a, const Vector3i& b, const Vector3i& c) { int ff = a.x * b.y * c.z - a.x * b.z * c.y; int ss = a.y * b.z * c.x - a.y * b.x * c.z; int tt = a.z * b.x * c.y - a.z * b.y * c.x; return ff + ss + tt; } }
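Vector3f::length_inverse() above is the classic fast inverse square root: a bit-level initial guess refined by one Newton-Raphson step, and its own comment notes that repeating the step increases accuracy. A sketch of the same trick with a second refinement step added (illustrative only, not how the file is written):

// Fast 1/sqrt(x) with two Newton-Raphson steps: y <- y * (1.5 - 0.5 * x * y * y).
__host__ __device__ float rsqrt_fast2(float x) {
    float xhalf = 0.5f * x;
    int i = *(int *)&x;              // reinterpret the float's bits
    i = 0x5f375a86 - (i >> 1);       // initial guess, same magic constant as the file
    float y = *(float *)&i;
    y = y * (1.5f - xhalf * y * y);  // first Newton step
    y = y * (1.5f - xhalf * y * y);  // second step tightens the estimate
    return y;
}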
bcbb24ffc06815e8a25d2db2c2d2f0d9c3856012.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void vectorAdd(const uint16_t* A, const uint16_t* B, uint16_t* C, uint32_t numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;

    if (i < numElements)
    {
        C[i] = A[i] + B[i];
    }
}
bcbb24ffc06815e8a25d2db2c2d2f0d9c3856012.cu
#include "includes.h" __global__ void vectorAdd(const uint16_t* A, const uint16_t* B, uint16_t* C, uint32_t numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } }
0dafa359d2e74e3c3d5319a4409305cfb201436f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* PDE solver Written by: Riccardo Fontanini Start date: 7 May 2018 Note: This program is a PDE solver R O T A S O P E R A T E N E T A R E P O S A T O R */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <math.h> #include <sys/time.h> #ifndef N_X #define N_x 10 #endif #ifndef N_y #define N_y 10 #endif #ifndef BLOCKDIM #define BLOCKDIM 32 #endif #ifndef MAXITER #define MAXITER 50 #endif #ifndef SIMTIME #define SIMTIME 200000 #endif __global__ void solve(double *grid, int odd) { //odd means: in odd rows swap odd, x%2 == odd -> elaborate int x = blockIdx.x * BLOCKDIM + threadIdx.x; int y = blockIdx.y * BLOCKDIM + threadIdx.y; if(y % 2 == 1 && odd == 0) odd = 1; else if(y % 2 == 1 && odd == 1) odd = 0; if(y == 0 || y >= N_y-1 || x == 0 || x >= N_x-1 || x % 2 == odd) return; *(grid + y*N_x + x) = 0.2 * (*(grid + y*N_x + x) + *(grid + (y-1)*N_x + x) + *(grid + (y+1)*N_x + x) + *(grid + y*N_x + x - 1) + *(grid + y*N_x + x + 1)); } __host__ void solve_CPU(double *grid){ int odd =0; for (int i = 0; i<MAXITER; i++) { odd = i%2; for (int y = 0; y < N_y; y++) { for (int x = 0; x < N_x; x++) { if(y == 0 || y >= N_y-1 || x == 0 || x >= N_x-1 || x % 2 == odd) continue; *(grid + y*N_x + x) = 0.2 * (*(grid + y*N_x + x) + *(grid + (y-1)*N_x + x) + *(grid + (y+1)*N_x + x) + *(grid + y*N_x + x - 1) + *(grid + y*N_x + x + 1)); } } #ifdef SIMULATION fprintf(stderr,"\033c"); fprintf (stderr, "Ciclo: %d\n", i); fprintf (stderr, "\tGRID \n\n"); double *ptrP = grid; for (int y = 0; y < N_y; y++) { for (int x = 0; x < N_x; x++, ptrP++) fprintf (stderr, "%3.f ", *ptrP); fprintf (stderr, "\n"); } usleep(SIMTIME); #endif } } __global__ void set_default(double *grid, double def) { int x = blockIdx.x * BLOCKDIM + threadIdx.x; int y = blockIdx.y * BLOCKDIM + threadIdx.y; if(y == 0 || y >= N_y-1 || x == 0 || x >= N_x-1) return; *(grid + y*N_x + x) = def; } int main () { double *grid; int gridx = N_x / BLOCKDIM; int gridy = N_y / BLOCKDIM; if( N_x % BLOCKDIM != 0) gridx++; if(N_y % BLOCKDIM != 0) gridy++; double toll = 0; //tollerance checked every cycle ifdef TOLLERANCE //TIME EVALUATION struct timespec start, execution; fprintf (stderr, "grid x : %d y: %d \n\n", gridx, gridy); dim3 blocksPerGrid (gridx, gridy); dim3 threadsPerBlock (BLOCKDIM, BLOCKDIM); hipMallocManaged ( &(grid), N_x * N_x * sizeof (double) ); /* SET BOUNDARY */ grid[0] = 120; grid[1] = 120; grid[2] = 120; grid[3] = 120; /* SET DEFAULT VALUE TO SPACE */ hipDeviceSynchronize (); hipLaunchKernelGGL(( set_default) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, grid, 50); hipDeviceSynchronize (); #ifdef ELABCPU clock_gettime(CLOCK_MONOTONIC_RAW, &start); solve_CPU(grid); clock_gettime(CLOCK_MONOTONIC_RAW, &execution); fprintf(stderr, "\n\nExecution time for CPU: %lu\n\n", (execution.tv_sec - start.tv_sec) * 1000000 + (execution.tv_nsec - start.tv_nsec) / 1000); #ifdef SHOWRESULT fprintf (stderr, "\tGRID CPU\n\n"); double *ptrC = grid; for (int y = 0; y < 10; y++) { for (int x = 0; x < 10; x++, ptrC++) fprintf (stderr, "%3.f ", *ptrC); ptrC = grid + N_x * (y+1); fprintf (stderr, "\n"); } #endif #endif hipDeviceSynchronize (); hipLaunchKernelGGL(( set_default) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, grid, 50); hipDeviceSynchronize (); for (int i =0; i<MAXITER; i++){ hipLaunchKernelGGL(( solve) , dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, grid, i%2); hipDeviceSynchronize (); #ifdef SIMULATION fprintf(stderr,"\033c"); fprintf 
(stderr, "Ciclo: %d\n", i); fprintf (stderr, "\tGRID \n\n"); double *ptrP = grid; for (int y = 0; y < N_y; y++) { for (int x = 0; x < N_x; x++, ptrP++) fprintf (stderr, "%3.f ", *ptrP); fprintf (stderr, "\n"); } usleep(SIMTIME); #endif #ifdef TOLLERANCE double newtoll = 0; double *point = grid; for (int y = 0; y < N_y; y++) { for (int x = 0; x < N_x; x++, point++) newtoll += abs(*point); } if( abs(newtoll - toll) < TOLLERANCE){ fprintf(stderr, "Breaked for tollerance trigger!\n"); break; } else toll = newtoll; #endif } #ifdef SHOWRESULT fprintf (stderr, "\tGRID GPU\n\n"); double *ptrG = grid; for (int y = 0; y < 10; y++) { for (int x = 0; x < 10; x++, ptrG++) fprintf (stderr, "%3.f ", *ptrG); ptrG = grid + N_x * (y+1); fprintf (stderr, "\n"); } #endif return 0; }
0dafa359d2e74e3c3d5319a4409305cfb201436f.cu
/*
    PDE solver
    Written by: Riccardo Fontanini
    Start date: 7 May 2018
    Note: This program is a PDE solver
    R O T A S
    O P E R A
    T E N E T
    A R E P O
    S A T O R
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>

#ifndef N_x
#define N_x 10
#endif

#ifndef N_y
#define N_y 10
#endif

#ifndef BLOCKDIM
#define BLOCKDIM 32
#endif

#ifndef MAXITER
#define MAXITER 50
#endif

#ifndef SIMTIME
#define SIMTIME 200000
#endif

__global__ void solve(double *grid, int odd)
{
    //odd means: in odd rows swap odd, x%2 == odd -> elaborate
    int x = blockIdx.x * BLOCKDIM + threadIdx.x;
    int y = blockIdx.y * BLOCKDIM + threadIdx.y;

    if (y % 2 == 1 && odd == 0)
        odd = 1;
    else if (y % 2 == 1 && odd == 1)
        odd = 0;

    if (y == 0 || y >= N_y-1 || x == 0 || x >= N_x-1 || x % 2 == odd)
        return;

    *(grid + y*N_x + x) = 0.2 * (*(grid + y*N_x + x)
                               + *(grid + (y-1)*N_x + x)
                               + *(grid + (y+1)*N_x + x)
                               + *(grid + y*N_x + x - 1)
                               + *(grid + y*N_x + x + 1));
}

__host__ void solve_CPU(double *grid)
{
    int odd = 0;
    for (int i = 0; i < MAXITER; i++) {
        odd = i % 2;
        for (int y = 0; y < N_y; y++) {
            for (int x = 0; x < N_x; x++) {
                if (y == 0 || y >= N_y-1 || x == 0 || x >= N_x-1 || x % 2 == odd)
                    continue;
                *(grid + y*N_x + x) = 0.2 * (*(grid + y*N_x + x)
                                           + *(grid + (y-1)*N_x + x)
                                           + *(grid + (y+1)*N_x + x)
                                           + *(grid + y*N_x + x - 1)
                                           + *(grid + y*N_x + x + 1));
            }
        }
#ifdef SIMULATION
        fprintf(stderr, "\033c");
        fprintf(stderr, "Iteration: %d\n", i);
        fprintf(stderr, "\tGRID \n\n");
        double *ptrP = grid;
        for (int y = 0; y < N_y; y++) {
            for (int x = 0; x < N_x; x++, ptrP++)
                fprintf(stderr, "%3.f ", *ptrP);
            fprintf(stderr, "\n");
        }
        usleep(SIMTIME);
#endif
    }
}

__global__ void set_default(double *grid, double def)
{
    int x = blockIdx.x * BLOCKDIM + threadIdx.x;
    int y = blockIdx.y * BLOCKDIM + threadIdx.y;

    if (y == 0 || y >= N_y-1 || x == 0 || x >= N_x-1)
        return;

    *(grid + y*N_x + x) = def;
}

int main()
{
    double *grid;
    int gridx = N_x / BLOCKDIM;
    int gridy = N_y / BLOCKDIM;
    if (N_x % BLOCKDIM != 0) gridx++;
    if (N_y % BLOCKDIM != 0) gridy++;
    double toll = 0; //tolerance checked every cycle when TOLLERANCE is defined
    //TIME EVALUATION
    struct timespec start, execution;

    fprintf(stderr, "grid x : %d y: %d \n\n", gridx, gridy);
    dim3 blocksPerGrid(gridx, gridy);
    dim3 threadsPerBlock(BLOCKDIM, BLOCKDIM);
    cudaMallocManaged(&grid, N_x * N_y * sizeof(double));

    /* SET BOUNDARY */
    grid[0] = 120;
    grid[1] = 120;
    grid[2] = 120;
    grid[3] = 120;

    /* SET DEFAULT VALUE TO SPACE */
    cudaDeviceSynchronize();
    set_default<<<blocksPerGrid, threadsPerBlock>>>(grid, 50);
    cudaDeviceSynchronize();

#ifdef ELABCPU
    clock_gettime(CLOCK_MONOTONIC_RAW, &start);
    solve_CPU(grid);
    clock_gettime(CLOCK_MONOTONIC_RAW, &execution);
    fprintf(stderr, "\n\nExecution time for CPU: %ld\n\n",
            (execution.tv_sec - start.tv_sec) * 1000000 +
            (execution.tv_nsec - start.tv_nsec) / 1000);

#ifdef SHOWRESULT
    fprintf(stderr, "\tGRID CPU\n\n");
    double *ptrC = grid;
    for (int y = 0; y < 10; y++) {
        for (int x = 0; x < 10; x++, ptrC++)
            fprintf(stderr, "%3.f ", *ptrC);
        ptrC = grid + N_x * (y+1);
        fprintf(stderr, "\n");
    }
#endif
#endif

    cudaDeviceSynchronize();
    set_default<<<blocksPerGrid, threadsPerBlock>>>(grid, 50);
    cudaDeviceSynchronize();

    for (int i = 0; i < MAXITER; i++) {
        solve<<<blocksPerGrid, threadsPerBlock>>>(grid, i % 2);
        cudaDeviceSynchronize();

#ifdef SIMULATION
        fprintf(stderr, "\033c");
        fprintf(stderr, "Iteration: %d\n", i);
        fprintf(stderr, "\tGRID \n\n");
        double *ptrP = grid;
        for (int y = 0; y < N_y; y++) {
            for (int x = 0; x < N_x; x++, ptrP++)
                fprintf(stderr, "%3.f ", *ptrP);
            fprintf(stderr, "\n");
        }
        usleep(SIMTIME);
#endif

#ifdef TOLLERANCE
        double newtoll = 0;
        double *point = grid;
        for (int y = 0; y < N_y; y++) {
            for (int x = 0; x < N_x; x++, point++)
                newtoll += fabs(*point);
        }
        if (fabs(newtoll - toll) < TOLLERANCE) {
            fprintf(stderr, "Stopped early: tolerance threshold reached!\n");
            break;
        } else {
            toll = newtoll;
        }
#endif
    }

#ifdef SHOWRESULT
    fprintf(stderr, "\tGRID GPU\n\n");
    double *ptrG = grid;
    for (int y = 0; y < 10; y++) {
        for (int x = 0; x < 10; x++, ptrG++)
            fprintf(stderr, "%3.f ", *ptrG);
        ptrG = grid + N_x * (y+1);
        fprintf(stderr, "\n");
    }
#endif

    return 0;
}
2636fefeb0a493e833dc1c6fd9cbb6d406aeb0e0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void cu_pooling_overlap_max(const float* src, float* dst, float *loc,
                                       const int rowssrc, const int colssrc,
                                       const int rowsdst, const int colsdst,
                                       const int sizex, const int sizey, const int n){
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    while(tid < n){
        int cdst = tid % colsdst;
        int rdst = tid / colsdst;
        int rsrc = rdst;
        int csrc = cdst;
        int xend = (csrc + sizex - 1);
        int yend = (rsrc + sizey - 1);
        loc[tid] = (float)(rsrc * colssrc + csrc);
        for(int i = rsrc; i <= yend; ++i){
            for(int j = csrc; j <= xend; ++j){
                if(src[i * colssrc + j] > dst[tid]){
                    dst[tid] = src[i * colssrc + j];
                    loc[tid] = (float)(i * colssrc + j);
                }
            }
        }
        tid += stride;
    }
}
2636fefeb0a493e833dc1c6fd9cbb6d406aeb0e0.cu
#include "includes.h" __global__ void cu_pooling_overlap_max(const float* src, float* dst, float *loc, const int rowssrc, const int colssrc, const int rowsdst, const int colsdst, const int sizex, const int sizey, const int n){ int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; while(tid < n){ int cdst = tid % colsdst; int rdst = tid / colsdst; int rsrc = rdst; int csrc = cdst; int xend = (csrc + sizex - 1); int yend = (rsrc + sizey - 1); loc[tid] = (float)(rsrc * colssrc + csrc); for(int i = rsrc; i <= yend; ++i){ for(int j = csrc; j <= xend; ++j){ if(src[i * colssrc + j] > dst[tid]){ dst[tid] = src[i * colssrc + j]; loc[tid] = (float)(i * colssrc + j); } } } tid += stride; } }
60de5577a96cde0421b0563fd72b4ebc716199a5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * keygen.cu * * Created on: Dec 23, 2020 * Author: dcs */ #include "keygen.h" __global__ void KeyGenKernel(uint8_t *sk, uint8_t *pk, uint8_t *seed_d, int *s_bar, int *e_bar, int16_t *A_bar, int8_t *s, int8_t *e, int8_t *i_zeta_tab) { int tid = blockIdx.x*blockDim.x + threadIdx.x; uint8_t rho_sgm[2*KYBER_SEED_LEN]; //rho,sigma = G(d) Sha3512Dev(rho_sgm, seed_d + tid*KYBER_SEED_LEN, KYBER_SEED_LEN); //Matrix A GenMtrxADev(A_bar + tid*KYBER_K*KYBER_K*KYBER_N, rho_sgm); //s ==> s_bar, e ==> e_bar { uint8_t nounce=0; GensreDev(s+tid*KYBER_K*KYBER_N, rho_sgm+KYBER_SEED_LEN, nounce); __syncthreads(); RawNTTKernel_512(s_bar, s, i_zeta_tab); nounce += KYBER_K; GensreDev(e+tid*KYBER_K*KYBER_N, rho_sgm+KYBER_SEED_LEN, nounce); __syncthreads(); RawNTTKernel_512(e_bar, e, i_zeta_tab); } //t_bar = A_bar * t_bar + e_bar }
60de5577a96cde0421b0563fd72b4ebc716199a5.cu
/* * keygen.cu * * Created on: Dec 23, 2020 * Author: dcs */ #include "keygen.h" __global__ void KeyGenKernel(uint8_t *sk, uint8_t *pk, uint8_t *seed_d, int *s_bar, int *e_bar, int16_t *A_bar, int8_t *s, int8_t *e, int8_t *i_zeta_tab) { int tid = blockIdx.x*blockDim.x + threadIdx.x; uint8_t rho_sgm[2*KYBER_SEED_LEN]; //rho,sigma = G(d) Sha3512Dev(rho_sgm, seed_d + tid*KYBER_SEED_LEN, KYBER_SEED_LEN); //Matrix A GenMtrxADev(A_bar + tid*KYBER_K*KYBER_K*KYBER_N, rho_sgm); //s ==> s_bar, e ==> e_bar { uint8_t nounce=0; GensreDev(s+tid*KYBER_K*KYBER_N, rho_sgm+KYBER_SEED_LEN, nounce); __syncthreads(); RawNTTKernel_512(s_bar, s, i_zeta_tab); nounce += KYBER_K; GensreDev(e+tid*KYBER_K*KYBER_N, rho_sgm+KYBER_SEED_LEN, nounce); __syncthreads(); RawNTTKernel_512(e_bar, e, i_zeta_tab); } //t_bar = A_bar * t_bar + e_bar }
f180eb89ac8c91fb98c9e58581287fe7fb235f45.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <wb.h> #define wbCheck(stmt) \ do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \ return -1; \ } \ } while (0) #define TILEWIDTH 32 // Compute C = A * B __global__ void matrixMultiplyShared(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here //@@ You have to use shared memory for this MP __shared__ float subTileA[TILEWIDTH][TILEWIDTH]; __shared__ float subTileB[TILEWIDTH][TILEWIDTH]; // Store row and column that thread is calculating. 1-1 correspondence from thread to position in tile int row = blockIdx.y * TILEWIDTH + threadIdx.y; int col = blockIdx.x * TILEWIDTH + threadIdx.x; // Running sum float runningSum = 0; // Loop over each tile in input matrices for(int i = 0; i < ceil((float)numAColumns / (float)TILEWIDTH); ++i) { // Read the individual tiles into shared memory // If thread is within limits of input A matrix if(row < numARows && (i * TILEWIDTH + threadIdx.x) < numAColumns) { // Read in corresponding element in subtile of input matrices subTileA[threadIdx.y][threadIdx.x] = A[row * numAColumns + i * TILEWIDTH + threadIdx.x]; } // Set subtile values to 0 so they do not affect threads inside of output matrix else { subTileA[threadIdx.y][threadIdx.x] = 0; } if((i * TILEWIDTH + threadIdx.y) < numBRows && col < numBColumns) { subTileB[threadIdx.y][threadIdx.x] = B[(i * TILEWIDTH + threadIdx.y) * numBColumns + col]; } // Set subtile values to 0 so they do not affect threads inside of output matrix else { subTileB[threadIdx.y][threadIdx.x] = 0; } __syncthreads(); // Make sure all threads are done reading // Let each thread work on one piece of the subtile for(int j = 0; j < TILEWIDTH; ++j) { runningSum += subTileA[threadIdx.y][j] * subTileB[j][threadIdx.x]; } __syncthreads(); // Make sure all threads are done calculating before moving on } // If thread is calculating something inside output matrix, place runningSum into output matrix if(row < numCRows && col < numCColumns) { C[row * numCColumns + col] = runningSum; } } int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set // this) args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; //@@ Allocate the hostC matrix hostC = (float *)malloc(numCColumns * numCRows * sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); // wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); // wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ 
Allocate GPU memory here hipMalloc(&deviceA, numAColumns * numARows * sizeof(float)); hipMalloc(&deviceB, numBColumns * numBRows * sizeof(float)); hipMalloc(&deviceC, numCColumns * numCRows * sizeof(float)); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here hipMemcpy(deviceA, hostA, numAColumns * numARows * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(deviceB, hostB, numBColumns * numBRows * sizeof(float), hipMemcpyHostToDevice); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here dim3 blocksPerGrid(ceilf((float)numCColumns / (float)TILEWIDTH), ceilf((float)numCRows / (float)TILEWIDTH), 1); dim3 threadsPerBlock(TILEWIDTH, TILEWIDTH, 1); wbLog(TRACE, "The dimensions of grid is ", blocksPerGrid.x, " x ", blocksPerGrid.y); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here hipLaunchKernelGGL(( matrixMultiplyShared), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); hipDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here hipMemcpy(hostC, deviceC, numCColumns * numCRows * sizeof(float), hipMemcpyDeviceToHost); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here hipFree(deviceA); hipFree(deviceB); hipFree(deviceC); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
f180eb89ac8c91fb98c9e58581287fe7fb235f45.cu
#include <wb.h> #define wbCheck(stmt) \ do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \ return -1; \ } \ } while (0) #define TILEWIDTH 32 // Compute C = A * B __global__ void matrixMultiplyShared(float *A, float *B, float *C, int numARows, int numAColumns, int numBRows, int numBColumns, int numCRows, int numCColumns) { //@@ Insert code to implement matrix multiplication here //@@ You have to use shared memory for this MP __shared__ float subTileA[TILEWIDTH][TILEWIDTH]; __shared__ float subTileB[TILEWIDTH][TILEWIDTH]; // Store row and column that thread is calculating. 1-1 correspondence from thread to position in tile int row = blockIdx.y * TILEWIDTH + threadIdx.y; int col = blockIdx.x * TILEWIDTH + threadIdx.x; // Running sum float runningSum = 0; // Loop over each tile in input matrices for(int i = 0; i < ceil((float)numAColumns / (float)TILEWIDTH); ++i) { // Read the individual tiles into shared memory // If thread is within limits of input A matrix if(row < numARows && (i * TILEWIDTH + threadIdx.x) < numAColumns) { // Read in corresponding element in subtile of input matrices subTileA[threadIdx.y][threadIdx.x] = A[row * numAColumns + i * TILEWIDTH + threadIdx.x]; } // Set subtile values to 0 so they do not affect threads inside of output matrix else { subTileA[threadIdx.y][threadIdx.x] = 0; } if((i * TILEWIDTH + threadIdx.y) < numBRows && col < numBColumns) { subTileB[threadIdx.y][threadIdx.x] = B[(i * TILEWIDTH + threadIdx.y) * numBColumns + col]; } // Set subtile values to 0 so they do not affect threads inside of output matrix else { subTileB[threadIdx.y][threadIdx.x] = 0; } __syncthreads(); // Make sure all threads are done reading // Let each thread work on one piece of the subtile for(int j = 0; j < TILEWIDTH; ++j) { runningSum += subTileA[threadIdx.y][j] * subTileB[j][threadIdx.x]; } __syncthreads(); // Make sure all threads are done calculating before moving on } // If thread is calculating something inside output matrix, place runningSum into output matrix if(row < numCRows && col < numCColumns) { C[row * numCColumns + col] = runningSum; } } int main(int argc, char **argv) { wbArg_t args; float *hostA; // The A matrix float *hostB; // The B matrix float *hostC; // The output C matrix float *deviceA; float *deviceB; float *deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set // this) args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = (float *)wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = (float *)wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; //@@ Allocate the hostC matrix hostC = (float *)malloc(numCColumns * numCRows * sizeof(float)); wbTime_stop(Generic, "Importing data and creating memory on host"); // wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); // wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here cudaMalloc(&deviceA, numAColumns * numARows * 
sizeof(float)); cudaMalloc(&deviceB, numBColumns * numBRows * sizeof(float)); cudaMalloc(&deviceC, numCColumns * numCRows * sizeof(float)); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here cudaMemcpy(deviceA, hostA, numAColumns * numARows * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(deviceB, hostB, numBColumns * numBRows * sizeof(float), cudaMemcpyHostToDevice); wbTime_stop(GPU, "Copying input memory to the GPU."); //@@ Initialize the grid and block dimensions here dim3 blocksPerGrid(ceilf((float)numCColumns / (float)TILEWIDTH), ceilf((float)numCRows / (float)TILEWIDTH), 1); dim3 threadsPerBlock(TILEWIDTH, TILEWIDTH, 1); wbLog(TRACE, "The dimensions of grid is ", blocksPerGrid.x, " x ", blocksPerGrid.y); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here matrixMultiplyShared<<<blocksPerGrid, threadsPerBlock>>>(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns); cudaDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here cudaMemcpy(hostC, deviceC, numCColumns * numCRows * sizeof(float), cudaMemcpyDeviceToHost); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here cudaFree(deviceA); cudaFree(deviceB); cudaFree(deviceC); wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
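// --- Editorial aside (illustrative sketch, not part of the original file) ---
// The tiled kernel above assumes row-major storage for A, B and C. A plain CPU
// reference multiply with the same layout is useful for spot-checking the tiled
// result outside the wb framework; the function name is hypothetical.
void cpu_reference_multiply(const float *A, const float *B, float *C,
                            int numARows, int numAColumns, int numBColumns)
{
    for (int row = 0; row < numARows; ++row) {
        for (int col = 0; col < numBColumns; ++col) {
            float sum = 0.0f;
            for (int k = 0; k < numAColumns; ++k)
                sum += A[row * numAColumns + k] * B[k * numBColumns + col];
            C[row * numBColumns + col] = sum;   // same indexing the kernel writes
        }
    }
}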
6395006adf71defd44a5a0dba0d10d30e4b5f797.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from magmablas/zlaswp.cu, normal z -> c, Sun Nov 20 20:20:30 2016 @author Stan Tomov @author Mathieu Faverge @author Ichitaro Yamazaki @author Mark Gates */ #include "magma_internal.h" // MAX_PIVOTS is maximum number of pivots to apply in each kernel launch // NTHREADS is number of threads in a block // 64 and 256 are better on Kepler; //#define MAX_PIVOTS 64 //#define NTHREADS 256 #define MAX_PIVOTS 32 #define NTHREADS 64 typedef struct { int npivots; int ipiv[MAX_PIVOTS]; } claswp_params_t; // Matrix A is stored row-wise in dAT. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void claswp_kernel( int n, magmaFloatComplex *dAT, int ldda, claswp_params_t params ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dAT += tid; magmaFloatComplex *A1 = dAT; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; magmaFloatComplex *A2 = dAT + i2*ldda; magmaFloatComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldda; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= CLASWP performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored row-wise (hence dAT). ** Otherwise, this is identical to LAPACK's interface. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dAT COMPLEX array on GPU, stored row-wise, dimension (LDDA,M) The M-by-N matrix, stored transposed as N-by-M matrix embedded in LDDA-by-M array. M is not given; it is implicit. On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldda INTEGER The leading dimension of the array A. ldda >= n. @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (Fortran one-based index: 1 <= k1.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (Fortran one-based index: 1 <= k2.) @param[in] ipiv INTEGER array, on CPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, INCI > 0. TODO: If INCI is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_laswp *******************************************************************************/ // used in cgessm, cgetrf_incpiv. 
extern "C" void magmablas_claswp( magma_int_t n, magmaFloatComplex_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue ) { #define dAT(i_, j_) (dAT + (i_)*ldda + (j_)) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( n > ldda ) info = -3; else if ( k1 < 1 ) info = -4; else if ( k2 < 1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); claswp_params_t params; for( int k = k1-1; k < k2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, k2-k ); params.npivots = npivots; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } hipLaunchKernelGGL(( claswp_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dAT(k,0), ldda, params ); } #undef dAT } /******************************************************************************/ // Extended version has stride in both directions (ldx, ldy) // to handle both row-wise and column-wise storage. // Matrix A is stored row or column-wise in dA. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void claswpx_kernel( int n, magmaFloatComplex *dA, int ldx, int ldy, claswp_params_t params ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dA += tid*ldy; magmaFloatComplex *A1 = dA; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; magmaFloatComplex *A2 = dA + i2*ldx; magmaFloatComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldx; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= CLASWPX performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored either row-wise or column-wise, depending on ldx and ldy. ** Otherwise, this is identical to LAPACK's interface. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dA COMPLEX array on GPU, dimension (*,*) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldx INTEGER Stride between elements in same column. @param[in] ldy INTEGER Stride between elements in same row. For A stored row-wise, set ldx=ldda and ldy=1. For A stored column-wise, set ldx=1 and ldy=ldda. @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (One based index.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (One based index.) @param[in] ipiv INTEGER array, on CPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, IPIV > 0. TODO: If IPIV is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_laswp *******************************************************************************/ extern "C" void magmablas_claswpx( magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldx, magma_int_t ldy, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue ) { #define dA(i_, j_) (dA + (i_)*ldx + (j_)*ldy) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 0 ) info = -4; else if ( k2 < 0 || k2 < k1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); claswp_params_t params; for( int k = k1-1; k < k2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, k2-k ); params.npivots = npivots; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } hipLaunchKernelGGL(( claswpx_kernel) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA(k,0), ldx, ldy, params ); } #undef dA } /******************************************************************************/ // This version takes d_ipiv on the GPU. Thus it does not pass pivots // as an argument using a structure, avoiding all the argument size // limitations of CUDA and OpenCL. It also needs just one kernel launch // with all the pivots, instead of multiple kernel launches with small // batches of pivots. On Fermi, it is faster than magmablas_claswp // (including copying pivots to the GPU). __global__ void claswp2_kernel( int n, magmaFloatComplex *dAT, int ldda, int npivots, const magma_int_t *d_ipiv, int inci ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dAT += tid; magmaFloatComplex *A1 = dAT; for( int i1 = 0; i1 < npivots; ++i1 ) { int i2 = d_ipiv[i1*inci] - 1; // Fortran index magmaFloatComplex *A2 = dAT + i2*ldda; magmaFloatComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldda; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= CLASWP2 performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored row-wise (hence dAT). ** Otherwise, this is identical to LAPACK's interface. Here, d_ipiv is passed in GPU memory. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dAT COMPLEX array on GPU, stored row-wise, dimension (LDDA,*) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldda INTEGER The leading dimension of the array A. (I.e., stride between elements in a column.) @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (One based index.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (One based index.) @param[in] d_ipiv INTEGER array, on GPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, IPIV > 0. TODO: If IPIV is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_laswp *******************************************************************************/ extern "C" void magmablas_claswp2( magma_int_t n, magmaFloatComplex_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, magmaInt_const_ptr d_ipiv, magma_int_t inci, magma_queue_t queue ) { #define dAT(i_, j_) (dAT + (i_)*ldda + (j_)) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 0 ) info = -4; else if ( k2 < 0 || k2 < k1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t nb = k2-(k1-1); dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); hipLaunchKernelGGL(( claswp2_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dAT(k1-1,0), ldda, nb, d_ipiv, inci ); }
6395006adf71defd44a5a0dba0d10d30e4b5f797.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from magmablas/zlaswp.cu, normal z -> c, Sun Nov 20 20:20:30 2016 @author Stan Tomov @author Mathieu Faverge @author Ichitaro Yamazaki @author Mark Gates */ #include "magma_internal.h" // MAX_PIVOTS is maximum number of pivots to apply in each kernel launch // NTHREADS is number of threads in a block // 64 and 256 are better on Kepler; //#define MAX_PIVOTS 64 //#define NTHREADS 256 #define MAX_PIVOTS 32 #define NTHREADS 64 typedef struct { int npivots; int ipiv[MAX_PIVOTS]; } claswp_params_t; // Matrix A is stored row-wise in dAT. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void claswp_kernel( int n, magmaFloatComplex *dAT, int ldda, claswp_params_t params ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dAT += tid; magmaFloatComplex *A1 = dAT; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; magmaFloatComplex *A2 = dAT + i2*ldda; magmaFloatComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldda; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= CLASWP performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored row-wise (hence dAT). ** Otherwise, this is identical to LAPACK's interface. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dAT COMPLEX array on GPU, stored row-wise, dimension (LDDA,M) The M-by-N matrix, stored transposed as N-by-M matrix embedded in LDDA-by-M array. M is not given; it is implicit. On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldda INTEGER The leading dimension of the array A. ldda >= n. @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (Fortran one-based index: 1 <= k1.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (Fortran one-based index: 1 <= k2.) @param[in] ipiv INTEGER array, on CPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, INCI > 0. TODO: If INCI is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_laswp *******************************************************************************/ // used in cgessm, cgetrf_incpiv. 
extern "C" void magmablas_claswp( magma_int_t n, magmaFloatComplex_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue ) { #define dAT(i_, j_) (dAT + (i_)*ldda + (j_)) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( n > ldda ) info = -3; else if ( k1 < 1 ) info = -4; else if ( k2 < 1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); claswp_params_t params; for( int k = k1-1; k < k2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, k2-k ); params.npivots = npivots; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } claswp_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( n, dAT(k,0), ldda, params ); } #undef dAT } /******************************************************************************/ // Extended version has stride in both directions (ldx, ldy) // to handle both row-wise and column-wise storage. // Matrix A is stored row or column-wise in dA. // Divide matrix A into block-columns of NTHREADS columns each. // Each GPU block processes one block-column of A. // Each thread goes down a column of A, // swapping rows according to pivots stored in params. __global__ void claswpx_kernel( int n, magmaFloatComplex *dA, int ldx, int ldy, claswp_params_t params ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dA += tid*ldy; magmaFloatComplex *A1 = dA; for( int i1 = 0; i1 < params.npivots; ++i1 ) { int i2 = params.ipiv[i1]; magmaFloatComplex *A2 = dA + i2*ldx; magmaFloatComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldx; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= CLASWPX performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored either row-wise or column-wise, depending on ldx and ldy. ** Otherwise, this is identical to LAPACK's interface. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dA COMPLEX array on GPU, dimension (*,*) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldx INTEGER Stride between elements in same column. @param[in] ldy INTEGER Stride between elements in same row. For A stored row-wise, set ldx=ldda and ldy=1. For A stored column-wise, set ldx=1 and ldy=ldda. @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (One based index.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (One based index.) @param[in] ipiv INTEGER array, on CPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, IPIV > 0. TODO: If IPIV is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_laswp *******************************************************************************/ extern "C" void magmablas_claswpx( magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldx, magma_int_t ldy, magma_int_t k1, magma_int_t k2, const magma_int_t *ipiv, magma_int_t inci, magma_queue_t queue ) { #define dA(i_, j_) (dA + (i_)*ldx + (j_)*ldy) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 0 ) info = -4; else if ( k2 < 0 || k2 < k1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); claswp_params_t params; for( int k = k1-1; k < k2; k += MAX_PIVOTS ) { int npivots = min( MAX_PIVOTS, k2-k ); params.npivots = npivots; for( int j = 0; j < npivots; ++j ) { params.ipiv[j] = ipiv[(k+j)*inci] - k - 1; } claswpx_kernel <<< grid, threads, 0, queue->cuda_stream() >>> ( n, dA(k,0), ldx, ldy, params ); } #undef dA } /******************************************************************************/ // This version takes d_ipiv on the GPU. Thus it does not pass pivots // as an argument using a structure, avoiding all the argument size // limitations of CUDA and OpenCL. It also needs just one kernel launch // with all the pivots, instead of multiple kernel launches with small // batches of pivots. On Fermi, it is faster than magmablas_claswp // (including copying pivots to the GPU). __global__ void claswp2_kernel( int n, magmaFloatComplex *dAT, int ldda, int npivots, const magma_int_t *d_ipiv, int inci ) { int tid = threadIdx.x + blockDim.x*blockIdx.x; if ( tid < n ) { dAT += tid; magmaFloatComplex *A1 = dAT; for( int i1 = 0; i1 < npivots; ++i1 ) { int i2 = d_ipiv[i1*inci] - 1; // Fortran index magmaFloatComplex *A2 = dAT + i2*ldda; magmaFloatComplex temp = *A1; *A1 = *A2; *A2 = temp; A1 += ldda; // A1 = dA + i1*ldx } } } /***************************************************************************//** Purpose: ============= CLASWP2 performs a series of row interchanges on the matrix A. One row interchange is initiated for each of rows K1 through K2 of A. ** Unlike LAPACK, here A is stored row-wise (hence dAT). ** Otherwise, this is identical to LAPACK's interface. Here, d_ipiv is passed in GPU memory. Arguments: ========== @param[in] n INTEGER The number of columns of the matrix A. @param[in,out] dAT COMPLEX array on GPU, stored row-wise, dimension (LDDA,*) On entry, the matrix of column dimension N to which the row interchanges will be applied. On exit, the permuted matrix. @param[in] ldda INTEGER The leading dimension of the array A. (I.e., stride between elements in a column.) @param[in] k1 INTEGER The first element of IPIV for which a row interchange will be done. (One based index.) @param[in] k2 INTEGER The last element of IPIV for which a row interchange will be done. (One based index.) @param[in] d_ipiv INTEGER array, on GPU, dimension (K2*abs(INCI)) The vector of pivot indices. Only the elements in positions K1 through K2 of IPIV are accessed. IPIV(K) = L implies rows K and L are to be interchanged. @param[in] inci INTEGER The increment between successive values of IPIV. Currently, IPIV > 0. TODO: If IPIV is negative, the pivots are applied in reverse order. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_laswp *******************************************************************************/ extern "C" void magmablas_claswp2( magma_int_t n, magmaFloatComplex_ptr dAT, magma_int_t ldda, magma_int_t k1, magma_int_t k2, magmaInt_const_ptr d_ipiv, magma_int_t inci, magma_queue_t queue ) { #define dAT(i_, j_) (dAT + (i_)*ldda + (j_)) magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( k1 < 0 ) info = -4; else if ( k2 < 0 || k2 < k1 ) info = -5; else if ( inci <= 0 ) info = -7; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t nb = k2-(k1-1); dim3 threads( NTHREADS ); dim3 grid( magma_ceildiv( n, NTHREADS ) ); claswp2_kernel<<< grid, threads, 0, queue->cuda_stream() >>> ( n, dAT(k1-1,0), ldda, nb, d_ipiv, inci ); }
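// --- Editorial aside (illustrative sketch, not part of the original file) ---
// A host-side reference for the same K1..K2 row interchanges, useful for
// checking the kernels above. It follows LAPACK's one-based ipiv convention and
// the row-wise (dAT) storage, but uses plain float instead of magmaFloatComplex;
// the function name is hypothetical and this is not MAGMA code.
static void laswp_reference(int n, float *AT, int ldat,
                            int k1, int k2, const int *ipiv, int inci)
{
    for (int k = k1 - 1; k < k2; ++k) {
        int dst = ipiv[k * inci] - 1;            // Fortran index -> 0-based row
        if (dst == k)
            continue;
        for (int j = 0; j < n; ++j) {            // swap the two rows, column by column
            float tmp          = AT[k   * ldat + j];
            AT[k   * ldat + j] = AT[dst * ldat + j];
            AT[dst * ldat + j] = tmp;
        }
    }
}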
08e33d5e31feebc0763b029f52a121badaba6df4.hip
// !!! This is a file automatically generated by hipify!!! #define FP float #include <stdio.h> #include <hip/hip_runtime.h> #include <stdlib.h> #include <math.h> // Assume the matrix dimensions are multiples of the tile width __global__ void gpu_matrixmult(FP *a, FP *b, FP *c, int n, int p, int m, int TW, int NTB) { extern __shared__ FP bigarray[]; FP *atile = &bigarray[0], *btile = &bigarray[TW * TW]; int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y; // if (bx * NTB * TW >= m) return; int row = ty + by * blockDim.y, col = tx + bx * NTB * TW; int indexa, indexb; // Loop over tiles for (int t = 0; t < p / TW; t++) { // Reset col col = tx + bx * NTB * TW; indexa = row * p + t * TW + tx; atile[ty * TW + tx] = a[indexa]; // Copy to shared memory // Loop over muti-tiles for (int t1 = 0; t1 < NTB; t1++, col += TW) { if (col >= m) break; indexb = (t * TW + ty) * m + col; btile[ty * TW + tx] = b[indexb]; // Copy to shared memory __syncthreads(); // Compute tile ctile = atile x btile (each thread computes one element) for (int k = 0; k < TW; k++) c[row * m + col] += atile[ty * TW + k] * btile[k * TW + tx]; __syncthreads(); } } } void cpu_matrixmult(FP *a,FP *b, FP *c, int n, int p, int m) { int index; for (int k = 0; k < p; k++) for (int row = 0; row < n; row++) { index = row * m; for (int col = 0; col < m; col++, index++) { c[index] -= a[row * p + k] * b[k * m + col]; } } } int main(int argc, char *argv[]) { int i, j; // loop counters int gpucount = 0; // Count of available GPUs int gpunum = 0; // Device number to use int Grid_Dim_x = 1, Grid_Dim_y = 1; //Grid dimension, x and y, square int Block_Dim_x = 1, Block_Dim_y = 1; //Block dimension, x and y, square int TW = 1; int NTB = 1; int n, m, p; // matrix dimensions FP *a,*b,*c; FP *dev_a, *dev_b, *dev_c; int sizeA, sizeB, sizeC; // number of bytes in arrays hipEvent_t start, stop; // using cuda events to measure time float elapsed_time_ms; // which is applicable for asynchronous code also hipError_t errorcode; // --------------------SET PARAMETERS AND DATA ----------------------- errorcode = hipGetDeviceCount(&gpucount); if (errorcode == hipErrorNoDevice) { printf("No GPUs are visible\n"); exit(-1); } else { printf("Device count = %d\n",gpucount); } if (argc != 6) { printf("Usage: matmul <matrix dim n> <matrix dim m> <matrix dim p> <block dim> <tiles>\n"); exit (-1); } n = atoi(argv[1]); m = atoi(argv[2]); p = atoi(argv[3]); NTB = atoi(argv[5]); TW = Block_Dim_x = Block_Dim_y = atoi(argv[4]); // Square block if (Block_Dim_x * Block_Dim_y > 1024) { printf("Error, too many threads in block\n"); exit (-1); } Grid_Dim_x = (m - 1) / (NTB * TW) + 1; Grid_Dim_y = (n - 1) / Block_Dim_y + 1; if (Grid_Dim_x * NTB * TW < m || Grid_Dim_y * Block_Dim_y < n) { printf("Error, number of threads in x/y dimensions less than number of array elements\n"); exit (-1); } hipSetDevice(gpunum); printf("Using device %d\n",gpunum); printf("Matrix Dimensions = %d, %d, %d\n", n, p, m); printf("Block_Dim = (%d, %d), Grid_Dim = (%d, %d), NTB = %d, \n", Block_Dim_x, Block_Dim_y, Grid_Dim_x, Grid_Dim_y, NTB); dim3 Grid(Grid_Dim_x, Grid_Dim_y); //Grid structure dim3 Block(Block_Dim_x, Block_Dim_y); //Block structure sizeA = n * p * sizeof(FP); sizeB = p * m * sizeof(FP); sizeC = n * m * sizeof(FP); a = (FP*) malloc(sizeA); // dynamically allocated memory for arrays on host b = (FP*) malloc(sizeB); c = (FP*) malloc(sizeC); // results from GPU srand(12345); for(i = 0; i < n; i++) for(j = 0; j < p; j++) { a[i * p + j] = (FP) rand() / (FP) RAND_MAX; // a[i * p 
+ j] = (FP) i+j; // may be helpful for debugging } for(i = 0; i < p; i++) for(j = 0; j < m; j++) { b[i * m + j] = (FP) rand() / (FP) RAND_MAX; // b[i * m + j] = (FP) i+j; // may be helpful for debugging } memset(c, 0., sizeC); // ------------- COMPUTATION DONE ON GPU ---------------------------- hipMalloc((void**)&dev_a, sizeA); // allocate memory on device hipMalloc((void**)&dev_b, sizeB); hipMalloc((void**)&dev_c, sizeC); hipMemcpy(dev_a, a , sizeA ,hipMemcpyHostToDevice); hipMemcpy(dev_b, b , sizeB ,hipMemcpyHostToDevice); hipEventCreate(&start); // instrument code to measure start time hipEventCreate(&stop); hipEventRecord(start, 0); // hipEventSynchronize(start); // not needed size_t Ns = 2 * TW * TW * sizeof(FP); hipLaunchKernelGGL(( gpu_matrixmult), dim3(Grid), dim3(Block), Ns, 0, dev_a, dev_b, dev_c, n, p, m, TW, NTB); hipEventRecord(stop, 0); // instrument code to measure end time hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_time_ms, start, stop); hipMemcpy(c, dev_c, sizeC, hipMemcpyDeviceToHost); printf("Time to calculate results on GPU: %f ms.\n\n", elapsed_time_ms); // exec. time // -------------- clean up --------------------------------------- free(a); free(b); free(c); hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); hipEventDestroy(start); hipEventDestroy(stop); return 0; }
08e33d5e31feebc0763b029f52a121badaba6df4.cu
#define FP float #include <stdio.h> #include <cuda.h> #include <stdlib.h> #include <math.h> // Assume the matrix dimensions are multiples of the tile width __global__ void gpu_matrixmult(FP *a, FP *b, FP *c, int n, int p, int m, int TW, int NTB) { extern __shared__ FP bigarray[]; FP *atile = &bigarray[0], *btile = &bigarray[TW * TW]; int tx = threadIdx.x, ty = threadIdx.y, bx = blockIdx.x, by = blockIdx.y; // if (bx * NTB * TW >= m) return; int row = ty + by * blockDim.y, col = tx + bx * NTB * TW; int indexa, indexb; // Loop over tiles for (int t = 0; t < p / TW; t++) { // Reset col col = tx + bx * NTB * TW; indexa = row * p + t * TW + tx; atile[ty * TW + tx] = a[indexa]; // Copy to shared memory // Loop over muti-tiles for (int t1 = 0; t1 < NTB; t1++, col += TW) { if (col >= m) break; indexb = (t * TW + ty) * m + col; btile[ty * TW + tx] = b[indexb]; // Copy to shared memory __syncthreads(); // Compute tile ctile = atile x btile (each thread computes one element) for (int k = 0; k < TW; k++) c[row * m + col] += atile[ty * TW + k] * btile[k * TW + tx]; __syncthreads(); } } } void cpu_matrixmult(FP *a,FP *b, FP *c, int n, int p, int m) { int index; for (int k = 0; k < p; k++) for (int row = 0; row < n; row++) { index = row * m; for (int col = 0; col < m; col++, index++) { c[index] -= a[row * p + k] * b[k * m + col]; } } } int main(int argc, char *argv[]) { int i, j; // loop counters int gpucount = 0; // Count of available GPUs int gpunum = 0; // Device number to use int Grid_Dim_x = 1, Grid_Dim_y = 1; //Grid dimension, x and y, square int Block_Dim_x = 1, Block_Dim_y = 1; //Block dimension, x and y, square int TW = 1; int NTB = 1; int n, m, p; // matrix dimensions FP *a,*b,*c; FP *dev_a, *dev_b, *dev_c; int sizeA, sizeB, sizeC; // number of bytes in arrays cudaEvent_t start, stop; // using cuda events to measure time float elapsed_time_ms; // which is applicable for asynchronous code also cudaError_t errorcode; // --------------------SET PARAMETERS AND DATA ----------------------- errorcode = cudaGetDeviceCount(&gpucount); if (errorcode == cudaErrorNoDevice) { printf("No GPUs are visible\n"); exit(-1); } else { printf("Device count = %d\n",gpucount); } if (argc != 6) { printf("Usage: matmul <matrix dim n> <matrix dim m> <matrix dim p> <block dim> <tiles>\n"); exit (-1); } n = atoi(argv[1]); m = atoi(argv[2]); p = atoi(argv[3]); NTB = atoi(argv[5]); TW = Block_Dim_x = Block_Dim_y = atoi(argv[4]); // Square block if (Block_Dim_x * Block_Dim_y > 1024) { printf("Error, too many threads in block\n"); exit (-1); } Grid_Dim_x = (m - 1) / (NTB * TW) + 1; Grid_Dim_y = (n - 1) / Block_Dim_y + 1; if (Grid_Dim_x * NTB * TW < m || Grid_Dim_y * Block_Dim_y < n) { printf("Error, number of threads in x/y dimensions less than number of array elements\n"); exit (-1); } cudaSetDevice(gpunum); printf("Using device %d\n",gpunum); printf("Matrix Dimensions = %d, %d, %d\n", n, p, m); printf("Block_Dim = (%d, %d), Grid_Dim = (%d, %d), NTB = %d, \n", Block_Dim_x, Block_Dim_y, Grid_Dim_x, Grid_Dim_y, NTB); dim3 Grid(Grid_Dim_x, Grid_Dim_y); //Grid structure dim3 Block(Block_Dim_x, Block_Dim_y); //Block structure sizeA = n * p * sizeof(FP); sizeB = p * m * sizeof(FP); sizeC = n * m * sizeof(FP); a = (FP*) malloc(sizeA); // dynamically allocated memory for arrays on host b = (FP*) malloc(sizeB); c = (FP*) malloc(sizeC); // results from GPU srand(12345); for(i = 0; i < n; i++) for(j = 0; j < p; j++) { a[i * p + j] = (FP) rand() / (FP) RAND_MAX; // a[i * p + j] = (FP) i+j; // may be helpful for debugging } for(i = 0; i 
< p; i++) for(j = 0; j < m; j++) { b[i * m + j] = (FP) rand() / (FP) RAND_MAX; // b[i * m + j] = (FP) i+j; // may be helpful for debugging } memset(c, 0., sizeC); // ------------- COMPUTATION DONE ON GPU ---------------------------- cudaMalloc((void**)&dev_a, sizeA); // allocate memory on device cudaMalloc((void**)&dev_b, sizeB); cudaMalloc((void**)&dev_c, sizeC); cudaMemcpy(dev_a, a , sizeA ,cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b , sizeB ,cudaMemcpyHostToDevice); cudaEventCreate(&start); // instrument code to measure start time cudaEventCreate(&stop); cudaEventRecord(start, 0); // cudaEventSynchronize(start); // not needed size_t Ns = 2 * TW * TW * sizeof(FP); gpu_matrixmult<<<Grid, Block, Ns>>>(dev_a, dev_b, dev_c, n, p, m, TW, NTB); cudaEventRecord(stop, 0); // instrument code to measure end time cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time_ms, start, stop); cudaMemcpy(c, dev_c, sizeC, cudaMemcpyDeviceToHost); printf("Time to calculate results on GPU: %f ms.\n\n", elapsed_time_ms); // exec. time // -------------- clean up --------------------------------------- free(a); free(b); free(c); cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); cudaEventDestroy(start); cudaEventDestroy(stop); return 0; }
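// --- Editorial aside (illustrative sketch, not part of the original file) ---
// cpu_matrixmult() above subtracts A*B from c in place, so running it on the
// result copied back from the GPU leaves the element-wise residual in c.
// A minimal check main() could run after the device-to-host copy; the helper
// name is hypothetical and not part of the original program.
static double max_abs_residual(FP *a, FP *b, FP *c, int n, int p, int m)
{
    cpu_matrixmult(a, b, c, n, p, m);            // c now holds (GPU result) - A*B
    double max_err = 0.0;
    for (int idx = 0; idx < n * m; idx++)
        max_err = fmax(max_err, fabs((double) c[idx]));
    return max_err;
}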
84d27781e9ccc0c0e2414a3e8e080ffcc6a5945f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "monolithic.h"

void test_kernel() {
    int m = 80*32, n = 32, k = 16*512; // 512 iterations
    float *C_host, *C_dev;

    C_host = (float *) malloc (sizeof(float) * m * n);
    hipMalloc((void **)&C_dev, sizeof(float) * m * n);
    hipMemcpy(C_dev, C_host, sizeof(float) * m * n, hipMemcpyHostToDevice);

    // 4 warps per thread block
    // 80 blocks
    hipLaunchKernelGGL((gpu_tc_gemm_), dim3(80), dim3(4*32), 0, 0,
                       NULL, NULL, C_dev, NULL, NULL, NULL, NULL, NULL, m, n, k);
    hipDeviceSynchronize();

    hipMemcpy(C_host, C_dev, sizeof(float) * m * n, hipMemcpyDeviceToHost);
    hipDeviceSynchronize();

    print_matrix(C_host, "C_host", 64, n, n, 1);
}
84d27781e9ccc0c0e2414a3e8e080ffcc6a5945f.cu
#include "monolithic.h" void test_kernel() { int m = 80*32, n = 32, k = 16*512; // 512 iterations float *C_host, *C_dev; C_host = (float *) malloc (sizeof(float) * m * n); cudaMalloc((void **)&C_dev, sizeof(float) * m * n); cudaMemcpy(C_dev, C_host, sizeof(float) * m * n, cudaMemcpyHostToDevice); // 4 warps per thread block // 80 blocks gpu_tc_gemm_ <<< 80,4*32 >>> ( NULL, NULL, C_dev, NULL, NULL, NULL, NULL, NULL, m, n, k ); cudaDeviceSynchronize(); cudaMemcpy(C_host, C_dev, sizeof(float) * m * n, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); print_matrix(C_host, "C_host", 64, n, n, 1); }
21be45810a22a231063f9b5875dfb0dce3fe703c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @generated c Tue Aug 13 16:45:18 2013 */ #include "common_magma.h" #define PRECISION_c /*The version for tesla can be found in csymv_tesla.cu */ #if (GPUSHMEM >= 200) #define magmablas_csymv_200 magmablas_csymv #define magmablas_csymv2_200 magmablas_csymv2 #define csymv_bs 64 #define thread_x 64 #define thread_y 4 #define bank_shift 33 #define quarter_thread_x 16 #define half_thread_x 32 /******************************************************************************* * Functions for each specific cases - Lower case */ __global__ void magmablas_csymv_200_L_special( int n, magmaFloatComplex alpha, const magmaFloatComplex *A, int lda, const magmaFloatComplex *x, int incx, magmaFloatComplex beta, magmaFloatComplex *y, int incy, magmaFloatComplex *WC) { int tx = threadIdx.x ; int ty = threadIdx.y ; int blkc = blockIdx.x ; magmaFloatComplex res = MAGMA_C_ZERO; magmaFloatComplex res_ = MAGMA_C_ZERO; magmaFloatComplex res1 = MAGMA_C_ZERO; __shared__ magmaFloatComplex la [quarter_thread_x][thread_x+2]; __shared__ magmaFloatComplex buff [thread_x]; __shared__ magmaFloatComplex buff2 [thread_x]; magmaFloatComplex tr[4]; magmaFloatComplex b[4]; int break_d = thread_x * blkc; const int td = (thread_x * ty ) + tx; int tx_ = td % half_thread_x; int ty_ = td / half_thread_x; WC += break_d + tx; x += (break_d + tx ) * incx; A += break_d * (lda+1); A += ty_* lda + tx_ ; if( ty == 0 ){ buff[tx] = x[0]; } // obtain the vector x store in buff; tx = tx_ ; ty = ty_ ; #pragma unroll for(int j =0; j<half_thread_x; j +=8) la[0][ bank_shift * (ty_+j) + tx_] = A[ j * lda]; __syncthreads(); #pragma unroll for(int i=ty_*4; i<(ty_ * 4 + 4) ; i++){ if ( i < tx_ ) la[0][bank_shift * tx_ + i] = la[0][ bank_shift * i + tx_]; else la[0][bank_shift * tx_ + i] = la[0][ bank_shift * tx_ + i]; } __syncthreads(); #pragma unroll for(int j=0; j < 4 ; j++) res+= la[0][bank_shift * tx_ + j + ty_ * 4] * buff[j + ty_ * 4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_== 0 ) res1 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; else { MAGMA_C_SET2REAL(res1,0); } __syncthreads(); MAGMA_C_SET2REAL(res, 0) ; A+= half_thread_x + half_thread_x *lda ; #pragma unroll for(int j =0; j<half_thread_x; j+=8) la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; __syncthreads(); #pragma unroll for(int i=ty_*4; i<(4+ty_*4) ; i++){ if ( i < tx_ ) { la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_]; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll for(int j=0; j < 4 ; j++) res+= la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); magmaFloatComplex res2; MAGMA_C_SET2REAL(res2,0); if( ty_== 1 ) res2 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; else { MAGMA_C_SET2REAL(res2,0); } __syncthreads(); MAGMA_C_SET2REAL(res,0); A-=half_thread_x *lda ; MAGMA_C_SET2REAL(res_,0); #pragma unroll for(int j=0; j<half_thread_x; j+=8) tr[j/8] = A[ j * lda]; #pragma unroll for(int j=0; j < 4 ; j++){ res += tr[j] * 
buff[ j*8 + ty_]; la[0][bank_shift*(ty_+j*8)+tx_] = tr[j]; } __syncthreads(); #pragma unroll for(int j=0; j < 4 ; j++) res_+= la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_ == 1 ) res2 = res2 + la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; else { MAGMA_C_SET2REAL(res2,0); } __syncthreads(); la[0][bank_shift*tx_+ty_]= res_ ; __syncthreads(); if( ty_ == 0 ) { res1 = res1 + la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; } else { MAGMA_C_SET2REAL(res1,0); } A-=half_thread_x; __syncthreads(); tx = threadIdx.x ; ty = threadIdx.y ; if( ty_ == 0 && ty == 0 ) res = res1 ; else if( ty_ == 1 && ty == 0 ) res = res2 ; else { MAGMA_C_SET2REAL(res,0); } A-=ty_* lda ; A-=tx_; A= A - lda * blkc * thread_x; x= x - blkc * thread_x *incx ; //x= x- tx*incx; A+=4 * ty* lda ; A+=tx; int wc_c = 0 ; int count = 0 ; tx_ = td % quarter_thread_x ; ty_ = td / quarter_thread_x ; WC-=tx ; WC+=tx_; if( blkc * thread_x >=thread_x) #pragma unroll for(int i=0; i<thread_x; i += thread_x ) { MAGMA_C_SET2REAL(res_,0); count++; if( ty== 0 ) { buff2[tx] = x[i*incx]; } __syncthreads(); #pragma unroll for( int k=0;k<4;k++) { #pragma unroll for(int j=0; j < 4 ; j++) tr[j] = A[j*lda]; #pragma unroll for(int j=0; j < 4 ; j++) { res += tr[j] * buff2[ quarter_thread_x * k + ty * 4 + j]; la[( j + ty * 4)][tx] = tr[j] * buff[tx]; } __syncthreads(); MAGMA_C_SET2REAL(res_,0); #pragma unroll for(int j=0; j < 4 ; j++) { res_+=la[tx_][ty_*4+j] ; } b[k] = res_ ; __syncthreads(); A += lda * quarter_thread_x ; } #pragma unroll for(int k=0; k < 4 ; k++){ la[tx_][ty_+quarter_thread_x*k]= b[k] ; } __syncthreads(); if( ty_ < 4 ) { int k = ty_*quarter_thread_x; res_ = la[tx_][0+k] + la[tx_][1+k] + la[tx_][2+k] + la[tx_][3+k] + la[tx_][4+k] + la[tx_][5+k] + la[tx_][6+k] + la[tx_][7+k] + la[tx_][8+k] + la[tx_][9+k] + la[tx_][10+k]+ la[tx_][11+k] + la[tx_][12+k]+ la[tx_][13+k] + la[tx_][14+k]+ la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } for(int i=thread_x; i< (blkc * thread_x); i += thread_x ) { MAGMA_C_SET2REAL(res_,0); count++; if( ty== 0 ) { buff2[tx] = x[i*incx]; } __syncthreads(); #pragma unroll for( int k=0;k<4;k++) { #pragma unroll for(int j=0; j < 4 ; j++) tr[j] = A[j*lda] ; #pragma unroll for(int j=0; j < 4 ; j++) { res += tr[j] * buff2[ quarter_thread_x*k + ty*4+(j)]; la[( j + ty * 4)][tx] = tr[j] * buff[tx]; } __syncthreads(); MAGMA_C_SET2REAL(res_,0); #pragma unroll for(int j=0; j < 4 ; j++) res_+=la[tx_][ty_*4+j] ; b[k] = res_ ; __syncthreads(); A += lda * quarter_thread_x ; } #pragma unroll for(int k=0; k < 4 ; k++){ la[tx_][ty_+quarter_thread_x*k]= b[k] ; } __syncthreads(); if( ty_ < 4 ) { int k = ty_*quarter_thread_x; res_ = la[tx_][0+k] + la[tx_][1+k] + la[tx_][2+k] + la[tx_][3+k] + la[tx_][4+k] + la[tx_][5+k] + la[tx_][6+k] + la[tx_][7+k] + la[tx_][8+k] + la[tx_][9+k] + la[tx_][10+k]+ la[tx_][11+k] + la[tx_][12+k]+ la[tx_][13+k] + la[tx_][14+k]+ la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } WC+=tx ; WC-=tx_; la[ty][tx]= res ; __syncthreads(); if( ty == 0 ) { res = la[0][tx]+ la[1][tx] + la[2][tx]+ la[3][tx]; WC[0+lda*(blkc) ] = res; } } 
/************************************************************** * Lower case for generic sizes */ __global__ void magmablas_csymv_200_L_generic( int n, magmaFloatComplex alpha, const magmaFloatComplex *A, int lda, const magmaFloatComplex *x, int incx, magmaFloatComplex beta, magmaFloatComplex *y, int incy, magmaFloatComplex *WC, int m_mod_thread_x) { int tx = threadIdx.x ; int ty = threadIdx.y ; int blkc = blockIdx.x ; magmaFloatComplex res = MAGMA_C_ZERO; magmaFloatComplex res_ = MAGMA_C_ZERO; magmaFloatComplex res1 = MAGMA_C_ZERO; __shared__ magmaFloatComplex la [quarter_thread_x][thread_x+2]; __shared__ magmaFloatComplex buff [thread_x]; __shared__ magmaFloatComplex buff2[thread_x]; magmaFloatComplex tr[4]; magmaFloatComplex b[8]; int break_d = thread_x * blkc; const int td = (thread_x * ty ) + tx; int tx_ = td % half_thread_x; int ty_ = td / half_thread_x; WC+= break_d + tx; x += (break_d + tx ) * incx; A += break_d * (lda+1); A += lda * ty_; int trackA ; if( blkc == ( gridDim.x - 1 ) ) { if( ty == 0 ){ if( tx > m_mod_thread_x ) { MAGMA_C_SET2REAL(buff[tx],0); } else buff[tx] = x[0]; } if ( tx_ > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx_; A += trackA ; } else { if( ty == 0 ){ buff[tx] = x[0]; } trackA = tx_; A += trackA ; } // Somehow merging these two if - else creates problem // It could be a potential bug -- from synchronization or from cuda or compiler if( blkc == ( gridDim.x - 1 ) ) { #pragma unroll for(int j =0; j<half_thread_x; j+=8){ if( ( ty_ + j ) > m_mod_thread_x ) { MAGMA_C_SET2REAL(la[0][bank_shift*(ty_+j)+tx_], 9999); } else la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } A-=trackA; } else { #pragma unroll for(int j =0; j<half_thread_x; j+=8){ la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } } tx = tx_ ; ty = ty_ ; __syncthreads(); #pragma unroll for(int i=ty_*4; i<(ty_*4+4) ; i++){ if ( i < tx_ ) la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_]; else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll for(int j=0; j < 4 ; j++) res += la[0][bank_shift*tx_+j+ty_*4]* buff[j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_== 0 ) res1 = la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; else { MAGMA_C_SET2REAL(res1,0); } __syncthreads(); MAGMA_C_SET2REAL(res,0); if( blkc == ( gridDim.x - 1 ) ) { if ( (tx_+half_thread_x) > m_mod_thread_x ) trackA = m_mod_thread_x; else trackA = tx_ + half_thread_x; A+= trackA+half_thread_x*lda ; #pragma unroll for(int j =0; j<half_thread_x; j+=8){ if( ( ty_ + j+half_thread_x ) > m_mod_thread_x ) { MAGMA_C_SET2REAL(la[0][bank_shift*(ty_+j)+tx_], 99999); } else la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } A-= trackA+half_thread_x*lda ; A+=tx_ ; A+= half_thread_x + half_thread_x *lda ; } else { A+= half_thread_x + half_thread_x *lda ; #pragma unroll for(int j =0; j<half_thread_x; j+=8){ la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } } __syncthreads(); #pragma unroll for(int i=ty_*4; i<(4+ty_*4) ; i++){ if ( i < tx_ ) { la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_]; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll for(int j=0; j < 4 ; j++) res+= la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); magmaFloatComplex res2; MAGMA_C_SET2REAL(res2,0); if( ty_== 1 ) res2 = 
la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; else { MAGMA_C_SET2REAL(res2,0); } __syncthreads(); MAGMA_C_SET2REAL(res,0); MAGMA_C_SET2REAL(res_,0); A-=half_thread_x *lda ; if( blkc == ( gridDim.x - 1 ) ) { A-=tx_; if ( tx_ > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx_; A+= trackA ; #pragma unroll for(int j =0; j<half_thread_x; j+=8) if( ( ty_ + j ) > m_mod_thread_x ) { MAGMA_C_SET2REAL(tr[j/8], 99999); } else tr[j/8] = A[ j * lda]; A-=trackA; A+=tx_; } else { #pragma unroll for(int j =0; j<half_thread_x; j+=8) tr[j/8] = A[ j * lda]; } __syncthreads(); #pragma unroll for(int j=0; j < 4 ; j++){ res+= tr[j] * buff[ j*8 + ty_]; la[0][bank_shift*(ty_+j*8)+tx_] = tr[j]; } __syncthreads(); #pragma unroll for(int j=0; j < 4 ; j++) res_+= la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_ == 1 ) res2 = res2 + la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; else { MAGMA_C_SET2REAL(res2,0); } __syncthreads(); la[0][bank_shift*tx_+ty_]= res_ ; __syncthreads(); if( ty_ == 0 ) { res1 = res1 + la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; } else { MAGMA_C_SET2REAL(res1,0); } A-=half_thread_x; __syncthreads(); tx = threadIdx.x ; ty = threadIdx.y ; if( ty_ == 0 && ty == 0 ) res = res1 ; else if( ty_ == 1 && ty == 0 ) res = res2 ; else { MAGMA_C_SET2REAL(res,0); } A-=ty_* lda ; A-=tx_; A= A - lda*break_d; x= x - break_d *incx ; A+=4 * ty* lda ; if( blkc == ( gridDim.x - 1 ) ) { if(tx <= m_mod_thread_x ) A+=tx; else A+=m_mod_thread_x; } else{ A+=tx; } int wc_c = 0 ; int count = 0 ; tx_ = td % quarter_thread_x ; ty_ = td / quarter_thread_x ; WC-=tx ; WC+=tx_; #pragma unroll for(int j=0; j < 4 ; j++) b[j] = buff[ty_*4+j]; if( break_d > 0) #pragma unroll for(int i=0; i< thread_x; i += thread_x ){ MAGMA_C_SET2REAL(res_,0); count++; if( ty== 0 ) { buff2[tx] = x[i*incx]; } __syncthreads(); #pragma unroll for( int k=0;k<4;k++){ #pragma unroll for(int j=0; j < 4 ; j++) tr[j] = A[j*lda] ; #pragma unroll for(int j=0; j < 4 ; j++){ res+=tr[j]*buff2[quarter_thread_x*k + ty*4+(j)]; la[( (j)+ty*4)][tx] = tr[j]; } __syncthreads(); MAGMA_C_SET2REAL(res_, 0) ; #pragma unroll for(int j=0; j < 4 ; j++) res_+=la[tx_][ty_*4+j]* b[j] ; b[4+k] = res_ ; __syncthreads(); A+=lda* quarter_thread_x ; } #pragma unroll for(int k=0; k < 4 ; k++){ la[tx_][ty_+quarter_thread_x*k]= b[4+k] ; } __syncthreads(); if( ty_ < 4 ) { int k = ty_*quarter_thread_x; res_ = la[tx_][0+k] + la[tx_][1+k] + la[tx_][2+k] + la[tx_][3+k] + la[tx_][4+k] + la[tx_][5+k] + la[tx_][6+k] + la[tx_][7+k] + la[tx_][8+k] + la[tx_][9+k] + la[tx_][10+k]+ la[tx_][11+k] + la[tx_][12+k]+ la[tx_][13+k] + la[tx_][14+k]+ la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } for(int i=thread_x; i<break_d; i += thread_x ){ MAGMA_C_SET2REAL(res_, 0) ; count++; if(ty == 0 ) buff2[tx] = x[i*incx]; __syncthreads(); #pragma unroll for( int k=0;k<4;k++){ #pragma unroll for(int j=0; j < 4 ; j++) tr[j] = A[j*lda] ; #pragma unroll for(int j=0; j < 4 ; j++){ res+=tr[j]*buff2[quarter_thread_x*k + ty*4+(j)]; 
la[( (j)+ty*4)][tx] = tr[j]; } __syncthreads(); MAGMA_C_SET2REAL(res_, 0) ; #pragma unroll for(int j=0; j < 4 ; j++) res_+=la[tx_][ty_*4+j]* b[j] ; b[4+k] = res_ ; __syncthreads(); A+=lda* quarter_thread_x ; } #pragma unroll for(int k=0; k < 4 ; k++){ la[tx_][ty_+quarter_thread_x*k]= b[4+k] ; } __syncthreads(); if( ty_ < 4 ) { int k = ty_*quarter_thread_x; res_ = la[tx_][0+k] + la[tx_][1+k] + la[tx_][2+k] + la[tx_][3+k] + la[tx_][4+k] + la[tx_][5+k] + la[tx_][6+k] + la[tx_][7+k] + la[tx_][8+k] + la[tx_][9+k] + la[tx_][10+k]+ la[tx_][11+k] + la[tx_][12+k]+ la[tx_][13+k] + la[tx_][14+k]+ la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } WC+=tx ; WC-=tx_; la[ty][tx]= res ; __syncthreads(); if( ty == 0 ) { res=la[0][tx]+ la[1][tx]+ la[2][tx]+ la[3][tx] ; WC[0+lda*(blkc)] = res; } } __global__ void magmablas_csymv_200_L_update( int n, magmaFloatComplex alpha, const magmaFloatComplex* A, int lda, const magmaFloatComplex *x, int incx, magmaFloatComplex beta, magmaFloatComplex *y, int incy, magmaFloatComplex *WC ) { int i; int tx = threadIdx.x ; int ind = blockIdx.x * thread_x + tx ; magmaFloatComplex Ca; MAGMA_C_SET2REAL(Ca, 0) ; WC+= ind + lda * blockIdx.x; for(i = blockIdx.x*thread_x; i<n; i+=thread_x){ Ca += WC[0] ; WC += thread_x; } if( ind < n ) y[ind * incy] = beta * y[ind * incy] + alpha * Ca ; } extern "C" void magmablas_csymv_200_L(magma_int_t m, magmaFloatComplex alpha, const magmaFloatComplex *A, magma_int_t lda, const magmaFloatComplex *X, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex *Y, magma_int_t incy, magmaFloatComplex *dC_work) { magma_int_t blocks; if (m % csymv_bs==0) blocks = m / csymv_bs; else blocks = m / csymv_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(thread_x, thread_y, 1); dim3 threads_u(csymv_bs, 1, 1); /* * If matrix size is multiple of csymv_bs, we use a specific code. * otherwise, we call the generic case. */ if(m % csymv_bs == 0 ) { hipLaunchKernelGGL(( magmablas_csymv_200_L_special) , dim3(grid), dim3(threads), 0, magma_stream , m, alpha, A, lda, X, incx, beta, Y, incy, dC_work); } else{ magma_int_t m_mod_thread_x = m%csymv_bs - 1; hipLaunchKernelGGL(( magmablas_csymv_200_L_generic) , dim3(grid), dim3(threads), 0, magma_stream , m, alpha, A, lda, X, incx ,beta, Y, incy, dC_work, m_mod_thread_x); } hipLaunchKernelGGL(( magmablas_csymv_200_L_update), dim3(grid), dim3(threads_u), 0, magma_stream , m, alpha, A, lda, X, incx, beta, Y, incy, dC_work); } /************************************************************************* Purpose ======= magmablas_csymv performs the matrix-vector operation on fermi: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n symmetric matrix. Arguments ========== UPLO - CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of A is to be referenced. UPLO = 'L' or 'l' Only the lower triangular part of A is to be referenced. Unchanged on exit. N - INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. Unchanged on exit. ALPHA - COMPLEX*16 . On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - COMPLEX*16 array of DIMENSION ( LDA, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular part of the hermitian matrix and the strictly lower triangular part of A is not referenced. 
Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular part of the hermitian matrix and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). Unchanged on exit. It is recommended that lda is multiple of 16. Otherwise performance would be deteriorated as the memory accesses would not be fully coalescent. X - COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. Unchanged on exit. INCX - INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. BETA - COMPLEX*16 . On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. Unchanged on exit. Y - COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. On exit, Y is overwritten by the updated vector y. INCY - INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. Unchanged on exit. */ extern "C" magma_int_t magmablas_csymv_200( char uplo, magma_int_t n, magmaFloatComplex alpha, const magmaFloatComplex *A, magma_int_t lda, const magmaFloatComplex *X, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex *Y, magma_int_t incy) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) ) return MAGMA_SUCCESS; /* TODO: Upper case is not implemented in MAGMA */ if ( upper ) { #if defined(PRECISION_z) || defined(PRECISION_c) fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented"); #else hipblasCsymv(uplo, n, alpha, A, lda, X, incx, beta, Y, incy); #endif } else { magmaFloatComplex *dC_work; magma_int_t blocks = n / thread_x + (n % thread_x != 0); magma_int_t workspace = lda * (blocks + 1); /* TODO: need to add a MAGMA context to handle workspaces */ hipblasAlloc( workspace, sizeof(magmaFloatComplex), (void**)&dC_work ) ; hipblasGetError( ) ; magmablas_csymv_200_L(n, alpha, A, lda, X, incx, beta, Y, incy, dC_work); hipblasFree(dC_work); hipblasGetError( ) ; } return MAGMA_SUCCESS; } /************************************************************************* Purpose ======= magmablas_csymv2 performs the matrix-vector operation on fermi: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n hermitian matrix. the interface of magmablas_csymv2 is different from magmablas_csymv in the last argument dC_work As magma implements csymv through two steps: 1) perform the multiplication in each thread blocks and put the intermediate value in a space of device memory which we call working space. dC_work is the working space 2) sum the intermediate values and store the final result in y. 
the size of dC_work is lda * (n/thread_x + (n%thread_x !=0)) where thread_x = 64. magmablas_csymv2 requires users to explicitly provide a working space, while magmablas_csymv is a wrapper routine that allocates the working space inside the routine itself and provides the same interface as cublas. If users need to call csymv frequently, we suggest using magmablas_csymv2 instead of magmablas_csymv, as the overhead of allocating and freeing device memory in magmablas_csymv hurts performance. Our tests show that this penalty is about 10 Gflop/s when the matrix size is around 10000. */ extern "C" magma_int_t magmablas_csymv2_200( char uplo, magma_int_t n, magmaFloatComplex alpha, const magmaFloatComplex *A, magma_int_t lda, const magmaFloatComplex *X, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex *Y, magma_int_t incy, magmaFloatComplex *dC_work, magma_int_t lwork) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) ) return MAGMA_SUCCESS; /* TODO: Upper case is not implemented in MAGMA */ if ( upper ) { #if defined(PRECISION_z) || defined(PRECISION_c) fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented"); #else hipblasCsymv(uplo, n, alpha, A, lda, X, incx, beta, Y, incy); #endif } else { magmablas_csymv_200_L(n, alpha, A, lda, X, incx, beta, Y, incy, dC_work); } return MAGMA_SUCCESS; } #endif /* (GPUSHMEM >= 200) */
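The csymv kernels above follow the two-step scheme described in their comments: each thread block writes one partial result per output row into the dC_work workspace, and magmablas_csymv_200_L_update then sums those partials and applies alpha and beta. The sketch below is a much-simplified, real-valued analogue of that pattern; it uses a plain dense column-major matrix and ignores the symmetry exploitation and shared-memory blocking of the MAGMA kernels, and the kernel names and the TX tile width are illustrative assumptions rather than MAGMA APIs.

#include <cstdio>
#include <cuda_runtime.h>

#define TX 64   /* stand-in for thread_x / csymv_bs above (assumption for this sketch) */

/* Step 1: each block handles one TX-wide column tile and writes one partial
   dot product per output row into workspace column blockIdx.x. */
__global__ void partial_gemv(const float* A, const float* x, float* wc, int n, int ldw)
{
    int row  = blockIdx.y * blockDim.x + threadIdx.x;
    int col0 = blockIdx.x * TX;
    if (row >= n) return;
    float s = 0.0f;
    for (int j = col0; j < col0 + TX && j < n; ++j)
        s += A[row + (size_t)j * n] * x[j];            /* dense column-major access */
    wc[row + (size_t)blockIdx.x * ldw] = s;            /* plays the role of WC[...] above */
}

/* Step 2: analogue of magmablas_csymv_200_L_update -- sum the partials per row
   and apply y = beta*y + alpha*sum. */
__global__ void combine(const float* wc, int ldw, int nblk,
                        float alpha, float beta, float* y, int n)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= n) return;
    float s = 0.0f;
    for (int b = 0; b < nblk; ++b)
        s += wc[row + (size_t)b * ldw];
    y[row] = beta * y[row] + alpha * s;
}

int main()
{
    const int n = 256;
    const int nblk = (n + TX - 1) / TX;
    float *A, *x, *y, *wc;                              /* unified memory keeps the sketch short */
    cudaMallocManaged(&A,  (size_t)n * n * sizeof(float));
    cudaMallocManaged(&x,  n * sizeof(float));
    cudaMallocManaged(&y,  n * sizeof(float));
    cudaMallocManaged(&wc, (size_t)n * nblk * sizeof(float));
    for (int j = 0; j < n; ++j) { x[j] = 1.0f; y[j] = 0.0f; }
    for (size_t i = 0; i < (size_t)n * n; ++i) A[i] = 1.0f;

    dim3 grid1(nblk, (n + TX - 1) / TX);
    partial_gemv<<<grid1, TX>>>(A, x, wc, n, n);
    combine<<<(n + TX - 1) / TX, TX>>>(wc, n, nblk, 1.0f, 0.0f, y, n);
    cudaDeviceSynchronize();
    printf("y[0] = %g (expected %d)\n", y[0], n);       /* all-ones A and x => each row sum equals n */

    cudaFree(A); cudaFree(x); cudaFree(y); cudaFree(wc);
    return 0;
}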
21be45810a22a231063f9b5875dfb0dce3fe703c.cu
/* -- MAGMA (version 1.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver August 2013 @generated c Tue Aug 13 16:45:18 2013 */ #include "common_magma.h" #define PRECISION_c /*The version for tesla can be found in csymv_tesla.cu */ #if (GPUSHMEM >= 200) #define magmablas_csymv_200 magmablas_csymv #define magmablas_csymv2_200 magmablas_csymv2 #define csymv_bs 64 #define thread_x 64 #define thread_y 4 #define bank_shift 33 #define quarter_thread_x 16 #define half_thread_x 32 /******************************************************************************* * Functions for each specific cases - Lower case */ __global__ void magmablas_csymv_200_L_special( int n, magmaFloatComplex alpha, const magmaFloatComplex *A, int lda, const magmaFloatComplex *x, int incx, magmaFloatComplex beta, magmaFloatComplex *y, int incy, magmaFloatComplex *WC) { int tx = threadIdx.x ; int ty = threadIdx.y ; int blkc = blockIdx.x ; magmaFloatComplex res = MAGMA_C_ZERO; magmaFloatComplex res_ = MAGMA_C_ZERO; magmaFloatComplex res1 = MAGMA_C_ZERO; __shared__ magmaFloatComplex la [quarter_thread_x][thread_x+2]; __shared__ magmaFloatComplex buff [thread_x]; __shared__ magmaFloatComplex buff2 [thread_x]; magmaFloatComplex tr[4]; magmaFloatComplex b[4]; int break_d = thread_x * blkc; const int td = (thread_x * ty ) + tx; int tx_ = td % half_thread_x; int ty_ = td / half_thread_x; WC += break_d + tx; x += (break_d + tx ) * incx; A += break_d * (lda+1); A += ty_* lda + tx_ ; if( ty == 0 ){ buff[tx] = x[0]; } // obtain the vector x store in buff; tx = tx_ ; ty = ty_ ; #pragma unroll for(int j =0; j<half_thread_x; j +=8) la[0][ bank_shift * (ty_+j) + tx_] = A[ j * lda]; __syncthreads(); #pragma unroll for(int i=ty_*4; i<(ty_ * 4 + 4) ; i++){ if ( i < tx_ ) la[0][bank_shift * tx_ + i] = la[0][ bank_shift * i + tx_]; else la[0][bank_shift * tx_ + i] = la[0][ bank_shift * tx_ + i]; } __syncthreads(); #pragma unroll for(int j=0; j < 4 ; j++) res+= la[0][bank_shift * tx_ + j + ty_ * 4] * buff[j + ty_ * 4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_== 0 ) res1 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; else { MAGMA_C_SET2REAL(res1,0); } __syncthreads(); MAGMA_C_SET2REAL(res, 0) ; A+= half_thread_x + half_thread_x *lda ; #pragma unroll for(int j =0; j<half_thread_x; j+=8) la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; __syncthreads(); #pragma unroll for(int i=ty_*4; i<(4+ty_*4) ; i++){ if ( i < tx_ ) { la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_]; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll for(int j=0; j < 4 ; j++) res+= la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); magmaFloatComplex res2; MAGMA_C_SET2REAL(res2,0); if( ty_== 1 ) res2 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; else { MAGMA_C_SET2REAL(res2,0); } __syncthreads(); MAGMA_C_SET2REAL(res,0); A-=half_thread_x *lda ; MAGMA_C_SET2REAL(res_,0); #pragma unroll for(int j=0; j<half_thread_x; j+=8) tr[j/8] = A[ j * lda]; #pragma unroll for(int j=0; j < 4 ; j++){ res += tr[j] * buff[ j*8 + ty_]; la[0][bank_shift*(ty_+j*8)+tx_] = tr[j]; } __syncthreads(); #pragma 
unroll for(int j=0; j < 4 ; j++) res_+= la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_ == 1 ) res2 = res2 + la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; else { MAGMA_C_SET2REAL(res2,0); } __syncthreads(); la[0][bank_shift*tx_+ty_]= res_ ; __syncthreads(); if( ty_ == 0 ) { res1 = res1 + la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7]; } else { MAGMA_C_SET2REAL(res1,0); } A-=half_thread_x; __syncthreads(); tx = threadIdx.x ; ty = threadIdx.y ; if( ty_ == 0 && ty == 0 ) res = res1 ; else if( ty_ == 1 && ty == 0 ) res = res2 ; else { MAGMA_C_SET2REAL(res,0); } A-=ty_* lda ; A-=tx_; A= A - lda * blkc * thread_x; x= x - blkc * thread_x *incx ; //x= x- tx*incx; A+=4 * ty* lda ; A+=tx; int wc_c = 0 ; int count = 0 ; tx_ = td % quarter_thread_x ; ty_ = td / quarter_thread_x ; WC-=tx ; WC+=tx_; if( blkc * thread_x >=thread_x) #pragma unroll for(int i=0; i<thread_x; i += thread_x ) { MAGMA_C_SET2REAL(res_,0); count++; if( ty== 0 ) { buff2[tx] = x[i*incx]; } __syncthreads(); #pragma unroll for( int k=0;k<4;k++) { #pragma unroll for(int j=0; j < 4 ; j++) tr[j] = A[j*lda]; #pragma unroll for(int j=0; j < 4 ; j++) { res += tr[j] * buff2[ quarter_thread_x * k + ty * 4 + j]; la[( j + ty * 4)][tx] = tr[j] * buff[tx]; } __syncthreads(); MAGMA_C_SET2REAL(res_,0); #pragma unroll for(int j=0; j < 4 ; j++) { res_+=la[tx_][ty_*4+j] ; } b[k] = res_ ; __syncthreads(); A += lda * quarter_thread_x ; } #pragma unroll for(int k=0; k < 4 ; k++){ la[tx_][ty_+quarter_thread_x*k]= b[k] ; } __syncthreads(); if( ty_ < 4 ) { int k = ty_*quarter_thread_x; res_ = la[tx_][0+k] + la[tx_][1+k] + la[tx_][2+k] + la[tx_][3+k] + la[tx_][4+k] + la[tx_][5+k] + la[tx_][6+k] + la[tx_][7+k] + la[tx_][8+k] + la[tx_][9+k] + la[tx_][10+k]+ la[tx_][11+k] + la[tx_][12+k]+ la[tx_][13+k] + la[tx_][14+k]+ la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } for(int i=thread_x; i< (blkc * thread_x); i += thread_x ) { MAGMA_C_SET2REAL(res_,0); count++; if( ty== 0 ) { buff2[tx] = x[i*incx]; } __syncthreads(); #pragma unroll for( int k=0;k<4;k++) { #pragma unroll for(int j=0; j < 4 ; j++) tr[j] = A[j*lda] ; #pragma unroll for(int j=0; j < 4 ; j++) { res += tr[j] * buff2[ quarter_thread_x*k + ty*4+(j)]; la[( j + ty * 4)][tx] = tr[j] * buff[tx]; } __syncthreads(); MAGMA_C_SET2REAL(res_,0); #pragma unroll for(int j=0; j < 4 ; j++) res_+=la[tx_][ty_*4+j] ; b[k] = res_ ; __syncthreads(); A += lda * quarter_thread_x ; } #pragma unroll for(int k=0; k < 4 ; k++){ la[tx_][ty_+quarter_thread_x*k]= b[k] ; } __syncthreads(); if( ty_ < 4 ) { int k = ty_*quarter_thread_x; res_ = la[tx_][0+k] + la[tx_][1+k] + la[tx_][2+k] + la[tx_][3+k] + la[tx_][4+k] + la[tx_][5+k] + la[tx_][6+k] + la[tx_][7+k] + la[tx_][8+k] + la[tx_][9+k] + la[tx_][10+k]+ la[tx_][11+k] + la[tx_][12+k]+ la[tx_][13+k] + la[tx_][14+k]+ la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } WC+=tx ; WC-=tx_; la[ty][tx]= res ; __syncthreads(); if( ty == 0 ) { res = la[0][tx]+ la[1][tx] + la[2][tx]+ la[3][tx]; WC[0+lda*(blkc) ] = res; } } /************************************************************** * Lower case for generic sizes */ __global__ void magmablas_csymv_200_L_generic( int 
n, magmaFloatComplex alpha, const magmaFloatComplex *A, int lda, const magmaFloatComplex *x, int incx, magmaFloatComplex beta, magmaFloatComplex *y, int incy, magmaFloatComplex *WC, int m_mod_thread_x) { int tx = threadIdx.x ; int ty = threadIdx.y ; int blkc = blockIdx.x ; magmaFloatComplex res = MAGMA_C_ZERO; magmaFloatComplex res_ = MAGMA_C_ZERO; magmaFloatComplex res1 = MAGMA_C_ZERO; __shared__ magmaFloatComplex la [quarter_thread_x][thread_x+2]; __shared__ magmaFloatComplex buff [thread_x]; __shared__ magmaFloatComplex buff2[thread_x]; magmaFloatComplex tr[4]; magmaFloatComplex b[8]; int break_d = thread_x * blkc; const int td = (thread_x * ty ) + tx; int tx_ = td % half_thread_x; int ty_ = td / half_thread_x; WC+= break_d + tx; x += (break_d + tx ) * incx; A += break_d * (lda+1); A += lda * ty_; int trackA ; if( blkc == ( gridDim.x - 1 ) ) { if( ty == 0 ){ if( tx > m_mod_thread_x ) { MAGMA_C_SET2REAL(buff[tx],0); } else buff[tx] = x[0]; } if ( tx_ > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx_; A += trackA ; } else { if( ty == 0 ){ buff[tx] = x[0]; } trackA = tx_; A += trackA ; } // Somehow merging these two if - else creates problem // It could be a potential bug -- from synchronization or from cuda or compiler if( blkc == ( gridDim.x - 1 ) ) { #pragma unroll for(int j =0; j<half_thread_x; j+=8){ if( ( ty_ + j ) > m_mod_thread_x ) { MAGMA_C_SET2REAL(la[0][bank_shift*(ty_+j)+tx_], 9999); } else la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } A-=trackA; } else { #pragma unroll for(int j =0; j<half_thread_x; j+=8){ la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } } tx = tx_ ; ty = ty_ ; __syncthreads(); #pragma unroll for(int i=ty_*4; i<(ty_*4+4) ; i++){ if ( i < tx_ ) la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_]; else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll for(int j=0; j < 4 ; j++) res += la[0][bank_shift*tx_+j+ty_*4]* buff[j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_== 0 ) res1 = la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; else { MAGMA_C_SET2REAL(res1,0); } __syncthreads(); MAGMA_C_SET2REAL(res,0); if( blkc == ( gridDim.x - 1 ) ) { if ( (tx_+half_thread_x) > m_mod_thread_x ) trackA = m_mod_thread_x; else trackA = tx_ + half_thread_x; A+= trackA+half_thread_x*lda ; #pragma unroll for(int j =0; j<half_thread_x; j+=8){ if( ( ty_ + j+half_thread_x ) > m_mod_thread_x ) { MAGMA_C_SET2REAL(la[0][bank_shift*(ty_+j)+tx_], 99999); } else la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } A-= trackA+half_thread_x*lda ; A+=tx_ ; A+= half_thread_x + half_thread_x *lda ; } else { A+= half_thread_x + half_thread_x *lda ; #pragma unroll for(int j =0; j<half_thread_x; j+=8){ la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda]; } } __syncthreads(); #pragma unroll for(int i=ty_*4; i<(4+ty_*4) ; i++){ if ( i < tx_ ) { la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_]; } else la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i]; } __syncthreads(); #pragma unroll for(int j=0; j < 4 ; j++) res+= la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); magmaFloatComplex res2; MAGMA_C_SET2REAL(res2,0); if( ty_== 1 ) res2 = la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + 
la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; else { MAGMA_C_SET2REAL(res2,0); } __syncthreads(); MAGMA_C_SET2REAL(res,0); MAGMA_C_SET2REAL(res_,0); A-=half_thread_x *lda ; if( blkc == ( gridDim.x - 1 ) ) { A-=tx_; if ( tx_ > m_mod_thread_x ) trackA=m_mod_thread_x; else trackA=tx_; A+= trackA ; #pragma unroll for(int j =0; j<half_thread_x; j+=8) if( ( ty_ + j ) > m_mod_thread_x ) { MAGMA_C_SET2REAL(tr[j/8], 99999); } else tr[j/8] = A[ j * lda]; A-=trackA; A+=tx_; } else { #pragma unroll for(int j =0; j<half_thread_x; j+=8) tr[j/8] = A[ j * lda]; } __syncthreads(); #pragma unroll for(int j=0; j < 4 ; j++){ res+= tr[j] * buff[ j*8 + ty_]; la[0][bank_shift*(ty_+j*8)+tx_] = tr[j]; } __syncthreads(); #pragma unroll for(int j=0; j < 4 ; j++) res_+= la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4]; __syncthreads(); la[0][bank_shift*tx_+ty_]= res ; __syncthreads(); if( ty_ == 1 ) res2 = res2 + la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; else { MAGMA_C_SET2REAL(res2,0); } __syncthreads(); la[0][bank_shift*tx_+ty_]= res_ ; __syncthreads(); if( ty_ == 0 ) { res1 = res1 + la[0][tx_*bank_shift+0] + la[0][tx_*bank_shift+1] + la[0][tx_*bank_shift+2] + la[0][tx_*bank_shift+3] + la[0][tx_*bank_shift+4] + la[0][tx_*bank_shift+5] + la[0][tx_*bank_shift+6] + la[0][tx_*bank_shift+7]; } else { MAGMA_C_SET2REAL(res1,0); } A-=half_thread_x; __syncthreads(); tx = threadIdx.x ; ty = threadIdx.y ; if( ty_ == 0 && ty == 0 ) res = res1 ; else if( ty_ == 1 && ty == 0 ) res = res2 ; else { MAGMA_C_SET2REAL(res,0); } A-=ty_* lda ; A-=tx_; A= A - lda*break_d; x= x - break_d *incx ; A+=4 * ty* lda ; if( blkc == ( gridDim.x - 1 ) ) { if(tx <= m_mod_thread_x ) A+=tx; else A+=m_mod_thread_x; } else{ A+=tx; } int wc_c = 0 ; int count = 0 ; tx_ = td % quarter_thread_x ; ty_ = td / quarter_thread_x ; WC-=tx ; WC+=tx_; #pragma unroll for(int j=0; j < 4 ; j++) b[j] = buff[ty_*4+j]; if( break_d > 0) #pragma unroll for(int i=0; i< thread_x; i += thread_x ){ MAGMA_C_SET2REAL(res_,0); count++; if( ty== 0 ) { buff2[tx] = x[i*incx]; } __syncthreads(); #pragma unroll for( int k=0;k<4;k++){ #pragma unroll for(int j=0; j < 4 ; j++) tr[j] = A[j*lda] ; #pragma unroll for(int j=0; j < 4 ; j++){ res+=tr[j]*buff2[quarter_thread_x*k + ty*4+(j)]; la[( (j)+ty*4)][tx] = tr[j]; } __syncthreads(); MAGMA_C_SET2REAL(res_, 0) ; #pragma unroll for(int j=0; j < 4 ; j++) res_+=la[tx_][ty_*4+j]* b[j] ; b[4+k] = res_ ; __syncthreads(); A+=lda* quarter_thread_x ; } #pragma unroll for(int k=0; k < 4 ; k++){ la[tx_][ty_+quarter_thread_x*k]= b[4+k] ; } __syncthreads(); if( ty_ < 4 ) { int k = ty_*quarter_thread_x; res_ = la[tx_][0+k] + la[tx_][1+k] + la[tx_][2+k] + la[tx_][3+k] + la[tx_][4+k] + la[tx_][5+k] + la[tx_][6+k] + la[tx_][7+k] + la[tx_][8+k] + la[tx_][9+k] + la[tx_][10+k]+ la[tx_][11+k] + la[tx_][12+k]+ la[tx_][13+k] + la[tx_][14+k]+ la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } for(int i=thread_x; i<break_d; i += thread_x ){ MAGMA_C_SET2REAL(res_, 0) ; count++; if(ty == 0 ) buff2[tx] = x[i*incx]; __syncthreads(); #pragma unroll for( int k=0;k<4;k++){ #pragma unroll for(int j=0; j < 4 ; j++) tr[j] = A[j*lda] ; #pragma unroll for(int j=0; j < 4 ; j++){ res+=tr[j]*buff2[quarter_thread_x*k + ty*4+(j)]; la[( (j)+ty*4)][tx] = tr[j]; } __syncthreads(); MAGMA_C_SET2REAL(res_, 0) ; #pragma unroll for(int j=0; j < 4 ; j++) res_+=la[tx_][ty_*4+j]* b[j] ; b[4+k] 
= res_ ; __syncthreads(); A+=lda* quarter_thread_x ; } #pragma unroll for(int k=0; k < 4 ; k++){ la[tx_][ty_+quarter_thread_x*k]= b[4+k] ; } __syncthreads(); if( ty_ < 4 ) { int k = ty_*quarter_thread_x; res_ = la[tx_][0+k] + la[tx_][1+k] + la[tx_][2+k] + la[tx_][3+k] + la[tx_][4+k] + la[tx_][5+k] + la[tx_][6+k] + la[tx_][7+k] + la[tx_][8+k] + la[tx_][9+k] + la[tx_][10+k]+ la[tx_][11+k] + la[tx_][12+k]+ la[tx_][13+k] + la[tx_][14+k]+ la[tx_][15+k]; WC[k + wc_c*lda ] = res_; } wc_c++; __syncthreads(); } WC+=tx ; WC-=tx_; la[ty][tx]= res ; __syncthreads(); if( ty == 0 ) { res=la[0][tx]+ la[1][tx]+ la[2][tx]+ la[3][tx] ; WC[0+lda*(blkc)] = res; } } __global__ void magmablas_csymv_200_L_update( int n, magmaFloatComplex alpha, const magmaFloatComplex* A, int lda, const magmaFloatComplex *x, int incx, magmaFloatComplex beta, magmaFloatComplex *y, int incy, magmaFloatComplex *WC ) { int i; int tx = threadIdx.x ; int ind = blockIdx.x * thread_x + tx ; magmaFloatComplex Ca; MAGMA_C_SET2REAL(Ca, 0) ; WC+= ind + lda * blockIdx.x; for(i = blockIdx.x*thread_x; i<n; i+=thread_x){ Ca += WC[0] ; WC += thread_x; } if( ind < n ) y[ind * incy] = beta * y[ind * incy] + alpha * Ca ; } extern "C" void magmablas_csymv_200_L(magma_int_t m, magmaFloatComplex alpha, const magmaFloatComplex *A, magma_int_t lda, const magmaFloatComplex *X, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex *Y, magma_int_t incy, magmaFloatComplex *dC_work) { magma_int_t blocks; if (m % csymv_bs==0) blocks = m / csymv_bs; else blocks = m / csymv_bs + 1; dim3 grid(blocks, 1, 1); dim3 threads(thread_x, thread_y, 1); dim3 threads_u(csymv_bs, 1, 1); /* * If matrix size is multiple of csymv_bs, we use a specific code. * otherwise, we call the generic case. */ if(m % csymv_bs == 0 ) { magmablas_csymv_200_L_special <<< grid, threads, 0, magma_stream >>>( m, alpha, A, lda, X, incx, beta, Y, incy, dC_work); } else{ magma_int_t m_mod_thread_x = m%csymv_bs - 1; magmablas_csymv_200_L_generic <<< grid, threads, 0, magma_stream >>> ( m, alpha, A, lda, X, incx ,beta, Y, incy, dC_work, m_mod_thread_x); } magmablas_csymv_200_L_update<<< grid, threads_u, 0, magma_stream >>>( m, alpha, A, lda, X, incx, beta, Y, incy, dC_work); } /************************************************************************* Purpose ======= magmablas_csymv performs the matrix-vector operation on fermi: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n symmetric matrix. Arguments ========== UPLO - CHARACTER*1. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: UPLO = 'U' or 'u' Only the upper triangular part of A is to be referenced. UPLO = 'L' or 'l' Only the lower triangular part of A is to be referenced. Unchanged on exit. N - INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. Unchanged on exit. ALPHA - COMPLEX*16 . On entry, ALPHA specifies the scalar alpha. Unchanged on exit. A - COMPLEX*16 array of DIMENSION ( LDA, n ). Before entry with UPLO = 'U' or 'u', the leading n by n upper triangular part of the array A must contain the upper triangular part of the hermitian matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = 'L' or 'l', the leading n by n lower triangular part of the array A must contain the lower triangular part of the hermitian matrix and the strictly upper triangular part of A is not referenced. 
Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. Unchanged on exit. LDA - INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). Unchanged on exit. It is recommended that lda is multiple of 16. Otherwise performance would be deteriorated as the memory accesses would not be fully coalescent. X - COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. Unchanged on exit. INCX - INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. Unchanged on exit. BETA - COMPLEX*16 . On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. Unchanged on exit. Y - COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. On exit, Y is overwritten by the updated vector y. INCY - INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. Unchanged on exit. */ extern "C" magma_int_t magmablas_csymv_200( char uplo, magma_int_t n, magmaFloatComplex alpha, const magmaFloatComplex *A, magma_int_t lda, const magmaFloatComplex *X, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex *Y, magma_int_t incy) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) ) return MAGMA_SUCCESS; /* TODO: Upper case is not implemented in MAGMA */ if ( upper ) { #if defined(PRECISION_z) || defined(PRECISION_c) fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented"); #else cublasCsymv(uplo, n, alpha, A, lda, X, incx, beta, Y, incy); #endif } else { magmaFloatComplex *dC_work; magma_int_t blocks = n / thread_x + (n % thread_x != 0); magma_int_t workspace = lda * (blocks + 1); /* TODO: need to add a MAGMA context to handle workspaces */ cublasAlloc( workspace, sizeof(magmaFloatComplex), (void**)&dC_work ) ; cublasGetError( ) ; magmablas_csymv_200_L(n, alpha, A, lda, X, incx, beta, Y, incy, dC_work); cublasFree(dC_work); cublasGetError( ) ; } return MAGMA_SUCCESS; } /************************************************************************* Purpose ======= magmablas_csymv2 performs the matrix-vector operation on fermi: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n hermitian matrix. the interface of magmablas_csymv2 is different from magmablas_csymv in the last argument dC_work As magma implements csymv through two steps: 1) perform the multiplication in each thread blocks and put the intermediate value in a space of device memory which we call working space. dC_work is the working space 2) sum the intermediate values and store the final result in y. the size of dC_work is lda * (n/thread_x + (n%thread_x !=0) where thread_x = 64 magamblasw_csymv requires users to explicitly a working space, while magmablas_csymv is a wrapper routine of magmabalsw_csymv allocating the working space inside the routine and provides the same interface with cublas. 
If users need to call csymv frequently, we suggest using magmablas_csymv2 instead of magmablas_csymv, as the overhead of allocating and freeing device memory in magmablas_csymv hurts performance. Our tests show that this penalty is about 10 Gflop/s when the matrix size is around 10000. */ extern "C" magma_int_t magmablas_csymv2_200( char uplo, magma_int_t n, magmaFloatComplex alpha, const magmaFloatComplex *A, magma_int_t lda, const magmaFloatComplex *X, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex *Y, magma_int_t incy, magmaFloatComplex *dC_work, magma_int_t lwork) { char uplo_[2] = {uplo, 0}; int upper = lapackf77_lsame(uplo_, "U"); /* * Test the input parameters. */ if ((! upper) && (! lapackf77_lsame(uplo_, "L"))) { return -1; } else if ( n < 0 ) { return -2; } else if ( lda < max(1,n) ) { return -5; } else if ( incx == 0 ) { return -7; } else if ( incy == 0 ) { return -10; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_C_EQUAL(alpha, MAGMA_C_ZERO) && MAGMA_C_EQUAL(beta, MAGMA_C_ONE) ) ) return MAGMA_SUCCESS; /* TODO: Upper case is not implemented in MAGMA */ if ( upper ) { #if defined(PRECISION_z) || defined(PRECISION_c) fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented"); #else cublasCsymv(uplo, n, alpha, A, lda, X, incx, beta, Y, incy); #endif } else { magmablas_csymv_200_L(n, alpha, A, lda, X, incx, beta, Y, incy, dC_work); } return MAGMA_SUCCESS; } #endif /* (GPUSHMEM >= 200) */
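Both the HIP and CUDA versions of the magmablas_csymv wrapper size the dC_work workspace as lda * (blocks + 1) elements, with blocks = n/thread_x rounded up, before handing it to the _L driver. The snippet below simply reproduces that sizing on the host, with a plain cudaMalloc standing in for the legacy cublasAlloc call used above; float2 stands in for magmaFloatComplex, and the problem size is illustrative.

#include <cstdio>
#include <cuda_runtime.h>

int main()
{
    const int n = 1000, thread_x = 64;                      /* thread_x matches the #define above */
    const int lda = n;
    const int blocks = n / thread_x + (n % thread_x != 0);
    const size_t workspace = (size_t)lda * (blocks + 1);    /* same formula as magmablas_csymv_200 */

    float2* dC_work = nullptr;                              /* float2 stands in for magmaFloatComplex */
    cudaError_t err = cudaMalloc(&dC_work, workspace * sizeof(float2));
    if (err != cudaSuccess) {
        fprintf(stderr, "workspace allocation failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("blocks = %d, workspace = %zu elements (%zu bytes)\n",
           blocks, workspace, workspace * sizeof(float2));
    cudaFree(dC_work);
    return 0;
}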
df8008d53326a28b5b5e5073188331862368df13.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ #include "warpAcceleration.h" __global__ void test_shfl_broadcast(float *d_out, float *d_in, const int srcLane) { float value = d_in[threadIdx.x]; value = __shfl_sync(0xffffffff,value,srcLane,BDIMX); d_out[threadIdx.x] = value; } __global__ void test_shfl_up(float *d_out, float *d_in, const int delta) { float value = d_in[threadIdx.x]; value = __shfl_up_sync(0xffffffff,value,delta,16); d_out[threadIdx.x] = value; } __global__ void test_shfl_down(float *d_out, float *d_in, const int delta) { float value = d_in[threadIdx.x]; value = __shfl_down_sync(0xffffffff,value,delta,16); d_out[threadIdx.x] = value; } __global__ void test_shfl_xor(float *d_out, float *d_in, const int mask) { float value = d_in[threadIdx.x]; value = __shfl_xor_sync(0xffffffff,value,mask,BDIMX); d_out[threadIdx.x] = value; } __inline__ __device__ float warpReduce(float mySum) { mySum+=__shfl_xor_sync(0xffffffff,mySum,16); mySum+=__shfl_xor_sync(0xffffffff,mySum,8); mySum+=__shfl_xor_sync(0xffffffff,mySum,4); mySum+=__shfl_xor_sync(0xffffffff,mySum,2); mySum+=__shfl_xor_sync(0xffffffff,mySum,1); return mySum; } __global__ void reduceShfl(int *g_idata, int *g_odata, unsigned int n) { // shared memory for each warp sum __shared__ int smem[SMEMDIM]; // boundary check unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if(idx<n) { // read from global memory int mySum = g_idata[idx]; // calculate lane index and warp index int laneIdx = threadIdx.x%warpSize; int warpIdx = threadIdx.x/warpSize; // block-wide warp reduce mySum = warpReduce(mySum); // save warp sum to shared memory if(laneIdx==0) smem[warpIdx] = mySum; // block synchronization __syncthreads(); // last warp reduce mySum = (threadIdx.x<SMEMDIM) ? smem[laneIdx] : 0; if(warpIdx==0) mySum = warpReduce(mySum); // write result for this block to global mem if(threadIdx.x==0) g_odata[blockIdx.x] = mySum; } }
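Note that warpReduce above is declared for float, yet reduceShfl feeds it int data read from g_idata, so the additions go through implicit int/float conversions. Below is a minimal, self-contained sketch of the same butterfly reduction kept entirely in int; it is a loop form of the unrolled __shfl_xor_sync sequence above and assumes a single full 32-lane warp.

#include <cstdio>
#include <cuda_runtime.h>

/* Loop form of the xor-butterfly reduction used by warpReduce above, for int. */
__inline__ __device__ int warpReduceInt(int v)
{
    for (int offset = warpSize / 2; offset > 0; offset >>= 1)
        v += __shfl_xor_sync(0xffffffff, v, offset);
    return v;
}

__global__ void warp_sum(const int* in, int* out)
{
    int v = in[threadIdx.x];
    v = warpReduceInt(v);
    if (threadIdx.x == 0) *out = v;   /* after the butterfly, every lane holds the warp sum */
}

int main()
{
    int h_in[32], *d_in, *d_out, h_out = 0;
    for (int i = 0; i < 32; ++i) h_in[i] = i;          /* expected sum: 496 */
    cudaMalloc(&d_in, sizeof(h_in));
    cudaMalloc(&d_out, sizeof(int));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
    warp_sum<<<1, 32>>>(d_in, d_out);
    cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
    printf("warp sum = %d\n", h_out);                  /* prints 496 */
    cudaFree(d_in); cudaFree(d_out);
    return 0;
}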
df8008d53326a28b5b5e5073188331862368df13.cu
/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ #include "warpAcceleration.h" __global__ void test_shfl_broadcast(float *d_out, float *d_in, const int srcLane) { float value = d_in[threadIdx.x]; value = __shfl_sync(0xffffffff,value,srcLane,BDIMX); d_out[threadIdx.x] = value; } __global__ void test_shfl_up(float *d_out, float *d_in, const int delta) { float value = d_in[threadIdx.x]; value = __shfl_up_sync(0xffffffff,value,delta,16); d_out[threadIdx.x] = value; } __global__ void test_shfl_down(float *d_out, float *d_in, const int delta) { float value = d_in[threadIdx.x]; value = __shfl_down_sync(0xffffffff,value,delta,16); d_out[threadIdx.x] = value; } __global__ void test_shfl_xor(float *d_out, float *d_in, const int mask) { float value = d_in[threadIdx.x]; value = __shfl_xor_sync(0xffffffff,value,mask,BDIMX); d_out[threadIdx.x] = value; } __inline__ __device__ float warpReduce(float mySum) { mySum+=__shfl_xor_sync(0xffffffff,mySum,16); mySum+=__shfl_xor_sync(0xffffffff,mySum,8); mySum+=__shfl_xor_sync(0xffffffff,mySum,4); mySum+=__shfl_xor_sync(0xffffffff,mySum,2); mySum+=__shfl_xor_sync(0xffffffff,mySum,1); return mySum; } __global__ void reduceShfl(int *g_idata, int *g_odata, unsigned int n) { // shared memory for each warp sum __shared__ int smem[SMEMDIM]; // boundary check unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; if(idx<n) { // read from global memory int mySum = g_idata[idx]; // calculate lane index and warp index int laneIdx = threadIdx.x%warpSize; int warpIdx = threadIdx.x/warpSize; // block-wide warp reduce mySum = warpReduce(mySum); // save warp sum to shared memory if(laneIdx==0) smem[warpIdx] = mySum; // block synchronization __syncthreads(); // last warp reduce mySum = (threadIdx.x<SMEMDIM) ? smem[laneIdx] : 0; if(warpIdx==0) mySum = warpReduce(mySum); // write result for this block to global mem if(threadIdx.x==0) g_odata[blockIdx.x] = mySum; } }
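test_shfl_up and test_shfl_down above pass a sub-warp width of 16, while test_shfl_broadcast and test_shfl_xor use BDIMX from warpAcceleration.h, which is not shown here. The standalone sketch below illustrates what that width argument does for a broadcast: with width 16, a 32-thread warp behaves as two independent 16-lane segments, each reading from its own srcLane. The WIDTH constant is local to this sketch and is not a claim about the value of BDIMX.

#include <cstdio>
#include <cuda_runtime.h>

#define WIDTH 16   /* local sub-warp width for this sketch only */

__global__ void shfl_broadcast_segmented(float* out, const float* in, int srcLane)
{
    float v = in[threadIdx.x];
    /* width = 16 splits the warp into two segments; each lane reads srcLane of its own segment */
    v = __shfl_sync(0xffffffff, v, srcLane, WIDTH);
    out[threadIdx.x] = v;
}

int main()
{
    const int n = 32;                                   /* one full warp */
    float h_in[n], h_out[n], *d_in, *d_out;
    for (int i = 0; i < n; ++i) h_in[i] = (float)i;
    cudaMalloc(&d_in,  n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(float));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);

    shfl_broadcast_segmented<<<1, n>>>(d_out, d_in, 2);
    cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);

    /* lanes 0-15 print 2, lanes 16-31 print 18 */
    for (int i = 0; i < n; ++i) printf("%g ", h_out[i]);
    printf("\n");

    cudaFree(d_in); cudaFree(d_out);
    return 0;
}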
3dd3502cd4bbe5d261472d4cbe6059c74daf5109.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <fstream> #include <string> #include <hip/hip_runtime.h> #include <time.h> #include <sys/time.h> #define DIMX 10 #define DIMY 10 #define DIMZ 3 #define CUDA_CHECK(cmd) {hipError_t error = cmd; if(error!=hipSuccess){printf("<%s>:%i ",__FILE__,__LINE__); printf("[CUDA] Error: %s\n", hipGetErrorString(error));}} /** IDEA: ** 1. Split sequence into DIMX * DIMY * DIMZ parts ** 2. Let each thread sum up the amount of bases on one part ** 3. Let each thread sum up the amoutn of bases that were calculated by its predecessors ** 4. Pick the thread in which the searched value lies (aka the amount of bases of its predecessors are lower than the searched value AND its amount of bases added to that makes the result greater than the searched value ** 5. Search on that part again until we find the desired index ** 6. Return the index ** 7. ?? ** 8. PROFIT! **/ __global__ void sumBases(char *sequence, unsigned *result, unsigned *subSequenceLength, unsigned *searchedBound, size_t *subSequences, size_t sequence_length) { //linearize thread ids int threadId = threadIdx.x + threadIdx.y * DIMX + threadIdx.z * DIMX * DIMY; subSequences[threadId]=0; //count bases in each part of the sequence { for(size_t i=threadId*(*subSequenceLength); i<(threadId+1)*(*subSequenceLength); i++) { if(sequence[i]!='-') { subSequences[threadId]++; } } } __syncthreads(); //sum up the amount of bases which was computed by the "previous" threads (in a linear order) size_t cumulatedAmountOfBases=0; for(size_t i=0; i<threadId; i++) { cumulatedAmountOfBases+=subSequences[i]; } __syncthreads(); //pick the thread that is the last one we look at before we exceed our bound if( (cumulatedAmountOfBases < *searchedBound) && (cumulatedAmountOfBases+subSequences[threadId] > *searchedBound)) { //set the result pointer to the first char of the substring *result=threadId*(*subSequenceLength); //iterate again over the substring for(size_t i=threadId*(*subSequenceLength); cumulatedAmountOfBases<*searchedBound; i++) { if(sequence[i]!='-') { cumulatedAmountOfBases++; } //increase the result pointer *result=i; } *result+=1; } } void print_help(){ std::cout << "usage: \t transalign_killer <file/to/read/sequence/from>\n"; } std::string get_file_contents(const char *filename) { std::ifstream in(filename, std::ios::in | std::ios::binary); std::string contents(""); // print file: if(in.is_open()){ while (in.good()) { contents.push_back(in.get()); } } else { std::cerr << ">> problem opening file at: " << filename << "\n"; } return contents; } int main(int argc, char** argv){ long delta_time; struct timeval start_time, end_time; if(argc!=2){ print_help(); return 1; } //use the following file: http://idisk.mpi-cbg.de/~steinbac/transalign_sequence.txt.tgz //untar it and then use it as input std::string file_loc(argv[1]); std::cout << "reading input from " << file_loc << "\n"; std::string seq = get_file_contents(file_loc.c_str()); if(seq.empty()) return 1; /**convert string to char array**/ char *host_sequence=new char[seq.size()+1]; //set the whole sequence to 0 host_sequence[seq.size()]=0; //copy every char memcpy(host_sequence, seq.c_str(), seq.size()); //get integer part for subSequenceLength double integerPart; modf( seq.size() / (DIMX * DIMY * DIMZ) , &integerPart); int iPart = static_cast<int>(integerPart); int *host_subSequenceLength = &iPart; unsigned *host_searchedBound=(unsigned*) malloc(sizeof(unsigned)); *host_searchedBound=seq.size()/2; //length the part each GPU 
thread has to deal with unsigned *dev_subSequenceLength; //pointer for result on device unsigned *dev_result; //pointer for result on host unsigned *host_result=(unsigned*) malloc(sizeof(unsigned)); //sequence on device char *dev_sequence; //char array with a slot for each thread on GPU (only a temporary solution for now) size_t *dev_subSequences; unsigned *dev_searchedBound; /**start GPU stuff**/ dim3 block(DIMX, DIMY, DIMZ); CUDA_CHECK(hipMalloc((void**)&dev_result, sizeof(unsigned))); CUDA_CHECK(hipMalloc((void**)&dev_subSequenceLength, sizeof(unsigned))); CUDA_CHECK(hipMalloc((void**)&dev_searchedBound, sizeof(unsigned))); CUDA_CHECK(hipMalloc((void**)&dev_sequence, seq.size()*sizeof(char))); CUDA_CHECK(hipMalloc((void**)&dev_subSequences, DIMX * DIMY * DIMZ * sizeof(size_t))); //set a starting point gettimeofday(&start_time, NULL); //this is where things start to become incredibly slow CUDA_CHECK(hipMemcpy(dev_sequence, host_sequence, seq.size()*sizeof(char), hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(dev_subSequenceLength, host_subSequenceLength, sizeof(unsigned), hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(dev_searchedBound, host_searchedBound, sizeof(unsigned), hipMemcpyHostToDevice)); gettimeofday(&end_time, NULL); long bw1_time = (end_time.tv_sec*1000000+end_time.tv_usec) - (start_time.tv_sec*1000000+start_time.tv_usec); hipLaunchKernelGGL(( sumBases), dim3(1),dim3(block), 0, 0, dev_sequence, dev_result, dev_subSequenceLength, dev_searchedBound, dev_subSequences, seq.size()); hipDeviceSynchronize(); gettimeofday(&end_time, NULL); long gpu_time = (end_time.tv_sec*1000000+end_time.tv_usec) - (start_time.tv_sec*1000000+start_time.tv_usec); CUDA_CHECK(hipMemcpy(host_result, dev_result, sizeof(unsigned), hipMemcpyDeviceToHost)); gettimeofday(&end_time, NULL); long bw2_time = (end_time.tv_sec*1000000+end_time.tv_usec) - (start_time.tv_sec*1000000+start_time.tv_usec); //total time gettimeofday(&end_time, NULL); delta_time = (end_time.tv_sec*1000000+end_time.tv_usec) - (start_time.tv_sec*1000000+start_time.tv_usec); printf("Result: %u \n", *host_result); CUDA_CHECK(hipFree(dev_sequence)); CUDA_CHECK(hipFree(dev_result)); CUDA_CHECK(hipFree(dev_subSequenceLength)); CUDA_CHECK(hipFree(dev_subSequences)); CUDA_CHECK(hipFree(dev_searchedBound)); free(host_result); printf(" - %li µs elapsed total\n", delta_time); printf(" - %li µs on bandwidth forth\n", bw1_time); printf(" - %li µs on GPU\n", gpu_time - bw1_time); printf(" - %li µs on bandwidth back\n", bw2_time - gpu_time); printf(" - %li µs on CPU\n", delta_time - bw2_time); return 0; }
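The kernel's result is one past the index of the character at which the running count of non-'-' bases reaches the searched bound. A small sequential reference in the same spirit, useful for sanity-checking the GPU result on a toy input, might look like the sketch below; the sequence and bound here are made up for illustration.

#include <cstdio>
#include <cstring>

/* Returns one past the index of the base at which the running count of
   non-'-' characters reaches `bound` (mirrors how the kernel sets *result). */
unsigned reference_search(const char* seq, size_t len, unsigned bound)
{
    unsigned count = 0;
    for (size_t i = 0; i < len; ++i) {
        if (seq[i] != '-') ++count;
        if (count == bound) return (unsigned)(i + 1);
    }
    return (unsigned)len;   /* bound not reached within the sequence */
}

int main()
{
    const char* seq = "AC--GT-A-CGT";     /* toy sequence, '-' marks gaps */
    unsigned bound = 4;                   /* look for the 4th base */
    unsigned idx = reference_search(seq, strlen(seq), bound);
    printf("4th base reached at index %u\n", idx);   /* 4th base sits at index 5, so this prints 6 */
    return 0;
}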
3dd3502cd4bbe5d261472d4cbe6059c74daf5109.cu
#include <iostream> #include <fstream> #include <string> #include <cuda.h> #include <time.h> #include <sys/time.h> #define DIMX 10 #define DIMY 10 #define DIMZ 3 #define CUDA_CHECK(cmd) {cudaError_t error = cmd; if(error!=cudaSuccess){printf("<%s>:%i ",__FILE__,__LINE__); printf("[CUDA] Error: %s\n", cudaGetErrorString(error));}} /** IDEA: ** 1. Split sequence into DIMX * DIMY * DIMZ parts ** 2. Let each thread sum up the amount of bases on one part ** 3. Let each thread sum up the amoutn of bases that were calculated by its predecessors ** 4. Pick the thread in which the searched value lies (aka the amount of bases of its predecessors are lower than the searched value AND its amount of bases added to that makes the result greater than the searched value ** 5. Search on that part again until we find the desired index ** 6. Return the index ** 7. ?? ** 8. PROFIT! **/ __global__ void sumBases(char *sequence, unsigned *result, unsigned *subSequenceLength, unsigned *searchedBound, size_t *subSequences, size_t sequence_length) { //linearize thread ids int threadId = threadIdx.x + threadIdx.y * DIMX + threadIdx.z * DIMX * DIMY; subSequences[threadId]=0; //count bases in each part of the sequence { for(size_t i=threadId*(*subSequenceLength); i<(threadId+1)*(*subSequenceLength); i++) { if(sequence[i]!='-') { subSequences[threadId]++; } } } __syncthreads(); //sum up the amount of bases which was computed by the "previous" threads (in a linear order) size_t cumulatedAmountOfBases=0; for(size_t i=0; i<threadId; i++) { cumulatedAmountOfBases+=subSequences[i]; } __syncthreads(); //pick the thread that is the last one we look at before we exceed our bound if( (cumulatedAmountOfBases < *searchedBound) && (cumulatedAmountOfBases+subSequences[threadId] > *searchedBound)) { //set the result pointer to the first char of the substring *result=threadId*(*subSequenceLength); //iterate again over the substring for(size_t i=threadId*(*subSequenceLength); cumulatedAmountOfBases<*searchedBound; i++) { if(sequence[i]!='-') { cumulatedAmountOfBases++; } //increase the result pointer *result=i; } *result+=1; } } void print_help(){ std::cout << "usage: \t transalign_killer <file/to/read/sequence/from>\n"; } std::string get_file_contents(const char *filename) { std::ifstream in(filename, std::ios::in | std::ios::binary); std::string contents(""); // print file: if(in.is_open()){ while (in.good()) { contents.push_back(in.get()); } } else { std::cerr << ">> problem opening file at: " << filename << "\n"; } return contents; } int main(int argc, char** argv){ long delta_time; struct timeval start_time, end_time; if(argc!=2){ print_help(); return 1; } //use the following file: http://idisk.mpi-cbg.de/~steinbac/transalign_sequence.txt.tgz //untar it and then use it as input std::string file_loc(argv[1]); std::cout << "reading input from " << file_loc << "\n"; std::string seq = get_file_contents(file_loc.c_str()); if(seq.empty()) return 1; /**convert string to char array**/ char *host_sequence=new char[seq.size()+1]; //set the whole sequence to 0 host_sequence[seq.size()]=0; //copy every char memcpy(host_sequence, seq.c_str(), seq.size()); //get integer part for subSequenceLength double integerPart; modf( seq.size() / (DIMX * DIMY * DIMZ) , &integerPart); int iPart = static_cast<int>(integerPart); int *host_subSequenceLength = &iPart; unsigned *host_searchedBound=(unsigned*) malloc(sizeof(unsigned)); *host_searchedBound=seq.size()/2; //length the part each GPU thread has to deal with unsigned *dev_subSequenceLength; //pointer 
for result on device unsigned *dev_result; //pointer for result on host unsigned *host_result=(unsigned*) malloc(sizeof(unsigned)); //sequence on device char *dev_sequence; //char array with a slot for each thread on GPU (only a temporary solution for now) size_t *dev_subSequences; unsigned *dev_searchedBound; /**start GPU stuff**/ dim3 block(DIMX, DIMY, DIMZ); CUDA_CHECK(cudaMalloc((void**)&dev_result, sizeof(unsigned))); CUDA_CHECK(cudaMalloc((void**)&dev_subSequenceLength, sizeof(unsigned))); CUDA_CHECK(cudaMalloc((void**)&dev_searchedBound, sizeof(unsigned))); CUDA_CHECK(cudaMalloc((void**)&dev_sequence, seq.size()*sizeof(char))); CUDA_CHECK(cudaMalloc((void**)&dev_subSequences, DIMX * DIMY * DIMZ * sizeof(size_t))); //set a starting point gettimeofday(&start_time, NULL); //this is where things start to become incredibly slow CUDA_CHECK(cudaMemcpy(dev_sequence, host_sequence, seq.size()*sizeof(char), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(dev_subSequenceLength, host_subSequenceLength, sizeof(unsigned), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMemcpy(dev_searchedBound, host_searchedBound, sizeof(unsigned), cudaMemcpyHostToDevice)); gettimeofday(&end_time, NULL); long bw1_time = (end_time.tv_sec*1000000+end_time.tv_usec) - (start_time.tv_sec*1000000+start_time.tv_usec); sumBases<<<1,block>>>(dev_sequence, dev_result, dev_subSequenceLength, dev_searchedBound, dev_subSequences, seq.size()); cudaThreadSynchronize(); gettimeofday(&end_time, NULL); long gpu_time = (end_time.tv_sec*1000000+end_time.tv_usec) - (start_time.tv_sec*1000000+start_time.tv_usec); CUDA_CHECK(cudaMemcpy(host_result, dev_result, sizeof(unsigned), cudaMemcpyDeviceToHost)); gettimeofday(&end_time, NULL); long bw2_time = (end_time.tv_sec*1000000+end_time.tv_usec) - (start_time.tv_sec*1000000+start_time.tv_usec); //total time gettimeofday(&end_time, NULL); delta_time = (end_time.tv_sec*1000000+end_time.tv_usec) - (start_time.tv_sec*1000000+start_time.tv_usec); printf("Result: %u \n", *host_result); CUDA_CHECK(cudaFree(dev_sequence)); CUDA_CHECK(cudaFree(dev_result)); CUDA_CHECK(cudaFree(dev_subSequenceLength)); CUDA_CHECK(cudaFree(dev_subSequences)); CUDA_CHECK(cudaFree(dev_searchedBound)); free(host_result); printf(" - %li µs elapsed total\n", delta_time); printf(" - %li µs on bandwidth forth\n", bw1_time); printf(" - %li µs on GPU\n", gpu_time - bw1_time); printf(" - %li µs on bandwidth back\n", bw2_time - gpu_time); printf(" - %li µs on CPU\n", delta_time - bw2_time); return 0; }
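Both versions of this program time the transfers and the kernel with gettimeofday on the host. For the device-side portion specifically, cudaEvent timing is a common alternative: cudaEventElapsedTime reports the time between two events recorded on the GPU stream, which keeps host call overhead out of the kernel measurement. A minimal sketch with a throwaway kernel is shown below.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel(int* x) { if (threadIdx.x == 0) *x += 1; }

int main()
{
    int* d_x;
    cudaMalloc(&d_x, sizeof(int));
    cudaMemset(d_x, 0, sizeof(int));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    dummy_kernel<<<1, 32>>>(d_x);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);               /* wait until the kernel and the stop event completed */

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);   /* elapsed GPU time in milliseconds */
    printf("kernel took %.3f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_x);
    return 0;
}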
f2305aa4a2c5990b776269ebc898062653b47e38.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <vector> #include <hip/hip_runtime_api.h> #include <cufftdx.hpp> #include <hipfft.h> #include "block_io.hpp" #include "common.hpp" #include "random.hpp" // #define CUFFTDX_EXAMPLE_DETAIL_DEBUG_FFT_3D // #define CUFFTDX_EXAMPLE_DETAIL_DEBUG_FFT_3D_SIMPLE_IO template<unsigned int MaxThreadsPerBlock, class FFT, class ComplexType = typename FFT::value_type> __launch_bounds__(MaxThreadsPerBlock) __global__ void cufftdx_3d_fft_single_block_kernel(const ComplexType* input, ComplexType* output) { using complex_type = ComplexType; static constexpr unsigned int fft_size = cufftdx::size_of<FFT>::value; // Local array for thread complex_type thread_data[FFT::storage_size]; // Shared memory use for exchanging data between threads extern __shared__ complex_type shared_memory[]; // Load data from global memory to registers. static constexpr unsigned int stride_x = fft_size * fft_size; unsigned int index = (threadIdx.x + threadIdx.y * fft_size); for (size_t i = 0; i < FFT::elements_per_thread; i++) { thread_data[i] = input[index]; index += stride_x; } // Execute FFT in X dimension FFT().execute(thread_data); // Exchange/transpose via shared memory index = (threadIdx.x + threadIdx.y * fft_size); for (size_t i = 0; i < FFT::elements_per_thread; i++) { shared_memory[index] = thread_data[i]; index += stride_x; } __syncthreads(); static constexpr unsigned int stride_y = cufftdx::size_of<FFT>::value; index = threadIdx.x + threadIdx.y * fft_size * fft_size; for (size_t i = 0; i < FFT::elements_per_thread; i++) { thread_data[i] = shared_memory[index]; index += stride_y; } // Execute FFT in Y dimension FFT().execute(thread_data); // Exchange/transpose via shared memory index = threadIdx.x + threadIdx.y * fft_size * fft_size; for (size_t i = 0; i < FFT::elements_per_thread; i++) { shared_memory[index] = thread_data[i]; index += stride_y; } __syncthreads(); index = (threadIdx.x + threadIdx.y * fft_size) * fft_size; for (size_t i = 0; i < FFT::elements_per_thread; i++) { thread_data[i] = shared_memory[index]; index += 1; } // Execute FFT in Z dimension FFT().execute(thread_data); // Save results #ifdef CUFFTDX_EXAMPLE_DETAIL_DEBUG_FFT_3D_SIMPLE_IO // Simple IO with poor global memory pattern: // Storing the data with stride=1 results in poor global memory // write pattern with little or none coalescing index = (threadIdx.x + threadIdx.y * fft_size) * fft_size; for (size_t i = 0; i < FFT::elements_per_thread; i++) { output[index] = thread_data[i]; index += 1; } #else // Shared memory IO: // Exchanging data via shared memory results in a much better global // memory patter with good coalescing index = (threadIdx.x + threadIdx.y * fft_size) * fft_size; for (size_t i = 0; i < FFT::elements_per_thread; i++) { shared_memory[index] = thread_data[i]; index += 1; } __syncthreads(); index = (threadIdx.x + threadIdx.y * fft_size); for (size_t i = 0; i < FFT::elements_per_thread; i++) { thread_data[i] = shared_memory[index]; index += stride_x; } index = (threadIdx.x + threadIdx.y * fft_size); for (size_t i = 0; i < FFT::elements_per_thread; i++) { output[index] = thread_data[i]; index += stride_x; } #endif } void cufft_3d_fft(unsigned int fft_size_x, unsigned int fft_size_y, unsigned int fft_size_z, hipfftComplex* input, hipfftComplex* output, hipStream_t stream) { // Create cuFFT plan hipfftHandle plan; CUFFT_CHECK_AND_EXIT(hipfftPlan3d(&plan, fft_size_x, fft_size_y, fft_size_z, HIPFFT_C2C)); CUFFT_CHECK_AND_EXIT(hipfftSetStream(plan, 
stream)); // Execute cuFFT CUFFT_CHECK_AND_EXIT(hipfftExecC2C(plan, input, output, HIPFFT_FORWARD)) CUDA_CHECK_AND_EXIT(hipDeviceSynchronize()); // Clean-up CUFFT_CHECK_AND_EXIT(hipfftDestroy(plan)); } template<unsigned int FFTSize> void cufftdx_3d_fft_single_block(float2* input, float2* output, hipStream_t stream) { using namespace cufftdx; static constexpr unsigned int fft_size_x = FFTSize; static constexpr unsigned int fft_size_y = FFTSize; static constexpr unsigned int fft_size_z = FFTSize; using FFT = decltype(Thread() + Size<fft_size_x>() + Type<fft_type::c2c>() + Direction<fft_direction::forward>() + Precision<float>()); using complex_type = typename FFT::value_type; constexpr dim3 block_dim = {fft_size_z, fft_size_y, 1}; constexpr unsigned int max_threads_per_block = block_dim.x * block_dim.y * block_dim.z; const size_t shared_memory_size = (fft_size_x * fft_size_y * fft_size_z) * sizeof(complex_type); const auto kernel = cufftdx_3d_fft_single_block_kernel<max_threads_per_block, FFT, complex_type>; // Increase max shared memory if needed CUDA_CHECK_AND_EXIT(hipFuncSetAttribute(kernel, hipFuncAttributeMaxDynamicSharedMemorySize, shared_memory_size)); complex_type* cufftdx_input = reinterpret_cast<complex_type*>(input); complex_type* cufftdx_output = reinterpret_cast<complex_type*>(output); hipLaunchKernelGGL(( kernel), dim3(1), dim3(block_dim), shared_memory_size, stream, cufftdx_input, cufftdx_output); CUDA_CHECK_AND_EXIT(hipPeekAtLastError()); CUDA_CHECK_AND_EXIT(hipDeviceSynchronize()); } int main(int, char**) { // 3D FFT where X=Y=Z static constexpr unsigned int fft_size = 16; // Generate random input data on host const unsigned int flat_fft_size = fft_size * fft_size * fft_size; #ifdef CUFFTDX_EXAMPLE_DETAIL_DEBUG_FFT_3D std::vector<float2> host_input(flat_fft_size); for (size_t i = 0; i < flat_fft_size; i++) { float sign = (i % 3 == 0) ? 
-1.0f : 1.0f; host_input[i].x = sign * static_cast<float>(i) / 100; host_input[i].y = sign * static_cast<float>(i) / 100; } #else auto host_input = example::get_random_complex_data<float>(flat_fft_size, -1, 1); #endif // Allocate managed memory for device input/output // float2 has the same size and alignment as cuFFTDx fp32 complex type cufftdx::complex<float> and hipfftComplex float2* input; float2* output; const auto flat_fft_size_bytes = flat_fft_size * sizeof(float2); CUDA_CHECK_AND_EXIT(hipMalloc(&input, flat_fft_size_bytes)); CUDA_CHECK_AND_EXIT(hipMalloc(&output, flat_fft_size_bytes)); // Allocate host output for cuFFT and cuFFTDx std::vector<float2> cufft_output(flat_fft_size); std::vector<float2> cufftdx_output(flat_fft_size); // Copy input to the device CUDA_CHECK_AND_EXIT(hipMemcpy(input, host_input.data(), flat_fft_size_bytes, hipMemcpyHostToDevice)); CUDA_CHECK_AND_EXIT(hipDeviceSynchronize()); hipStream_t stream; CUDA_CHECK_AND_EXIT(hipStreamCreate(&stream)); cufft_3d_fft(fft_size, fft_size, fft_size, input, output, stream); CUDA_CHECK_AND_EXIT(hipMemcpy(cufft_output.data(), output, flat_fft_size_bytes, hipMemcpyDeviceToHost)); CUDA_CHECK_AND_EXIT(hipDeviceSynchronize()); cufftdx_3d_fft_single_block<fft_size>(input, output, stream); CUDA_CHECK_AND_EXIT(hipMemcpy(cufftdx_output.data(), output, flat_fft_size_bytes, hipMemcpyDeviceToHost)); CUDA_CHECK_AND_EXIT(hipDeviceSynchronize()); // Clean-up CUDA_CHECK_AND_EXIT(hipStreamDestroy(stream)); CUDA_CHECK_AND_EXIT(hipFree(input)); CUDA_CHECK_AND_EXIT(hipFree(output)); // Check if cuFFTDx results are correct auto fft_error = example::fft_signal_error::calculate_for_complex_values(cufftdx_output, cufft_output); #ifdef CUFFTDX_EXAMPLE_DETAIL_DEBUG_FFT_3D std::cout << "cuFFT, cuFFTDx\n"; for (size_t i = 0; i < 8; i++) { std::cout << i << ": "; std::cout << "(" << cufft_output[i].x << ", " << cufft_output[i].y << ")"; std::cout << ", "; std::cout << "(" << cufftdx_output[i].x << ", " << cufftdx_output[i].y << ")"; std::cout << "\n"; } #endif std::cout << "Correctness results:\n"; std::cout << "L2 error: " << fft_error.l2_relative_error << "\n"; std::cout << "Peak error (index: " << fft_error.peak_error_index << "): " << fft_error.peak_error << "\n"; if(fft_error.l2_relative_error < 0.001) { std::cout << "Success\n"; return 0; } else { std::cout << "Failure\n"; return 1; } }
f2305aa4a2c5990b776269ebc898062653b47e38.cu
#include <iostream> #include <vector> #include <cuda_runtime_api.h> #include <cufftdx.hpp> #include <cufft.h> #include "block_io.hpp" #include "common.hpp" #include "random.hpp" // #define CUFFTDX_EXAMPLE_DETAIL_DEBUG_FFT_3D // #define CUFFTDX_EXAMPLE_DETAIL_DEBUG_FFT_3D_SIMPLE_IO template<unsigned int MaxThreadsPerBlock, class FFT, class ComplexType = typename FFT::value_type> __launch_bounds__(MaxThreadsPerBlock) __global__ void cufftdx_3d_fft_single_block_kernel(const ComplexType* input, ComplexType* output) { using complex_type = ComplexType; static constexpr unsigned int fft_size = cufftdx::size_of<FFT>::value; // Local array for thread complex_type thread_data[FFT::storage_size]; // Shared memory use for exchanging data between threads extern __shared__ complex_type shared_memory[]; // Load data from global memory to registers. static constexpr unsigned int stride_x = fft_size * fft_size; unsigned int index = (threadIdx.x + threadIdx.y * fft_size); for (size_t i = 0; i < FFT::elements_per_thread; i++) { thread_data[i] = input[index]; index += stride_x; } // Execute FFT in X dimension FFT().execute(thread_data); // Exchange/transpose via shared memory index = (threadIdx.x + threadIdx.y * fft_size); for (size_t i = 0; i < FFT::elements_per_thread; i++) { shared_memory[index] = thread_data[i]; index += stride_x; } __syncthreads(); static constexpr unsigned int stride_y = cufftdx::size_of<FFT>::value; index = threadIdx.x + threadIdx.y * fft_size * fft_size; for (size_t i = 0; i < FFT::elements_per_thread; i++) { thread_data[i] = shared_memory[index]; index += stride_y; } // Execute FFT in Y dimension FFT().execute(thread_data); // Exchange/transpose via shared memory index = threadIdx.x + threadIdx.y * fft_size * fft_size; for (size_t i = 0; i < FFT::elements_per_thread; i++) { shared_memory[index] = thread_data[i]; index += stride_y; } __syncthreads(); index = (threadIdx.x + threadIdx.y * fft_size) * fft_size; for (size_t i = 0; i < FFT::elements_per_thread; i++) { thread_data[i] = shared_memory[index]; index += 1; } // Execute FFT in Z dimension FFT().execute(thread_data); // Save results #ifdef CUFFTDX_EXAMPLE_DETAIL_DEBUG_FFT_3D_SIMPLE_IO // Simple IO with poor global memory pattern: // Storing the data with stride=1 results in poor global memory // write pattern with little or none coalescing index = (threadIdx.x + threadIdx.y * fft_size) * fft_size; for (size_t i = 0; i < FFT::elements_per_thread; i++) { output[index] = thread_data[i]; index += 1; } #else // Shared memory IO: // Exchanging data via shared memory results in a much better global // memory patter with good coalescing index = (threadIdx.x + threadIdx.y * fft_size) * fft_size; for (size_t i = 0; i < FFT::elements_per_thread; i++) { shared_memory[index] = thread_data[i]; index += 1; } __syncthreads(); index = (threadIdx.x + threadIdx.y * fft_size); for (size_t i = 0; i < FFT::elements_per_thread; i++) { thread_data[i] = shared_memory[index]; index += stride_x; } index = (threadIdx.x + threadIdx.y * fft_size); for (size_t i = 0; i < FFT::elements_per_thread; i++) { output[index] = thread_data[i]; index += stride_x; } #endif } void cufft_3d_fft(unsigned int fft_size_x, unsigned int fft_size_y, unsigned int fft_size_z, cufftComplex* input, cufftComplex* output, cudaStream_t stream) { // Create cuFFT plan cufftHandle plan; CUFFT_CHECK_AND_EXIT(cufftPlan3d(&plan, fft_size_x, fft_size_y, fft_size_z, CUFFT_C2C)); CUFFT_CHECK_AND_EXIT(cufftSetStream(plan, stream)); // Execute cuFFT CUFFT_CHECK_AND_EXIT(cufftExecC2C(plan, input, 
output, CUFFT_FORWARD)) CUDA_CHECK_AND_EXIT(cudaDeviceSynchronize()); // Clean-up CUFFT_CHECK_AND_EXIT(cufftDestroy(plan)); } template<unsigned int FFTSize> void cufftdx_3d_fft_single_block(float2* input, float2* output, cudaStream_t stream) { using namespace cufftdx; static constexpr unsigned int fft_size_x = FFTSize; static constexpr unsigned int fft_size_y = FFTSize; static constexpr unsigned int fft_size_z = FFTSize; using FFT = decltype(Thread() + Size<fft_size_x>() + Type<fft_type::c2c>() + Direction<fft_direction::forward>() + Precision<float>()); using complex_type = typename FFT::value_type; constexpr dim3 block_dim = {fft_size_z, fft_size_y, 1}; constexpr unsigned int max_threads_per_block = block_dim.x * block_dim.y * block_dim.z; const size_t shared_memory_size = (fft_size_x * fft_size_y * fft_size_z) * sizeof(complex_type); const auto kernel = cufftdx_3d_fft_single_block_kernel<max_threads_per_block, FFT, complex_type>; // Increase max shared memory if needed CUDA_CHECK_AND_EXIT(cudaFuncSetAttribute(kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, shared_memory_size)); complex_type* cufftdx_input = reinterpret_cast<complex_type*>(input); complex_type* cufftdx_output = reinterpret_cast<complex_type*>(output); kernel<<<1, block_dim, shared_memory_size, stream>>>(cufftdx_input, cufftdx_output); CUDA_CHECK_AND_EXIT(cudaPeekAtLastError()); CUDA_CHECK_AND_EXIT(cudaDeviceSynchronize()); } int main(int, char**) { // 3D FFT where X=Y=Z static constexpr unsigned int fft_size = 16; // Generate random input data on host const unsigned int flat_fft_size = fft_size * fft_size * fft_size; #ifdef CUFFTDX_EXAMPLE_DETAIL_DEBUG_FFT_3D std::vector<float2> host_input(flat_fft_size); for (size_t i = 0; i < flat_fft_size; i++) { float sign = (i % 3 == 0) ? 
-1.0f : 1.0f; host_input[i].x = sign * static_cast<float>(i) / 100; host_input[i].y = sign * static_cast<float>(i) / 100; } #else auto host_input = example::get_random_complex_data<float>(flat_fft_size, -1, 1); #endif // Allocate managed memory for device input/output // float2 has the same size and alignment as cuFFTDx fp32 complex type cufftdx::complex<float> and cufftComplex float2* input; float2* output; const auto flat_fft_size_bytes = flat_fft_size * sizeof(float2); CUDA_CHECK_AND_EXIT(cudaMalloc(&input, flat_fft_size_bytes)); CUDA_CHECK_AND_EXIT(cudaMalloc(&output, flat_fft_size_bytes)); // Allocate host output for cuFFT and cuFFTDx std::vector<float2> cufft_output(flat_fft_size); std::vector<float2> cufftdx_output(flat_fft_size); // Copy input to the device CUDA_CHECK_AND_EXIT(cudaMemcpy(input, host_input.data(), flat_fft_size_bytes, cudaMemcpyHostToDevice)); CUDA_CHECK_AND_EXIT(cudaDeviceSynchronize()); cudaStream_t stream; CUDA_CHECK_AND_EXIT(cudaStreamCreate(&stream)); cufft_3d_fft(fft_size, fft_size, fft_size, input, output, stream); CUDA_CHECK_AND_EXIT(cudaMemcpy(cufft_output.data(), output, flat_fft_size_bytes, cudaMemcpyDeviceToHost)); CUDA_CHECK_AND_EXIT(cudaDeviceSynchronize()); cufftdx_3d_fft_single_block<fft_size>(input, output, stream); CUDA_CHECK_AND_EXIT(cudaMemcpy(cufftdx_output.data(), output, flat_fft_size_bytes, cudaMemcpyDeviceToHost)); CUDA_CHECK_AND_EXIT(cudaDeviceSynchronize()); // Clean-up CUDA_CHECK_AND_EXIT(cudaStreamDestroy(stream)); CUDA_CHECK_AND_EXIT(cudaFree(input)); CUDA_CHECK_AND_EXIT(cudaFree(output)); // Check if cuFFTDx results are correct auto fft_error = example::fft_signal_error::calculate_for_complex_values(cufftdx_output, cufft_output); #ifdef CUFFTDX_EXAMPLE_DETAIL_DEBUG_FFT_3D std::cout << "cuFFT, cuFFTDx\n"; for (size_t i = 0; i < 8; i++) { std::cout << i << ": "; std::cout << "(" << cufft_output[i].x << ", " << cufft_output[i].y << ")"; std::cout << ", "; std::cout << "(" << cufftdx_output[i].x << ", " << cufftdx_output[i].y << ")"; std::cout << "\n"; } #endif std::cout << "Correctness results:\n"; std::cout << "L2 error: " << fft_error.l2_relative_error << "\n"; std::cout << "Peak error (index: " << fft_error.peak_error_index << "): " << fft_error.peak_error << "\n"; if(fft_error.l2_relative_error < 0.001) { std::cout << "Success\n"; return 0; } else { std::cout << "Failure\n"; return 1; } }
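The single-block kernel in the pair above factors the 3D transform into three batches of 1D FFTs over the same N x N x N cube; only the base index and the stride change between the X, Y, and Z passes. The host-side sketch below is an illustration only (it is not part of either file) and assumes the linear layout index = x + y*N + z*N*N used by the kernel. It applies a naive 1D DFT along each axis with the same base/stride pattern; for a constant input the result is a single spike of value N^3 at index 0, which the program prints as a sanity check.

#include <cmath>
#include <complex>
#include <cstddef>
#include <cstdio>
#include <vector>

using cpx = std::complex<double>;

// Naive forward DFT over n elements located at a[base], a[base+stride], ...
static void dft_1d(std::vector<cpx>& a, std::size_t base, std::size_t stride, unsigned n) {
    const double pi = std::acos(-1.0);
    std::vector<cpx> out(n);
    for (unsigned k = 0; k < n; ++k) {
        cpx acc{0.0, 0.0};
        for (unsigned j = 0; j < n; ++j) {
            const double ang = -2.0 * pi * double(k) * double(j) / double(n);
            acc += a[base + j * stride] * cpx{std::cos(ang), std::sin(ang)};
        }
        out[k] = acc;
    }
    for (unsigned k = 0; k < n; ++k) a[base + k * stride] = out[k];
}

int main() {
    constexpr unsigned n = 8;                         // cube edge, playing the role of fft_size
    std::vector<cpx> data(n * n * n, cpx{1.0, 0.0});  // constant input
    // Pass 1: stride n*n, one line per (x, y) pair, mirroring the kernel's first pass.
    for (unsigned y = 0; y < n; ++y)
        for (unsigned x = 0; x < n; ++x)
            dft_1d(data, x + y * n, n * n, n);
    // Pass 2: stride n, one line per (x, z) pair.
    for (unsigned z = 0; z < n; ++z)
        for (unsigned x = 0; x < n; ++x)
            dft_1d(data, x + z * n * n, n, n);
    // Pass 3: stride 1, one line per (y, z) pair.
    for (unsigned z = 0; z < n; ++z)
        for (unsigned y = 0; y < n; ++y)
            dft_1d(data, (y + z * n) * n, 1, n);
    // A constant input transforms to a single spike of value n^3 in bin 0.
    std::printf("bin 0 = (%f, %f), expected (%f, 0)\n",
                data[0].real(), data[0].imag(), double(n) * n * n);
    return 0;
}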
183926710c290420af2dd638e3284f23c0c6970b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <assert.h> #include <sched.h> #include <sys/sysinfo.h> #include <pthread.h> #include <sched.h> #include <sys/syscall.h> #include <sys/time.h> #include <time.h> #include <unistd.h> static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) #define HANDLE_NULL( a ) {if (a == NULL) { \ printf( "Host memory failed in %s at line %d\n", \ __FILE__, __LINE__ ); \ exit( EXIT_FAILURE );}} inline double diff_s(struct timeval start, struct timeval end) { return ((double) (end.tv_usec - start.tv_usec) / 1000000 + (double) (end.tv_sec - start.tv_sec)); } __device__ void busywait(int cycles) { clock_t current_time; current_time = clock64(); int until = current_time + cycles; while (until > current_time) { current_time = clock64(); } } __global__ void copyKernel(unsigned char *output, unsigned char *input, int N) { int tid = threadIdx.x + blockIdx.x*blockDim.x; if(tid < N) output[tid] = input[tid]; } __global__ void nullKernel(unsigned char *output, unsigned char *input, int N) { } static pid_t gettid(void) { return syscall(__NR_gettid); } void set_affinity(int tid, int core_id) { cpu_set_t mask; CPU_ZERO(&mask); CPU_SET(core_id, &mask); int r = sched_setaffinity(tid, sizeof(mask), &mask); if (r < 0) { fprintf(stderr, "couldn't set affinity for %d\n", core_id); exit(1); } } typedef struct { int id; unsigned char *input; unsigned char *output; int memSize; } parm; void *work( void *arg ) { parm *p=(parm *)arg; int tid = gettid(); set_affinity(tid, (p->id)%get_nprocs()); memcpy(p->output,p->input,p->memSize); return 0; } void launch_cpu_threads(int nthreads, unsigned char **out, unsigned char **in, int memSize) { pthread_t *threads; pthread_attr_t attr; parm *p; int j; threads=(pthread_t *)malloc(nthreads * sizeof(pthread_t)); if(threads == NULL) { printf("ERROR malloc failed to create CPU threads\n"); exit(1); } pthread_attr_init(&attr); p=(parm *)malloc(nthreads * sizeof(parm)); for (j=0; j<nthreads; j++) { p[j].id=j; p[j].input=in[j]; p[j].output=out[j]; p[j].memSize=memSize; if(pthread_create(&threads[j], &attr, work, (void *)(p+j))!=0) { printf("ERROR creating threads\n"); exit(1); } } for (j=0; j<nthreads; j++) { if(pthread_join(threads[j],NULL)!=0) { printf("ERROR in joing threads\n"); exit(1); } } pthread_attr_destroy(&attr); free(p); } int main(int argc, char *argv[]) { struct timeval tv1, tv2; int opt; int num_of_blocks = 1024; int num_of_threads_per_block = 1024; int memSize = 4*1024*1024; int benchmarkType = 0; int ITERATIONS = 10; int t = 512; while ((opt = getopt(argc, argv, "m:b:i:t:")) != -1) { switch (opt) { case 'm': memSize = atoi(optarg)*1024*1024; break; case 'b': benchmarkType = atoi(optarg); break; case 'i': ITERATIONS = atoi(optarg); break; case 't': t = atoi(optarg); break; default: /* '?' 
*/ break; } } num_of_blocks = memSize/t; num_of_threads_per_block = t; assert(num_of_blocks <= 2147483647); assert(num_of_threads_per_block <= 1024); int N = num_of_blocks * num_of_threads_per_block; HANDLE_ERROR(hipDeviceReset()); hipFree(0); //set context so that overhead won't be later accounted unsigned char *cpuMemory,*inputhostallocMemory,*outputhostallocMemory,*inputcudamallocMemory,*outputcudamallocMemory, *inputmanagedMemory, *outputmanagedMemory; cpuMemory = (unsigned char *)malloc(memSize); assert(cpuMemory); for(int i = 0; i < memSize/sizeof(unsigned char); i++) cpuMemory[i] = (unsigned char)(i & 0xff); HANDLE_ERROR( hipHostMalloc( (void**)& inputhostallocMemory, sizeof(unsigned char)*memSize, hipHostMallocDefault) ); HANDLE_ERROR( hipHostMalloc( (void**)& outputhostallocMemory, sizeof(unsigned char)*memSize, hipHostMallocDefault) ); HANDLE_ERROR( hipMalloc( (void**)& inputcudamallocMemory, sizeof(unsigned char)*memSize) ); HANDLE_ERROR( hipMalloc( (void**)& outputcudamallocMemory, sizeof(unsigned char)*memSize) ); HANDLE_ERROR( hipMemcpy(inputcudamallocMemory,cpuMemory, sizeof(unsigned char)*memSize,hipMemcpyDefault) ); HANDLE_ERROR( hipMemcpy(inputhostallocMemory,cpuMemory, sizeof(unsigned char)*memSize,hipMemcpyDefault) ); switch (benchmarkType) { case 0: {//Device to Device memcpy test gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { HANDLE_ERROR( hipMemcpy(outputcudamallocMemory,inputcudamallocMemory, sizeof(unsigned char)*memSize,hipMemcpyDefault) ); } HANDLE_ERROR( hipDeviceSynchronize()); gettimeofday(&tv2, NULL); double elapsedTimeSeconds = diff_s(tv1,tv2); printf("elapsedTime per iteration = %f\n",elapsedTimeSeconds/ITERATIONS); //we multiply by two since the DeviceToDevice copy involves both reading and writing to device memory float bandwidth = 2.0f * ((double)memSize/(1024*1024*1024))*ITERATIONS/elapsedTimeSeconds; //float bandwidth = 2.0f * ((float)(1<<10) * memSize * (float)ITERATIONS) / (elapsedTimeSeconds *(1000.0) * (float)(1 << 20)); printf("DeviceToDevice Memcpy Bandwitdh = %f GB/s\n",bandwidth); break; } case 1: {//custom kernel with cuda malloced memory gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { hipLaunchKernelGGL(( nullKernel), dim3(num_of_blocks),dim3(num_of_threads_per_block), 0, 0, outputcudamallocMemory,inputcudamallocMemory,N); } HANDLE_ERROR( hipDeviceSynchronize()); gettimeofday(&tv2, NULL); HANDLE_ERROR( hipGetLastError()); double nullElapsedTime = diff_s(tv1,tv2); gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { hipLaunchKernelGGL(( copyKernel), dim3(num_of_blocks),dim3(num_of_threads_per_block), 0, 0, outputcudamallocMemory,inputcudamallocMemory,N); } HANDLE_ERROR( hipDeviceSynchronize()); gettimeofday(&tv2, NULL); HANDLE_ERROR( hipGetLastError()); double elapsedTimeSeconds = diff_s(tv1,tv2); printf("elapsedTime per iteration = %f\n",elapsedTimeSeconds/ITERATIONS); //we multiply by two since the DeviceToDevice copy involves both reading and writing to device memory float bandwidth = 2.0f * ((double)memSize/(1024*1024*1024))*ITERATIONS/elapsedTimeSeconds; float bandwidth_ex = 2.0f * ((double)memSize/(1024*1024*1024))*ITERATIONS/(elapsedTimeSeconds-nullElapsedTime); //float bandwidth = 2.0f * ((float)(1<<10) * memSize * (float)ITERATIONS) / (elapsedTimeSeconds *(1000.0) * (float)(1 << 20)); printf("Custom kernel(hipMalloc) memcpy Bandwitdh including kernel launch overhead = %f GB/s\n",bandwidth); printf("Custom kernel(hipMalloc) memcpy Bandwitdh excluding kernel launch overhead = %f 
GB/s\n",bandwidth_ex); break; } case 2: {//Custom kernel with host allocated memory gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { hipLaunchKernelGGL(( nullKernel), dim3(num_of_blocks),dim3(num_of_threads_per_block), 0, 0, outputhostallocMemory,inputhostallocMemory,N); } HANDLE_ERROR( hipDeviceSynchronize()); gettimeofday(&tv2, NULL); HANDLE_ERROR( hipGetLastError()); double nullElapsedTime = diff_s(tv1,tv2); gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { hipLaunchKernelGGL(( copyKernel), dim3(num_of_blocks),dim3(num_of_threads_per_block), 0, 0, outputhostallocMemory,inputhostallocMemory,N); } HANDLE_ERROR( hipDeviceSynchronize()); gettimeofday(&tv2, NULL); HANDLE_ERROR( hipGetLastError()); double elapsedTimeSeconds = diff_s(tv1,tv2); printf("elapsedTime per iteration = %f\n",elapsedTimeSeconds/ITERATIONS); //we multiply by two since the DeviceToDevice copy involves both reading and writing to device memory float bandwidth = 2.0f * ((double)memSize/(1024*1024*1024))*ITERATIONS/elapsedTimeSeconds; float bandwidth_ex = 2.0f * ((double)memSize/(1024*1024*1024))*ITERATIONS/(elapsedTimeSeconds-nullElapsedTime); //float bandwidth = 2.0f * ((float)(1<<10) * memSize * (float)ITERATIONS) / (elapsedTimeSeconds *(1000.0) * (float)(1 << 20)); printf("Custom kernel(hipHostMalloc) memcpy Bandwitdh = %f GB/s\n",bandwidth); printf("Custom kernel(hipHostMalloc) memcpy Bandwitdh excluding kernel launch overhead = %f GB/s\n",bandwidth_ex); break; } case 3: {//host allocated memory copy test gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { HANDLE_ERROR( hipMemcpyAsync(outputhostallocMemory,inputhostallocMemory, sizeof(unsigned char)*memSize,hipMemcpyDefault) ); } HANDLE_ERROR( hipDeviceSynchronize()); gettimeofday(&tv2, NULL); HANDLE_ERROR( hipGetLastError()); double elapsedTimeSeconds = diff_s(tv1,tv2); printf("elapsedTime per iteration = %f\n",elapsedTimeSeconds/ITERATIONS); //we multiply by two since the DeviceToDevice copy involves both reading and writing to device memory float bandwidth = 2.0f * ((double)memSize/(1024*1024*1024))*ITERATIONS/elapsedTimeSeconds; //float bandwidth = 2.0f * ((float)(1<<10) * memSize * (float)ITERATIONS) / (elapsedTimeSeconds *(1000.0) * (float)(1 << 20)); printf("Device to Device hipHostMalloc memcpy Bandwitdh = %f GB/s\n",bandwidth); break; } case 4: {//managed memory copy test HANDLE_ERROR( hipMallocManaged( (void**)& inputmanagedMemory, sizeof(unsigned char)*memSize) ); HANDLE_ERROR( hipMallocManaged( (void**)& outputmanagedMemory, sizeof(unsigned char)*memSize) ); HANDLE_ERROR( hipMemcpy(inputmanagedMemory,cpuMemory, sizeof(unsigned char)*memSize,hipMemcpyDefault) ); gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { hipLaunchKernelGGL(( nullKernel), dim3(num_of_blocks),dim3(num_of_threads_per_block), 0, 0, outputmanagedMemory,inputmanagedMemory,N); } HANDLE_ERROR( hipDeviceSynchronize()); gettimeofday(&tv2, NULL); HANDLE_ERROR( hipGetLastError()); double nullElapsedTime = diff_s(tv1,tv2); gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { hipLaunchKernelGGL(( copyKernel), dim3(num_of_blocks),dim3(num_of_threads_per_block), 0, 0, outputmanagedMemory,inputmanagedMemory,N); } HANDLE_ERROR( hipDeviceSynchronize()); gettimeofday(&tv2, NULL); HANDLE_ERROR( hipGetLastError()); double elapsedTimeSeconds = diff_s(tv1,tv2); printf("elapsedTime per iteration = %f\n",elapsedTimeSeconds/ITERATIONS); //we multiply by two since the DeviceToDevice copy involves both reading and writing to device memory float 
bandwidth = 2.0f * ((double)memSize/(1024*1024*1024))*ITERATIONS/elapsedTimeSeconds; float bandwidth_ex = 2.0f * ((double)memSize/(1024*1024*1024))*ITERATIONS/(elapsedTimeSeconds-nullElapsedTime); //float bandwidth = 2.0f * ((float)(1<<10) * memSize * (float)ITERATIONS) / (elapsedTimeSeconds *(1000.0) * (float)(1 << 20)); printf("Custom kernel (managed memory) Bandwitdh = %f GB/s\n",bandwidth); printf("Custom kernel (managed memory) Bandwitdh excluding kernel launch overhead = %f GB/s\n",bandwidth_ex); hipFree(inputmanagedMemory); hipFree(outputmanagedMemory); break; } case 5: {//Custom kernel with host allocated to malloc copy gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { hipLaunchKernelGGL(( nullKernel), dim3(num_of_blocks),dim3(num_of_threads_per_block), 0, 0, outputcudamallocMemory,inputhostallocMemory,N); } HANDLE_ERROR( hipDeviceSynchronize()); gettimeofday(&tv2, NULL); HANDLE_ERROR( hipGetLastError()); double nullElapsedTime = diff_s(tv1,tv2); gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { hipLaunchKernelGGL(( copyKernel), dim3(num_of_blocks),dim3(num_of_threads_per_block), 0, 0, outputcudamallocMemory,inputhostallocMemory,N); } HANDLE_ERROR( hipDeviceSynchronize()); gettimeofday(&tv2, NULL); HANDLE_ERROR( hipGetLastError()); double elapsedTimeSeconds = diff_s(tv1,tv2); printf("elapsedTime per iteration = %f\n",elapsedTimeSeconds/ITERATIONS); //we multiply by two since the DeviceToDevice copy involves both reading and writing to device memory float bandwidth = 2.0f * ((double)memSize/(1024*1024*1024))*ITERATIONS/elapsedTimeSeconds; float bandwidth_ex = 2.0f * ((double)memSize/(1024*1024*1024))*ITERATIONS/(elapsedTimeSeconds-nullElapsedTime); //float bandwidth = 2.0f * ((float)(1<<10) * memSize * (float)ITERATIONS) / (elapsedTimeSeconds *(1000.0) * (float)(1 << 20)); printf("Custom kernel(hipHostMalloc to hipMalloc) memcpy Bandwitdh = %f GB/s\n",bandwidth); printf("Custom kernel(hipHostMalloc to hipMalloc) memcpy Bandwitdh excluding kernel launch overhead = %f GB/s\n",bandwidth_ex); break; } case 6: {//Cpu malloc to malloc unsigned char *mallocdOut,*mallocdIn; mallocdOut = (unsigned char *)malloc(sizeof(unsigned char)*memSize); mallocdIn = (unsigned char *)malloc(sizeof(unsigned char)*memSize); for(int i=0; i<memSize/sizeof(unsigned char); i++) mallocdIn[i]=5; if(!mallocdOut || !mallocdIn) { printf("ERROR in malloc\n"); return -1; } for(int i=0; i<memSize/sizeof(unsigned char); i++) mallocdIn[i]=5; gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { memcpy(mallocdOut,mallocdIn,sizeof(unsigned char)*memSize); } gettimeofday(&tv2, NULL); double elapsedTimeSeconds = diff_s(tv1,tv2); printf("elapsedTime per iteration = %f\n",elapsedTimeSeconds/ITERATIONS); //we multiply by two since the DeviceToDevice copy involves both reading and writing to device memory float bandwidth = ((double)memSize/(1024*1024*1024))*ITERATIONS/elapsedTimeSeconds; printf("cpu malloc to malloc memcpy Bandwitdh = %f GB/s\n",bandwidth); free(mallocdOut); free(mallocdIn); break; } case 7: {//Cpu multithreaded malloc to malloc const int nthreads=4; unsigned char *mallocdOut[nthreads],*mallocdIn[nthreads]; for(int i=0; i<nthreads; i++) { mallocdOut[i] = (unsigned char *)malloc(sizeof(unsigned char)*memSize); mallocdIn[i] = (unsigned char *)malloc(sizeof(unsigned char)*memSize); } if(!mallocdOut || !mallocdIn) { printf("ERROR in malloc\n"); return -1; } for(int i=0; i<memSize/sizeof(unsigned char); i++){ mallocdIn[0][i]=5; mallocdIn[1][i]=5; mallocdIn[2][i]=5; 
mallocdIn[3][i]=5; } gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { launch_cpu_threads(nthreads,mallocdOut,mallocdIn,memSize); } gettimeofday(&tv2, NULL); double elapsedTimeSeconds = diff_s(tv1,tv2); printf("elapsedTime per iteration = %f\n",elapsedTimeSeconds/ITERATIONS); //we multiply by two since the DeviceToDevice copy involves both reading and writing to device memory float bandwidth = 4 * ((double)memSize/(1024*1024*1024))*ITERATIONS/elapsedTimeSeconds; printf("cpu malloc to malloc memcpy Bandwitdh = %f GB/s\n",bandwidth); for(int i=0; i<nthreads; i++){ free(mallocdOut[i]); free(mallocdIn[i]); } break; } } free(cpuMemory); hipHostFree(inputhostallocMemory); hipHostFree(outputhostallocMemory); hipFree(inputcudamallocMemory); hipFree(outputcudamallocMemory); return 0; }
183926710c290420af2dd638e3284f23c0c6970b.cu
#include <stdlib.h> #include <stdio.h> #include <assert.h> #include <sched.h> #include <sys/sysinfo.h> #include <pthread.h> #include <sched.h> #include <sys/syscall.h> #include <sys/time.h> #include <time.h> #include <unistd.h> static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) #define HANDLE_NULL( a ) {if (a == NULL) { \ printf( "Host memory failed in %s at line %d\n", \ __FILE__, __LINE__ ); \ exit( EXIT_FAILURE );}} inline double diff_s(struct timeval start, struct timeval end) { return ((double) (end.tv_usec - start.tv_usec) / 1000000 + (double) (end.tv_sec - start.tv_sec)); } __device__ void busywait(int cycles) { clock_t current_time; current_time = clock64(); int until = current_time + cycles; while (until > current_time) { current_time = clock64(); } } __global__ void copyKernel(unsigned char *output, unsigned char *input, int N) { int tid = threadIdx.x + blockIdx.x*blockDim.x; if(tid < N) output[tid] = input[tid]; } __global__ void nullKernel(unsigned char *output, unsigned char *input, int N) { } static pid_t gettid(void) { return syscall(__NR_gettid); } void set_affinity(int tid, int core_id) { cpu_set_t mask; CPU_ZERO(&mask); CPU_SET(core_id, &mask); int r = sched_setaffinity(tid, sizeof(mask), &mask); if (r < 0) { fprintf(stderr, "couldn't set affinity for %d\n", core_id); exit(1); } } typedef struct { int id; unsigned char *input; unsigned char *output; int memSize; } parm; void *work( void *arg ) { parm *p=(parm *)arg; int tid = gettid(); set_affinity(tid, (p->id)%get_nprocs()); memcpy(p->output,p->input,p->memSize); return 0; } void launch_cpu_threads(int nthreads, unsigned char **out, unsigned char **in, int memSize) { pthread_t *threads; pthread_attr_t attr; parm *p; int j; threads=(pthread_t *)malloc(nthreads * sizeof(pthread_t)); if(threads == NULL) { printf("ERROR malloc failed to create CPU threads\n"); exit(1); } pthread_attr_init(&attr); p=(parm *)malloc(nthreads * sizeof(parm)); for (j=0; j<nthreads; j++) { p[j].id=j; p[j].input=in[j]; p[j].output=out[j]; p[j].memSize=memSize; if(pthread_create(&threads[j], &attr, work, (void *)(p+j))!=0) { printf("ERROR creating threads\n"); exit(1); } } for (j=0; j<nthreads; j++) { if(pthread_join(threads[j],NULL)!=0) { printf("ERROR in joing threads\n"); exit(1); } } pthread_attr_destroy(&attr); free(p); } int main(int argc, char *argv[]) { struct timeval tv1, tv2; int opt; int num_of_blocks = 1024; int num_of_threads_per_block = 1024; int memSize = 4*1024*1024; int benchmarkType = 0; int ITERATIONS = 10; int t = 512; while ((opt = getopt(argc, argv, "m:b:i:t:")) != -1) { switch (opt) { case 'm': memSize = atoi(optarg)*1024*1024; break; case 'b': benchmarkType = atoi(optarg); break; case 'i': ITERATIONS = atoi(optarg); break; case 't': t = atoi(optarg); break; default: /* '?' 
*/ break; } } num_of_blocks = memSize/t; num_of_threads_per_block = t; assert(num_of_blocks <= 2147483647); assert(num_of_threads_per_block <= 1024); int N = num_of_blocks * num_of_threads_per_block; HANDLE_ERROR(cudaDeviceReset()); cudaFree(0); //set context so that overhead won't be later accounted unsigned char *cpuMemory,*inputhostallocMemory,*outputhostallocMemory,*inputcudamallocMemory,*outputcudamallocMemory, *inputmanagedMemory, *outputmanagedMemory; cpuMemory = (unsigned char *)malloc(memSize); assert(cpuMemory); for(int i = 0; i < memSize/sizeof(unsigned char); i++) cpuMemory[i] = (unsigned char)(i & 0xff); HANDLE_ERROR( cudaHostAlloc( (void**)& inputhostallocMemory, sizeof(unsigned char)*memSize, cudaHostAllocDefault) ); HANDLE_ERROR( cudaHostAlloc( (void**)& outputhostallocMemory, sizeof(unsigned char)*memSize, cudaHostAllocDefault) ); HANDLE_ERROR( cudaMalloc( (void**)& inputcudamallocMemory, sizeof(unsigned char)*memSize) ); HANDLE_ERROR( cudaMalloc( (void**)& outputcudamallocMemory, sizeof(unsigned char)*memSize) ); HANDLE_ERROR( cudaMemcpy(inputcudamallocMemory,cpuMemory, sizeof(unsigned char)*memSize,cudaMemcpyDefault) ); HANDLE_ERROR( cudaMemcpy(inputhostallocMemory,cpuMemory, sizeof(unsigned char)*memSize,cudaMemcpyDefault) ); switch (benchmarkType) { case 0: {//Device to Device memcpy test gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { HANDLE_ERROR( cudaMemcpy(outputcudamallocMemory,inputcudamallocMemory, sizeof(unsigned char)*memSize,cudaMemcpyDefault) ); } HANDLE_ERROR( cudaDeviceSynchronize()); gettimeofday(&tv2, NULL); double elapsedTimeSeconds = diff_s(tv1,tv2); printf("elapsedTime per iteration = %f\n",elapsedTimeSeconds/ITERATIONS); //we multiply by two since the DeviceToDevice copy involves both reading and writing to device memory float bandwidth = 2.0f * ((double)memSize/(1024*1024*1024))*ITERATIONS/elapsedTimeSeconds; //float bandwidth = 2.0f * ((float)(1<<10) * memSize * (float)ITERATIONS) / (elapsedTimeSeconds *(1000.0) * (float)(1 << 20)); printf("DeviceToDevice Memcpy Bandwitdh = %f GB/s\n",bandwidth); break; } case 1: {//custom kernel with cuda malloced memory gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { nullKernel<<<num_of_blocks,num_of_threads_per_block>>>(outputcudamallocMemory,inputcudamallocMemory,N); } HANDLE_ERROR( cudaDeviceSynchronize()); gettimeofday(&tv2, NULL); HANDLE_ERROR( cudaGetLastError()); double nullElapsedTime = diff_s(tv1,tv2); gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { copyKernel<<<num_of_blocks,num_of_threads_per_block>>>(outputcudamallocMemory,inputcudamallocMemory,N); } HANDLE_ERROR( cudaDeviceSynchronize()); gettimeofday(&tv2, NULL); HANDLE_ERROR( cudaGetLastError()); double elapsedTimeSeconds = diff_s(tv1,tv2); printf("elapsedTime per iteration = %f\n",elapsedTimeSeconds/ITERATIONS); //we multiply by two since the DeviceToDevice copy involves both reading and writing to device memory float bandwidth = 2.0f * ((double)memSize/(1024*1024*1024))*ITERATIONS/elapsedTimeSeconds; float bandwidth_ex = 2.0f * ((double)memSize/(1024*1024*1024))*ITERATIONS/(elapsedTimeSeconds-nullElapsedTime); //float bandwidth = 2.0f * ((float)(1<<10) * memSize * (float)ITERATIONS) / (elapsedTimeSeconds *(1000.0) * (float)(1 << 20)); printf("Custom kernel(cudaMalloc) memcpy Bandwitdh including kernel launch overhead = %f GB/s\n",bandwidth); printf("Custom kernel(cudaMalloc) memcpy Bandwitdh excluding kernel launch overhead = %f GB/s\n",bandwidth_ex); break; } case 2: {//Custom kernel with host 
allocated memory gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { nullKernel<<<num_of_blocks,num_of_threads_per_block>>>(outputhostallocMemory,inputhostallocMemory,N); } HANDLE_ERROR( cudaDeviceSynchronize()); gettimeofday(&tv2, NULL); HANDLE_ERROR( cudaGetLastError()); double nullElapsedTime = diff_s(tv1,tv2); gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { copyKernel<<<num_of_blocks,num_of_threads_per_block>>>(outputhostallocMemory,inputhostallocMemory,N); } HANDLE_ERROR( cudaDeviceSynchronize()); gettimeofday(&tv2, NULL); HANDLE_ERROR( cudaGetLastError()); double elapsedTimeSeconds = diff_s(tv1,tv2); printf("elapsedTime per iteration = %f\n",elapsedTimeSeconds/ITERATIONS); //we multiply by two since the DeviceToDevice copy involves both reading and writing to device memory float bandwidth = 2.0f * ((double)memSize/(1024*1024*1024))*ITERATIONS/elapsedTimeSeconds; float bandwidth_ex = 2.0f * ((double)memSize/(1024*1024*1024))*ITERATIONS/(elapsedTimeSeconds-nullElapsedTime); //float bandwidth = 2.0f * ((float)(1<<10) * memSize * (float)ITERATIONS) / (elapsedTimeSeconds *(1000.0) * (float)(1 << 20)); printf("Custom kernel(cudaHostAlloc) memcpy Bandwitdh = %f GB/s\n",bandwidth); printf("Custom kernel(cudaHostAlloc) memcpy Bandwitdh excluding kernel launch overhead = %f GB/s\n",bandwidth_ex); break; } case 3: {//host allocated memory copy test gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { HANDLE_ERROR( cudaMemcpyAsync(outputhostallocMemory,inputhostallocMemory, sizeof(unsigned char)*memSize,cudaMemcpyDefault) ); } HANDLE_ERROR( cudaDeviceSynchronize()); gettimeofday(&tv2, NULL); HANDLE_ERROR( cudaGetLastError()); double elapsedTimeSeconds = diff_s(tv1,tv2); printf("elapsedTime per iteration = %f\n",elapsedTimeSeconds/ITERATIONS); //we multiply by two since the DeviceToDevice copy involves both reading and writing to device memory float bandwidth = 2.0f * ((double)memSize/(1024*1024*1024))*ITERATIONS/elapsedTimeSeconds; //float bandwidth = 2.0f * ((float)(1<<10) * memSize * (float)ITERATIONS) / (elapsedTimeSeconds *(1000.0) * (float)(1 << 20)); printf("Device to Device cudaHostAlloc memcpy Bandwitdh = %f GB/s\n",bandwidth); break; } case 4: {//managed memory copy test HANDLE_ERROR( cudaMallocManaged( (void**)& inputmanagedMemory, sizeof(unsigned char)*memSize) ); HANDLE_ERROR( cudaMallocManaged( (void**)& outputmanagedMemory, sizeof(unsigned char)*memSize) ); HANDLE_ERROR( cudaMemcpy(inputmanagedMemory,cpuMemory, sizeof(unsigned char)*memSize,cudaMemcpyDefault) ); gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { nullKernel<<<num_of_blocks,num_of_threads_per_block>>>(outputmanagedMemory,inputmanagedMemory,N); } HANDLE_ERROR( cudaDeviceSynchronize()); gettimeofday(&tv2, NULL); HANDLE_ERROR( cudaGetLastError()); double nullElapsedTime = diff_s(tv1,tv2); gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { copyKernel<<<num_of_blocks,num_of_threads_per_block>>>(outputmanagedMemory,inputmanagedMemory,N); } HANDLE_ERROR( cudaDeviceSynchronize()); gettimeofday(&tv2, NULL); HANDLE_ERROR( cudaGetLastError()); double elapsedTimeSeconds = diff_s(tv1,tv2); printf("elapsedTime per iteration = %f\n",elapsedTimeSeconds/ITERATIONS); //we multiply by two since the DeviceToDevice copy involves both reading and writing to device memory float bandwidth = 2.0f * ((double)memSize/(1024*1024*1024))*ITERATIONS/elapsedTimeSeconds; float bandwidth_ex = 2.0f * ((double)memSize/(1024*1024*1024))*ITERATIONS/(elapsedTimeSeconds-nullElapsedTime); 
//float bandwidth = 2.0f * ((float)(1<<10) * memSize * (float)ITERATIONS) / (elapsedTimeSeconds *(1000.0) * (float)(1 << 20)); printf("Custom kernel (managed memory) Bandwitdh = %f GB/s\n",bandwidth); printf("Custom kernel (managed memory) Bandwitdh excluding kernel launch overhead = %f GB/s\n",bandwidth_ex); cudaFree(inputmanagedMemory); cudaFree(outputmanagedMemory); break; } case 5: {//Custom kernel with host allocated to malloc copy gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { nullKernel<<<num_of_blocks,num_of_threads_per_block>>>(outputcudamallocMemory,inputhostallocMemory,N); } HANDLE_ERROR( cudaDeviceSynchronize()); gettimeofday(&tv2, NULL); HANDLE_ERROR( cudaGetLastError()); double nullElapsedTime = diff_s(tv1,tv2); gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { copyKernel<<<num_of_blocks,num_of_threads_per_block>>>(outputcudamallocMemory,inputhostallocMemory,N); } HANDLE_ERROR( cudaDeviceSynchronize()); gettimeofday(&tv2, NULL); HANDLE_ERROR( cudaGetLastError()); double elapsedTimeSeconds = diff_s(tv1,tv2); printf("elapsedTime per iteration = %f\n",elapsedTimeSeconds/ITERATIONS); //we multiply by two since the DeviceToDevice copy involves both reading and writing to device memory float bandwidth = 2.0f * ((double)memSize/(1024*1024*1024))*ITERATIONS/elapsedTimeSeconds; float bandwidth_ex = 2.0f * ((double)memSize/(1024*1024*1024))*ITERATIONS/(elapsedTimeSeconds-nullElapsedTime); //float bandwidth = 2.0f * ((float)(1<<10) * memSize * (float)ITERATIONS) / (elapsedTimeSeconds *(1000.0) * (float)(1 << 20)); printf("Custom kernel(cudaHostAlloc to cudaMalloc) memcpy Bandwitdh = %f GB/s\n",bandwidth); printf("Custom kernel(cudaHostAlloc to cudaMalloc) memcpy Bandwitdh excluding kernel launch overhead = %f GB/s\n",bandwidth_ex); break; } case 6: {//Cpu malloc to malloc unsigned char *mallocdOut,*mallocdIn; mallocdOut = (unsigned char *)malloc(sizeof(unsigned char)*memSize); mallocdIn = (unsigned char *)malloc(sizeof(unsigned char)*memSize); for(int i=0; i<memSize/sizeof(unsigned char); i++) mallocdIn[i]=5; if(!mallocdOut || !mallocdIn) { printf("ERROR in malloc\n"); return -1; } for(int i=0; i<memSize/sizeof(unsigned char); i++) mallocdIn[i]=5; gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { memcpy(mallocdOut,mallocdIn,sizeof(unsigned char)*memSize); } gettimeofday(&tv2, NULL); double elapsedTimeSeconds = diff_s(tv1,tv2); printf("elapsedTime per iteration = %f\n",elapsedTimeSeconds/ITERATIONS); //we multiply by two since the DeviceToDevice copy involves both reading and writing to device memory float bandwidth = ((double)memSize/(1024*1024*1024))*ITERATIONS/elapsedTimeSeconds; printf("cpu malloc to malloc memcpy Bandwitdh = %f GB/s\n",bandwidth); free(mallocdOut); free(mallocdIn); break; } case 7: {//Cpu multithreaded malloc to malloc const int nthreads=4; unsigned char *mallocdOut[nthreads],*mallocdIn[nthreads]; for(int i=0; i<nthreads; i++) { mallocdOut[i] = (unsigned char *)malloc(sizeof(unsigned char)*memSize); mallocdIn[i] = (unsigned char *)malloc(sizeof(unsigned char)*memSize); } if(!mallocdOut || !mallocdIn) { printf("ERROR in malloc\n"); return -1; } for(int i=0; i<memSize/sizeof(unsigned char); i++){ mallocdIn[0][i]=5; mallocdIn[1][i]=5; mallocdIn[2][i]=5; mallocdIn[3][i]=5; } gettimeofday(&tv1, NULL); for(int i = 0; i < ITERATIONS; i++) { launch_cpu_threads(nthreads,mallocdOut,mallocdIn,memSize); } gettimeofday(&tv2, NULL); double elapsedTimeSeconds = diff_s(tv1,tv2); printf("elapsedTime per iteration = 
%f\n",elapsedTimeSeconds/ITERATIONS); //we multiply by two since the DeviceToDevice copy involves both reading and writing to device memory float bandwidth = 4 * ((double)memSize/(1024*1024*1024))*ITERATIONS/elapsedTimeSeconds; printf("cpu malloc to malloc memcpy Bandwitdh = %f GB/s\n",bandwidth); for(int i=0; i<nthreads; i++){ free(mallocdOut[i]); free(mallocdIn[i]); } break; } } free(cpuMemory); cudaFreeHost(inputhostallocMemory); cudaFreeHost(outputhostallocMemory); cudaFree(inputcudamallocMemory); cudaFree(outputcudamallocMemory); return 0; }
1ad562845a6ecf71dc2e1d18194814a13a2281cc.hip
// !!! This is a file automatically generated by hipify!!! //=================================================================// // CUDA BFS kernel // Topological-Driven: one node per thread, no atomic instructions // Reference: // Pawan Harish, Accelerating large graph algorithms // on the GPU using CUDA (HiPC 2007) //=================================================================// #include <hip/hip_runtime.h> #include <stdint.h> #include <stdio.h> #include "cudaGraph.h" __global__ void initialize(bool * d_graph_frontier, bool * d_updating_graph_frontier, bool * d_graph_visited, uint32_t * d_graph_property, uint64_t num_vertex) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid < num_vertex ) { d_graph_frontier[tid] = false; d_updating_graph_frontier[tid] = false; d_graph_visited[tid] = false; d_graph_property[tid] = MY_INFINITY; } } __global__ void BFS_kernel_1( cudaGraph d_graph, bool * device_graph_frontier, bool * device_updating_graph_frontier, bool * device_graph_visited, uint32_t * device_vpl ) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid<d_graph.vertex_cnt && device_graph_frontier[tid] ) { device_graph_frontier[tid] = false; uint64_t eidx = d_graph.get_firstedge_index(tid); uint64_t eidx_end = d_graph.get_edge_index_end(tid); for (size_t i=eidx; i<eidx_end; i++) { uint64_t vid = d_graph.get_edge_dest(i); if (device_graph_visited[vid]==false) { device_vpl[vid] = device_vpl[tid]+1; device_updating_graph_frontier[vid] = true; } } } } __global__ void BFS_kernel_2( bool * device_graph_frontier, bool * device_updating_graph_frontier, bool * device_graph_visited, bool * device_over, uint64_t vl_sz ) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid < vl_sz && device_updating_graph_frontier[tid] ) { device_graph_frontier[tid] = true; device_graph_visited[tid] = true; device_updating_graph_frontier[tid] = false; *device_over = true; } } void cuda_BFS(uint64_t * vertexlist, uint64_t * edgelist, uint32_t * vproplist, uint64_t vertex_cnt, uint64_t edge_cnt, uint64_t root) { uint32_t * device_vpl = 0; bool * device_graph_frontier = 0; bool * device_updating_graph_frontier = 0; bool * device_graph_visited = 0; bool * device_over = 0; float h2d_copy_time = 0; // host to device data transfer time float d2h_copy_time = 0; // device to host data transfer time float kernel_time = 0; // kernel execution time int device; hipGetDevice(&device); hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp,device); // Try to use as many threads as possible so that each thread // is processing one vertex. If max thread is reached, // split them into multiple blocks. 
unsigned int num_thread_per_block = (unsigned int) vertex_cnt; if (num_thread_per_block > devProp.maxThreadsPerBlock) num_thread_per_block = devProp.maxThreadsPerBlock; unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block ); // malloc of gpu side cudaErrCheck( hipMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) ); cudaErrCheck( hipMalloc((void**)&device_graph_frontier, vertex_cnt*sizeof(bool)) ); cudaErrCheck( hipMalloc((void**)&device_updating_graph_frontier, vertex_cnt*sizeof(bool)) ); cudaErrCheck( hipMalloc((void**)&device_graph_visited, vertex_cnt*sizeof(bool)) ); cudaErrCheck( hipMalloc((void**)&device_over, sizeof(bool)) ); hipEvent_t start_event, stop_event; cudaErrCheck( hipEventCreate(&start_event) ); cudaErrCheck( hipEventCreate(&stop_event) ); // initialization hipLaunchKernelGGL(( initialize), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_graph_frontier, device_updating_graph_frontier, device_graph_visited, device_vpl, vertex_cnt); // prepare graph struct // one for host side, one for device side cudaGraph h_graph, d_graph; // here copy only the pointers h_graph.read(vertexlist, edgelist, vertex_cnt, edge_cnt); bool true_flag=true; uint32_t zero_flag=0; // memcpy from host to device hipEventRecord(start_event, 0); // copy graph data to device h_graph.cudaGraphCopy(&d_graph); cudaErrCheck( hipMemcpy(&(device_graph_frontier[root]), &true_flag, sizeof(bool), hipMemcpyHostToDevice) ); // set root vertex as the first frontier cudaErrCheck( hipMemcpy(&(device_graph_visited[root]), &true_flag, sizeof(bool), hipMemcpyHostToDevice) ); // set root vertex as visited cudaErrCheck( hipMemcpy(&(device_vpl[root]), &zero_flag, sizeof(uint32_t), hipMemcpyHostToDevice) ); // set root vertex as visited hipEventRecord(stop_event, 0); hipEventSynchronize(stop_event); hipEventElapsedTime(&h2d_copy_time, start_event, stop_event); // BFS traversal bool stop; hipEventRecord(start_event, 0); int k=0; do { // Each iteration processes // one level of BFS traversal stop = false; cudaErrCheck( hipMemcpy(device_over, &stop, sizeof(bool), hipMemcpyHostToDevice) ); // step 1 hipLaunchKernelGGL(( BFS_kernel_1), dim3(num_block), dim3(num_thread_per_block), 0, 0, d_graph, device_graph_frontier, device_updating_graph_frontier, device_graph_visited, device_vpl); // step 2 hipLaunchKernelGGL(( BFS_kernel_2), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_graph_frontier, device_updating_graph_frontier, device_graph_visited, device_over, vertex_cnt); cudaErrCheck( hipMemcpy(&stop, device_over, sizeof(bool), hipMemcpyDeviceToHost) ); k++; }while(stop); hipEventRecord(stop_event, 0); hipEventSynchronize(stop_event); hipEventElapsedTime(&kernel_time, start_event, stop_event); hipEventRecord(start_event, 0); cudaErrCheck( hipMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t), hipMemcpyDeviceToHost) ); hipEventRecord(stop_event, 0); hipEventSynchronize(stop_event); hipEventElapsedTime(&d2h_copy_time, start_event, stop_event); printf("== iteration #: %d\n", k); #ifndef ENABLE_VERIFY printf("== host->device copy time: %f ms\n", h2d_copy_time); printf("== device->host copy time: %f ms\n", d2h_copy_time); printf("== kernel time: %f ms\n", kernel_time); #endif hipEventDestroy(start_event); hipEventDestroy(stop_event); // free graph struct on device side d_graph.cudaGraphFree(); cudaErrCheck( hipFree(device_vpl) ); cudaErrCheck( hipFree(device_graph_frontier) ); cudaErrCheck( hipFree(device_updating_graph_frontier) ); cudaErrCheck( 
hipFree(device_graph_visited) ); }
1ad562845a6ecf71dc2e1d18194814a13a2281cc.cu
//=================================================================// // CUDA BFS kernel // Topological-Driven: one node per thread, no atomic instructions // Reference: // Pawan Harish, Accelerating large graph algorithms // on the GPU using CUDA (HiPC 2007) //=================================================================// #include <cuda.h> #include <stdint.h> #include <stdio.h> #include "cudaGraph.h" __global__ void initialize(bool * d_graph_frontier, bool * d_updating_graph_frontier, bool * d_graph_visited, uint32_t * d_graph_property, uint64_t num_vertex) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid < num_vertex ) { d_graph_frontier[tid] = false; d_updating_graph_frontier[tid] = false; d_graph_visited[tid] = false; d_graph_property[tid] = MY_INFINITY; } } __global__ void BFS_kernel_1( cudaGraph d_graph, bool * device_graph_frontier, bool * device_updating_graph_frontier, bool * device_graph_visited, uint32_t * device_vpl ) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid<d_graph.vertex_cnt && device_graph_frontier[tid] ) { device_graph_frontier[tid] = false; uint64_t eidx = d_graph.get_firstedge_index(tid); uint64_t eidx_end = d_graph.get_edge_index_end(tid); for (size_t i=eidx; i<eidx_end; i++) { uint64_t vid = d_graph.get_edge_dest(i); if (device_graph_visited[vid]==false) { device_vpl[vid] = device_vpl[tid]+1; device_updating_graph_frontier[vid] = true; } } } } __global__ void BFS_kernel_2( bool * device_graph_frontier, bool * device_updating_graph_frontier, bool * device_graph_visited, bool * device_over, uint64_t vl_sz ) { size_t tid = blockIdx.x * blockDim.x + threadIdx.x; if ( tid < vl_sz && device_updating_graph_frontier[tid] ) { device_graph_frontier[tid] = true; device_graph_visited[tid] = true; device_updating_graph_frontier[tid] = false; *device_over = true; } } void cuda_BFS(uint64_t * vertexlist, uint64_t * edgelist, uint32_t * vproplist, uint64_t vertex_cnt, uint64_t edge_cnt, uint64_t root) { uint32_t * device_vpl = 0; bool * device_graph_frontier = 0; bool * device_updating_graph_frontier = 0; bool * device_graph_visited = 0; bool * device_over = 0; float h2d_copy_time = 0; // host to device data transfer time float d2h_copy_time = 0; // device to host data transfer time float kernel_time = 0; // kernel execution time int device; cudaGetDevice(&device); cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp,device); // Try to use as many threads as possible so that each thread // is processing one vertex. If max thread is reached, // split them into multiple blocks. 
unsigned int num_thread_per_block = (unsigned int) vertex_cnt; if (num_thread_per_block > devProp.maxThreadsPerBlock) num_thread_per_block = devProp.maxThreadsPerBlock; unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block ); // malloc of gpu side cudaErrCheck( cudaMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) ); cudaErrCheck( cudaMalloc((void**)&device_graph_frontier, vertex_cnt*sizeof(bool)) ); cudaErrCheck( cudaMalloc((void**)&device_updating_graph_frontier, vertex_cnt*sizeof(bool)) ); cudaErrCheck( cudaMalloc((void**)&device_graph_visited, vertex_cnt*sizeof(bool)) ); cudaErrCheck( cudaMalloc((void**)&device_over, sizeof(bool)) ); cudaEvent_t start_event, stop_event; cudaErrCheck( cudaEventCreate(&start_event) ); cudaErrCheck( cudaEventCreate(&stop_event) ); // initialization initialize<<<num_block, num_thread_per_block>>>( device_graph_frontier, device_updating_graph_frontier, device_graph_visited, device_vpl, vertex_cnt); // prepare graph struct // one for host side, one for device side cudaGraph h_graph, d_graph; // here copy only the pointers h_graph.read(vertexlist, edgelist, vertex_cnt, edge_cnt); bool true_flag=true; uint32_t zero_flag=0; // memcpy from host to device cudaEventRecord(start_event, 0); // copy graph data to device h_graph.cudaGraphCopy(&d_graph); cudaErrCheck( cudaMemcpy(&(device_graph_frontier[root]), &true_flag, sizeof(bool), cudaMemcpyHostToDevice) ); // set root vertex as the first frontier cudaErrCheck( cudaMemcpy(&(device_graph_visited[root]), &true_flag, sizeof(bool), cudaMemcpyHostToDevice) ); // set root vertex as visited cudaErrCheck( cudaMemcpy(&(device_vpl[root]), &zero_flag, sizeof(uint32_t), cudaMemcpyHostToDevice) ); // set root vertex as visited cudaEventRecord(stop_event, 0); cudaEventSynchronize(stop_event); cudaEventElapsedTime(&h2d_copy_time, start_event, stop_event); // BFS traversal bool stop; cudaEventRecord(start_event, 0); int k=0; do { // Each iteration processes // one level of BFS traversal stop = false; cudaErrCheck( cudaMemcpy(device_over, &stop, sizeof(bool), cudaMemcpyHostToDevice) ); // step 1 BFS_kernel_1<<<num_block, num_thread_per_block>>>(d_graph, device_graph_frontier, device_updating_graph_frontier, device_graph_visited, device_vpl); // step 2 BFS_kernel_2<<<num_block, num_thread_per_block>>>( device_graph_frontier, device_updating_graph_frontier, device_graph_visited, device_over, vertex_cnt); cudaErrCheck( cudaMemcpy(&stop, device_over, sizeof(bool), cudaMemcpyDeviceToHost) ); k++; }while(stop); cudaEventRecord(stop_event, 0); cudaEventSynchronize(stop_event); cudaEventElapsedTime(&kernel_time, start_event, stop_event); cudaEventRecord(start_event, 0); cudaErrCheck( cudaMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t), cudaMemcpyDeviceToHost) ); cudaEventRecord(stop_event, 0); cudaEventSynchronize(stop_event); cudaEventElapsedTime(&d2h_copy_time, start_event, stop_event); printf("== iteration #: %d\n", k); #ifndef ENABLE_VERIFY printf("== host->device copy time: %f ms\n", h2d_copy_time); printf("== device->host copy time: %f ms\n", d2h_copy_time); printf("== kernel time: %f ms\n", kernel_time); #endif cudaEventDestroy(start_event); cudaEventDestroy(stop_event); // free graph struct on device side d_graph.cudaGraphFree(); cudaErrCheck( cudaFree(device_vpl) ); cudaErrCheck( cudaFree(device_graph_frontier) ); cudaErrCheck( cudaFree(device_updating_graph_frontier) ); cudaErrCheck( cudaFree(device_graph_visited) ); }
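The host loop in cuda_BFS above advances one BFS level per iteration: BFS_kernel_1 relaxes the edges leaving the current frontier and marks candidates in the updating array, and BFS_kernel_2 promotes those candidates into the next frontier and raises the termination flag. A plain CPU reference such as the sketch below produces the same per-vertex levels and could back an ENABLE_VERIFY-style comparison against vproplist. It is an assumption rather than part of either file: it presumes vertexlist holds vertex_cnt + 1 CSR row offsets and uses 0xFFFFFFFF as a stand-in for MY_INFINITY.

#include <cstdint>
#include <cstdio>
#include <queue>
#include <vector>

static const uint32_t kInfinity = 0xFFFFFFFFu;  // stand-in for MY_INFINITY

// Level-by-level BFS over a CSR graph: vertexlist holds row offsets and
// edgelist holds destination vertices, matching the arrays passed to cuda_BFS.
std::vector<uint32_t> cpu_bfs_levels(const uint64_t* vertexlist,
                                     const uint64_t* edgelist,
                                     uint64_t vertex_cnt, uint64_t root) {
    std::vector<uint32_t> level(vertex_cnt, kInfinity);
    std::queue<uint64_t> frontier;
    level[root] = 0;
    frontier.push(root);
    while (!frontier.empty()) {
        const uint64_t u = frontier.front();
        frontier.pop();
        // Neighbours of u occupy edgelist[vertexlist[u] .. vertexlist[u + 1]).
        for (uint64_t e = vertexlist[u]; e < vertexlist[u + 1]; ++e) {
            const uint64_t v = edgelist[e];
            if (level[v] == kInfinity) {  // first time v is reached
                level[v] = level[u] + 1;
                frontier.push(v);
            }
        }
    }
    return level;
}

int main() {
    // Toy graph 0 -> 1 -> 2 -> 3 in CSR form (offsets have vertex_cnt + 1 entries).
    const uint64_t offsets[] = {0, 1, 2, 3, 3};
    const uint64_t dests[]   = {1, 2, 3};
    const auto level = cpu_bfs_levels(offsets, dests, 4, 0);
    for (uint64_t v = 0; v < 4; ++v)
        std::printf("vertex %llu: level %u\n", (unsigned long long)v, level[v]);
    return 0;
}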
04b61af30b49eebd9d2f97c552ed4e1332035a79.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<iostream> #include<cstdio> using namespace std; __global__ void maxi(int *a,int *b,int n) { int block=256*blockIdx.x; int max=0; for(int i=block;i<min(256+block,n);i++) { if(max<a[i]) { max=a[i]; } } b[blockIdx.x]=max; } int main() { cout<<"Enter the size of array"<<endl; int n; cin>>n; int a[n]; hipEvent_t start,end; cout<<"Enter the elements for the array"<<endl; for(int i=0;i<n;i++) { cin>>a[i]; } int *ad,*bd; int size=n*sizeof(int); hipMalloc(&ad,size); hipMemcpy(ad,a,size,hipMemcpyHostToDevice); int grids=ceil(n*1.0f/256.0f); hipMalloc(&bd,grids*sizeof(int)); dim3 grid(grids,1); dim3 block(1,1); hipEventCreate(&start); hipEventCreate(&end); hipEventRecord(start); while(n>1) { hipLaunchKernelGGL(( maxi), dim3(grids),dim3(block), 0, 0, ad,bd,n); n=ceil(n*1.0f/256.0f); hipMemcpy(ad,bd,n*sizeof(int),hipMemcpyDeviceToDevice); } hipEventRecord(end); hipEventSynchronize(end); float time=0; hipEventElapsedTime(&time,start,end); int ans[2]; hipMemcpy(ans,ad,4,hipMemcpyDeviceToHost); cout<<"The maximum element is "<<ans[0]<<endl; cout<<"The time required for it is "; cout<<time<<endl; }
04b61af30b49eebd9d2f97c552ed4e1332035a79.cu
#include<iostream> #include<cstdio> using namespace std; __global__ void maxi(int *a,int *b,int n) { int block=256*blockIdx.x; int max=0; for(int i=block;i<min(256+block,n);i++) { if(max<a[i]) { max=a[i]; } } b[blockIdx.x]=max; } int main() { cout<<"Enter the size of array"<<endl; int n; cin>>n; int a[n]; cudaEvent_t start,end; cout<<"Enter the elements for the array"<<endl; for(int i=0;i<n;i++) { cin>>a[i]; } int *ad,*bd; int size=n*sizeof(int); cudaMalloc(&ad,size); cudaMemcpy(ad,a,size,cudaMemcpyHostToDevice); int grids=ceil(n*1.0f/256.0f); cudaMalloc(&bd,grids*sizeof(int)); dim3 grid(grids,1); dim3 block(1,1); cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord(start); while(n>1) { maxi<<<grids,block>>>(ad,bd,n); n=ceil(n*1.0f/256.0f); cudaMemcpy(ad,bd,n*sizeof(int),cudaMemcpyDeviceToDevice); } cudaEventRecord(end); cudaEventSynchronize(end); float time=0; cudaEventElapsedTime(&time,start,end); int ans[2]; cudaMemcpy(ans,ad,4,cudaMemcpyDeviceToHost); cout<<"The maximum element is "<<ans[0]<<endl; cout<<"The time required for it is "; cout<<time<<endl; }
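The reduction sample in the pair above gives each block a single thread (block is dim3(1,1)), so that thread walks its 256-element chunk serially, and the host while-loop keeps re-launching the kernel on the per-block results until one value remains. The sketch below is an alternative formulation rather than a fix to the sample: every element gets its own thread and each block reduces its tile through shared memory, which is the more common pattern for this kind of kernel.

#include <cuda_runtime.h>
#include <algorithm>
#include <climits>
#include <cstdio>
#include <vector>

__global__ void block_max(const int* in, int* out, int n) {
    __shared__ int tile[256];
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    // Out-of-range threads contribute INT_MIN so they can never win the max.
    tile[threadIdx.x] = (gid < n) ? in[gid] : INT_MIN;
    __syncthreads();
    // Tree reduction in shared memory: halve the number of active threads each step.
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride)
            tile[threadIdx.x] = max(tile[threadIdx.x], tile[threadIdx.x + stride]);
        __syncthreads();
    }
    if (threadIdx.x == 0) out[blockIdx.x] = tile[0];
}

int main() {
    const int n = 1000;
    std::vector<int> h(n);
    for (int i = 0; i < n; ++i) h[i] = (i * 37) % 1000;  // a permutation of 0..999, so max is 999
    int* d_in = nullptr;
    int* d_out = nullptr;
    const int blocks = (n + 255) / 256;
    cudaMalloc(&d_in, n * sizeof(int));
    cudaMalloc(&d_out, blocks * sizeof(int));
    cudaMemcpy(d_in, h.data(), n * sizeof(int), cudaMemcpyHostToDevice);
    block_max<<<blocks, 256>>>(d_in, d_out, n);
    // One partial maximum per block remains; finish on the host (the sample instead
    // re-launches the kernel on the partial results until a single value is left).
    std::vector<int> partial(blocks);
    cudaMemcpy(partial.data(), d_out, blocks * sizeof(int), cudaMemcpyDeviceToHost);
    int result = partial[0];
    for (int i = 1; i < blocks; ++i) result = std::max(result, partial[i]);
    std::printf("max = %d (expected 999)\n", result);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}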
c62ad68b07dfbccca640e4d88a264818ef49ba44.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2020 by Contributors * @file array/cuda/coo_sort.cc * @brief Sort COO index */ #include <dgl/array.h> #include "../../c_api_common.h" #include "../../runtime/cuda/cuda_common.h" #include "./utils.h" namespace dgl { using runtime::NDArray; namespace aten { namespace impl { ///////////////////////////// COOSort_ ///////////////////////////// /** * @brief Encode row and column IDs into a single scalar per edge. * * @tparam IdType The type to encode as. * @param row The row (src) IDs per edge. * @param col The column (dst) IDs per edge. * @param nnz The number of edges. * @param col_bits The number of bits used to encode the destination. The row * information is packed into the remaining bits. * @param key The encoded edges (output). */ template <typename IdType> __global__ void _COOEncodeEdgesKernel( const IdType* const row, const IdType* const col, const int64_t nnz, const int col_bits, IdType* const key) { int64_t tx = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x; if (tx < nnz) { key[tx] = row[tx] << col_bits | col[tx]; } } /** * @brief Decode row and column IDs from the encoded edges. * * @tparam IdType The type the edges are encoded as. * @param key The encoded edges. * @param nnz The number of edges. * @param col_bits The number of bits used to store the column/dst ID. * @param row The row (src) IDs per edge (output). * @param col The col (dst) IDs per edge (output). */ template <typename IdType> __global__ void _COODecodeEdgesKernel( const IdType* const key, const int64_t nnz, const int col_bits, IdType* const row, IdType* const col) { int64_t tx = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x; if (tx < nnz) { const IdType k = key[tx]; row[tx] = k >> col_bits; col[tx] = k & ((1 << col_bits) - 1); } } template <DGLDeviceType XPU, typename IdType> void COOSort_(COOMatrix* coo, bool sort_column) { hipStream_t stream = runtime::getCurrentHIPStreamMasqueradingAsCUDA(); const int row_bits = cuda::_NumberOfBits(coo->num_rows); const int64_t nnz = coo->row->shape[0]; if (sort_column) { const int col_bits = cuda::_NumberOfBits(coo->num_cols); const int num_bits = row_bits + col_bits; const int nt = 256; const int nb = (nnz + nt - 1) / nt; CHECK(static_cast<int64_t>(nb) * nt >= nnz); IdArray pos = aten::NewIdArray(nnz, coo->row->ctx, coo->row->dtype.bits); CUDA_KERNEL_CALL( _COOEncodeEdgesKernel, nb, nt, 0, stream, coo->row.Ptr<IdType>(), coo->col.Ptr<IdType>(), nnz, col_bits, pos.Ptr<IdType>()); auto sorted = Sort(pos, num_bits); CUDA_KERNEL_CALL( _COODecodeEdgesKernel, nb, nt, 0, stream, sorted.first.Ptr<IdType>(), nnz, col_bits, coo->row.Ptr<IdType>(), coo->col.Ptr<IdType>()); if (aten::COOHasData(*coo)) coo->data = IndexSelect(coo->data, sorted.second); else coo->data = AsNumBits(sorted.second, coo->row->dtype.bits); coo->row_sorted = coo->col_sorted = true; } else { const int num_bits = row_bits; auto sorted = Sort(coo->row, num_bits); coo->row = sorted.first; coo->col = IndexSelect(coo->col, sorted.second); if (aten::COOHasData(*coo)) coo->data = IndexSelect(coo->data, sorted.second); else coo->data = AsNumBits(sorted.second, coo->row->dtype.bits); coo->row_sorted = true; } } template void COOSort_<kDGLCUDA, int32_t>(COOMatrix* coo, bool sort_column); template void COOSort_<kDGLCUDA, int64_t>(COOMatrix* coo, bool sort_column); ///////////////////////////// COOIsSorted ///////////////////////////// template <typename IdType> __global__ void 
_COOIsSortedKernel( const IdType* row, const IdType* col, int64_t nnz, int8_t* row_sorted, int8_t* col_sorted) { int tx = blockIdx.x * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; while (tx < nnz) { if (tx == 0) { row_sorted[0] = 1; col_sorted[0] = 1; } else { row_sorted[tx] = static_cast<int8_t>(row[tx - 1] <= row[tx]); col_sorted[tx] = static_cast<int8_t>(row[tx - 1] < row[tx] || col[tx - 1] <= col[tx]); } tx += stride_x; } } template <DGLDeviceType XPU, typename IdType> std::pair<bool, bool> COOIsSorted(COOMatrix coo) { const int64_t nnz = coo.row->shape[0]; const auto& ctx = coo.row->ctx; hipStream_t stream = runtime::getCurrentHIPStreamMasqueradingAsCUDA(); auto device = runtime::DeviceAPI::Get(ctx); // We allocate a workspace of 2*nnz bytes. It wastes a little bit memory but // should be fine. int8_t* row_flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, nnz)); int8_t* col_flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, nnz)); const int nt = cuda::FindNumThreads(nnz); const int nb = (nnz + nt - 1) / nt; CUDA_KERNEL_CALL( _COOIsSortedKernel, nb, nt, 0, stream, coo.row.Ptr<IdType>(), coo.col.Ptr<IdType>(), nnz, row_flags, col_flags); const bool row_sorted = cuda::AllTrue(row_flags, nnz, ctx); const bool col_sorted = row_sorted ? cuda::AllTrue(col_flags, nnz, ctx) : false; device->FreeWorkspace(ctx, row_flags); device->FreeWorkspace(ctx, col_flags); return {row_sorted, col_sorted}; } template std::pair<bool, bool> COOIsSorted<kDGLCUDA, int32_t>(COOMatrix coo); template std::pair<bool, bool> COOIsSorted<kDGLCUDA, int64_t>(COOMatrix coo); } // namespace impl } // namespace aten } // namespace dgl
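/*
 * Illustrative sketch (not part of the original file): COOSort_ above sorts edges by
 * packing (row, col) into one integer key, radix-sorting the keys, and unpacking the
 * result. A minimal host-side version of that encode/decode follows. NumberOfBits is a
 * stand-in for cuda::_NumberOfBits from utils.h, whose exact definition is assumed here.
 */
#include <cassert>
#include <cstdint>

// Bits needed to represent IDs in [0, n); assumed equivalent to cuda::_NumberOfBits.
static int NumberOfBits(int64_t n) {
  int bits = 1;
  while ((int64_t(1) << bits) < n) ++bits;
  return bits;
}

int main() {
  const int64_t num_cols = 4000;
  const int col_bits = NumberOfBits(num_cols);

  const int64_t row = 123, col = 3210;
  // Encode: row in the high bits, col in the low col_bits bits (_COOEncodeEdgesKernel).
  const int64_t key = (row << col_bits) | col;
  // Decode: shift and mask, as _COODecodeEdgesKernel does per edge.
  const int64_t row2 = key >> col_bits;
  const int64_t col2 = key & ((int64_t(1) << col_bits) - 1);
  assert(row2 == row && col2 == col);
  return 0;
}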
c62ad68b07dfbccca640e4d88a264818ef49ba44.cu
/** * Copyright (c) 2020 by Contributors * @file array/cuda/coo_sort.cc * @brief Sort COO index */ #include <dgl/array.h> #include "../../c_api_common.h" #include "../../runtime/cuda/cuda_common.h" #include "./utils.h" namespace dgl { using runtime::NDArray; namespace aten { namespace impl { ///////////////////////////// COOSort_ ///////////////////////////// /** * @brief Encode row and column IDs into a single scalar per edge. * * @tparam IdType The type to encode as. * @param row The row (src) IDs per edge. * @param col The column (dst) IDs per edge. * @param nnz The number of edges. * @param col_bits The number of bits used to encode the destination. The row * information is packed into the remaining bits. * @param key The encoded edges (output). */ template <typename IdType> __global__ void _COOEncodeEdgesKernel( const IdType* const row, const IdType* const col, const int64_t nnz, const int col_bits, IdType* const key) { int64_t tx = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x; if (tx < nnz) { key[tx] = row[tx] << col_bits | col[tx]; } } /** * @brief Decode row and column IDs from the encoded edges. * * @tparam IdType The type the edges are encoded as. * @param key The encoded edges. * @param nnz The number of edges. * @param col_bits The number of bits used to store the column/dst ID. * @param row The row (src) IDs per edge (output). * @param col The col (dst) IDs per edge (output). */ template <typename IdType> __global__ void _COODecodeEdgesKernel( const IdType* const key, const int64_t nnz, const int col_bits, IdType* const row, IdType* const col) { int64_t tx = static_cast<int64_t>(blockIdx.x) * blockDim.x + threadIdx.x; if (tx < nnz) { const IdType k = key[tx]; row[tx] = k >> col_bits; col[tx] = k & ((1 << col_bits) - 1); } } template <DGLDeviceType XPU, typename IdType> void COOSort_(COOMatrix* coo, bool sort_column) { cudaStream_t stream = runtime::getCurrentCUDAStream(); const int row_bits = cuda::_NumberOfBits(coo->num_rows); const int64_t nnz = coo->row->shape[0]; if (sort_column) { const int col_bits = cuda::_NumberOfBits(coo->num_cols); const int num_bits = row_bits + col_bits; const int nt = 256; const int nb = (nnz + nt - 1) / nt; CHECK(static_cast<int64_t>(nb) * nt >= nnz); IdArray pos = aten::NewIdArray(nnz, coo->row->ctx, coo->row->dtype.bits); CUDA_KERNEL_CALL( _COOEncodeEdgesKernel, nb, nt, 0, stream, coo->row.Ptr<IdType>(), coo->col.Ptr<IdType>(), nnz, col_bits, pos.Ptr<IdType>()); auto sorted = Sort(pos, num_bits); CUDA_KERNEL_CALL( _COODecodeEdgesKernel, nb, nt, 0, stream, sorted.first.Ptr<IdType>(), nnz, col_bits, coo->row.Ptr<IdType>(), coo->col.Ptr<IdType>()); if (aten::COOHasData(*coo)) coo->data = IndexSelect(coo->data, sorted.second); else coo->data = AsNumBits(sorted.second, coo->row->dtype.bits); coo->row_sorted = coo->col_sorted = true; } else { const int num_bits = row_bits; auto sorted = Sort(coo->row, num_bits); coo->row = sorted.first; coo->col = IndexSelect(coo->col, sorted.second); if (aten::COOHasData(*coo)) coo->data = IndexSelect(coo->data, sorted.second); else coo->data = AsNumBits(sorted.second, coo->row->dtype.bits); coo->row_sorted = true; } } template void COOSort_<kDGLCUDA, int32_t>(COOMatrix* coo, bool sort_column); template void COOSort_<kDGLCUDA, int64_t>(COOMatrix* coo, bool sort_column); ///////////////////////////// COOIsSorted ///////////////////////////// template <typename IdType> __global__ void _COOIsSortedKernel( const IdType* row, const IdType* col, int64_t nnz, int8_t* row_sorted, int8_t* col_sorted) { int tx 
= blockIdx.x * blockDim.x + threadIdx.x; const int stride_x = gridDim.x * blockDim.x; while (tx < nnz) { if (tx == 0) { row_sorted[0] = 1; col_sorted[0] = 1; } else { row_sorted[tx] = static_cast<int8_t>(row[tx - 1] <= row[tx]); col_sorted[tx] = static_cast<int8_t>(row[tx - 1] < row[tx] || col[tx - 1] <= col[tx]); } tx += stride_x; } } template <DGLDeviceType XPU, typename IdType> std::pair<bool, bool> COOIsSorted(COOMatrix coo) { const int64_t nnz = coo.row->shape[0]; const auto& ctx = coo.row->ctx; cudaStream_t stream = runtime::getCurrentCUDAStream(); auto device = runtime::DeviceAPI::Get(ctx); // We allocate a workspace of 2*nnz bytes. It wastes a little bit memory but // should be fine. int8_t* row_flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, nnz)); int8_t* col_flags = static_cast<int8_t*>(device->AllocWorkspace(ctx, nnz)); const int nt = cuda::FindNumThreads(nnz); const int nb = (nnz + nt - 1) / nt; CUDA_KERNEL_CALL( _COOIsSortedKernel, nb, nt, 0, stream, coo.row.Ptr<IdType>(), coo.col.Ptr<IdType>(), nnz, row_flags, col_flags); const bool row_sorted = cuda::AllTrue(row_flags, nnz, ctx); const bool col_sorted = row_sorted ? cuda::AllTrue(col_flags, nnz, ctx) : false; device->FreeWorkspace(ctx, row_flags); device->FreeWorkspace(ctx, col_flags); return {row_sorted, col_sorted}; } template std::pair<bool, bool> COOIsSorted<kDGLCUDA, int32_t>(COOMatrix coo); template std::pair<bool, bool> COOIsSorted<kDGLCUDA, int64_t>(COOMatrix coo); } // namespace impl } // namespace aten } // namespace dgl
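/*
 * Illustrative sketch (not part of the original file): COOIsSorted above writes one
 * flag per edge (flag[0] forced to 1) and reduces them with cuda::AllTrue. The same
 * per-edge predicate evaluated on the host, with an illustrative helper name:
 */
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

// Returns {row_sorted, col_sorted} using the conditions from _COOIsSortedKernel.
static std::pair<bool, bool> CooIsSortedHost(const std::vector<int64_t>& row,
                                             const std::vector<int64_t>& col) {
  bool row_sorted = true, col_sorted = true;
  for (size_t i = 1; i < row.size(); ++i) {
    row_sorted = row_sorted && (row[i - 1] <= row[i]);
    // Columns only need to be nondecreasing within a run of equal rows.
    col_sorted = col_sorted && (row[i - 1] < row[i] || col[i - 1] <= col[i]);
  }
  // The wrapper above only checks columns when the rows are already sorted.
  return {row_sorted, row_sorted && col_sorted};
}

int main() {
  std::vector<int64_t> row = {0, 0, 1, 1};
  std::vector<int64_t> col = {2, 5, 0, 3};
  auto s = CooIsSortedHost(row, col);
  std::cout << s.first << " " << s.second << "\n";  // prints "1 1"
  return 0;
}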
d3a87ddb3a6ebef1edfc0be18097319bcbfd7bef.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "graph.h" #include "../timer.h" #include <string> __global__ void truss_kernel(Graph g, int k, int *done, int iteration) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < g.nnzSize) { unsigned int v1 = g.row[index]; unsigned int v2 = g.col[index]; if (v2 != UINT_MAX/* && (g.affected[index] || iteration == 0)*/) { unsigned int commonNeighbors = 0; int index1 = g.rowPtr[v1]; int index2 = g.rowPtr[v2]; while (index1 < g.rowPtr[v1+1] && index2 < g.rowPtr[v2+1]) { if (g.col[index1] == UINT_MAX) { ++index1; continue; } else if (g.col[index2] == UINT_MAX) { ++index2; continue; } if (g.col[index1] < g.col[index2]) { ++index1; } else if (g.col[index1] > g.col[index2]) { ++index2; } else { ++commonNeighbors; ++index1; ++index2; } if (commonNeighbors == k-2) { break; } } int affected = 0; if (commonNeighbors < k-2) { g.col[index] = UINT_MAX; *done = 0; affected = 1; } g.affected[index] = g.affected[index] || affected; for (unsigned int n1 = g.rowPtr[v1]; n1 < g.rowPtr[v1+1]; ++n1) { g.affected[n1] = g.affected[index] || affected; } for (unsigned int n1 = g.rowPtr[v2]; n1 < g.rowPtr[v2+1]; ++n1) { g.affected[n1] = g.affected[index] || affected; } } } } void truss_gpu(Graph * g, int k) { Graph graph_d; int *done_d; Timer timer; startTime(&timer); hipMalloc((void**)&graph_d.row, g->nnzSize*sizeof(int)); hipMalloc((void**)&graph_d.col, g->nnzSize*sizeof(int)); hipMalloc((void**)&graph_d.affected, g->nnzSize*sizeof(int)); hipMalloc((void**)&graph_d.rowPtr, g->rowPtrSize*sizeof(int)); hipMalloc((void **)&done_d, sizeof(int)); hipDeviceSynchronize(); stopTime(&timer); printElapsedTime(timer, "Allocation"); startTime(&timer); hipMemcpy(graph_d.row, g->row, g->nnzSize*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(graph_d.col, g->col, g->nnzSize*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(graph_d.affected, g->affected, g->nnzSize*sizeof(int), hipMemcpyHostToDevice); hipMemcpy(graph_d.rowPtr, g->rowPtr, g->rowPtrSize*sizeof(int), hipMemcpyHostToDevice); graph_d.nnzSize = g->nnzSize; graph_d.rowPtrSize = g->rowPtrSize; hipDeviceSynchronize(); stopTime(&timer); printElapsedTime(timer, "Copy"); unsigned int numThreads = 1024; unsigned int numBlocks = (g->nnzSize + numThreads - 1) / numThreads; int *done = (int *)malloc(sizeof(int)); *done = 0; startTime(&timer); int iteration = 0; while (!*done) { *done = 1; hipMemcpy(done_d, done, sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( truss_kernel), dim3(numThreads), dim3(numBlocks), 0, 0, graph_d, k, done_d, iteration++); hipMemcpy(done, done_d, sizeof(int), hipMemcpyDeviceToHost); hipDeviceSynchronize(); } stopTime(&timer); printElapsedTime(timer, "Kernel"); startTime(&timer); hipMemcpy(g->row, graph_d.row, g->nnzSize*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(g->col, graph_d.col, g->nnzSize*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(g->rowPtr, graph_d.rowPtr, g->rowPtrSize*sizeof(int), hipMemcpyDeviceToHost); hipDeviceSynchronize(); stopTime(&timer); printElapsedTime(timer, "Copy back"); startTime(&timer); hipFree(graph_d.row); hipFree(graph_d.col); hipFree(graph_d.rowPtr); hipFree(&graph_d); hipFree(done_d); hipDeviceSynchronize(); stopTime(&timer); printElapsedTime(timer, "Free"); } void truss_cpu(Graph * g, int k) { int done = 0; while(!done){ done = 1; for (unsigned int i = 0; i < g->nnzSize; ++i) { unsigned int v1 = g->row[i]; unsigned int v2 = g->col[i]; if (g->col[i] == UINT_MAX){ continue; } unsigned int 
commonNeighbors = 0; int index1 = g->rowPtr[v1]; int index2 = g->rowPtr[v2]; while (index1 < g->rowPtr[v1+1] && index2 < g->rowPtr[v2+1]) { if (g->col[index1] == UINT_MAX) { ++index1; continue; } else if (g->col[index2] == UINT_MAX) { ++index2; continue; } if (g->col[index1] < g->col[index2]) { ++index1; } else if (g->col[index1] > g->col[index2]) { ++index2; } else { ++commonNeighbors; ++index1; ++index2; } if (commonNeighbors == k-2) { break; } } if (commonNeighbors < (k-2)) { g->col[i] = UINT_MAX; done = 0; } } } } void addEdge(Graph * g, int v1, int v2, double weight, int index) { if (g->nnzSize == 0) { g->row = (unsigned int *)malloc(BASE_SIZE * sizeof(int)); g->col = (unsigned int *)malloc(BASE_SIZE * sizeof(int)); g->nnzSize = BASE_SIZE; } if (index > g->nnzSize) { g->nnzSize *= 2; g->row = (unsigned int *)realloc(g->row, g->nnzSize * sizeof(int)); g->col = (unsigned int *)realloc(g->col, g->nnzSize * sizeof(int)); } g->row[index] = v1; g->col[index] = v2; } void done(Graph * g, int size) { g->nnzSize = size; } void createCSRFromCOO(Graph * g, int numRows) { // Allocate unsigned int *rowPtrs = (unsigned int *)calloc(numRows + 1, sizeof(unsigned int)); unsigned int *colIdxs = (unsigned int *)malloc(g->nnzSize * sizeof(unsigned int)); unsigned int *rowIdxs = (unsigned int *)malloc(g->nnzSize * sizeof(unsigned int)); //sort by col for (unsigned int i = 0 ; i < g->nnzSize; ++i) { for (unsigned int j = 0 ; j < g->nnzSize - i - 1; ++j) { if (g->col[j] > g->col[j+1]) { int rowTemp = g->row[j]; int colTemp = g->col[j]; g->row[j] = g->row[j+1]; g->col[j] = g->col[j+1]; g->row[j+1] = rowTemp; g->col[j+1] = colTemp; } } } // Histogram for (unsigned int i = 0; i <g->nnzSize; ++i) { unsigned int row = g->row[i]; rowPtrs[row]++; } // Prefix sum row unsigned int sumRow = 0; for (unsigned int row = 0; row < numRows; ++row) { unsigned int val = rowPtrs[row]; rowPtrs[row] = sumRow; sumRow += val; } rowPtrs[numRows] = sumRow; // Binning for (unsigned int index = 0; index < g->nnzSize; ++index) { unsigned int row = g->row[index]; unsigned int i = rowPtrs[row]++; colIdxs[i] = g->col[index]; rowIdxs[i] = g->row[index]; } // Restore row pointers for (unsigned int row = numRows - 1; row > 0; --row) { rowPtrs[row] = rowPtrs[row - 1]; } rowPtrs[0] = 0; g->rowPtrSize = numRows; g->rowPtr = rowPtrs; g->col = colIdxs; g->row = rowIdxs; } unsigned int *DFSUtil(Graph * g, int v, int visited[], int *size) { unsigned int *visiting = (unsigned int *)malloc(BASE_SIZE * sizeof(int)); visited[v] = 1; unsigned int i = 0; visiting[i++] = v; for (int j = g->rowPtr[v]; j < g->rowPtr[v + 1]; ++j) { if (g->col[j] != UINT_MAX && !visited[g->col[j]] ) { int size2 = BASE_SIZE; unsigned int *temp = DFSUtil(g, g->col[j], visited, &size2); int index = i; for (i; i < index + size2; ++i) { if (i >= *size) { *size *= 2; visiting = (unsigned int *)realloc(visiting, (*size) * sizeof(int *)); } visiting[i] = temp[i - index]; } } } *size = i; return visiting; } unsigned int **connectedComponents(Graph * g, int *returnSize, int *componentSizes) { unsigned int **dfs = (unsigned int **)malloc(BASE_SIZE * sizeof(int *)); int index = 0; int size = BASE_SIZE; int *visited = (int *)malloc((g->rowPtrSize) * sizeof(int)); for (int v = 0; v < g->rowPtrSize; v++){ visited[v] = 0; } for (int v = 0; v < g->rowPtrSize; v++) { if (visited[v] == 0) { if (index >= size) { size *= 2; dfs = (unsigned int **)realloc(dfs, size * sizeof(int *)); componentSizes = (int *)realloc(dfs, size * sizeof(int)); } int size = BASE_SIZE; dfs[index] = (DFSUtil(g, v, 
visited, &size)); componentSizes[index++] = size; } } *returnSize = index; return dfs; } void printTrussComponents(Graph * g, int k) { int size; int *componentSizes = (int *)malloc(BASE_SIZE * sizeof(int)); unsigned int **cc = connectedComponents(g, &size, componentSizes); for (int i = 0; i < size; ++i) { if (componentSizes[i] > 1) { printf("["); for (int j = 0; j < componentSizes[i]; ++j) { char end[3] = ", "; if (j == (componentSizes[i] - 1)) { strcpy(end, "]\n"); } printf("%d%s", cc[i][j], end); } } } }
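/*
 * Illustrative sketch (not part of the original file): the support computation in
 * truss_kernel above counts common neighbors of an edge's endpoints by merging two
 * sorted adjacency lists, skipping entries already deleted (marked UINT_MAX) and
 * stopping early once k-2 triangles are found. A host-only version of that merge,
 * with an illustrative helper name:
 */
#include <climits>
#include <cstdio>
#include <vector>

static unsigned CommonNeighbors(const std::vector<unsigned>& a,
                                const std::vector<unsigned>& b,
                                unsigned stop_at) {
  unsigned count = 0;
  size_t i = 0, j = 0;
  while (i < a.size() && j < b.size()) {
    if (a[i] == UINT_MAX) { ++i; continue; }   // deleted edge, skip
    if (b[j] == UINT_MAX) { ++j; continue; }
    if (a[i] < b[j]) ++i;
    else if (a[i] > b[j]) ++j;
    else { ++count; ++i; ++j; }                // common neighbor found
    if (count == stop_at) break;               // enough support already
  }
  return count;
}

int main() {
  std::vector<unsigned> n1 = {1, 2, 4, 7};
  std::vector<unsigned> n2 = {2, UINT_MAX, 4, 9};
  // For k = 4 an edge needs at least k-2 = 2 triangles to survive.
  printf("%u\n", CommonNeighbors(n1, n2, 2));  // prints 2
  return 0;
}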
d3a87ddb3a6ebef1edfc0be18097319bcbfd7bef.cu
#include "graph.h" #include "../timer.h" #include <string> __global__ void truss_kernel(Graph g, int k, int *done, int iteration) { unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < g.nnzSize) { unsigned int v1 = g.row[index]; unsigned int v2 = g.col[index]; if (v2 != UINT_MAX/* && (g.affected[index] || iteration == 0)*/) { unsigned int commonNeighbors = 0; int index1 = g.rowPtr[v1]; int index2 = g.rowPtr[v2]; while (index1 < g.rowPtr[v1+1] && index2 < g.rowPtr[v2+1]) { if (g.col[index1] == UINT_MAX) { ++index1; continue; } else if (g.col[index2] == UINT_MAX) { ++index2; continue; } if (g.col[index1] < g.col[index2]) { ++index1; } else if (g.col[index1] > g.col[index2]) { ++index2; } else { ++commonNeighbors; ++index1; ++index2; } if (commonNeighbors == k-2) { break; } } int affected = 0; if (commonNeighbors < k-2) { g.col[index] = UINT_MAX; *done = 0; affected = 1; } g.affected[index] = g.affected[index] || affected; for (unsigned int n1 = g.rowPtr[v1]; n1 < g.rowPtr[v1+1]; ++n1) { g.affected[n1] = g.affected[index] || affected; } for (unsigned int n1 = g.rowPtr[v2]; n1 < g.rowPtr[v2+1]; ++n1) { g.affected[n1] = g.affected[index] || affected; } } } } void truss_gpu(Graph * g, int k) { Graph graph_d; int *done_d; Timer timer; startTime(&timer); cudaMalloc((void**)&graph_d.row, g->nnzSize*sizeof(int)); cudaMalloc((void**)&graph_d.col, g->nnzSize*sizeof(int)); cudaMalloc((void**)&graph_d.affected, g->nnzSize*sizeof(int)); cudaMalloc((void**)&graph_d.rowPtr, g->rowPtrSize*sizeof(int)); cudaMalloc((void **)&done_d, sizeof(int)); cudaDeviceSynchronize(); stopTime(&timer); printElapsedTime(timer, "Allocation"); startTime(&timer); cudaMemcpy(graph_d.row, g->row, g->nnzSize*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(graph_d.col, g->col, g->nnzSize*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(graph_d.affected, g->affected, g->nnzSize*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(graph_d.rowPtr, g->rowPtr, g->rowPtrSize*sizeof(int), cudaMemcpyHostToDevice); graph_d.nnzSize = g->nnzSize; graph_d.rowPtrSize = g->rowPtrSize; cudaDeviceSynchronize(); stopTime(&timer); printElapsedTime(timer, "Copy"); unsigned int numThreads = 1024; unsigned int numBlocks = (g->nnzSize + numThreads - 1) / numThreads; int *done = (int *)malloc(sizeof(int)); *done = 0; startTime(&timer); int iteration = 0; while (!*done) { *done = 1; cudaMemcpy(done_d, done, sizeof(int), cudaMemcpyHostToDevice); truss_kernel<<<numThreads, numBlocks>>>(graph_d, k, done_d, iteration++); cudaMemcpy(done, done_d, sizeof(int), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); } stopTime(&timer); printElapsedTime(timer, "Kernel"); startTime(&timer); cudaMemcpy(g->row, graph_d.row, g->nnzSize*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(g->col, graph_d.col, g->nnzSize*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(g->rowPtr, graph_d.rowPtr, g->rowPtrSize*sizeof(int), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); stopTime(&timer); printElapsedTime(timer, "Copy back"); startTime(&timer); cudaFree(graph_d.row); cudaFree(graph_d.col); cudaFree(graph_d.rowPtr); cudaFree(&graph_d); cudaFree(done_d); cudaDeviceSynchronize(); stopTime(&timer); printElapsedTime(timer, "Free"); } void truss_cpu(Graph * g, int k) { int done = 0; while(!done){ done = 1; for (unsigned int i = 0; i < g->nnzSize; ++i) { unsigned int v1 = g->row[i]; unsigned int v2 = g->col[i]; if (g->col[i] == UINT_MAX){ continue; } unsigned int commonNeighbors = 0; int index1 = g->rowPtr[v1]; int index2 = g->rowPtr[v2]; while (index1 < 
g->rowPtr[v1+1] && index2 < g->rowPtr[v2+1]) { if (g->col[index1] == UINT_MAX) { ++index1; continue; } else if (g->col[index2] == UINT_MAX) { ++index2; continue; } if (g->col[index1] < g->col[index2]) { ++index1; } else if (g->col[index1] > g->col[index2]) { ++index2; } else { ++commonNeighbors; ++index1; ++index2; } if (commonNeighbors == k-2) { break; } } if (commonNeighbors < (k-2)) { g->col[i] = UINT_MAX; done = 0; } } } } void addEdge(Graph * g, int v1, int v2, double weight, int index) { if (g->nnzSize == 0) { g->row = (unsigned int *)malloc(BASE_SIZE * sizeof(int)); g->col = (unsigned int *)malloc(BASE_SIZE * sizeof(int)); g->nnzSize = BASE_SIZE; } if (index > g->nnzSize) { g->nnzSize *= 2; g->row = (unsigned int *)realloc(g->row, g->nnzSize * sizeof(int)); g->col = (unsigned int *)realloc(g->col, g->nnzSize * sizeof(int)); } g->row[index] = v1; g->col[index] = v2; } void done(Graph * g, int size) { g->nnzSize = size; } void createCSRFromCOO(Graph * g, int numRows) { // Allocate unsigned int *rowPtrs = (unsigned int *)calloc(numRows + 1, sizeof(unsigned int)); unsigned int *colIdxs = (unsigned int *)malloc(g->nnzSize * sizeof(unsigned int)); unsigned int *rowIdxs = (unsigned int *)malloc(g->nnzSize * sizeof(unsigned int)); //sort by col for (unsigned int i = 0 ; i < g->nnzSize; ++i) { for (unsigned int j = 0 ; j < g->nnzSize - i - 1; ++j) { if (g->col[j] > g->col[j+1]) { int rowTemp = g->row[j]; int colTemp = g->col[j]; g->row[j] = g->row[j+1]; g->col[j] = g->col[j+1]; g->row[j+1] = rowTemp; g->col[j+1] = colTemp; } } } // Histogram for (unsigned int i = 0; i <g->nnzSize; ++i) { unsigned int row = g->row[i]; rowPtrs[row]++; } // Prefix sum row unsigned int sumRow = 0; for (unsigned int row = 0; row < numRows; ++row) { unsigned int val = rowPtrs[row]; rowPtrs[row] = sumRow; sumRow += val; } rowPtrs[numRows] = sumRow; // Binning for (unsigned int index = 0; index < g->nnzSize; ++index) { unsigned int row = g->row[index]; unsigned int i = rowPtrs[row]++; colIdxs[i] = g->col[index]; rowIdxs[i] = g->row[index]; } // Restore row pointers for (unsigned int row = numRows - 1; row > 0; --row) { rowPtrs[row] = rowPtrs[row - 1]; } rowPtrs[0] = 0; g->rowPtrSize = numRows; g->rowPtr = rowPtrs; g->col = colIdxs; g->row = rowIdxs; } unsigned int *DFSUtil(Graph * g, int v, int visited[], int *size) { unsigned int *visiting = (unsigned int *)malloc(BASE_SIZE * sizeof(int)); visited[v] = 1; unsigned int i = 0; visiting[i++] = v; for (int j = g->rowPtr[v]; j < g->rowPtr[v + 1]; ++j) { if (g->col[j] != UINT_MAX && !visited[g->col[j]] ) { int size2 = BASE_SIZE; unsigned int *temp = DFSUtil(g, g->col[j], visited, &size2); int index = i; for (i; i < index + size2; ++i) { if (i >= *size) { *size *= 2; visiting = (unsigned int *)realloc(visiting, (*size) * sizeof(int *)); } visiting[i] = temp[i - index]; } } } *size = i; return visiting; } unsigned int **connectedComponents(Graph * g, int *returnSize, int *componentSizes) { unsigned int **dfs = (unsigned int **)malloc(BASE_SIZE * sizeof(int *)); int index = 0; int size = BASE_SIZE; int *visited = (int *)malloc((g->rowPtrSize) * sizeof(int)); for (int v = 0; v < g->rowPtrSize; v++){ visited[v] = 0; } for (int v = 0; v < g->rowPtrSize; v++) { if (visited[v] == 0) { if (index >= size) { size *= 2; dfs = (unsigned int **)realloc(dfs, size * sizeof(int *)); componentSizes = (int *)realloc(dfs, size * sizeof(int)); } int size = BASE_SIZE; dfs[index] = (DFSUtil(g, v, visited, &size)); componentSizes[index++] = size; } } *returnSize = index; return dfs; } void 
printTrussComponents(Graph * g, int k) { int size; int *componentSizes = (int *)malloc(BASE_SIZE * sizeof(int)); unsigned int **cc = connectedComponents(g, &size, componentSizes); for (int i = 0; i < size; ++i) { if (componentSizes[i] > 1) { printf("["); for (int j = 0; j < componentSizes[i]; ++j) { char end[3] = ", "; if (j == (componentSizes[i] - 1)) { strcpy(end, "]\n"); } printf("%d%s", cc[i][j], end); } } } }
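/*
 * Illustrative sketch (not part of the original file): truss_gpu above launches with
 * truss_kernel<<<numThreads, numBlocks>>>, i.e. the 1024-thread constant is used as the
 * grid size and the computed block count as the block size. That still covers all edges
 * while numBlocks <= 1024 (roughly a million edges), but the launch fails beyond that
 * because a block is capped at 1024 threads. The conventional edge-parallel launch,
 * grid size first and block size second, looks like this (names are illustrative only):
 */
__global__ void edge_kernel(unsigned* out, unsigned nnz) {
  unsigned index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < nnz) {
    out[index] = index;  // one thread per edge
  }
}

void launch_edge_kernel(unsigned* out_d, unsigned nnz) {
  const unsigned numThreads = 1024;                                // threads per block
  const unsigned numBlocks = (nnz + numThreads - 1) / numThreads;  // blocks in the grid
  edge_kernel<<<numBlocks, numThreads>>>(out_d, nnz);              // grid first, block second
}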
a4d1670cbbf7f5782cfa9d4d4f24294d2540d74d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "SoftMax.h" #include "../common/cuBase.h" #include "../common/cuMatrix.h" #include "../common/Config.h" #include "../layers/BranchLayer.h" #include <math.h> /* * blocks : cuSoftMaxP->rows * threads: cuSoftMaxP->cols * shared : sizeof(float) * cuSoftMaxP->cols * 2 */ __global__ void g_getSoftMaxP(float* softMaxP, float* b, int cols) { int bid = blockIdx.x; extern __shared__ float _share[]; float * _max = _share; float * _sum = _share + blockDim.x; float* sp = softMaxP + bid * cols; _sum[threadIdx.x] = 0.0; _max[threadIdx.x] = -100000000.0; for(int tid = 0; tid < cols; tid += blockDim.x){ int id = tid + threadIdx.x; if(id < cols){ sp[id] += b[id]; _max[threadIdx.x] = max(_max[threadIdx.x], sp[id]); } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { if(_max[threadIdx.x] < _max[threadIdx.x + skip]) { _max[threadIdx.x] = _max[threadIdx.x + skip]; } } len = (len + 1) >> 1; } __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x){ int id = tid + threadIdx.x; if(id < cols){ sp[id] -= _max[0]; sp[id] = exp(sp[id]); _sum[threadIdx.x] += sp[id]; } } __syncthreads(); len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { _sum[threadIdx.x] += _sum[threadIdx.x + skip]; } len = (len + 1) >> 1; } __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x){ int id = tid + threadIdx.x; if(id < cols){ sp[id] /= _sum[0]; } } } __global__ void g_getSoftMaxDelta(float* softMaxDelta, float* softMaxP, float* groudTruth, int len) { for(int i = 0; i < len; i += blockDim.x) { int id = i + threadIdx.x; if(id < len) { softMaxDelta[id] = softMaxP[id] - groudTruth[id]; } } } /* */ __global__ void g_getSmrWgrad(float* wgrad, float* weight, float lambda, int len, int batch) { for(int i = 0; i < len; i += blockDim.x) { int id = i + threadIdx.x; if(id < len) { wgrad[id] = lambda * weight[id] + wgrad[id] / batch; } } } void SoftMax::feedforward() { dim3 block = inputs->rows; dim3 thread = min(512, inputs->cols); //convert hipLaunchKernelGGL(( g_convert), dim3(block), dim3(thread), 0, 0, inputs->getDev(), inputs_format->getDev(), inputs->rows, inputs->cols, inputs->channels); checkCudaErrors(hipDeviceSynchronize()); getLastCudaError("g_convert"); matrixMulTB(inputs_format, w, outputs); int threads = ::min(512, outputs->cols); hipLaunchKernelGGL(( g_getSoftMaxP), dim3(outputs->rows), dim3(threads), sizeof(float) * threads * 2, 0, outputs->getDev(), b->getDev(), outputs->cols); hipDeviceSynchronize(); getLastCudaError("g_getSoftMaxP"); } void SoftMax::backpropagation() { hipLaunchKernelGGL(( g_getCost_1), dim3(dim3(1)), dim3(dim3(256)), sizeof(float) * 256, 0, outputs->getDev(), groudTruth->getDev(), cost->getDev(), predict, outputs->rows, outputs->cols, batch); hipDeviceSynchronize(); getLastCudaError("g_getCost_1"); hipLaunchKernelGGL(( g_getSoftMaxDelta), dim3(dim3(1)), dim3(dim3(256)), 0, 0, curDelta->getDev(), outputs->getDev(), groudTruth->getDev(), curDelta->getLen()); hipDeviceSynchronize(); matrixMul(curDelta, w, preDelta_format); dim3 block = batch; dim3 thread= min(512, preDelta->channels * preDelta->cols); hipLaunchKernelGGL(( g_preDeltaFormat), dim3(block), dim3(thread), 0, 0, preDelta_format->getDev(), preDelta->getDev(), preDelta->rows, preDelta->cols, preDelta->channels); hipDeviceSynchronize(); getLastCudaError("g_preDeltaFormat"); } void SoftMax::getGrad() { 
matrixMulTA(curDelta, inputs_format, wgrad); hipLaunchKernelGGL(( g_getSmrWgrad), dim3(dim3(1)), dim3(dim3(256)), 0, 0, wgrad->getDev(), w->getDev(), lambda, wgrad->getLen(), batch); hipDeviceSynchronize(); if(curDelta->rows > MAX_THREADS) { printf("getSoftMaxDelta g_getBgrad > MAX_THREADS\n"); exit(0); } hipLaunchKernelGGL(( g_getBgrad), dim3(dim3(curDelta->cols)), dim3(dim3(curDelta->rows)), sizeof(float) * curDelta->rows, 0, curDelta->getDev(), bgrad->getDev(), batch); hipDeviceSynchronize(); getLastCudaError("g_getBgrad"); } void SoftMax::updateWeight() { hipLaunchKernelGGL(( g_vecAdd), dim3(dim3(min((momentum_w->getLen() + 255) / 256, 5120))), dim3( dim3(256)), 0, 0, momentum_w->getDev(), wgrad->getDev(), w->getDev(), momentum_b->getDev(), bgrad->getDev(), b->getDev(), wgrad->getLen(), bgrad->getLen(), Config::instance()->getMomentum(), Config::instance()->getLrate(), Config::instance()->getLrate()); } void SoftMax::clearMomentum() { momentum_b->gpuClear(); momentum_w->gpuClear(); } void SoftMax::calCost() { hipLaunchKernelGGL(( g_getCost_2), dim3(dim3(1)), dim3(dim3(256)), sizeof(float) * 256, 0, cost->getDev(), w->getDev(), lambda, w->getLen()); hipDeviceSynchronize(); getLastCudaError("g_getCost_2"); } cuMatrix<float>* SoftMax::getOutputs() { return outputs; } cuMatrix<float>* SoftMax::getCurDelta() { return curDelta; } void SoftMax::setPreDelta(cuMatrix<float>* _preDelta) { preDelta = _preDelta; preDelta_format = new cuMatrix<float>(preDelta->rows, preDelta->cols * preDelta->channels, 1); } void SoftMax::initRandom() { //srand(clock()); float initW = Config::instance()->getLayerByName(m_name)->m_initW; if(Config::instance()->getLayerByName(m_name)->isGaussian()){ float epsilon = initW; for(int c = 0; c < w->channels; c++){ float r1 = 0.01f + 5.0f * (rand()) / RAND_MAX; float r2 = 0.01f + 5.0f * (rand()) / RAND_MAX; createGaussian(w->getHost() + c * w->getArea(), r1,r2, w->rows, w->cols, w->channels, epsilon); } } else{ for(int j = 0; j < w->getLen(); j++){ w->getHost()[j] = initW * (2.0f * rand() / RAND_MAX - 1.0f); } } w->toGpu(); } void SoftMax::initFromCheckpoint(FILE* file) { float val = 0.0; for(int i = 0; i < w->rows; i++){ for(int j=0; j< w->cols; j++){ fscanf(file, "%f", &val); w->set(i,j,0,val); } } for(int i = 0; i < b->rows; i++){ for(int j = 0; j < b->cols; j++){ fscanf(file, "%f ", &val); b->set(i,j,0, val); } } w->toGpu(); b->toGpu(); } void SoftMax::save(FILE* file) { w->toCpu(); b->toCpu(); for(int c = 0; c < w->channels; c++){ for(int i = 0; i< w->rows; i++){ for(int j=0; j< w->cols; j++){ fprintf(file, "%f ", w->get(i,j,c)); } } } for(int c = 0; c < b->channels; c++){ for(int i = 0; i < b->rows; i++){ for(int j = 0; j < b->cols; j++){ fprintf(file, "%f ", b->get(i,j,c)); } } } } SoftMax::SoftMax(std::string name) { m_name = name; ConfigFC* config = (ConfigFC*)Config::instance()->getLayerByName(m_name); LayerBase * preLayer = (LayerBase*)Layers::instance()->get(config->m_input); inputs = preLayer->getOutputs(); if(inputs == NULL){ /*inputs = NULL the type must be BranchLayers*/ Assert(Config::instance()->getLayerByName(config->m_input)->isBranchLayer()); Assert(config->m_subInput != std::string("NULL")); BranchLayer* bl = static_cast<BranchLayer*>(preLayer); inputs = bl->getSubOutput(config->m_subInput); preDelta = bl->getSubCurDelta(config->m_subInput); }else{ preDelta = preLayer->getCurDelta(); } batch = Config::instance()->getBatchSize(); lambda = config->m_weightDecay; inputsize = inputs->cols * inputs->channels; outputsize = config->m_numFullConnectNeurons; 
NON_LINEARITY = config->m_nonLinearity; inputs_format = new cuMatrix<float>(inputs->rows, inputs->cols * inputs->channels, 1); outputs = new cuMatrix<float>(batch, outputsize, 1); curDelta= new cuMatrix<float>(batch, outputsize, 1); this->setPreDelta(preDelta); w = new cuMatrix<float>(outputsize, inputsize, 1); wgrad = new cuMatrix<float>(outputsize, inputsize, 1); b = new cuMatrix<float>(outputsize, 1, 1); bgrad = new cuMatrix<float>(outputsize, 1, 1); momentum_w = new cuMatrix<float>(outputsize, inputsize, 1); momentum_b = new cuMatrix<float>(outputsize, 1, 1); groudTruth = new cuMatrix<float>(batch, outputsize, 1); this->initRandom(); Layers::instance()->set(m_name, this); }
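/*
 * Illustrative sketch (not part of the original file): g_getSoftMaxP above adds the bias,
 * subtracts the per-row maximum for numerical stability, exponentiates, and divides by
 * the row sum (using shared-memory tree reductions for the max and the sum). The same
 * computation for a single row on the host:
 */
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

static void SoftmaxRow(std::vector<float>& p, const std::vector<float>& b) {
  for (size_t i = 0; i < p.size(); ++i) p[i] += b[i];          // add bias
  const float m = *std::max_element(p.begin(), p.end());       // row max
  float sum = 0.f;
  for (float& v : p) { v = std::exp(v - m); sum += v; }        // shifted exponentials
  for (float& v : p) v /= sum;                                 // normalize
}

int main() {
  std::vector<float> p = {1.0f, 2.0f, 3.0f};
  std::vector<float> b = {0.0f, 0.0f, 0.0f};
  SoftmaxRow(p, b);
  printf("%.3f %.3f %.3f\n", p[0], p[1], p[2]);  // ~0.090 0.245 0.665
  return 0;
}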
a4d1670cbbf7f5782cfa9d4d4f24294d2540d74d.cu
#include "SoftMax.h" #include "../common/cuBase.h" #include "../common/cuMatrix.h" #include "../common/Config.h" #include "../layers/BranchLayer.h" #include <math.h> /* * blocks : cuSoftMaxP->rows * threads: cuSoftMaxP->cols * shared : sizeof(float) * cuSoftMaxP->cols * 2 */ __global__ void g_getSoftMaxP(float* softMaxP, float* b, int cols) { int bid = blockIdx.x; extern __shared__ float _share[]; float * _max = _share; float * _sum = _share + blockDim.x; float* sp = softMaxP + bid * cols; _sum[threadIdx.x] = 0.0; _max[threadIdx.x] = -100000000.0; for(int tid = 0; tid < cols; tid += blockDim.x){ int id = tid + threadIdx.x; if(id < cols){ sp[id] += b[id]; _max[threadIdx.x] = max(_max[threadIdx.x], sp[id]); } } __syncthreads(); int len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { if(_max[threadIdx.x] < _max[threadIdx.x + skip]) { _max[threadIdx.x] = _max[threadIdx.x + skip]; } } len = (len + 1) >> 1; } __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x){ int id = tid + threadIdx.x; if(id < cols){ sp[id] -= _max[0]; sp[id] = exp(sp[id]); _sum[threadIdx.x] += sp[id]; } } __syncthreads(); len = blockDim.x; while(len != 1) { __syncthreads(); int skip = (len + 1) >> 1; if(threadIdx.x < (len >> 1)) { _sum[threadIdx.x] += _sum[threadIdx.x + skip]; } len = (len + 1) >> 1; } __syncthreads(); for(int tid = 0; tid < cols; tid += blockDim.x){ int id = tid + threadIdx.x; if(id < cols){ sp[id] /= _sum[0]; } } } __global__ void g_getSoftMaxDelta(float* softMaxDelta, float* softMaxP, float* groudTruth, int len) { for(int i = 0; i < len; i += blockDim.x) { int id = i + threadIdx.x; if(id < len) { softMaxDelta[id] = softMaxP[id] - groudTruth[id]; } } } /* */ __global__ void g_getSmrWgrad(float* wgrad, float* weight, float lambda, int len, int batch) { for(int i = 0; i < len; i += blockDim.x) { int id = i + threadIdx.x; if(id < len) { wgrad[id] = lambda * weight[id] + wgrad[id] / batch; } } } void SoftMax::feedforward() { dim3 block = inputs->rows; dim3 thread = min(512, inputs->cols); //convert g_convert<<<block, thread>>>( inputs->getDev(), inputs_format->getDev(), inputs->rows, inputs->cols, inputs->channels); checkCudaErrors(cudaDeviceSynchronize()); getLastCudaError("g_convert"); matrixMulTB(inputs_format, w, outputs); int threads = std::min(512, outputs->cols); g_getSoftMaxP<<<outputs->rows, threads, sizeof(float) * threads * 2>>>( outputs->getDev(), b->getDev(), outputs->cols); cudaDeviceSynchronize(); getLastCudaError("g_getSoftMaxP"); } void SoftMax::backpropagation() { g_getCost_1<<<dim3(1), dim3(256), sizeof(float) * 256>>>(outputs->getDev(), groudTruth->getDev(), cost->getDev(), predict, outputs->rows, outputs->cols, batch); cudaDeviceSynchronize(); getLastCudaError("g_getCost_1"); g_getSoftMaxDelta<<<dim3(1), dim3(256)>>>(curDelta->getDev(), outputs->getDev(), groudTruth->getDev(), curDelta->getLen()); cudaDeviceSynchronize(); matrixMul(curDelta, w, preDelta_format); dim3 block = batch; dim3 thread= min(512, preDelta->channels * preDelta->cols); g_preDeltaFormat<<<block, thread>>>( preDelta_format->getDev(), preDelta->getDev(), preDelta->rows, preDelta->cols, preDelta->channels); cudaDeviceSynchronize(); getLastCudaError("g_preDeltaFormat"); } void SoftMax::getGrad() { matrixMulTA(curDelta, inputs_format, wgrad); g_getSmrWgrad<<<dim3(1), dim3(256)>>>(wgrad->getDev(), w->getDev(), lambda, wgrad->getLen(), batch); cudaDeviceSynchronize(); if(curDelta->rows > MAX_THREADS) { printf("getSoftMaxDelta g_getBgrad > MAX_THREADS\n"); 
exit(0); } g_getBgrad<<<dim3(curDelta->cols), dim3(curDelta->rows), sizeof(float) * curDelta->rows>>>( curDelta->getDev(), bgrad->getDev(), batch); cudaDeviceSynchronize(); getLastCudaError("g_getBgrad"); } void SoftMax::updateWeight() { g_vecAdd<<<dim3(min((momentum_w->getLen() + 255) / 256, 5120)), dim3(256)>>>( momentum_w->getDev(), wgrad->getDev(), w->getDev(), momentum_b->getDev(), bgrad->getDev(), b->getDev(), wgrad->getLen(), bgrad->getLen(), Config::instance()->getMomentum(), Config::instance()->getLrate(), Config::instance()->getLrate()); } void SoftMax::clearMomentum() { momentum_b->gpuClear(); momentum_w->gpuClear(); } void SoftMax::calCost() { g_getCost_2<<<dim3(1), dim3(256), sizeof(float) * 256>>>(cost->getDev(), w->getDev(), lambda, w->getLen()); cudaDeviceSynchronize(); getLastCudaError("g_getCost_2"); } cuMatrix<float>* SoftMax::getOutputs() { return outputs; } cuMatrix<float>* SoftMax::getCurDelta() { return curDelta; } void SoftMax::setPreDelta(cuMatrix<float>* _preDelta) { preDelta = _preDelta; preDelta_format = new cuMatrix<float>(preDelta->rows, preDelta->cols * preDelta->channels, 1); } void SoftMax::initRandom() { //srand(clock()); float initW = Config::instance()->getLayerByName(m_name)->m_initW; if(Config::instance()->getLayerByName(m_name)->isGaussian()){ float epsilon = initW; for(int c = 0; c < w->channels; c++){ float r1 = 0.01f + 5.0f * (rand()) / RAND_MAX; float r2 = 0.01f + 5.0f * (rand()) / RAND_MAX; createGaussian(w->getHost() + c * w->getArea(), r1,r2, w->rows, w->cols, w->channels, epsilon); } } else{ for(int j = 0; j < w->getLen(); j++){ w->getHost()[j] = initW * (2.0f * rand() / RAND_MAX - 1.0f); } } w->toGpu(); } void SoftMax::initFromCheckpoint(FILE* file) { float val = 0.0; for(int i = 0; i < w->rows; i++){ for(int j=0; j< w->cols; j++){ fscanf(file, "%f", &val); w->set(i,j,0,val); } } for(int i = 0; i < b->rows; i++){ for(int j = 0; j < b->cols; j++){ fscanf(file, "%f ", &val); b->set(i,j,0, val); } } w->toGpu(); b->toGpu(); } void SoftMax::save(FILE* file) { w->toCpu(); b->toCpu(); for(int c = 0; c < w->channels; c++){ for(int i = 0; i< w->rows; i++){ for(int j=0; j< w->cols; j++){ fprintf(file, "%f ", w->get(i,j,c)); } } } for(int c = 0; c < b->channels; c++){ for(int i = 0; i < b->rows; i++){ for(int j = 0; j < b->cols; j++){ fprintf(file, "%f ", b->get(i,j,c)); } } } } SoftMax::SoftMax(std::string name) { m_name = name; ConfigFC* config = (ConfigFC*)Config::instance()->getLayerByName(m_name); LayerBase * preLayer = (LayerBase*)Layers::instance()->get(config->m_input); inputs = preLayer->getOutputs(); if(inputs == NULL){ /*inputs = NULL the type must be BranchLayers*/ Assert(Config::instance()->getLayerByName(config->m_input)->isBranchLayer()); Assert(config->m_subInput != std::string("NULL")); BranchLayer* bl = static_cast<BranchLayer*>(preLayer); inputs = bl->getSubOutput(config->m_subInput); preDelta = bl->getSubCurDelta(config->m_subInput); }else{ preDelta = preLayer->getCurDelta(); } batch = Config::instance()->getBatchSize(); lambda = config->m_weightDecay; inputsize = inputs->cols * inputs->channels; outputsize = config->m_numFullConnectNeurons; NON_LINEARITY = config->m_nonLinearity; inputs_format = new cuMatrix<float>(inputs->rows, inputs->cols * inputs->channels, 1); outputs = new cuMatrix<float>(batch, outputsize, 1); curDelta= new cuMatrix<float>(batch, outputsize, 1); this->setPreDelta(preDelta); w = new cuMatrix<float>(outputsize, inputsize, 1); wgrad = new cuMatrix<float>(outputsize, inputsize, 1); b = new 
cuMatrix<float>(outputsize, 1, 1); bgrad = new cuMatrix<float>(outputsize, 1, 1); momentum_w = new cuMatrix<float>(outputsize, inputsize, 1); momentum_b = new cuMatrix<float>(outputsize, 1, 1); groudTruth = new cuMatrix<float>(batch, outputsize, 1); this->initRandom(); Layers::instance()->set(m_name, this); }
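/*
 * Illustrative sketch (not part of the original file): the backward pass above uses the
 * standard softmax-with-cross-entropy gradient (delta = p - one_hot) in g_getSoftMaxDelta,
 * and an L2-regularized, batch-averaged weight gradient (wgrad = lambda*w + wgrad/batch)
 * in g_getSmrWgrad. The same two formulas evaluated on the host with made-up numbers:
 */
#include <cstdio>
#include <vector>

int main() {
  const int batch = 2;
  const float lambda = 1e-4f;

  std::vector<float> p = {0.1f, 0.7f, 0.2f};   // predicted probabilities for one sample
  std::vector<float> y = {0.f, 1.f, 0.f};      // one-hot ground truth
  std::vector<float> delta(3);
  for (int j = 0; j < 3; ++j) delta[j] = p[j] - y[j];           // softmax/CE gradient

  std::vector<float> w = {0.5f, -0.3f, 0.1f};                   // a few weights
  std::vector<float> wgrad = {0.2f, 0.4f, -0.1f};               // gradient summed over the batch
  for (int j = 0; j < 3; ++j) wgrad[j] = lambda * w[j] + wgrad[j] / batch;

  printf("delta: %.2f %.2f %.2f\n", delta[0], delta[1], delta[2]);  // 0.10 -0.30 0.20
  printf("wgrad: %.5f %.5f %.5f\n", wgrad[0], wgrad[1], wgrad[2]);
  return 0;
}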
f56500825ceb0212ab39505cc8ebbaf248195cfc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. SPDX-License-Identifier: Apache-2.0 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/inference/tensorrt/plugin/group_norm_op_plugin.h" #include "paddle/phi/kernels/group_norm_kernel.h" #include <hipcub/hipcub.hpp> #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/common/layout.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { using DataLayout = phi::DataLayout; static inline int32_t divUp(int32_t m, int32_t n) { return (m + n - 1) / n; } static inline __device__ __host__ float sigmoid(float x) { return 1.F / (1.F + expf(-x)); } struct GroupSums { // Is it the 1st element of the group? int32_t flag; // The sum. float sum; // The sum of squares. float sumSq; }; struct GroupSumsOp { inline __device__ GroupSums operator()(GroupSums const &a, GroupSums const &b) { GroupSums dst; dst.sum = b.flag ? b.sum : (a.sum + b.sum); dst.sumSq = b.flag ? b.sumSq : (a.sumSq + b.sumSq); dst.flag = a.flag + b.flag; return dst; } }; static int32_t findMaxDivisor(int32_t n, int32_t maxAllowedDivisor) { int32_t maxDivisor = -1; for (int32_t i = 1; i <= std::sqrt(n); i++) { if (n % i == 0) { int32_t divisor1 = n / i; int32_t divisor2 = i; if (divisor1 > maxDivisor && divisor1 < maxAllowedDivisor) { maxDivisor = divisor1; } if (divisor2 > maxDivisor && divisor2 < maxAllowedDivisor) { maxDivisor = divisor2; } } } return maxDivisor; } template <int tTHREADS_PER_BLOCK> __global__ void groupNormNHWCSumKernel(const GroupNormNHWCParams params) { // The object in charge of doing the sums for the different blocks. typedef hipcub::BlockScan<GroupSums, tTHREADS_PER_BLOCK> BlockScan; // Allocate shared memory for BlockScan. __shared__ typename BlockScan::TempStorage tempStorage; // Allocate shared memory for the groups. We could reduce the amount of shared // memory reserved. __shared__ float2 smem[tTHREADS_PER_BLOCK]; // The instance in the batch. int32_t ni = blockIdx.z; // The channel loaded by that thread (2 channels per thread for F16x2). int32_t ci = blockIdx.x * params.cPerBlock + threadIdx.x * 2; // The first activation loaded by that block. int32_t hwBegin = blockIdx.y * params.hwPerBlock; // The last activation loaded by that block. int32_t hwEnd = min(hwBegin + params.hwPerBlock, params.hw); // The sums. float sum = 0.F; float sumSq = 0.F; // Iterate over the activations to compute the sums. for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) { // The offset. int64_t offset = static_cast<int64_t>(ni) * params.hwc + static_cast<int64_t>(hwi) * params.c + ci; // Fetch two channels per thread. __half2 h2(0, 0); if (ci < params.c) { h2 = *reinterpret_cast<__half2 const *>(&params.srcX[offset]); } // Extract the two half values. float2 f2 = __half22float2(h2); // Update the sum. 
sum += f2.x + f2.y; // Update the sum of squares. sumSq += f2.x * f2.x + f2.y * f2.y; } // The group that thread works on and the channel in the group (modulus). int32_t gi = threadIdx.x * 2 / params.cPerGroup; int32_t cj = threadIdx.x * 2 - params.cPerGroup * gi; // The data for the summations. GroupSums inp{cj == 0 ? 1 : 0, sum, sumSq}; // Do the segmented scan. GroupSums out; BlockScan(tempStorage).InclusiveScan(inp, out, GroupSumsOp()); // Store the results for the groups in shared memory (to produce coalesced // stores later). // 2 channels per thread if (cj == params.cPerGroup - 2) { smem[gi] = make_float2(out.sum, out.sumSq); } // Make sure the data is in shared memory. __syncthreads(); // The global group index. int32_t gj = blockIdx.x * params.groupsPerBlock + threadIdx.x; // Threads that have nothing left to do, exit. if (threadIdx.x >= params.groupsPerBlock || gj >= params.groups) { return; } // The first threads (those storing to global memory, load the values). float2 sums = smem[threadIdx.x]; // Store to global memory. atomicAdd(&params.redBuffer[(2 * ni + 0) * params.groups + gj], sums.x); atomicAdd(&params.redBuffer[(2 * ni + 1) * params.groups + gj], sums.y); } void groupNormNHWCSum(const GroupNormNHWCParams &params, hipStream_t stream) { dim3 grid; // The number of blocks to compute all the channels. grid.x = params.c / params.cPerBlock; // The number of blocks to compute all the activations in a given instance. grid.y = divUp(params.hw, params.hwPerBlock); // The number of instances. grid.z = params.n; switch (params.cPerBlock) { case 320: hipLaunchKernelGGL(( groupNormNHWCSumKernel<160>), dim3(grid), dim3(160), 0, stream, params); break; case 480: hipLaunchKernelGGL(( groupNormNHWCSumKernel<256>), dim3(grid), dim3(256), 0, stream, params); break; case 256: hipLaunchKernelGGL(( groupNormNHWCSumKernel<128>), dim3(grid), dim3(128), 0, stream, params); break; case 128: hipLaunchKernelGGL(( groupNormNHWCSumKernel<64>), dim3(grid), dim3(64), 0, stream, params); break; case 8: hipLaunchKernelGGL(( groupNormNHWCSumKernel<4>), dim3(grid), dim3(4), 0, stream, params); break; default: PADDLE_THROW(platform::errors::Fatal( "The function groupNormNHWCSum of GroupNormPlugin TRT Plugin " "encounter error")); } } template <int tTHREADS_PER_BLOCK> __global__ void groupNormNHWCScaleKernel(const GroupNormNHWCParams params) { // The instance in the batch. int32_t ni = blockIdx.z; // The channel loaded by that thread (2 channels per thread for F16x2). int32_t ci = blockIdx.x * params.cPerBlock + threadIdx.x * 2; // The group that thread works on and the channel in the group (modulus). int32_t gi = ci / params.cPerGroup; // Load the sum and sum of squares for the group. float sum = 0.F, sumSq = 0.F; if (gi < params.groups) { sum = params.redBuffer[(2 * ni + 0) * params.groups + gi]; sumSq = params.redBuffer[(2 * ni + 1) * params.groups + gi]; } // Load gamma/beta. float2 gammaF2, betaF2; if (ci < params.c) { gammaF2 = __half22float2(*reinterpret_cast<half2 const *>( reinterpret_cast<half const *>(params.gamma) + ci)); betaF2 = __half22float2(*reinterpret_cast<half2 const *>( reinterpret_cast<half const *>(params.beta) + ci)); } // Compute the mean. float mean = sum * params.invHWC; // Compute the variance. float var = sumSq * params.invHWC - (mean * mean); // Compute the inverse of the stddev. float invStdDev = rsqrtf(var + params.eps); // The first activation loaded by that block. int32_t hwBegin = blockIdx.y * params.hwPerBlock; // The last activation loaded by that block. 
int32_t hwEnd = min(hwBegin + params.hwPerBlock, params.hw); // Iterate over the activations to compute the sums. for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) { // The src/dst offset. int64_t offset = (int64_t)ni * params.hwc + hwi * params.c + ci; // Fetch two channels per thread. __half2 h2(0, 0); if (ci < params.c) { h2 = *reinterpret_cast<__half2 const *>(&params.srcX[offset]); } // Extract the two half values. float2 f2 = __half22float2(h2); // Normalize the channels. f2.x = (f2.x - mean) * invStdDev; f2.y = (f2.y - mean) * invStdDev; // Scale by gamma and add beta. f2.x = gammaF2.x * f2.x + betaF2.x; f2.y = gammaF2.y * f2.y + betaF2.y; // Apply Swish if needed. if (params.withSwish) { f2.x = f2.x * sigmoid(f2.x); f2.y = f2.y * sigmoid(f2.y); } // Store the scaled values. if (ci < params.c) { *reinterpret_cast<__half2 *>(&params.dst[offset]) = __float22half2_rn(f2); } } } void groupNormNHWCScale(const GroupNormNHWCParams &params, hipStream_t stream) { dim3 grid; // The number of blocks to compute all the channels. grid.x = params.c / params.cPerBlock; // The number of blocks to compute all the activations in a given instance. grid.y = divUp(params.hw, params.hwPerBlock); // The number of instances. grid.z = params.n; switch (params.cPerBlock) { case 320: hipLaunchKernelGGL(( groupNormNHWCScaleKernel<160>), dim3(grid), dim3(160), 0, stream, params); break; case 480: hipLaunchKernelGGL(( groupNormNHWCScaleKernel<256>), dim3(grid), dim3(256), 0, stream, params); break; case 256: hipLaunchKernelGGL(( groupNormNHWCScaleKernel<128>), dim3(grid), dim3(128), 0, stream, params); break; case 128: hipLaunchKernelGGL(( groupNormNHWCScaleKernel<64>), dim3(grid), dim3(64), 0, stream, params); break; case 8: hipLaunchKernelGGL(( groupNormNHWCScaleKernel<4>), dim3(grid), dim3(4), 0, stream, params); break; default: PADDLE_THROW(platform::errors::Fatal( "The function groupNormNHWCScale of GroupNormPlugin TRT Plugin " "encounter error")); } } int GroupNormPlugin::initialize() TRT_NOEXCEPT { if (!with_fp16_) { // if use fp32 hipMalloc(&scale_gpu_, sizeof(float) * scale_.size()); hipMalloc(&bias_gpu_, sizeof(float) * bias_.size()); hipMemcpy(scale_gpu_, scale_.data(), scale_.size() * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(bias_gpu_, bias_.data(), bias_.size() * sizeof(float), hipMemcpyHostToDevice); } else { // if use fp16 std::vector<half> scale_half(scale_.size()); std::vector<half> bias_half(bias_.size()); for (int i = 0; i < scale_.size(); ++i) { scale_half[i] = static_cast<half>(scale_[i]); } for (int i = 0; i < bias_.size(); ++i) { bias_half[i] = static_cast<half>(bias_[i]); } hipMalloc(&scale_gpu_, sizeof(half) * scale_half.size()); hipMalloc(&bias_gpu_, sizeof(half) * bias_half.size()); hipMemcpy(scale_gpu_, scale_half.data(), scale_half.size() * sizeof(half), hipMemcpyHostToDevice); hipMemcpy(bias_gpu_, bias_half.data(), bias_half.size() * sizeof(half), hipMemcpyHostToDevice); } return 0; } bool GroupNormPlugin::supportsFormat( nvinfer1::DataType type, nvinfer1::PluginFormat format) const TRT_NOEXCEPT { if (with_fp16_) { return ((type == nvinfer1::DataType::kHALF) && (format == nvinfer1::PluginFormat::kLINEAR)); } else { return ((type == nvinfer1::DataType::kFLOAT) && (format == nvinfer1::PluginFormat::kLINEAR)); } } nvinfer1::Dims GroupNormPlugin::getOutputDimensions( int index, const nvinfer1::Dims *inputDims, int nbInputs) TRT_NOEXCEPT { return inputDims[0]; } int GroupNormPlugin::enqueue(int batch_size, const void *const *inputs, #if IS_TRT_VERSION_LT(8000) void **outputs, void 
*workspace, #else void *const *outputs, void *workspace, #endif hipStream_t stream) TRT_NOEXCEPT { const auto &input_dims = this->getInputDims(0); int groups = groups_; float eps = eps_; std::vector<int> input_shape; input_shape.push_back(batch_size); for (int i = 0; i < input_dims.nbDims; i++) { input_shape.push_back(input_dims.d[i]); } const auto input_ddim = phi::make_ddim(input_shape); int C = input_shape[1]; PADDLE_ENFORCE_EQ( C, scale_.size(), platform::errors::InvalidArgument( "scale's size should be equal to the channel number in groupnorm," "but got channel number:%d, scale's size:%d.", C, scale_.size())); PADDLE_ENFORCE_EQ( C, bias_.size(), platform::errors::InvalidArgument( "bias's size should be equal to the channel number in groupnorm," "but got channel number:%d, bias's size:%d.", C, bias_.size())); float *mean_d = static_cast<float *>(workspace); float *variance_d = mean_d + input_shape[0] * groups_; float *temp_variance_d = variance_d + input_shape[0] * groups_; auto input_type = getDataType(); if (input_type == nvinfer1::DataType::kFLOAT) { VLOG(1) << "TRT Plugin DataType selected. GroupNorm-->fp32"; const float *input = static_cast<const float *>(inputs[0]); float *output = static_cast<float *>(outputs[0]); phi::GroupNormDirectCUDAFunctor<float> group_norm; group_norm(stream, input, input_shape, reinterpret_cast<float *>(bias_gpu_), reinterpret_cast<float *>(scale_gpu_), temp_variance_d, groups_, eps_, output, mean_d, variance_d, DataLayout::kNCHW); } else if (input_type == nvinfer1::DataType::kHALF) { VLOG(1) << "TRT Plugin DataType selected. GroupNorm-->fp16"; const half *input = static_cast<const half *>(inputs[0]); half *output = static_cast<half *>(outputs[0]); phi::GroupNormDirectCUDAFunctor<half, float> group_norm; group_norm(stream, input, input_shape, reinterpret_cast<const half *>(bias_gpu_), reinterpret_cast<const half *>(scale_gpu_), temp_variance_d, groups_, eps_, output, mean_d, variance_d, DataLayout::kNCHW); } else { PADDLE_THROW(platform::errors::Fatal( "The GroupNorm TRT Plugin's input type should be float or half.")); } return hipGetLastError() != hipSuccess; } nvinfer1::DimsExprs GroupNormPluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputDims, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT { return inputDims[0]; } bool GroupNormPluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) TRT_NOEXCEPT { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of groupnorm plugin shoule not be nullptr.")); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); const nvinfer1::PluginTensorDesc &in = in_out[pos]; if (pos == 0) { if (with_fp16_) { return ((in.type == nvinfer1::DataType::kHALF) && (in.format == nvinfer1::PluginFormat::kLINEAR || in.format == nvinfer1::PluginFormat::kHWC8)); } else { return (in.type == nvinfer1::DataType::kFLOAT) && (in.format == nvinfer1::TensorFormat::kLINEAR); } } const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1]; // output return in.type == prev.type && in.format == prev.format; } nvinfer1::DataType GroupNormPluginDynamic::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const TRT_NOEXCEPT { PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument( "The groupnorm Plugin only has one input, so the " 
"index value should be 0, but get %d.", index)); PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT || input_types[0] == nvinfer1::DataType::kHALF), true, platform::errors::InvalidArgument( "The input type should be half or float")); return input_types[0]; } int GroupNormPluginDynamic::initialize() TRT_NOEXCEPT { if (with_fp16_ == false) { // if use fp32 hipMalloc(&scale_gpu_, sizeof(float) * scale_.size()); hipMalloc(&bias_gpu_, sizeof(float) * bias_.size()); hipMemcpy(scale_gpu_, scale_.data(), scale_.size() * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(bias_gpu_, bias_.data(), bias_.size() * sizeof(float), hipMemcpyHostToDevice); } else { // if use fp16 std::vector<half> scale_half(scale_.size()); std::vector<half> bias_half(bias_.size()); for (int i = 0; i < scale_.size(); ++i) { scale_half[i] = static_cast<half>(scale_[i]); } for (int i = 0; i < bias_.size(); ++i) { bias_half[i] = static_cast<half>(bias_[i]); } hipMalloc(&scale_gpu_, sizeof(half) * scale_.size()); hipMalloc(&bias_gpu_, sizeof(half) * bias_.size()); hipMemcpy(scale_gpu_, scale_half.data(), scale_half.size() * sizeof(half), hipMemcpyHostToDevice); hipMemcpy(bias_gpu_, bias_half.data(), bias_half.size() * sizeof(half), hipMemcpyHostToDevice); } return 0; } int GroupNormPluginDynamic::enqueue( const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, hipStream_t stream) TRT_NOEXCEPT { const auto &input_dims = input_desc[0].dims; int groups = groups_; float eps = eps_; std::vector<int> input_shape; for (int i = 0; i < input_dims.nbDims; i++) { input_shape.push_back(input_dims.d[i]); } const auto input_ddim = phi::make_ddim(input_shape); int C = input_shape[1]; int image_size = input_shape[2] * input_shape[3]; int batchSize = input_shape[0]; PADDLE_ENFORCE_EQ( C, scale_.size(), platform::errors::InvalidArgument( "scale's size should be equal to the channel number in groupnorm," "but got feature_size:%d, scale's size:%d.", C, scale_.size())); PADDLE_ENFORCE_EQ( C, bias_.size(), platform::errors::InvalidArgument( "bias's size should be equal to the channel number in groupnorm," "but got feature_size:%d, bias's size:%d.", C, bias_.size())); float *mean_d = static_cast<float *>(workspace); float *variance_d = mean_d + input_shape[0] * groups_; float *temp_variance_d = variance_d + input_shape[0] * groups_; auto input_type = input_desc[0].type; if (input_type == nvinfer1::DataType::kFLOAT) { VLOG(1) << "TRT Plugin DataType selected. GroupNorm-->fp32"; const float *input = reinterpret_cast<const float *>(inputs[0]); float *output = static_cast<float *>(outputs[0]); phi::GroupNormDirectCUDAFunctor<float, float> group_norm; group_norm(stream, input, input_shape, reinterpret_cast<float *>(bias_gpu_), reinterpret_cast<float *>(scale_gpu_), temp_variance_d, groups, eps, output, mean_d, variance_d, DataLayout::kNCHW); } else if (input_type == nvinfer1::DataType::kHALF) { VLOG(1) << "TRT Plugin DataType selected. 
GroupNorm-->fp16"; const half *input = reinterpret_cast<const half *>(inputs[0]); half *output = static_cast<half *>(outputs[0]); if (input_desc[0].format == nvinfer1::PluginFormat::kLINEAR) { phi::GroupNormDirectCUDAFunctor<half, float> group_norm; group_norm(stream, input, input_shape, reinterpret_cast<half *>(bias_gpu_), reinterpret_cast<half *>(scale_gpu_), temp_variance_d, groups, eps, output, mean_d, variance_d, DataLayout::kNCHW); } else if (input_desc[0].format == nvinfer1::PluginFormat::kHWC8) { int32_t cPerBlock = 320; int32_t maxBlocksPerHW = 1024; switch (input_desc[0].dims.d[1]) { case 960: case 1920: cPerBlock = 480; break; case 512: case 256: cPerBlock = 256; break; case 128: cPerBlock = 128; break; default: cPerBlock = 320; } if (cPerBlock > input_desc[0].dims.d[1]) { cPerBlock = 8; } params_.withSwish = false; params_.dst = static_cast<half *>(outputs[0]); params_.srcX = static_cast<half const *>(inputs[0]); params_.gamma = scale_gpu_; params_.beta = bias_gpu_; params_.redBuffer = static_cast<float *>(workspace); params_.n = input_desc[0].dims.d[0]; params_.h = input_desc[0].dims.d[2]; params_.w = input_desc[0].dims.d[3]; params_.c = input_desc[0].dims.d[1]; params_.groups = groups_; params_.hw = params_.h * params_.w; const int32_t blocksPerHW = findMaxDivisor(params_.hw, maxBlocksPerHW); params_.hwPerBlock = divUp(params_.hw, blocksPerHW); params_.cPerBlock = cPerBlock; params_.cPerGroup = params_.c / params_.groups; params_.hwc = params_.hw * params_.c; params_.invHWC = 1.F / static_cast<float>(params_.hw * params_.cPerGroup); params_.groupsPerBlock = cPerBlock / params_.cPerGroup; params_.eps = eps_; hipMemsetAsync(params_.redBuffer, 0, 2 * sizeof(float) * params_.n * groups_, stream); groupNormNHWCSum(params_, stream); groupNormNHWCScale(params_, stream); } else { PADDLE_THROW(platform::errors::Fatal( "The Groupnorm TRT Plugin's only support nchw or nhwc8 input")); } } else { // input not float PADDLE_THROW(platform::errors::Fatal( "The Groupnorm TRT Plugin's only support fp32 or fp16 input")); } return hipGetLastError() != hipSuccess; } } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
f56500825ceb0212ab39505cc8ebbaf248195cfc.cu
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. SPDX-License-Identifier: Apache-2.0 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/inference/tensorrt/plugin/group_norm_op_plugin.h" #include "paddle/phi/kernels/group_norm_kernel.h" #include <cub/cub.cuh> #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/common/layout.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { using DataLayout = phi::DataLayout; static inline int32_t divUp(int32_t m, int32_t n) { return (m + n - 1) / n; } static inline __device__ __host__ float sigmoid(float x) { return 1.F / (1.F + expf(-x)); } struct GroupSums { // Is it the 1st element of the group? int32_t flag; // The sum. float sum; // The sum of squares. float sumSq; }; struct GroupSumsOp { inline __device__ GroupSums operator()(GroupSums const &a, GroupSums const &b) { GroupSums dst; dst.sum = b.flag ? b.sum : (a.sum + b.sum); dst.sumSq = b.flag ? b.sumSq : (a.sumSq + b.sumSq); dst.flag = a.flag + b.flag; return dst; } }; static int32_t findMaxDivisor(int32_t n, int32_t maxAllowedDivisor) { int32_t maxDivisor = -1; for (int32_t i = 1; i <= std::sqrt(n); i++) { if (n % i == 0) { int32_t divisor1 = n / i; int32_t divisor2 = i; if (divisor1 > maxDivisor && divisor1 < maxAllowedDivisor) { maxDivisor = divisor1; } if (divisor2 > maxDivisor && divisor2 < maxAllowedDivisor) { maxDivisor = divisor2; } } } return maxDivisor; } template <int tTHREADS_PER_BLOCK> __global__ void groupNormNHWCSumKernel(const GroupNormNHWCParams params) { // The object in charge of doing the sums for the different blocks. typedef cub::BlockScan<GroupSums, tTHREADS_PER_BLOCK> BlockScan; // Allocate shared memory for BlockScan. __shared__ typename BlockScan::TempStorage tempStorage; // Allocate shared memory for the groups. We could reduce the amount of shared // memory reserved. __shared__ float2 smem[tTHREADS_PER_BLOCK]; // The instance in the batch. int32_t ni = blockIdx.z; // The channel loaded by that thread (2 channels per thread for F16x2). int32_t ci = blockIdx.x * params.cPerBlock + threadIdx.x * 2; // The first activation loaded by that block. int32_t hwBegin = blockIdx.y * params.hwPerBlock; // The last activation loaded by that block. int32_t hwEnd = min(hwBegin + params.hwPerBlock, params.hw); // The sums. float sum = 0.F; float sumSq = 0.F; // Iterate over the activations to compute the sums. for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) { // The offset. int64_t offset = static_cast<int64_t>(ni) * params.hwc + static_cast<int64_t>(hwi) * params.c + ci; // Fetch two channels per thread. __half2 h2(0, 0); if (ci < params.c) { h2 = *reinterpret_cast<__half2 const *>(&params.srcX[offset]); } // Extract the two half values. float2 f2 = __half22float2(h2); // Update the sum. sum += f2.x + f2.y; // Update the sum of squares. 
sumSq += f2.x * f2.x + f2.y * f2.y; } // The group that thread works on and the channel in the group (modulus). int32_t gi = threadIdx.x * 2 / params.cPerGroup; int32_t cj = threadIdx.x * 2 - params.cPerGroup * gi; // The data for the summations. GroupSums inp{cj == 0 ? 1 : 0, sum, sumSq}; // Do the segmented scan. GroupSums out; BlockScan(tempStorage).InclusiveScan(inp, out, GroupSumsOp()); // Store the results for the groups in shared memory (to produce coalesced // stores later). // 2 channels per thread if (cj == params.cPerGroup - 2) { smem[gi] = make_float2(out.sum, out.sumSq); } // Make sure the data is in shared memory. __syncthreads(); // The global group index. int32_t gj = blockIdx.x * params.groupsPerBlock + threadIdx.x; // Threads that have nothing left to do, exit. if (threadIdx.x >= params.groupsPerBlock || gj >= params.groups) { return; } // The first threads (those storing to global memory, load the values). float2 sums = smem[threadIdx.x]; // Store to global memory. atomicAdd(&params.redBuffer[(2 * ni + 0) * params.groups + gj], sums.x); atomicAdd(&params.redBuffer[(2 * ni + 1) * params.groups + gj], sums.y); } void groupNormNHWCSum(const GroupNormNHWCParams &params, cudaStream_t stream) { dim3 grid; // The number of blocks to compute all the channels. grid.x = params.c / params.cPerBlock; // The number of blocks to compute all the activations in a given instance. grid.y = divUp(params.hw, params.hwPerBlock); // The number of instances. grid.z = params.n; switch (params.cPerBlock) { case 320: groupNormNHWCSumKernel<160><<<grid, 160, 0, stream>>>(params); break; case 480: groupNormNHWCSumKernel<256><<<grid, 256, 0, stream>>>(params); break; case 256: groupNormNHWCSumKernel<128><<<grid, 128, 0, stream>>>(params); break; case 128: groupNormNHWCSumKernel<64><<<grid, 64, 0, stream>>>(params); break; case 8: groupNormNHWCSumKernel<4><<<grid, 4, 0, stream>>>(params); break; default: PADDLE_THROW(platform::errors::Fatal( "The function groupNormNHWCSum of GroupNormPlugin TRT Plugin " "encounter error")); } } template <int tTHREADS_PER_BLOCK> __global__ void groupNormNHWCScaleKernel(const GroupNormNHWCParams params) { // The instance in the batch. int32_t ni = blockIdx.z; // The channel loaded by that thread (2 channels per thread for F16x2). int32_t ci = blockIdx.x * params.cPerBlock + threadIdx.x * 2; // The group that thread works on and the channel in the group (modulus). int32_t gi = ci / params.cPerGroup; // Load the sum and sum of squares for the group. float sum = 0.F, sumSq = 0.F; if (gi < params.groups) { sum = params.redBuffer[(2 * ni + 0) * params.groups + gi]; sumSq = params.redBuffer[(2 * ni + 1) * params.groups + gi]; } // Load gamma/beta. float2 gammaF2, betaF2; if (ci < params.c) { gammaF2 = __half22float2(*reinterpret_cast<half2 const *>( reinterpret_cast<half const *>(params.gamma) + ci)); betaF2 = __half22float2(*reinterpret_cast<half2 const *>( reinterpret_cast<half const *>(params.beta) + ci)); } // Compute the mean. float mean = sum * params.invHWC; // Compute the variance. float var = sumSq * params.invHWC - (mean * mean); // Compute the inverse of the stddev. float invStdDev = rsqrtf(var + params.eps); // The first activation loaded by that block. int32_t hwBegin = blockIdx.y * params.hwPerBlock; // The last activation loaded by that block. int32_t hwEnd = min(hwBegin + params.hwPerBlock, params.hw); // Iterate over the activations to compute the sums. for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) { // The src/dst offset. 
int64_t offset = (int64_t)ni * params.hwc + hwi * params.c + ci; // Fetch two channels per thread. __half2 h2(0, 0); if (ci < params.c) { h2 = *reinterpret_cast<__half2 const *>(&params.srcX[offset]); } // Extract the two half values. float2 f2 = __half22float2(h2); // Normalize the channels. f2.x = (f2.x - mean) * invStdDev; f2.y = (f2.y - mean) * invStdDev; // Scale by gamma and add beta. f2.x = gammaF2.x * f2.x + betaF2.x; f2.y = gammaF2.y * f2.y + betaF2.y; // Apply Swish if needed. if (params.withSwish) { f2.x = f2.x * sigmoid(f2.x); f2.y = f2.y * sigmoid(f2.y); } // Store the scaled values. if (ci < params.c) { *reinterpret_cast<__half2 *>(&params.dst[offset]) = __float22half2_rn(f2); } } } void groupNormNHWCScale(const GroupNormNHWCParams &params, cudaStream_t stream) { dim3 grid; // The number of blocks to compute all the channels. grid.x = params.c / params.cPerBlock; // The number of blocks to compute all the activations in a given instance. grid.y = divUp(params.hw, params.hwPerBlock); // The number of instances. grid.z = params.n; switch (params.cPerBlock) { case 320: groupNormNHWCScaleKernel<160><<<grid, 160, 0, stream>>>(params); break; case 480: groupNormNHWCScaleKernel<256><<<grid, 256, 0, stream>>>(params); break; case 256: groupNormNHWCScaleKernel<128><<<grid, 128, 0, stream>>>(params); break; case 128: groupNormNHWCScaleKernel<64><<<grid, 64, 0, stream>>>(params); break; case 8: groupNormNHWCScaleKernel<4><<<grid, 4, 0, stream>>>(params); break; default: PADDLE_THROW(platform::errors::Fatal( "The function groupNormNHWCScale of GroupNormPlugin TRT Plugin " "encounter error")); } } int GroupNormPlugin::initialize() TRT_NOEXCEPT { if (!with_fp16_) { // if use fp32 cudaMalloc(&scale_gpu_, sizeof(float) * scale_.size()); cudaMalloc(&bias_gpu_, sizeof(float) * bias_.size()); cudaMemcpy(scale_gpu_, scale_.data(), scale_.size() * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(bias_gpu_, bias_.data(), bias_.size() * sizeof(float), cudaMemcpyHostToDevice); } else { // if use fp16 std::vector<half> scale_half(scale_.size()); std::vector<half> bias_half(bias_.size()); for (int i = 0; i < scale_.size(); ++i) { scale_half[i] = static_cast<half>(scale_[i]); } for (int i = 0; i < bias_.size(); ++i) { bias_half[i] = static_cast<half>(bias_[i]); } cudaMalloc(&scale_gpu_, sizeof(half) * scale_half.size()); cudaMalloc(&bias_gpu_, sizeof(half) * bias_half.size()); cudaMemcpy(scale_gpu_, scale_half.data(), scale_half.size() * sizeof(half), cudaMemcpyHostToDevice); cudaMemcpy(bias_gpu_, bias_half.data(), bias_half.size() * sizeof(half), cudaMemcpyHostToDevice); } return 0; } bool GroupNormPlugin::supportsFormat( nvinfer1::DataType type, nvinfer1::PluginFormat format) const TRT_NOEXCEPT { if (with_fp16_) { return ((type == nvinfer1::DataType::kHALF) && (format == nvinfer1::PluginFormat::kLINEAR)); } else { return ((type == nvinfer1::DataType::kFLOAT) && (format == nvinfer1::PluginFormat::kLINEAR)); } } nvinfer1::Dims GroupNormPlugin::getOutputDimensions( int index, const nvinfer1::Dims *inputDims, int nbInputs) TRT_NOEXCEPT { return inputDims[0]; } int GroupNormPlugin::enqueue(int batch_size, const void *const *inputs, #if IS_TRT_VERSION_LT(8000) void **outputs, void *workspace, #else void *const *outputs, void *workspace, #endif cudaStream_t stream) TRT_NOEXCEPT { const auto &input_dims = this->getInputDims(0); int groups = groups_; float eps = eps_; std::vector<int> input_shape; input_shape.push_back(batch_size); for (int i = 0; i < input_dims.nbDims; i++) { 
input_shape.push_back(input_dims.d[i]); } const auto input_ddim = phi::make_ddim(input_shape); int C = input_shape[1]; PADDLE_ENFORCE_EQ( C, scale_.size(), platform::errors::InvalidArgument( "scale's size should be equal to the channel number in groupnorm," "but got channel number:%d, scale's size:%d.", C, scale_.size())); PADDLE_ENFORCE_EQ( C, bias_.size(), platform::errors::InvalidArgument( "bias's size should be equal to the channel number in groupnorm," "but got channel number:%d, bias's size:%d.", C, bias_.size())); float *mean_d = static_cast<float *>(workspace); float *variance_d = mean_d + input_shape[0] * groups_; float *temp_variance_d = variance_d + input_shape[0] * groups_; auto input_type = getDataType(); if (input_type == nvinfer1::DataType::kFLOAT) { VLOG(1) << "TRT Plugin DataType selected. GroupNorm-->fp32"; const float *input = static_cast<const float *>(inputs[0]); float *output = static_cast<float *>(outputs[0]); phi::GroupNormDirectCUDAFunctor<float> group_norm; group_norm(stream, input, input_shape, reinterpret_cast<float *>(bias_gpu_), reinterpret_cast<float *>(scale_gpu_), temp_variance_d, groups_, eps_, output, mean_d, variance_d, DataLayout::kNCHW); } else if (input_type == nvinfer1::DataType::kHALF) { VLOG(1) << "TRT Plugin DataType selected. GroupNorm-->fp16"; const half *input = static_cast<const half *>(inputs[0]); half *output = static_cast<half *>(outputs[0]); phi::GroupNormDirectCUDAFunctor<half, float> group_norm; group_norm(stream, input, input_shape, reinterpret_cast<const half *>(bias_gpu_), reinterpret_cast<const half *>(scale_gpu_), temp_variance_d, groups_, eps_, output, mean_d, variance_d, DataLayout::kNCHW); } else { PADDLE_THROW(platform::errors::Fatal( "The GroupNorm TRT Plugin's input type should be float or half.")); } return cudaGetLastError() != cudaSuccess; } nvinfer1::DimsExprs GroupNormPluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputDims, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT { return inputDims[0]; } bool GroupNormPluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) TRT_NOEXCEPT { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of groupnorm plugin shoule not be nullptr.")); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); const nvinfer1::PluginTensorDesc &in = in_out[pos]; if (pos == 0) { if (with_fp16_) { return ((in.type == nvinfer1::DataType::kHALF) && (in.format == nvinfer1::PluginFormat::kLINEAR || in.format == nvinfer1::PluginFormat::kHWC8)); } else { return (in.type == nvinfer1::DataType::kFLOAT) && (in.format == nvinfer1::TensorFormat::kLINEAR); } } const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1]; // output return in.type == prev.type && in.format == prev.format; } nvinfer1::DataType GroupNormPluginDynamic::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const TRT_NOEXCEPT { PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument( "The groupnorm Plugin only has one input, so the " "index value should be 0, but get %d.", index)); PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT || input_types[0] == nvinfer1::DataType::kHALF), true, platform::errors::InvalidArgument( "The input type should be half or float")); return input_types[0]; } int 
GroupNormPluginDynamic::initialize() TRT_NOEXCEPT { if (with_fp16_ == false) { // if use fp32 cudaMalloc(&scale_gpu_, sizeof(float) * scale_.size()); cudaMalloc(&bias_gpu_, sizeof(float) * bias_.size()); cudaMemcpy(scale_gpu_, scale_.data(), scale_.size() * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(bias_gpu_, bias_.data(), bias_.size() * sizeof(float), cudaMemcpyHostToDevice); } else { // if use fp16 std::vector<half> scale_half(scale_.size()); std::vector<half> bias_half(bias_.size()); for (int i = 0; i < scale_.size(); ++i) { scale_half[i] = static_cast<half>(scale_[i]); } for (int i = 0; i < bias_.size(); ++i) { bias_half[i] = static_cast<half>(bias_[i]); } cudaMalloc(&scale_gpu_, sizeof(half) * scale_.size()); cudaMalloc(&bias_gpu_, sizeof(half) * bias_.size()); cudaMemcpy(scale_gpu_, scale_half.data(), scale_half.size() * sizeof(half), cudaMemcpyHostToDevice); cudaMemcpy(bias_gpu_, bias_half.data(), bias_half.size() * sizeof(half), cudaMemcpyHostToDevice); } return 0; } int GroupNormPluginDynamic::enqueue( const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, cudaStream_t stream) TRT_NOEXCEPT { const auto &input_dims = input_desc[0].dims; int groups = groups_; float eps = eps_; std::vector<int> input_shape; for (int i = 0; i < input_dims.nbDims; i++) { input_shape.push_back(input_dims.d[i]); } const auto input_ddim = phi::make_ddim(input_shape); int C = input_shape[1]; int image_size = input_shape[2] * input_shape[3]; int batchSize = input_shape[0]; PADDLE_ENFORCE_EQ( C, scale_.size(), platform::errors::InvalidArgument( "scale's size should be equal to the channel number in groupnorm," "but got feature_size:%d, scale's size:%d.", C, scale_.size())); PADDLE_ENFORCE_EQ( C, bias_.size(), platform::errors::InvalidArgument( "bias's size should be equal to the channel number in groupnorm," "but got feature_size:%d, bias's size:%d.", C, bias_.size())); float *mean_d = static_cast<float *>(workspace); float *variance_d = mean_d + input_shape[0] * groups_; float *temp_variance_d = variance_d + input_shape[0] * groups_; auto input_type = input_desc[0].type; if (input_type == nvinfer1::DataType::kFLOAT) { VLOG(1) << "TRT Plugin DataType selected. GroupNorm-->fp32"; const float *input = reinterpret_cast<const float *>(inputs[0]); float *output = static_cast<float *>(outputs[0]); phi::GroupNormDirectCUDAFunctor<float, float> group_norm; group_norm(stream, input, input_shape, reinterpret_cast<float *>(bias_gpu_), reinterpret_cast<float *>(scale_gpu_), temp_variance_d, groups, eps, output, mean_d, variance_d, DataLayout::kNCHW); } else if (input_type == nvinfer1::DataType::kHALF) { VLOG(1) << "TRT Plugin DataType selected. 
GroupNorm-->fp16"; const half *input = reinterpret_cast<const half *>(inputs[0]); half *output = static_cast<half *>(outputs[0]); if (input_desc[0].format == nvinfer1::PluginFormat::kLINEAR) { phi::GroupNormDirectCUDAFunctor<half, float> group_norm; group_norm(stream, input, input_shape, reinterpret_cast<half *>(bias_gpu_), reinterpret_cast<half *>(scale_gpu_), temp_variance_d, groups, eps, output, mean_d, variance_d, DataLayout::kNCHW); } else if (input_desc[0].format == nvinfer1::PluginFormat::kHWC8) { int32_t cPerBlock = 320; int32_t maxBlocksPerHW = 1024; switch (input_desc[0].dims.d[1]) { case 960: case 1920: cPerBlock = 480; break; case 512: case 256: cPerBlock = 256; break; case 128: cPerBlock = 128; break; default: cPerBlock = 320; } if (cPerBlock > input_desc[0].dims.d[1]) { cPerBlock = 8; } params_.withSwish = false; params_.dst = static_cast<half *>(outputs[0]); params_.srcX = static_cast<half const *>(inputs[0]); params_.gamma = scale_gpu_; params_.beta = bias_gpu_; params_.redBuffer = static_cast<float *>(workspace); params_.n = input_desc[0].dims.d[0]; params_.h = input_desc[0].dims.d[2]; params_.w = input_desc[0].dims.d[3]; params_.c = input_desc[0].dims.d[1]; params_.groups = groups_; params_.hw = params_.h * params_.w; const int32_t blocksPerHW = findMaxDivisor(params_.hw, maxBlocksPerHW); params_.hwPerBlock = divUp(params_.hw, blocksPerHW); params_.cPerBlock = cPerBlock; params_.cPerGroup = params_.c / params_.groups; params_.hwc = params_.hw * params_.c; params_.invHWC = 1.F / static_cast<float>(params_.hw * params_.cPerGroup); params_.groupsPerBlock = cPerBlock / params_.cPerGroup; params_.eps = eps_; cudaMemsetAsync(params_.redBuffer, 0, 2 * sizeof(float) * params_.n * groups_, stream); groupNormNHWCSum(params_, stream); groupNormNHWCScale(params_, stream); } else { PADDLE_THROW(platform::errors::Fatal( "The Groupnorm TRT Plugin's only support nchw or nhwc8 input")); } } else { // input not float PADDLE_THROW(platform::errors::Fatal( "The Groupnorm TRT Plugin's only support fp32 or fp16 input")); } return cudaGetLastError() != cudaSuccess; } } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
27f3a3b0c1e50c0775e3cfa589fc8fc752e3f63f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2014 BVLC and contributors. #include <vector> #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" #include "caffe/data_layers.hpp" #include "caffe/util/math_functions.hpp" #include <iostream> #include <fstream> #define CUDART_NAN_F __int_as_float(0x7fffffff) namespace caffe { inline __device__ __host__ float clamp(float f, float a, float b) { return fmaxf(a, fminf(f, b)); } template <typename Dtype> __global__ void WarpData(const int nthreads, const int num, const int height, const int width, const Dtype* src_data, const int src_count, const int dest_height, const int dest_width, Dtype* dest_data, const typename AugmentationLayerBase<Dtype>::tTransMat *transMats1, const typename AugmentationLayerBase<Dtype>::tTransMat *transMats2 ) { CUDA_KERNEL_LOOP(index, nthreads) { float x = (float)(index % dest_width); //w-pos float y = (float)((index / dest_width) % dest_height); //h-pos int n = (index / dest_width / dest_height); // num // === Warping: //transMat: // / 0 2 4 \ // \ 1 3 5 / const typename PhilDataAugmentationLayer<Dtype>::tTransMat *transMat1 = &(transMats1[n]); const typename PhilDataAugmentationLayer<Dtype>::tTransMat *transMat2 = &(transMats2[n]); float xpos1, ypos1, xpos2, ypos2, xpos3, ypos3; // Step 1: Apply inverse tranformation of Image 1 xpos1 = x * transMat1->t0 + y * transMat1->t2 + transMat1->t4; ypos1 = x * transMat1->t1 + y * transMat1->t3 + transMat1->t5; // Step 2: Apply flow field int srcIdxOffx = width*(height*(2*n+0) + (int)(ypos1+(Dtype)0.5)) + (int)(xpos1+(Dtype)0.5); int srcIdxOffy = width*(height*(2*n+1) + (int)(ypos1+(Dtype)0.5)) + (int)(xpos1+(Dtype)0.5); xpos2 = xpos1 + src_data[min(srcIdxOffx,src_count)]; ypos2 = ypos1 + src_data[min(srcIdxOffy,src_count)]; // Step 3: Apply tranformation of Image 2 xpos3 = xpos2 * transMat2->t0 + ypos2 * transMat2->t2 + transMat2->t4; ypos3 = xpos2 * transMat2->t1 + ypos2 * transMat2->t3 + transMat2->t5; // Step 4: Difference between the new and old positions gives the flow dest_data[dest_width*(dest_height*(2*n+0) + (int)y) + (int)x] = xpos3 - x; dest_data[dest_width*(dest_height*(2*n+1) + (int)y) + (int)x] = ypos3 - y; /*xpos = clamp(xpos, 0.0f, (float)(width)-1.05f);//Ensure that floor(xpos)+1 is still valid ypos = clamp(ypos, 0.0f, (float)(height)-1.05f); // Get interpolated sample //float sample = tex2DLayered(texRef, xpos, ypos, cn); float tlx = floor(xpos); float tly = floor(ypos); int srcIdxOff = width*(height*cn + tly) + tlx; float sampleTL = src_data[srcIdxOff]; float sampleTR = src_data[min(srcIdxOff+1,src_count)]; float sampleBL = src_data[min(srcIdxOff+width,src_count)]; float sampleBR = src_data[min(srcIdxOff+1+width,src_count)]; float xdist = xpos - tlx; float ydist = ypos - tly; float sample = (1-xdist)*(1-ydist)*sampleTL + ( xdist)*( ydist)*sampleBR + (1-xdist)*( ydist)*sampleBL + ( xdist)*(1-ydist)*sampleTR; dest_data[index] = sample;*/ } } template <typename Dtype> void FlowAugmentationLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { //"Flow augmentation layer takes three input blobs: FlowField, Img1TransfParams, Img2TransfParams"; Dtype* top_data = (top)[0]->mutable_gpu_data(); // dest int topwidth = (top)[0]->width(); int topheight = (top)[0]->height(); int topchannels = (top)[0]->channels(); int topcount = (top)[0]->count(); CHECK_EQ(topchannels, 2); const Dtype* bottom_data = bottom[0]->gpu_data(); // source int bottomchannels = 
(bottom)[0]->channels(); int bottomwidth = (bottom)[0]->width(); int bottomheight = (bottom)[0]->height(); int bottomcount = (bottom)[0]->count(); int num = (bottom)[0]->num(); CHECK_EQ(bottomchannels, 2); CHECK_EQ((bottom)[0]->num(), (top)[0]->num()); // Debug: check for NaNs and lare values: const Dtype* bottom_cpu_data = bottom[0]->cpu_data(); /*for(int i=0; i<bottomcount; i++) { if (isnan(bottom_cpu_data[i])) LOG(WARNING) << "bottom_data[" << i << "]=NaN"; // if (::fabs(bottom_cpu_data[i])>1e3) // LOG(WARNING) << "bottom_data[" << i << "]=" << bottom_cpu_data[i]; }*/ // Prepare matrices all_coeffs1_.ShareData(*bottom[1]); //reuse all_coeffs2_.ShareData(*bottom[2]); //all_coeffs1_.CopyFrom(*bottom[1]); //all_coeffs2_.CopyFrom(*bottom[2]); const Dtype* my_params1 = all_coeffs1_.cpu_data(); const Dtype* my_params2 = all_coeffs2_.cpu_data(); typename AugmentationLayerBase<Dtype>::tTransMat *matrices1 = (typename AugmentationLayerBase<Dtype>::tTransMat *)(coeff_matrices1_->mutable_cpu_data()); typename AugmentationLayerBase<Dtype>::tTransMat *matrices2 = (typename AugmentationLayerBase<Dtype>::tTransMat *)(coeff_matrices2_->mutable_cpu_data()); for (int item_id = 0; item_id < num; ++item_id) { AugmentationCoeff coeff; // Load the previously generated coeffs (either they are from another layer or generated above) AugmentationLayerBase<Dtype>::array_to_coeff(my_params1 + item_id * num_params_, coeff); matrices1[item_id].toIdentity(); matrices1[item_id].fromCoeff(&coeff,cropped_width_,cropped_height_,bottomwidth,bottomheight); AugmentationLayerBase<Dtype>::array_to_coeff(my_params2 + item_id * num_params_, coeff); matrices2[item_id].toIdentity(); matrices2[item_id].fromCoeff(&coeff,cropped_width_,cropped_height_,bottomwidth,bottomheight); matrices2[item_id] = matrices2[item_id].inverse(); } // Do GPU work typename AugmentationLayerBase<Dtype>::tTransMat *gpumatrices1 = (typename AugmentationLayerBase<Dtype>::tTransMat *)(coeff_matrices1_->gpu_data()); typename AugmentationLayerBase<Dtype>::tTransMat *gpumatrices2 = (typename AugmentationLayerBase<Dtype>::tTransMat *)(coeff_matrices2_->gpu_data()); int topThreadCount = topcount / 2; hipLaunchKernelGGL(( WarpData<Dtype>), dim3(CAFFE_GET_BLOCKS(topThreadCount)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, topThreadCount, num, bottomheight, bottomwidth, bottom_data, bottomcount, topheight, topwidth, top_data, gpumatrices1, gpumatrices2); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(FlowAugmentationLayer); } // namespace caffe
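The .cu version of this file, which follows, differs from the hipified version above mainly in runtime spelling (hipMemcpy vs cudaMemcpy) and in the kernel launch: hipify rewrites the triple-chevron launch into hipLaunchKernelGGL with the grid, block, shared-memory bytes, and stream as explicit arguments. A minimal CUDA sketch of that correspondence is below; scaleKernel is a stand-in kernel, not the Caffe WarpData kernel.

#include <cstdio>
#include <cuda_runtime.h>

// Toy kernel standing in for WarpData; the name and body are illustrative only.
__global__ void scaleKernel(int n, float a, float *x) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

int main() {
  const int n = 1024;
  float *d_x = nullptr;
  cudaMalloc(&d_x, n * sizeof(float));
  cudaMemset(d_x, 0, n * sizeof(float));

  // Native CUDA launch, as in the .cu file below:
  scaleKernel<<<(n + 255) / 256, 256, 0, 0>>>(n, 2.0f, d_x);

  // The hipified file expresses the same launch with the shared-memory bytes
  // and stream passed as ordinary arguments:
  //   hipLaunchKernelGGL(scaleKernel, dim3((n + 255) / 256), dim3(256), 0, 0,
  //                      n, 2.0f, d_x);

  cudaDeviceSynchronize();
  printf("launch done: %s\n", cudaGetErrorString(cudaGetLastError()));
  cudaFree(d_x);
  return 0;
}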
27f3a3b0c1e50c0775e3cfa589fc8fc752e3f63f.cu
// Copyright 2014 BVLC and contributors. #include <vector> #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" #include "caffe/data_layers.hpp" #include "caffe/util/math_functions.hpp" #include <iostream> #include <fstream> #define CUDART_NAN_F __int_as_float(0x7fffffff) namespace caffe { inline __device__ __host__ float clamp(float f, float a, float b) { return fmaxf(a, fminf(f, b)); } template <typename Dtype> __global__ void WarpData(const int nthreads, const int num, const int height, const int width, const Dtype* src_data, const int src_count, const int dest_height, const int dest_width, Dtype* dest_data, const typename AugmentationLayerBase<Dtype>::tTransMat *transMats1, const typename AugmentationLayerBase<Dtype>::tTransMat *transMats2 ) { CUDA_KERNEL_LOOP(index, nthreads) { float x = (float)(index % dest_width); //w-pos float y = (float)((index / dest_width) % dest_height); //h-pos int n = (index / dest_width / dest_height); // num // === Warping: //transMat: // / 0 2 4 \ // \ 1 3 5 / const typename PhilDataAugmentationLayer<Dtype>::tTransMat *transMat1 = &(transMats1[n]); const typename PhilDataAugmentationLayer<Dtype>::tTransMat *transMat2 = &(transMats2[n]); float xpos1, ypos1, xpos2, ypos2, xpos3, ypos3; // Step 1: Apply inverse tranformation of Image 1 xpos1 = x * transMat1->t0 + y * transMat1->t2 + transMat1->t4; ypos1 = x * transMat1->t1 + y * transMat1->t3 + transMat1->t5; // Step 2: Apply flow field int srcIdxOffx = width*(height*(2*n+0) + (int)(ypos1+(Dtype)0.5)) + (int)(xpos1+(Dtype)0.5); int srcIdxOffy = width*(height*(2*n+1) + (int)(ypos1+(Dtype)0.5)) + (int)(xpos1+(Dtype)0.5); xpos2 = xpos1 + src_data[min(srcIdxOffx,src_count)]; ypos2 = ypos1 + src_data[min(srcIdxOffy,src_count)]; // Step 3: Apply tranformation of Image 2 xpos3 = xpos2 * transMat2->t0 + ypos2 * transMat2->t2 + transMat2->t4; ypos3 = xpos2 * transMat2->t1 + ypos2 * transMat2->t3 + transMat2->t5; // Step 4: Difference between the new and old positions gives the flow dest_data[dest_width*(dest_height*(2*n+0) + (int)y) + (int)x] = xpos3 - x; dest_data[dest_width*(dest_height*(2*n+1) + (int)y) + (int)x] = ypos3 - y; /*xpos = clamp(xpos, 0.0f, (float)(width)-1.05f);//Ensure that floor(xpos)+1 is still valid ypos = clamp(ypos, 0.0f, (float)(height)-1.05f); // Get interpolated sample //float sample = tex2DLayered(texRef, xpos, ypos, cn); float tlx = floor(xpos); float tly = floor(ypos); int srcIdxOff = width*(height*cn + tly) + tlx; float sampleTL = src_data[srcIdxOff]; float sampleTR = src_data[min(srcIdxOff+1,src_count)]; float sampleBL = src_data[min(srcIdxOff+width,src_count)]; float sampleBR = src_data[min(srcIdxOff+1+width,src_count)]; float xdist = xpos - tlx; float ydist = ypos - tly; float sample = (1-xdist)*(1-ydist)*sampleTL + ( xdist)*( ydist)*sampleBR + (1-xdist)*( ydist)*sampleBL + ( xdist)*(1-ydist)*sampleTR; dest_data[index] = sample;*/ } } template <typename Dtype> void FlowAugmentationLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { //"Flow augmentation layer takes three input blobs: FlowField, Img1TransfParams, Img2TransfParams"; Dtype* top_data = (top)[0]->mutable_gpu_data(); // dest int topwidth = (top)[0]->width(); int topheight = (top)[0]->height(); int topchannels = (top)[0]->channels(); int topcount = (top)[0]->count(); CHECK_EQ(topchannels, 2); const Dtype* bottom_data = bottom[0]->gpu_data(); // source int bottomchannels = (bottom)[0]->channels(); int bottomwidth = (bottom)[0]->width(); int bottomheight = 
(bottom)[0]->height(); int bottomcount = (bottom)[0]->count(); int num = (bottom)[0]->num(); CHECK_EQ(bottomchannels, 2); CHECK_EQ((bottom)[0]->num(), (top)[0]->num()); // Debug: check for NaNs and lare values: const Dtype* bottom_cpu_data = bottom[0]->cpu_data(); /*for(int i=0; i<bottomcount; i++) { if (isnan(bottom_cpu_data[i])) LOG(WARNING) << "bottom_data[" << i << "]=NaN"; // if (std::fabs(bottom_cpu_data[i])>1e3) // LOG(WARNING) << "bottom_data[" << i << "]=" << bottom_cpu_data[i]; }*/ // Prepare matrices all_coeffs1_.ShareData(*bottom[1]); //reuse all_coeffs2_.ShareData(*bottom[2]); //all_coeffs1_.CopyFrom(*bottom[1]); //all_coeffs2_.CopyFrom(*bottom[2]); const Dtype* my_params1 = all_coeffs1_.cpu_data(); const Dtype* my_params2 = all_coeffs2_.cpu_data(); typename AugmentationLayerBase<Dtype>::tTransMat *matrices1 = (typename AugmentationLayerBase<Dtype>::tTransMat *)(coeff_matrices1_->mutable_cpu_data()); typename AugmentationLayerBase<Dtype>::tTransMat *matrices2 = (typename AugmentationLayerBase<Dtype>::tTransMat *)(coeff_matrices2_->mutable_cpu_data()); for (int item_id = 0; item_id < num; ++item_id) { AugmentationCoeff coeff; // Load the previously generated coeffs (either they are from another layer or generated above) AugmentationLayerBase<Dtype>::array_to_coeff(my_params1 + item_id * num_params_, coeff); matrices1[item_id].toIdentity(); matrices1[item_id].fromCoeff(&coeff,cropped_width_,cropped_height_,bottomwidth,bottomheight); AugmentationLayerBase<Dtype>::array_to_coeff(my_params2 + item_id * num_params_, coeff); matrices2[item_id].toIdentity(); matrices2[item_id].fromCoeff(&coeff,cropped_width_,cropped_height_,bottomwidth,bottomheight); matrices2[item_id] = matrices2[item_id].inverse(); } // Do GPU work typename AugmentationLayerBase<Dtype>::tTransMat *gpumatrices1 = (typename AugmentationLayerBase<Dtype>::tTransMat *)(coeff_matrices1_->gpu_data()); typename AugmentationLayerBase<Dtype>::tTransMat *gpumatrices2 = (typename AugmentationLayerBase<Dtype>::tTransMat *)(coeff_matrices2_->gpu_data()); int topThreadCount = topcount / 2; WarpData<Dtype><<<CAFFE_GET_BLOCKS(topThreadCount), CAFFE_CUDA_NUM_THREADS>>>( topThreadCount, num, bottomheight, bottomwidth, bottom_data, bottomcount, topheight, topwidth, top_data, gpumatrices1, gpumatrices2); CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(FlowAugmentationLayer); } // namespace caffe
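For reference, the per-pixel work inside WarpData composes two affine transforms around a flow lookup: apply the transform of image 1, add the flow sampled at that position, apply the transform of image 2, and emit the new-minus-old position as the augmented flow. The host-side sketch below restates those four steps for one pixel; TransMat and composeFlow are illustrative names (TransMat mirrors the 2x3 layout documented in the kernel comment), and the flow sample is passed in directly rather than read from a blob.

#include <cstdio>

// 2x3 affine matrix, laid out as in the kernel comment:
//   / t0 t2 t4 \
//   \ t1 t3 t5 /
struct TransMat { float t0, t1, t2, t3, t4, t5; };

static void composeFlow(float x, float y,
                        const TransMat &m1, const TransMat &m2,
                        float flowX, float flowY,   // flow sampled near (x1, y1)
                        float *outFlowX, float *outFlowY) {
  // Step 1: transform of image 1.
  float x1 = x * m1.t0 + y * m1.t2 + m1.t4;
  float y1 = x * m1.t1 + y * m1.t3 + m1.t5;
  // Step 2: follow the flow field.
  float x2 = x1 + flowX;
  float y2 = y1 + flowY;
  // Step 3: transform of image 2.
  float x3 = x2 * m2.t0 + y2 * m2.t2 + m2.t4;
  float y3 = x2 * m2.t1 + y2 * m2.t3 + m2.t5;
  // Step 4: new-minus-old position is the augmented flow.
  *outFlowX = x3 - x;
  *outFlowY = y3 - y;
}

int main() {
  TransMat id = {1.f, 0.f, 0.f, 1.f, 0.f, 0.f};   // identity, illustrative
  float fx, fy;
  composeFlow(10.f, 20.f, id, id, 1.5f, -0.5f, &fx, &fy);
  std::printf("augmented flow = (%g, %g)\n", fx, fy);   // expect (1.5, -0.5)
  return 0;
}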
09a1cc9b8747498a35d14bdcb6c3f70399b3ae04.hip
// !!! This is a file automatically generated by hipify!!! //a######################################################### //a## 2D Acoustic VTI Medium RTM //a## Ps : P + sv wave and get rid of sv //a## GPU(CUDA) ,poynting adcigs //a## //a##/*a*************************** //a##Function for VTI medium modeling,2017.2.13 //a## //a## Ps: the function of modeling following: //a## //a## du/dt=1/rho*dp/dx , //a## dw/dt=1/rho*dq/dz , //a## dp/dt=rho*vpx^2*du/dx+rho*vp*vpn*dw/dz , //a## dq/dt=rho*vp*vpn*du/dx+rho*vp^2*dw/dz , //a## vpx^2=vp^2*(1+2*epsilon); //a## vpn^2=vp^2*(1+2*delta); //a##*********a*******************/ //a## //a## Rong Tao //a## 2017.2.15 //a######################################################### #include<stdio.h> #include<malloc.h> #include<math.h> #include<stdlib.h> #include <string.h> #include <hip/hip_runtime.h> #define pi 3.141592653 #define mm 4 //__constant__ float c[mm]={1.125,-0.04166667};/*mm==2*/ //__constant__ float c[mm]={1.1718750,-0.065104167,0.0046875};/*mm==3*/ __constant__ float c[mm]={1.196289,-0.0797526,0.009570313,-0.0006975447};/*mm==4*/ //__constant__ float c[mm]={1.211243,-0.08972168,0.01384277,-0.00176566,0.0001186795};/*mm==5*/ __device__ float d0; //a################################################################################ void check_gpu_error (const char *msg) /*< check GPU errors >*/ { hipError_t err = hipGetLastError (); if (hipSuccess != err) { printf("Cuda error: %s: %s\n", msg, hipGetErrorString (err)); exit(0); } } /*************func**************/ void laplace_filter(int adj, int nz, int nx, float *in, float *out) /*< linear operator, come from Madagascar Mlaplac2>*/ { int iz,ix,j; for (j=0;j<nx*nz;j++) out[j]=0.0; for (ix=0; ix < nx; ix++) { for (iz=0; iz < nz; iz++) { j = iz+ix*nz; if (iz > 0) { if (adj) { out[j-1] -= in[j]; out[j] += in[j]; } else { out[j] += in[j] - in[j-1]; } } if (iz < nz-1) { if (adj) { out[j+1] -= in[j]; out[j] += in[j]; } else { out[j] += in[j] - in[j+1]; } } if (ix > 0) { if (adj) { out[j-nz] -= in[j]; out[j] += in[j]; } else { out[j] += in[j] - in[j-nz]; } } if (ix < nx-1) { if (adj) { out[j+nz] -= in[j]; out[j] += in[j]; } else { out[j] += in[j] - in[j+nz]; } } } } } /*************func**************/ __global__ void add_source(float pfac,float xsn,float zsn,int nx,int nz,int nnx,int nnz,float dt,float t, float favg,int wtype,int npml,int is,int ds,float *P,float *Q) /*< generate ricker wavelet with time deley >*/ { int ixs,izs; float x_,xx_,tdelay,ts,source=0.0,fs; tdelay=1.0/favg; ts=t-tdelay; fs=xsn+(is-1)*ds; if(wtype==1)//ricker wavelet { x_=favg*ts; xx_=x_*x_; source=(1-2*pi*pi*(xx_))*exp(-(pi*pi*xx_)); }else if(wtype==2){//derivative of gaussian x_=(-4)*favg*favg*pi*pi/log(0.1); source=(-2)*pi*pi*ts*exp(-x_*ts*ts); }else if(wtype==3){//derivative of gaussian x_=(-1)*favg*favg*pi*pi/log(0.1); source=exp(-x_*ts*ts); } if(t<=2*tdelay) { ixs = (int)(fs+0.5)+npml-1; izs = (int)(zsn+0.5)+npml-1; P[ixs*nnz+izs]+=pfac*source; Q[ixs*nnz+izs]+=pfac*source; } } /*******************func*********************/ __global__ void update_vel(int nx,int nz,int nnx,int nnz,int npml,float dt,float dx,float dz, float *u0,float *w0,float *u1,float *w1,float *P,float *Q, float *coffx1,float *coffx2,float *coffz1,float *coffz2) { int id=threadIdx.x+blockDim.x*blockIdx.x; int ix,iz,im; float dtx,dtz,xx,zz; ix=id/nnz; iz=id%nnz; dtx=dt/dx; dtz=dt/dz; if(id>=mm&&id<nnx*nnz-mm) { if(ix>=mm&&ix<(nnx-mm)&&iz>=mm&&iz<(nnz-mm)) { xx=0.0; zz=0.0; for(im=0;im<mm;im++) { xx+=c[im]*(P[id+(im+1)*nnz]-P[id-im*nnz]); zz+=c[im]*(Q[id+im+1] 
-Q[id-im]); } u1[id]=coffx2[ix]*u0[id]-coffx1[ix]*dtx*xx; w1[id]=coffz2[iz]*w0[id]-coffz1[iz]*dtz*zz; } } } /*******************func***********************/ __global__ void update_stress(int nx,int nz,int nnx,int nnz,float dt,float dx,float dz, float *u1,float *w1,float *P,float *Q,float *vp,int npml, float *px1,float *px0,float *pz1,float *pz0,float *qx1,float *qx0,float *qz1,float *qz0, float *acoffx1,float *acoffx2,float *acoffz1,float *acoffz2, float *delta,float *epsilon,int fs,int ds,int zs,int is,bool SV) { int id=threadIdx.x+blockDim.x*blockIdx.x; int im,ix,iz,rx,rz,R=15,r=5; float dtx,dtz, xx,zz,ee,dd; ix=id/nnz; iz=id%nnz; dtx=dt/dx; dtz=dt/dz; if(id>=mm&&id<nnx*nnz-mm) { /************************i****************************************/ /************************iso circle start*************************/ rx=ix-(fs+(is-1)*ds+npml); rz=iz-(zs+npml); if(SV){ if((rx*rx+rz*rz)<=R*R){ if((rx*rx+rz*rz)<=r*r){ ee = 0.0; dd = 0.0; }else{ ee = 0.5*(1-cos(pi*((sqrtf(rx*rx+rz*rz)-r)*4.0/(R*3.0-1))))*epsilon[id]; dd = 0.5*(1-cos(pi*((sqrtf(rx*rx+rz*rz)-r)*4.0/(R*3.0-1))))*delta[id]; } }else{ ee=epsilon[id]; dd=delta[id]; } }else{ ee=epsilon[id]; dd=delta[id]; } /************************ iso circle end *************************/ /************************i****************************************/ if(ix>=mm&&ix<(nnx-mm)&&iz>=mm&&iz<(nnz-mm)) { xx=0.0; zz=0.0; for(im=0;im<mm;im++) { xx+=c[im]*(u1[id+im*nnz]-u1[id-(im+1)*nnz]); zz+=c[im]*(w1[id+im] -w1[id-im-1]); } px1[id]=acoffx2[ix]*px0[id]-acoffx1[ix]*vp[id]*vp[id]*(1+2*ee)*dtx*xx; pz1[id]=acoffz2[iz]*pz0[id]-acoffz1[iz]*vp[id]*vp[id]*sqrtf(1+2*dd)*dtz*zz; qx1[id]=acoffx2[ix]*qx0[id]-acoffx1[ix]*vp[id]*vp[id]*sqrtf(1+2*dd)*dtx*xx; qz1[id]=acoffz2[iz]*qz0[id]-acoffz1[iz]*vp[id]*vp[id]*dtz*zz; P[id]=px1[id]+pz1[id]; Q[id]=qx1[id]+qz1[id]; } } } /********************func**********************/ __global__ void get_d0(float dx,float dz,int nnx,int nnz,int npml,float *vp) { d0=10.0*vp[nnx*nnz/2]*log(100000.0)/(2.0*npml*((dx+dz)/2.0)); } /*************func*******************/ void pad_vv(int nx,int nz,int nnx,int nnz,int npml,float *ee) { int ix,iz,id; for(id=0;id<nnx*nnz;id++) { ix=id/nnz; iz=id%nnz; if(ix<npml){ ee[id]=ee[npml*nnz+iz]; //left }else if(ix>=nnx-npml){ ee[id]=ee[(nnx-npml-1)*nnz+iz];//right } } for(id=0;id<nnx*nnz;id++) { ix=id/nnz; iz=id%nnz; if(iz<npml){ ee[id]=ee[ix*nnz+npml];//up }else if(iz>=nnz-npml){ ee[id]=ee[ix*nnz+nnz-npml-1];//down } } } /*************func*******************/ void read_file(char FN1[],char FN2[],char FN3[],int nx,int nz,int nnx,int nnz,float dx,float dz,float favg,float dt, float *v,float *e,float *d,int npml) { int i,j,id; float vmax, vmin, H_min, dt_max, dxz_max, C, tmp; FILE *fp1,*fp2,*fp3; if((fp1=fopen(FN1,"rb"))==NULL){printf("error open <%s>!\n",FN1);exit(0);} if((fp2=fopen(FN2,"rb"))==NULL){printf("error open <%s>!\n",FN2);exit(0);} if((fp3=fopen(FN3,"rb"))==NULL){printf("error open <%s>!\n",FN3);exit(0);} vmin= 999999.9; vmax=-999999.9; for(i=npml;i<nx+npml;i++) { for(j=npml;j<nz+npml;j++) { id=i*nnz+j; fread(&v[id],4L,1,fp1); fread(&e[id],4L,1,fp2); fread(&d[id],4L,1,fp3); if(vmax<v[id]) vmax = v[id]; if(vmin>v[id]) vmin = v[id]; } } fclose(fp1); fclose(fp2); fclose(fp3); printf("------------------------------------\n---\n"); printf("--- Vmax=%.2f, Vmin=%.2f\n",vmax,vmin); /*********boundary*********/ pad_vv(nx,nz,nnx,nnz,npml,e); pad_vv(nx,nz,nnx,nnz,npml,d); pad_vv(nx,nz,nnx,nnz,npml,v); H_min=dx<dz?dx:dz; dt_max = 0.5*H_min/vmin; dxz_max = vmax/favg*0.2; if(dxz_max<dz||dxz_max<dx){printf("--- 
You need have to redefine DX and DZ ! \n");exit(0);} if(dt_max<dt){printf("--- You need have to redefine DT ! \n");exit(0);} if ( favg >= vmin/( 5.0*(dx>dz?dx:dz) ) || favg >= vmin/( 5.0*(dx>dz?dx:dz) ) ) {printf("--- Non-dispersion relation not satisfied! \n");exit(0);} else if ( mm == 2 ) C = 0.857; else if ( mm == 3 ) C = 0.8; else if ( mm == 4 ) C = 0.777; else if ( mm == 5 ) C = 0.759; tmp = dt*vmax*sqrtf( 1.0/(dx*dx)+1.0/(dz*dz) ); if ( tmp >= C){ printf("--- Stability condition not satisfied! tmp = %f, C = %f\n",tmp,C);exit(0);} } /*************func*******************/ __global__ void initial_coffe(float dt,int nn,float *coff1,float *coff2,float *acoff1,float *acoff2,int npml) { int id=threadIdx.x+blockDim.x*blockIdx.x; if(id<nn+2*npml) { if(id<npml) { coff1[id]=1.0/(1.0+(dt*d0*pow((npml-0.5-id)/npml,2.0))/2.0); coff2[id]=coff1[id]*(1.0-(dt*d0*pow((npml-0.5-id)/npml,2.0))/2.0); acoff1[id]=1.0/(1.0+(dt*d0*pow(((npml-id)*1.0)/npml,2.0))/2.0); acoff2[id]=acoff1[id]*(1.0-(dt*d0*pow(((npml-id)*1.0)/npml,2.0))/2.0); }else if(id>=npml&&id<npml+nn){ coff1[id]=1.0; coff2[id]=1.0; acoff1[id]=1.0; acoff2[id]=1.0; }else{ coff1[id]=1.0/(1.0+(dt*d0*pow((0.5+id-nn-npml)/npml,2.0))/2.0); coff2[id]=coff1[id]*(1.0-(dt*d0*pow((0.5+id-nn-npml)/npml,2.0))/2.0); acoff1[id]=1.0/(1.0+(dt*d0*pow(((id-nn-npml)*1.0)/npml,2.0))/2.0); acoff2[id]=acoff1[id]*(1.0-(dt*d0*pow(((id-nn-npml)*1.0)/npml,2.0))/2.0); } } } /*************func*******************/ __global__ void shot_record(int nnx, int nnz, int nx, int nz, int npml, int it, int nt, float *P, float *shot, bool flag) { int id=threadIdx.x+blockDim.x*blockIdx.x; if(id<nx) { if(flag){ shot[it+nt*id]=P[npml+nnz*(id+npml)]; }else{ P[npml+nnz*(id+npml)]=shot[it+nt*id]; } } } /*************func*******************/ __global__ void wavefield_bndr(int nnx, int nnz, int nx, int nz, int npml, int it, int nt, float *P, float *Q, float *P_bndr, float *Q_bndr, bool flag) { int id=threadIdx.x+blockDim.x*blockIdx.x; if(id<2*nx+2*nz) { if(flag)/////////////////////////////////save boundary { if(id<nx){//up P_bndr[it*(2*nx+2*nz)+id]=P[npml-1+nnz*(id+npml)]; Q_bndr[it*(2*nx+2*nz)+id]=Q[npml-1+nnz*(id+npml)]; }else if(id>=nx&&id<(2*nx)){//down P_bndr[it*(2*nx+2*nz)+id]=P[npml+nz+1+nnz*(id-nx+npml)]; Q_bndr[it*(2*nx+2*nz)+id]=Q[npml+nz+1+nnz*(id-nx+npml)]; }else if(id>=(2*nx)&&id<(2*nx+nz)){//left P_bndr[it*(2*nx+2*nz)+id]=P[id-2*nx+npml+nnz*(npml-1)]; Q_bndr[it*(2*nx+2*nz)+id]=Q[id-2*nx+npml+nnz*(npml-1)]; }else if(id>=(2*nx+nz)){//right P_bndr[it*(2*nx+2*nz)+id]=P[id-2*nx-nz+npml+nnz*(npml+nx+1)]; Q_bndr[it*(2*nx+2*nz)+id]=Q[id-2*nx-nz+npml+nnz*(npml+nx+1)]; } }else{/////////////////////////////add boundary if(id<nx){//up P[npml-1+nnz*(id+npml)]=P_bndr[it*(2*nx+2*nz)+id]; Q[npml-1+nnz*(id+npml)]=Q_bndr[it*(2*nx+2*nz)+id]; }else if(id>=nx&&id<(2*nx)){//down P[npml+nz+1+nnz*(id-nx+npml)]=P_bndr[it*(2*nx+2*nz)+id]; Q[npml+nz+1+nnz*(id-nx+npml)]=Q_bndr[it*(2*nx+2*nz)+id]; }else if(id>=(2*nx)&&id<(2*nx+nz)){//left P[id-2*nx+npml+nnz*(npml-1)]=P_bndr[it*(2*nx+2*nz)+id]; Q[id-2*nx+npml+nnz*(npml-1)]=Q_bndr[it*(2*nx+2*nz)+id]; }else if(id>=(2*nx+nz)){//right P[id-2*nx-nz+npml+nnz*(npml+nx+1)]=P_bndr[it*(2*nx+2*nz)+id]; Q[id-2*nx-nz+npml+nnz*(npml+nx+1)]=Q_bndr[it*(2*nx+2*nz)+id]; } } } } /*************func**************/ __global__ void mute_directwave(int nx,int nt,float dt,float favg, float dx,float dz,int fs,int ds,int zs,int is, float *vp,float *epsilon,float *shot,int tt) { int id=threadIdx.x+blockDim.x*blockIdx.x; int mu_t,mu_nt; float mu_x,mu_z,mu_t0; int ix=id/nt; int 
it=id%nt; if(id<nx*nt) { mu_x=dx*abs(ix-fs-(is-1)*ds); mu_z=dz*zs; mu_t0=sqrtf(pow(mu_x,2)+pow(mu_z,2))/(vp[1]*sqrtf(1+2*epsilon[1])); mu_t=(int)(2.0/(dt*favg)); mu_nt=(int)(mu_t0/dt)+mu_t+tt; if((it>(int)(mu_t0/dt)-tt)&&(it<mu_nt)) shot[id]=0.0; } } /*************func**************/ __global__ void cal_illumination(int nnx, int nnz, int nz, int npml, float *illumination, float *P, float *Q) { int id=threadIdx.x+blockDim.x*blockIdx.x; int ix=id/nz; int iz=id%nz; if(id<nnx*nnz) { illumination[id]+=P[iz+npml+nnz*(ix+npml)]*P[iz+npml+nnz*(ix+npml)] +Q[iz+npml+nnz*(ix+npml)]*Q[iz+npml+nnz*(ix+npml)]; if(illumination[id]==0)illumination[id]=1.0; } } /*************func**************/ __global__ void cal_migration(int nnx, int nnz, int nz, int npml, float *migration, float *s, float *g) { int id=threadIdx.x+blockDim.x*blockIdx.x; int ix=id/nz; int iz=id%nz; if(id<nnx*nnz) { migration[id]+=s[iz+npml+nnz*(ix+npml)]*g[iz+npml+nnz*(ix+npml)]; } } /*************func**************/ __global__ void migration_illum(int nx, int nz, int npml, float *migration, float *illumination) { int id=threadIdx.x+blockDim.x*blockIdx.x; if(id<nx*nz) { migration[id]/=illumination[id];//*illumination[id]; } } /*************func**************/ __global__ void Poynting_Adcigs(int nnz, int nx, int nz, int npml, int na, int da, int dcdp, float *adcigs, float *s_P, float *s_Q, float *s_u, float *s_w, float *g_P, float *g_Q, float *g_u, float *g_w) { int id=threadIdx.x+blockDim.x*blockIdx.x; int ix=id/nz*dcdp; int iz=id%nz; int ia=0; float Ssx=-s_P[iz+npml+nnz*(ix+npml)]*s_u[iz+npml+nnz*(ix+npml)]; float Ssz=-s_Q[iz+npml+nnz*(ix+npml)]*s_w[iz+npml+nnz*(ix+npml)]; float Sgx= g_P[iz+npml+nnz*(ix+npml)]*g_u[iz+npml+nnz*(ix+npml)]; float Sgz= g_Q[iz+npml+nnz*(ix+npml)]*g_w[iz+npml+nnz*(ix+npml)]; float b1= Ssx*Ssx + Ssz*Ssz; float b2= Sgx*Sgx + Sgz*Sgz; float a=(Ssx*Sgx + Ssz*Sgz)/(sqrtf(b1*b2)*(1 - 0.1)); if(id<nx/dcdp*nz) { if(a>=-1&&a<=1) { a=0.5*acosf(a)*180.0/pi; ia=(int)(a/(da*1.0)); if(ia<na) { adcigs[iz+nz*ia+nz*na*(id/nz)] += s_P[iz+npml+nnz*(ix+npml)]*g_P[iz+npml+nnz*(ix+npml)] *cosf(ia*pi/180.0)*cosf(ia*pi/180.0)*cosf(ia*pi/180.0); } } } } /*************func**************/ __global__ void adcigs_illum(int nx, int nz, int na, int da, int dcdp, float *adcigs, float *illumination) { int id=threadIdx.x+blockDim.x*blockIdx.x; int ix=id/(nz*na)*dcdp; int iz=id%nz; if(id<nx*nz/dcdp*na) { adcigs[id]/=illumination[iz+nz*ix];//*illumination[iz+nz*ix]; } } //a######################################################################## //a## Main Function ## //a######################################################################## int main(int argc,char *argv[]) { int is, it, nx, nz, nnx, nnz, nt, wtype, na, da, dcdp, nxa; int ns, ds, fs, zs, npml; float dx, dz, dt, t, pfac, favg; float *coffx1,*coffx2,*coffz1,*coffz2,*acoffx1,*acoffx2,*acoffz1,*acoffz2; float *v, *e, *d; float *vp, *epsilon, *delta; float *s_u0, *s_u1, *s_px0, *s_qx0, *s_px1, *s_qx1; float *s_w0, *s_w1, *s_pz0, *s_qz0, *s_pz1, *s_qz1; float *g_u0, *g_u1, *g_px0, *g_qx0, *g_px1, *g_qx1; float *g_w0, *g_w1, *g_pz0, *g_qz0, *g_pz1, *g_qz1; float *s_P, *s_Q, *g_P, *g_Q, *shot_Dev, *shot_Hos, *P_bndr, *Q_bndr; float *migration, *illumination, *adcigs; float *Atemp; bool read; clock_t start, end; /*************wavelet\boundary**************/ wtype=1;npml=20; /********** dat document ***********/ char FN1[250]={"layer_vel_601_301.dat"}; char FN2[250]={"layer_epsilon_601_301.dat"}; char FN3[250]={"layer_delta_601_301.dat"}; char FN4[250]={"layer_shot_obs.dat"}; char 
FN5[250]={"layer_shot_cal.dat"}; char FN6[250]={"layer_snap.dat"}; char FN7[250]={"layer_migration.dat"}; char FN8[250]={"layer_illumination.dat"}; char FN9[250]={"layer_adcigs.dat"}; /********* parameters *************/ read=true;/* true: read shot; flase: use right shot record */ /********* parameters *************/ nx=601; nz=301; favg=30; pfac=10.0; dx=5.0; dz=5.0; nt=3001; dt=0.0005; ns=100; fs=nx/ns/2; ds=nx/ns; zs=1; na=65; da=1; dcdp=1; /********aaa************/ FILE *fpsnap, *fpobs, *fpcal, *fpmig, *fpillum, *fpadcigs; if((fpobs=fopen(FN4,"rb"))==NULL){printf("error open <%s>!\n",FN4);exit(0);} fpcal=fopen(FN5,"wb"); fpsnap=fopen(FN6,"wb"); fpmig=fopen(FN7,"wb"); fpillum=fopen(FN8,"wb"); fpadcigs=fopen(FN9,"wb"); /*************v***************/ nnx=nx+2*npml; nnz=nz+2*npml; nxa=(int)(nx/dcdp); /************a*************/ Atemp=(float*)malloc(nz*nx/dcdp*na*sizeof(float)); v=(float*)malloc(nnz*nnx*sizeof(float)); e=(float*)malloc(nnz*nnx*sizeof(float)); d=(float*)malloc(nnz*nnx*sizeof(float)); shot_Hos=(float*)malloc(nt*nx*sizeof(float)); read_file(FN1,FN2,FN3,nx,nz,nnx,nnz,dx,dz,favg,dt,v,e,d,npml); /****************************/ hipSetDevice(0);// initialize device, default device=0; check_gpu_error("Failed to initialize device!"); /****************************/ hipMalloc(&vp, nnz*nnx*sizeof(float)); hipMalloc(&epsilon, nnz*nnx*sizeof(float)); hipMalloc(&delta, nnz*nnx*sizeof(float)); hipMemcpy(vp, v, nnz*nnx*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(epsilon, e, nnz*nnx*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(delta, d, nnz*nnx*sizeof(float), hipMemcpyHostToDevice); /****************************/ hipMalloc(&s_u0, nnz*nnx*sizeof(float)); hipMalloc(&s_u1, nnz*nnx*sizeof(float)); hipMalloc(&s_w0, nnz*nnx*sizeof(float)); hipMalloc(&s_w1, nnz*nnx*sizeof(float)); hipMalloc(&s_P, nnz*nnx*sizeof(float)); hipMalloc(&s_Q, nnz*nnx*sizeof(float)); hipMalloc(&s_px0, nnz*nnx*sizeof(float)); hipMalloc(&s_px1, nnz*nnx*sizeof(float)); hipMalloc(&s_pz0, nnz*nnx*sizeof(float)); hipMalloc(&s_pz1, nnz*nnx*sizeof(float)); hipMalloc(&s_qx0, nnz*nnx*sizeof(float)); hipMalloc(&s_qx1, nnz*nnx*sizeof(float)); hipMalloc(&s_qz0, nnz*nnx*sizeof(float)); hipMalloc(&s_qz1, nnz*nnx*sizeof(float)); hipMalloc(&g_u0, nnz*nnx*sizeof(float)); hipMalloc(&g_u1, nnz*nnx*sizeof(float)); hipMalloc(&g_w0, nnz*nnx*sizeof(float)); hipMalloc(&g_w1, nnz*nnx*sizeof(float)); hipMalloc(&g_P, nnz*nnx*sizeof(float)); hipMalloc(&g_Q, nnz*nnx*sizeof(float)); hipMalloc(&g_px0, nnz*nnx*sizeof(float)); hipMalloc(&g_px1, nnz*nnx*sizeof(float)); hipMalloc(&g_pz0, nnz*nnx*sizeof(float)); hipMalloc(&g_pz1, nnz*nnx*sizeof(float)); hipMalloc(&g_qx0, nnz*nnx*sizeof(float)); hipMalloc(&g_qx1, nnz*nnx*sizeof(float)); hipMalloc(&g_qz0, nnz*nnx*sizeof(float)); hipMalloc(&g_qz1, nnz*nnx*sizeof(float)); hipMalloc(&coffx1, nnx*sizeof(float)); hipMalloc(&coffx2, nnx*sizeof(float)); hipMalloc(&coffz1, nnz*sizeof(float)); hipMalloc(&coffz2, nnz*sizeof(float)); hipMalloc(&acoffx1, nnx*sizeof(float)); hipMalloc(&acoffx2, nnx*sizeof(float)); hipMalloc(&acoffz1, nnz*sizeof(float)); hipMalloc(&acoffz2, nnz*sizeof(float)); hipMalloc(&shot_Dev, nx*nt*sizeof(float)); hipMalloc(&P_bndr, nt*(2*nx+2*nz)*sizeof(float)); hipMalloc(&Q_bndr, nt*(2*nx+2*nz)*sizeof(float)); hipMalloc(&migration, nz*nx*sizeof(float)); hipMalloc(&illumination, nz*nx*sizeof(float)); hipMalloc(&adcigs, nz*na*nxa*sizeof(float)); /******************************/ check_gpu_error("Failed to allocate memory for variables!"); hipLaunchKernelGGL(( get_d0), dim3(1), dim3(1), 0, 0, 
dx, dz, nnx, nnz, npml, vp); hipLaunchKernelGGL(( initial_coffe), dim3((nnx+511)/512), dim3(512), 0, 0, dt,nx,coffx1,coffx2,acoffx1,acoffx2,npml); hipLaunchKernelGGL(( initial_coffe), dim3((nnz+511)/512), dim3(512), 0, 0, dt,nz,coffz1,coffz2,acoffz1,acoffz2,npml); hipMemset(migration, 0, nz*nx*sizeof(float)); hipMemset(illumination, 0, nz*nx*sizeof(float)); hipMemset(adcigs, 0, nz*na*nxa*sizeof(float)); printf("--------------------------------------------------------\n"); printf("---"); start = clock(); /**********IS Loop start*******/ for(is=1;is<=ns;is++) { printf("\n--- IS=%3d ",is); hipMemset(s_u0, 0, nnz*nnx*sizeof(float)); hipMemset(s_u1, 0, nnz*nnx*sizeof(float)); hipMemset(s_w0, 0, nnz*nnx*sizeof(float)); hipMemset(s_w1, 0, nnz*nnx*sizeof(float)); hipMemset(s_P, 0, nnz*nnx*sizeof(float)); hipMemset(s_Q, 0, nnz*nnx*sizeof(float)); hipMemset(s_px0, 0, nnz*nnx*sizeof(float)); hipMemset(s_px1, 0, nnz*nnx*sizeof(float)); hipMemset(s_pz0, 0, nnz*nnx*sizeof(float)); hipMemset(s_pz1, 0, nnz*nnx*sizeof(float)); hipMemset(s_qx0, 0, nnz*nnx*sizeof(float)); hipMemset(s_qx1, 0, nnz*nnx*sizeof(float)); hipMemset(s_qz0, 0, nnz*nnx*sizeof(float)); hipMemset(s_qz1, 0, nnz*nnx*sizeof(float)); hipMemset(g_u0, 0, nnz*nnx*sizeof(float)); hipMemset(g_u1, 0, nnz*nnx*sizeof(float)); hipMemset(g_w0, 0, nnz*nnx*sizeof(float)); hipMemset(g_w1, 0, nnz*nnx*sizeof(float)); hipMemset(g_P, 0, nnz*nnx*sizeof(float)); hipMemset(g_Q, 0, nnz*nnx*sizeof(float)); hipMemset(g_px0, 0, nnz*nnx*sizeof(float)); hipMemset(g_px1, 0, nnz*nnx*sizeof(float)); hipMemset(g_pz0, 0, nnz*nnx*sizeof(float)); hipMemset(g_pz1, 0, nnz*nnx*sizeof(float)); hipMemset(g_qx0, 0, nnz*nnx*sizeof(float)); hipMemset(g_qx1, 0, nnz*nnx*sizeof(float)); hipMemset(g_qz0, 0, nnz*nnx*sizeof(float)); hipMemset(g_qz1, 0, nnz*nnx*sizeof(float)); hipMemset(shot_Dev, 0, nt*nx*sizeof(float)); hipMemset(P_bndr, 0, nt*(2*nx+2*nz)*sizeof(float)); hipMemset(Q_bndr, 0, nt*(2*nx+2*nz)*sizeof(float)); /*a***********************************Forward*******************************************/ for(it=0,t=dt;it<nt;it++,t+=dt) { //if(it==0)printf(" > F >",is,it); /*a#####################a*/ /*a## Forward ##a*/ /*a#####################a*/ hipLaunchKernelGGL(( add_source), dim3(1),dim3(1), 0, 0, pfac,fs,zs,nx,nz,nnx,nnz,dt,t,favg,wtype,npml,is,ds,s_P,s_Q); hipLaunchKernelGGL(( update_vel), dim3((nnx*nnz+511)/512), dim3(512), 0, 0, nx,nz,nnx,nnz,npml,dt,dx,dz, s_u0,s_w0,s_u1,s_w1,s_P,s_Q,coffx1,coffx2,coffz1,coffz2); hipLaunchKernelGGL(( update_stress), dim3((nnx*nnz+511)/512), dim3(512), 0, 0, nx,nz,nnx,nnz,dt,dx,dz,s_u1,s_w1,s_P,s_Q,vp,npml, s_px1,s_px0,s_pz1,s_pz0,s_qx1,s_qx0,s_qz1,s_qz0, acoffx1,acoffx2,acoffz1,acoffz2,delta,epsilon,fs,ds,zs,is,true); s_u0=s_u1; s_w0=s_w1; s_px0=s_px1; s_pz0=s_pz1; s_qx0=s_qx1; s_qz0=s_qz1; hipLaunchKernelGGL(( shot_record), dim3((nx+511)/512), dim3(512), 0, 0, nnx, nnz, nx, nz, npml, it, nt, s_P, shot_Dev, true); hipLaunchKernelGGL(( wavefield_bndr), dim3(((2*nx+2*nz)+511)/512),dim3(512), 0, 0, nnx, nnz, nx, nz, npml, it, nt, s_P, s_Q, P_bndr, Q_bndr, true); hipLaunchKernelGGL(( cal_illumination), dim3((nx*nz+511)/512), dim3(512), 0, 0, nnx, nnz, nz, npml, illumination, s_P, s_Q); if((is==1)&&(it%300==0)) { hipMemcpy(e, s_P, nnz*nnx*sizeof(float), hipMemcpyDeviceToHost); fwrite(e,4L,nnx*nnz,fpsnap); } }//it loop end hipLaunchKernelGGL(( mute_directwave), dim3((nx*nt+511)/512), dim3(512), 0, 0, nx,nt,dt,favg,dx,dz,fs,ds,zs,is,vp,epsilon,shot_Dev,20); hipMemcpy(shot_Hos, shot_Dev, nt*nx*sizeof(float), hipMemcpyDeviceToHost); 
fseek(fpcal,(is-1)*nt*nx*sizeof(float),0); fwrite(shot_Hos,sizeof(float),nt*nx,fpcal); if(read){ fseek(fpobs,(is-1)*nt*nx*sizeof(float),0); fread(shot_Hos,sizeof(float),nt*nx,fpobs); hipMemcpy(shot_Dev, shot_Hos, nt*nx*sizeof(float), hipMemcpyHostToDevice); } /*a***********************************Backward*******************************************/ for(it=nt-1;it>=0;it--) { // if(it==0)printf(" B ",is,it); /*a#####################a*/ /*a## Reconstruction ##a*/ /*a#####################a*/ hipLaunchKernelGGL(( wavefield_bndr), dim3(((2*nx+2*nz)+511)/512),dim3(512), 0, 0, nnx, nnz, nx, nz, npml, it, nt, s_P, s_Q, P_bndr, Q_bndr, false); hipLaunchKernelGGL(( update_vel), dim3((nnx*nnz+511)/512), dim3(512), 0, 0, nx,nz,nnx,nnz,npml,dt,dx,dz, s_u0,s_w0,s_u1,s_w1,s_P,s_Q,coffx1,coffx2,coffz1,coffz2); hipLaunchKernelGGL(( update_stress), dim3((nnx*nnz+511)/512), dim3(512), 0, 0, nx,nz,nnx,nnz,dt,dx,dz,s_u1,s_w1,s_P,s_Q,vp,npml, s_px1,s_px0,s_pz1,s_pz0,s_qx1,s_qx0,s_qz1,s_qz0, acoffx1,acoffx2,acoffz1,acoffz2,delta,epsilon,fs,ds,zs,is,false); s_u0=s_u1; s_w0=s_w1; s_px0=s_px1; s_pz0=s_pz1; s_qx0=s_qx1; s_qz0=s_qz1; /* if((is==1)&&(it%300==0)) { hipMemcpy(e, s_P, nnz*nnx*sizeof(float), hipMemcpyDeviceToHost); fwrite(e,4L,nnx*nnz,fpsnap); }*/ /*a#####################a*/ /*a## Backward ##a*/ /*a#####################a*/ hipLaunchKernelGGL(( shot_record), dim3((nx+511)/512), dim3(512), 0, 0, nnx, nnz, nx, nz, npml, it, nt, g_P, shot_Dev, false); hipLaunchKernelGGL(( shot_record), dim3((nx+511)/512), dim3(512), 0, 0, nnx, nnz, nx, nz, npml, it, nt, g_Q, shot_Dev, false); hipLaunchKernelGGL(( update_vel), dim3((nnx*nnz+511)/512), dim3(512), 0, 0, nx,nz,nnx,nnz,npml,dt,dx,dz, g_u0,g_w0,g_u1,g_w1,g_P,g_Q,coffx1,coffx2,coffz1,coffz2); hipLaunchKernelGGL(( update_stress), dim3((nnx*nnz+511)/512), dim3(512), 0, 0, nx,nz,nnx,nnz,dt,dx,dz,g_u1,g_w1,g_P,g_Q,vp,npml, g_px1,g_px0,g_pz1,g_pz0,g_qx1,g_qx0,g_qz1,g_qz0, acoffx1,acoffx2,acoffz1,acoffz2,delta,epsilon,fs,ds,zs,is,false); g_u0=g_u1; g_w0=g_w1; g_px0=g_px1; g_pz0=g_pz1; g_qx0=g_qx1; g_qz0=g_qz1; /* if((is==1)&&(it%300==0)) { hipMemcpy(e, g_P, nnz*nnx*sizeof(float), hipMemcpyDeviceToHost); fwrite(e,4L,nnx*nnz,fpsnap); }*/ hipLaunchKernelGGL(( cal_migration), dim3((nx*nz+511)/512), dim3(512), 0, 0, nnx, nnz, nz, npml, migration, s_P, g_P); hipLaunchKernelGGL(( Poynting_Adcigs), dim3((nxa*nz+511)/512), dim3(512), 0, 0, nnz, nx, nz, npml, na, da, dcdp, adcigs, s_P, s_Q, s_u0, s_w0, g_P, g_Q, g_u0, g_w0); }//it loop end }//is loop end hipLaunchKernelGGL(( migration_illum), dim3((nx*nz+511)/512), dim3(512), 0, 0, nx, nz, npml, migration, illumination); hipLaunchKernelGGL(( adcigs_illum), dim3((nxa*nz*na+511)/512), dim3(512), 0, 0, nx, nz, na, da, dcdp, adcigs, illumination); hipMemcpy(e, migration, nz*nx*sizeof(float), hipMemcpyDeviceToHost); laplace_filter(1,nz,nx,e,d); fwrite(d,sizeof(float),nx*nz,fpmig); hipMemcpy(e, illumination, nz*nx*sizeof(float), hipMemcpyDeviceToHost); fwrite(e,sizeof(float),nx*nz,fpillum); hipMemcpy(Atemp, adcigs, nz*nxa*na*sizeof(float), hipMemcpyDeviceToHost); fwrite(Atemp,sizeof(float),nz*nxa*na,fpadcigs); end = clock(); /*********IS Loop end*********/ printf("\n--- Complete!!!!!!!!! 
\n"); printf("total %d shots: %f (min)\n", ns, ((float)(end-start))/60.0/CLOCKS_PER_SEC); /***********close************/ fclose(fpsnap); fclose(fpobs); fclose(fpmig); fclose(fpillum); fclose(fpadcigs); /***********free*************/ hipFree(coffx1); hipFree(coffx2); hipFree(coffz1); hipFree(coffz2); hipFree(acoffx1); hipFree(acoffx2); hipFree(acoffz1); hipFree(acoffz2); hipFree(s_u0); hipFree(s_u1); hipFree(s_w0); hipFree(s_w1); hipFree(s_P); hipFree(s_Q); hipFree(s_px0); hipFree(s_px1); hipFree(s_pz0); hipFree(s_pz1); hipFree(s_qx0); hipFree(s_qx1); hipFree(s_qz0); hipFree(s_qz1); hipFree(g_u0); hipFree(g_u1); hipFree(g_w0); hipFree(g_w1); hipFree(g_P); hipFree(g_Q); hipFree(g_px0); hipFree(g_px1); hipFree(g_pz0); hipFree(g_pz1); hipFree(g_qx0); hipFree(g_qx1); hipFree(g_qz0); hipFree(g_qz1); hipFree(shot_Dev); hipFree(P_bndr); hipFree(Q_bndr); hipFree(migration); hipFree(illumination); hipFree(adcigs); /***************host free*****************/ free(v); free(e); free(d); free(shot_Hos); free(Atemp); }
09a1cc9b8747498a35d14bdcb6c3f70399b3ae04.cu
//a######################################################### //a## 2D Acoustic VTI Medium RTM //a## Ps : P + sv wave and get rid of sv //a## GPU(CUDA) ,poynting adcigs //a## //a##/*a*************************** //a##Function for VTI medium modeling,2017.2.13 //a## //a## Ps: the function of modeling following: //a## //a## du/dt=1/rho*dp/dx , //a## dw/dt=1/rho*dq/dz , //a## dp/dt=rho*vpx^2*du/dx+rho*vp*vpn*dw/dz , //a## dq/dt=rho*vp*vpn*du/dx+rho*vp^2*dw/dz , //a## vpx^2=vp^2*(1+2*epsilon); //a## vpn^2=vp^2*(1+2*delta); //a##*********a*******************/ //a## //a## Rong Tao //a## 2017.2.15 //a######################################################### #include<stdio.h> #include<malloc.h> #include<math.h> #include<stdlib.h> #include <string.h> #include <cuda_runtime.h> #define pi 3.141592653 #define mm 4 //__constant__ float c[mm]={1.125,-0.04166667};/*mm==2*/ //__constant__ float c[mm]={1.1718750,-0.065104167,0.0046875};/*mm==3*/ __constant__ float c[mm]={1.196289,-0.0797526,0.009570313,-0.0006975447};/*mm==4*/ //__constant__ float c[mm]={1.211243,-0.08972168,0.01384277,-0.00176566,0.0001186795};/*mm==5*/ __device__ float d0; //a################################################################################ void check_gpu_error (const char *msg) /*< check GPU errors >*/ { cudaError_t err = cudaGetLastError (); if (cudaSuccess != err) { printf("Cuda error: %s: %s\n", msg, cudaGetErrorString (err)); exit(0); } } /*************func**************/ void laplace_filter(int adj, int nz, int nx, float *in, float *out) /*< linear operator, come from Madagascar Mlaplac2>*/ { int iz,ix,j; for (j=0;j<nx*nz;j++) out[j]=0.0; for (ix=0; ix < nx; ix++) { for (iz=0; iz < nz; iz++) { j = iz+ix*nz; if (iz > 0) { if (adj) { out[j-1] -= in[j]; out[j] += in[j]; } else { out[j] += in[j] - in[j-1]; } } if (iz < nz-1) { if (adj) { out[j+1] -= in[j]; out[j] += in[j]; } else { out[j] += in[j] - in[j+1]; } } if (ix > 0) { if (adj) { out[j-nz] -= in[j]; out[j] += in[j]; } else { out[j] += in[j] - in[j-nz]; } } if (ix < nx-1) { if (adj) { out[j+nz] -= in[j]; out[j] += in[j]; } else { out[j] += in[j] - in[j+nz]; } } } } } /*************func**************/ __global__ void add_source(float pfac,float xsn,float zsn,int nx,int nz,int nnx,int nnz,float dt,float t, float favg,int wtype,int npml,int is,int ds,float *P,float *Q) /*< generate ricker wavelet with time deley >*/ { int ixs,izs; float x_,xx_,tdelay,ts,source=0.0,fs; tdelay=1.0/favg; ts=t-tdelay; fs=xsn+(is-1)*ds; if(wtype==1)//ricker wavelet { x_=favg*ts; xx_=x_*x_; source=(1-2*pi*pi*(xx_))*exp(-(pi*pi*xx_)); }else if(wtype==2){//derivative of gaussian x_=(-4)*favg*favg*pi*pi/log(0.1); source=(-2)*pi*pi*ts*exp(-x_*ts*ts); }else if(wtype==3){//derivative of gaussian x_=(-1)*favg*favg*pi*pi/log(0.1); source=exp(-x_*ts*ts); } if(t<=2*tdelay) { ixs = (int)(fs+0.5)+npml-1; izs = (int)(zsn+0.5)+npml-1; P[ixs*nnz+izs]+=pfac*source; Q[ixs*nnz+izs]+=pfac*source; } } /*******************func*********************/ __global__ void update_vel(int nx,int nz,int nnx,int nnz,int npml,float dt,float dx,float dz, float *u0,float *w0,float *u1,float *w1,float *P,float *Q, float *coffx1,float *coffx2,float *coffz1,float *coffz2) { int id=threadIdx.x+blockDim.x*blockIdx.x; int ix,iz,im; float dtx,dtz,xx,zz; ix=id/nnz; iz=id%nnz; dtx=dt/dx; dtz=dt/dz; if(id>=mm&&id<nnx*nnz-mm) { if(ix>=mm&&ix<(nnx-mm)&&iz>=mm&&iz<(nnz-mm)) { xx=0.0; zz=0.0; for(im=0;im<mm;im++) { xx+=c[im]*(P[id+(im+1)*nnz]-P[id-im*nnz]); zz+=c[im]*(Q[id+im+1] -Q[id-im]); } u1[id]=coffx2[ix]*u0[id]-coffx1[ix]*dtx*xx; 
w1[id]=coffz2[iz]*w0[id]-coffz1[iz]*dtz*zz; } } } /*******************func***********************/ __global__ void update_stress(int nx,int nz,int nnx,int nnz,float dt,float dx,float dz, float *u1,float *w1,float *P,float *Q,float *vp,int npml, float *px1,float *px0,float *pz1,float *pz0,float *qx1,float *qx0,float *qz1,float *qz0, float *acoffx1,float *acoffx2,float *acoffz1,float *acoffz2, float *delta,float *epsilon,int fs,int ds,int zs,int is,bool SV) { int id=threadIdx.x+blockDim.x*blockIdx.x; int im,ix,iz,rx,rz,R=15,r=5; float dtx,dtz, xx,zz,ee,dd; ix=id/nnz; iz=id%nnz; dtx=dt/dx; dtz=dt/dz; if(id>=mm&&id<nnx*nnz-mm) { /************************i****************************************/ /************************iso circle start*************************/ rx=ix-(fs+(is-1)*ds+npml); rz=iz-(zs+npml); if(SV){ if((rx*rx+rz*rz)<=R*R){ if((rx*rx+rz*rz)<=r*r){ ee = 0.0; dd = 0.0; }else{ ee = 0.5*(1-cos(pi*((sqrtf(rx*rx+rz*rz)-r)*4.0/(R*3.0-1))))*epsilon[id]; dd = 0.5*(1-cos(pi*((sqrtf(rx*rx+rz*rz)-r)*4.0/(R*3.0-1))))*delta[id]; } }else{ ee=epsilon[id]; dd=delta[id]; } }else{ ee=epsilon[id]; dd=delta[id]; } /************************ iso circle end *************************/ /************************i****************************************/ if(ix>=mm&&ix<(nnx-mm)&&iz>=mm&&iz<(nnz-mm)) { xx=0.0; zz=0.0; for(im=0;im<mm;im++) { xx+=c[im]*(u1[id+im*nnz]-u1[id-(im+1)*nnz]); zz+=c[im]*(w1[id+im] -w1[id-im-1]); } px1[id]=acoffx2[ix]*px0[id]-acoffx1[ix]*vp[id]*vp[id]*(1+2*ee)*dtx*xx; pz1[id]=acoffz2[iz]*pz0[id]-acoffz1[iz]*vp[id]*vp[id]*sqrtf(1+2*dd)*dtz*zz; qx1[id]=acoffx2[ix]*qx0[id]-acoffx1[ix]*vp[id]*vp[id]*sqrtf(1+2*dd)*dtx*xx; qz1[id]=acoffz2[iz]*qz0[id]-acoffz1[iz]*vp[id]*vp[id]*dtz*zz; P[id]=px1[id]+pz1[id]; Q[id]=qx1[id]+qz1[id]; } } } /********************func**********************/ __global__ void get_d0(float dx,float dz,int nnx,int nnz,int npml,float *vp) { d0=10.0*vp[nnx*nnz/2]*log(100000.0)/(2.0*npml*((dx+dz)/2.0)); } /*************func*******************/ void pad_vv(int nx,int nz,int nnx,int nnz,int npml,float *ee) { int ix,iz,id; for(id=0;id<nnx*nnz;id++) { ix=id/nnz; iz=id%nnz; if(ix<npml){ ee[id]=ee[npml*nnz+iz]; //left }else if(ix>=nnx-npml){ ee[id]=ee[(nnx-npml-1)*nnz+iz];//right } } for(id=0;id<nnx*nnz;id++) { ix=id/nnz; iz=id%nnz; if(iz<npml){ ee[id]=ee[ix*nnz+npml];//up }else if(iz>=nnz-npml){ ee[id]=ee[ix*nnz+nnz-npml-1];//down } } } /*************func*******************/ void read_file(char FN1[],char FN2[],char FN3[],int nx,int nz,int nnx,int nnz,float dx,float dz,float favg,float dt, float *v,float *e,float *d,int npml) { int i,j,id; float vmax, vmin, H_min, dt_max, dxz_max, C, tmp; FILE *fp1,*fp2,*fp3; if((fp1=fopen(FN1,"rb"))==NULL){printf("error open <%s>!\n",FN1);exit(0);} if((fp2=fopen(FN2,"rb"))==NULL){printf("error open <%s>!\n",FN2);exit(0);} if((fp3=fopen(FN3,"rb"))==NULL){printf("error open <%s>!\n",FN3);exit(0);} vmin= 999999.9; vmax=-999999.9; for(i=npml;i<nx+npml;i++) { for(j=npml;j<nz+npml;j++) { id=i*nnz+j; fread(&v[id],4L,1,fp1); fread(&e[id],4L,1,fp2); fread(&d[id],4L,1,fp3); if(vmax<v[id]) vmax = v[id]; if(vmin>v[id]) vmin = v[id]; } } fclose(fp1); fclose(fp2); fclose(fp3); printf("------------------------------------\n---\n"); printf("--- Vmax=%.2f, Vmin=%.2f\n",vmax,vmin); /*********boundary*********/ pad_vv(nx,nz,nnx,nnz,npml,e); pad_vv(nx,nz,nnx,nnz,npml,d); pad_vv(nx,nz,nnx,nnz,npml,v); H_min=dx<dz?dx:dz; dt_max = 0.5*H_min/vmin; dxz_max = vmax/favg*0.2; if(dxz_max<dz||dxz_max<dx){printf("--- You need have to redefine DX and DZ ! 
\n");exit(0);} if(dt_max<dt){printf("--- You need have to redefine DT ! \n");exit(0);} if ( favg >= vmin/( 5.0*(dx>dz?dx:dz) ) || favg >= vmin/( 5.0*(dx>dz?dx:dz) ) ) {printf("--- Non-dispersion relation not satisfied! \n");exit(0);} else if ( mm == 2 ) C = 0.857; else if ( mm == 3 ) C = 0.8; else if ( mm == 4 ) C = 0.777; else if ( mm == 5 ) C = 0.759; tmp = dt*vmax*sqrtf( 1.0/(dx*dx)+1.0/(dz*dz) ); if ( tmp >= C){ printf("--- Stability condition not satisfied! tmp = %f, C = %f\n",tmp,C);exit(0);} } /*************func*******************/ __global__ void initial_coffe(float dt,int nn,float *coff1,float *coff2,float *acoff1,float *acoff2,int npml) { int id=threadIdx.x+blockDim.x*blockIdx.x; if(id<nn+2*npml) { if(id<npml) { coff1[id]=1.0/(1.0+(dt*d0*pow((npml-0.5-id)/npml,2.0))/2.0); coff2[id]=coff1[id]*(1.0-(dt*d0*pow((npml-0.5-id)/npml,2.0))/2.0); acoff1[id]=1.0/(1.0+(dt*d0*pow(((npml-id)*1.0)/npml,2.0))/2.0); acoff2[id]=acoff1[id]*(1.0-(dt*d0*pow(((npml-id)*1.0)/npml,2.0))/2.0); }else if(id>=npml&&id<npml+nn){ coff1[id]=1.0; coff2[id]=1.0; acoff1[id]=1.0; acoff2[id]=1.0; }else{ coff1[id]=1.0/(1.0+(dt*d0*pow((0.5+id-nn-npml)/npml,2.0))/2.0); coff2[id]=coff1[id]*(1.0-(dt*d0*pow((0.5+id-nn-npml)/npml,2.0))/2.0); acoff1[id]=1.0/(1.0+(dt*d0*pow(((id-nn-npml)*1.0)/npml,2.0))/2.0); acoff2[id]=acoff1[id]*(1.0-(dt*d0*pow(((id-nn-npml)*1.0)/npml,2.0))/2.0); } } } /*************func*******************/ __global__ void shot_record(int nnx, int nnz, int nx, int nz, int npml, int it, int nt, float *P, float *shot, bool flag) { int id=threadIdx.x+blockDim.x*blockIdx.x; if(id<nx) { if(flag){ shot[it+nt*id]=P[npml+nnz*(id+npml)]; }else{ P[npml+nnz*(id+npml)]=shot[it+nt*id]; } } } /*************func*******************/ __global__ void wavefield_bndr(int nnx, int nnz, int nx, int nz, int npml, int it, int nt, float *P, float *Q, float *P_bndr, float *Q_bndr, bool flag) { int id=threadIdx.x+blockDim.x*blockIdx.x; if(id<2*nx+2*nz) { if(flag)/////////////////////////////////save boundary { if(id<nx){//up P_bndr[it*(2*nx+2*nz)+id]=P[npml-1+nnz*(id+npml)]; Q_bndr[it*(2*nx+2*nz)+id]=Q[npml-1+nnz*(id+npml)]; }else if(id>=nx&&id<(2*nx)){//down P_bndr[it*(2*nx+2*nz)+id]=P[npml+nz+1+nnz*(id-nx+npml)]; Q_bndr[it*(2*nx+2*nz)+id]=Q[npml+nz+1+nnz*(id-nx+npml)]; }else if(id>=(2*nx)&&id<(2*nx+nz)){//left P_bndr[it*(2*nx+2*nz)+id]=P[id-2*nx+npml+nnz*(npml-1)]; Q_bndr[it*(2*nx+2*nz)+id]=Q[id-2*nx+npml+nnz*(npml-1)]; }else if(id>=(2*nx+nz)){//right P_bndr[it*(2*nx+2*nz)+id]=P[id-2*nx-nz+npml+nnz*(npml+nx+1)]; Q_bndr[it*(2*nx+2*nz)+id]=Q[id-2*nx-nz+npml+nnz*(npml+nx+1)]; } }else{/////////////////////////////add boundary if(id<nx){//up P[npml-1+nnz*(id+npml)]=P_bndr[it*(2*nx+2*nz)+id]; Q[npml-1+nnz*(id+npml)]=Q_bndr[it*(2*nx+2*nz)+id]; }else if(id>=nx&&id<(2*nx)){//down P[npml+nz+1+nnz*(id-nx+npml)]=P_bndr[it*(2*nx+2*nz)+id]; Q[npml+nz+1+nnz*(id-nx+npml)]=Q_bndr[it*(2*nx+2*nz)+id]; }else if(id>=(2*nx)&&id<(2*nx+nz)){//left P[id-2*nx+npml+nnz*(npml-1)]=P_bndr[it*(2*nx+2*nz)+id]; Q[id-2*nx+npml+nnz*(npml-1)]=Q_bndr[it*(2*nx+2*nz)+id]; }else if(id>=(2*nx+nz)){//right P[id-2*nx-nz+npml+nnz*(npml+nx+1)]=P_bndr[it*(2*nx+2*nz)+id]; Q[id-2*nx-nz+npml+nnz*(npml+nx+1)]=Q_bndr[it*(2*nx+2*nz)+id]; } } } } /*************func**************/ __global__ void mute_directwave(int nx,int nt,float dt,float favg, float dx,float dz,int fs,int ds,int zs,int is, float *vp,float *epsilon,float *shot,int tt) { int id=threadIdx.x+blockDim.x*blockIdx.x; int mu_t,mu_nt; float mu_x,mu_z,mu_t0; int ix=id/nt; int it=id%nt; if(id<nx*nt) { 
mu_x=dx*abs(ix-fs-(is-1)*ds); mu_z=dz*zs; mu_t0=sqrtf(pow(mu_x,2)+pow(mu_z,2))/(vp[1]*sqrtf(1+2*epsilon[1])); mu_t=(int)(2.0/(dt*favg)); mu_nt=(int)(mu_t0/dt)+mu_t+tt; if((it>(int)(mu_t0/dt)-tt)&&(it<mu_nt)) shot[id]=0.0; } } /*************func**************/ __global__ void cal_illumination(int nnx, int nnz, int nz, int npml, float *illumination, float *P, float *Q) { int id=threadIdx.x+blockDim.x*blockIdx.x; int ix=id/nz; int iz=id%nz; if(id<nnx*nnz) { illumination[id]+=P[iz+npml+nnz*(ix+npml)]*P[iz+npml+nnz*(ix+npml)] +Q[iz+npml+nnz*(ix+npml)]*Q[iz+npml+nnz*(ix+npml)]; if(illumination[id]==0)illumination[id]=1.0; } } /*************func**************/ __global__ void cal_migration(int nnx, int nnz, int nz, int npml, float *migration, float *s, float *g) { int id=threadIdx.x+blockDim.x*blockIdx.x; int ix=id/nz; int iz=id%nz; if(id<nnx*nnz) { migration[id]+=s[iz+npml+nnz*(ix+npml)]*g[iz+npml+nnz*(ix+npml)]; } } /*************func**************/ __global__ void migration_illum(int nx, int nz, int npml, float *migration, float *illumination) { int id=threadIdx.x+blockDim.x*blockIdx.x; if(id<nx*nz) { migration[id]/=illumination[id];//*illumination[id]; } } /*************func**************/ __global__ void Poynting_Adcigs(int nnz, int nx, int nz, int npml, int na, int da, int dcdp, float *adcigs, float *s_P, float *s_Q, float *s_u, float *s_w, float *g_P, float *g_Q, float *g_u, float *g_w) { int id=threadIdx.x+blockDim.x*blockIdx.x; int ix=id/nz*dcdp; int iz=id%nz; int ia=0; float Ssx=-s_P[iz+npml+nnz*(ix+npml)]*s_u[iz+npml+nnz*(ix+npml)]; float Ssz=-s_Q[iz+npml+nnz*(ix+npml)]*s_w[iz+npml+nnz*(ix+npml)]; float Sgx= g_P[iz+npml+nnz*(ix+npml)]*g_u[iz+npml+nnz*(ix+npml)]; float Sgz= g_Q[iz+npml+nnz*(ix+npml)]*g_w[iz+npml+nnz*(ix+npml)]; float b1= Ssx*Ssx + Ssz*Ssz; float b2= Sgx*Sgx + Sgz*Sgz; float a=(Ssx*Sgx + Ssz*Sgz)/(sqrtf(b1*b2)*(1 - 0.1)); if(id<nx/dcdp*nz) { if(a>=-1&&a<=1) { a=0.5*acosf(a)*180.0/pi; ia=(int)(a/(da*1.0)); if(ia<na) { adcigs[iz+nz*ia+nz*na*(id/nz)] += s_P[iz+npml+nnz*(ix+npml)]*g_P[iz+npml+nnz*(ix+npml)] *cosf(ia*pi/180.0)*cosf(ia*pi/180.0)*cosf(ia*pi/180.0); } } } } /*************func**************/ __global__ void adcigs_illum(int nx, int nz, int na, int da, int dcdp, float *adcigs, float *illumination) { int id=threadIdx.x+blockDim.x*blockIdx.x; int ix=id/(nz*na)*dcdp; int iz=id%nz; if(id<nx*nz/dcdp*na) { adcigs[id]/=illumination[iz+nz*ix];//*illumination[iz+nz*ix]; } } //a######################################################################## //a## Main Function ## //a######################################################################## int main(int argc,char *argv[]) { int is, it, nx, nz, nnx, nnz, nt, wtype, na, da, dcdp, nxa; int ns, ds, fs, zs, npml; float dx, dz, dt, t, pfac, favg; float *coffx1,*coffx2,*coffz1,*coffz2,*acoffx1,*acoffx2,*acoffz1,*acoffz2; float *v, *e, *d; float *vp, *epsilon, *delta; float *s_u0, *s_u1, *s_px0, *s_qx0, *s_px1, *s_qx1; float *s_w0, *s_w1, *s_pz0, *s_qz0, *s_pz1, *s_qz1; float *g_u0, *g_u1, *g_px0, *g_qx0, *g_px1, *g_qx1; float *g_w0, *g_w1, *g_pz0, *g_qz0, *g_pz1, *g_qz1; float *s_P, *s_Q, *g_P, *g_Q, *shot_Dev, *shot_Hos, *P_bndr, *Q_bndr; float *migration, *illumination, *adcigs; float *Atemp; bool read; clock_t start, end; /*************wavelet\boundary**************/ wtype=1;npml=20; /********** dat document ***********/ char FN1[250]={"layer_vel_601_301.dat"}; char FN2[250]={"layer_epsilon_601_301.dat"}; char FN3[250]={"layer_delta_601_301.dat"}; char FN4[250]={"layer_shot_obs.dat"}; char FN5[250]={"layer_shot_cal.dat"}; 
char FN6[250]={"layer_snap.dat"}; char FN7[250]={"layer_migration.dat"}; char FN8[250]={"layer_illumination.dat"}; char FN9[250]={"layer_adcigs.dat"}; /********* parameters *************/ read=true;/* true: read shot; flase: use right shot record */ /********* parameters *************/ nx=601; nz=301; favg=30; pfac=10.0; dx=5.0; dz=5.0; nt=3001; dt=0.0005; ns=100; fs=nx/ns/2; ds=nx/ns; zs=1; na=65; da=1; dcdp=1; /********aaa************/ FILE *fpsnap, *fpobs, *fpcal, *fpmig, *fpillum, *fpadcigs; if((fpobs=fopen(FN4,"rb"))==NULL){printf("error open <%s>!\n",FN4);exit(0);} fpcal=fopen(FN5,"wb"); fpsnap=fopen(FN6,"wb"); fpmig=fopen(FN7,"wb"); fpillum=fopen(FN8,"wb"); fpadcigs=fopen(FN9,"wb"); /*************v***************/ nnx=nx+2*npml; nnz=nz+2*npml; nxa=(int)(nx/dcdp); /************a*************/ Atemp=(float*)malloc(nz*nx/dcdp*na*sizeof(float)); v=(float*)malloc(nnz*nnx*sizeof(float)); e=(float*)malloc(nnz*nnx*sizeof(float)); d=(float*)malloc(nnz*nnx*sizeof(float)); shot_Hos=(float*)malloc(nt*nx*sizeof(float)); read_file(FN1,FN2,FN3,nx,nz,nnx,nnz,dx,dz,favg,dt,v,e,d,npml); /****************************/ cudaSetDevice(0);// initialize device, default device=0; check_gpu_error("Failed to initialize device!"); /****************************/ cudaMalloc(&vp, nnz*nnx*sizeof(float)); cudaMalloc(&epsilon, nnz*nnx*sizeof(float)); cudaMalloc(&delta, nnz*nnx*sizeof(float)); cudaMemcpy(vp, v, nnz*nnx*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(epsilon, e, nnz*nnx*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(delta, d, nnz*nnx*sizeof(float), cudaMemcpyHostToDevice); /****************************/ cudaMalloc(&s_u0, nnz*nnx*sizeof(float)); cudaMalloc(&s_u1, nnz*nnx*sizeof(float)); cudaMalloc(&s_w0, nnz*nnx*sizeof(float)); cudaMalloc(&s_w1, nnz*nnx*sizeof(float)); cudaMalloc(&s_P, nnz*nnx*sizeof(float)); cudaMalloc(&s_Q, nnz*nnx*sizeof(float)); cudaMalloc(&s_px0, nnz*nnx*sizeof(float)); cudaMalloc(&s_px1, nnz*nnx*sizeof(float)); cudaMalloc(&s_pz0, nnz*nnx*sizeof(float)); cudaMalloc(&s_pz1, nnz*nnx*sizeof(float)); cudaMalloc(&s_qx0, nnz*nnx*sizeof(float)); cudaMalloc(&s_qx1, nnz*nnx*sizeof(float)); cudaMalloc(&s_qz0, nnz*nnx*sizeof(float)); cudaMalloc(&s_qz1, nnz*nnx*sizeof(float)); cudaMalloc(&g_u0, nnz*nnx*sizeof(float)); cudaMalloc(&g_u1, nnz*nnx*sizeof(float)); cudaMalloc(&g_w0, nnz*nnx*sizeof(float)); cudaMalloc(&g_w1, nnz*nnx*sizeof(float)); cudaMalloc(&g_P, nnz*nnx*sizeof(float)); cudaMalloc(&g_Q, nnz*nnx*sizeof(float)); cudaMalloc(&g_px0, nnz*nnx*sizeof(float)); cudaMalloc(&g_px1, nnz*nnx*sizeof(float)); cudaMalloc(&g_pz0, nnz*nnx*sizeof(float)); cudaMalloc(&g_pz1, nnz*nnx*sizeof(float)); cudaMalloc(&g_qx0, nnz*nnx*sizeof(float)); cudaMalloc(&g_qx1, nnz*nnx*sizeof(float)); cudaMalloc(&g_qz0, nnz*nnx*sizeof(float)); cudaMalloc(&g_qz1, nnz*nnx*sizeof(float)); cudaMalloc(&coffx1, nnx*sizeof(float)); cudaMalloc(&coffx2, nnx*sizeof(float)); cudaMalloc(&coffz1, nnz*sizeof(float)); cudaMalloc(&coffz2, nnz*sizeof(float)); cudaMalloc(&acoffx1, nnx*sizeof(float)); cudaMalloc(&acoffx2, nnx*sizeof(float)); cudaMalloc(&acoffz1, nnz*sizeof(float)); cudaMalloc(&acoffz2, nnz*sizeof(float)); cudaMalloc(&shot_Dev, nx*nt*sizeof(float)); cudaMalloc(&P_bndr, nt*(2*nx+2*nz)*sizeof(float)); cudaMalloc(&Q_bndr, nt*(2*nx+2*nz)*sizeof(float)); cudaMalloc(&migration, nz*nx*sizeof(float)); cudaMalloc(&illumination, nz*nx*sizeof(float)); cudaMalloc(&adcigs, nz*na*nxa*sizeof(float)); /******************************/ check_gpu_error("Failed to allocate memory for variables!"); get_d0<<<1, 1>>>(dx, dz, nnx, nnz, 
npml, vp); initial_coffe<<<(nnx+511)/512, 512>>>(dt,nx,coffx1,coffx2,acoffx1,acoffx2,npml); initial_coffe<<<(nnz+511)/512, 512>>>(dt,nz,coffz1,coffz2,acoffz1,acoffz2,npml); cudaMemset(migration, 0, nz*nx*sizeof(float)); cudaMemset(illumination, 0, nz*nx*sizeof(float)); cudaMemset(adcigs, 0, nz*na*nxa*sizeof(float)); printf("--------------------------------------------------------\n"); printf("---"); start = clock(); /**********IS Loop start*******/ for(is=1;is<=ns;is++) { printf("\n--- IS=%3d ",is); cudaMemset(s_u0, 0, nnz*nnx*sizeof(float)); cudaMemset(s_u1, 0, nnz*nnx*sizeof(float)); cudaMemset(s_w0, 0, nnz*nnx*sizeof(float)); cudaMemset(s_w1, 0, nnz*nnx*sizeof(float)); cudaMemset(s_P, 0, nnz*nnx*sizeof(float)); cudaMemset(s_Q, 0, nnz*nnx*sizeof(float)); cudaMemset(s_px0, 0, nnz*nnx*sizeof(float)); cudaMemset(s_px1, 0, nnz*nnx*sizeof(float)); cudaMemset(s_pz0, 0, nnz*nnx*sizeof(float)); cudaMemset(s_pz1, 0, nnz*nnx*sizeof(float)); cudaMemset(s_qx0, 0, nnz*nnx*sizeof(float)); cudaMemset(s_qx1, 0, nnz*nnx*sizeof(float)); cudaMemset(s_qz0, 0, nnz*nnx*sizeof(float)); cudaMemset(s_qz1, 0, nnz*nnx*sizeof(float)); cudaMemset(g_u0, 0, nnz*nnx*sizeof(float)); cudaMemset(g_u1, 0, nnz*nnx*sizeof(float)); cudaMemset(g_w0, 0, nnz*nnx*sizeof(float)); cudaMemset(g_w1, 0, nnz*nnx*sizeof(float)); cudaMemset(g_P, 0, nnz*nnx*sizeof(float)); cudaMemset(g_Q, 0, nnz*nnx*sizeof(float)); cudaMemset(g_px0, 0, nnz*nnx*sizeof(float)); cudaMemset(g_px1, 0, nnz*nnx*sizeof(float)); cudaMemset(g_pz0, 0, nnz*nnx*sizeof(float)); cudaMemset(g_pz1, 0, nnz*nnx*sizeof(float)); cudaMemset(g_qx0, 0, nnz*nnx*sizeof(float)); cudaMemset(g_qx1, 0, nnz*nnx*sizeof(float)); cudaMemset(g_qz0, 0, nnz*nnx*sizeof(float)); cudaMemset(g_qz1, 0, nnz*nnx*sizeof(float)); cudaMemset(shot_Dev, 0, nt*nx*sizeof(float)); cudaMemset(P_bndr, 0, nt*(2*nx+2*nz)*sizeof(float)); cudaMemset(Q_bndr, 0, nt*(2*nx+2*nz)*sizeof(float)); /*a***********************************Forward*******************************************/ for(it=0,t=dt;it<nt;it++,t+=dt) { //if(it==0)printf(" > F >",is,it); /*a#####################a*/ /*a## Forward ##a*/ /*a#####################a*/ add_source<<<1,1>>>(pfac,fs,zs,nx,nz,nnx,nnz,dt,t,favg,wtype,npml,is,ds,s_P,s_Q); update_vel<<<(nnx*nnz+511)/512, 512>>>(nx,nz,nnx,nnz,npml,dt,dx,dz, s_u0,s_w0,s_u1,s_w1,s_P,s_Q,coffx1,coffx2,coffz1,coffz2); update_stress<<<(nnx*nnz+511)/512, 512>>>(nx,nz,nnx,nnz,dt,dx,dz,s_u1,s_w1,s_P,s_Q,vp,npml, s_px1,s_px0,s_pz1,s_pz0,s_qx1,s_qx0,s_qz1,s_qz0, acoffx1,acoffx2,acoffz1,acoffz2,delta,epsilon,fs,ds,zs,is,true); s_u0=s_u1; s_w0=s_w1; s_px0=s_px1; s_pz0=s_pz1; s_qx0=s_qx1; s_qz0=s_qz1; shot_record<<<(nx+511)/512, 512>>>(nnx, nnz, nx, nz, npml, it, nt, s_P, shot_Dev, true); wavefield_bndr<<<((2*nx+2*nz)+511)/512,512>>>(nnx, nnz, nx, nz, npml, it, nt, s_P, s_Q, P_bndr, Q_bndr, true); cal_illumination<<<(nx*nz+511)/512, 512>>>(nnx, nnz, nz, npml, illumination, s_P, s_Q); if((is==1)&&(it%300==0)) { cudaMemcpy(e, s_P, nnz*nnx*sizeof(float), cudaMemcpyDeviceToHost); fwrite(e,4L,nnx*nnz,fpsnap); } }//it loop end mute_directwave<<<(nx*nt+511)/512, 512>>>(nx,nt,dt,favg,dx,dz,fs,ds,zs,is,vp,epsilon,shot_Dev,20); cudaMemcpy(shot_Hos, shot_Dev, nt*nx*sizeof(float), cudaMemcpyDeviceToHost); fseek(fpcal,(is-1)*nt*nx*sizeof(float),0); fwrite(shot_Hos,sizeof(float),nt*nx,fpcal); if(read){ fseek(fpobs,(is-1)*nt*nx*sizeof(float),0); fread(shot_Hos,sizeof(float),nt*nx,fpobs); cudaMemcpy(shot_Dev, shot_Hos, nt*nx*sizeof(float), cudaMemcpyHostToDevice); } 
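/* Note on the pointer hand-off used in the time loops above and below (s_u0=s_u1;
   s_px0=s_px1; and so on): after the first time step both names refer to the same
   device buffer, so the update kernels effectively run in place. That appears harmless
   here, because u1[id]/px1[id] only read the corresponding u0[id]/px0[id] element, but
   the buffer originally allocated for each *0 array is orphaned, and the paired
   cudaFree(s_u0); cudaFree(s_u1); calls at the end of main() then receive the same
   pointer twice. A conventional ping-pong swap keeps the two buffers distinct; a minimal
   sketch, assuming a scratch pointer named swap:

     float *swap;
     swap = s_u0;  s_u0  = s_u1;  s_u1  = swap;
     swap = s_w0;  s_w0  = s_w1;  s_w1  = swap;
     swap = s_px0; s_px0 = s_px1; s_px1 = swap;
     // ...and likewise for the remaining pz, qx, qz pairs.
*/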
/*a***********************************Backward*******************************************/ for(it=nt-1;it>=0;it--) { // if(it==0)printf(" B ",is,it); /*a#####################a*/ /*a## Reconstruction ##a*/ /*a#####################a*/ wavefield_bndr<<<((2*nx+2*nz)+511)/512,512>>>(nnx, nnz, nx, nz, npml, it, nt, s_P, s_Q, P_bndr, Q_bndr, false); update_vel<<<(nnx*nnz+511)/512, 512>>>(nx,nz,nnx,nnz,npml,dt,dx,dz, s_u0,s_w0,s_u1,s_w1,s_P,s_Q,coffx1,coffx2,coffz1,coffz2); update_stress<<<(nnx*nnz+511)/512, 512>>>(nx,nz,nnx,nnz,dt,dx,dz,s_u1,s_w1,s_P,s_Q,vp,npml, s_px1,s_px0,s_pz1,s_pz0,s_qx1,s_qx0,s_qz1,s_qz0, acoffx1,acoffx2,acoffz1,acoffz2,delta,epsilon,fs,ds,zs,is,false); s_u0=s_u1; s_w0=s_w1; s_px0=s_px1; s_pz0=s_pz1; s_qx0=s_qx1; s_qz0=s_qz1; /* if((is==1)&&(it%300==0)) { cudaMemcpy(e, s_P, nnz*nnx*sizeof(float), cudaMemcpyDeviceToHost); fwrite(e,4L,nnx*nnz,fpsnap); }*/ /*a#####################a*/ /*a## Backward ##a*/ /*a#####################a*/ shot_record<<<(nx+511)/512, 512>>>(nnx, nnz, nx, nz, npml, it, nt, g_P, shot_Dev, false); shot_record<<<(nx+511)/512, 512>>>(nnx, nnz, nx, nz, npml, it, nt, g_Q, shot_Dev, false); update_vel<<<(nnx*nnz+511)/512, 512>>>(nx,nz,nnx,nnz,npml,dt,dx,dz, g_u0,g_w0,g_u1,g_w1,g_P,g_Q,coffx1,coffx2,coffz1,coffz2); update_stress<<<(nnx*nnz+511)/512, 512>>>(nx,nz,nnx,nnz,dt,dx,dz,g_u1,g_w1,g_P,g_Q,vp,npml, g_px1,g_px0,g_pz1,g_pz0,g_qx1,g_qx0,g_qz1,g_qz0, acoffx1,acoffx2,acoffz1,acoffz2,delta,epsilon,fs,ds,zs,is,false); g_u0=g_u1; g_w0=g_w1; g_px0=g_px1; g_pz0=g_pz1; g_qx0=g_qx1; g_qz0=g_qz1; /* if((is==1)&&(it%300==0)) { cudaMemcpy(e, g_P, nnz*nnx*sizeof(float), cudaMemcpyDeviceToHost); fwrite(e,4L,nnx*nnz,fpsnap); }*/ cal_migration<<<(nx*nz+511)/512, 512>>>(nnx, nnz, nz, npml, migration, s_P, g_P); Poynting_Adcigs<<<(nxa*nz+511)/512, 512>>>(nnz, nx, nz, npml, na, da, dcdp, adcigs, s_P, s_Q, s_u0, s_w0, g_P, g_Q, g_u0, g_w0); }//it loop end }//is loop end migration_illum<<<(nx*nz+511)/512, 512>>>(nx, nz, npml, migration, illumination); adcigs_illum<<<(nxa*nz*na+511)/512, 512>>>(nx, nz, na, da, dcdp, adcigs, illumination); cudaMemcpy(e, migration, nz*nx*sizeof(float), cudaMemcpyDeviceToHost); laplace_filter(1,nz,nx,e,d); fwrite(d,sizeof(float),nx*nz,fpmig); cudaMemcpy(e, illumination, nz*nx*sizeof(float), cudaMemcpyDeviceToHost); fwrite(e,sizeof(float),nx*nz,fpillum); cudaMemcpy(Atemp, adcigs, nz*nxa*na*sizeof(float), cudaMemcpyDeviceToHost); fwrite(Atemp,sizeof(float),nz*nxa*na,fpadcigs); end = clock(); /*********IS Loop end*********/ printf("\n--- Complete!!!!!!!!! 
\n"); printf("total %d shots: %f (min)\n", ns, ((float)(end-start))/60.0/CLOCKS_PER_SEC); /***********close************/ fclose(fpsnap); fclose(fpobs); fclose(fpmig); fclose(fpillum); fclose(fpadcigs); /***********free*************/ cudaFree(coffx1); cudaFree(coffx2); cudaFree(coffz1); cudaFree(coffz2); cudaFree(acoffx1); cudaFree(acoffx2); cudaFree(acoffz1); cudaFree(acoffz2); cudaFree(s_u0); cudaFree(s_u1); cudaFree(s_w0); cudaFree(s_w1); cudaFree(s_P); cudaFree(s_Q); cudaFree(s_px0); cudaFree(s_px1); cudaFree(s_pz0); cudaFree(s_pz1); cudaFree(s_qx0); cudaFree(s_qx1); cudaFree(s_qz0); cudaFree(s_qz1); cudaFree(g_u0); cudaFree(g_u1); cudaFree(g_w0); cudaFree(g_w1); cudaFree(g_P); cudaFree(g_Q); cudaFree(g_px0); cudaFree(g_px1); cudaFree(g_pz0); cudaFree(g_pz1); cudaFree(g_qx0); cudaFree(g_qx1); cudaFree(g_qz0); cudaFree(g_qz1); cudaFree(shot_Dev); cudaFree(P_bndr); cudaFree(Q_bndr); cudaFree(migration); cudaFree(illumination); cudaFree(adcigs); /***************host free*****************/ free(v); free(e); free(d); free(shot_Hos); free(Atemp); }
f55ec4bbdbdaff25528b67f6296e048b24525791.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "lbm_gpu.h" #define Qcc (19) void LBMGrid::initialize( real DENSITY, real LID_VELOCITY ) { // distribution functions real* _f = new real[nx* ny *nz * Qcc]; real* _feq = new real[nx * ny * nz * Qcc]; real* _f_new = new real[nx * ny * nz * Qcc]; // density and velocity real* _rho = new real[nx * ny * nz]; real* _ux = new real[nx * ny * nz]; real* _uy = new real[nx * ny * nz]; real* _uz = new real[nx * ny * nz]; real* _ex = new real[Qcc]; real* _ey = new real[Qcc]; real* _ez = new real[Qcc]; real* _wt = new real[Qcc]; // rate-of-strain real* _sigma = new real[nx * ny * nz]; _ex[0] = 0.000000; _ey[0] = 0.000000; _ez[0] = 0.000000; _ex[1] = -1.000000; _ey[1] = 0.000000; _ez[1] = 0.000000; _ex[2] = 1.000000; _ey[2] = 0.000000; _ez[2] = 0.000000; _ex[3] = 0.000000; _ey[3] = -1.000000; _ez[3] = 0.000000; _ex[4] = 0.000000; _ey[4] = 1.000000; _ez[4] = 0.000000; _ex[5] = 0.000000; _ey[5] = 0.000000; _ez[5] = -1.000000; _ex[6] = 0.000000; _ey[6] = 0.000000; _ez[6] = 1.000000; _ex[7] = -1.000000; _ey[7] = -1.000000; _ez[7] = 0.000000; _ex[8] = -1.000000; _ey[8] = 1.000000; _ez[8] = 0.000000; _ex[9] = 1.000000; _ey[9] = -1.000000; _ez[9] = 0.000000; _ex[10] = 1.000000; _ey[10] = 1.000000; _ez[10] = 0.000000; _ex[11] = 0.000000; _ey[11] = -1.000000; _ez[11] = -1.000000; _ex[12] = 0.000000; _ey[12] = -1.000000; _ez[12] = 1.000000; _ex[13] = 0.000000; _ey[13] = 1.000000; _ez[13] = -1.000000; _ex[14] = 0.000000; _ey[14] = 1.000000; _ez[14] = 1.000000; _ex[15] = -1.000000; _ey[15] = 0.000000; _ez[15] = -1.000000; _ex[16] = -1.000000; _ey[16] = 0.000000; _ez[16] = 1.000000; _ex[17] = 1.000000; _ey[17] = 0.000000; _ez[17] = -1.000000; _ex[18] = 1.000000; _ey[18] = 0.000000; _ez[18] = 1.000000; _wt[0] = 0.333333; _wt[1] = 0.055556; _wt[2] = 0.055556; _wt[3] = 0.055556; _wt[4] = 0.055556; _wt[5] = 0.055556; _wt[6] = 0.055556; _wt[7] = 0.027778; _wt[8] = 0.027778; _wt[9] = 0.027778; _wt[10] = 0.027778; _wt[11] = 0.027778; _wt[12] = 0.027778; _wt[13] = 0.027778; _wt[14] = 0.027778; _wt[15] = 0.027778; _wt[16] = 0.027778; _wt[17] = 0.027778; _wt[18] = 0.027778; { // loop over all voxels for (int i = 0; i < nx; i++) { for (int j = 0; j < ny; j++) { for (int k = 0; k < nz; k++) { // natural index for location (i,j) int index = i * ny * nz + j * nz + k; // column-ordering // initialize density and velocity fields inside the cavity _rho[index] = DENSITY; // density _ux[index] = 0.0; // x-component of velocity _uy[index] = 0.0; // y-component of velocity _uz[index] = 0.0; // z-component of velocity cpu_ux[index] = 0.0; // x-component of velocity cpu_uy[index] = 0.0; // y-component of velocity cpu_uz[index] = 0.0; // z-component of velocity _sigma[index] = 0.0; // rate-of-strain field // specify boundary condition for the moving lid //if (j == N - 1) ux[index] = LID_VELOCITY; if ((j == ny - 1)) { _ux[index] = LID_VELOCITY; } // assign initial values for distribution functions // along various aections using equilibriu, functions for (int a = 0; a < Qcc; a++) { int index_f = a + index * Qcc; real edotu = _ex[a] * _ux[index] + _ey[a] * _uy[index] + _ez[a] * _uz[index]; real udotu = _ux[index] * _ux[index] + _uy[index] * _uy[index] + _uz[index] * _uz[index]; _feq[index_f] = _rho[index] * _wt[a] * (1.0 + 3.0 * edotu + 4.5 * edotu * edotu - 1.5 * udotu); _f[index_f] = _feq[index_f]; _f_new[index_f] = _feq[index_f]; } } } } } hipMemcpy( f, _f, sizeof( real ) * nx * ny * nz * Qcc, hipMemcpyHostToDevice ); hipMemcpy( feq, _feq, 
sizeof( real ) * nx * ny * nz * Qcc, hipMemcpyHostToDevice ); hipMemcpy( f_new, _f_new, sizeof( real ) * nx * ny * nz * Qcc, hipMemcpyHostToDevice ); hipMemcpy( rho, _rho, sizeof( real ) * nx * ny * nz , hipMemcpyHostToDevice ); hipMemcpy( ux, _ux, sizeof( real ) * nx * ny * nz , hipMemcpyHostToDevice ); hipMemcpy( uy, _uy, sizeof( real ) * nx * ny * nz , hipMemcpyHostToDevice ); hipMemcpy( uz, _uz, sizeof( real ) * nx * ny * nz , hipMemcpyHostToDevice ); hipMemcpy( sigma, _sigma, sizeof( real ) * nx * ny * nz , hipMemcpyHostToDevice ); delete[] _f; delete[] _feq; delete[] _f_new; delete[] _rho; delete[] _ux; delete[] _uy; delete[] _uz; delete[] _sigma; } void LBMGrid::step( real DENSITY, real LID_VELOCITY, const real REYNOLDS_NUMBER, bool copy ) { this->collideAndStream( DENSITY, LID_VELOCITY, REYNOLDS_NUMBER ); this->macroVar( DENSITY, LID_VELOCITY, REYNOLDS_NUMBER,copy ); }
f55ec4bbdbdaff25528b67f6296e048b24525791.cu
#include "cuda_runtime.h" #include "lbm_gpu.h" #define Qcc (19) void LBMGrid::initialize( real DENSITY, real LID_VELOCITY ) { // distribution functions real* _f = new real[nx* ny *nz * Qcc]; real* _feq = new real[nx * ny * nz * Qcc]; real* _f_new = new real[nx * ny * nz * Qcc]; // density and velocity real* _rho = new real[nx * ny * nz]; real* _ux = new real[nx * ny * nz]; real* _uy = new real[nx * ny * nz]; real* _uz = new real[nx * ny * nz]; real* _ex = new real[Qcc]; real* _ey = new real[Qcc]; real* _ez = new real[Qcc]; real* _wt = new real[Qcc]; // rate-of-strain real* _sigma = new real[nx * ny * nz]; _ex[0] = 0.000000; _ey[0] = 0.000000; _ez[0] = 0.000000; _ex[1] = -1.000000; _ey[1] = 0.000000; _ez[1] = 0.000000; _ex[2] = 1.000000; _ey[2] = 0.000000; _ez[2] = 0.000000; _ex[3] = 0.000000; _ey[3] = -1.000000; _ez[3] = 0.000000; _ex[4] = 0.000000; _ey[4] = 1.000000; _ez[4] = 0.000000; _ex[5] = 0.000000; _ey[5] = 0.000000; _ez[5] = -1.000000; _ex[6] = 0.000000; _ey[6] = 0.000000; _ez[6] = 1.000000; _ex[7] = -1.000000; _ey[7] = -1.000000; _ez[7] = 0.000000; _ex[8] = -1.000000; _ey[8] = 1.000000; _ez[8] = 0.000000; _ex[9] = 1.000000; _ey[9] = -1.000000; _ez[9] = 0.000000; _ex[10] = 1.000000; _ey[10] = 1.000000; _ez[10] = 0.000000; _ex[11] = 0.000000; _ey[11] = -1.000000; _ez[11] = -1.000000; _ex[12] = 0.000000; _ey[12] = -1.000000; _ez[12] = 1.000000; _ex[13] = 0.000000; _ey[13] = 1.000000; _ez[13] = -1.000000; _ex[14] = 0.000000; _ey[14] = 1.000000; _ez[14] = 1.000000; _ex[15] = -1.000000; _ey[15] = 0.000000; _ez[15] = -1.000000; _ex[16] = -1.000000; _ey[16] = 0.000000; _ez[16] = 1.000000; _ex[17] = 1.000000; _ey[17] = 0.000000; _ez[17] = -1.000000; _ex[18] = 1.000000; _ey[18] = 0.000000; _ez[18] = 1.000000; _wt[0] = 0.333333; _wt[1] = 0.055556; _wt[2] = 0.055556; _wt[3] = 0.055556; _wt[4] = 0.055556; _wt[5] = 0.055556; _wt[6] = 0.055556; _wt[7] = 0.027778; _wt[8] = 0.027778; _wt[9] = 0.027778; _wt[10] = 0.027778; _wt[11] = 0.027778; _wt[12] = 0.027778; _wt[13] = 0.027778; _wt[14] = 0.027778; _wt[15] = 0.027778; _wt[16] = 0.027778; _wt[17] = 0.027778; _wt[18] = 0.027778; { // loop over all voxels for (int i = 0; i < nx; i++) { for (int j = 0; j < ny; j++) { for (int k = 0; k < nz; k++) { // natural index for location (i,j) int index = i * ny * nz + j * nz + k; // column-ordering // initialize density and velocity fields inside the cavity _rho[index] = DENSITY; // density _ux[index] = 0.0; // x-component of velocity _uy[index] = 0.0; // y-component of velocity _uz[index] = 0.0; // z-component of velocity cpu_ux[index] = 0.0; // x-component of velocity cpu_uy[index] = 0.0; // y-component of velocity cpu_uz[index] = 0.0; // z-component of velocity _sigma[index] = 0.0; // rate-of-strain field // specify boundary condition for the moving lid //if (j == N - 1) ux[index] = LID_VELOCITY; if ((j == ny - 1)) { _ux[index] = LID_VELOCITY; } // assign initial values for distribution functions // along various aections using equilibriu, functions for (int a = 0; a < Qcc; a++) { int index_f = a + index * Qcc; real edotu = _ex[a] * _ux[index] + _ey[a] * _uy[index] + _ez[a] * _uz[index]; real udotu = _ux[index] * _ux[index] + _uy[index] * _uy[index] + _uz[index] * _uz[index]; _feq[index_f] = _rho[index] * _wt[a] * (1.0 + 3.0 * edotu + 4.5 * edotu * edotu - 1.5 * udotu); _f[index_f] = _feq[index_f]; _f_new[index_f] = _feq[index_f]; } } } } } cudaMemcpy( f, _f, sizeof( real ) * nx * ny * nz * Qcc, cudaMemcpyHostToDevice ); cudaMemcpy( feq, _feq, sizeof( real ) * nx * ny * nz * Qcc, cudaMemcpyHostToDevice ); 
cudaMemcpy( f_new, _f_new, sizeof( real ) * nx * ny * nz * Qcc, cudaMemcpyHostToDevice ); cudaMemcpy( rho, _rho, sizeof( real ) * nx * ny * nz , cudaMemcpyHostToDevice ); cudaMemcpy( ux, _ux, sizeof( real ) * nx * ny * nz , cudaMemcpyHostToDevice ); cudaMemcpy( uy, _uy, sizeof( real ) * nx * ny * nz , cudaMemcpyHostToDevice ); cudaMemcpy( uz, _uz, sizeof( real ) * nx * ny * nz , cudaMemcpyHostToDevice ); cudaMemcpy( sigma, _sigma, sizeof( real ) * nx * ny * nz , cudaMemcpyHostToDevice ); delete[] _f; delete[] _feq; delete[] _f_new; delete[] _rho; delete[] _ux; delete[] _uy; delete[] _uz; delete[] _sigma; } void LBMGrid::step( real DENSITY, real LID_VELOCITY, const real REYNOLDS_NUMBER, bool copy ) { this->collideAndStream( DENSITY, LID_VELOCITY, REYNOLDS_NUMBER ); this->macroVar( DENSITY, LID_VELOCITY, REYNOLDS_NUMBER,copy ); }
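// A minimal usage sketch of this class: the members used above (nx, ny, nz, the device
// arrays f/feq/f_new/rho/ux/uy/uz/sigma, the host arrays cpu_ux/cpu_uy/cpu_uz, and
// collideAndStream/macroVar) are presumably declared in lbm_gpu.h, which is not shown
// here, so the constructor signature below is an assumption and the values are only
// illustrative. A lid-driven cavity run would look roughly like:
//
//   LBMGrid grid(nx, ny, nz);                                  // hypothetical constructor
//   grid.initialize(/*DENSITY=*/1.0, /*LID_VELOCITY=*/0.05);
//   for (int t = 0; t < nsteps; ++t)
//       grid.step(1.0, 0.05, /*REYNOLDS_NUMBER=*/100.0, /*copy=*/ (t % 100 == 0));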
603ff6e026c0b144e473c90b582cc6fda9d4f4e3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //************************************************************** // Assignment ChristianDouglasA3 // Name: Chistian Douglas // GPU Programming Date: 10/14/2020 //******************************************************************* //Program finds the product of 2 arrays A and B, and assigns the //values to array C. Each value of array A is initialized by the //equation 2*i and each value of Array B is initialized by the //equation 2*i+1. All 3 arrays are set to the size of 10240 //elements by using the global constant N. The main function also //contains 2 constant integers blockTwo, which is assigned the //value of 2 and blockTen, which is assigned the value of 10, //these two constants are used to let the gpu know how many blocks //to use. A integer variable called size is also in main and is //equal to the value of N * the size of the data type float and is //used when allocating space on the cpu for the pointers a_d, b_d, //and c_d in the funtion allocArray using hipMalloc. The pointers //a_d, b_d, c_d point to float data types, after being allocated //to the size of size, the values of arrays A and B are then copied //to a_d and b_d using hipMemcpy. dimGrid's number of blocks is //defined by the variable block and dimBlock's number of threads //is set t0 1024, which is the max number of threads a block can //hold. The kernel of arrayProduct is then called, which then takes //the total number of threads multiplied by the total number of //blocks added to the thread ID's and performs the multiplication //of the elements assigned to pointers a_d and b_d, assigning them //to c_d's elements. The values in c_d are the copied to the elements //of array C and the the memory allocated to the pointers of a_d, //b_d, and c_d are freed using hipFree. The function printVal prints //the first and last value of array C. //******************************************************************* #include <stdio.h> const int N = 10240; //number of elements in arrays //******************************************************************* //Method Name: allocArray //Parameters: A, B, C, size, block //Purpose: Allocates size of size to the local pointers a_d, b_d, and //c_d and copies the values from the arrays A, B, and C using //cudaMemcyp. dimGrid size is defind by the integer block and //dimBlock is defined by the max number of threads, 1024, //arrayProduct is called, after c_d's values are copied to array C //and the pointers are freed using hipFree //******************************************************************* void allocArray(float *A, float *B, float *C, int size, int block) { float *a_d, *b_d, *c_d; hipMalloc((void**)a_d, size); hipMalloc((void**)b_d, size); hipMalloc((void**)c_d, size); hipMemcpy(a_d, A, size, hipMemcpyHostToDevice); hipMemcpy(b_d, B, size, hipMemcpyHostToDevice); dim3 dimGrid(block, 1); dim3 dimBlock(1024, 1); hipLaunchKernelGGL(( arrayProduct), dim3(dimGrid), dim3(dimBlock), 0, 0, a_d, b_d, c_d, size); hipMemcpy(C, c_d, size, hipMemcpyDeviceToHost); hipFree(a_d); hipFree(b_d); hipFree(c_d); } //******************************************************************* //Kernel Name: arrayProduct //Parameters: a_d, b_d, c_d, size //Purpose: Integer i is defined by the total number of threads //multiplied by the total number of block added to the threadID //number. 
The pointers then use the value of i to access the values //they contain and use parallel programming to obtain all the //product values of pointers a_d and b_d and assign them to c_d.
//*******************************************************************
__global__ void arrayProduct(float* a_d, float* b_d, float* c_d, int size)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	c_d[i] = a_d[i] * b_d[i]; //store the element-wise product in c_d
}
//*******************************************************************
//Method Name: printVals
//Parameters: C
//Purpose: Prints the first and last elements of array C
//*******************************************************************
void printVals(float* C)
{
	printf("C[0] %f", C[0]);
	printf("%c",'\n');
	printf(" C[10239] %f", C[N-1]);
}
int main()
{
	const int TwoBlock = 2; //used to define 2 blocks
	const int TenBlock = 10; //used to define 10 blocks
	float A[N] = {0}, B[N] = {0}, C[N] = {0}; //float arrays initialized to 0
	int size = N * sizeof(float);
	for(int i = 0; i < N; i++)
	{
		A[i] = 2 * i;
		B[i] = 2 * i + 1;
	}
	allocArray(A, B, C, size, TwoBlock); //not cyclic 2 block
	printVals(C);
	for(int i = 0; i < N; i = (i + 1) * 2048) //cyclic 2 block
	{
		allocArray(&A[i], &B[i], &C[i], size/5, TwoBlock);
	}
	printVals(C);
	allocArray(A, B, C, size, TenBlock); //10 block
	printVals(C);
	return 0;
}
603ff6e026c0b144e473c90b582cc6fda9d4f4e3.cu
//************************************************************** // Assignment ChristianDouglasA3 // Name: Chistian Douglas // GPU Programming Date: 10/14/2020 //******************************************************************* //Program finds the product of 2 arrays A and B, and assigns the //values to array C. Each value of array A is initialized by the //equation 2*i and each value of Array B is initialized by the //equation 2*i+1. All 3 arrays are set to the size of 10240 //elements by using the global constant N. The main function also //contains 2 constant integers blockTwo, which is assigned the //value of 2 and blockTen, which is assigned the value of 10, //these two constants are used to let the gpu know how many blocks //to use. A integer variable called size is also in main and is //equal to the value of N * the size of the data type float and is //used when allocating space on the cpu for the pointers a_d, b_d, //and c_d in the funtion allocArray using cudaMalloc. The pointers //a_d, b_d, c_d point to float data types, after being allocated //to the size of size, the values of arrays A and B are then copied //to a_d and b_d using cudaMemcpy. dimGrid's number of blocks is //defined by the variable block and dimBlock's number of threads //is set t0 1024, which is the max number of threads a block can //hold. The kernel of arrayProduct is then called, which then takes //the total number of threads multiplied by the total number of //blocks added to the thread ID's and performs the multiplication //of the elements assigned to pointers a_d and b_d, assigning them //to c_d's elements. The values in c_d are the copied to the elements //of array C and the the memory allocated to the pointers of a_d, //b_d, and c_d are freed using cudaFree. The function printVal prints //the first and last value of array C. //******************************************************************* #include <stdio.h> const int N = 10240; //number of elements in arrays //******************************************************************* //Method Name: allocArray //Parameters: A, B, C, size, block //Purpose: Allocates size of size to the local pointers a_d, b_d, and //c_d and copies the values from the arrays A, B, and C using //cudaMemcyp. dimGrid size is defind by the integer block and //dimBlock is defined by the max number of threads, 1024, //arrayProduct is called, after c_d's values are copied to array C //and the pointers are freed using cudaFree //******************************************************************* void allocArray(float *A, float *B, float *C, int size, int block) { float *a_d, *b_d, *c_d; cudaMalloc((void**)a_d, size); cudaMalloc((void**)b_d, size); cudaMalloc((void**)c_d, size); cudaMemcpy(a_d, A, size, cudaMemcpyHostToDevice); cudaMemcpy(b_d, B, size, cudaMemcpyHostToDevice); dim3 dimGrid(block, 1); dim3 dimBlock(1024, 1); arrayProduct<<<dimGrid, dimBlock>>>(a_d, b_d, c_d, size); cudaMemcpy(C, c_d, size, cudaMemcpyDeviceToHost); cudaFree(a_d); cudaFree(b_d); cudaFree(c_d); } //******************************************************************* //Kernel Name: arrayProduct //Parameters: a_d, b_d, c_d, size //Purpose: Integer i is defined by the total number of threads //multiplied by the total number of block added to the threadID //number. The pointers then use the value of i to access the values //they contain and uses parallel programming to obtain all the //product values of pointers a_d and b_d and assigns them to c_d. 
//*******************************************************************
__global__ void arrayProduct(float* a_d, float* b_d, float* c_d, int size)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	c_d[i] = a_d[i] * b_d[i]; //store the element-wise product in c_d
}
//*******************************************************************
//Method Name: printVals
//Parameters: C
//Purpose: Prints the first and last elements of array C
//*******************************************************************
void printVals(float* C)
{
	printf("C[0] %f", C[0]);
	printf("%c",'\n');
	printf(" C[10239] %f", C[N-1]);
}
int main()
{
	const int TwoBlock = 2; //used to define 2 blocks
	const int TenBlock = 10; //used to define 10 blocks
	float A[N] = {0}, B[N] = {0}, C[N] = {0}; //float arrays initialized to 0
	int size = N * sizeof(float);
	for(int i = 0; i < N; i++)
	{
		A[i] = 2 * i;
		B[i] = 2 * i + 1;
	}
	allocArray(A, B, C, size, TwoBlock); //not cyclic 2 block
	printVals(C);
	for(int i = 0; i < N; i = (i + 1) * 2048) //cyclic 2 block
	{
		allocArray(&A[i], &B[i], &C[i], size/5, TwoBlock);
	}
	printVals(C);
	allocArray(A, B, C, size, TenBlock); //10 block
	printVals(C);
	return 0;
}
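// A remaining issue in allocArray (in both the .hip and .cu versions above): the device
// pointers are passed to hipMalloc/cudaMalloc by value without taking their addresses,
// and arrayProduct is referenced before any declaration, so the translation unit does not
// build as ordered. A minimal corrected sketch of that path, using the same names as above
// (CUDA spelling; the HIP version substitutes hipMalloc/hipMemcpy/hipLaunchKernelGGL;
// size is the byte count computed in main, as in the original):
//
//   __global__ void arrayProduct(float* a_d, float* b_d, float* c_d, int size); // forward declaration
//
//   void allocArray(float *A, float *B, float *C, int size, int block)
//   {
//       float *a_d, *b_d, *c_d;
//       cudaMalloc((void**)&a_d, size);           // note the & — cudaMalloc writes the pointer back
//       cudaMalloc((void**)&b_d, size);
//       cudaMalloc((void**)&c_d, size);
//       cudaMemcpy(a_d, A, size, cudaMemcpyHostToDevice);
//       cudaMemcpy(b_d, B, size, cudaMemcpyHostToDevice);
//       dim3 dimGrid(block, 1);
//       dim3 dimBlock(1024, 1);
//       arrayProduct<<<dimGrid, dimBlock>>>(a_d, b_d, c_d, size);
//       cudaMemcpy(C, c_d, size, cudaMemcpyDeviceToHost);
//       cudaFree(a_d); cudaFree(b_d); cudaFree(c_d);
//   }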
db97c77480c4f3545971967440a4b891acecb558.hip
// !!! This is a file automatically generated by hipify!!! #include <kernels/gpu/div.h> #include <core/tensor_builder.h> #include <backend/name.h> #include <utils/assert.h> #include <global/operator_factory.h> #include <global/fp16_operator_factory.h> #include <core/device.h> #include <numeric> #include "device_launch_parameters.h" #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include "kernels/gpu/gpu_kernel.h" namespace ts { namespace gpu { template<typename T> static __global__ void reduce_operator_scalar_kernel(T* data, int size, const T *scalar, T maxvalue, T minvalue) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < size) { data[index] = (*scalar) == T(0) ? (data[index] > T(0) ? maxvalue : minvalue) : data[index] / (*scalar); } } template<typename T> static __global__ void reduce_operator_scalar_cross_kernel(T* data, int size, const T *scalar, T maxvalue, T minvalue) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < size) { data[index] = data[index] == T(0) ? ((*scalar) > T(0) ? maxvalue : minvalue) : (*scalar) / data[index]; } } template<typename T> static __global__ void reduce_operator_same_shape_kernel(T* data, const T*bias, int size, T maxvalue, T minvalue) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < size) { data[index] = (bias[index]) == T(0) ? (data[index] > T(0) ? maxvalue : minvalue) : data[index] / (bias[index]); } } template<typename T> static __global__ void reduce_operator_bias_kernel(T* data, int size, int step, int slice, const T* bias, int biaslen, T maxvalue, T minvalue ) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < size) { int dim = index % ( step * slice ) / (step); data[index] = (bias[dim]) == T(0) ? (data[index] > T(0) ? maxvalue: minvalue) : data[index] / (bias[dim]); } } template<typename T> static __global__ void reduce_operator_bias_cross_kernel(T* data, int size, int step, int slice, const T* bias, int biaslen, T maxvalue, T minvalue ) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < size) { int dim = index % (step * slice) / (step); data[index] = (data[index]) == T(0) ? (bias[dim] > T(0) ? maxvalue : minvalue) : bias[dim] / (data[index]); } } template<typename T> static __global__ void reduce_operator_kernel(T* out, int size, const T* lhs, const T* rhs, int *lhsshape, int *lhsweight, int *rhsshape, int *rhsweight, int *outweight, int shapelen, T maxvalue, T minvalue) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= size) return; int *ptmp = outweight + 1; int ntmp = index; int rhsindex = 0; int lhsindex = 0; int nbuff1,nbuff2; nbuff1 = nbuff2 = 0; for(int m = 0, i= shapelen - 1; i >= 0; --i, m++) { if(i > 0) { nbuff1 = ntmp / *ptmp; ntmp %= *ptmp; }else { nbuff1 = ntmp; } nbuff2 = nbuff1 % lhsshape[m]; if(m < shapelen - 1) { lhsindex += nbuff2 * lhsweight[m+1]; }else { lhsindex += nbuff2; } nbuff2 = nbuff1 % rhsshape[m]; if(m < shapelen - 1) { rhsindex += nbuff2 * rhsweight[m+1]; }else { rhsindex += nbuff2; } ++ptmp; } out[index] = (rhs[rhsindex]) == T(0) ? (lhs[lhsindex] > T(0) ? maxvalue : minvalue) : lhs[lhsindex] / (rhs[rhsindex]); } #ifdef TS_USE_CUDA_FP16 template<> __global__ void reduce_operator_scalar_kernel<half>(half* data, int size, const half *scalar, half maxvalue, half minvalue) { int index = blockDim.x * blockIdx.x + threadIdx.x; half zero = half(0.f); if (index < size) { data[index] = (*scalar) == zero ? (data[index] > zero ? 
maxvalue : minvalue) : data[index] / (*scalar); } } template<> __global__ void reduce_operator_scalar_cross_kernel<half>(half* data, int size, const half *scalar, half maxvalue, half minvalue) { int index = blockDim.x * blockIdx.x + threadIdx.x; half zero = half(0.f); if (index < size) { data[index] = data[index] == zero ? ((*scalar) > zero ? maxvalue : minvalue) : (*scalar) / data[index]; } } template<> __global__ void reduce_operator_same_shape_kernel<half>(half* data, const half* bias, int size, half maxvalue, half minvalue) { int index = blockDim.x * blockIdx.x + threadIdx.x; half zero = half(0.f); if (index < size) { data[index] = (bias[index]) == zero ? (data[index] > zero ? maxvalue : minvalue) : data[index] / (bias[index]); } } template<> __global__ void reduce_operator_bias_kernel<half>(half* data, int size, int step, int slice, const half* bias, int biaslen, half maxvalue, half minvalue) { int index = blockDim.x * blockIdx.x + threadIdx.x; half zero = half(0.f); if (index < size) { int dim = index % (step * slice) / (step); data[index] = (bias[dim]) == zero ? (data[index] > zero ? maxvalue : minvalue) : data[index] / (bias[dim]); } } template<> __global__ void reduce_operator_bias_cross_kernel<half>(half* data, int size, int step, int slice, const half* bias, int biaslen, half maxvalue, half minvalue) { int index = blockDim.x * blockIdx.x + threadIdx.x; half zero = half(0.f); if (index < size) { int dim = index % (step * slice) / (step); data[index] = (data[index]) == zero ? (bias[dim] > zero ? maxvalue : minvalue) : bias[dim] / (data[index]); } } template<> __global__ void reduce_operator_kernel<half>(half* out, int size, const half* lhs, const half* rhs, int *lhsshape, int *lhsweight, int *rhsshape, int *rhsweight, int *outweight, int shapelen, half maxvalue, half minvalue) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= size) return; half zero = half(0.f); int *ptmp = outweight + 1; int ntmp = index; int rhsindex = 0; int lhsindex = 0; int nbuff1, nbuff2; nbuff1 = nbuff2 = 0; for (int m = 0, i = shapelen - 1; i >= 0; --i, m++) { if (i > 0) { nbuff1 = ntmp / *ptmp; ntmp %= *ptmp; } else { nbuff1 = ntmp; } nbuff2 = nbuff1 % lhsshape[m]; if (m < shapelen - 1) { lhsindex += nbuff2 * lhsweight[m + 1]; } else { lhsindex += nbuff2; } nbuff2 = nbuff1 % rhsshape[m]; if (m < shapelen - 1) { rhsindex += nbuff2 * rhsweight[m + 1]; } else { rhsindex += nbuff2; } ++ptmp; } out[index] = (rhs[rhsindex]) == zero ? (lhs[lhsindex] > zero ? 
maxvalue : minvalue) : lhs[lhsindex] / (rhs[rhsindex]); } #endif template<typename T> static inline void div_gpu_compute_run(const Tensor &lhs, const Tensor &rhs, Tensor &out) { HypeShape lhs_hype(lhs.sizes()); HypeShape rhs_hype(rhs.sizes()); HypeShape out_hype(out.sizes()); auto plhs = lhs.data<T>(); auto prhs = rhs.data<T>(); auto pout = out.data<T>(); auto ncount = out.count(); int *lhsshape = nullptr; int *rhsshape = nullptr; int *lhsweight = nullptr; int *rhsweight = nullptr; int *outweight = nullptr; ///////////////////////////////////// Shape tmpshape; tmpshape.resize(1); tmpshape[0] = int32_t(lhs.sizes().size()); Tensor lhs_tensor(out.device(), INT32, tmpshape); lhsshape = lhs_tensor.data<int32_t>(); tmpshape[0] = int32_t(rhs.sizes().size()); Tensor rhs_tensor(out.device(), INT32, tmpshape); rhsshape = rhs_tensor.data<int32_t>(); tmpshape[0] = int32_t(lhs.sizes().size()); Tensor lhs_weight_tensor(out.device(), INT32, tmpshape); lhsweight = lhs_weight_tensor.data<int32_t>(); tmpshape[0] = int32_t(rhs.sizes().size()); Tensor rhs_weight_tensor(out.device(), INT32, tmpshape); rhsweight = rhs_weight_tensor.data<int32_t>(); tmpshape[0] = int32_t(out.sizes().size()); Tensor out_weight_tensor(out.device(), INT32, tmpshape); outweight = out_weight_tensor.data<int32_t>(); memcpy((void*)lhsshape, out.device(), lhs.sizes().size() * sizeof(int32_t), (void*)lhs.sizes().data(), MemoryDevice(CPU), lhs.sizes().size() * sizeof(int32_t)); memcpy((void*)rhsshape, out.device(), rhs.sizes().size() * sizeof(int32_t), (void*)rhs.sizes().data(), MemoryDevice(CPU), rhs.sizes().size() * sizeof(int32_t)); memcpy((void*)lhsweight, out.device(), lhs_hype.weight().size() * sizeof(int32_t), (void*)lhs_hype.weight().data(), MemoryDevice(CPU), lhs_hype.weight().size() * sizeof(int32_t)); memcpy((void*)rhsweight, out.device(), rhs_hype.weight().size() * sizeof(int32_t), (void*)rhs_hype.weight().data(), MemoryDevice(CPU), rhs_hype.weight().size() * sizeof(int32_t)); memcpy((void*)outweight, out.device(), out_hype.weight().size() * sizeof(int32_t), (void*)out_hype.weight().data(), MemoryDevice(CPU), out_hype.weight().size() * sizeof(int32_t)); ///////////////////////////////////// T maxvalue = std::numeric_limits<T>::max(); T minvalue = std::numeric_limits<T>::lowest(); RUN_KERNEL(reduce_operator_kernel<T>, CUDA_BLOCK(ncount, CUDA_THREAD_NUM), CUDA_THREAD_NUM, pout, ncount, plhs, prhs, lhsshape, lhsweight, rhsshape, rhsweight, outweight, int(out.sizes().size()), maxvalue, minvalue); } template<typename T> static inline void div_gpu_compute_run_scalar(const Tensor &lhs, const Tensor &rhs, Tensor &out) { auto plhs = lhs.data<T>(); auto prhs = rhs.data<T>(); auto pout = out.data<T>(); T maxvalue = std::numeric_limits<T>::max(); T minvalue = std::numeric_limits<T>::lowest(); memcpy((void*)pout, out.device(), out.count() * sizeof(T), (void*)plhs, lhs.device(), out.count() * sizeof(T)); RUN_KERNEL(reduce_operator_scalar_kernel<T>, CUDA_BLOCK(out.count(), CUDA_THREAD_NUM), CUDA_THREAD_NUM, pout, out.count(), prhs, maxvalue, minvalue); } template<typename T> static inline void div_gpu_compute_run_scalar_cross(const Tensor &lhs, const Tensor &rhs, Tensor &out) { auto plhs = lhs.data<T>(); auto prhs = rhs.data<T>(); auto pout = out.data<T>(); T maxvalue = std::numeric_limits<T>::max(); T minvalue = std::numeric_limits<T>::lowest(); memcpy((void*)pout, out.device(), out.count() * sizeof(T), (void*)prhs, rhs.device(), out.count() * sizeof(T)); RUN_KERNEL(reduce_operator_scalar_cross_kernel<T>, CUDA_BLOCK(out.count(), 
CUDA_THREAD_NUM), CUDA_THREAD_NUM, pout, out.count(), plhs, maxvalue, minvalue); } template<typename T> static inline void div_gpu_compute_run_same_shape(const Tensor &lhs, const Tensor &rhs, Tensor &out) { auto plhs = lhs.data<T>(); auto prhs = rhs.data<T>(); auto pout = out.data<T>(); T maxvalue = std::numeric_limits<T>::max(); T minvalue = std::numeric_limits<T>::lowest(); memcpy((void*)pout, out.device(), out.count() * sizeof(T), (void*)plhs, lhs.device(), out.count() * sizeof(T)); RUN_KERNEL(reduce_operator_same_shape_kernel<T>, CUDA_BLOCK(out.count(), CUDA_THREAD_NUM), CUDA_THREAD_NUM, pout, prhs, out.count(), maxvalue, minvalue); } template<typename T> static inline void div_gpu_compute_run_bias(const Tensor &lhs, const Tensor &rhs, Tensor &out, int dim) { auto plhs = lhs.data<T>(); auto prhs = rhs.data<T>(); auto pout = out.data<T>(); auto &out_shape = out.sizes(); auto number = std::accumulate(out_shape.begin(), out_shape.begin() + dim, 1, std::multiplies<int>()); auto count = std::accumulate(out_shape.begin() + dim + 1, out_shape.end(), 1, std::multiplies<int>()); auto channels = out_shape[dim]; memcpy((void*)pout, out.device(), out.count() * sizeof(T), (void*)plhs, lhs.device(), out.count() * sizeof(T)); T maxvalue = std::numeric_limits<T>::max(); T minvalue = std::numeric_limits<T>::lowest(); RUN_KERNEL(reduce_operator_bias_kernel<T>, CUDA_BLOCK(out.count(), CUDA_THREAD_NUM), CUDA_THREAD_NUM, pout, out.count(), count, channels, prhs, rhs.count(), maxvalue, minvalue); } template<typename T> static inline void div_gpu_compute_run_bias_cross(const Tensor &lhs, const Tensor &rhs, Tensor &out, int dim) { auto plhs = lhs.data<T>(); auto prhs = rhs.data<T>(); auto pout = out.data<T>(); auto &out_shape = out.sizes(); auto number = std::accumulate(out_shape.begin(), out_shape.begin() + dim, 1, std::multiplies<int>()); auto count = std::accumulate(out_shape.begin() + dim + 1, out_shape.end(), 1, std::multiplies<int>()); auto channels = out_shape[dim]; memcpy((void*)pout, out.device(), out.count() * sizeof(T), (void*)prhs, rhs.device(), out.count() * sizeof(T)); T maxvalue = std::numeric_limits<T>::max(); T minvalue = std::numeric_limits<T>::lowest(); RUN_KERNEL(reduce_operator_bias_cross_kernel<T>, CUDA_BLOCK(out.count(), CUDA_THREAD_NUM), CUDA_THREAD_NUM, pout, out.count(), count, channels, plhs, lhs.count(), maxvalue, minvalue); } void Div::reduce_with_broadcast(const Tensor &lhs, const Tensor &rhs, Tensor &out) { // Notice: the all tensor' memory device are CPU, as given in running_memory_device DTYPE dtype = out.dtype(); switch(dtype) { #define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \ case DTYPE: { div_gpu_compute_run<TYPE>(lhs, rhs, out); break; } DECLARE_COMPUTE_RUN(INT8, int8_t); DECLARE_COMPUTE_RUN(UINT8, uint8_t); DECLARE_COMPUTE_RUN(INT16, int16_t); DECLARE_COMPUTE_RUN(UINT16, uint16_t); DECLARE_COMPUTE_RUN(INT32, int32_t); DECLARE_COMPUTE_RUN(UINT32, uint32_t); DECLARE_COMPUTE_RUN(INT64, int64_t); DECLARE_COMPUTE_RUN(UINT64, uint64_t); #ifdef TS_USE_CUDA_FP16 DECLARE_COMPUTE_RUN(FLOAT16, half); #endif DECLARE_COMPUTE_RUN(FLOAT32, float); DECLARE_COMPUTE_RUN(FLOAT64, double); #undef DECLARE_COMPUTE_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } void Div::reduce_with_scalar(const Tensor &lhs, const Tensor &rhs, Tensor &out) { // Notice: the all tensor' memory device are CPU, as given in running_memory_device DTYPE dtype = out.dtype(); switch(dtype) { #define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \ case 
DTYPE: { div_gpu_compute_run_scalar<TYPE>(lhs, rhs, out); break; } DECLARE_COMPUTE_RUN(INT8, int8_t); DECLARE_COMPUTE_RUN(UINT8, uint8_t); DECLARE_COMPUTE_RUN(INT16, int16_t); DECLARE_COMPUTE_RUN(UINT16, uint16_t); DECLARE_COMPUTE_RUN(INT32, int32_t); DECLARE_COMPUTE_RUN(UINT32, uint32_t); DECLARE_COMPUTE_RUN(INT64, int64_t); DECLARE_COMPUTE_RUN(UINT64, uint64_t); #ifdef TS_USE_CUDA_FP16 DECLARE_COMPUTE_RUN(FLOAT16, half); #endif DECLARE_COMPUTE_RUN(FLOAT32, float); DECLARE_COMPUTE_RUN(FLOAT64, double); #undef DECLARE_COMPUTE_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } void Div::reduce_with_bias(const Tensor &lhs, const Tensor &rhs, Tensor &out, int dim) { // Notice: the all tensor' memory device are CPU, as given in running_memory_device DTYPE dtype = out.dtype(); switch(dtype) { #define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \ case DTYPE: { div_gpu_compute_run_bias<TYPE>(lhs, rhs, out, dim); break; } DECLARE_COMPUTE_RUN(INT8, int8_t); DECLARE_COMPUTE_RUN(UINT8, uint8_t); DECLARE_COMPUTE_RUN(INT16, int16_t); DECLARE_COMPUTE_RUN(UINT16, uint16_t); DECLARE_COMPUTE_RUN(INT32, int32_t); DECLARE_COMPUTE_RUN(UINT32, uint32_t); DECLARE_COMPUTE_RUN(INT64, int64_t); DECLARE_COMPUTE_RUN(UINT64, uint64_t); #ifdef TS_USE_CUDA_FP16 DECLARE_COMPUTE_RUN(FLOAT16, half); #endif DECLARE_COMPUTE_RUN(FLOAT32, float); DECLARE_COMPUTE_RUN(FLOAT64, double); #undef DECLARE_COMPUTE_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } void Div::reduce_with_same_shape(const Tensor &lhs, const Tensor &rhs, Tensor &out) { // Notice: the all tensor' memory device are CPU, as given in running_memory_device DTYPE dtype = out.dtype(); switch(dtype) { #define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \ case DTYPE: { div_gpu_compute_run_same_shape<TYPE>(lhs, rhs, out); break; } DECLARE_COMPUTE_RUN(INT8, int8_t); DECLARE_COMPUTE_RUN(UINT8, uint8_t); DECLARE_COMPUTE_RUN(INT16, int16_t); DECLARE_COMPUTE_RUN(UINT16, uint16_t); DECLARE_COMPUTE_RUN(INT32, int32_t); DECLARE_COMPUTE_RUN(UINT32, uint32_t); DECLARE_COMPUTE_RUN(INT64, int64_t); DECLARE_COMPUTE_RUN(UINT64, uint64_t); #ifdef TS_USE_CUDA_FP16 DECLARE_COMPUTE_RUN(FLOAT16, half); #endif DECLARE_COMPUTE_RUN(FLOAT32, float); DECLARE_COMPUTE_RUN(FLOAT64, double); #undef DECLARE_COMPUTE_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } void Div::reduce_with_scalar_cross(const Tensor &lhs, const Tensor &rhs, Tensor &out) { // Notice: the all tensor' memory device are CPU, as given in running_memory_device DTYPE dtype = out.dtype(); switch(dtype) { #define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \ case DTYPE: { div_gpu_compute_run_scalar_cross<TYPE>(lhs, rhs, out); break; } DECLARE_COMPUTE_RUN(INT8, int8_t); DECLARE_COMPUTE_RUN(UINT8, uint8_t); DECLARE_COMPUTE_RUN(INT16, int16_t); DECLARE_COMPUTE_RUN(UINT16, uint16_t); DECLARE_COMPUTE_RUN(INT32, int32_t); DECLARE_COMPUTE_RUN(UINT32, uint32_t); DECLARE_COMPUTE_RUN(INT64, int64_t); DECLARE_COMPUTE_RUN(UINT64, uint64_t); #ifdef TS_USE_CUDA_FP16 DECLARE_COMPUTE_RUN(FLOAT16, half); #endif DECLARE_COMPUTE_RUN(FLOAT32, float); DECLARE_COMPUTE_RUN(FLOAT64, double); #undef DECLARE_COMPUTE_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } void Div::reduce_with_bias_cross(const Tensor &lhs, const Tensor &rhs, Tensor &out, int dim) { // 
Notice: the all tensor' memory device are CPU, as given in running_memory_device DTYPE dtype = out.dtype(); switch(dtype) { #define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \ case DTYPE: { div_gpu_compute_run_bias_cross<TYPE>(lhs, rhs, out, dim); break; } DECLARE_COMPUTE_RUN(INT8, int8_t); DECLARE_COMPUTE_RUN(UINT8, uint8_t); DECLARE_COMPUTE_RUN(INT16, int16_t); DECLARE_COMPUTE_RUN(UINT16, uint16_t); DECLARE_COMPUTE_RUN(INT32, int32_t); DECLARE_COMPUTE_RUN(UINT32, uint32_t); DECLARE_COMPUTE_RUN(INT64, int64_t); DECLARE_COMPUTE_RUN(UINT64, uint64_t); #ifdef TS_USE_CUDA_FP16 DECLARE_COMPUTE_RUN(FLOAT16, half); #endif DECLARE_COMPUTE_RUN(FLOAT32, float); DECLARE_COMPUTE_RUN(FLOAT64, double); #undef DECLARE_COMPUTE_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } } } using namespace ts; using namespace gpu; TS_REGISTER_OPERATOR(Div, GPU, name::layer::div()) #ifdef TS_USE_CUDA_FP16 TS_REGISTER_FP16_OPERATOR(Div, ts::GPU, name::layer::div()) #endif
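// Index mapping used by reduce_operator_kernel above, restated on the host for reference.
// This is a sketch of the same broadcast addressing, assuming HypeShape::weight()[d]
// equals shape[d] * shape[d+1] * ... * shape[last] (which matches how outweight,
// lhsweight and rhsweight are indexed in the kernel): the flat output index is peeled
// into per-dimension coordinates with the output weights, each coordinate is wrapped by
// the operand's extent (so size-1 dimensions broadcast to 0), and the result is
// recomposed with the operand's weights.
//
//   static int broadcast_source_index(int out_index,
//                                     const std::vector<int> &shape,       // operand extents
//                                     const std::vector<int> &weight,      // operand weights
//                                     const std::vector<int> &out_weight) {
//       int dims = (int)shape.size();
//       int rest = out_index, idx = 0;
//       for (int m = 0; m < dims; ++m) {
//           int coord;
//           if (m + 1 < dims) { coord = rest / out_weight[m + 1]; rest %= out_weight[m + 1]; }
//           else              { coord = rest; }
//           coord %= shape[m];                     // broadcast size-1 dimensions
//           idx += (m + 1 < dims) ? coord * weight[m + 1] : coord;
//       }
//       return idx;
//   }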
db97c77480c4f3545971967440a4b891acecb558.cu
#include <kernels/gpu/div.h> #include <core/tensor_builder.h> #include <backend/name.h> #include <utils/assert.h> #include <global/operator_factory.h> #include <global/fp16_operator_factory.h> #include <core/device.h> #include <numeric> #include "device_launch_parameters.h" #include <cuda_runtime.h> #include <cuda_fp16.h> #include "kernels/gpu/gpu_kernel.h" namespace ts { namespace gpu { template<typename T> static __global__ void reduce_operator_scalar_kernel(T* data, int size, const T *scalar, T maxvalue, T minvalue) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < size) { data[index] = (*scalar) == T(0) ? (data[index] > T(0) ? maxvalue : minvalue) : data[index] / (*scalar); } } template<typename T> static __global__ void reduce_operator_scalar_cross_kernel(T* data, int size, const T *scalar, T maxvalue, T minvalue) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < size) { data[index] = data[index] == T(0) ? ((*scalar) > T(0) ? maxvalue : minvalue) : (*scalar) / data[index]; } } template<typename T> static __global__ void reduce_operator_same_shape_kernel(T* data, const T*bias, int size, T maxvalue, T minvalue) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < size) { data[index] = (bias[index]) == T(0) ? (data[index] > T(0) ? maxvalue : minvalue) : data[index] / (bias[index]); } } template<typename T> static __global__ void reduce_operator_bias_kernel(T* data, int size, int step, int slice, const T* bias, int biaslen, T maxvalue, T minvalue ) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < size) { int dim = index % ( step * slice ) / (step); data[index] = (bias[dim]) == T(0) ? (data[index] > T(0) ? maxvalue: minvalue) : data[index] / (bias[dim]); } } template<typename T> static __global__ void reduce_operator_bias_cross_kernel(T* data, int size, int step, int slice, const T* bias, int biaslen, T maxvalue, T minvalue ) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < size) { int dim = index % (step * slice) / (step); data[index] = (data[index]) == T(0) ? (bias[dim] > T(0) ? maxvalue : minvalue) : bias[dim] / (data[index]); } } template<typename T> static __global__ void reduce_operator_kernel(T* out, int size, const T* lhs, const T* rhs, int *lhsshape, int *lhsweight, int *rhsshape, int *rhsweight, int *outweight, int shapelen, T maxvalue, T minvalue) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= size) return; int *ptmp = outweight + 1; int ntmp = index; int rhsindex = 0; int lhsindex = 0; int nbuff1,nbuff2; nbuff1 = nbuff2 = 0; for(int m = 0, i= shapelen - 1; i >= 0; --i, m++) { if(i > 0) { nbuff1 = ntmp / *ptmp; ntmp %= *ptmp; }else { nbuff1 = ntmp; } nbuff2 = nbuff1 % lhsshape[m]; if(m < shapelen - 1) { lhsindex += nbuff2 * lhsweight[m+1]; }else { lhsindex += nbuff2; } nbuff2 = nbuff1 % rhsshape[m]; if(m < shapelen - 1) { rhsindex += nbuff2 * rhsweight[m+1]; }else { rhsindex += nbuff2; } ++ptmp; } out[index] = (rhs[rhsindex]) == T(0) ? (lhs[lhsindex] > T(0) ? maxvalue : minvalue) : lhs[lhsindex] / (rhs[rhsindex]); } #ifdef TS_USE_CUDA_FP16 template<> __global__ void reduce_operator_scalar_kernel<half>(half* data, int size, const half *scalar, half maxvalue, half minvalue) { int index = blockDim.x * blockIdx.x + threadIdx.x; half zero = half(0.f); if (index < size) { data[index] = (*scalar) == zero ? (data[index] > zero ? 
maxvalue : minvalue) : data[index] / (*scalar); } } template<> __global__ void reduce_operator_scalar_cross_kernel<half>(half* data, int size, const half *scalar, half maxvalue, half minvalue) { int index = blockDim.x * blockIdx.x + threadIdx.x; half zero = half(0.f); if (index < size) { data[index] = data[index] == zero ? ((*scalar) > zero ? maxvalue : minvalue) : (*scalar) / data[index]; } } template<> __global__ void reduce_operator_same_shape_kernel<half>(half* data, const half* bias, int size, half maxvalue, half minvalue) { int index = blockDim.x * blockIdx.x + threadIdx.x; half zero = half(0.f); if (index < size) { data[index] = (bias[index]) == zero ? (data[index] > zero ? maxvalue : minvalue) : data[index] / (bias[index]); } } template<> __global__ void reduce_operator_bias_kernel<half>(half* data, int size, int step, int slice, const half* bias, int biaslen, half maxvalue, half minvalue) { int index = blockDim.x * blockIdx.x + threadIdx.x; half zero = half(0.f); if (index < size) { int dim = index % (step * slice) / (step); data[index] = (bias[dim]) == zero ? (data[index] > zero ? maxvalue : minvalue) : data[index] / (bias[dim]); } } template<> __global__ void reduce_operator_bias_cross_kernel<half>(half* data, int size, int step, int slice, const half* bias, int biaslen, half maxvalue, half minvalue) { int index = blockDim.x * blockIdx.x + threadIdx.x; half zero = half(0.f); if (index < size) { int dim = index % (step * slice) / (step); data[index] = (data[index]) == zero ? (bias[dim] > zero ? maxvalue : minvalue) : bias[dim] / (data[index]); } } template<> __global__ void reduce_operator_kernel<half>(half* out, int size, const half* lhs, const half* rhs, int *lhsshape, int *lhsweight, int *rhsshape, int *rhsweight, int *outweight, int shapelen, half maxvalue, half minvalue) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index >= size) return; half zero = half(0.f); int *ptmp = outweight + 1; int ntmp = index; int rhsindex = 0; int lhsindex = 0; int nbuff1, nbuff2; nbuff1 = nbuff2 = 0; for (int m = 0, i = shapelen - 1; i >= 0; --i, m++) { if (i > 0) { nbuff1 = ntmp / *ptmp; ntmp %= *ptmp; } else { nbuff1 = ntmp; } nbuff2 = nbuff1 % lhsshape[m]; if (m < shapelen - 1) { lhsindex += nbuff2 * lhsweight[m + 1]; } else { lhsindex += nbuff2; } nbuff2 = nbuff1 % rhsshape[m]; if (m < shapelen - 1) { rhsindex += nbuff2 * rhsweight[m + 1]; } else { rhsindex += nbuff2; } ++ptmp; } out[index] = (rhs[rhsindex]) == zero ? (lhs[lhsindex] > zero ? 
maxvalue : minvalue) : lhs[lhsindex] / (rhs[rhsindex]); } #endif template<typename T> static inline void div_gpu_compute_run(const Tensor &lhs, const Tensor &rhs, Tensor &out) { HypeShape lhs_hype(lhs.sizes()); HypeShape rhs_hype(rhs.sizes()); HypeShape out_hype(out.sizes()); auto plhs = lhs.data<T>(); auto prhs = rhs.data<T>(); auto pout = out.data<T>(); auto ncount = out.count(); int *lhsshape = nullptr; int *rhsshape = nullptr; int *lhsweight = nullptr; int *rhsweight = nullptr; int *outweight = nullptr; ///////////////////////////////////// Shape tmpshape; tmpshape.resize(1); tmpshape[0] = int32_t(lhs.sizes().size()); Tensor lhs_tensor(out.device(), INT32, tmpshape); lhsshape = lhs_tensor.data<int32_t>(); tmpshape[0] = int32_t(rhs.sizes().size()); Tensor rhs_tensor(out.device(), INT32, tmpshape); rhsshape = rhs_tensor.data<int32_t>(); tmpshape[0] = int32_t(lhs.sizes().size()); Tensor lhs_weight_tensor(out.device(), INT32, tmpshape); lhsweight = lhs_weight_tensor.data<int32_t>(); tmpshape[0] = int32_t(rhs.sizes().size()); Tensor rhs_weight_tensor(out.device(), INT32, tmpshape); rhsweight = rhs_weight_tensor.data<int32_t>(); tmpshape[0] = int32_t(out.sizes().size()); Tensor out_weight_tensor(out.device(), INT32, tmpshape); outweight = out_weight_tensor.data<int32_t>(); memcpy((void*)lhsshape, out.device(), lhs.sizes().size() * sizeof(int32_t), (void*)lhs.sizes().data(), MemoryDevice(CPU), lhs.sizes().size() * sizeof(int32_t)); memcpy((void*)rhsshape, out.device(), rhs.sizes().size() * sizeof(int32_t), (void*)rhs.sizes().data(), MemoryDevice(CPU), rhs.sizes().size() * sizeof(int32_t)); memcpy((void*)lhsweight, out.device(), lhs_hype.weight().size() * sizeof(int32_t), (void*)lhs_hype.weight().data(), MemoryDevice(CPU), lhs_hype.weight().size() * sizeof(int32_t)); memcpy((void*)rhsweight, out.device(), rhs_hype.weight().size() * sizeof(int32_t), (void*)rhs_hype.weight().data(), MemoryDevice(CPU), rhs_hype.weight().size() * sizeof(int32_t)); memcpy((void*)outweight, out.device(), out_hype.weight().size() * sizeof(int32_t), (void*)out_hype.weight().data(), MemoryDevice(CPU), out_hype.weight().size() * sizeof(int32_t)); ///////////////////////////////////// T maxvalue = std::numeric_limits<T>::max(); T minvalue = std::numeric_limits<T>::lowest(); RUN_KERNEL(reduce_operator_kernel<T>, CUDA_BLOCK(ncount, CUDA_THREAD_NUM), CUDA_THREAD_NUM, pout, ncount, plhs, prhs, lhsshape, lhsweight, rhsshape, rhsweight, outweight, int(out.sizes().size()), maxvalue, minvalue); } template<typename T> static inline void div_gpu_compute_run_scalar(const Tensor &lhs, const Tensor &rhs, Tensor &out) { auto plhs = lhs.data<T>(); auto prhs = rhs.data<T>(); auto pout = out.data<T>(); T maxvalue = std::numeric_limits<T>::max(); T minvalue = std::numeric_limits<T>::lowest(); memcpy((void*)pout, out.device(), out.count() * sizeof(T), (void*)plhs, lhs.device(), out.count() * sizeof(T)); RUN_KERNEL(reduce_operator_scalar_kernel<T>, CUDA_BLOCK(out.count(), CUDA_THREAD_NUM), CUDA_THREAD_NUM, pout, out.count(), prhs, maxvalue, minvalue); } template<typename T> static inline void div_gpu_compute_run_scalar_cross(const Tensor &lhs, const Tensor &rhs, Tensor &out) { auto plhs = lhs.data<T>(); auto prhs = rhs.data<T>(); auto pout = out.data<T>(); T maxvalue = std::numeric_limits<T>::max(); T minvalue = std::numeric_limits<T>::lowest(); memcpy((void*)pout, out.device(), out.count() * sizeof(T), (void*)prhs, rhs.device(), out.count() * sizeof(T)); RUN_KERNEL(reduce_operator_scalar_cross_kernel<T>, CUDA_BLOCK(out.count(), 
CUDA_THREAD_NUM), CUDA_THREAD_NUM, pout, out.count(), plhs, maxvalue, minvalue); } template<typename T> static inline void div_gpu_compute_run_same_shape(const Tensor &lhs, const Tensor &rhs, Tensor &out) { auto plhs = lhs.data<T>(); auto prhs = rhs.data<T>(); auto pout = out.data<T>(); T maxvalue = std::numeric_limits<T>::max(); T minvalue = std::numeric_limits<T>::lowest(); memcpy((void*)pout, out.device(), out.count() * sizeof(T), (void*)plhs, lhs.device(), out.count() * sizeof(T)); RUN_KERNEL(reduce_operator_same_shape_kernel<T>, CUDA_BLOCK(out.count(), CUDA_THREAD_NUM), CUDA_THREAD_NUM, pout, prhs, out.count(), maxvalue, minvalue); } template<typename T> static inline void div_gpu_compute_run_bias(const Tensor &lhs, const Tensor &rhs, Tensor &out, int dim) { auto plhs = lhs.data<T>(); auto prhs = rhs.data<T>(); auto pout = out.data<T>(); auto &out_shape = out.sizes(); auto number = std::accumulate(out_shape.begin(), out_shape.begin() + dim, 1, std::multiplies<int>()); auto count = std::accumulate(out_shape.begin() + dim + 1, out_shape.end(), 1, std::multiplies<int>()); auto channels = out_shape[dim]; memcpy((void*)pout, out.device(), out.count() * sizeof(T), (void*)plhs, lhs.device(), out.count() * sizeof(T)); T maxvalue = std::numeric_limits<T>::max(); T minvalue = std::numeric_limits<T>::lowest(); RUN_KERNEL(reduce_operator_bias_kernel<T>, CUDA_BLOCK(out.count(), CUDA_THREAD_NUM), CUDA_THREAD_NUM, pout, out.count(), count, channels, prhs, rhs.count(), maxvalue, minvalue); } template<typename T> static inline void div_gpu_compute_run_bias_cross(const Tensor &lhs, const Tensor &rhs, Tensor &out, int dim) { auto plhs = lhs.data<T>(); auto prhs = rhs.data<T>(); auto pout = out.data<T>(); auto &out_shape = out.sizes(); auto number = std::accumulate(out_shape.begin(), out_shape.begin() + dim, 1, std::multiplies<int>()); auto count = std::accumulate(out_shape.begin() + dim + 1, out_shape.end(), 1, std::multiplies<int>()); auto channels = out_shape[dim]; memcpy((void*)pout, out.device(), out.count() * sizeof(T), (void*)prhs, rhs.device(), out.count() * sizeof(T)); T maxvalue = std::numeric_limits<T>::max(); T minvalue = std::numeric_limits<T>::lowest(); RUN_KERNEL(reduce_operator_bias_cross_kernel<T>, CUDA_BLOCK(out.count(), CUDA_THREAD_NUM), CUDA_THREAD_NUM, pout, out.count(), count, channels, plhs, lhs.count(), maxvalue, minvalue); } void Div::reduce_with_broadcast(const Tensor &lhs, const Tensor &rhs, Tensor &out) { // Notice: the all tensor' memory device are CPU, as given in running_memory_device DTYPE dtype = out.dtype(); switch(dtype) { #define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \ case DTYPE: { div_gpu_compute_run<TYPE>(lhs, rhs, out); break; } DECLARE_COMPUTE_RUN(INT8, int8_t); DECLARE_COMPUTE_RUN(UINT8, uint8_t); DECLARE_COMPUTE_RUN(INT16, int16_t); DECLARE_COMPUTE_RUN(UINT16, uint16_t); DECLARE_COMPUTE_RUN(INT32, int32_t); DECLARE_COMPUTE_RUN(UINT32, uint32_t); DECLARE_COMPUTE_RUN(INT64, int64_t); DECLARE_COMPUTE_RUN(UINT64, uint64_t); #ifdef TS_USE_CUDA_FP16 DECLARE_COMPUTE_RUN(FLOAT16, half); #endif DECLARE_COMPUTE_RUN(FLOAT32, float); DECLARE_COMPUTE_RUN(FLOAT64, double); #undef DECLARE_COMPUTE_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } void Div::reduce_with_scalar(const Tensor &lhs, const Tensor &rhs, Tensor &out) { // Notice: the all tensor' memory device are CPU, as given in running_memory_device DTYPE dtype = out.dtype(); switch(dtype) { #define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \ case 
DTYPE: { div_gpu_compute_run_scalar<TYPE>(lhs, rhs, out); break; } DECLARE_COMPUTE_RUN(INT8, int8_t); DECLARE_COMPUTE_RUN(UINT8, uint8_t); DECLARE_COMPUTE_RUN(INT16, int16_t); DECLARE_COMPUTE_RUN(UINT16, uint16_t); DECLARE_COMPUTE_RUN(INT32, int32_t); DECLARE_COMPUTE_RUN(UINT32, uint32_t); DECLARE_COMPUTE_RUN(INT64, int64_t); DECLARE_COMPUTE_RUN(UINT64, uint64_t); #ifdef TS_USE_CUDA_FP16 DECLARE_COMPUTE_RUN(FLOAT16, half); #endif DECLARE_COMPUTE_RUN(FLOAT32, float); DECLARE_COMPUTE_RUN(FLOAT64, double); #undef DECLARE_COMPUTE_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } void Div::reduce_with_bias(const Tensor &lhs, const Tensor &rhs, Tensor &out, int dim) { // Notice: the all tensor' memory device are CPU, as given in running_memory_device DTYPE dtype = out.dtype(); switch(dtype) { #define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \ case DTYPE: { div_gpu_compute_run_bias<TYPE>(lhs, rhs, out, dim); break; } DECLARE_COMPUTE_RUN(INT8, int8_t); DECLARE_COMPUTE_RUN(UINT8, uint8_t); DECLARE_COMPUTE_RUN(INT16, int16_t); DECLARE_COMPUTE_RUN(UINT16, uint16_t); DECLARE_COMPUTE_RUN(INT32, int32_t); DECLARE_COMPUTE_RUN(UINT32, uint32_t); DECLARE_COMPUTE_RUN(INT64, int64_t); DECLARE_COMPUTE_RUN(UINT64, uint64_t); #ifdef TS_USE_CUDA_FP16 DECLARE_COMPUTE_RUN(FLOAT16, half); #endif DECLARE_COMPUTE_RUN(FLOAT32, float); DECLARE_COMPUTE_RUN(FLOAT64, double); #undef DECLARE_COMPUTE_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } void Div::reduce_with_same_shape(const Tensor &lhs, const Tensor &rhs, Tensor &out) { // Notice: the all tensor' memory device are CPU, as given in running_memory_device DTYPE dtype = out.dtype(); switch(dtype) { #define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \ case DTYPE: { div_gpu_compute_run_same_shape<TYPE>(lhs, rhs, out); break; } DECLARE_COMPUTE_RUN(INT8, int8_t); DECLARE_COMPUTE_RUN(UINT8, uint8_t); DECLARE_COMPUTE_RUN(INT16, int16_t); DECLARE_COMPUTE_RUN(UINT16, uint16_t); DECLARE_COMPUTE_RUN(INT32, int32_t); DECLARE_COMPUTE_RUN(UINT32, uint32_t); DECLARE_COMPUTE_RUN(INT64, int64_t); DECLARE_COMPUTE_RUN(UINT64, uint64_t); #ifdef TS_USE_CUDA_FP16 DECLARE_COMPUTE_RUN(FLOAT16, half); #endif DECLARE_COMPUTE_RUN(FLOAT32, float); DECLARE_COMPUTE_RUN(FLOAT64, double); #undef DECLARE_COMPUTE_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } void Div::reduce_with_scalar_cross(const Tensor &lhs, const Tensor &rhs, Tensor &out) { // Notice: the all tensor' memory device are CPU, as given in running_memory_device DTYPE dtype = out.dtype(); switch(dtype) { #define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \ case DTYPE: { div_gpu_compute_run_scalar_cross<TYPE>(lhs, rhs, out); break; } DECLARE_COMPUTE_RUN(INT8, int8_t); DECLARE_COMPUTE_RUN(UINT8, uint8_t); DECLARE_COMPUTE_RUN(INT16, int16_t); DECLARE_COMPUTE_RUN(UINT16, uint16_t); DECLARE_COMPUTE_RUN(INT32, int32_t); DECLARE_COMPUTE_RUN(UINT32, uint32_t); DECLARE_COMPUTE_RUN(INT64, int64_t); DECLARE_COMPUTE_RUN(UINT64, uint64_t); #ifdef TS_USE_CUDA_FP16 DECLARE_COMPUTE_RUN(FLOAT16, half); #endif DECLARE_COMPUTE_RUN(FLOAT32, float); DECLARE_COMPUTE_RUN(FLOAT64, double); #undef DECLARE_COMPUTE_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } void Div::reduce_with_bias_cross(const Tensor &lhs, const Tensor &rhs, Tensor &out, int dim) { // 
Notice: the all tensor' memory device are CPU, as given in running_memory_device DTYPE dtype = out.dtype(); switch(dtype) { #define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \ case DTYPE: { div_gpu_compute_run_bias_cross<TYPE>(lhs, rhs, out, dim); break; } DECLARE_COMPUTE_RUN(INT8, int8_t); DECLARE_COMPUTE_RUN(UINT8, uint8_t); DECLARE_COMPUTE_RUN(INT16, int16_t); DECLARE_COMPUTE_RUN(UINT16, uint16_t); DECLARE_COMPUTE_RUN(INT32, int32_t); DECLARE_COMPUTE_RUN(UINT32, uint32_t); DECLARE_COMPUTE_RUN(INT64, int64_t); DECLARE_COMPUTE_RUN(UINT64, uint64_t); #ifdef TS_USE_CUDA_FP16 DECLARE_COMPUTE_RUN(FLOAT16, half); #endif DECLARE_COMPUTE_RUN(FLOAT32, float); DECLARE_COMPUTE_RUN(FLOAT64, double); #undef DECLARE_COMPUTE_RUN default: { TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject; break; } } } } } using namespace ts; using namespace gpu; TS_REGISTER_OPERATOR(Div, GPU, name::layer::div()) #ifdef TS_USE_CUDA_FP16 TS_REGISTER_FP16_OPERATOR(Div, ts::GPU, name::layer::div()) #endif
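Every division kernel in the div.hip/div.cu pair above applies the same zero-divisor rule: when the divisor element is zero, the result saturates to the type's maximum (for a positive numerator) or lowest value instead of dividing. A minimal host-side sketch of that rule, usable as a CPU reference when checking the GPU path; the function name is hypothetical and not part of the original files.

// Hypothetical CPU reference for the zero-guarded division used by the GPU kernels above.
#include <limits>

template <typename T>
T guarded_div(T numerator, T divisor) {
    const T maxvalue = std::numeric_limits<T>::max();      // same limits that are passed to the kernels
    const T minvalue = std::numeric_limits<T>::lowest();
    return divisor == T(0)
               ? (numerator > T(0) ? maxvalue : minvalue)  // saturate instead of dividing by zero
               : numerator / divisor;
}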
2dad4b2d2a11143061794c6f29b654ec5d54a42c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

#define N 8192
#define THREAD_PER_BLOCK_SIDE_X 8
#define THREAD_PER_BLOCK_SIDE_Y 16
#define THREAD_PER_BLOCK THREAD_PER_BLOCK_SIDE_X*THREAD_PER_BLOCK_SIDE_Y
#define TYPE double
#define TYPE_S "double"

__global__ void transpose(TYPE * in, TYPE * out, int size)
{
    //int temp_side = THREAD_PER_BLOCK;
    __shared__ TYPE temp_matrix[THREAD_PER_BLOCK_SIDE_X][THREAD_PER_BLOCK_SIDE_Y];

    int col = blockIdx.x*blockDim.x + threadIdx.x;
    int row = blockIdx.y*blockDim.y + threadIdx.y;

    // copy submatrix (transposed) in shared memory
    temp_matrix[threadIdx.x][threadIdx.y] = in[row*size + col];
    __syncthreads();

    // copy submatrix in main memory
    out[col*size + row] = temp_matrix[threadIdx.x][threadIdx.y];
}

int correct(TYPE* a, TYPE* b, int side)
{
    int i;
    for(i=0; i<side*side; i++)
        if(a[i]!=b[(i%side)*side + i/side])
            return 0;
    return 1;
}

int main()
{
    TYPE * h_in, * h_out;
    TYPE * d_in, * d_out;
    int size = N*N;
    int size_in_memory = size * sizeof(TYPE);
    int i;

    // timing
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    //allocate memory in host and device
    h_in = (TYPE *)malloc(size_in_memory);
    h_out = (TYPE *)malloc(size_in_memory);
    hipMalloc((void**)&d_in, size_in_memory);
    hipMalloc((void**)&d_out, size_in_memory);

    //fill matrix in host
    for(i = 0; i<size; i++)
        h_in[i] = i;

    //transfer matrix from host to device
    hipMemcpy(d_in, h_in, size_in_memory, hipMemcpyHostToDevice);

    //transpose matrix in device
    dim3 grid, block;
    block.x = THREAD_PER_BLOCK_SIDE_X;
    block.y = THREAD_PER_BLOCK_SIDE_Y;
    grid.x = N / block.x;
    grid.y = N / block.y;

    hipEventRecord(start);
    hipLaunchKernelGGL(( transpose), dim3(grid), dim3(block) , 0, 0, d_in, d_out, N);
    hipEventRecord(stop);

    // transfer matrix from device to host
    hipMemcpy(h_out, d_out, size_in_memory, hipMemcpyDeviceToHost);

    // correctness test
    printf("\ncorrectness: %d \n", correct(h_in, h_out, N));

    //free memory
    free(h_in);
    free(h_out);
    hipFree(d_in);
    hipFree(d_out);

    //showing Bandwidth
    hipEventSynchronize(stop);
    float milliseconds = 0;
    hipEventElapsedTime(&milliseconds, start, stop);
    printf("\nmatrix type: %s", TYPE_S);
    printf("\nblock: %d x %d", block.y, block.x);
    printf("\nmilliseconds: %f", milliseconds);
    printf("\nBandwidth: %f GB/s \n", 2*size_in_memory/milliseconds/1e6);

    return 0;
}
2dad4b2d2a11143061794c6f29b654ec5d54a42c.cu
#include <stdio.h>

#define N 8192
#define THREAD_PER_BLOCK_SIDE_X 8
#define THREAD_PER_BLOCK_SIDE_Y 16
#define THREAD_PER_BLOCK THREAD_PER_BLOCK_SIDE_X*THREAD_PER_BLOCK_SIDE_Y
#define TYPE double
#define TYPE_S "double"

__global__ void transpose(TYPE * in, TYPE * out, int size)
{
    //int temp_side = THREAD_PER_BLOCK;
    __shared__ TYPE temp_matrix[THREAD_PER_BLOCK_SIDE_X][THREAD_PER_BLOCK_SIDE_Y];

    int col = blockIdx.x*blockDim.x + threadIdx.x;
    int row = blockIdx.y*blockDim.y + threadIdx.y;

    // copy submatrix (transposed) in shared memory
    temp_matrix[threadIdx.x][threadIdx.y] = in[row*size + col];
    __syncthreads();

    // copy submatrix in main memory
    out[col*size + row] = temp_matrix[threadIdx.x][threadIdx.y];
}

int correct(TYPE* a, TYPE* b, int side)
{
    int i;
    for(i=0; i<side*side; i++)
        if(a[i]!=b[(i%side)*side + i/side])
            return 0;
    return 1;
}

int main()
{
    TYPE * h_in, * h_out;
    TYPE * d_in, * d_out;
    int size = N*N;
    int size_in_memory = size * sizeof(TYPE);
    int i;

    // timing
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    //allocate memory in host and device
    h_in = (TYPE *)malloc(size_in_memory);
    h_out = (TYPE *)malloc(size_in_memory);
    cudaMalloc((void**)&d_in, size_in_memory);
    cudaMalloc((void**)&d_out, size_in_memory);

    //fill matrix in host
    for(i = 0; i<size; i++)
        h_in[i] = i;

    //transfer matrix from host to device
    cudaMemcpy(d_in, h_in, size_in_memory, cudaMemcpyHostToDevice);

    //transpose matrix in device
    dim3 grid, block;
    block.x = THREAD_PER_BLOCK_SIDE_X;
    block.y = THREAD_PER_BLOCK_SIDE_Y;
    grid.x = N / block.x;
    grid.y = N / block.y;

    cudaEventRecord(start);
    transpose<<< grid, block >>>(d_in, d_out, N);
    cudaEventRecord(stop);

    // transfer matrix from device to host
    cudaMemcpy(h_out, d_out, size_in_memory, cudaMemcpyDeviceToHost);

    // correctness test
    printf("\ncorrectness: %d \n", correct(h_in, h_out, N));

    //free memory
    free(h_in);
    free(h_out);
    cudaFree(d_in);
    cudaFree(d_out);

    //showing Bandwidth
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("\nmatrix type: %s", TYPE_S);
    printf("\nblock: %d x %d", block.y, block.x);
    printf("\nmilliseconds: %f", milliseconds);
    printf("\nBandwidth: %f GB/s \n", 2*size_in_memory/milliseconds/1e6);

    return 0;
}
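In the transpose pair above, the global read in[row*size + col] is coalesced but the transposed write out[col*size + row] is strided, and the shared-memory tile is unpadded. A commonly used variant, sketched below, pads the tile by one column and swaps the block indices on output so both accesses stay coalesced; TILE_DIM and the kernel name are assumptions for this sketch rather than values from the original program, and the kernel must be launched with blockDim = (TILE_DIM, TILE_DIM).

// Sketch of a padded-tile transpose variant (not part of the original file pair).
#define TILE_DIM 32

__global__ void transpose_tiled(const double *in, double *out, int size)
{
    __shared__ double tile[TILE_DIM][TILE_DIM + 1];             // +1 column avoids shared-memory bank conflicts

    int col = blockIdx.x * TILE_DIM + threadIdx.x;
    int row = blockIdx.y * TILE_DIM + threadIdx.y;
    if (row < size && col < size)
        tile[threadIdx.y][threadIdx.x] = in[row * size + col];  // coalesced read

    __syncthreads();

    // swap block indices so the transposed write is coalesced too
    int t_col = blockIdx.y * TILE_DIM + threadIdx.x;
    int t_row = blockIdx.x * TILE_DIM + threadIdx.y;
    if (t_row < size && t_col < size)
        out[t_row * size + t_col] = tile[threadIdx.x][threadIdx.y];  // coalesced write
}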
24ae8c4ee1dafa349d9a15f7ff720ff0d33eba30.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: [email protected] */ #include "cuda_executor.hpp" extern "C" { #include "permute_param.h" #include "graph/tensor.h" #include "operator/op.h" #include "utility/log.h" } __global__ void permute(float *y, float *x, int elem_num, int n, int c, int hw) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int chw = c * hw; int idx_n = idx / chw; int idx_c = idx % chw / hw; int idx_hw = idx % hw; int idx_new = idx_n * chw + idx_hw * c + idx_c ; if (idx < elem_num) { y[idx_new] = x[idx]; } } void permute_gpu_kernel(struct graph* ir_graph, struct node* ir_node, dict_uint2voidx gpu_addr_map) { struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); struct permute_param* param = (struct permute_param*)ir_node->op.param_mem; /* init grid and block */ int bs = 1024; int s = ceil((output_tensor->elem_num + bs - 1.) / bs); dim3 grid = dim3(s); if (param->order0 == 0 && param->order1 == 2 && param->order2 == 3 && param->order3 == 1) hipLaunchKernelGGL(( permute), dim3(grid), dim3(bs), 0, 0, (float*)gpu_addr_map[output_tensor->index], (float*)gpu_addr_map[input_tensor->index], output_tensor->elem_num, input_tensor->dims[0], input_tensor->dims[1], input_tensor->dims[2] * input_tensor->dims[3]); } void CUDAEngine::AddPermuteNode(struct graph* ir_graph, struct node* ir_node) { TLOG_INFO("Tengine GPU: Support OP(%d) OP_PERMUTE.\n", ir_node->index); permute_gpu_kernel(ir_graph, ir_node, this->gpu_addr_map); this->ops.push_back(std::bind(&permute_gpu_kernel, ir_graph, ir_node, this->gpu_addr_map)); }
24ae8c4ee1dafa349d9a15f7ff720ff0d33eba30.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: [email protected] */ #include "cuda_executor.hpp" extern "C" { #include "permute_param.h" #include "graph/tensor.h" #include "operator/op.h" #include "utility/log.h" } __global__ void permute(float *y, float *x, int elem_num, int n, int c, int hw) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int chw = c * hw; int idx_n = idx / chw; int idx_c = idx % chw / hw; int idx_hw = idx % hw; int idx_new = idx_n * chw + idx_hw * c + idx_c ; if (idx < elem_num) { y[idx_new] = x[idx]; } } void permute_gpu_kernel(struct graph* ir_graph, struct node* ir_node, dict_uint2voidx gpu_addr_map) { struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); struct permute_param* param = (struct permute_param*)ir_node->op.param_mem; /* init grid and block */ int bs = 1024; int s = ceil((output_tensor->elem_num + bs - 1.) / bs); dim3 grid = dim3(s); if (param->order0 == 0 && param->order1 == 2 && param->order2 == 3 && param->order3 == 1) permute<<<grid, bs>>>((float*)gpu_addr_map[output_tensor->index], (float*)gpu_addr_map[input_tensor->index], output_tensor->elem_num, input_tensor->dims[0], input_tensor->dims[1], input_tensor->dims[2] * input_tensor->dims[3]); } void CUDAEngine::AddPermuteNode(struct graph* ir_graph, struct node* ir_node) { TLOG_INFO("Tengine GPU: Support OP(%d) OP_PERMUTE.\n", ir_node->index); permute_gpu_kernel(ir_graph, ir_node, this->gpu_addr_map); this->ops.push_back(std::bind(&permute_gpu_kernel, ir_graph, ir_node, this->gpu_addr_map)); }
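The permute kernel in the pair above handles only the order (0, 2, 3, 1), i.e. NCHW to NHWC: each flat NCHW index is decomposed into (idx_n, idx_c, idx_hw) and recombined as idx_n*chw + idx_hw*c + idx_c. Below is a small CPU reference of the same mapping, handy for validating the GPU output on small tensors; the function name is hypothetical and not part of the Tengine sources.

// Hypothetical CPU reference for the (0, 2, 3, 1) permute performed by the kernel above.
void permute_nchw_to_nhwc_cpu(const float *x, float *y, int n, int c, int hw)
{
    const int chw = c * hw;
    for (int in_n = 0; in_n < n; ++in_n)
        for (int in_c = 0; in_c < c; ++in_c)
            for (int in_hw = 0; in_hw < hw; ++in_hw) {
                int src = in_n * chw + in_c * hw + in_hw;   // NCHW source index
                int dst = in_n * chw + in_hw * c + in_c;    // NHWC destination index
                y[dst] = x[src];
            }
}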
5d56a58daa827a5a825c9b6c525a0b70c5111426.hip
// !!! This is a file automatically generated by hipify!!! #include <thrust/complex.h> #include <iostream> #include <cstdio> #include <cstring> #include <string> #include <hipfft.h> #include <sys/time.h> #include <thread> #include "FourierBeamPropagator.cuh" #include "OpticalElements.cuh" #include <gtk/gtk.h> #include <thread> #include <cstdlib> #include "DoubleBuffer.hpp" typedef float real; typedef thrust::complex<real> complex; const int CELLS = 1024 * 4; const real LAMBDA = 500.0e-9; const real DZ = LAMBDA; const real CELL_DIM = LAMBDA; const real MAX_N_CHANGE = 0.005; const bool ADJUST_STEP = true; const int RENDER_EVERY = 3; typedef BiConvexLens<real> OpticalPath; OpticalPath op(0.001, 0.005, 0.001, 1.3); fftbpm::BeamPropagator<real, OpticalPath>* prop = nullptr; inline void cucheck() { hipError_t err = hipGetLastError(); if(err != hipSuccess) { throw std::runtime_error(hipGetErrorString(err)); } } double prof_time() { struct timeval now; gettimeofday(&now, NULL); return now.tv_sec + 1.0e-6 * now.tv_usec; } const int STRIDE = cairo_format_stride_for_width(CAIRO_FORMAT_RGB24, 2 * CELLS); DoubleBuffer<unsigned char> render_buffer(STRIDE * CELLS); volatile int total_steps = 0; volatile real current_z = 0.0; volatile bool quit_thread = false; volatile bool changed = true; void stepper_thread() { static std::thread t; long long count = 0; hipSetDevice(prop->getDevice()); while(!quit_thread) { prop->step(DZ, ADJUST_STEP); if(count % RENDER_EVERY == 0) { complex* efld = new complex[CELLS * CELLS]; prop->getElectricField(efld); if(t.joinable()) t.join(); t = std::thread([efld]() { unsigned char* buffer = render_buffer.getRenderBuffer(); unsigned char* display_buffer = render_buffer.getDisplayBuffer(); complex* nbuf = new complex[CELLS * CELLS]; current_z = prop->getZCoordinate(); total_steps = prop->getStepCount(); #pragma omp parallel for for(int y = 0; y < CELLS; y++) for(int x = 0; x < CELLS; x++) nbuf[y * CELLS + x] = prop->indexOfRefraction((x - CELLS / 2) * CELL_DIM, (y - CELLS / 2) * CELL_DIM, current_z); #pragma omp parallel for for(int y = 0; y < CELLS; y++) { for(int x = 0; x < CELLS; x++) { unsigned char val = fmin(255.0, 255.0 * thrust::abs(efld[y * CELLS + x])); buffer[y * STRIDE + 4 * x + 0] = 255.0 * (nbuf[y * CELLS + x].real() - 1.0); buffer[y * STRIDE + 4 * x + 1] = val; buffer[y * STRIDE + 4 * x + 2] = val; buffer[y * STRIDE + 4 * x + 3] = 0; } for(int x = CELLS; x < 2 * CELLS - 1; x++) { buffer[y * STRIDE + 4 * x + 0] = display_buffer[y * STRIDE + 4 * (x + 1) + 0]; buffer[y * STRIDE + 4 * x + 1] = display_buffer[y * STRIDE + 4 * (x + 1) + 1]; buffer[y * STRIDE + 4 * x + 2] = display_buffer[y * STRIDE + 4 * (x + 1) + 2]; buffer[y * STRIDE + 4 * x + 3] = display_buffer[y * STRIDE + 4 * (x + 1) + 3]; } unsigned char val = fmin(255.0, 255.0 * thrust::abs(efld[y * CELLS + (CELLS / 2)])); buffer[y * STRIDE + 8 * (CELLS - 1) + 0] = 255.0 * (nbuf[y * CELLS + (CELLS / 2)].real() - 1.0); buffer[y * STRIDE + 8 * (CELLS - 1) + 1] = val; buffer[y * STRIDE + 8 * (CELLS - 1) + 2] = val; buffer[y * STRIDE + 8 * (CELLS - 1) + 3] = 0; } render_buffer.swapBuffers(); changed = true; delete[] nbuf; delete[] efld; }); } count++; } if(t.joinable()) t.join(); } GtkWidget* drawing_area; cairo_surface_t* s = nullptr; gboolean update_surface() { if(!changed) return TRUE; changed = false; if(s) cairo_surface_destroy(s); s = cairo_image_surface_create_for_data(render_buffer.getDisplayBuffer(), CAIRO_FORMAT_RGB24, 2 * CELLS, CELLS, STRIDE); gtk_widget_queue_draw(GTK_WIDGET(drawing_area)); return TRUE; 
} double start; gboolean draw(GtkWidget* widget, cairo_t* cr, gpointer data) { guint width, height; width = gtk_widget_get_allocated_width(widget); height = gtk_widget_get_allocated_height(widget); cairo_set_source_rgb(cr, 0.0, 0.0, 0.0); cairo_rectangle(cr, 0.0, 0.0, width, height); cairo_fill(cr); real scale = fmin(2 * CELLS > width ? (real)width / (2 * CELLS) : 1.0, CELLS > height ? (real)height / CELLS : 1.0); cairo_scale(cr, scale, scale); if(s) cairo_set_source_surface(cr, s, 0.0, 0.0); cairo_paint(cr); cairo_set_source_rgb(cr, 1.0, 0.0, 0.0); cairo_scale(cr, 1.0 / scale, 1.0 / scale); real yoff = 0.0; auto cairo_print = [&](const char* text) { cairo_text_extents_t te; cairo_text_extents (cr, text, &te); cairo_move_to(cr, 1.0, yoff + te.height + 1.0); cairo_show_text(cr, text); yoff += te.height + 1.0; }; char text[200]; sprintf(text, "%0.6lf m", current_z); cairo_print(text); sprintf(text, "%d steps", total_steps); cairo_print(text); sprintf(text, "%llu s run time", (unsigned long long)(prof_time() - start)); cairo_print(text); sprintf(text, "%d steps / min", (int)(total_steps / ((prof_time() - start) / 60.0))); cairo_print(text); sprintf(text, "%0.1lf x %0.1lf um", prop->getPhysicalGridDim() * 1.0e6, prop->getPhysicalGridDim() * 1.0e6); cairo_print(text); if(LAMBDA < 1.0e-6) sprintf(text, "Wavelength %0.1lf nm", LAMBDA * 1.0e9); else sprintf(text, "Wavelength %0.1lf um", LAMBDA * 1.0e6); cairo_print(text); return FALSE; } int main(int argc, char** argv) { using namespace fftbpm; hipSetDevice(1); start = prof_time(); gtk_init(&argc, &argv); GtkWidget* window = gtk_window_new(GTK_WINDOW_TOPLEVEL); int swidth = gdk_screen_get_height(gdk_screen_get_default()); int sheight = gdk_screen_get_height(gdk_screen_get_default()); int width = 2 * CELLS; int height = CELLS; while(width > swidth && height > sheight) { width /= 2; height /= 2; } gtk_window_set_default_size(GTK_WINDOW(window), width, height); g_signal_connect(window, "destroy", gtk_main_quit, NULL); drawing_area = gtk_drawing_area_new(); gtk_container_add(GTK_CONTAINER(window), drawing_area); g_signal_connect(drawing_area, "draw", G_CALLBACK(draw), NULL); prop = new fftbpm::BeamPropagator<real, OpticalPath>(CELLS, LAMBDA, CELL_DIM, 0.0, op); prop->setMaxNChange(MAX_N_CHANGE); complex* efld = new complex[CELLS * CELLS]; for(int y = 0; y < CELLS; y++) { for(int x = 0; x < CELLS; x++) { int dx = x - CELLS / 2; int dy = y - CELLS / 2; efld[y * CELLS + x] = ::exp(-(dx * dx + dy * dy) / 6000.0); // if(dx * dx + dy * dy < 100 * 100) // efld[y * CELLS + x] = 1.0; // else // efld[y * CELLS + x] = 0.0; // int x_prime = x % 100; // int y_prime = y % 100; // // if(x_prime > 20 && x_prime < 80 && y_prime < 80 && y_prime > 20) // { // if(y_prime > 75 || (x_prime > 47 && x_prime < 53)) // efld[y * CELLS + x] = 1.0; // else // efld[y * CELLS + x] = 0.0; // } // else // efld[y * CELLS + x] = 0.0; // // real dx = CELL_DIM * (x - CELLS / 2); // real dy = CELL_DIM * (y - CELLS / 2); // real r = sqrt(dx * dx + dy * dy); // real f = 0.005; // int n = 2 * (sqrt(r * r + f * f) - f) / LAMBDA; // efld[y * CELLS + x] = n % 2 && n < 6; // if(x > 400 && x < 600 && y > 490 && y < 500) // efld[y * CELLS + x] = 1.0; // else // efld[y * CELLS + x] = 0.0; // efld[y * CELLS + x] = 1.0; } } prop->setElectricField(efld); delete[] efld; gtk_widget_show_all(window); g_idle_add((GSourceFunc)update_surface, NULL); std::thread t(stepper_thread); gtk_main(); quit_thread = true; t.join(); delete prop; return 0; }
5d56a58daa827a5a825c9b6c525a0b70c5111426.cu
#include <thrust/complex.h> #include <iostream> #include <cstdio> #include <cstring> #include <string> #include <cufft.h> #include <sys/time.h> #include <thread> #include "FourierBeamPropagator.cuh" #include "OpticalElements.cuh" #include <gtk/gtk.h> #include <thread> #include <cstdlib> #include "DoubleBuffer.hpp" typedef float real; typedef thrust::complex<real> complex; const int CELLS = 1024 * 4; const real LAMBDA = 500.0e-9; const real DZ = LAMBDA; const real CELL_DIM = LAMBDA; const real MAX_N_CHANGE = 0.005; const bool ADJUST_STEP = true; const int RENDER_EVERY = 3; typedef BiConvexLens<real> OpticalPath; OpticalPath op(0.001, 0.005, 0.001, 1.3); fftbpm::BeamPropagator<real, OpticalPath>* prop = nullptr; inline void cucheck() { cudaError_t err = cudaGetLastError(); if(err != cudaSuccess) { throw std::runtime_error(cudaGetErrorString(err)); } } double prof_time() { struct timeval now; gettimeofday(&now, NULL); return now.tv_sec + 1.0e-6 * now.tv_usec; } const int STRIDE = cairo_format_stride_for_width(CAIRO_FORMAT_RGB24, 2 * CELLS); DoubleBuffer<unsigned char> render_buffer(STRIDE * CELLS); volatile int total_steps = 0; volatile real current_z = 0.0; volatile bool quit_thread = false; volatile bool changed = true; void stepper_thread() { static std::thread t; long long count = 0; cudaSetDevice(prop->getDevice()); while(!quit_thread) { prop->step(DZ, ADJUST_STEP); if(count % RENDER_EVERY == 0) { complex* efld = new complex[CELLS * CELLS]; prop->getElectricField(efld); if(t.joinable()) t.join(); t = std::thread([efld]() { unsigned char* buffer = render_buffer.getRenderBuffer(); unsigned char* display_buffer = render_buffer.getDisplayBuffer(); complex* nbuf = new complex[CELLS * CELLS]; current_z = prop->getZCoordinate(); total_steps = prop->getStepCount(); #pragma omp parallel for for(int y = 0; y < CELLS; y++) for(int x = 0; x < CELLS; x++) nbuf[y * CELLS + x] = prop->indexOfRefraction((x - CELLS / 2) * CELL_DIM, (y - CELLS / 2) * CELL_DIM, current_z); #pragma omp parallel for for(int y = 0; y < CELLS; y++) { for(int x = 0; x < CELLS; x++) { unsigned char val = fmin(255.0, 255.0 * thrust::abs(efld[y * CELLS + x])); buffer[y * STRIDE + 4 * x + 0] = 255.0 * (nbuf[y * CELLS + x].real() - 1.0); buffer[y * STRIDE + 4 * x + 1] = val; buffer[y * STRIDE + 4 * x + 2] = val; buffer[y * STRIDE + 4 * x + 3] = 0; } for(int x = CELLS; x < 2 * CELLS - 1; x++) { buffer[y * STRIDE + 4 * x + 0] = display_buffer[y * STRIDE + 4 * (x + 1) + 0]; buffer[y * STRIDE + 4 * x + 1] = display_buffer[y * STRIDE + 4 * (x + 1) + 1]; buffer[y * STRIDE + 4 * x + 2] = display_buffer[y * STRIDE + 4 * (x + 1) + 2]; buffer[y * STRIDE + 4 * x + 3] = display_buffer[y * STRIDE + 4 * (x + 1) + 3]; } unsigned char val = fmin(255.0, 255.0 * thrust::abs(efld[y * CELLS + (CELLS / 2)])); buffer[y * STRIDE + 8 * (CELLS - 1) + 0] = 255.0 * (nbuf[y * CELLS + (CELLS / 2)].real() - 1.0); buffer[y * STRIDE + 8 * (CELLS - 1) + 1] = val; buffer[y * STRIDE + 8 * (CELLS - 1) + 2] = val; buffer[y * STRIDE + 8 * (CELLS - 1) + 3] = 0; } render_buffer.swapBuffers(); changed = true; delete[] nbuf; delete[] efld; }); } count++; } if(t.joinable()) t.join(); } GtkWidget* drawing_area; cairo_surface_t* s = nullptr; gboolean update_surface() { if(!changed) return TRUE; changed = false; if(s) cairo_surface_destroy(s); s = cairo_image_surface_create_for_data(render_buffer.getDisplayBuffer(), CAIRO_FORMAT_RGB24, 2 * CELLS, CELLS, STRIDE); gtk_widget_queue_draw(GTK_WIDGET(drawing_area)); return TRUE; } double start; gboolean draw(GtkWidget* widget, 
cairo_t* cr, gpointer data) { guint width, height; width = gtk_widget_get_allocated_width(widget); height = gtk_widget_get_allocated_height(widget); cairo_set_source_rgb(cr, 0.0, 0.0, 0.0); cairo_rectangle(cr, 0.0, 0.0, width, height); cairo_fill(cr); real scale = fmin(2 * CELLS > width ? (real)width / (2 * CELLS) : 1.0, CELLS > height ? (real)height / CELLS : 1.0); cairo_scale(cr, scale, scale); if(s) cairo_set_source_surface(cr, s, 0.0, 0.0); cairo_paint(cr); cairo_set_source_rgb(cr, 1.0, 0.0, 0.0); cairo_scale(cr, 1.0 / scale, 1.0 / scale); real yoff = 0.0; auto cairo_print = [&](const char* text) { cairo_text_extents_t te; cairo_text_extents (cr, text, &te); cairo_move_to(cr, 1.0, yoff + te.height + 1.0); cairo_show_text(cr, text); yoff += te.height + 1.0; }; char text[200]; sprintf(text, "%0.6lf m", current_z); cairo_print(text); sprintf(text, "%d steps", total_steps); cairo_print(text); sprintf(text, "%llu s run time", (unsigned long long)(prof_time() - start)); cairo_print(text); sprintf(text, "%d steps / min", (int)(total_steps / ((prof_time() - start) / 60.0))); cairo_print(text); sprintf(text, "%0.1lf x %0.1lf um", prop->getPhysicalGridDim() * 1.0e6, prop->getPhysicalGridDim() * 1.0e6); cairo_print(text); if(LAMBDA < 1.0e-6) sprintf(text, "Wavelength %0.1lf nm", LAMBDA * 1.0e9); else sprintf(text, "Wavelength %0.1lf um", LAMBDA * 1.0e6); cairo_print(text); return FALSE; } int main(int argc, char** argv) { using namespace fftbpm; cudaSetDevice(1); start = prof_time(); gtk_init(&argc, &argv); GtkWidget* window = gtk_window_new(GTK_WINDOW_TOPLEVEL); int swidth = gdk_screen_get_height(gdk_screen_get_default()); int sheight = gdk_screen_get_height(gdk_screen_get_default()); int width = 2 * CELLS; int height = CELLS; while(width > swidth && height > sheight) { width /= 2; height /= 2; } gtk_window_set_default_size(GTK_WINDOW(window), width, height); g_signal_connect(window, "destroy", gtk_main_quit, NULL); drawing_area = gtk_drawing_area_new(); gtk_container_add(GTK_CONTAINER(window), drawing_area); g_signal_connect(drawing_area, "draw", G_CALLBACK(draw), NULL); prop = new fftbpm::BeamPropagator<real, OpticalPath>(CELLS, LAMBDA, CELL_DIM, 0.0, op); prop->setMaxNChange(MAX_N_CHANGE); complex* efld = new complex[CELLS * CELLS]; for(int y = 0; y < CELLS; y++) { for(int x = 0; x < CELLS; x++) { int dx = x - CELLS / 2; int dy = y - CELLS / 2; efld[y * CELLS + x] = std::exp(-(dx * dx + dy * dy) / 6000.0); // if(dx * dx + dy * dy < 100 * 100) // efld[y * CELLS + x] = 1.0; // else // efld[y * CELLS + x] = 0.0; // int x_prime = x % 100; // int y_prime = y % 100; // // if(x_prime > 20 && x_prime < 80 && y_prime < 80 && y_prime > 20) // { // if(y_prime > 75 || (x_prime > 47 && x_prime < 53)) // efld[y * CELLS + x] = 1.0; // else // efld[y * CELLS + x] = 0.0; // } // else // efld[y * CELLS + x] = 0.0; // // real dx = CELL_DIM * (x - CELLS / 2); // real dy = CELL_DIM * (y - CELLS / 2); // real r = sqrt(dx * dx + dy * dy); // real f = 0.005; // int n = 2 * (sqrt(r * r + f * f) - f) / LAMBDA; // efld[y * CELLS + x] = n % 2 && n < 6; // if(x > 400 && x < 600 && y > 490 && y < 500) // efld[y * CELLS + x] = 1.0; // else // efld[y * CELLS + x] = 0.0; // efld[y * CELLS + x] = 1.0; } } prop->setElectricField(efld); delete[] efld; gtk_widget_show_all(window); g_idle_add((GSourceFunc)update_surface, NULL); std::thread t(stepper_thread); gtk_main(); quit_thread = true; t.join(); delete prop; return 0; }
61c3b2281d153c6baef90ba6aa730b430981614a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_advec_mom_kernel_x3; int xdim0_advec_mom_kernel_x3_h = -1; __constant__ int ydim0_advec_mom_kernel_x3; int ydim0_advec_mom_kernel_x3_h = -1; __constant__ int xdim1_advec_mom_kernel_x3; int xdim1_advec_mom_kernel_x3_h = -1; __constant__ int ydim1_advec_mom_kernel_x3; int ydim1_advec_mom_kernel_x3_h = -1; __constant__ int xdim2_advec_mom_kernel_x3; int xdim2_advec_mom_kernel_x3_h = -1; __constant__ int ydim2_advec_mom_kernel_x3; int ydim2_advec_mom_kernel_x3_h = -1; __constant__ int xdim3_advec_mom_kernel_x3; int xdim3_advec_mom_kernel_x3_h = -1; __constant__ int ydim3_advec_mom_kernel_x3; int ydim3_advec_mom_kernel_x3_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #define OPS_ACC0(x,y,z) (x+xdim0_advec_mom_kernel_x3*(y)+xdim0_advec_mom_kernel_x3*ydim0_advec_mom_kernel_x3*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_advec_mom_kernel_x3*(y)+xdim1_advec_mom_kernel_x3*ydim1_advec_mom_kernel_x3*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_advec_mom_kernel_x3*(y)+xdim2_advec_mom_kernel_x3*ydim2_advec_mom_kernel_x3*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_advec_mom_kernel_x3*(y)+xdim3_advec_mom_kernel_x3*ydim3_advec_mom_kernel_x3*(z)) //user function __device__ inline void advec_mom_kernel_x3_gpu( double *pre_vol, double *post_vol, const double *volume, const double *vol_flux_x) { post_vol[OPS_ACC1(0,0,0)] = volume[OPS_ACC2(0,0,0)]; pre_vol[OPS_ACC0(0,0,0)] = post_vol[OPS_ACC1(0,0,0)] + vol_flux_x[OPS_ACC3(1,0,0)] - vol_flux_x[OPS_ACC3(0,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_advec_mom_kernel_x3( double* __restrict arg0, double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_advec_mom_kernel_x3 + idx_z * 1*1 * xdim0_advec_mom_kernel_x3 * ydim0_advec_mom_kernel_x3; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_advec_mom_kernel_x3 + idx_z * 1*1 * xdim1_advec_mom_kernel_x3 * ydim1_advec_mom_kernel_x3; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_advec_mom_kernel_x3 + idx_z * 1*1 * xdim2_advec_mom_kernel_x3 * ydim2_advec_mom_kernel_x3; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_advec_mom_kernel_x3 + idx_z * 1*1 * xdim3_advec_mom_kernel_x3 * ydim3_advec_mom_kernel_x3; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_mom_kernel_x3_gpu(arg0, arg1, arg2, arg3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel_x3(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_advec_mom_kernel_x3_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,124)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(124,"advec_mom_kernel_x3"); OPS_kernels[124].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = 
OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != xdim0_advec_mom_kernel_x3_h || ydim0 != ydim0_advec_mom_kernel_x3_h || xdim1 != xdim1_advec_mom_kernel_x3_h || ydim1 != ydim1_advec_mom_kernel_x3_h || xdim2 != xdim2_advec_mom_kernel_x3_h || ydim2 != ydim2_advec_mom_kernel_x3_h || xdim3 != xdim3_advec_mom_kernel_x3_h || ydim3 != ydim3_advec_mom_kernel_x3_h) { hipMemcpyToSymbol( xdim0_advec_mom_kernel_x3, &xdim0, sizeof(int) ); xdim0_advec_mom_kernel_x3_h = xdim0; hipMemcpyToSymbol( ydim0_advec_mom_kernel_x3, &ydim0, sizeof(int) ); ydim0_advec_mom_kernel_x3_h = ydim0; hipMemcpyToSymbol( xdim1_advec_mom_kernel_x3, &xdim1, sizeof(int) ); xdim1_advec_mom_kernel_x3_h = xdim1; hipMemcpyToSymbol( ydim1_advec_mom_kernel_x3, &ydim1, sizeof(int) ); ydim1_advec_mom_kernel_x3_h = ydim1; hipMemcpyToSymbol( xdim2_advec_mom_kernel_x3, &xdim2, sizeof(int) ); xdim2_advec_mom_kernel_x3_h = xdim2; hipMemcpyToSymbol( ydim2_advec_mom_kernel_x3, &ydim2, sizeof(int) ); ydim2_advec_mom_kernel_x3_h = ydim2; hipMemcpyToSymbol( xdim3_advec_mom_kernel_x3, &xdim3, sizeof(int) ); xdim3_advec_mom_kernel_x3_h = xdim3; hipMemcpyToSymbol( ydim3_advec_mom_kernel_x3, &ydim3, sizeof(int) ); ydim3_advec_mom_kernel_x3_h = ydim3; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? 
args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[124].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_advec_mom_kernel_x3), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[124].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[124].mpi_time += t2-t1; OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel_x3(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 124; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 124; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; 
desc->function = ops_par_loop_advec_mom_kernel_x3_execute; if (OPS_diags > 1) { ops_timing_realloc(124,"advec_mom_kernel_x3"); } ops_enqueue_kernel(desc); } #endif
61c3b2281d153c6baef90ba6aa730b430981614a.cu
// // auto-generated by ops.py // __constant__ int xdim0_advec_mom_kernel_x3; int xdim0_advec_mom_kernel_x3_h = -1; __constant__ int ydim0_advec_mom_kernel_x3; int ydim0_advec_mom_kernel_x3_h = -1; __constant__ int xdim1_advec_mom_kernel_x3; int xdim1_advec_mom_kernel_x3_h = -1; __constant__ int ydim1_advec_mom_kernel_x3; int ydim1_advec_mom_kernel_x3_h = -1; __constant__ int xdim2_advec_mom_kernel_x3; int xdim2_advec_mom_kernel_x3_h = -1; __constant__ int ydim2_advec_mom_kernel_x3; int ydim2_advec_mom_kernel_x3_h = -1; __constant__ int xdim3_advec_mom_kernel_x3; int xdim3_advec_mom_kernel_x3_h = -1; __constant__ int ydim3_advec_mom_kernel_x3; int ydim3_advec_mom_kernel_x3_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #define OPS_ACC0(x,y,z) (x+xdim0_advec_mom_kernel_x3*(y)+xdim0_advec_mom_kernel_x3*ydim0_advec_mom_kernel_x3*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_advec_mom_kernel_x3*(y)+xdim1_advec_mom_kernel_x3*ydim1_advec_mom_kernel_x3*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_advec_mom_kernel_x3*(y)+xdim2_advec_mom_kernel_x3*ydim2_advec_mom_kernel_x3*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_advec_mom_kernel_x3*(y)+xdim3_advec_mom_kernel_x3*ydim3_advec_mom_kernel_x3*(z)) //user function __device__ inline void advec_mom_kernel_x3_gpu( double *pre_vol, double *post_vol, const double *volume, const double *vol_flux_x) { post_vol[OPS_ACC1(0,0,0)] = volume[OPS_ACC2(0,0,0)]; pre_vol[OPS_ACC0(0,0,0)] = post_vol[OPS_ACC1(0,0,0)] + vol_flux_x[OPS_ACC3(1,0,0)] - vol_flux_x[OPS_ACC3(0,0,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_advec_mom_kernel_x3( double* __restrict arg0, double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_advec_mom_kernel_x3 + idx_z * 1*1 * xdim0_advec_mom_kernel_x3 * ydim0_advec_mom_kernel_x3; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_advec_mom_kernel_x3 + idx_z * 1*1 * xdim1_advec_mom_kernel_x3 * ydim1_advec_mom_kernel_x3; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_advec_mom_kernel_x3 + idx_z * 1*1 * xdim2_advec_mom_kernel_x3 * ydim2_advec_mom_kernel_x3; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_advec_mom_kernel_x3 + idx_z * 1*1 * xdim3_advec_mom_kernel_x3 * ydim3_advec_mom_kernel_x3; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_mom_kernel_x3_gpu(arg0, arg1, arg2, arg3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel_x3(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_advec_mom_kernel_x3_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,124)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(124,"advec_mom_kernel_x3"); OPS_kernels[124].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = 
sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != xdim0_advec_mom_kernel_x3_h || ydim0 != ydim0_advec_mom_kernel_x3_h || xdim1 != xdim1_advec_mom_kernel_x3_h || ydim1 != ydim1_advec_mom_kernel_x3_h || xdim2 != xdim2_advec_mom_kernel_x3_h || ydim2 != ydim2_advec_mom_kernel_x3_h || xdim3 != xdim3_advec_mom_kernel_x3_h || ydim3 != ydim3_advec_mom_kernel_x3_h) { cudaMemcpyToSymbol( xdim0_advec_mom_kernel_x3, &xdim0, sizeof(int) ); xdim0_advec_mom_kernel_x3_h = xdim0; cudaMemcpyToSymbol( ydim0_advec_mom_kernel_x3, &ydim0, sizeof(int) ); ydim0_advec_mom_kernel_x3_h = ydim0; cudaMemcpyToSymbol( xdim1_advec_mom_kernel_x3, &xdim1, sizeof(int) ); xdim1_advec_mom_kernel_x3_h = xdim1; cudaMemcpyToSymbol( ydim1_advec_mom_kernel_x3, &ydim1, sizeof(int) ); ydim1_advec_mom_kernel_x3_h = ydim1; cudaMemcpyToSymbol( xdim2_advec_mom_kernel_x3, &xdim2, sizeof(int) ); xdim2_advec_mom_kernel_x3_h = xdim2; cudaMemcpyToSymbol( ydim2_advec_mom_kernel_x3, &ydim2, sizeof(int) ); ydim2_advec_mom_kernel_x3_h = ydim2; cudaMemcpyToSymbol( xdim3_advec_mom_kernel_x3, &xdim3, sizeof(int) ); xdim3_advec_mom_kernel_x3_h = xdim3; cudaMemcpyToSymbol( ydim3_advec_mom_kernel_x3, &ydim3, sizeof(int) ); ydim3_advec_mom_kernel_x3_h = ydim3; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? 
args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[124].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_advec_mom_kernel_x3<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[124].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[124].mpi_time += t2-t1; OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[124].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel_x3(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 124; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 124; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = 
ops_par_loop_advec_mom_kernel_x3_execute; if (OPS_diags > 1) { ops_timing_realloc(124,"advec_mom_kernel_x3"); } ops_enqueue_kernel(desc); } #endif
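The generated host stub above turns the 3-D iteration start index into a flat byte offset before launching the kernel: each dimension contributes elem_size times the product of the lower-dimension extents times the start index, and the resulting base is added to the device pointer. A minimal standalone sketch of that offset arithmetic, with hypothetical sizes rather than anything taken from the OPS runtime (stencil strides assumed to be 1):

#include <stdio.h>

// Byte offset of element (s0, s1, s2) in a dataset laid out as
// size0 x size1 x size2 elements of elem_size bytes each, mirroring the
// base0..base3 computation in the stub above (strides taken as 1).
static long flat_offset(int elem_size, int size0, int size1,
                        int s0, int s1, int s2) {
  long off  = (long)elem_size * s0;                  // x contribution
  off      += (long)elem_size * size0 * s1;          // y contribution
  off      += (long)elem_size * size0 * size1 * s2;  // z contribution
  return off;
}

int main(void) {
  // Hypothetical 38 x 38 x 38 dataset of doubles, iteration starting at (2,2,2).
  long off = flat_offset((int)sizeof(double), 38, 38, 2, 2, 2);
  printf("byte offset = %ld\n", off);                // 16 + 608 + 23104 = 23728
  return 0;
}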
a912ea028b7be3c837bbb8ed810ef373ed7cc4e9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #define _INIT_MATRIX_(ret_type, func_name) ret_type func_name #define _PRINT_MATRIX_(ret_type, func_name) ret_type func_name // initialize the matrix _INIT_MATRIX_(void, InitMatrix)(float *A, int nx, int ny) { int i, j; float cnt = 0; for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { A[i*nx + j] = ++cnt; } } } // print the matrix _PRINT_MATRIX_(void, PrintMatrix)(float *A, int nx, int ny) { int i, j; for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { printf("%.2f ", A[i*nx + j]); } printf("\n"); } } // add matrix on CPU void SumMatrixOnCPU(float *A, float *B, float *C, int nx, int ny) { int i, j; for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { C[i*nx + j] = A[i*nx + j] + B[i*nx + j]; } } } // add the matrix on GPU __global__ void SumMatrixOnGPU(float *A, float *B, float *C, int nx, int ny) { int ix = threadIdx.x + blockIdx.x * blockDim.x; int iy = threadIdx.y + blockIdx.y * blockDim.y; int idx = iy * nx + ix; if (ix < nx && iy < ny) { C[idx] = A[idx] + B[idx]; } } int main(int argc, char *argv[]) { int N = 1 << 10; int nx = 1 << 5; int ny = 1 << 5; float *h_A = NULL; float *h_B = NULL; float *h_C = NULL; float *gpu_result = NULL; float *d_A = NULL; float *d_B = NULL; float *d_C = NULL; // allocate the memory on CPU h_A = (float *)malloc(sizeof(float)*N); h_B = (float *)malloc(sizeof(float)*N); h_C = (float *)malloc(sizeof(float)*N); gpu_result = (float *)malloc(sizeof(float)*N); memset(h_A, 0, sizeof(float)*N); memset(h_B, 0, sizeof(float)*N); memset(h_C, 0, sizeof(float)*N); memset(gpu_result, 0, sizeof(float)*N); // allocate the memory on GPU hipMalloc((float **)&d_A, sizeof(float)*N); hipMalloc((float **)&d_B, sizeof(float)*N); hipMalloc((float **)&d_C, sizeof(float)*N); //hipMemset(d_A, 0, N); //hipMemset(d_B, 0, N); //hipMemset(d_C, 0, N); dim3 block(32, 32); dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); // make the initialization InitMatrix(h_A, 1 << 5, 1 << 5); InitMatrix(h_B, 1 << 5, 1 << 5); // transfer the data from CPU to GPU hipMemcpy(d_A, h_A, sizeof(float)*N, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, sizeof(float)*N, hipMemcpyHostToDevice); // add the matrix SumMatrixOnCPU(h_A, h_B, h_C, 1 << 5, 1 << 5); // sync SumMatrixOnGPU << <grid, block >> >(d_A, d_B, d_C, 1 << 5, 1 << 5); hipDeviceSynchronize(); // transfer the data from GPU to CPU hipMemcpy(gpu_result, d_C, sizeof(float)*N, hipMemcpyDeviceToHost); PrintMatrix(h_C, 1 << 5, 1 << 5); PrintMatrix(gpu_result, 1 << 5, 1 << 5); // free the memory hipFree(d_A); hipFree(d_B); hipFree(d_C); free(h_A); free(h_B); free(h_C); free(gpu_result); return 0; }
a912ea028b7be3c837bbb8ed810ef373ed7cc4e9.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #define _INIT_MATRIX_(ret_type, func_name) ret_type func_name #define _PRINT_MATRIX_(ret_type, func_name) ret_type func_name // initialize the matrix _INIT_MATRIX_(void, InitMatrix)(float *A, int nx, int ny) { int i, j; float cnt = 0; for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { A[i*nx + j] = ++cnt; } } } // print the matrix _PRINT_MATRIX_(void, PrintMatrix)(float *A, int nx, int ny) { int i, j; for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { printf("%.2f ", A[i*nx + j]); } printf("\n"); } } // add matrix on CPU void SumMatrixOnCPU(float *A, float *B, float *C, int nx, int ny) { int i, j; for (i = 0; i < nx; i++) { for (j = 0; j < ny; j++) { C[i*nx + j] = A[i*nx + j] + B[i*nx + j]; } } } // add the matrix on GPU __global__ void SumMatrixOnGPU(float *A, float *B, float *C, int nx, int ny) { int ix = threadIdx.x + blockIdx.x * blockDim.x; int iy = threadIdx.y + blockIdx.y * blockDim.y; int idx = iy * nx + ix; if (ix < nx && iy < ny) { C[idx] = A[idx] + B[idx]; } } int main(int argc, char *argv[]) { int N = 1 << 10; int nx = 1 << 5; int ny = 1 << 5; float *h_A = NULL; float *h_B = NULL; float *h_C = NULL; float *gpu_result = NULL; float *d_A = NULL; float *d_B = NULL; float *d_C = NULL; // allocate the memory on CPU h_A = (float *)malloc(sizeof(float)*N); h_B = (float *)malloc(sizeof(float)*N); h_C = (float *)malloc(sizeof(float)*N); gpu_result = (float *)malloc(sizeof(float)*N); memset(h_A, 0, sizeof(float)*N); memset(h_B, 0, sizeof(float)*N); memset(h_C, 0, sizeof(float)*N); memset(gpu_result, 0, sizeof(float)*N); // allocate the memory on GPU cudaMalloc((float **)&d_A, sizeof(float)*N); cudaMalloc((float **)&d_B, sizeof(float)*N); cudaMalloc((float **)&d_C, sizeof(float)*N); //cudaMemset(d_A, 0, N); //cudaMemset(d_B, 0, N); //cudaMemset(d_C, 0, N); dim3 block(32, 32); dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y); // make the initialization InitMatrix(h_A, 1 << 5, 1 << 5); InitMatrix(h_B, 1 << 5, 1 << 5); // transfer the data from CPU to GPU cudaMemcpy(d_A, h_A, sizeof(float)*N, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, sizeof(float)*N, cudaMemcpyHostToDevice); // add the matrix SumMatrixOnCPU(h_A, h_B, h_C, 1 << 5, 1 << 5); // sync SumMatrixOnGPU << <grid, block >> >(d_A, d_B, d_C, 1 << 5, 1 << 5); cudaDeviceSynchronize(); // transfer the data from GPU to CPU cudaMemcpy(gpu_result, d_C, sizeof(float)*N, cudaMemcpyDeviceToHost); PrintMatrix(h_C, 1 << 5, 1 << 5); PrintMatrix(gpu_result, 1 << 5, 1 << 5); // free the memory cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); free(h_A); free(h_B); free(h_C); free(gpu_result); return 0; }
2c87002221f89df6682462cfa437c27ea5d9022a.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************** * * (C) Copyright 2007 The Board of Trustees of the * University of Illinois * All Rights Reserved * ***************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> #include <hip/hip_runtime.h> /* #define HIP_ERRCK \ { hipError_t err = hipGetLastError(); \ if (err) fprintf(stderr, "HIP error: %s\n", hipGetErrorString(err)); \ } */ /* // Place and Transition are implicitly included in the code // as the grid is a fixed one typedef struct { float mark; } Place; typedef struct { int from1, from2; int to1, to2; } Transition; // this starts from row 0 and col 0 P(r,c) -> T(r,c) -> P(r,c+1) -> | | | \/ \/ \/ T(r+1,c-1)-> P(r+1,c) -> T(r+1,c) -> | | | \/ \/ \/ P(r+2,c) -> T(r+2,c) -> P(r+2,c+1)-> | | | \/ \/ \/ T(r+3,c-1)-> P(r+3,c) -> T(r+3,c)-> | | | \/ \/ \/ */ #include "rand_gen.cu" #include "petri_kernel.cu" static int N, s, t, N2, NSQUARE2; uint32 host_mt[MERS_N]; void* AllocateDeviceMemory(int size); void CopyFromDeviceMemory(void* h_p, void* d_p, int size); void CopyFromHostMemory(void* d_p, void* h_p, int size); void FreeDeviceMemory(void* mem); void PetrinetOnDevice(long long &ktime); void compute_statistics(); float results[4]; float* h_vars; int* h_maxs; long long get_time() { struct timeval tv; gettimeofday(&tv, NULL); return (tv.tv_sec * 1000000) + tv.tv_usec; } int main(int argc, char** argv) { if (argc<4) { printf("Usage: petri n s t\n" "n: the place-transition grid is 2nX2n\n" "s: the maximum steps in a trajectory\n" "t: number of trajectories\n"); return -1; } N = atoi(argv[1]); if (N<1) return -1; s = atoi(argv[2]); if (s<1) return -1; t = atoi(argv[3]); if (t<1) return -1; N2 = N+N; NSQUARE2 = N*N2; h_vars = (float*)malloc(t*sizeof(float)); h_maxs = (int*)malloc(t*sizeof(int)); // compute the simulation on the GPU long long ktime = 0; auto start = get_time(); PetrinetOnDevice(ktime); auto end = get_time(); printf("Total kernel execution time: %.2f s\n", ktime / 1e6f); printf("Total device execution time: %.2f s\n", (end - start) / 1e6f); compute_statistics(); free(h_vars); free(h_maxs); printf("petri N=%d s=%d t=%d\n", N, s, t); printf("mean_vars: %f var_vars: %f\n", results[0], results[1]); printf("mean_maxs: %f var_maxs: %f\n", results[2], results[3]); return 0; } void compute_statistics() { float sum = 0; float sum_vars = 0; float sum_max = 0; float sum_max_vars = 0; int i; for (i=0; i<t; i++) { sum += h_vars[i]; sum_vars += h_vars[i]*h_vars[i]; sum_max += h_maxs[i]; sum_max_vars += h_maxs[i]*h_maxs[i]; } results[0] = sum/t; results[1] = sum_vars/t - results[0]*results[0]; results[2] = sum_max/t; results[3] = sum_max_vars/t - results[2]*results[2]; } void PetrinetOnDevice(long long &time) { // Allocate memory int i; int unit_size = NSQUARE2*(sizeof(int)+sizeof(char))+ sizeof(float)+sizeof(int); int block_num = MAX_DEVICE_MEM/unit_size; printf("Number of thread blocks: %d\n", block_num); int *p_hmaxs; float *p_hvars; int* g_places; float* g_vars; int* g_maxs; g_places = (int*)AllocateDeviceMemory((unit_size- sizeof(float) - sizeof(int))*block_num); g_vars = (float*)AllocateDeviceMemory(block_num*sizeof(float)); g_maxs = (int*)AllocateDeviceMemory(block_num*sizeof(int)); // Setup the execution configuration dim3 grid(block_num); // number of blocks dim3 threads(256); // each block has 256 threads p_hmaxs = h_maxs; p_hvars = h_vars; 
// Launch the device computation threads! for (i = 0; i<t-block_num; i+=block_num) { hipDeviceSynchronize(); auto start = get_time(); hipLaunchKernelGGL(( PetrinetKernel), dim3(grid), dim3(threads), 0, 0, g_places, g_vars, g_maxs, N, s, 5489*(i+1)); hipDeviceSynchronize(); auto end = get_time(); time += end - start; CopyFromDeviceMemory(p_hmaxs, g_maxs, block_num*sizeof(int)); CopyFromDeviceMemory(p_hvars, g_vars, block_num*sizeof(float)); p_hmaxs += block_num; p_hvars += block_num; } dim3 grid1(t-i); hipDeviceSynchronize(); auto start = get_time(); hipLaunchKernelGGL(( PetrinetKernel), dim3(grid1), dim3(threads), 0, 0, g_places, g_vars, g_maxs, N, s, 5489*(i+1)); hipDeviceSynchronize(); auto end = get_time(); time += end - start; // Read result from the device CopyFromDeviceMemory(p_hmaxs, g_maxs, (t-i)*sizeof(int)); CopyFromDeviceMemory(p_hvars, g_vars, (t-i)*sizeof(float)); // Free device matrices FreeDeviceMemory(g_places); FreeDeviceMemory(g_vars); FreeDeviceMemory(g_maxs); } // Allocate a device matrix of same size as M. void* AllocateDeviceMemory(int size) { int* mem; hipMalloc((void**)&mem, size); return mem; } // Copy device memory to host memory void CopyFromDeviceMemory(void* h_p, void* d_p, int size) { hipMemcpy(h_p, d_p, size, hipMemcpyDeviceToHost); //HIP_ERRCK } // Copy device memory from host memory void CopyFromHostMemory(void* d_p, void* h_p, int size) { hipMemcpy(d_p, h_p, size, hipMemcpyHostToDevice); //HIP_ERRCK } // Free a device matrix. void FreeDeviceMemory(void* mem) { if (mem!=NULL) hipFree(mem); //HIP_ERRCK }
2c87002221f89df6682462cfa437c27ea5d9022a.cu
/*************************************************************************** * * (C) Copyright 2007 The Board of Trustees of the * University of Illinois * All Rights Reserved * ***************************************************************************/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <sys/time.h> #include <hip/hip_runtime.h> /* #define HIP_ERRCK \ { hipError_t err = hipGetLastError(); \ if (err) fprintf(stderr, "HIP error: %s\n", hipGetErrorString(err)); \ } */ /* // Place and Transition are implicitly included in the code // as the grid is a fixed one typedef struct { float mark; } Place; typedef struct { int from1, from2; int to1, to2; } Transition; // this starts from row 0 and col 0 P(r,c) -> T(r,c) -> P(r,c+1) -> | | | \/ \/ \/ T(r+1,c-1)-> P(r+1,c) -> T(r+1,c) -> | | | \/ \/ \/ P(r+2,c) -> T(r+2,c) -> P(r+2,c+1)-> | | | \/ \/ \/ T(r+3,c-1)-> P(r+3,c) -> T(r+3,c)-> | | | \/ \/ \/ */ #include "rand_gen.cu" #include "petri_kernel.cu" static int N, s, t, N2, NSQUARE2; uint32 host_mt[MERS_N]; void* AllocateDeviceMemory(int size); void CopyFromDeviceMemory(void* h_p, void* d_p, int size); void CopyFromHostMemory(void* d_p, void* h_p, int size); void FreeDeviceMemory(void* mem); void PetrinetOnDevice(long long &ktime); void compute_statistics(); float results[4]; float* h_vars; int* h_maxs; long long get_time() { struct timeval tv; gettimeofday(&tv, NULL); return (tv.tv_sec * 1000000) + tv.tv_usec; } int main(int argc, char** argv) { if (argc<4) { printf("Usage: petri n s t\n" "n: the place-transition grid is 2nX2n\n" "s: the maximum steps in a trajectory\n" "t: number of trajectories\n"); return -1; } N = atoi(argv[1]); if (N<1) return -1; s = atoi(argv[2]); if (s<1) return -1; t = atoi(argv[3]); if (t<1) return -1; N2 = N+N; NSQUARE2 = N*N2; h_vars = (float*)malloc(t*sizeof(float)); h_maxs = (int*)malloc(t*sizeof(int)); // compute the simulation on the GPU long long ktime = 0; auto start = get_time(); PetrinetOnDevice(ktime); auto end = get_time(); printf("Total kernel execution time: %.2f s\n", ktime / 1e6f); printf("Total device execution time: %.2f s\n", (end - start) / 1e6f); compute_statistics(); free(h_vars); free(h_maxs); printf("petri N=%d s=%d t=%d\n", N, s, t); printf("mean_vars: %f var_vars: %f\n", results[0], results[1]); printf("mean_maxs: %f var_maxs: %f\n", results[2], results[3]); return 0; } void compute_statistics() { float sum = 0; float sum_vars = 0; float sum_max = 0; float sum_max_vars = 0; int i; for (i=0; i<t; i++) { sum += h_vars[i]; sum_vars += h_vars[i]*h_vars[i]; sum_max += h_maxs[i]; sum_max_vars += h_maxs[i]*h_maxs[i]; } results[0] = sum/t; results[1] = sum_vars/t - results[0]*results[0]; results[2] = sum_max/t; results[3] = sum_max_vars/t - results[2]*results[2]; } void PetrinetOnDevice(long long &time) { // Allocate memory int i; int unit_size = NSQUARE2*(sizeof(int)+sizeof(char))+ sizeof(float)+sizeof(int); int block_num = MAX_DEVICE_MEM/unit_size; printf("Number of thread blocks: %d\n", block_num); int *p_hmaxs; float *p_hvars; int* g_places; float* g_vars; int* g_maxs; g_places = (int*)AllocateDeviceMemory((unit_size- sizeof(float) - sizeof(int))*block_num); g_vars = (float*)AllocateDeviceMemory(block_num*sizeof(float)); g_maxs = (int*)AllocateDeviceMemory(block_num*sizeof(int)); // Setup the execution configuration dim3 grid(block_num); // number of blocks dim3 threads(256); // each block has 256 threads p_hmaxs = h_maxs; p_hvars = h_vars; // Launch the device computation threads! 
for (i = 0; i<t-block_num; i+=block_num) { hipDeviceSynchronize(); auto start = get_time(); PetrinetKernel<<<grid, threads>>> (g_places, g_vars, g_maxs, N, s, 5489*(i+1)); hipDeviceSynchronize(); auto end = get_time(); time += end - start; CopyFromDeviceMemory(p_hmaxs, g_maxs, block_num*sizeof(int)); CopyFromDeviceMemory(p_hvars, g_vars, block_num*sizeof(float)); p_hmaxs += block_num; p_hvars += block_num; } dim3 grid1(t-i); hipDeviceSynchronize(); auto start = get_time(); PetrinetKernel<<<grid1, threads>>> (g_places, g_vars, g_maxs, N, s, 5489*(i+1)); hipDeviceSynchronize(); auto end = get_time(); time += end - start; // Read result from the device CopyFromDeviceMemory(p_hmaxs, g_maxs, (t-i)*sizeof(int)); CopyFromDeviceMemory(p_hvars, g_vars, (t-i)*sizeof(float)); // Free device matrices FreeDeviceMemory(g_places); FreeDeviceMemory(g_vars); FreeDeviceMemory(g_maxs); } // Allocate a device matrix of same size as M. void* AllocateDeviceMemory(int size) { int* mem; hipMalloc((void**)&mem, size); return mem; } // Copy device memory to host memory void CopyFromDeviceMemory(void* h_p, void* d_p, int size) { hipMemcpy(h_p, d_p, size, hipMemcpyDeviceToHost); //HIP_ERRCK } // Copy device memory from host memory void CopyFromHostMemory(void* d_p, void* h_p, int size) { hipMemcpy(d_p, h_p, size, hipMemcpyHostToDevice); //HIP_ERRCK } // Free a device matrix. void FreeDeviceMemory(void* mem) { if (mem!=NULL) hipFree(mem); //HIP_ERRCK }
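Apart from the hipify banner and headers, the only substantive difference between the two Petri-net files above is the launch syntax: hipify rewrites the CUDA triple-chevron launch into hipLaunchKernelGGL, whose third and fourth arguments carry the dynamic shared-memory size and the stream that the chevron form passes implicitly. A side-by-side sketch of that mapping on a deliberately small kernel (the kernel and its arguments are illustrative, not taken from the files above):

// Illustrative kernel, unrelated to PetrinetKernel.
__global__ void Scale(float *x, int n, float a) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

void LaunchBoth(float *d_x, int n) {
  dim3 grid((n + 255) / 256), block(256);

  // CUDA form, as in the .cu file (shared mem and stream default to 0):
  Scale<<<grid, block>>>(d_x, n, 2.0f);

  // Equivalent HIP form emitted by hipify, as in the .hip file:
  //   hipLaunchKernelGGL(Scale, grid, block, 0 /*sharedMem*/, 0 /*stream*/,
  //                      d_x, n, 2.0f);
}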
6c65926df7ab436a242b8eae2ea4c2a6aa476063.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../shared/timer.hpp" #include "../shared/tigr_utilities.hpp" #include "../shared/graph.hpp" #include "../shared/virtual_graph.hpp" #include "../shared/globals.hpp" #include "../shared/argument_parsing.hpp" #include "../shared/gpu_error_check.cuh" __global__ void kernel(unsigned int numParts, unsigned int *nodePointer, PartPointer *partNodePointer, unsigned int *edgeList, unsigned int *dist, bool *finished, bool *label1, bool *label2) { unsigned int partId = blockDim.x * blockIdx.x + threadIdx.x; if(partId < numParts) { unsigned int id = partNodePointer[partId].node; unsigned int part = partNodePointer[partId].part; if(label1[id] == false) return; unsigned int sourceWeight = dist[id]; //if (sourceWeight != DIST_INFINITY) //{ unsigned int thisPointer = nodePointer[id]; unsigned int degree = edgeList[thisPointer]; //int thisDegree = Part_Size; //int temp = degree - part*Part_Size; //if(temp <= Part_Size) // thisDegree = temp; unsigned int numParts; if(degree % Part_Size == 0) numParts = degree / Part_Size ; else numParts = degree / Part_Size + 1; //printf("id = %d degree = %d \n", id, thisDegree); unsigned int end; unsigned int w8; unsigned int finalDist; unsigned int thisEdgeW8; unsigned int ofs = thisPointer + 2*part + 1; for(int i=0; i<Part_Size; i++) { if(part + i*numParts >= degree) break; end = ofs + i*numParts*2; w8 = end + 1; thisEdgeW8 = edgeList[w8]; //if(sourceWeight > thisEdgeW8) // finalDist = thisEdgeW8; //else // finalDist = sourceWeight; finalDist = min(thisEdgeW8, sourceWeight); if(finalDist > dist[edgeList[end]]) { atomicMax(&dist[edgeList[end]] , finalDist); *finished = false; label2[edgeList[end]] = true; } } //} //label1[id] = false; } } __global__ void clearLabel(bool *label, unsigned int size) { unsigned int id = blockDim.x * blockIdx.x + threadIdx.x; if(id < size) label[id] = false; } int main(int argc, char** argv) { ArgumentParser arguments(argc, argv, true, false); Graph graph(arguments.input, true, arguments.printIntermediateResults); graph.ReadGraph(); VirtualGraph vGraph(graph); vGraph.MakeGraph(); uint num_nodes = graph.num_nodes; uint num_edges = graph.num_edges; if(arguments.hasDeviceID) hipSetDevice(arguments.deviceID); hipFree(0); unsigned int *dist; dist = new unsigned int[num_nodes]; bool *label1; bool *label2; label1 = new bool[num_nodes]; label2 = new bool[num_nodes]; for(int i=0; i<num_nodes; i++) { dist[i] = 0; label1[i] = false; label2[i] = false; } dist[arguments.sourceNode] = DIST_INFINITY; label1[arguments.sourceNode] = true; unsigned int *d_nodePointer; unsigned int *d_edgeList; unsigned int *d_dist; PartPointer *d_partNodePointer; bool *d_label1; bool *d_label2; bool finished; bool *d_finished; gpuErrorcheck(hipMalloc(&d_nodePointer, num_nodes * sizeof(unsigned int))); gpuErrorcheck(hipMalloc(&d_edgeList, (2*num_edges + num_nodes) * sizeof(unsigned int))); gpuErrorcheck(hipMalloc(&d_dist, num_nodes * sizeof(unsigned int))); gpuErrorcheck(hipMalloc(&d_finished, sizeof(bool))); gpuErrorcheck(hipMalloc(&d_label1, num_nodes * sizeof(bool))); gpuErrorcheck(hipMalloc(&d_label2, num_nodes * sizeof(bool))); gpuErrorcheck(hipMalloc(&d_partNodePointer, vGraph.numParts * sizeof(PartPointer))); gpuErrorcheck(hipMemcpy(d_nodePointer, vGraph.nodePointer, num_nodes * sizeof(unsigned int), hipMemcpyHostToDevice)); gpuErrorcheck(hipMemcpy(d_edgeList, vGraph.edgeList, (2*num_edges + num_nodes) * sizeof(unsigned int), hipMemcpyHostToDevice)); 
gpuErrorcheck(hipMemcpy(d_dist, dist, num_nodes * sizeof(unsigned int), hipMemcpyHostToDevice)); gpuErrorcheck(hipMemcpy(d_label1, label1, num_nodes * sizeof(bool), hipMemcpyHostToDevice)); gpuErrorcheck(hipMemcpy(d_label2, label2, num_nodes * sizeof(bool), hipMemcpyHostToDevice)); gpuErrorcheck(hipMemcpy(d_partNodePointer, vGraph.partNodePointer, vGraph.numParts * sizeof(PartPointer), hipMemcpyHostToDevice)); Timer t; t.Start(); int itr = 0; do { itr++; finished = true; gpuErrorcheck(hipMemcpy(d_finished, &finished, sizeof(bool), hipMemcpyHostToDevice)); if(itr % 2 == 1) { hipLaunchKernelGGL(( kernel), dim3(vGraph.numParts/512 + 1) , dim3(512) , 0, 0, vGraph.numParts, d_nodePointer, d_partNodePointer, d_edgeList, d_dist, d_finished, d_label1, d_label2); hipLaunchKernelGGL(( clearLabel), dim3(num_nodes/512 + 1) , dim3(512) , 0, 0, d_label1, num_nodes); } else { hipLaunchKernelGGL(( kernel), dim3(vGraph.numParts/512 + 1) , dim3(512) , 0, 0, vGraph.numParts, d_nodePointer, d_partNodePointer, d_edgeList, d_dist, d_finished, d_label2, d_label1); hipLaunchKernelGGL(( clearLabel), dim3(num_nodes/512 + 1) , dim3(512) , 0, 0, d_label2, num_nodes); } //getLastCudaError("Kernel execution failed\n"); gpuErrorcheck( hipPeekAtLastError() ); gpuErrorcheck( hipDeviceSynchronize() ); //cout << itr << endl; gpuErrorcheck(hipMemcpy(&finished, d_finished, sizeof(bool), hipMemcpyDeviceToHost)); } while (!(finished)); cout << "Number of iterations = " << itr << endl; float runtime = t.Finish(); cout << "Processing finished in " << runtime << " (ms).\n"; gpuErrorcheck(hipMemcpy(dist, d_dist, num_nodes*sizeof(unsigned int), hipMemcpyDeviceToHost)); if(num_nodes < 30){ utilities::PrintResults(dist, num_nodes); } else{ utilities::PrintResults(dist, 30); } if(arguments.hasOutput) utilities::SaveResults(arguments.output, dist, num_nodes); gpuErrorcheck(hipFree(d_nodePointer)); gpuErrorcheck(hipFree(d_edgeList)); gpuErrorcheck(hipFree(d_dist)); gpuErrorcheck(hipFree(d_finished)); gpuErrorcheck(hipFree(d_label1)); gpuErrorcheck(hipFree(d_label2)); gpuErrorcheck(hipFree(d_partNodePointer)); }
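The kernel above relaxes each edge with finalDist = min(edge weight, dist[source]) and then applies atomicMax at the destination; since dist starts at 0 everywhere and at DIST_INFINITY on the source node, the iteration converges to the maximum bottleneck (widest-path) value reachable from the source. A stripped-down sketch of just that relaxation step over a flat edge list, without the virtual-graph partition layout used above (the edge-list layout and names are assumptions):

// One thread per edge: widest-path (max-min) relaxation with atomicMax.
__global__ void RelaxEdges(const unsigned int *src, const unsigned int *dst,
                           const unsigned int *w, unsigned int numEdges,
                           unsigned int *dist, bool *finished) {
  unsigned int e = blockIdx.x * blockDim.x + threadIdx.x;
  if (e >= numEdges) return;
  unsigned int cand = min(w[e], dist[src[e]]);   // bottleneck through this edge
  if (cand > dist[dst[e]]) {
    atomicMax(&dist[dst[e]], cand);              // keep the widest value seen so far
    *finished = false;                           // another sweep is required
  }
}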
6c65926df7ab436a242b8eae2ea4c2a6aa476063.cu
#include "../shared/timer.hpp" #include "../shared/tigr_utilities.hpp" #include "../shared/graph.hpp" #include "../shared/virtual_graph.hpp" #include "../shared/globals.hpp" #include "../shared/argument_parsing.hpp" #include "../shared/gpu_error_check.cuh" __global__ void kernel(unsigned int numParts, unsigned int *nodePointer, PartPointer *partNodePointer, unsigned int *edgeList, unsigned int *dist, bool *finished, bool *label1, bool *label2) { unsigned int partId = blockDim.x * blockIdx.x + threadIdx.x; if(partId < numParts) { unsigned int id = partNodePointer[partId].node; unsigned int part = partNodePointer[partId].part; if(label1[id] == false) return; unsigned int sourceWeight = dist[id]; //if (sourceWeight != DIST_INFINITY) //{ unsigned int thisPointer = nodePointer[id]; unsigned int degree = edgeList[thisPointer]; //int thisDegree = Part_Size; //int temp = degree - part*Part_Size; //if(temp <= Part_Size) // thisDegree = temp; unsigned int numParts; if(degree % Part_Size == 0) numParts = degree / Part_Size ; else numParts = degree / Part_Size + 1; //printf("id = %d degree = %d \n", id, thisDegree); unsigned int end; unsigned int w8; unsigned int finalDist; unsigned int thisEdgeW8; unsigned int ofs = thisPointer + 2*part + 1; for(int i=0; i<Part_Size; i++) { if(part + i*numParts >= degree) break; end = ofs + i*numParts*2; w8 = end + 1; thisEdgeW8 = edgeList[w8]; //if(sourceWeight > thisEdgeW8) // finalDist = thisEdgeW8; //else // finalDist = sourceWeight; finalDist = min(thisEdgeW8, sourceWeight); if(finalDist > dist[edgeList[end]]) { atomicMax(&dist[edgeList[end]] , finalDist); *finished = false; label2[edgeList[end]] = true; } } //} //label1[id] = false; } } __global__ void clearLabel(bool *label, unsigned int size) { unsigned int id = blockDim.x * blockIdx.x + threadIdx.x; if(id < size) label[id] = false; } int main(int argc, char** argv) { ArgumentParser arguments(argc, argv, true, false); Graph graph(arguments.input, true, arguments.printIntermediateResults); graph.ReadGraph(); VirtualGraph vGraph(graph); vGraph.MakeGraph(); uint num_nodes = graph.num_nodes; uint num_edges = graph.num_edges; if(arguments.hasDeviceID) cudaSetDevice(arguments.deviceID); cudaFree(0); unsigned int *dist; dist = new unsigned int[num_nodes]; bool *label1; bool *label2; label1 = new bool[num_nodes]; label2 = new bool[num_nodes]; for(int i=0; i<num_nodes; i++) { dist[i] = 0; label1[i] = false; label2[i] = false; } dist[arguments.sourceNode] = DIST_INFINITY; label1[arguments.sourceNode] = true; unsigned int *d_nodePointer; unsigned int *d_edgeList; unsigned int *d_dist; PartPointer *d_partNodePointer; bool *d_label1; bool *d_label2; bool finished; bool *d_finished; gpuErrorcheck(cudaMalloc(&d_nodePointer, num_nodes * sizeof(unsigned int))); gpuErrorcheck(cudaMalloc(&d_edgeList, (2*num_edges + num_nodes) * sizeof(unsigned int))); gpuErrorcheck(cudaMalloc(&d_dist, num_nodes * sizeof(unsigned int))); gpuErrorcheck(cudaMalloc(&d_finished, sizeof(bool))); gpuErrorcheck(cudaMalloc(&d_label1, num_nodes * sizeof(bool))); gpuErrorcheck(cudaMalloc(&d_label2, num_nodes * sizeof(bool))); gpuErrorcheck(cudaMalloc(&d_partNodePointer, vGraph.numParts * sizeof(PartPointer))); gpuErrorcheck(cudaMemcpy(d_nodePointer, vGraph.nodePointer, num_nodes * sizeof(unsigned int), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMemcpy(d_edgeList, vGraph.edgeList, (2*num_edges + num_nodes) * sizeof(unsigned int), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMemcpy(d_dist, dist, num_nodes * sizeof(unsigned int), cudaMemcpyHostToDevice)); 
gpuErrorcheck(cudaMemcpy(d_label1, label1, num_nodes * sizeof(bool), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMemcpy(d_label2, label2, num_nodes * sizeof(bool), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMemcpy(d_partNodePointer, vGraph.partNodePointer, vGraph.numParts * sizeof(PartPointer), cudaMemcpyHostToDevice)); Timer t; t.Start(); int itr = 0; do { itr++; finished = true; gpuErrorcheck(cudaMemcpy(d_finished, &finished, sizeof(bool), cudaMemcpyHostToDevice)); if(itr % 2 == 1) { kernel<<< vGraph.numParts/512 + 1 , 512 >>>(vGraph.numParts, d_nodePointer, d_partNodePointer, d_edgeList, d_dist, d_finished, d_label1, d_label2); clearLabel<<< num_nodes/512 + 1 , 512 >>>(d_label1, num_nodes); } else { kernel<<< vGraph.numParts/512 + 1 , 512 >>>(vGraph.numParts, d_nodePointer, d_partNodePointer, d_edgeList, d_dist, d_finished, d_label2, d_label1); clearLabel<<< num_nodes/512 + 1 , 512 >>>(d_label2, num_nodes); } //getLastCudaError("Kernel execution failed\n"); gpuErrorcheck( cudaPeekAtLastError() ); gpuErrorcheck( cudaDeviceSynchronize() ); //cout << itr << endl; gpuErrorcheck(cudaMemcpy(&finished, d_finished, sizeof(bool), cudaMemcpyDeviceToHost)); } while (!(finished)); cout << "Number of iterations = " << itr << endl; float runtime = t.Finish(); cout << "Processing finished in " << runtime << " (ms).\n"; gpuErrorcheck(cudaMemcpy(dist, d_dist, num_nodes*sizeof(unsigned int), cudaMemcpyDeviceToHost)); if(num_nodes < 30){ utilities::PrintResults(dist, num_nodes); } else{ utilities::PrintResults(dist, 30); } if(arguments.hasOutput) utilities::SaveResults(arguments.output, dist, num_nodes); gpuErrorcheck(cudaFree(d_nodePointer)); gpuErrorcheck(cudaFree(d_edgeList)); gpuErrorcheck(cudaFree(d_dist)); gpuErrorcheck(cudaFree(d_finished)); gpuErrorcheck(cudaFree(d_label1)); gpuErrorcheck(cudaFree(d_label2)); gpuErrorcheck(cudaFree(d_partNodePointer)); }
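The host loop above keeps two frontier arrays and alternates their roles on every iteration: odd iterations read label1 and mark label2, even iterations do the reverse, and the array that was just consumed is cleared before the next sweep. The same structure can be expressed with explicit pointer swaps, sketched below (names and the omitted kernel arguments are illustrative; the original selects buffers with itr % 2 instead):

// Ping-pong frontier driver, equivalent in spirit to the itr % 2 branches above.
void RunUntilConverged(bool *d_labelA, bool *d_labelB, unsigned int numNodes,
                       bool *d_finished /* one-bool device flag */) {
  bool finished = false;
  bool *d_in = d_labelA, *d_out = d_labelB;
  while (!finished) {
    finished = true;
    cudaMemcpy(d_finished, &finished, sizeof(bool), cudaMemcpyHostToDevice);

    // kernel<<<numParts/512 + 1, 512>>>(..., d_in, d_out);    // read d_in, mark d_out
    // clearLabel<<<numNodes/512 + 1, 512>>>(d_in, numNodes);  // reset consumed frontier

    cudaMemcpy(&finished, d_finished, sizeof(bool), cudaMemcpyDeviceToHost);
    bool *tmp = d_in; d_in = d_out; d_out = tmp;               // swap roles
  }
}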
c145a65ba73114e8407b56b8449c15b59511a289.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include <iostream> #include <vector> #include "cutlass/cutlass.h" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/device/gemm_batched.h" #pragma warning( disable : 4503) /* This example demonstrates how to use cutlass to compute a batched strided gemm. 
In this example, both A and B matrix are non-transpose and column major matrix batched_C = batched_A x batched_B As an example, matrix C can be seen as ----------------------------------------------------------- (0,0,0) | (0,0,1) | (0,0,2) | (1,0,0) | (1,0,1) | (1,0,2) | ----------------------------------------------------------- (0,1,0) | (0,1,1) | (0,1,2) | (1,1,0) | (1,1,1) | (1,1,2) | ----------------------------------------------------------- (0,2,0) | (0,2,1) | (0,2,2) | (1,2,0) | (1,2,1) | (1,2,2) | ----------------------------------------------------------- (0,3,0) | (0,3,1) | (0,3,2) | (1,3,0) | (1,3,1) | (1,3,2) | ----------------------------------------------------------- (0,4,0) | (0,4,1) | (0,4,2) | (1,4,0) | (1,4,1) | (1,4,2) | ----------------------------------------------------------- (0,5,0) | (0,5,1) | (0,5,2) | (1,5,0) | (1,5,1) | (1,5,2) | ----------------------------------------------------------- batch 0 | batch 1 where we denote each element with (batch_idx, row_idx, column_idx) In this example, batch size is 2, M is 6 and N is 3 The stride (batch_stride_C) between the first element of two batches is ldc * n matrix A can be seen as --------------------------------------- (0,0,0) | (0,0,1) | (1,0,0) | (1,0,1) | --------------------------------------- (0,1,0) | (0,1,1) | (1,1,0) | (1,1,1) | --------------------------------------- (0,2,0) | (0,2,1) | (1,2,0) | (1,2,1) | --------------------------------------- (0,3,0) | (0,3,1) | (1,3,0) | (1,3,1) | --------------------------------------- (0,4,0) | (0,4,1) | (1,4,0) | (1,4,1) | --------------------------------------- (0,5,0) | (0,5,1) | (1,5,0) | (1,5,1) | --------------------------------------- batch 0 | batch 1 , where batch size is 2, M is 6 and K is 2 The stride (batch_stride_B) between the first element of two batches is lda * k matrix B can be seen as ----------------------------- (0,0,0) | (0,0,1) | (0,0,2) | ----------------------------- batch 0 (0,1,0) | (0,1,1) | (0,1,2) | ------------------------------------- (1,0,0) | (1,0,1) | (1,0,2) | ----------------------------- batch 1 (1,1,0) | (1,1,1) | (1,1,2) | ----------------------------- , where the batch size is 2, N is 3 and K is 2 The stride (batch_stride_C) between the first element of two batches is k */ hipError_t cutlass_strided_batched_sgemm( int m, int n, int k, float alpha, float const *A, int lda, long long int batch_stride_A, float const *B, int ldb, long long int batch_stride_B, float *C, int ldc, long long int batch_stride_C, float beta, int batch_count) { using Gemm = cutlass::gemm::device::GemmBatched< float, cutlass::layout::ColumnMajor, float, cutlass::layout::ColumnMajor, float, cutlass::layout::ColumnMajor >; Gemm gemm_op; cutlass::Status status = gemm_op({ {m, n, k}, {A, lda}, batch_stride_A, {B, ldb}, batch_stride_B, {C, ldc}, batch_stride_C, {C, ldc}, batch_stride_C, {alpha, beta}, batch_count }); if (status != cutlass::Status::kSuccess) { return hipErrorUnknown; } return hipSuccess; } template<typename T> hipError_t strided_batched_gemm_nn_reference( int m, int n, int k, T alpha, std::vector<T> const &A, int lda, long long int batch_stride_A, std::vector<T> const &B, int ldb, long long int batch_stride_B, std::vector<T> &C, int ldc, long long int batch_stride_C, T beta, int batch_count) { /* strided batched gemm NN */ hipError_t result = hipSuccess; if (A.size() < lda * k * batch_count) { std::cout << "the size of A is too small" << std::endl; return hipErrorInvalidValue; } if (B.size() < ldb * n) { std::cout << "the size of B is too 
small" << std::endl; return hipErrorInvalidValue; } if (C.size() < ldc * n * batch_count) { std::cout << "the size of C is too small" << std::endl; return hipErrorInvalidValue; } for (int batch_idx = 0; batch_idx < batch_count; batch_idx++) { for (int n_idx = 0; n_idx < n; n_idx++) { for (int m_idx = 0; m_idx < m; m_idx++) { T accum = beta * C[batch_idx * batch_stride_C + n_idx * ldc + m_idx]; for (int k_idx = 0; k_idx < k; k_idx++) { accum += alpha * A[batch_idx * batch_stride_A + k_idx * lda + m_idx] * B[batch_idx * batch_stride_B + n_idx * ldb + k_idx]; } C[batch_idx * batch_stride_C + n_idx * ldc + m_idx] = accum; } } } return result; } int main() { // Arbitrary problem size int const m = 520; int const n = 219; int const k = 129; int const batch_count = 17; // A, B are non-transpose, column major int const lda = m; int const ldb = k * batch_count; int const ldc = m; int const count_A = batch_count * lda * k; int const count_B = ldb * n; int const count_C = batch_count * ldc * n; // the memory is batched along K dimension long long int batch_stride_A = static_cast<long long int>(lda) * static_cast<long long int>(k); long long int batch_stride_B = static_cast<long long int>(k); long long int batch_stride_C = static_cast<long long int>(ldc) * static_cast<long long int>(n); // alpha and beta float alpha = 1.0f; float beta = 2.0f; hipError_t result = hipSuccess; // allocate the host memory std::vector<float> host_A(count_A); std::vector<float> host_B(count_B); std::vector<float> host_C(count_C); std::vector<float> result_C(count_C); // allocate the device memory float *A; float *B; float *C; result = hipMalloc(&A, count_A * sizeof(float)); if (result != hipSuccess) { std::cerr << "hipMalloc result = " << result << std::endl; return result; } result = hipMalloc(&B, count_B * sizeof(float)); if (result != hipSuccess) { std::cerr << "hipMalloc result = " << result << std::endl; return result; } result = hipMalloc(&C, count_C * sizeof(float)); if (result != hipSuccess) { std::cerr << "hipMalloc result = " << result << std::endl; return result; } // Limit range to avoid floating-point errors int const kRange = 8; // fill A for (int b_idx = 0; b_idx < batch_count; b_idx++) { for (int col_idx = 0; col_idx < k; col_idx++) { for (int row_idx = 0; row_idx < m; row_idx++) { host_A[row_idx + col_idx * lda + b_idx * lda * k] = static_cast<float>((row_idx + col_idx * lda + b_idx * lda * k) % kRange); } } } // fill B for (int b_idx = 0; b_idx < batch_count; b_idx++) { for (int col_idx = 0; col_idx < n; col_idx++) { for (int row_idx = 0; row_idx < k; row_idx++) { host_B[row_idx + col_idx * ldb + b_idx * k] = static_cast<float>(((n + k * ldb + batch_count * k) - (row_idx + col_idx * ldb + b_idx * k)) % kRange); } } } // fill C for (int b_idx = 0; b_idx < batch_count; b_idx++) { for (int col_idx = 0; col_idx < n; col_idx++) { for (int row_idx = 0; row_idx < m; row_idx++) { host_C[row_idx + col_idx * ldc + b_idx * ldc * n] = 1.f; } } } // ref memory std::vector<float> ref_A(host_A); std::vector<float> ref_B(host_B); std::vector<float> ref_C(host_C); // copy host memory to device result = hipMemcpy(A, host_A.data(), count_A * sizeof(float), hipMemcpyHostToDevice); if (result != hipSuccess) { std::cerr << "hipMemcpy result = " << result << std::endl; return result; } result = hipMemcpy(B, host_B.data(), count_B * sizeof(float), hipMemcpyHostToDevice); if (result != hipSuccess) { std::cerr << "hipMemcpy result = " << result << std::endl; return result; } result = hipMemcpy(C, host_C.data(), count_C * 
sizeof(float), hipMemcpyHostToDevice); if (result != hipSuccess) { std::cerr << "hipMemcpy result = " << result << std::endl; return result; } // run cutlass result = cutlass_strided_batched_sgemm( m, n, k, alpha, A, lda, batch_stride_A, B, ldb, batch_stride_B, C, ldc, batch_stride_C, beta, batch_count); if (result != hipSuccess) return result; // copy device memory to host result = hipMemcpy(result_C.data(), C, count_C * sizeof(float), hipMemcpyDeviceToHost); if (result != hipSuccess) { std::cerr << "hipMemcpy result = " << result << std::endl; return result; } //compare with reference code result = strided_batched_gemm_nn_reference(m, n, k, alpha, ref_A, lda, batch_stride_A, ref_B, ldb, batch_stride_B, ref_C, ldc, batch_stride_C, beta, batch_count); if (result != 0) return result; // Expect bit-level accuracy for this simple example if (ref_C != result_C) { std::cout << "CUTLASS strided batched gemm does not run correctly" << std::endl; return hipErrorUnknown; } // free memory result = hipFree(A); if (result != hipSuccess) { std::cerr << "hipFree result = " << result << std::endl; return result; } result = hipFree(B); if (result != hipSuccess) { std::cerr << "hipFree result = " << result << std::endl; return result; } result = hipFree(C); if (result != hipSuccess) { std::cerr << "hipFree result = " << result << std::endl; return result; } if (result == hipSuccess) { std::cout << "Passed." << std::endl; } // Exit. return result == hipSuccess ? 0 : -1; }
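In the example above, A and C use one full matrix per batch (batch_stride_A = lda * k, batch_stride_C = ldc * n), while B is interleaved along the K dimension: ldb = k * batch_count and batch_stride_B = k, so each batch of B occupies a k-row block of one wide column-major matrix. A small helper that makes the three addressings explicit, mirroring the fill loops above (the helper itself is hypothetical):

// Column-major element index of (row, col) in batch b for each operand,
// using the same leading dimensions and batch strides as the example above.
static long long idxA(int b, int row, int col, int lda, int k) {
  return (long long)b * lda * k + (long long)col * lda + row;  // batch_stride_A = lda*k
}
static long long idxB(int b, int row, int col, int ldb, int k) {
  return (long long)b * k + (long long)col * ldb + row;        // batch_stride_B = k
}
static long long idxC(int b, int row, int col, int ldc, int n) {
  return (long long)b * ldc * n + (long long)col * ldc + row;  // batch_stride_C = ldc*n
}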
c145a65ba73114e8407b56b8449c15b59511a289.cu
/*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ #include <iostream> #include <vector> #include "cutlass/cutlass.h" #include "cutlass/layout/matrix.h" #include "cutlass/gemm/device/gemm_batched.h" #pragma warning( disable : 4503) /* This example demonstrates how to use cutlass to compute a batched strided gemm. 
In this example, both A and B matrix are non-transpose and column major matrix batched_C = batched_A x batched_B As an example, matrix C can be seen as ----------------------------------------------------------- (0,0,0) | (0,0,1) | (0,0,2) | (1,0,0) | (1,0,1) | (1,0,2) | ----------------------------------------------------------- (0,1,0) | (0,1,1) | (0,1,2) | (1,1,0) | (1,1,1) | (1,1,2) | ----------------------------------------------------------- (0,2,0) | (0,2,1) | (0,2,2) | (1,2,0) | (1,2,1) | (1,2,2) | ----------------------------------------------------------- (0,3,0) | (0,3,1) | (0,3,2) | (1,3,0) | (1,3,1) | (1,3,2) | ----------------------------------------------------------- (0,4,0) | (0,4,1) | (0,4,2) | (1,4,0) | (1,4,1) | (1,4,2) | ----------------------------------------------------------- (0,5,0) | (0,5,1) | (0,5,2) | (1,5,0) | (1,5,1) | (1,5,2) | ----------------------------------------------------------- batch 0 | batch 1 where we denote each element with (batch_idx, row_idx, column_idx) In this example, batch size is 2, M is 6 and N is 3 The stride (batch_stride_C) between the first element of two batches is ldc * n matrix A can be seen as --------------------------------------- (0,0,0) | (0,0,1) | (1,0,0) | (1,0,1) | --------------------------------------- (0,1,0) | (0,1,1) | (1,1,0) | (1,1,1) | --------------------------------------- (0,2,0) | (0,2,1) | (1,2,0) | (1,2,1) | --------------------------------------- (0,3,0) | (0,3,1) | (1,3,0) | (1,3,1) | --------------------------------------- (0,4,0) | (0,4,1) | (1,4,0) | (1,4,1) | --------------------------------------- (0,5,0) | (0,5,1) | (1,5,0) | (1,5,1) | --------------------------------------- batch 0 | batch 1 , where batch size is 2, M is 6 and K is 2 The stride (batch_stride_B) between the first element of two batches is lda * k matrix B can be seen as ----------------------------- (0,0,0) | (0,0,1) | (0,0,2) | ----------------------------- batch 0 (0,1,0) | (0,1,1) | (0,1,2) | ------------------------------------- (1,0,0) | (1,0,1) | (1,0,2) | ----------------------------- batch 1 (1,1,0) | (1,1,1) | (1,1,2) | ----------------------------- , where the batch size is 2, N is 3 and K is 2 The stride (batch_stride_C) between the first element of two batches is k */ cudaError_t cutlass_strided_batched_sgemm( int m, int n, int k, float alpha, float const *A, int lda, long long int batch_stride_A, float const *B, int ldb, long long int batch_stride_B, float *C, int ldc, long long int batch_stride_C, float beta, int batch_count) { using Gemm = cutlass::gemm::device::GemmBatched< float, cutlass::layout::ColumnMajor, float, cutlass::layout::ColumnMajor, float, cutlass::layout::ColumnMajor >; Gemm gemm_op; cutlass::Status status = gemm_op({ {m, n, k}, {A, lda}, batch_stride_A, {B, ldb}, batch_stride_B, {C, ldc}, batch_stride_C, {C, ldc}, batch_stride_C, {alpha, beta}, batch_count }); if (status != cutlass::Status::kSuccess) { return cudaErrorUnknown; } return cudaSuccess; } template<typename T> cudaError_t strided_batched_gemm_nn_reference( int m, int n, int k, T alpha, std::vector<T> const &A, int lda, long long int batch_stride_A, std::vector<T> const &B, int ldb, long long int batch_stride_B, std::vector<T> &C, int ldc, long long int batch_stride_C, T beta, int batch_count) { /* strided batched gemm NN */ cudaError_t result = cudaSuccess; if (A.size() < lda * k * batch_count) { std::cout << "the size of A is too small" << std::endl; return cudaErrorInvalidValue; } if (B.size() < ldb * n) { std::cout << "the size of B is 
too small" << std::endl; return cudaErrorInvalidValue; } if (C.size() < ldc * n * batch_count) { std::cout << "the size of C is too small" << std::endl; return cudaErrorInvalidValue; } for (int batch_idx = 0; batch_idx < batch_count; batch_idx++) { for (int n_idx = 0; n_idx < n; n_idx++) { for (int m_idx = 0; m_idx < m; m_idx++) { T accum = beta * C[batch_idx * batch_stride_C + n_idx * ldc + m_idx]; for (int k_idx = 0; k_idx < k; k_idx++) { accum += alpha * A[batch_idx * batch_stride_A + k_idx * lda + m_idx] * B[batch_idx * batch_stride_B + n_idx * ldb + k_idx]; } C[batch_idx * batch_stride_C + n_idx * ldc + m_idx] = accum; } } } return result; } int main() { // Arbitrary problem size int const m = 520; int const n = 219; int const k = 129; int const batch_count = 17; // A, B are non-transpose, column major int const lda = m; int const ldb = k * batch_count; int const ldc = m; int const count_A = batch_count * lda * k; int const count_B = ldb * n; int const count_C = batch_count * ldc * n; // the memory is batched along K dimension long long int batch_stride_A = static_cast<long long int>(lda) * static_cast<long long int>(k); long long int batch_stride_B = static_cast<long long int>(k); long long int batch_stride_C = static_cast<long long int>(ldc) * static_cast<long long int>(n); // alpha and beta float alpha = 1.0f; float beta = 2.0f; cudaError_t result = cudaSuccess; // allocate the host memory std::vector<float> host_A(count_A); std::vector<float> host_B(count_B); std::vector<float> host_C(count_C); std::vector<float> result_C(count_C); // allocate the device memory float *A; float *B; float *C; result = cudaMalloc(&A, count_A * sizeof(float)); if (result != cudaSuccess) { std::cerr << "cudaMalloc result = " << result << std::endl; return result; } result = cudaMalloc(&B, count_B * sizeof(float)); if (result != cudaSuccess) { std::cerr << "cudaMalloc result = " << result << std::endl; return result; } result = cudaMalloc(&C, count_C * sizeof(float)); if (result != cudaSuccess) { std::cerr << "cudaMalloc result = " << result << std::endl; return result; } // Limit range to avoid floating-point errors int const kRange = 8; // fill A for (int b_idx = 0; b_idx < batch_count; b_idx++) { for (int col_idx = 0; col_idx < k; col_idx++) { for (int row_idx = 0; row_idx < m; row_idx++) { host_A[row_idx + col_idx * lda + b_idx * lda * k] = static_cast<float>((row_idx + col_idx * lda + b_idx * lda * k) % kRange); } } } // fill B for (int b_idx = 0; b_idx < batch_count; b_idx++) { for (int col_idx = 0; col_idx < n; col_idx++) { for (int row_idx = 0; row_idx < k; row_idx++) { host_B[row_idx + col_idx * ldb + b_idx * k] = static_cast<float>(((n + k * ldb + batch_count * k) - (row_idx + col_idx * ldb + b_idx * k)) % kRange); } } } // fill C for (int b_idx = 0; b_idx < batch_count; b_idx++) { for (int col_idx = 0; col_idx < n; col_idx++) { for (int row_idx = 0; row_idx < m; row_idx++) { host_C[row_idx + col_idx * ldc + b_idx * ldc * n] = 1.f; } } } // ref memory std::vector<float> ref_A(host_A); std::vector<float> ref_B(host_B); std::vector<float> ref_C(host_C); // copy host memory to device result = cudaMemcpy(A, host_A.data(), count_A * sizeof(float), cudaMemcpyHostToDevice); if (result != cudaSuccess) { std::cerr << "cudaMemcpy result = " << result << std::endl; return result; } result = cudaMemcpy(B, host_B.data(), count_B * sizeof(float), cudaMemcpyHostToDevice); if (result != cudaSuccess) { std::cerr << "cudaMemcpy result = " << result << std::endl; return result; } result = cudaMemcpy(C, 
host_C.data(), count_C * sizeof(float), cudaMemcpyHostToDevice); if (result != cudaSuccess) { std::cerr << "cudaMemcpy result = " << result << std::endl; return result; } // run cutlass result = cutlass_strided_batched_sgemm( m, n, k, alpha, A, lda, batch_stride_A, B, ldb, batch_stride_B, C, ldc, batch_stride_C, beta, batch_count); if (result != cudaSuccess) return result; // copy device memory to host result = cudaMemcpy(result_C.data(), C, count_C * sizeof(float), cudaMemcpyDeviceToHost); if (result != cudaSuccess) { std::cerr << "cudaMemcpy result = " << result << std::endl; return result; } //compare with reference code result = strided_batched_gemm_nn_reference(m, n, k, alpha, ref_A, lda, batch_stride_A, ref_B, ldb, batch_stride_B, ref_C, ldc, batch_stride_C, beta, batch_count); if (result != 0) return result; // Expect bit-level accuracy for this simple example if (ref_C != result_C) { std::cout << "CUTLASS strided batched gemm does not run correctly" << std::endl; return cudaErrorUnknown; } // free memory result = cudaFree(A); if (result != cudaSuccess) { std::cerr << "cudaFree result = " << result << std::endl; return result; } result = cudaFree(B); if (result != cudaSuccess) { std::cerr << "cudaFree result = " << result << std::endl; return result; } result = cudaFree(C); if (result != cudaSuccess) { std::cerr << "cudaFree result = " << result << std::endl; return result; } if (result == cudaSuccess) { std::cout << "Passed." << std::endl; } // Exit. return result == cudaSuccess ? 0 : -1; }
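The reference loop above scales C by beta once per output element and then accumulates alpha * A * B over k, which is the same single-pass epilogue the CUTLASS kernel applies. The cutlass_strided_batched_sgemm wrapper defined above accepts any consistent column-major strides, so it can also be driven with a plain one-matrix-per-batch layout; a minimal usage sketch (sizes are illustrative and error handling is trimmed):

// Minimal driver for the wrapper above: two batches of 4x4 * 4x4, C = A * B.
// Device buffers are assumed to be allocated and filled by the caller.
void SmallBatchedGemm(float *d_A, float *d_B, float *d_C) {
  int m = 4, n = 4, k = 4, batch_count = 2;
  int lda = m, ldb = k, ldc = m;               // column-major, no padding
  long long strideA = (long long)lda * k;      // one full A per batch
  long long strideB = (long long)ldb * n;      // one full B per batch
  long long strideC = (long long)ldc * n;      // one full C per batch
  cudaError_t err = cutlass_strided_batched_sgemm(
      m, n, k, /*alpha=*/1.0f,
      d_A, lda, strideA,
      d_B, ldb, strideB,
      d_C, ldc, strideC,
      /*beta=*/0.0f, batch_count);
  if (err != cudaSuccess) { /* report and bail */ }
}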
a1981bc2444ac3d585b804b28da59f5b32ed8df0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> /*Kernel for matrix outer product*/ __global__ void k_gemm_f32(float alpha, const float* A, int stride_row_a, int stride_col_a,const float* B, int stride_row_b, int stride_col_b,float* C, int stride_row_c, int stride_col_c){ const int TILE_WIDTH=16; const int VEC_SIZE=4; //multiplies TILE_WIDTH for the b row vectors. 4 means one thread calculated C's entries with a rowlength of 4*16=64 numbers in B. Must be multiple of TILE_WIDTH float Cc[TILE_WIDTH]={0}; //initializes all elements to zero __shared__ float Ac[TILE_WIDTH*TILE_WIDTH]; //buffer that holds columns of a int tx=threadIdx.x; int ty=threadIdx.y; int bx=blockIdx.x; int by=blockIdx.y; int a_begin=by*TILE_WIDTH*stride_col_a; int a_end=a_begin+stride_col_a;//check if correct int b_begin=bx*TILE_WIDTH*VEC_SIZE*stride_row_b; //we multiply by VEC_SIZE because B's tiles have length TILEWIDTH*VEC_SIZE for (;a_begin < a_end;a_begin+=TILE_WIDTH*stride_row_a){ //Load elements of A into shared memory for (int i=0; i< 4;i++){ Ac[i*4+ty+TILE_WIDTH*tx]=A[a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a]; } __syncthreads(); const float* ptrB=&B[b_begin+(TILE_WIDTH*ty+tx)*stride_row_b]; float* ptrA=Ac; #pragma unroll for (int i=0;i<TILE_WIDTH;i++){ float bv=alpha*ptrB[0]; //this loop could be unrolled for (int j=0;j<TILE_WIDTH;j++){ Cc[j]+=ptrA[j]*bv; } ptrA+=TILE_WIDTH; //next column of A (it is the next column because Ac is a transposed block of A) ptrB+=stride_col_b; } b_begin+=TILE_WIDTH*stride_col_b; __syncthreads(); } int c=stride_col_c*TILE_WIDTH*by+(TILE_WIDTH*VEC_SIZE*bx+tx+TILE_WIDTH*ty)*stride_row_c; for (int i=0;i<TILE_WIDTH;i++){ C[c]+=Cc[i]; c+=stride_col_c; } } //Todo!! /*Kernel for matrix outer product. This version does not require A,B,C to be multiples of the blocksizes*/ __global__ void k_gemm_f32_nonblockmultiple(const int m, const int n, const int k,float alpha, const float* A, int stride_row_a, int stride_col_a,const float* B, int stride_row_b, int stride_col_b,float* C, int stride_row_c, int stride_col_c){ const int TILE_WIDTH=16; const int VEC_SIZE=4; //multiplies TILE_WIDTH for the b row vectors. 4 means one thread calculated with a rowlength of 4*16=64 numbers in B. 
Must be multiple of TILE_WIDTH float Cc[TILE_WIDTH]={0}; //initializes all elements to zero __shared__ float Ac[TILE_WIDTH*TILE_WIDTH]; //buffer that holds columns of a int tx=threadIdx.x; int ty=threadIdx.y; int bx=blockIdx.x; int by=blockIdx.y; int qm=m%TILE_WIDTH; //int qn=(VEC_SIZE*TILE_WIDTH)%n; int qk=k%TILE_WIDTH; int rowA=by*TILE_WIDTH; int colB=bx*TILE_WIDTH*VEC_SIZE+TILE_WIDTH*ty+tx; int a_begin=by*TILE_WIDTH*stride_col_a; int b_begin=bx*TILE_WIDTH*VEC_SIZE*stride_row_b; //we multiply by VEC_SIZE because B's tiles have length TILEWIDTH*VEC_SIZE bool does_compute=false; //printf("qk:%d\n",qk); int rk=k/TILE_WIDTH; for (int q=0;q<rk;q++){ //Load elements of A into shared memory //printf("i: %d\n",a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a); if ((tx<k)&&((rowA+TILE_WIDTH-1)<m)){ for (int i=0; i< 4;i++){ //printf("Aci: %d, i: %d and A:%f\n",i*4+ty+TILE_WIDTH*tx,a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a,A[a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a]); Ac[i*4+ty+TILE_WIDTH*tx]=A[a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a]; } } else{ for (int i=0; i< 4;i++){ if((rowA+i*4+ty)<m && (tx<k)){ //printf("is: %f\n",A[a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a]); Ac[i*4+ty+TILE_WIDTH*tx]=A[a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a]; // printf("is:Ac index: %d, index: %d and A:%f\n",i*4+ty+TILE_WIDTH*tx,a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a,A[a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a]); } else{ Ac[i*4+ty+TILE_WIDTH*tx]=0.0; } } } /* for (int i=0;i<TILE_WIDTH*TILE_WIDTH;i++){ Ac[i]=-7; } */ __syncthreads(); /* if (tx==0 && ty==0){ for (int i=0;i<TILE_WIDTH*TILE_WIDTH;i++){ printf("%f\t",Ac[i]); } } */ if (colB>=n){ for (int j=0;j<TILE_WIDTH;j++){ Cc[j]=0.0; } } else{ //printf("Id: %d,%d,%d,%d\n",by,ty,bx,tx); const float* ptrB=&B[b_begin+(TILE_WIDTH*ty+tx)*stride_row_b]; float* ptrA=Ac; does_compute=true; #pragma unroll for (int i=0;i<TILE_WIDTH;i++){ float bv=alpha*ptrB[0]; //this loop could be unrolled for (int j=0;j<TILE_WIDTH;j++){ Cc[j]+=ptrA[j]*bv; /* if (ptrA[j]!=0){ printf("%f vs. %f\n",ptrA[j],bv); } */ } ptrA+=TILE_WIDTH; //next column of A (it is the next column because Ac is a transposed block of A) ptrB+=stride_col_b; } b_begin+=TILE_WIDTH*stride_col_b; } a_begin+=TILE_WIDTH*stride_row_a; __syncthreads(); } if (qk>0){ if (tx<qk){ //printf("rowA:%d, ty:%d\n",rowA,ty); a_begin=(by*TILE_WIDTH*stride_col_a)+rk*TILE_WIDTH*stride_row_a; for (int i=0; i< 4;i++){ if((rowA+i*4+ty)<m){ Ac[i*4+ty+TILE_WIDTH*tx]=A[a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a]; //printf("Ac index2: %d, index: %d and \n",i*4+ty+TILE_WIDTH*tx,a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a); } else{ Ac[i*4+ty+TILE_WIDTH*tx]=0.0; } } } else{ for (int i=0; i< 4;i++){ Ac[i*4+ty+TILE_WIDTH*tx]=0.0; } } __syncthreads(); //return; if (colB<n){ // printf("Id: %d,%d,%d,%d\n",by,ty,bx,tx); const float* ptrB=&B[b_begin+(TILE_WIDTH*ty+tx)*stride_row_b]; float* ptrA=Ac; does_compute=true; for (int i=0;i<qk;i++){ float bv=alpha*ptrB[0]; //this loop could be unrolled for (int j=0;j<TILE_WIDTH;j++){ Cc[j]+=ptrA[j]*bv; /*if (ptrA[j]!=0){ printf("%f vs2. 
%f\n",ptrA[j],bv); } */ } ptrA+=TILE_WIDTH; //next column of A (it is the next column because Ac is a transposed block of A) ptrB+=stride_col_b; } } } __syncthreads(); //maybe redundant if (does_compute){ int c=stride_col_c*TILE_WIDTH*by+(TILE_WIDTH*VEC_SIZE*bx+tx+TILE_WIDTH*ty)*stride_row_c; int c_length=((rowA+TILE_WIDTH)<=m)?TILE_WIDTH:qm; for (int i=0;i<c_length;i++){ C[c]+=Cc[i]; c+=stride_col_c; } } } __global__ void k_scal_f32(int m, int n, float beta, float* C, int stride_row_c, int stride_col_c){ const int BLOCK_WIDTH=256; //size of a block const int TILE_WIDTH=64; //size of block per single thread int tx=threadIdx.x; int ty=threadIdx.y; int bx=blockIdx.x; int by=blockIdx.y; //printf("Bin drin mit : bx %d, tx %d, by %d, ty %d \n",bx,tx,by,ty); float* c_begin=&C[(by*BLOCK_WIDTH+ty*TILE_WIDTH)*stride_col_c+(bx*BLOCK_WIDTH+tx*TILE_WIDTH)*stride_row_c]; if ((((by+1)*BLOCK_WIDTH)<=m) && (((bx+1)*BLOCK_WIDTH)<=n)){ for (int i=0;i<TILE_WIDTH;i++){ for (int j=0;j<TILE_WIDTH;j++){ c_begin[i*stride_col_c+j*stride_row_c]*=beta; } } } else{ int column=by*BLOCK_WIDTH+ty*TILE_WIDTH; for (int i=0;i<TILE_WIDTH;i++){ if (column<m){ int row=bx*BLOCK_WIDTH+tx*TILE_WIDTH; for (int j=0;j<TILE_WIDTH;j++){ if (row<n){ c_begin[i*stride_col_c+j*stride_row_c]*=beta; //printf("Bin hier drin mit %d und %d mit by %d ty %d bx %d tx %d\n",i,j,by,ty,bx,tx); } row=row+1; } } column=column+1; } } } //matrix matrix multiplication __host__ void gemm_f32_blockmultiple(int m, int n, int k, float alpha, const float* A_h, const float* B_h, float beta, float* C_h){ float* A_d; float* B_d; float* C_d; int sizeA=sizeof(float)*m*k; int sizeB=sizeof(float)*n*k; int sizeC=sizeof(float)*m*n; float bsmx=16; float bsmy=4; dim3 threadLayout=dim3(bsmx,bsmy,1); dim3 grid=dim3(ceil(n/(4.0*bsmx)),ceil(m/bsmx),1); hipMalloc((void**) &C_d,sizeC); if (beta==0){ hipMemset(C_d, 0, sizeC); } else{ hipMemcpy((void*) C_d, (void*) C_h, sizeC,hipMemcpyHostToDevice); hipLaunchKernelGGL(( k_scal_f32), dim3(grid),dim3(threadLayout), 0, 0, m,n,beta,C_d,1,n); } if (alpha!=0.0){ hipMalloc((void**) &A_d,sizeA); hipMalloc((void**) &B_d,sizeB); hipError_t copy1=hipMemcpy((void*) A_d, (void*) A_h, sizeA, hipMemcpyHostToDevice); hipError_t copy2=hipMemcpy((void*) B_d, (void*) B_h, sizeB, hipMemcpyHostToDevice); if ((copy1==hipSuccess)&& (copy2==hipSuccess)){ hipLaunchKernelGGL(( k_gemm_f32), dim3(grid),dim3(threadLayout), 0, 0, alpha, A_d, 1, k,B_d,1,n,C_d,1,n); hipMemcpy((void*) C_h, (void*) C_d, sizeC, hipMemcpyDeviceToHost); hipFree(A_d); hipFree(B_d); } } hipFree(C_d); } //General matrix-to-matrix multiplication for 32 bit floats. 
Input matrices are padded if they are not a multiple of block size bsmx and bsmy __host__ void gemm_f32_nonblockmultiple(int m, int n, int k, float alpha, const float* A_h, const float* B_h, float beta, float* C_h){ float* A_d; float* B_d; float* C_d; float bsmx=16; //blocksize x float bsmy=4; //blocksize y int mB=ceil(m/bsmx)*bsmx; int nB=ceil(n/(4.0*bsmx))*(4.0*bsmx); int kB=ceil(k/bsmx)*bsmx; int sizeCb=sizeof(float)*mB*nB; hipMalloc((void**) &C_d, sizeCb); dim3 threadLayout=dim3(bsmx,bsmy,1); dim3 grid=dim3(ceil(nB/(4.0*bsmx)),ceil(mB/bsmx),1); if (beta==0){ hipMemset(C_d, 0, sizeCb); } else{ hipError_t copy; for (int i=0;i<m;i++){ copy=hipMemcpy((void*) (C_d+i*nB), (void*) (C_h+i*n), sizeof(float)*n,hipMemcpyHostToDevice); } if (copy!=hipSuccess){ printf("Copy fehlgeschlagen\n"); } // printf("Starte nun den Kernel\n"); dim3 threadsize=dim3(4,4,1); dim3 blocksize=dim3(ceil(n/256.0),ceil(m/256.0),1); hipLaunchKernelGGL(( k_scal_f32), dim3(blocksize),dim3(threadsize), 0, 0, m,n,beta,C_d,1,nB); //hipDeviceSynchronize(); } if (alpha!=0.0){ int sizeAb=sizeof(float)*mB*kB; int sizeBb=sizeof(float)*kB*nB; hipMalloc((void**) &A_d,sizeAb); hipMalloc((void**) &B_d,sizeBb); hipMemset(A_d,0.0,sizeAb); hipMemset(B_d,0.0,sizeBb); hipError_t copy1; hipError_t copy2; for (int i=0;i<m;i++){ copy1=hipMemcpy((void*) (A_d+i*kB), (void*) (A_h+i*k), sizeof(float)*k,hipMemcpyHostToDevice); } for (int i=0;i<k;i++){ copy2=hipMemcpy((void*) (B_d+i*nB), (void*) (B_h+i*n), sizeof(float)*n, hipMemcpyHostToDevice); } if ((copy1==hipSuccess)&& (copy2==hipSuccess)){ hipLaunchKernelGGL(( k_gemm_f32), dim3(grid),dim3(threadLayout), 0, 0, alpha, A_d, 1, kB,B_d,1,nB,C_d,1,nB); hipFree(A_d); hipFree(B_d); } } for (int i=0;i<m;i++){ hipError_t copy=hipMemcpy((void*) (C_h+i*n), (void*) (C_d+i*nB),sizeof(float)*n,hipMemcpyDeviceToHost); if (copy!=hipSuccess){ printf("Copy fehlgeschlagen\n"); } } hipFree(C_d); } //General matrix-to-matrix multiplication for 32 bit floats. Input matrices are padded if they are not a multiple of block size bsmx and bsmy __host__ void gemm_f32(int m, int n, int k, float alpha, const float* A_h, const float* B_h, float beta, float* C_h){ if ((alpha==0.0) && (beta==1.0)){ return; } int res1=m%16; int res2=n/(4*4); if ((res1==0)&&(res2==0)){ gemm_f32_blockmultiple(m,n,k,alpha,A_h,B_h,beta,C_h); } else{ // printf("nonblockmultiple\n"); gemm_f32_nonblockmultiple(m,n,k,alpha,A_h,B_h,beta,C_h); } } //General matrix-to-matrix multiplication for 32 bit floats. 
This assumes that the input parameters are already allocated in device memory __host__ void gemm_f32_device(int m, int n, int k, float alpha, const float* A_d, int stride_row_a, int stride_col_a, const float* B_d, int stride_row_b, int stride_col_b, float beta, float* C_d,int stride_row_c, int stride_col_c){ if ((alpha==0.0) && (beta==1.0)){ return; } float bsmx=16; float bsmy=4; dim3 threadLayout=dim3(bsmx,bsmy,1); dim3 grid=dim3(ceil(n/(4.0*bsmx)),ceil(m/bsmx),1); hipLaunchKernelGGL(( k_scal_f32), dim3(grid),dim3(threadLayout), 0, 0, m,n,beta,C_d,stride_row_c,stride_col_c); if (alpha!=0){ int res1=m%(int)bsmx; int res2=n%(int)bsmx; if ((res1==0)&&(res2==0)){ // printf("gemm blockmultiple\n"); hipLaunchKernelGGL(( k_gemm_f32), dim3(grid),dim3(threadLayout), 0, 0, alpha, A_d, stride_row_a, stride_col_a,B_d,stride_row_b,stride_col_b,C_d,stride_row_c,stride_col_c); } else{ //printf("gemm nonblockmultiple\n"); hipLaunchKernelGGL(( k_gemm_f32_nonblockmultiple), dim3(grid),dim3(threadLayout), 0, 0, m,n,k,alpha, A_d, stride_row_a, stride_col_a,B_d,stride_row_b,stride_col_b,C_d,stride_row_c,stride_col_c); } } }
a1981bc2444ac3d585b804b28da59f5b32ed8df0.cu
#include <stdio.h> /*Kernel for matrix outer product*/ __global__ void k_gemm_f32(float alpha, const float* A, int stride_row_a, int stride_col_a,const float* B, int stride_row_b, int stride_col_b,float* C, int stride_row_c, int stride_col_c){ const int TILE_WIDTH=16; const int VEC_SIZE=4; //multiplies TILE_WIDTH for the b row vectors. 4 means one thread calculated C's entries with a rowlength of 4*16=64 numbers in B. Must be multiple of TILE_WIDTH float Cc[TILE_WIDTH]={0}; //initializes all elements to zero __shared__ float Ac[TILE_WIDTH*TILE_WIDTH]; //buffer that holds columns of a int tx=threadIdx.x; int ty=threadIdx.y; int bx=blockIdx.x; int by=blockIdx.y; int a_begin=by*TILE_WIDTH*stride_col_a; int a_end=a_begin+stride_col_a;//check if correct int b_begin=bx*TILE_WIDTH*VEC_SIZE*stride_row_b; //we multiply by VEC_SIZE because B's tiles have length TILEWIDTH*VEC_SIZE for (;a_begin < a_end;a_begin+=TILE_WIDTH*stride_row_a){ //Load elements of A into shared memory for (int i=0; i< 4;i++){ Ac[i*4+ty+TILE_WIDTH*tx]=A[a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a]; } __syncthreads(); const float* ptrB=&B[b_begin+(TILE_WIDTH*ty+tx)*stride_row_b]; float* ptrA=Ac; #pragma unroll for (int i=0;i<TILE_WIDTH;i++){ float bv=alpha*ptrB[0]; //this loop could be unrolled for (int j=0;j<TILE_WIDTH;j++){ Cc[j]+=ptrA[j]*bv; } ptrA+=TILE_WIDTH; //next column of A (it is the next column because Ac is a transposed block of A) ptrB+=stride_col_b; } b_begin+=TILE_WIDTH*stride_col_b; __syncthreads(); } int c=stride_col_c*TILE_WIDTH*by+(TILE_WIDTH*VEC_SIZE*bx+tx+TILE_WIDTH*ty)*stride_row_c; for (int i=0;i<TILE_WIDTH;i++){ C[c]+=Cc[i]; c+=stride_col_c; } } //Todo!! /*Kernel for matrix outer product. This version does not require A,B,C to be multiples of the blocksizes*/ __global__ void k_gemm_f32_nonblockmultiple(const int m, const int n, const int k,float alpha, const float* A, int stride_row_a, int stride_col_a,const float* B, int stride_row_b, int stride_col_b,float* C, int stride_row_c, int stride_col_c){ const int TILE_WIDTH=16; const int VEC_SIZE=4; //multiplies TILE_WIDTH for the b row vectors. 4 means one thread calculated with a rowlength of 4*16=64 numbers in B. 
Must be multiple of TILE_WIDTH float Cc[TILE_WIDTH]={0}; //initializes all elements to zero __shared__ float Ac[TILE_WIDTH*TILE_WIDTH]; //buffer that holds columns of a int tx=threadIdx.x; int ty=threadIdx.y; int bx=blockIdx.x; int by=blockIdx.y; int qm=m%TILE_WIDTH; //int qn=(VEC_SIZE*TILE_WIDTH)%n; int qk=k%TILE_WIDTH; int rowA=by*TILE_WIDTH; int colB=bx*TILE_WIDTH*VEC_SIZE+TILE_WIDTH*ty+tx; int a_begin=by*TILE_WIDTH*stride_col_a; int b_begin=bx*TILE_WIDTH*VEC_SIZE*stride_row_b; //we multiply by VEC_SIZE because B's tiles have length TILEWIDTH*VEC_SIZE bool does_compute=false; //printf("qk:%d\n",qk); int rk=k/TILE_WIDTH; for (int q=0;q<rk;q++){ //Load elements of A into shared memory //printf("i: %d\n",a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a); if ((tx<k)&&((rowA+TILE_WIDTH-1)<m)){ for (int i=0; i< 4;i++){ //printf("Aci: %d, i: %d and A:%f\n",i*4+ty+TILE_WIDTH*tx,a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a,A[a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a]); Ac[i*4+ty+TILE_WIDTH*tx]=A[a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a]; } } else{ for (int i=0; i< 4;i++){ if((rowA+i*4+ty)<m && (tx<k)){ //printf("is: %f\n",A[a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a]); Ac[i*4+ty+TILE_WIDTH*tx]=A[a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a]; // printf("is:Ac index: %d, index: %d and A:%f\n",i*4+ty+TILE_WIDTH*tx,a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a,A[a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a]); } else{ Ac[i*4+ty+TILE_WIDTH*tx]=0.0; } } } /* for (int i=0;i<TILE_WIDTH*TILE_WIDTH;i++){ Ac[i]=-7; } */ __syncthreads(); /* if (tx==0 && ty==0){ for (int i=0;i<TILE_WIDTH*TILE_WIDTH;i++){ printf("%f\t",Ac[i]); } } */ if (colB>=n){ for (int j=0;j<TILE_WIDTH;j++){ Cc[j]=0.0; } } else{ //printf("Id: %d,%d,%d,%d\n",by,ty,bx,tx); const float* ptrB=&B[b_begin+(TILE_WIDTH*ty+tx)*stride_row_b]; float* ptrA=Ac; does_compute=true; #pragma unroll for (int i=0;i<TILE_WIDTH;i++){ float bv=alpha*ptrB[0]; //this loop could be unrolled for (int j=0;j<TILE_WIDTH;j++){ Cc[j]+=ptrA[j]*bv; /* if (ptrA[j]!=0){ printf("%f vs. %f\n",ptrA[j],bv); } */ } ptrA+=TILE_WIDTH; //next column of A (it is the next column because Ac is a transposed block of A) ptrB+=stride_col_b; } b_begin+=TILE_WIDTH*stride_col_b; } a_begin+=TILE_WIDTH*stride_row_a; __syncthreads(); } if (qk>0){ if (tx<qk){ //printf("rowA:%d, ty:%d\n",rowA,ty); a_begin=(by*TILE_WIDTH*stride_col_a)+rk*TILE_WIDTH*stride_row_a; for (int i=0; i< 4;i++){ if((rowA+i*4+ty)<m){ Ac[i*4+ty+TILE_WIDTH*tx]=A[a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a]; //printf("Ac index2: %d, index: %d and \n",i*4+ty+TILE_WIDTH*tx,a_begin+stride_col_a*(i*4+ty)+tx*stride_row_a); } else{ Ac[i*4+ty+TILE_WIDTH*tx]=0.0; } } } else{ for (int i=0; i< 4;i++){ Ac[i*4+ty+TILE_WIDTH*tx]=0.0; } } __syncthreads(); //return; if (colB<n){ // printf("Id: %d,%d,%d,%d\n",by,ty,bx,tx); const float* ptrB=&B[b_begin+(TILE_WIDTH*ty+tx)*stride_row_b]; float* ptrA=Ac; does_compute=true; for (int i=0;i<qk;i++){ float bv=alpha*ptrB[0]; //this loop could be unrolled for (int j=0;j<TILE_WIDTH;j++){ Cc[j]+=ptrA[j]*bv; /*if (ptrA[j]!=0){ printf("%f vs2. 
%f\n",ptrA[j],bv); } */ } ptrA+=TILE_WIDTH; //next column of A (it is the next column because Ac is a transposed block of A) ptrB+=stride_col_b; } } } __syncthreads(); //maybe redundant if (does_compute){ int c=stride_col_c*TILE_WIDTH*by+(TILE_WIDTH*VEC_SIZE*bx+tx+TILE_WIDTH*ty)*stride_row_c; int c_length=((rowA+TILE_WIDTH)<=m)?TILE_WIDTH:qm; for (int i=0;i<c_length;i++){ C[c]+=Cc[i]; c+=stride_col_c; } } } __global__ void k_scal_f32(int m, int n, float beta, float* C, int stride_row_c, int stride_col_c){ const int BLOCK_WIDTH=256; //size of a block const int TILE_WIDTH=64; //size of block per single thread int tx=threadIdx.x; int ty=threadIdx.y; int bx=blockIdx.x; int by=blockIdx.y; //printf("Bin drin mit : bx %d, tx %d, by %d, ty %d \n",bx,tx,by,ty); float* c_begin=&C[(by*BLOCK_WIDTH+ty*TILE_WIDTH)*stride_col_c+(bx*BLOCK_WIDTH+tx*TILE_WIDTH)*stride_row_c]; if ((((by+1)*BLOCK_WIDTH)<=m) && (((bx+1)*BLOCK_WIDTH)<=n)){ for (int i=0;i<TILE_WIDTH;i++){ for (int j=0;j<TILE_WIDTH;j++){ c_begin[i*stride_col_c+j*stride_row_c]*=beta; } } } else{ int column=by*BLOCK_WIDTH+ty*TILE_WIDTH; for (int i=0;i<TILE_WIDTH;i++){ if (column<m){ int row=bx*BLOCK_WIDTH+tx*TILE_WIDTH; for (int j=0;j<TILE_WIDTH;j++){ if (row<n){ c_begin[i*stride_col_c+j*stride_row_c]*=beta; //printf("Bin hier drin mit %d und %d mit by %d ty %d bx %d tx %d\n",i,j,by,ty,bx,tx); } row=row+1; } } column=column+1; } } } //matrix matrix multiplication __host__ void gemm_f32_blockmultiple(int m, int n, int k, float alpha, const float* A_h, const float* B_h, float beta, float* C_h){ float* A_d; float* B_d; float* C_d; int sizeA=sizeof(float)*m*k; int sizeB=sizeof(float)*n*k; int sizeC=sizeof(float)*m*n; float bsmx=16; float bsmy=4; dim3 threadLayout=dim3(bsmx,bsmy,1); dim3 grid=dim3(ceil(n/(4.0*bsmx)),ceil(m/bsmx),1); cudaMalloc((void**) &C_d,sizeC); if (beta==0){ cudaMemset(C_d, 0, sizeC); } else{ cudaMemcpy((void*) C_d, (void*) C_h, sizeC,cudaMemcpyHostToDevice); k_scal_f32<<<grid,threadLayout>>>(m,n,beta,C_d,1,n); } if (alpha!=0.0){ cudaMalloc((void**) &A_d,sizeA); cudaMalloc((void**) &B_d,sizeB); cudaError_t copy1=cudaMemcpy((void*) A_d, (void*) A_h, sizeA, cudaMemcpyHostToDevice); cudaError_t copy2=cudaMemcpy((void*) B_d, (void*) B_h, sizeB, cudaMemcpyHostToDevice); if ((copy1==cudaSuccess)&& (copy2==cudaSuccess)){ k_gemm_f32<<<grid,threadLayout>>> (alpha, A_d, 1, k,B_d,1,n,C_d,1,n); cudaMemcpy((void*) C_h, (void*) C_d, sizeC, cudaMemcpyDeviceToHost); cudaFree(A_d); cudaFree(B_d); } } cudaFree(C_d); } //General matrix-to-matrix multiplication for 32 bit floats. 
Input matrices are padded if they are not a multiple of block size bsmx and bsmy __host__ void gemm_f32_nonblockmultiple(int m, int n, int k, float alpha, const float* A_h, const float* B_h, float beta, float* C_h){ float* A_d; float* B_d; float* C_d; float bsmx=16; //blocksize x float bsmy=4; //blocksize y int mB=ceil(m/bsmx)*bsmx; int nB=ceil(n/(4.0*bsmx))*(4.0*bsmx); int kB=ceil(k/bsmx)*bsmx; int sizeCb=sizeof(float)*mB*nB; cudaMalloc((void**) &C_d, sizeCb); dim3 threadLayout=dim3(bsmx,bsmy,1); dim3 grid=dim3(ceil(nB/(4.0*bsmx)),ceil(mB/bsmx),1); if (beta==0){ cudaMemset(C_d, 0, sizeCb); } else{ cudaError_t copy; for (int i=0;i<m;i++){ copy=cudaMemcpy((void*) (C_d+i*nB), (void*) (C_h+i*n), sizeof(float)*n,cudaMemcpyHostToDevice); } if (copy!=cudaSuccess){ printf("Copy fehlgeschlagen\n"); } // printf("Starte nun den Kernel\n"); dim3 threadsize=dim3(4,4,1); dim3 blocksize=dim3(ceil(n/256.0),ceil(m/256.0),1); k_scal_f32<<<blocksize,threadsize>>>(m,n,beta,C_d,1,nB); //cudaDeviceSynchronize(); } if (alpha!=0.0){ int sizeAb=sizeof(float)*mB*kB; int sizeBb=sizeof(float)*kB*nB; cudaMalloc((void**) &A_d,sizeAb); cudaMalloc((void**) &B_d,sizeBb); cudaMemset(A_d,0.0,sizeAb); cudaMemset(B_d,0.0,sizeBb); cudaError_t copy1; cudaError_t copy2; for (int i=0;i<m;i++){ copy1=cudaMemcpy((void*) (A_d+i*kB), (void*) (A_h+i*k), sizeof(float)*k,cudaMemcpyHostToDevice); } for (int i=0;i<k;i++){ copy2=cudaMemcpy((void*) (B_d+i*nB), (void*) (B_h+i*n), sizeof(float)*n, cudaMemcpyHostToDevice); } if ((copy1==cudaSuccess)&& (copy2==cudaSuccess)){ k_gemm_f32<<<grid,threadLayout>>> (alpha, A_d, 1, kB,B_d,1,nB,C_d,1,nB); cudaFree(A_d); cudaFree(B_d); } } for (int i=0;i<m;i++){ cudaError_t copy=cudaMemcpy((void*) (C_h+i*n), (void*) (C_d+i*nB),sizeof(float)*n,cudaMemcpyDeviceToHost); if (copy!=cudaSuccess){ printf("Copy fehlgeschlagen\n"); } } cudaFree(C_d); } //General matrix-to-matrix multiplication for 32 bit floats. Input matrices are padded if they are not a multiple of block size bsmx and bsmy __host__ void gemm_f32(int m, int n, int k, float alpha, const float* A_h, const float* B_h, float beta, float* C_h){ if ((alpha==0.0) && (beta==1.0)){ return; } int res1=m%16; int res2=n/(4*4); if ((res1==0)&&(res2==0)){ gemm_f32_blockmultiple(m,n,k,alpha,A_h,B_h,beta,C_h); } else{ // printf("nonblockmultiple\n"); gemm_f32_nonblockmultiple(m,n,k,alpha,A_h,B_h,beta,C_h); } } //General matrix-to-matrix multiplication for 32 bit floats. This assumes that the input parameters are already allocated in device memory __host__ void gemm_f32_device(int m, int n, int k, float alpha, const float* A_d, int stride_row_a, int stride_col_a, const float* B_d, int stride_row_b, int stride_col_b, float beta, float* C_d,int stride_row_c, int stride_col_c){ if ((alpha==0.0) && (beta==1.0)){ return; } float bsmx=16; float bsmy=4; dim3 threadLayout=dim3(bsmx,bsmy,1); dim3 grid=dim3(ceil(n/(4.0*bsmx)),ceil(m/bsmx),1); k_scal_f32<<<grid,threadLayout>>>(m,n,beta,C_d,stride_row_c,stride_col_c); if (alpha!=0){ int res1=m%(int)bsmx; int res2=n%(int)bsmx; if ((res1==0)&&(res2==0)){ // printf("gemm blockmultiple\n"); k_gemm_f32<<<grid,threadLayout>>>(alpha, A_d, stride_row_a, stride_col_a,B_d,stride_row_b,stride_col_b,C_d,stride_row_c,stride_col_c); } else{ //printf("gemm nonblockmultiple\n"); k_gemm_f32_nonblockmultiple<<<grid,threadLayout>>>(m,n,k,alpha, A_d, stride_row_a, stride_col_a,B_d,stride_row_b,stride_col_b,C_d,stride_row_c,stride_col_c); } } }
ddf9abea86b1846fe1cf3a95c47807f79798ba53.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Msnhnet/layers/cuda/MsnhMaxPoolLayerGPU.h" namespace Msnhnet { __global__ void maxpoolDepthKernel(const int n, const int width, const int height, const int channel, const int outChannel, const int batch, float *const input, float *const output) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(index < n) { int j = index % width; index = index / width; int i = index % height; index = index / height; int b = index % batch; for (int g = 0; g < outChannel; ++g) { int outIndex = j + width*(i + height*(g + outChannel*b)); float max = -FLT_MAX; for (int k = g; k < channel; k+=outChannel) { int inIndex = j + width*(i + height*(k + channel*b)); float val = input[inIndex]; max = (val > max)?val:max; } output[outIndex] = max; } } } __global__ void maxpoolNormalKernel(const int n, const int width, const int height, const int channel, const int outWidth, const int outHeight, const int strideX, const int strideY, const int kSizeX, const int kSizeY, const int paddingX, const int paddingY, float *const input, float *const output) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(index < n) { int j = index % outWidth; index = index / outWidth; int i = index % outHeight; index = index / outHeight; int k = index % channel; index = index / channel; int b = index; int widthOffset = -paddingX; int heightOffset = -paddingY; int outIndex = j + outWidth*(i + outHeight*(k + channel*b)); float max = -INFINITY; for (int l = 0; l < kSizeY; ++l) { for (int m = 0; m < kSizeX; ++m) { int curHeight = heightOffset + i*strideY + l; int curWidth = widthOffset + j*strideX + m; int idx = curWidth + width*(curHeight + height*(k + b*channel)); bool valid = (curHeight >=0 && curHeight < height && curWidth >=0 && curWidth < width); float value = (valid != 0)? input[idx] : -INFINITY; max = (value > max) ? value : max; } } output[outIndex] = max; } } void MaxPoolLayerGPU::forwardDepthGPU(const int &width, const int &height, const int &channel, const int &outWidth, const int &outHeight, const int &outChannel, const int &batch, float *const &input, float *const &output) { size_t n = outHeight * outWidth * 1 * batch; hipLaunchKernelGGL(( maxpoolDepthKernel), dim3(Cuda::getGrid(n)), dim3(Cuda::blockThread), 0, Cuda::getCudaStream(), n, width, height, channel, outChannel, batch, input, output); CUDA_CHECK(hipPeekAtLastError()); } void MaxPoolLayerGPU::forwardNormalGPU(const int &width, const int &height, const int &channel, const int &outWidth, const int &outHeight, const int &outChannel, const int &strideX, const int &strideY, const int &kSizeX, const int kSizeY, const int &paddingX, const int &paddingY, const int &batch, float *const &input, float *const &output) { size_t n = outHeight * outWidth * outChannel * batch; hipLaunchKernelGGL(( maxpoolNormalKernel), dim3(Cuda::getGrid(n)), dim3(Cuda::blockThread), 0, Cuda::getCudaStream(), n,width,height, channel, outHeight,outWidth, strideX,strideY, kSizeX,kSizeY, paddingX,paddingY, input,output); CUDA_CHECK(hipPeekAtLastError()); } }
ddf9abea86b1846fe1cf3a95c47807f79798ba53.cu
#include "Msnhnet/layers/cuda/MsnhMaxPoolLayerGPU.h" namespace Msnhnet { __global__ void maxpoolDepthKernel(const int n, const int width, const int height, const int channel, const int outChannel, const int batch, float *const input, float *const output) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(index < n) { int j = index % width; index = index / width; int i = index % height; index = index / height; int b = index % batch; for (int g = 0; g < outChannel; ++g) { int outIndex = j + width*(i + height*(g + outChannel*b)); float max = -FLT_MAX; for (int k = g; k < channel; k+=outChannel) { int inIndex = j + width*(i + height*(k + channel*b)); float val = input[inIndex]; max = (val > max)?val:max; } output[outIndex] = max; } } } __global__ void maxpoolNormalKernel(const int n, const int width, const int height, const int channel, const int outWidth, const int outHeight, const int strideX, const int strideY, const int kSizeX, const int kSizeY, const int paddingX, const int paddingY, float *const input, float *const output) { int index = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if(index < n) { int j = index % outWidth; index = index / outWidth; int i = index % outHeight; index = index / outHeight; int k = index % channel; index = index / channel; int b = index; int widthOffset = -paddingX; int heightOffset = -paddingY; int outIndex = j + outWidth*(i + outHeight*(k + channel*b)); float max = -INFINITY; for (int l = 0; l < kSizeY; ++l) { for (int m = 0; m < kSizeX; ++m) { int curHeight = heightOffset + i*strideY + l; int curWidth = widthOffset + j*strideX + m; int idx = curWidth + width*(curHeight + height*(k + b*channel)); bool valid = (curHeight >=0 && curHeight < height && curWidth >=0 && curWidth < width); float value = (valid != 0)? input[idx] : -INFINITY; max = (value > max) ? value : max; } } output[outIndex] = max; } } void MaxPoolLayerGPU::forwardDepthGPU(const int &width, const int &height, const int &channel, const int &outWidth, const int &outHeight, const int &outChannel, const int &batch, float *const &input, float *const &output) { size_t n = outHeight * outWidth * 1 * batch; maxpoolDepthKernel<<<Cuda::getGrid(n), Cuda::blockThread, 0, Cuda::getCudaStream()>>>(n, width, height, channel, outChannel, batch, input, output); CUDA_CHECK(cudaPeekAtLastError()); } void MaxPoolLayerGPU::forwardNormalGPU(const int &width, const int &height, const int &channel, const int &outWidth, const int &outHeight, const int &outChannel, const int &strideX, const int &strideY, const int &kSizeX, const int kSizeY, const int &paddingX, const int &paddingY, const int &batch, float *const &input, float *const &output) { size_t n = outHeight * outWidth * outChannel * batch; maxpoolNormalKernel<<<Cuda::getGrid(n), Cuda::blockThread, 0, Cuda::getCudaStream()>>>(n,width,height, channel, outHeight,outWidth, strideX,strideY, kSizeX,kSizeY, paddingX,paddingY, input,output); CUDA_CHECK(cudaPeekAtLastError()); } }
0c74233d47cc845017d3ef0e2d7998118c056782.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @precisions mixed zc -> ds */ #include "common_magma.h" #define NB 64 // TODO check precision, as in zlag2c? __global__ void zclaswp_kernel(int n, magmaDoubleComplex *A, int lda, magmaFloatComplex *SA, int m, const magma_int_t *ipiv) { int ind = blockIdx.x*NB + threadIdx.x; int newind; magmaFloatComplex res; if (ind < m) { SA += ind; ipiv += ind; newind = ipiv[0]; for(int i=0; i < n; i++) { res = MAGMA_C_MAKE( (float)cuCreal(A[newind+i*lda]), (float)cuCimag(A[newind+i*lda]) ); SA[i*lda] = res; } } } __global__ void zclaswp_inv_kernel(int n, magmaDoubleComplex *A, int lda, magmaFloatComplex *SA, int m, const magma_int_t *ipiv) { int ind = blockIdx.x*NB + threadIdx.x; int newind; magmaDoubleComplex res; if (ind < m) { A += ind; ipiv += ind; newind = ipiv[0]; for(int i=0; i < n; i++) { res = MAGMA_Z_MAKE( (double)cuCrealf(SA[newind+i*lda]), (double)cuCimagf(SA[newind+i*lda]) ); A[i*lda] = res; } } } /** Purpose ------- Row i of A is cast to single precision in row ipiv[i] of SA (incx > 0), or row i of SA is cast to double precision in row ipiv[i] of A (incx < 0), for 0 <= i < M. @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix A. @param[in,out] A DOUBLE PRECISION array on the GPU, dimension (LDA,N) On entry, the M-by-N matrix to which the row interchanges will be applied. TODO update docs @param[in] lda INTEGER. LDA specifies the leading dimension of A. @param[in,out] SA REAL array on the GPU, dimension (LDA,N) On exit, the single precision, permuted matrix. TODO update docs @param[in] m The number of rows to be interchanged. @param[in] ipiv INTEGER array on the GPU, dimension (M) The vector of pivot indices. Row i of A is cast to single precision in row ipiv[i] of SA, for 0 <= i < m. @param[in] incx INTEGER If INCX is negative, the pivots are applied in reverse order, otherwise in straight-forward order. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_zaux2 ********************************************************************/ extern "C" void magmablas_zclaswp_q( magma_int_t n, magmaDoubleComplex_ptr A, magma_int_t lda, magmaFloatComplex_ptr SA, magma_int_t m, const magma_int_t *ipiv, magma_int_t incx, magma_queue_t queue ) { int blocks = (m - 1)/NB + 1; dim3 grid(blocks, 1, 1); dim3 threads(NB, 1, 1); if (incx >= 0) hipLaunchKernelGGL(( zclaswp_kernel), dim3(grid), dim3(threads), 0, queue , n, A, lda, SA, m, ipiv); else hipLaunchKernelGGL(( zclaswp_inv_kernel), dim3(grid), dim3(threads), 0, queue , n, A, lda, SA, m, ipiv); } /** @see magmablas_zclaswp_q @ingroup magma_zaux2 ********************************************************************/ extern "C" void magmablas_zclaswp( magma_int_t n, magmaDoubleComplex_ptr A, magma_int_t lda, magmaFloatComplex_ptr SA, magma_int_t m, const magma_int_t *ipiv, magma_int_t incx ) { magmablas_zclaswp_q( n, A, lda, SA, m, ipiv, incx, magma_stream ); }
0c74233d47cc845017d3ef0e2d7998118c056782.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @precisions mixed zc -> ds */ #include "common_magma.h" #define NB 64 // TODO check precision, as in zlag2c? __global__ void zclaswp_kernel(int n, magmaDoubleComplex *A, int lda, magmaFloatComplex *SA, int m, const magma_int_t *ipiv) { int ind = blockIdx.x*NB + threadIdx.x; int newind; magmaFloatComplex res; if (ind < m) { SA += ind; ipiv += ind; newind = ipiv[0]; for(int i=0; i < n; i++) { res = MAGMA_C_MAKE( (float)cuCreal(A[newind+i*lda]), (float)cuCimag(A[newind+i*lda]) ); SA[i*lda] = res; } } } __global__ void zclaswp_inv_kernel(int n, magmaDoubleComplex *A, int lda, magmaFloatComplex *SA, int m, const magma_int_t *ipiv) { int ind = blockIdx.x*NB + threadIdx.x; int newind; magmaDoubleComplex res; if (ind < m) { A += ind; ipiv += ind; newind = ipiv[0]; for(int i=0; i < n; i++) { res = MAGMA_Z_MAKE( (double)cuCrealf(SA[newind+i*lda]), (double)cuCimagf(SA[newind+i*lda]) ); A[i*lda] = res; } } } /** Purpose ------- Row i of A is cast to single precision in row ipiv[i] of SA (incx > 0), or row i of SA is cast to double precision in row ipiv[i] of A (incx < 0), for 0 <= i < M. @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix A. @param[in,out] A DOUBLE PRECISION array on the GPU, dimension (LDA,N) On entry, the M-by-N matrix to which the row interchanges will be applied. TODO update docs @param[in] lda INTEGER. LDA specifies the leading dimension of A. @param[in,out] SA REAL array on the GPU, dimension (LDA,N) On exit, the single precision, permuted matrix. TODO update docs @param[in] m The number of rows to be interchanged. @param[in] ipiv INTEGER array on the GPU, dimension (M) The vector of pivot indices. Row i of A is cast to single precision in row ipiv[i] of SA, for 0 <= i < m. @param[in] incx INTEGER If INCX is negative, the pivots are applied in reverse order, otherwise in straight-forward order. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_zaux2 ********************************************************************/ extern "C" void magmablas_zclaswp_q( magma_int_t n, magmaDoubleComplex_ptr A, magma_int_t lda, magmaFloatComplex_ptr SA, magma_int_t m, const magma_int_t *ipiv, magma_int_t incx, magma_queue_t queue ) { int blocks = (m - 1)/NB + 1; dim3 grid(blocks, 1, 1); dim3 threads(NB, 1, 1); if (incx >= 0) zclaswp_kernel<<< grid, threads, 0, queue >>>(n, A, lda, SA, m, ipiv); else zclaswp_inv_kernel<<< grid, threads, 0, queue >>>(n, A, lda, SA, m, ipiv); } /** @see magmablas_zclaswp_q @ingroup magma_zaux2 ********************************************************************/ extern "C" void magmablas_zclaswp( magma_int_t n, magmaDoubleComplex_ptr A, magma_int_t lda, magmaFloatComplex_ptr SA, magma_int_t m, const magma_int_t *ipiv, magma_int_t incx ) { magmablas_zclaswp_q( n, A, lda, SA, m, ipiv, incx, magma_stream ); }
c9a2caa0036e917e4b8a551f6e01c8621b702be7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <cassert> #include <iostream> #include<cuda.h> #include<cuda_runtime.h> //#include<device_launch_parameter.h> using namespace std; using std::cout; __global__ void vectorAdd(int* a, int* b, int* c, int N) { // Calculate global thread ID int tid = (blockDim.x * blockIdx.x) + threadIdx.x; //int tid = 10; // Boundary check if (tid < N) { c[tid] = a[tid] + b[tid]; } } int main() { // Array size of 2^16 (65536 elements) const int N = 1 << 16; size_t bytes = N * sizeof(int); // Declare unified memory pointers int* a, * b, * c; // Allocate memory for these pointers hipMallocManaged(&a, bytes); hipMallocManaged(&b, bytes); hipMallocManaged(&c, bytes); // Initialize vectors for (int i = 0; i < N; i++) { a[i] = rand() % 100; b[i] = rand() % 100; } // Threads per CTA (1024 threads per CTA) int BLOCK_SIZE = 1 << 10; // CTAs per Grid int GRID_SIZE = (N + BLOCK_SIZE - 1) / BLOCK_SIZE; // Call CUDA kernel vectorAdd << < GRID_SIZE, BLOCK_SIZE >> > (a, b, c, N); // Wait for all previous operations before using values // We need this because we don't get the implicit synchronization of // hipMemcpy like in the original example hipDeviceSynchronize(); // Verify the result on the CPU for (int i = 0; i < N; i++) { assert(c[i] == a[i] + b[i]); } // Free unified memory (same as memory allocated with hipMalloc) hipFree(a); hipFree(b); hipFree(c); std::cout << "COMPLETED SUCCESSFULLY!\n"; return 0; }
c9a2caa0036e917e4b8a551f6e01c8621b702be7.cu
#include <stdio.h> #include <cassert> #include <iostream> #include<cuda.h> #include<cuda_runtime.h> //#include<device_launch_parameter.h> using namespace std; using std::cout; __global__ void vectorAdd(int* a, int* b, int* c, int N) { // Calculate global thread ID int tid = (blockDim.x * blockIdx.x) + threadIdx.x; //int tid = 10; // Boundary check if (tid < N) { c[tid] = a[tid] + b[tid]; } } int main() { // Array size of 2^16 (65536 elements) const int N = 1 << 16; size_t bytes = N * sizeof(int); // Declare unified memory pointers int* a, * b, * c; // Allocate memory for these pointers cudaMallocManaged(&a, bytes); cudaMallocManaged(&b, bytes); cudaMallocManaged(&c, bytes); // Initialize vectors for (int i = 0; i < N; i++) { a[i] = rand() % 100; b[i] = rand() % 100; } // Threads per CTA (1024 threads per CTA) int BLOCK_SIZE = 1 << 10; // CTAs per Grid int GRID_SIZE = (N + BLOCK_SIZE - 1) / BLOCK_SIZE; // Call CUDA kernel vectorAdd << < GRID_SIZE, BLOCK_SIZE >> > (a, b, c, N); // Wait for all previous operations before using values // We need this because we don't get the implicit synchronization of // cudaMemcpy like in the original example cudaDeviceSynchronize(); // Verify the result on the CPU for (int i = 0; i < N; i++) { assert(c[i] == a[i] + b[i]); } // Free unified memory (same as memory allocated with cudaMalloc) cudaFree(a); cudaFree(b); cudaFree(c); std::cout << "COMPLETED SUCCESSFULLY!\n"; return 0; }
73078901a39bacc83de40c98242b04a45edab709.hip
// !!! This is a file automatically generated by hipify!!! #include "../THCTensorMathPointwise.cuh" #include "THHTensor.hpp" #include "THHStream.h" #include "../generic/THCTensorMathPointwise.cu" #include <THH/THHGenerateFloatType.h>
73078901a39bacc83de40c98242b04a45edab709.cu
#include "../THCTensorMathPointwise.cuh" #include "THCTensor.hpp" #include "THCStream.h" #include "../generic/THCTensorMathPointwise.cu" #include <THC/THCGenerateFloatType.h>
8fd107207bba03ccb8970aa1aebb9128a2d89b65.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ #ifndef LBM_KERNEL_CU #define LBM_KERNEL_CU #include "lbm.h" /******************************************************************************/ __global__ void performStreamCollide_kernel( float* srcGrid, float* dstGrid ) { //Using some predefined macros here. Consider this the declaration // and initialization of the variables SWEEP_X, SWEEP_Y and SWEEP_Z SWEEP_VAR SWEEP_X = threadIdx.x; SWEEP_Y = blockIdx.x; SWEEP_Z = blockIdx.y; float temp_swp, tempC, tempN, tempS, tempE, tempW, tempT, tempB; float tempNE, tempNW, tempSE, tempSW, tempNT, tempNB, tempST ; float tempSB, tempET, tempEB, tempWT, tempWB ; //Load all of the input fields //This is a gather operation of the SCATTER preprocessor variable // is undefined in layout_config.h, or a "local" read otherwise tempC = SRC_C(srcGrid); tempN = SRC_N(srcGrid); tempS = SRC_S(srcGrid); tempE = SRC_E(srcGrid); tempW = SRC_W(srcGrid); tempT = SRC_T(srcGrid); tempB = SRC_B(srcGrid); tempNE= SRC_NE(srcGrid); tempNW= SRC_NW(srcGrid); tempSE = SRC_SE(srcGrid); tempSW = SRC_SW(srcGrid); tempNT = SRC_NT(srcGrid); tempNB = SRC_NB(srcGrid); tempST = SRC_ST(srcGrid); tempSB = SRC_SB(srcGrid); tempET = SRC_ET(srcGrid); tempEB = SRC_EB(srcGrid); tempWT = SRC_WT(srcGrid); tempWB = SRC_WB(srcGrid); //Test whether the cell is fluid or obstacle if( TEST_FLAG_SWEEP( srcGrid, OBSTACLE )) { //Swizzle the inputs: reflect any fluid coming into this cell // back to where it came from temp_swp = tempN ; tempN = tempS ; tempS = temp_swp ; temp_swp = tempE ; tempE = tempW ; tempW = temp_swp; temp_swp = tempT ; tempT = tempB ; tempB = temp_swp; temp_swp = tempNE; tempNE = tempSW ; tempSW = temp_swp; temp_swp = tempNW; tempNW = tempSE ; tempSE = temp_swp; temp_swp = tempNT ; tempNT = tempSB ; tempSB = temp_swp; temp_swp = tempNB ; tempNB = tempST ; tempST = temp_swp; temp_swp = tempET ; tempET= tempWB ; tempWB = temp_swp; temp_swp = tempEB ; tempEB = tempWT ; tempWT = temp_swp; } else { //The math meat of LBM: ignore for optimization float ux, uy, uz, rho, u2; float temp1, temp2, temp_base; rho = tempC + tempN + tempS + tempE + tempW + tempT + tempB + tempNE + tempNW + tempSE + tempSW + tempNT + tempNB + tempST + tempSB + tempET + tempEB + tempWT + tempWB; ux = + tempE - tempW + tempNE - tempNW + tempSE - tempSW + tempET + tempEB - tempWT - tempWB; uy = + tempN - tempS + tempNE + tempNW - tempSE - tempSW + tempNT + tempNB - tempST - tempSB; uz = + tempT - tempB + tempNT - tempNB + tempST - tempSB + tempET - tempEB + tempWT - tempWB; ux /= rho; uy /= rho; uz /= rho; if( TEST_FLAG_SWEEP( srcGrid, ACCEL )) { ux = 0.005f; uy = 0.002f; uz = 0.000f; } u2 = 1.5f * (ux*ux + uy*uy + uz*uz) - 1.0f; temp_base = OMEGA*rho; temp1 = DFL1*temp_base; //Put the output values for this cell in the shared memory temp_base = OMEGA*rho; temp1 = DFL1*temp_base; temp2 = 1.0f-OMEGA; tempC = temp2*tempC + temp1*( - u2); temp1 = DFL2*temp_base; tempN = temp2*tempN + temp1*( uy*(4.5f*uy + 3.0f) - u2); tempS = temp2*tempS + temp1*( uy*(4.5f*uy - 3.0f) - u2); tempT = temp2*tempT + temp1*( uz*(4.5f*uz + 3.0f) - u2); tempB = temp2*tempB + temp1*( uz*(4.5f*uz - 3.0f) - u2); tempE = temp2*tempE + temp1*( ux*(4.5f*ux + 3.0f) - u2); tempW = temp2*tempW + temp1*( 
ux*(4.5f*ux - 3.0f) - u2); temp1 = DFL3*temp_base; tempNT= temp2*tempNT + temp1 *( (+uy+uz)*(4.5f*(+uy+uz) + 3.0f) - u2); tempNB= temp2*tempNB + temp1 *( (+uy-uz)*(4.5f*(+uy-uz) + 3.0f) - u2); tempST= temp2*tempST + temp1 *( (-uy+uz)*(4.5f*(-uy+uz) + 3.0f) - u2); tempSB= temp2*tempSB + temp1 *( (-uy-uz)*(4.5f*(-uy-uz) + 3.0f) - u2); tempNE = temp2*tempNE + temp1 *( (+ux+uy)*(4.5f*(+ux+uy) + 3.0f) - u2); tempSE = temp2*tempSE + temp1 *((+ux-uy)*(4.5f*(+ux-uy) + 3.0f) - u2); tempET = temp2*tempET + temp1 *( (+ux+uz)*(4.5f*(+ux+uz) + 3.0f) - u2); tempEB = temp2*tempEB + temp1 *( (+ux-uz)*(4.5f*(+ux-uz) + 3.0f) - u2); tempNW = temp2*tempNW + temp1 *( (-ux+uy)*(4.5f*(-ux+uy) + 3.0f) - u2); tempSW = temp2*tempSW + temp1 *( (-ux-uy)*(4.5f*(-ux-uy) + 3.0f) - u2); tempWT = temp2*tempWT + temp1 *( (-ux+uz)*(4.5f*(-ux+uz) + 3.0f) - u2); tempWB = temp2*tempWB + temp1 *( (-ux-uz)*(4.5f*(-ux-uz) + 3.0f) - u2); } //Write the results computed above //This is a scatter operation of the SCATTER preprocessor variable // is defined in layout_config.h, or a "local" write otherwise DST_C ( dstGrid ) = tempC; DST_N ( dstGrid ) = tempN; DST_S ( dstGrid ) = tempS; DST_E ( dstGrid ) = tempE; DST_W ( dstGrid ) = tempW; DST_T ( dstGrid ) = tempT; DST_B ( dstGrid ) = tempB; DST_NE( dstGrid ) = tempNE; DST_NW( dstGrid ) = tempNW; DST_SE( dstGrid ) = tempSE; DST_SW( dstGrid ) = tempSW; DST_NT( dstGrid ) = tempNT; DST_NB( dstGrid ) = tempNB; DST_ST( dstGrid ) = tempST; DST_SB( dstGrid ) = tempSB; DST_ET( dstGrid ) = tempET; DST_EB( dstGrid ) = tempEB; DST_WT( dstGrid ) = tempWT; DST_WB( dstGrid ) = tempWB; } #endif // LBM_KERNEL_CU
8fd107207bba03ccb8970aa1aebb9128a2d89b65.cu
/*************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ #ifndef LBM_KERNEL_CU #define LBM_KERNEL_CU #include "lbm.h" /******************************************************************************/ __global__ void performStreamCollide_kernel( float* srcGrid, float* dstGrid ) { //Using some predefined macros here. Consider this the declaration // and initialization of the variables SWEEP_X, SWEEP_Y and SWEEP_Z SWEEP_VAR SWEEP_X = threadIdx.x; SWEEP_Y = blockIdx.x; SWEEP_Z = blockIdx.y; float temp_swp, tempC, tempN, tempS, tempE, tempW, tempT, tempB; float tempNE, tempNW, tempSE, tempSW, tempNT, tempNB, tempST ; float tempSB, tempET, tempEB, tempWT, tempWB ; //Load all of the input fields //This is a gather operation of the SCATTER preprocessor variable // is undefined in layout_config.h, or a "local" read otherwise tempC = SRC_C(srcGrid); tempN = SRC_N(srcGrid); tempS = SRC_S(srcGrid); tempE = SRC_E(srcGrid); tempW = SRC_W(srcGrid); tempT = SRC_T(srcGrid); tempB = SRC_B(srcGrid); tempNE= SRC_NE(srcGrid); tempNW= SRC_NW(srcGrid); tempSE = SRC_SE(srcGrid); tempSW = SRC_SW(srcGrid); tempNT = SRC_NT(srcGrid); tempNB = SRC_NB(srcGrid); tempST = SRC_ST(srcGrid); tempSB = SRC_SB(srcGrid); tempET = SRC_ET(srcGrid); tempEB = SRC_EB(srcGrid); tempWT = SRC_WT(srcGrid); tempWB = SRC_WB(srcGrid); //Test whether the cell is fluid or obstacle if( TEST_FLAG_SWEEP( srcGrid, OBSTACLE )) { //Swizzle the inputs: reflect any fluid coming into this cell // back to where it came from temp_swp = tempN ; tempN = tempS ; tempS = temp_swp ; temp_swp = tempE ; tempE = tempW ; tempW = temp_swp; temp_swp = tempT ; tempT = tempB ; tempB = temp_swp; temp_swp = tempNE; tempNE = tempSW ; tempSW = temp_swp; temp_swp = tempNW; tempNW = tempSE ; tempSE = temp_swp; temp_swp = tempNT ; tempNT = tempSB ; tempSB = temp_swp; temp_swp = tempNB ; tempNB = tempST ; tempST = temp_swp; temp_swp = tempET ; tempET= tempWB ; tempWB = temp_swp; temp_swp = tempEB ; tempEB = tempWT ; tempWT = temp_swp; } else { //The math meat of LBM: ignore for optimization float ux, uy, uz, rho, u2; float temp1, temp2, temp_base; rho = tempC + tempN + tempS + tempE + tempW + tempT + tempB + tempNE + tempNW + tempSE + tempSW + tempNT + tempNB + tempST + tempSB + tempET + tempEB + tempWT + tempWB; ux = + tempE - tempW + tempNE - tempNW + tempSE - tempSW + tempET + tempEB - tempWT - tempWB; uy = + tempN - tempS + tempNE + tempNW - tempSE - tempSW + tempNT + tempNB - tempST - tempSB; uz = + tempT - tempB + tempNT - tempNB + tempST - tempSB + tempET - tempEB + tempWT - tempWB; ux /= rho; uy /= rho; uz /= rho; if( TEST_FLAG_SWEEP( srcGrid, ACCEL )) { ux = 0.005f; uy = 0.002f; uz = 0.000f; } u2 = 1.5f * (ux*ux + uy*uy + uz*uz) - 1.0f; temp_base = OMEGA*rho; temp1 = DFL1*temp_base; //Put the output values for this cell in the shared memory temp_base = OMEGA*rho; temp1 = DFL1*temp_base; temp2 = 1.0f-OMEGA; tempC = temp2*tempC + temp1*( - u2); temp1 = DFL2*temp_base; tempN = temp2*tempN + temp1*( uy*(4.5f*uy + 3.0f) - u2); tempS = temp2*tempS + temp1*( uy*(4.5f*uy - 3.0f) - u2); tempT = temp2*tempT + temp1*( uz*(4.5f*uz + 3.0f) - u2); tempB = temp2*tempB + temp1*( uz*(4.5f*uz - 3.0f) - u2); tempE = temp2*tempE + temp1*( ux*(4.5f*ux + 3.0f) - u2); tempW = temp2*tempW + temp1*( ux*(4.5f*ux - 3.0f) - u2); temp1 = DFL3*temp_base; tempNT= temp2*tempNT + temp1 *( 
(+uy+uz)*(4.5f*(+uy+uz) + 3.0f) - u2); tempNB= temp2*tempNB + temp1 *( (+uy-uz)*(4.5f*(+uy-uz) + 3.0f) - u2); tempST= temp2*tempST + temp1 *( (-uy+uz)*(4.5f*(-uy+uz) + 3.0f) - u2); tempSB= temp2*tempSB + temp1 *( (-uy-uz)*(4.5f*(-uy-uz) + 3.0f) - u2); tempNE = temp2*tempNE + temp1 *( (+ux+uy)*(4.5f*(+ux+uy) + 3.0f) - u2); tempSE = temp2*tempSE + temp1 *((+ux-uy)*(4.5f*(+ux-uy) + 3.0f) - u2); tempET = temp2*tempET + temp1 *( (+ux+uz)*(4.5f*(+ux+uz) + 3.0f) - u2); tempEB = temp2*tempEB + temp1 *( (+ux-uz)*(4.5f*(+ux-uz) + 3.0f) - u2); tempNW = temp2*tempNW + temp1 *( (-ux+uy)*(4.5f*(-ux+uy) + 3.0f) - u2); tempSW = temp2*tempSW + temp1 *( (-ux-uy)*(4.5f*(-ux-uy) + 3.0f) - u2); tempWT = temp2*tempWT + temp1 *( (-ux+uz)*(4.5f*(-ux+uz) + 3.0f) - u2); tempWB = temp2*tempWB + temp1 *( (-ux-uz)*(4.5f*(-ux-uz) + 3.0f) - u2); } //Write the results computed above //This is a scatter operation of the SCATTER preprocessor variable // is defined in layout_config.h, or a "local" write otherwise DST_C ( dstGrid ) = tempC; DST_N ( dstGrid ) = tempN; DST_S ( dstGrid ) = tempS; DST_E ( dstGrid ) = tempE; DST_W ( dstGrid ) = tempW; DST_T ( dstGrid ) = tempT; DST_B ( dstGrid ) = tempB; DST_NE( dstGrid ) = tempNE; DST_NW( dstGrid ) = tempNW; DST_SE( dstGrid ) = tempSE; DST_SW( dstGrid ) = tempSW; DST_NT( dstGrid ) = tempNT; DST_NB( dstGrid ) = tempNB; DST_ST( dstGrid ) = tempST; DST_SB( dstGrid ) = tempSB; DST_ET( dstGrid ) = tempET; DST_EB( dstGrid ) = tempEB; DST_WT( dstGrid ) = tempWT; DST_WB( dstGrid ) = tempWB; } #endif // LBM_KERNEL_CU
d6604cd88423a9cd6b105ef59971b52701850bbf.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <float.h> #include <math.h> #include <time.h> #include <string.h> #include <hip/hip_runtime.h> #include "constants.h" #define N_THREADS_PER_BLOCK 256 __global__ void find_min_max_u_kernel( const float *__restrict__ g_u, float *__restrict__ g_max, float *__restrict__ g_min ) { extern __shared__ float sdata[]; unsigned int tid = threadIdx.x; unsigned int tidFromBack = blockDim.x - 1 - tid; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; sdata[tid] = g_u[i]; __syncthreads(); for (unsigned int s = blockDim.x/2; s > 0; s >>= 1) { if (tid < s) { if (sdata[tid + s] > sdata[tid]) { sdata[tid] = sdata[tid + s]; } } if (tidFromBack < s) { if (sdata[tid - s] < sdata[tid]) { sdata[tid] = sdata[tid - s]; } } __syncthreads(); } if (tid == 0) { g_max[blockIdx.x] = sdata[0]; } if (tidFromBack == 0) { g_min[blockIdx.x] = sdata[tid]; } } void find_min_max_u_cuda( const float *__restrict__ u, llint u_size, float *__restrict__ min_u, float *__restrict__ max_u ) { llint u_block = u_size / N_THREADS_PER_BLOCK; llint u_remainder = u_size % N_THREADS_PER_BLOCK; llint d_block = u_block; if (u_remainder != 0) { d_block += 1; } llint d_size = d_block * N_THREADS_PER_BLOCK; llint reminder_size = N_THREADS_PER_BLOCK - u_remainder; float *reminder = (float *)malloc(reminder_size * sizeof(float)); memcpy(reminder, u, reminder_size * sizeof(float)); float* max = (float*)malloc(d_block * sizeof(float)); float *min = (float*)malloc(d_block * sizeof(float)); float* d_u, * d_max, * d_min; hipMalloc(&d_u, d_size * sizeof(float)); hipMalloc(&d_max, d_block * sizeof(float)); hipMalloc(&d_min, d_block * sizeof(float)); hipMemcpy(d_u, u, u_size * sizeof(float), hipMemcpyHostToDevice); if (u_remainder != 0) { /* pad only the last, partial block; when u_size is an exact multiple of the block size this copy would write past the end of d_u */ hipMemcpy(d_u+u_size, reminder, reminder_size * sizeof(float), hipMemcpyHostToDevice); } hipLaunchKernelGGL(( find_min_max_u_kernel), dim3(d_block), dim3(N_THREADS_PER_BLOCK), sizeof(float) * N_THREADS_PER_BLOCK, 0, d_u, d_max, d_min); hipMemcpy(max, d_max, d_block * sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(min, d_min, d_block * sizeof(float), hipMemcpyDeviceToHost); *min_u = FLT_MAX, *max_u = -FLT_MAX; /* -FLT_MAX, not FLT_MIN (smallest positive float), so all-negative inputs yield the correct maximum */ for (size_t i = 0; i < d_block; i++) { *min_u = fminf(*min_u, min[i]); *max_u = fmaxf(*max_u, max[i]); } hipFree(d_max); hipFree(d_min); hipFree(d_u); free(reminder); free(max); free(min); }
d6604cd88423a9cd6b105ef59971b52701850bbf.cu
#include <stdio.h> #include <float.h> #include <math.h> #include <time.h> #include <string.h> #include <cuda.h> #include "constants.h" #define N_THREADS_PER_BLOCK 256 __global__ void find_min_max_u_kernel( const float *__restrict__ g_u, float *__restrict__ g_max, float *__restrict__ g_min ) { extern __shared__ float sdata[]; unsigned int tid = threadIdx.x; unsigned int tidFromBack = blockDim.x - 1 - tid; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; sdata[tid] = g_u[i]; __syncthreads(); for (unsigned int s = blockDim.x/2; s > 0; s >>= 1) { if (tid < s) { if (sdata[tid + s] > sdata[tid]) { sdata[tid] = sdata[tid + s]; } } if (tidFromBack < s) { if (sdata[tid - s] < sdata[tid]) { sdata[tid] = sdata[tid - s]; } } __syncthreads(); } if (tid == 0) { g_max[blockIdx.x] = sdata[0]; } if (tidFromBack == 0) { g_min[blockIdx.x] = sdata[tid]; } } void find_min_max_u_cuda( const float *__restrict__ u, llint u_size, float *__restrict__ min_u, float *__restrict__ max_u ) { llint u_block = u_size / N_THREADS_PER_BLOCK; llint u_remainder = u_size % N_THREADS_PER_BLOCK; llint d_block = u_block; if (u_remainder != 0) { d_block += 1; } llint d_size = d_block * N_THREADS_PER_BLOCK; llint reminder_size = N_THREADS_PER_BLOCK - u_remainder; float *reminder = (float *)malloc(reminder_size * sizeof(float)); memcpy(reminder, u, reminder_size * sizeof(float)); float* max = (float*)malloc(d_block * sizeof(float)); float *min = (float*)malloc(d_block * sizeof(float)); float* d_u, * d_max, * d_min; cudaMalloc(&d_u, d_size * sizeof(float)); cudaMalloc(&d_max, d_block * sizeof(float)); cudaMalloc(&d_min, d_block * sizeof(float)); cudaMemcpy(d_u, u, u_size * sizeof(float), cudaMemcpyHostToDevice); if (u_remainder != 0) { /* pad only the last, partial block; when u_size is an exact multiple of the block size this copy would write past the end of d_u */ cudaMemcpy(d_u+u_size, reminder, reminder_size * sizeof(float), cudaMemcpyHostToDevice); } find_min_max_u_kernel<<<d_block, N_THREADS_PER_BLOCK, sizeof(float) * N_THREADS_PER_BLOCK>>>(d_u, d_max, d_min); cudaMemcpy(max, d_max, d_block * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(min, d_min, d_block * sizeof(float), cudaMemcpyDeviceToHost); *min_u = FLT_MAX, *max_u = -FLT_MAX; /* -FLT_MAX, not FLT_MIN (smallest positive float), so all-negative inputs yield the correct maximum */ for (size_t i = 0; i < d_block; i++) { *min_u = fminf(*min_u, min[i]); *max_u = fmaxf(*max_u, max[i]); } cudaFree(d_max); cudaFree(d_min); cudaFree(d_u); free(reminder); free(max); free(min); }
ff0a51011787e60aa3378c5f705e94ad6c2d7632.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ctranslate2/ops/rms_norm.h" #include <hipcub/hipcub.hpp> #include "cuda/helpers.h" #include "cuda/utils.h" namespace ctranslate2 { namespace ops { constexpr dim_t num_threads = 512; template <typename T> __global__ void rms_norm_kernel(const T* input, const T* gamma, T* output, cuda::index_t depth, float epsilon) { typedef hipcub::BlockReduce<float, num_threads> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ float s_inv_rms; input += blockIdx.x * depth; output += blockIdx.x * depth; float sum_squares = 0; for (cuda::index_t i = threadIdx.x; i < depth; i += blockDim.x) sum_squares += float(input[i]) * float(input[i]); sum_squares = BlockReduce(temp_storage).Sum(sum_squares); if (threadIdx.x == 0) s_inv_rms = rsqrtf(sum_squares / depth + epsilon); __syncthreads(); for (cuda::index_t i = threadIdx.x; i < depth; i += blockDim.x) output[i] = float(input[i]) * s_inv_rms * float(gamma[i]); } template <Device D, typename T> void RMSNorm::compute(const StorageView& gamma, const StorageView& input, StorageView& output) const { const dim_t depth = input.dim(-1); const dim_t batch_size = input.size() / depth; hipLaunchKernelGGL(( rms_norm_kernel), dim3(batch_size), dim3(num_threads), 0, cuda::get_cuda_stream(), cuda::device_cast(input.data<T>()), cuda::device_cast(gamma.data<T>()), cuda::device_cast(output.data<T>()), depth, _epsilon); } #define DECLARE_IMPL(T) \ template void RMSNorm::compute<Device::CUDA, T>(const StorageView&, \ const StorageView&, \ StorageView&) const; DECLARE_IMPL(float) DECLARE_IMPL(float16_t) } }
ff0a51011787e60aa3378c5f705e94ad6c2d7632.cu
#include "ctranslate2/ops/rms_norm.h" #include <cub/block/block_reduce.cuh> #include "cuda/helpers.h" #include "cuda/utils.h" namespace ctranslate2 { namespace ops { constexpr dim_t num_threads = 512; template <typename T> __global__ void rms_norm_kernel(const T* input, const T* gamma, T* output, cuda::index_t depth, float epsilon) { typedef cub::BlockReduce<float, num_threads> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; __shared__ float s_inv_rms; input += blockIdx.x * depth; output += blockIdx.x * depth; float sum_squares = 0; for (cuda::index_t i = threadIdx.x; i < depth; i += blockDim.x) sum_squares += float(input[i]) * float(input[i]); sum_squares = BlockReduce(temp_storage).Sum(sum_squares); if (threadIdx.x == 0) s_inv_rms = rsqrtf(sum_squares / depth + epsilon); __syncthreads(); for (cuda::index_t i = threadIdx.x; i < depth; i += blockDim.x) output[i] = float(input[i]) * s_inv_rms * float(gamma[i]); } template <Device D, typename T> void RMSNorm::compute(const StorageView& gamma, const StorageView& input, StorageView& output) const { const dim_t depth = input.dim(-1); const dim_t batch_size = input.size() / depth; rms_norm_kernel<<<batch_size, num_threads, 0, cuda::get_cuda_stream()>>>( cuda::device_cast(input.data<T>()), cuda::device_cast(gamma.data<T>()), cuda::device_cast(output.data<T>()), depth, _epsilon); } #define DECLARE_IMPL(T) \ template void RMSNorm::compute<Device::CUDA, T>(const StorageView&, \ const StorageView&, \ StorageView&) const; DECLARE_IMPL(float) DECLARE_IMPL(float16_t) } }
3cb598caea8c25c83c7b22859374b7b19ba08560.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void add_constant(int* arr, int k, int arr_size) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < arr_size) { arr[i] += k; } }
3cb598caea8c25c83c7b22859374b7b19ba08560.cu
#include "includes.h" __global__ void add_constant(int* arr, int k, int arr_size) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < arr_size) { arr[i] += k; } }
0f8592f49a0f768ca660d86df66707d8f94c5ae3.hip
// !!! This is a file automatically generated by hipify!!! void {{name}}_setup_constants( {% for c in consts -%} {{ c.type }} {{ c.emit_name(name_prefix='h_') }}{{ ',' if not loop.last }} {% endfor %} ) { {% for c in consts -%} /* HIP has no by-name (string) symbol lookup, so the constant is passed through the HIP_SYMBOL macro instead of a quoted name */ hipMemcpyToSymbol(HIP_SYMBOL({{ c.device_name() }}), &{{ c.emit_name(name_prefix='h_') }}, {{ c.sizeof() }}, 0, hipMemcpyHostToDevice); {% endfor %} }
0f8592f49a0f768ca660d86df66707d8f94c5ae3.cu
void {{name}}_setup_constants( {% for c in consts -%} {{ c.type }} {{ c.emit_name(name_prefix='h_') }}{{ ',' if not loop.last }} {% endfor %} ) { {% for c in consts -%} cudaMemcpyToSymbol("{{ c.device_name() }}", &{{ c.emit_name(name_prefix='h_') }}, {{ c.sizeof() }}, 0, cudaMemcpyHostToDevice); {% endfor %} }
853dfe7500fc84367301f0f3c258af8f3031e1bb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <Indice2D.h> #include <cudaTools.h> #include <ReductionMinMaxTools.h> /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void kernelMinMax(uchar* tabGM, uchar* ptrDevResult, int w, int h); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ void reductionIntraThread(uchar* tabGM, uchar* tabSMMin, uchar* tabSMMax, int w, int h); __device__ void reductionIntraThreadIf(uchar* tabGM, uchar* tabSMMin, uchar* tabSMMax, int w, int h); /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /** * SizeSM must be twice the size of the Shared Memory ! */ __global__ void kernelMinMax(uchar* tabGM, uchar* ptrDevResult, int w, int h) { extern __shared__ uchar tabSM[]; uchar* tabSMMin = tabSM; uchar* tabSMMax = tabSM + (Indice2D::nbThreadLocal() * sizeof(uchar)); // INTRA reductionIntraThread(tabGM, tabSMMin, tabSMMax, w, h); ptrDevResult[0] = 255; ptrDevResult[1] = 0; // SYNC __syncthreads(); // TOOLS ReductionMinMaxTools::reductionMinMax(ptrDevResult, tabSMMin, tabSMMax); } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ void reductionIntraThread(uchar* tabGM, uchar* tabSMMin, uchar* tabSMMax, int w, int h) { const int NB_THREADS = Indice2D::nbThreadLocal(); const int TID = Indice2D::tidLocal(); const int limit = w * h; int s = TID; tabSMMin[TID] = tabGM[s]; tabSMMax[TID] = tabGM[s]; s += NB_THREADS; while (s < limit) { bool smaller = tabGM[s] < tabSMMin[TID]; tabSMMin[TID] = tabGM[s]*(int)smaller + tabSMMin[TID]*((int)(!smaller)); bool greater = tabGM[s] > tabSMMax[TID]; tabSMMax[TID] = tabGM[s]*(int)greater + tabSMMax[TID]*((int)(!greater)); s += NB_THREADS; } } __device__ void reductionIntraThreadIf(uchar* tabGM, uchar* tabSMMin, uchar* tabSMMax, int w, int h) { const int NB_THREADS = Indice2D::nbThreadLocal(); const int TID = Indice2D::tidLocal(); const int limit = w * h; int s = TID; tabSMMin[TID] = tabGM[s]; tabSMMax[TID] = tabGM[s]; s += NB_THREADS; while (s < limit) { if(tabGM[s] < tabSMMin[TID]) { tabSMMin[TID] = tabGM[s]; } if(tabGM[s] > tabSMMax[TID]) { tabSMMax[TID] = tabGM[s]; } s += NB_THREADS; } } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
853dfe7500fc84367301f0f3c258af8f3031e1bb.cu
#include <stdio.h> #include <Indice2D.h> #include <cudaTools.h> #include <ReductionMinMaxTools.h> /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void kernelMinMax(uchar* tabGM, uchar* ptrDevResult, int w, int h); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ void reductionIntraThread(uchar* tabGM, uchar* tabSMMin, uchar* tabSMMax, int w, int h); __device__ void reductionIntraThreadIf(uchar* tabGM, uchar* tabSMMin, uchar* tabSMMax, int w, int h); /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /** * SizeSM must be twice the size of the Shared Memory ! */ __global__ void kernelMinMax(uchar* tabGM, uchar* ptrDevResult, int w, int h) { extern __shared__ uchar tabSM[]; uchar* tabSMMin = tabSM; uchar* tabSMMax = tabSM + (Indice2D::nbThreadLocal() * sizeof(uchar)); // INTRA reductionIntraThread(tabGM, tabSMMin, tabSMMax, w, h); ptrDevResult[0] = 255; ptrDevResult[1] = 0; // SYNC __syncthreads(); // TOOLS ReductionMinMaxTools::reductionMinMax(ptrDevResult, tabSMMin, tabSMMax); } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ void reductionIntraThread(uchar* tabGM, uchar* tabSMMin, uchar* tabSMMax, int w, int h) { const int NB_THREADS = Indice2D::nbThreadLocal(); const int TID = Indice2D::tidLocal(); const int limit = w * h; int s = TID; tabSMMin[TID] = tabGM[s]; tabSMMax[TID] = tabGM[s]; s += NB_THREADS; while (s < limit) { bool smaller = tabGM[s] < tabSMMin[TID]; tabSMMin[TID] = tabGM[s]*(int)smaller + tabSMMin[TID]*((int)(!smaller)); bool greater = tabGM[s] > tabSMMax[TID]; tabSMMax[TID] = tabGM[s]*(int)greater + tabSMMax[TID]*((int)(!greater)); s += NB_THREADS; } } __device__ void reductionIntraThreadIf(uchar* tabGM, uchar* tabSMMin, uchar* tabSMMax, int w, int h) { const int NB_THREADS = Indice2D::nbThreadLocal(); const int TID = Indice2D::tidLocal(); const int limit = w * h; int s = TID; tabSMMin[TID] = tabGM[s]; tabSMMax[TID] = tabGM[s]; s += NB_THREADS; while (s < limit) { if(tabGM[s] < tabSMMin[TID]) { tabSMMin[TID] = tabGM[s]; } if(tabGM[s] > tabSMMax[TID]) { tabSMMax[TID] = tabGM[s]; } s += NB_THREADS; } } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
5ec1c2b7c334fc045049efbd37fe1b731aa03fce.hip
// !!! This is a file automatically generated by hipify!!! /* * * Copyright (c) 2012, Neurasmus B.V., The Netherlands, * web: www.neurasmus.com email: [email protected] * * Any use or reproduction in whole or in parts is prohibited * without the written consent of the copyright owner. * * All Rights Reserved. * * * Author: Sebastian Isaza * Created: 19-01-2012 * Modified: 07-08-2012 * * Description: Top source file of the Inferior Olive model, originally written * in Matlab by Jornt De Gruijl. It contains the implementation of all functions. * The main function allocates the necessary memory, initializes the system * state and runs the model calculations. * */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> //#include <sys/time.h> #include <time.h> #include "infoli.h" #include <hip/hip_runtime.h> //#include "StdAfx.h" typedef unsigned long long timestamp_t; static timestamp_t get_timestamp () { struct timeval now; gettimeofday (&now, NULL); return now.tv_usec + (timestamp_t)now.tv_sec * 1000000; } __global__ void compute_cell_kernel(double *cellStatePtr, double *cellVdend) { int n = 0, p,j,k; k = blockIdx.x*blockDim.x + threadIdx.x; j = blockIdx.y*blockDim.y + threadIdx.y; for(p=j-1;p<=j+1;p++){ for(q=k-1;q<=k+1;q++){ if(((p!=j)||(q!=k)) && ((p>=0)&&(q>=0)) && ((p<IO_NETWORK_DIM1)&&(q<IO_NETWORK_DIM2))){ cellCompParamsPtr[j][k].neighVdend[n++] = cellVdend[i%2][p][q]; }else if(p==j && q==k){ /** <<<<<<< THIS EXCEPTION FIXES THE BUG */ ; // do nothing, this is the cell itself } else{ //store same V_dend so that Ic becomes zero by the subtraction cellCompParamsPtr[j][k].neighVdend[n++] = cellVdend[i%2][j][k]; } } } cellCompParamsPtr[j][k].iAppIn = iApp; cellCompParamsPtr[j][k].prevCellState = &cellStatePtr[i%2][j][k]; cellCompParamsPtr[j][k].newCellState = &cellStatePtr[(i%2)^1][j][k]; //Compute one Cell... ComputeOneCell(&cellCompParamsPtr[j][k]); } int main(int argc, char *argv[]){ char *inFileName; char *outFileName = "InferiorOlive_Output.txt"; FILE *pInFile; FILE *pOutFile; char *iAppBuf; const int iAppBufSize = IAPP_MAX_CHARS*IO_NETWORK_DIM1*IO_NETWORK_DIM2+1; mod_prec iAppArray[IO_NETWORK_SIZE]; int i, j, k, p, q, n; int simSteps = 0; int simTime = 0; int inputFromFile = 0; int initSteps; double *cellStatePtr; cellCompParams **cellCompParamsPtr; int seedvar; char temp[100];//warning: this buffer may overflow mod_prec iApp; timestamp_t t0, t1, secs; //double secs; printf("Inferior Olive Model (%d x %d cell mesh)\n", IO_NETWORK_DIM1, IO_NETWORK_DIM2); //Process command line arguments if(argc == 1){ inputFromFile = 0; printf("Warning: No input file has been specified. 
A one-pulse input will be used.\n"); }else if(argc == 2){ inputFromFile = 1; inFileName = argv[1];//comment out for a hardcoded name pInFile = fopen(inFileName,"r"); if(pInFile==NULL){ printf("Error: Couldn't open %s\n", inFileName); exit(EXIT_FAILURE); } }else{ printf("Error: Too many arguments.\nUsage: ./InferiorOlive <Iapp_input_file> or ./InferiorOlive\n"); exit(EXIT_FAILURE); } //Open output file pOutFile = fopen(outFileName,"w"); if(pOutFile==NULL){ printf("Error: Couldn't create %s\n", outFileName); exit(EXIT_FAILURE); } sprintf(temp, "#simSteps Time(ms) Input(Iapp) Output(V_axon)\n"); fputs(temp, pOutFile); //Malloc for iAppBuffer holding iApp arrays, one 2D array (a single line in the file though) at the time printf("Malloc'ing memory...\n"); printf("iAppBuf: %dB\n", iAppBufSize); iAppBuf = (char *)malloc(iAppBufSize); if(iAppBuf==NULL){ printf("Error: Couldn't malloc for iAppBuf\n"); exit(EXIT_FAILURE); } //Malloc for the array of cellStates and cellCompParams printf("cellStatePtr: %dB\n", 2*IO_NETWORK_SIZE*sizeof(cellState)); //Two cell state structs are needed so as to avoid having to synchronize all consumers before they start rewriting the cell state. cellStatePtr = malloc(IO_NETWORK_DIM1*IO_NETWORK_DIM2*PARAM_SIZE*sizeof(double));//current and next state if(cellStatePtr==NULL){ printf("Error: Couldn't malloc for cellStatePtr\n"); exit(EXIT_FAILURE); } /* cellStatePtr[0] = malloc(IO_NETWORK_DIM1*sizeof(cellState *)); if(cellStatePtr[0]==NULL){ printf("Error: Couldn't malloc for cellStatePtr[0]\n"); exit(EXIT_FAILURE); } for(k=0;k<IO_NETWORK_DIM1;k++){ cellStatePtr[0][k] = malloc(IO_NETWORK_DIM2*sizeof(cellState)); if(cellStatePtr[0][k]==NULL){ printf("Error: Couldn't malloc for cellStatePtr[0][k]\n"); exit(EXIT_FAILURE); } } cellStatePtr[1] = malloc(IO_NETWORK_DIM1*sizeof(cellState)); if(cellStatePtr[1]==NULL){ printf("Error: Couldn't malloc for cellStatePt[1]r\n"); exit(EXIT_FAILURE); } for(k=0;k<IO_NETWORK_DIM1;k++){ cellStatePtr[1][k] = malloc(IO_NETWORK_DIM2*sizeof(cellState)); if(cellStatePtr[1][k]==NULL){ printf("Error: Couldn't malloc for cellStatePtr[1][k]\n"); exit(EXIT_FAILURE); } } */ printf("cellCompParamsPtr: %dB\n", IO_NETWORK_SIZE*sizeof(cellCompParams)); cellCompParamsPtr = malloc(IO_NETWORK_DIM1*sizeof(cellCompParams *)); if(cellCompParamsPtr==NULL){ printf("Error: Couldn't malloc for cellCompParamsPtr\n"); exit(EXIT_FAILURE); } for(k=0;k<IO_NETWORK_DIM1;k++){ cellCompParamsPtr[k] = malloc(IO_NETWORK_DIM2*sizeof(cellCompParams)); if(cellCompParamsPtr[k]==NULL){ printf("Error: Couldn't malloc for cellCompParamsPtr[k]\n"); exit(EXIT_FAILURE); } } //Write initial state values //InitState(cellStatePtr[0]); for(i=0;i<IO_NETWORK_SIZE;i++){ cellStatePtr[i*PARAM_SIZE + STATEADD + b] = -60;// cellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.0337836;// H current cellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.0112788;// High-threshold calciucellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.0049291;// Calcium-dependent potcellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.5;// High-threshold calcium currcellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 3.7152;// Calcium concentration cellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.68; //default arbitrary value bucellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = -60;// cellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 1.0127807;// Sodium (artificial) cellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 
0.3596066;// cellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.7423159;// Low-threshold calciumcellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.0321349;// cellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.2369847;// Potassium (delayed recellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.2369847;// cellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.1;// Potassium (voltage-dependencellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = -60;// cellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.003596066;// Sodium (thalamocortcellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.9;// cellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.2369847;// Potassium (transient) } //Initialize g_CaL seedvar = 1; for(j=0;j<IO_NETWORK_DIM1;j++){ for(k=0;k<IO_NETWORK_DIM2;k++){ srand(seedvar++); // use this for debugging, now there is difference cellStatePtr[1][j][k].soma.g_CaL = cellStatePtr[0][j][k].soma.g_CaL = 0.68; // Uncomment the next two lines to assign different soma conductances to each cell. //cellStatePtr[0][j][k].soma.g_CaL = 0.6+(0.2*(rand()%100)/100); //cellStatePtr[1][j][k].soma.g_CaL = cellStatePtr[0][j][k].soma.g_CaL; } } //Random initialization: put every cell in a different oscillation state if(RAND_INIT){ seedvar=1; for(j=0;j<IO_NETWORK_DIM1;j++){ for(k=0;k<IO_NETWORK_DIM2;k++){ //Put each cell at a different random state //srand(time(NULL));//Initialize random seed - Too fast when called in a loop. srand(seedvar++); // use this for debugging, now there is difference initSteps = rand()%(int)ceil(100/DELTA); initSteps = initSteps | 0x00000001;//make it odd, so that the final state is in prevCellState printf("%d iterations - ",initSteps); for(i=0;i<initSteps;i++){ //Arrange inputs cellCompParamsPtr[j][k].iAppIn = 0;//No stimulus cellCompParamsPtr[j][k].prevCellState = &cellStatePtr[i%2][j][k]; cellCompParamsPtr[j][k].newCellState = &cellStatePtr[(i%2)^1][j][k]; ComputeOneCell(&cellCompParamsPtr[j][k]); } printf("Random initialization of the cell states finished.\n"); } } } t0 = get_timestamp(); if(inputFromFile){ simSteps = 0; //Read full lines until end of file. Every iteration (line) is one simulation step. while(ReadFileLine(iAppBuf, iAppBufSize, pInFile, iAppArray)){ //Compute one sim step for all cells for(j=0;j<IO_NETWORK_DIM1;j++){ for(k=0;k<IO_NETWORK_DIM2;k++){ //Compute one Cell... 
//Arrange inputs cellCompParamsPtr[j][k].iAppIn = iAppArray[j*IO_NETWORK_DIM1+k]; cellCompParamsPtr[j][k].prevCellState = &cellStatePtr[simSteps%2][j][k]; cellCompParamsPtr[j][k].newCellState = &cellStatePtr[(simSteps%2)^1][j][k]; ComputeOneCell(&cellCompParamsPtr[j][k]); //Store results sprintf(temp, "%d %.3f %.3f %.8f\n", simSteps, (float)simSteps/20000, cellCompParamsPtr[j][k].iAppIn, cellStatePtr[(simSteps%2)^1][j][k].axon.V_axon); fputs(temp, pOutFile); } } simSteps++; } }else{ simTime = SIMTIME; // in miliseconds simSteps = ceil(simTime/DELTA); for(i=0;i<simSteps;i++){ //Compute one sim step for all cells //printf("simSteps: %d\n", i); if(i>20000-1 && i<20500-1){ iApp = 6;} // start @ 1 because skipping initial values else{ iApp = 0;} sprintf(temp, "%d %.2f %.1f ", i+1, i*0.05, iApp); // start @ 1 because skipping initial values fputs(temp, pOutFile); for(j=0;j<IO_NETWORK_DIM1;j++){ for(k=0;k<IO_NETWORK_DIM2;k++){ //Get neighbors' voltage influence n = 0; for(p=j-1;p<=j+1;p++){ for(q=k-1;q<=k+1;q++){ if(((p!=j)||(q!=k)) && ((p>=0)&&(q>=0)) && ((p<IO_NETWORK_DIM1)&&(q<IO_NETWORK_DIM2))){ cellCompParamsPtr[j][k].neighVdend[n++] = cellStatePtr[i%2][p][q].dend.V_dend; }else if(p==j && q==k){ /** <<<<<<< THIS EXCEPTION FIXES THE BUG */ ; // do nothing, this is the cell itself } else{ //store same V_dend so that Ic becomes zero by the subtraction cellCompParamsPtr[j][k].neighVdend[n++] = cellStatePtr[i%2][j][k].dend.V_dend; } } } cellCompParamsPtr[j][k].iAppIn = iApp; cellCompParamsPtr[j][k].prevCellState = &cellStatePtr[i%2][j][k]; cellCompParamsPtr[j][k].newCellState = &cellStatePtr[(i%2)^1][j][k]; //Compute one Cell... ComputeOneCell(&cellCompParamsPtr[j][k]); //Store results //printf("V_dend, V_soma and V_axon at simStep %d are\t: %.8f\t %.8f\t%.8f\n", i, cellStatePtr[(i%2)^1][j][k].dend.V_dend, cellStatePtr[(i%2)^1][j][k].soma.V_soma, cellStatePtr[(i%2)^1][j][k].axon.V_axon); //sprintf(temp, "%d %.3f %.3f %.8f\n", i, (float)i/20000, cellCompParamsPtr[j][k].iAppIn, cellStatePtr[(i%2)^1][j][k].axon.V_axon); sprintf(temp, "%.8f ", cellStatePtr[(i%2)^1][j][k].axon.V_axon); fputs(temp, pOutFile); } } sprintf(temp, "\n"); fputs(temp, pOutFile); } } t1 = get_timestamp(); secs = (t1 - t0);// / 1000000; printf("%d ms of brain time in %d simulation steps\n", simTime, simSteps); printf(" %lld usecs real time \n", secs); //Free up memory and close files free(cellStatePtr[0]); free(cellStatePtr[1]); free(cellStatePtr); free(cellCompParamsPtr); free(iAppBuf); fclose (pOutFile); if(inputFromFile){ fclose (pInFile);} return EXIT_SUCCESS; } void ComputeOneCell(cellCompParams *cellCompParamsPtr){ //The three compartments can be computed concurrently but only across a single sim step CompDend(cellCompParamsPtr); CompSoma(cellCompParamsPtr); CompAxon(cellCompParamsPtr); return; } void CompDend(cellCompParams *cellCompParamsPtr){ struct channelParams chPrms; struct dendCurrVoltPrms chComps; //printf("Dendrite "); //Prepare pointers to inputs/outputs chPrms.v = &cellCompParamsPtr->prevCellState->dend.V_dend; chPrms.prevComp1 = &cellCompParamsPtr->prevCellState->dend.Hcurrent_q; chPrms.newComp1 = &cellCompParamsPtr->newCellState->dend.Hcurrent_q; //Compute DendHCurr(&chPrms); //Prepare pointers to inputs/outputs chPrms.v = &cellCompParamsPtr->prevCellState->dend.V_dend; chPrms.prevComp1 = &cellCompParamsPtr->prevCellState->dend.Calcium_r; chPrms.newComp1 = &cellCompParamsPtr->newCellState->dend.Calcium_r; //Compute DendCaCurr(&chPrms); //Prepare pointers to inputs/outputs chPrms.prevComp1 = 
&cellCompParamsPtr->prevCellState->dend.Potassium_s; chPrms.prevComp2 = &cellCompParamsPtr->prevCellState->dend.Ca2Plus; chPrms.newComp1 = &cellCompParamsPtr->newCellState->dend.Potassium_s; //Compute DendKCurr(&chPrms); //Prepare pointers to inputs/outputs chPrms.prevComp1 = &cellCompParamsPtr->prevCellState->dend.Ca2Plus; chPrms.prevComp2 = &cellCompParamsPtr->prevCellState->dend.I_CaH; chPrms.newComp1 = &cellCompParamsPtr->newCellState->dend.Ca2Plus; //Compute DendCal(&chPrms); chComps.iC = IcNeighbors(cellCompParamsPtr->neighVdend, cellCompParamsPtr->prevCellState->dend.V_dend); chComps.iApp = &cellCompParamsPtr->iAppIn; chComps.vDend = &cellCompParamsPtr->prevCellState->dend.V_dend; chComps.newVDend = &cellCompParamsPtr->newCellState->dend.V_dend; chComps.vSoma = &cellCompParamsPtr->prevCellState->soma.V_soma; chComps.q = &cellCompParamsPtr->newCellState->dend.Hcurrent_q; chComps.r = &cellCompParamsPtr->newCellState->dend.Calcium_r; chComps.s = &cellCompParamsPtr->newCellState->dend.Potassium_s; chComps.newI_CaH = &cellCompParamsPtr->newCellState->dend.I_CaH; DendCurrVolt(&chComps); return; } void DendHCurr(struct channelParams *chPrms){ mod_prec q_inf, tau_q, dq_dt, q_local; //Get inputs mod_prec prevV_dend = *chPrms->v; mod_prec prevHcurrent_q = *chPrms->prevComp1; // Update dendritic H current component q_inf = 1 /(1 + exp((prevV_dend + 80) / 4)); tau_q = 1 /(exp(-0.086 * prevV_dend - 14.6) + exp(0.070 * prevV_dend - 1.87)); dq_dt = (q_inf - prevHcurrent_q) / tau_q; q_local = DELTA * dq_dt + prevHcurrent_q; //Put result *chPrms->newComp1 = q_local; return; } void DendCaCurr(struct channelParams *chPrms){ mod_prec alpha_r, beta_r, r_inf, tau_r, dr_dt, r_local; //Get inputs mod_prec prevV_dend = *chPrms->v; mod_prec prevCalcium_r = *chPrms->prevComp1; // Update dendritic high-threshold Ca current component alpha_r = 1.7 / (1 + exp( -(prevV_dend - 5) / 13.9)); beta_r = 0.02 * (prevV_dend + 8.5) / (exp((prevV_dend + 8.5) / 5) - 1); r_inf = alpha_r / (alpha_r + beta_r); tau_r = 5 / (alpha_r + beta_r); dr_dt = (r_inf - prevCalcium_r) / tau_r; r_local = DELTA * dr_dt + prevCalcium_r; //Put result *chPrms->newComp1 = r_local; return; } void DendKCurr(struct channelParams *chPrms){ mod_prec alpha_s, beta_s, s_inf, tau_s, ds_dt, s_local; //Get inputs mod_prec prevPotassium_s = *chPrms->prevComp1; mod_prec prevCa2Plus = *chPrms->prevComp2; // Update dendritic Ca-dependent K current component alpha_s = min((0.00002*prevCa2Plus), 0.01); beta_s = 0.015; s_inf = alpha_s / (alpha_s + beta_s); tau_s = 1 / (alpha_s + beta_s); ds_dt = (s_inf - prevPotassium_s) / tau_s; s_local = DELTA * ds_dt + prevPotassium_s; //Put result *chPrms->newComp1 = s_local; return; } //Consider merging DendCal into DendKCurr since DendCal's output doesn't go to DendCurrVolt but to DendKCurr void DendCal(struct channelParams *chPrms){ mod_prec dCa_dt, Ca2Plus_local; //Get inputs mod_prec prevCa2Plus = *chPrms->prevComp1; mod_prec prevI_CaH = *chPrms->prevComp2; // update Calcium concentration dCa_dt = -3 * prevI_CaH - 0.075 * prevCa2Plus; Ca2Plus_local = DELTA * dCa_dt + prevCa2Plus; //Put result *chPrms->newComp1 = Ca2Plus_local;//This state value is read in DendKCurr return; } void DendCurrVolt(struct dendCurrVoltPrms *chComps){ //Loca variables mod_prec I_sd, I_CaH, I_K_Ca, I_ld, I_h, dVd_dt; //Get inputs mod_prec I_c = chComps->iC; mod_prec I_app = *chComps->iApp; mod_prec prevV_dend = *chComps->vDend; mod_prec prevV_soma = *chComps->vSoma; mod_prec q = *chComps->q; mod_prec r = *chComps->r; mod_prec s = *chComps->s; 
// DENDRITIC CURRENTS // Soma-dendrite interaction current I_sd I_sd = (G_INT / (1 - P1)) * (prevV_dend - prevV_soma); // Inward high-threshold Ca current I_CaH I_CaH = G_CAH * r * r * (prevV_dend - V_CA); // Outward Ca-dependent K current I_K_Ca I_K_Ca = G_K_CA * s * (prevV_dend - V_K); // Leakage current I_ld I_ld = G_LD * (prevV_dend - V_L); // Inward anomalous rectifier I_h I_h = G_H * q * (prevV_dend - V_H); dVd_dt = (-(I_CaH + I_sd + I_ld + I_K_Ca + I_c + I_h) + I_app) / C_M; //Put result (update V_dend) *chComps->newVDend = DELTA * dVd_dt + prevV_dend; *chComps->newI_CaH = I_CaH;//This is a state value read in DendCal return; } mod_prec IcNeighbors(mod_prec *neighVdend, mod_prec prevV_dend){ int i; mod_prec f, V, I_c; //printf("Ic[0]= %f\n", neighVdend[0]); I_c = 0; for(i=0;i<8;i++){ V = prevV_dend - neighVdend[i]; f = 0.8 * exp(-1*pow(V, 2)/100) + 0.2; // SCHWEIGHOFER 2004 VERSION I_c = I_c + (CONDUCTANCE * f * V); } return I_c; } void CompSoma(cellCompParams *cellCompParamsPtr){ struct channelParams chPrms; struct somaCurrVoltPrms chComps; // update somatic components // SCHWEIGHOFER: //Prepare pointers to inputs/outputs chPrms.v = &cellCompParamsPtr->prevCellState->soma.V_soma; chPrms.prevComp1 = &cellCompParamsPtr->prevCellState->soma.Calcium_k; chPrms.prevComp2 = &cellCompParamsPtr->prevCellState->soma.Calcium_l; chPrms.newComp1 = &cellCompParamsPtr->newCellState->soma.Calcium_k; chPrms.newComp2 = &cellCompParamsPtr->newCellState->soma.Calcium_l; //Compute SomaCalcium(&chPrms); //Prepare pointers to inputs/outputs chPrms.v = &cellCompParamsPtr->prevCellState->soma.V_soma; chPrms.prevComp1 = &cellCompParamsPtr->prevCellState->soma.Sodium_m; chPrms.prevComp2 = &cellCompParamsPtr->prevCellState->soma.Sodium_h; chPrms.newComp1 = &cellCompParamsPtr->newCellState->soma.Sodium_m; chPrms.newComp2 = &cellCompParamsPtr->newCellState->soma.Sodium_h; //Compute SomaSodium(&chPrms); //Prepare pointers to inputs/outputs chPrms.v = &cellCompParamsPtr->prevCellState->soma.V_soma; chPrms.prevComp1 = &cellCompParamsPtr->prevCellState->soma.Potassium_n; chPrms.prevComp2 = &cellCompParamsPtr->prevCellState->soma.Potassium_p; chPrms.newComp1 = &cellCompParamsPtr->newCellState->soma.Potassium_n; chPrms.newComp2 = &cellCompParamsPtr->newCellState->soma.Potassium_p; //Compute SomaPotassium(&chPrms); //Prepare pointers to inputs/outputs chPrms.v = &cellCompParamsPtr->prevCellState->soma.V_soma; chPrms.prevComp1 = &cellCompParamsPtr->prevCellState->soma.Potassium_x_s; chPrms.newComp1 = &cellCompParamsPtr->newCellState->soma.Potassium_x_s; //Compute SomaPotassiumX(&chPrms); chComps.g_CaL = &cellCompParamsPtr->prevCellState->soma.g_CaL; chComps.vDend = &cellCompParamsPtr->prevCellState->dend.V_dend; chComps.vSoma = &cellCompParamsPtr->prevCellState->soma.V_soma; chComps.newVSoma = &cellCompParamsPtr->newCellState->soma.V_soma; chComps.vAxon = &cellCompParamsPtr->prevCellState->axon.V_axon; chComps.k = &cellCompParamsPtr->newCellState->soma.Calcium_k; chComps.l = &cellCompParamsPtr->newCellState->soma.Calcium_l; chComps.m = &cellCompParamsPtr->newCellState->soma.Sodium_m; chComps.h = &cellCompParamsPtr->newCellState->soma.Sodium_h; chComps.n = &cellCompParamsPtr->newCellState->soma.Potassium_n; chComps.x_s = &cellCompParamsPtr->newCellState->soma.Potassium_x_s; SomaCurrVolt(&chComps); return; } void SomaCalcium(struct channelParams *chPrms){ mod_prec k_inf, l_inf, tau_k, tau_l, dk_dt, dl_dt, k_local, l_local; //Get inputs mod_prec prevV_soma = *chPrms->v; mod_prec prevCalcium_k = *chPrms->prevComp1; 
mod_prec prevCalcium_l = *chPrms->prevComp2; k_inf = (1 / (1 + exp(-1 * (prevV_soma + 61) / 4.2))); l_inf = (1 / (1 + exp(( prevV_soma + 85.5) / 8.5))); tau_k = 1; tau_l = ((20 * exp((prevV_soma + 160) / 30) / (1 + exp((prevV_soma + 84) / 7.3))) +35); dk_dt = (k_inf - prevCalcium_k) / tau_k; dl_dt = (l_inf - prevCalcium_l) / tau_l; k_local = DELTA * dk_dt + prevCalcium_k; l_local = DELTA * dl_dt + prevCalcium_l; //Put result *chPrms->newComp1= k_local; *chPrms->newComp2= l_local; return; } void SomaSodium(struct channelParams *chPrms){ mod_prec m_inf, h_inf, tau_h, dh_dt, m_local, h_local; //Get inputs mod_prec prevV_soma = *chPrms->v; //mod_prec prevSodium_m = *chPrms->prevComp1; mod_prec prevSodium_h = *chPrms->prevComp2; // RAT THALAMOCORTICAL SODIUM: m_inf = 1 / (1 + (exp((-30 - prevV_soma)/ 5.5))); h_inf = 1 / (1 + (exp((-70 - prevV_soma)/-5.8))); tau_h = 3 * exp((-40 - prevV_soma)/33); dh_dt = (h_inf - prevSodium_h)/tau_h; m_local = m_inf; h_local = prevSodium_h + DELTA * dh_dt; //Put result *chPrms->newComp1 = m_local; *chPrms->newComp2 = h_local; return; } void SomaPotassium(struct channelParams *chPrms){ mod_prec n_inf, p_inf, tau_n, tau_p, dn_dt, dp_dt, n_local, p_local; //Get inputs mod_prec prevV_soma = *chPrms->v; mod_prec prevPotassium_n = *chPrms->prevComp1; mod_prec prevPotassium_p = *chPrms->prevComp2; // NEOCORTICAL n_inf = 1 / (1 + exp( ( -3 - prevV_soma) / 10)); p_inf = 1 / (1 + exp( (-51 - prevV_soma) / -12)); tau_n = 5 + ( 47 * exp( -(-50 - prevV_soma) / 900)); tau_p = tau_n; dn_dt = (n_inf - prevPotassium_n) / tau_n; dp_dt = (p_inf - prevPotassium_p) / tau_p; n_local = DELTA * dn_dt + prevPotassium_n; p_local = DELTA * dp_dt + prevPotassium_p; //Put result *chPrms->newComp1 = n_local; *chPrms->newComp2 = p_local; return; } void SomaPotassiumX(struct channelParams *chPrms){ mod_prec alpha_x_s, beta_x_s, x_inf_s, tau_x_s, dx_dt_s, x_s_local; //Get inputs mod_prec prevV_soma = *chPrms->v; mod_prec prevPotassium_x_s = *chPrms->prevComp1; // Voltage-dependent (fast) potassium alpha_x_s = 0.13 * (prevV_soma + 25) / (1 - exp(-(prevV_soma + 25) / 10)); beta_x_s = 1.69 * exp(-0.0125 * (prevV_soma + 35)); x_inf_s = alpha_x_s / (alpha_x_s + beta_x_s); tau_x_s = 1 / (alpha_x_s + beta_x_s); dx_dt_s = (x_inf_s - prevPotassium_x_s) / tau_x_s; x_s_local = 0.05 * dx_dt_s + prevPotassium_x_s; //Put result *chPrms->newComp1 = x_s_local; return; } void SomaCurrVolt(struct somaCurrVoltPrms *chComps){ //Local variables mod_prec I_ds, I_CaL, I_Na_s, I_ls, I_Kdr_s, I_K_s, I_as, dVs_dt; //Get inputs mod_prec g_CaL = *chComps->g_CaL; mod_prec prevV_dend = *chComps->vDend; mod_prec prevV_soma = *chComps->vSoma; mod_prec prevV_axon = *chComps->vAxon; mod_prec k = *chComps->k; mod_prec l = *chComps->l; mod_prec m = *chComps->m; mod_prec h = *chComps->h; mod_prec n = *chComps->n; mod_prec x_s = *chComps->x_s; // SOMATIC CURRENTS // Dendrite-soma interaction current I_ds I_ds = (G_INT / P1) * (prevV_soma - prevV_dend); // Inward low-threshold Ca current I_CaL I_CaL = g_CaL * k * k * k * l * (prevV_soma - V_CA); //k^3 // Inward Na current I_Na_s I_Na_s = G_NA_S * m * m * m * h * (prevV_soma - V_NA); // Leakage current I_ls I_ls = G_LS * (prevV_soma - V_L); // Outward delayed potassium current I_Kdr I_Kdr_s = G_KDR_S * n * n * n * n * (prevV_soma - V_K); // SCHWEIGHOFER // I_K_s I_K_s = G_K_S * pow(x_s, 4) * (prevV_soma - V_K); // Axon-soma interaction current I_as I_as = (G_INT / (1 - P2)) * (prevV_soma - prevV_axon); dVs_dt = (-(I_CaL + I_ds + I_as + I_Na_s + I_ls + I_Kdr_s + I_K_s)) / C_M; 
*chComps->newVSoma = DELTA * dVs_dt + prevV_soma; return; } void CompAxon(cellCompParams *cellCompParamsPtr){ struct channelParams chPrms; struct axonCurrVoltPrms chComps; // update somatic components // SCHWEIGHOFER: //Prepare pointers to inputs/outputs chPrms.v = &cellCompParamsPtr->prevCellState->axon.V_axon; chPrms.prevComp1 = &cellCompParamsPtr->prevCellState->axon.Sodium_h_a; chPrms.newComp1 = &cellCompParamsPtr->newCellState->axon.Sodium_h_a; chPrms.newComp2 = &cellCompParamsPtr->newCellState->axon.Sodium_m_a; //Compute AxonSodium(&chPrms); //Prepare pointers to inputs/outputs chPrms.v = &cellCompParamsPtr->prevCellState->axon.V_axon; chPrms.prevComp1 = &cellCompParamsPtr->prevCellState->axon.Potassium_x_a; chPrms.newComp1 = &cellCompParamsPtr->newCellState->axon.Potassium_x_a; //Compute AxonPotassium(&chPrms); //Get inputs chComps.vSoma = &cellCompParamsPtr->prevCellState->soma.V_soma; chComps.vAxon = &cellCompParamsPtr->prevCellState->axon.V_axon; chComps.newVAxon = &cellCompParamsPtr->newCellState->axon.V_axon; chComps.m_a = &cellCompParamsPtr->newCellState->axon.Sodium_m_a; chComps.h_a = &cellCompParamsPtr->newCellState->axon.Sodium_h_a; chComps.x_a = &cellCompParamsPtr->newCellState->axon.Potassium_x_a; AxonCurrVolt(&chComps); return; } void AxonSodium(struct channelParams *chPrms){ mod_prec m_inf_a, h_inf_a, tau_h_a, dh_dt_a, m_a_local, h_a_local; //Get inputs mod_prec prevV_axon = *chPrms->v; mod_prec prevSodium_h_a = *chPrms->prevComp1; // Update axonal Na components // NOTE: current has shortened inactivation to account for high // firing frequencies in axon hillock m_inf_a = 1 / (1 + (exp((-30 - prevV_axon)/ 5.5))); h_inf_a = 1 / (1 + (exp((-60 - prevV_axon)/-5.8))); tau_h_a = 1.5 * exp((-40 - prevV_axon)/33); dh_dt_a = (h_inf_a - prevSodium_h_a)/tau_h_a; m_a_local = m_inf_a; h_a_local = prevSodium_h_a + DELTA * dh_dt_a; //Put result *chPrms->newComp1 = h_a_local; *chPrms->newComp2 = m_a_local; return; } void AxonPotassium(struct channelParams *chPrms){ mod_prec alpha_x_a, beta_x_a, x_inf_a, tau_x_a, dx_dt_a, x_a_local; //Get inputs mod_prec prevV_axon = *chPrms->v; mod_prec prevPotassium_x_a = *chPrms->prevComp1; // D'ANGELO 2001 -- Voltage-dependent potassium alpha_x_a = 0.13 * (prevV_axon + 25) / (1 - exp(-(prevV_axon + 25) / 10)); beta_x_a = 1.69 * exp(-0.0125 * (prevV_axon + 35)); x_inf_a = alpha_x_a / (alpha_x_a + beta_x_a); tau_x_a = 1 / (alpha_x_a + beta_x_a); dx_dt_a = (x_inf_a - prevPotassium_x_a) / tau_x_a; x_a_local = 0.05 * dx_dt_a + prevPotassium_x_a; //Put result *chPrms->newComp1 = x_a_local; return; } void AxonCurrVolt(struct axonCurrVoltPrms *chComps){ //Local variable mod_prec I_Na_a, I_la, I_sa, I_K_a, dVa_dt; //Get inputs mod_prec prevV_soma = *chComps->vSoma; mod_prec prevV_axon = *chComps->vAxon; mod_prec m_a = *chComps->m_a; mod_prec h_a = *chComps->h_a; mod_prec x_a = *chComps->x_a; // AXONAL CURRENTS // Sodium I_Na_a = G_NA_A * m_a * m_a * m_a * h_a * (prevV_axon - V_NA); // Leak I_la = G_LA * (prevV_axon - V_L); // Soma-axon interaction current I_sa I_sa = (G_INT / P2) * (prevV_axon - prevV_soma); // Potassium (transient) I_K_a = G_K_A * pow(x_a, 4) * (prevV_axon - V_K); dVa_dt = (-(I_K_a + I_sa + I_la + I_Na_a)) / C_M; *chComps->newVAxon = DELTA * dVa_dt + prevV_axon; return; } void InitState(cellState **cellStatePtr){ int j, k; cellState initState; //Initial dendritic parameters initState.dend.V_dend = -60; initState.dend.Calcium_r = 0.0112788;// High-threshold calcium initState.dend.Potassium_s = 0.0049291;// Calcium-dependent potassium 
initState.dend.Hcurrent_q = 0.0337836;// H current initState.dend.Ca2Plus = 3.7152;// Calcium concentration initState.dend.I_CaH = 0.5;// High-threshold calcium current //Initial somatic parameters initState.soma.g_CaL = 0.68; //default arbitrary value but it should be randomized per cell initState.soma.V_soma = -60; initState.soma.Sodium_m = 1.0127807;// Sodium (artificial) initState.soma.Sodium_h = 0.3596066; initState.soma.Potassium_n = 0.2369847;// Potassium (delayed rectifier) initState.soma.Potassium_p = 0.2369847; initState.soma.Potassium_x_s = 0.1;// Potassium (voltage-dependent) initState.soma.Calcium_k = 0.7423159;// Low-threshold calcium initState.soma.Calcium_l = 0.0321349; // Initial axonal parameters initState.axon.V_axon = -60; //sisaza: Sodium_m_a doesn't have a state, therefore this assignment doesn'thave any effect initState.axon.Sodium_m_a = 0.003596066;// Sodium (thalamocortical) initState.axon.Sodium_h_a = 0.9; initState.axon.Potassium_x_a = 0.2369847;// Potassium (transient) //Copy init sate to all cell states for(j=0;j<IO_NETWORK_DIM1;j++){ for(k=0;k<IO_NETWORK_DIM2;k++){ memcpy(&cellStatePtr[j][k], &initState, sizeof(cellState)); } } return; } int ReadFileLine(char *iAppBuf, int iAppBufSize, FILE *pInFile, mod_prec *iAppArray){ //FIXME: make this function more robust char *strNumber; int i = 0; //Get one line if(fgets(iAppBuf, iAppBufSize, pInFile)){ //Convert the ASCII string of one element to a double precision floating point value strNumber = strtok(iAppBuf," "); i = 0; //printf("Line:\n"); while ((strNumber != NULL) && (i<IO_NETWORK_SIZE)){ iAppArray[i] = atof(strNumber);//atof() should change if using integers or fixed point //printf ("(%s) %0.2f ", strNumber, iAppArray[i]); strNumber = strtok(NULL, " "); i++; } //printf("i: %d\n", i); if(i<IO_NETWORK_SIZE){ //BUG: if only one element is missing but the line ends in a space, the error is not detected printf("Error: Input line doesn't have enough elements, only %d\n", i); exit(EXIT_FAILURE); } return 1;//success }else{ if(!feof(pInFile)){ printf("Error: Reading from input file didn't finish successfully\n"); exit(EXIT_FAILURE); } return 0;//end of file } } inline mod_prec min(mod_prec a, mod_prec b){ return (a < b) ? a : b; }
5ec1c2b7c334fc045049efbd37fe1b731aa03fce.cu
/* * * Copyright (c) 2012, Neurasmus B.V., The Netherlands, * web: www.neurasmus.com email: [email protected] * * Any use or reproduction in whole or in parts is prohibited * without the written consent of the copyright owner. * * All Rights Reserved. * * * Author: Sebastian Isaza * Created: 19-01-2012 * Modified: 07-08-2012 * * Description: Top source file of the Inferior Olive model, originally written * in Matlab by Jornt De Gruijl. It contains the implementation of all functions. * The main function allocates the necessary memory, initializes the system * state and runs the model calculations. * */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> //#include <sys/time.h> #include <time.h> #include "infoli.h" #include <cuda_runtime.h> //#include "StdAfx.h" typedef unsigned long long timestamp_t; static timestamp_t get_timestamp () { struct timeval now; gettimeofday (&now, NULL); return now.tv_usec + (timestamp_t)now.tv_sec * 1000000; } __global__ void compute_cell_kernel(double *cellStatePtr, double *cellVdend) { int n = 0, p,j,k; k = blockIdx.x*blockDim.x + threadIdx.x; j = blockIdx.y*blockDim.y + threadIdx.y; for(p=j-1;p<=j+1;p++){ for(q=k-1;q<=k+1;q++){ if(((p!=j)||(q!=k)) && ((p>=0)&&(q>=0)) && ((p<IO_NETWORK_DIM1)&&(q<IO_NETWORK_DIM2))){ cellCompParamsPtr[j][k].neighVdend[n++] = cellVdend[i%2][p][q]; }else if(p==j && q==k){ /** <<<<<<< THIS EXCEPTION FIXES THE BUG */ ; // do nothing, this is the cell itself } else{ //store same V_dend so that Ic becomes zero by the subtraction cellCompParamsPtr[j][k].neighVdend[n++] = cellVdend[i%2][j][k]; } } } cellCompParamsPtr[j][k].iAppIn = iApp; cellCompParamsPtr[j][k].prevCellState = &cellStatePtr[i%2][j][k]; cellCompParamsPtr[j][k].newCellState = &cellStatePtr[(i%2)^1][j][k]; //Compute one Cell... ComputeOneCell(&cellCompParamsPtr[j][k]); } int main(int argc, char *argv[]){ char *inFileName; char *outFileName = "InferiorOlive_Output.txt"; FILE *pInFile; FILE *pOutFile; char *iAppBuf; const int iAppBufSize = IAPP_MAX_CHARS*IO_NETWORK_DIM1*IO_NETWORK_DIM2+1; mod_prec iAppArray[IO_NETWORK_SIZE]; int i, j, k, p, q, n; int simSteps = 0; int simTime = 0; int inputFromFile = 0; int initSteps; double *cellStatePtr; cellCompParams **cellCompParamsPtr; int seedvar; char temp[100];//warning: this buffer may overflow mod_prec iApp; timestamp_t t0, t1, secs; //double secs; printf("Inferior Olive Model (%d x %d cell mesh)\n", IO_NETWORK_DIM1, IO_NETWORK_DIM2); //Process command line arguments if(argc == 1){ inputFromFile = 0; printf("Warning: No input file has been specified. 
A one-pulse input will be used.\n"); }else if(argc == 2){ inputFromFile = 1; inFileName = argv[1];//comment out for a hardcoded name pInFile = fopen(inFileName,"r"); if(pInFile==NULL){ printf("Error: Couldn't open %s\n", inFileName); exit(EXIT_FAILURE); } }else{ printf("Error: Too many arguments.\nUsage: ./InferiorOlive <Iapp_input_file> or ./InferiorOlive\n"); exit(EXIT_FAILURE); } //Open output file pOutFile = fopen(outFileName,"w"); if(pOutFile==NULL){ printf("Error: Couldn't create %s\n", outFileName); exit(EXIT_FAILURE); } sprintf(temp, "#simSteps Time(ms) Input(Iapp) Output(V_axon)\n"); fputs(temp, pOutFile); //Malloc for iAppBuffer holding iApp arrays, one 2D array (a single line in the file though) at the time printf("Malloc'ing memory...\n"); printf("iAppBuf: %dB\n", iAppBufSize); iAppBuf = (char *)malloc(iAppBufSize); if(iAppBuf==NULL){ printf("Error: Couldn't malloc for iAppBuf\n"); exit(EXIT_FAILURE); } //Malloc for the array of cellStates and cellCompParams printf("cellStatePtr: %dB\n", 2*IO_NETWORK_SIZE*sizeof(cellState)); //Two cell state structs are needed so as to avoid having to synchronize all consumers before they start rewriting the cell state. cellStatePtr = malloc(IO_NETWORK_DIM1*IO_NETWORK_DIM2*PARAM_SIZE*sizeof(double));//current and next state if(cellStatePtr==NULL){ printf("Error: Couldn't malloc for cellStatePtr\n"); exit(EXIT_FAILURE); } /* cellStatePtr[0] = malloc(IO_NETWORK_DIM1*sizeof(cellState *)); if(cellStatePtr[0]==NULL){ printf("Error: Couldn't malloc for cellStatePtr[0]\n"); exit(EXIT_FAILURE); } for(k=0;k<IO_NETWORK_DIM1;k++){ cellStatePtr[0][k] = malloc(IO_NETWORK_DIM2*sizeof(cellState)); if(cellStatePtr[0][k]==NULL){ printf("Error: Couldn't malloc for cellStatePtr[0][k]\n"); exit(EXIT_FAILURE); } } cellStatePtr[1] = malloc(IO_NETWORK_DIM1*sizeof(cellState)); if(cellStatePtr[1]==NULL){ printf("Error: Couldn't malloc for cellStatePt[1]r\n"); exit(EXIT_FAILURE); } for(k=0;k<IO_NETWORK_DIM1;k++){ cellStatePtr[1][k] = malloc(IO_NETWORK_DIM2*sizeof(cellState)); if(cellStatePtr[1][k]==NULL){ printf("Error: Couldn't malloc for cellStatePtr[1][k]\n"); exit(EXIT_FAILURE); } } */ printf("cellCompParamsPtr: %dB\n", IO_NETWORK_SIZE*sizeof(cellCompParams)); cellCompParamsPtr = malloc(IO_NETWORK_DIM1*sizeof(cellCompParams *)); if(cellCompParamsPtr==NULL){ printf("Error: Couldn't malloc for cellCompParamsPtr\n"); exit(EXIT_FAILURE); } for(k=0;k<IO_NETWORK_DIM1;k++){ cellCompParamsPtr[k] = malloc(IO_NETWORK_DIM2*sizeof(cellCompParams)); if(cellCompParamsPtr[k]==NULL){ printf("Error: Couldn't malloc for cellCompParamsPtr[k]\n"); exit(EXIT_FAILURE); } } //Write initial state values //InitState(cellStatePtr[0]); for(i=0;i<IO_NETWORK_SIZE;i++){ cellStatePtr[i*PARAM_SIZE + STATEADD + b] = -60;// cellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.0337836;// H current cellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.0112788;// High-threshold calciucellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.0049291;// Calcium-dependent potcellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.5;// High-threshold calcium currcellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 3.7152;// Calcium concentration cellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.68; //default arbitrary value bucellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = -60;// cellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 1.0127807;// Sodium (artificial) cellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 
0.3596066;// cellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.7423159;// Low-threshold calciumcellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.0321349;// cellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.2369847;// Potassium (delayed recellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.2369847;// cellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.1;// Potassium (voltage-dependencellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = -60;// cellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.003596066;// Sodium (thalamocortcellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.9;// cellStateInit[b]; cellStatePtr[i*PARAM_SIZE + STATEADD + b] = 0.2369847;// Potassium (transient) } //Initialize g_CaL seedvar = 1; for(j=0;j<IO_NETWORK_DIM1;j++){ for(k=0;k<IO_NETWORK_DIM2;k++){ srand(seedvar++); // use this for debugging, now there is difference cellStatePtr[1][j][k].soma.g_CaL = cellStatePtr[0][j][k].soma.g_CaL = 0.68; // Uncomment the next two lines to assign different soma conductances to each cell. //cellStatePtr[0][j][k].soma.g_CaL = 0.6+(0.2*(rand()%100)/100); //cellStatePtr[1][j][k].soma.g_CaL = cellStatePtr[0][j][k].soma.g_CaL; } } //Random initialization: put every cell in a different oscillation state if(RAND_INIT){ seedvar=1; for(j=0;j<IO_NETWORK_DIM1;j++){ for(k=0;k<IO_NETWORK_DIM2;k++){ //Put each cell at a different random state //srand(time(NULL));//Initialize random seed - Too fast when called in a loop. srand(seedvar++); // use this for debugging, now there is difference initSteps = rand()%(int)ceil(100/DELTA); initSteps = initSteps | 0x00000001;//make it odd, so that the final state is in prevCellState printf("%d iterations - ",initSteps); for(i=0;i<initSteps;i++){ //Arrange inputs cellCompParamsPtr[j][k].iAppIn = 0;//No stimulus cellCompParamsPtr[j][k].prevCellState = &cellStatePtr[i%2][j][k]; cellCompParamsPtr[j][k].newCellState = &cellStatePtr[(i%2)^1][j][k]; ComputeOneCell(&cellCompParamsPtr[j][k]); } printf("Random initialization of the cell states finished.\n"); } } } t0 = get_timestamp(); if(inputFromFile){ simSteps = 0; //Read full lines until end of file. Every iteration (line) is one simulation step. while(ReadFileLine(iAppBuf, iAppBufSize, pInFile, iAppArray)){ //Compute one sim step for all cells for(j=0;j<IO_NETWORK_DIM1;j++){ for(k=0;k<IO_NETWORK_DIM2;k++){ //Compute one Cell... 
//Arrange inputs cellCompParamsPtr[j][k].iAppIn = iAppArray[j*IO_NETWORK_DIM1+k]; cellCompParamsPtr[j][k].prevCellState = &cellStatePtr[simSteps%2][j][k]; cellCompParamsPtr[j][k].newCellState = &cellStatePtr[(simSteps%2)^1][j][k]; ComputeOneCell(&cellCompParamsPtr[j][k]); //Store results sprintf(temp, "%d %.3f %.3f %.8f\n", simSteps, (float)simSteps/20000, cellCompParamsPtr[j][k].iAppIn, cellStatePtr[(simSteps%2)^1][j][k].axon.V_axon); fputs(temp, pOutFile); } } simSteps++; } }else{ simTime = SIMTIME; // in miliseconds simSteps = ceil(simTime/DELTA); for(i=0;i<simSteps;i++){ //Compute one sim step for all cells //printf("simSteps: %d\n", i); if(i>20000-1 && i<20500-1){ iApp = 6;} // start @ 1 because skipping initial values else{ iApp = 0;} sprintf(temp, "%d %.2f %.1f ", i+1, i*0.05, iApp); // start @ 1 because skipping initial values fputs(temp, pOutFile); for(j=0;j<IO_NETWORK_DIM1;j++){ for(k=0;k<IO_NETWORK_DIM2;k++){ //Get neighbors' voltage influence n = 0; for(p=j-1;p<=j+1;p++){ for(q=k-1;q<=k+1;q++){ if(((p!=j)||(q!=k)) && ((p>=0)&&(q>=0)) && ((p<IO_NETWORK_DIM1)&&(q<IO_NETWORK_DIM2))){ cellCompParamsPtr[j][k].neighVdend[n++] = cellStatePtr[i%2][p][q].dend.V_dend; }else if(p==j && q==k){ /** <<<<<<< THIS EXCEPTION FIXES THE BUG */ ; // do nothing, this is the cell itself } else{ //store same V_dend so that Ic becomes zero by the subtraction cellCompParamsPtr[j][k].neighVdend[n++] = cellStatePtr[i%2][j][k].dend.V_dend; } } } cellCompParamsPtr[j][k].iAppIn = iApp; cellCompParamsPtr[j][k].prevCellState = &cellStatePtr[i%2][j][k]; cellCompParamsPtr[j][k].newCellState = &cellStatePtr[(i%2)^1][j][k]; //Compute one Cell... ComputeOneCell(&cellCompParamsPtr[j][k]); //Store results //printf("V_dend, V_soma and V_axon at simStep %d are\t: %.8f\t %.8f\t%.8f\n", i, cellStatePtr[(i%2)^1][j][k].dend.V_dend, cellStatePtr[(i%2)^1][j][k].soma.V_soma, cellStatePtr[(i%2)^1][j][k].axon.V_axon); //sprintf(temp, "%d %.3f %.3f %.8f\n", i, (float)i/20000, cellCompParamsPtr[j][k].iAppIn, cellStatePtr[(i%2)^1][j][k].axon.V_axon); sprintf(temp, "%.8f ", cellStatePtr[(i%2)^1][j][k].axon.V_axon); fputs(temp, pOutFile); } } sprintf(temp, "\n"); fputs(temp, pOutFile); } } t1 = get_timestamp(); secs = (t1 - t0);// / 1000000; printf("%d ms of brain time in %d simulation steps\n", simTime, simSteps); printf(" %lld usecs real time \n", secs); //Free up memory and close files free(cellStatePtr[0]); free(cellStatePtr[1]); free(cellStatePtr); free(cellCompParamsPtr); free(iAppBuf); fclose (pOutFile); if(inputFromFile){ fclose (pInFile);} return EXIT_SUCCESS; } void ComputeOneCell(cellCompParams *cellCompParamsPtr){ //The three compartments can be computed concurrently but only across a single sim step CompDend(cellCompParamsPtr); CompSoma(cellCompParamsPtr); CompAxon(cellCompParamsPtr); return; } void CompDend(cellCompParams *cellCompParamsPtr){ struct channelParams chPrms; struct dendCurrVoltPrms chComps; //printf("Dendrite "); //Prepare pointers to inputs/outputs chPrms.v = &cellCompParamsPtr->prevCellState->dend.V_dend; chPrms.prevComp1 = &cellCompParamsPtr->prevCellState->dend.Hcurrent_q; chPrms.newComp1 = &cellCompParamsPtr->newCellState->dend.Hcurrent_q; //Compute DendHCurr(&chPrms); //Prepare pointers to inputs/outputs chPrms.v = &cellCompParamsPtr->prevCellState->dend.V_dend; chPrms.prevComp1 = &cellCompParamsPtr->prevCellState->dend.Calcium_r; chPrms.newComp1 = &cellCompParamsPtr->newCellState->dend.Calcium_r; //Compute DendCaCurr(&chPrms); //Prepare pointers to inputs/outputs chPrms.prevComp1 = 
&cellCompParamsPtr->prevCellState->dend.Potassium_s; chPrms.prevComp2 = &cellCompParamsPtr->prevCellState->dend.Ca2Plus; chPrms.newComp1 = &cellCompParamsPtr->newCellState->dend.Potassium_s; //Compute DendKCurr(&chPrms); //Prepare pointers to inputs/outputs chPrms.prevComp1 = &cellCompParamsPtr->prevCellState->dend.Ca2Plus; chPrms.prevComp2 = &cellCompParamsPtr->prevCellState->dend.I_CaH; chPrms.newComp1 = &cellCompParamsPtr->newCellState->dend.Ca2Plus; //Compute DendCal(&chPrms); chComps.iC = IcNeighbors(cellCompParamsPtr->neighVdend, cellCompParamsPtr->prevCellState->dend.V_dend); chComps.iApp = &cellCompParamsPtr->iAppIn; chComps.vDend = &cellCompParamsPtr->prevCellState->dend.V_dend; chComps.newVDend = &cellCompParamsPtr->newCellState->dend.V_dend; chComps.vSoma = &cellCompParamsPtr->prevCellState->soma.V_soma; chComps.q = &cellCompParamsPtr->newCellState->dend.Hcurrent_q; chComps.r = &cellCompParamsPtr->newCellState->dend.Calcium_r; chComps.s = &cellCompParamsPtr->newCellState->dend.Potassium_s; chComps.newI_CaH = &cellCompParamsPtr->newCellState->dend.I_CaH; DendCurrVolt(&chComps); return; } void DendHCurr(struct channelParams *chPrms){ mod_prec q_inf, tau_q, dq_dt, q_local; //Get inputs mod_prec prevV_dend = *chPrms->v; mod_prec prevHcurrent_q = *chPrms->prevComp1; // Update dendritic H current component q_inf = 1 /(1 + exp((prevV_dend + 80) / 4)); tau_q = 1 /(exp(-0.086 * prevV_dend - 14.6) + exp(0.070 * prevV_dend - 1.87)); dq_dt = (q_inf - prevHcurrent_q) / tau_q; q_local = DELTA * dq_dt + prevHcurrent_q; //Put result *chPrms->newComp1 = q_local; return; } void DendCaCurr(struct channelParams *chPrms){ mod_prec alpha_r, beta_r, r_inf, tau_r, dr_dt, r_local; //Get inputs mod_prec prevV_dend = *chPrms->v; mod_prec prevCalcium_r = *chPrms->prevComp1; // Update dendritic high-threshold Ca current component alpha_r = 1.7 / (1 + exp( -(prevV_dend - 5) / 13.9)); beta_r = 0.02 * (prevV_dend + 8.5) / (exp((prevV_dend + 8.5) / 5) - 1); r_inf = alpha_r / (alpha_r + beta_r); tau_r = 5 / (alpha_r + beta_r); dr_dt = (r_inf - prevCalcium_r) / tau_r; r_local = DELTA * dr_dt + prevCalcium_r; //Put result *chPrms->newComp1 = r_local; return; } void DendKCurr(struct channelParams *chPrms){ mod_prec alpha_s, beta_s, s_inf, tau_s, ds_dt, s_local; //Get inputs mod_prec prevPotassium_s = *chPrms->prevComp1; mod_prec prevCa2Plus = *chPrms->prevComp2; // Update dendritic Ca-dependent K current component alpha_s = min((0.00002*prevCa2Plus), 0.01); beta_s = 0.015; s_inf = alpha_s / (alpha_s + beta_s); tau_s = 1 / (alpha_s + beta_s); ds_dt = (s_inf - prevPotassium_s) / tau_s; s_local = DELTA * ds_dt + prevPotassium_s; //Put result *chPrms->newComp1 = s_local; return; } //Consider merging DendCal into DendKCurr since DendCal's output doesn't go to DendCurrVolt but to DendKCurr void DendCal(struct channelParams *chPrms){ mod_prec dCa_dt, Ca2Plus_local; //Get inputs mod_prec prevCa2Plus = *chPrms->prevComp1; mod_prec prevI_CaH = *chPrms->prevComp2; // update Calcium concentration dCa_dt = -3 * prevI_CaH - 0.075 * prevCa2Plus; Ca2Plus_local = DELTA * dCa_dt + prevCa2Plus; //Put result *chPrms->newComp1 = Ca2Plus_local;//This state value is read in DendKCurr return; } void DendCurrVolt(struct dendCurrVoltPrms *chComps){ //Loca variables mod_prec I_sd, I_CaH, I_K_Ca, I_ld, I_h, dVd_dt; //Get inputs mod_prec I_c = chComps->iC; mod_prec I_app = *chComps->iApp; mod_prec prevV_dend = *chComps->vDend; mod_prec prevV_soma = *chComps->vSoma; mod_prec q = *chComps->q; mod_prec r = *chComps->r; mod_prec s = *chComps->s; 
// DENDRITIC CURRENTS // Soma-dendrite interaction current I_sd I_sd = (G_INT / (1 - P1)) * (prevV_dend - prevV_soma); // Inward high-threshold Ca current I_CaH I_CaH = G_CAH * r * r * (prevV_dend - V_CA); // Outward Ca-dependent K current I_K_Ca I_K_Ca = G_K_CA * s * (prevV_dend - V_K); // Leakage current I_ld I_ld = G_LD * (prevV_dend - V_L); // Inward anomalous rectifier I_h I_h = G_H * q * (prevV_dend - V_H); dVd_dt = (-(I_CaH + I_sd + I_ld + I_K_Ca + I_c + I_h) + I_app) / C_M; //Put result (update V_dend) *chComps->newVDend = DELTA * dVd_dt + prevV_dend; *chComps->newI_CaH = I_CaH;//This is a state value read in DendCal return; } mod_prec IcNeighbors(mod_prec *neighVdend, mod_prec prevV_dend){ int i; mod_prec f, V, I_c; //printf("Ic[0]= %f\n", neighVdend[0]); I_c = 0; for(i=0;i<8;i++){ V = prevV_dend - neighVdend[i]; f = 0.8 * exp(-1*pow(V, 2)/100) + 0.2; // SCHWEIGHOFER 2004 VERSION I_c = I_c + (CONDUCTANCE * f * V); } return I_c; } void CompSoma(cellCompParams *cellCompParamsPtr){ struct channelParams chPrms; struct somaCurrVoltPrms chComps; // update somatic components // SCHWEIGHOFER: //Prepare pointers to inputs/outputs chPrms.v = &cellCompParamsPtr->prevCellState->soma.V_soma; chPrms.prevComp1 = &cellCompParamsPtr->prevCellState->soma.Calcium_k; chPrms.prevComp2 = &cellCompParamsPtr->prevCellState->soma.Calcium_l; chPrms.newComp1 = &cellCompParamsPtr->newCellState->soma.Calcium_k; chPrms.newComp2 = &cellCompParamsPtr->newCellState->soma.Calcium_l; //Compute SomaCalcium(&chPrms); //Prepare pointers to inputs/outputs chPrms.v = &cellCompParamsPtr->prevCellState->soma.V_soma; chPrms.prevComp1 = &cellCompParamsPtr->prevCellState->soma.Sodium_m; chPrms.prevComp2 = &cellCompParamsPtr->prevCellState->soma.Sodium_h; chPrms.newComp1 = &cellCompParamsPtr->newCellState->soma.Sodium_m; chPrms.newComp2 = &cellCompParamsPtr->newCellState->soma.Sodium_h; //Compute SomaSodium(&chPrms); //Prepare pointers to inputs/outputs chPrms.v = &cellCompParamsPtr->prevCellState->soma.V_soma; chPrms.prevComp1 = &cellCompParamsPtr->prevCellState->soma.Potassium_n; chPrms.prevComp2 = &cellCompParamsPtr->prevCellState->soma.Potassium_p; chPrms.newComp1 = &cellCompParamsPtr->newCellState->soma.Potassium_n; chPrms.newComp2 = &cellCompParamsPtr->newCellState->soma.Potassium_p; //Compute SomaPotassium(&chPrms); //Prepare pointers to inputs/outputs chPrms.v = &cellCompParamsPtr->prevCellState->soma.V_soma; chPrms.prevComp1 = &cellCompParamsPtr->prevCellState->soma.Potassium_x_s; chPrms.newComp1 = &cellCompParamsPtr->newCellState->soma.Potassium_x_s; //Compute SomaPotassiumX(&chPrms); chComps.g_CaL = &cellCompParamsPtr->prevCellState->soma.g_CaL; chComps.vDend = &cellCompParamsPtr->prevCellState->dend.V_dend; chComps.vSoma = &cellCompParamsPtr->prevCellState->soma.V_soma; chComps.newVSoma = &cellCompParamsPtr->newCellState->soma.V_soma; chComps.vAxon = &cellCompParamsPtr->prevCellState->axon.V_axon; chComps.k = &cellCompParamsPtr->newCellState->soma.Calcium_k; chComps.l = &cellCompParamsPtr->newCellState->soma.Calcium_l; chComps.m = &cellCompParamsPtr->newCellState->soma.Sodium_m; chComps.h = &cellCompParamsPtr->newCellState->soma.Sodium_h; chComps.n = &cellCompParamsPtr->newCellState->soma.Potassium_n; chComps.x_s = &cellCompParamsPtr->newCellState->soma.Potassium_x_s; SomaCurrVolt(&chComps); return; } void SomaCalcium(struct channelParams *chPrms){ mod_prec k_inf, l_inf, tau_k, tau_l, dk_dt, dl_dt, k_local, l_local; //Get inputs mod_prec prevV_soma = *chPrms->v; mod_prec prevCalcium_k = *chPrms->prevComp1; 
mod_prec prevCalcium_l = *chPrms->prevComp2; k_inf = (1 / (1 + exp(-1 * (prevV_soma + 61) / 4.2))); l_inf = (1 / (1 + exp(( prevV_soma + 85.5) / 8.5))); tau_k = 1; tau_l = ((20 * exp((prevV_soma + 160) / 30) / (1 + exp((prevV_soma + 84) / 7.3))) +35); dk_dt = (k_inf - prevCalcium_k) / tau_k; dl_dt = (l_inf - prevCalcium_l) / tau_l; k_local = DELTA * dk_dt + prevCalcium_k; l_local = DELTA * dl_dt + prevCalcium_l; //Put result *chPrms->newComp1= k_local; *chPrms->newComp2= l_local; return; } void SomaSodium(struct channelParams *chPrms){ mod_prec m_inf, h_inf, tau_h, dh_dt, m_local, h_local; //Get inputs mod_prec prevV_soma = *chPrms->v; //mod_prec prevSodium_m = *chPrms->prevComp1; mod_prec prevSodium_h = *chPrms->prevComp2; // RAT THALAMOCORTICAL SODIUM: m_inf = 1 / (1 + (exp((-30 - prevV_soma)/ 5.5))); h_inf = 1 / (1 + (exp((-70 - prevV_soma)/-5.8))); tau_h = 3 * exp((-40 - prevV_soma)/33); dh_dt = (h_inf - prevSodium_h)/tau_h; m_local = m_inf; h_local = prevSodium_h + DELTA * dh_dt; //Put result *chPrms->newComp1 = m_local; *chPrms->newComp2 = h_local; return; } void SomaPotassium(struct channelParams *chPrms){ mod_prec n_inf, p_inf, tau_n, tau_p, dn_dt, dp_dt, n_local, p_local; //Get inputs mod_prec prevV_soma = *chPrms->v; mod_prec prevPotassium_n = *chPrms->prevComp1; mod_prec prevPotassium_p = *chPrms->prevComp2; // NEOCORTICAL n_inf = 1 / (1 + exp( ( -3 - prevV_soma) / 10)); p_inf = 1 / (1 + exp( (-51 - prevV_soma) / -12)); tau_n = 5 + ( 47 * exp( -(-50 - prevV_soma) / 900)); tau_p = tau_n; dn_dt = (n_inf - prevPotassium_n) / tau_n; dp_dt = (p_inf - prevPotassium_p) / tau_p; n_local = DELTA * dn_dt + prevPotassium_n; p_local = DELTA * dp_dt + prevPotassium_p; //Put result *chPrms->newComp1 = n_local; *chPrms->newComp2 = p_local; return; } void SomaPotassiumX(struct channelParams *chPrms){ mod_prec alpha_x_s, beta_x_s, x_inf_s, tau_x_s, dx_dt_s, x_s_local; //Get inputs mod_prec prevV_soma = *chPrms->v; mod_prec prevPotassium_x_s = *chPrms->prevComp1; // Voltage-dependent (fast) potassium alpha_x_s = 0.13 * (prevV_soma + 25) / (1 - exp(-(prevV_soma + 25) / 10)); beta_x_s = 1.69 * exp(-0.0125 * (prevV_soma + 35)); x_inf_s = alpha_x_s / (alpha_x_s + beta_x_s); tau_x_s = 1 / (alpha_x_s + beta_x_s); dx_dt_s = (x_inf_s - prevPotassium_x_s) / tau_x_s; x_s_local = 0.05 * dx_dt_s + prevPotassium_x_s; //Put result *chPrms->newComp1 = x_s_local; return; } void SomaCurrVolt(struct somaCurrVoltPrms *chComps){ //Local variables mod_prec I_ds, I_CaL, I_Na_s, I_ls, I_Kdr_s, I_K_s, I_as, dVs_dt; //Get inputs mod_prec g_CaL = *chComps->g_CaL; mod_prec prevV_dend = *chComps->vDend; mod_prec prevV_soma = *chComps->vSoma; mod_prec prevV_axon = *chComps->vAxon; mod_prec k = *chComps->k; mod_prec l = *chComps->l; mod_prec m = *chComps->m; mod_prec h = *chComps->h; mod_prec n = *chComps->n; mod_prec x_s = *chComps->x_s; // SOMATIC CURRENTS // Dendrite-soma interaction current I_ds I_ds = (G_INT / P1) * (prevV_soma - prevV_dend); // Inward low-threshold Ca current I_CaL I_CaL = g_CaL * k * k * k * l * (prevV_soma - V_CA); //k^3 // Inward Na current I_Na_s I_Na_s = G_NA_S * m * m * m * h * (prevV_soma - V_NA); // Leakage current I_ls I_ls = G_LS * (prevV_soma - V_L); // Outward delayed potassium current I_Kdr I_Kdr_s = G_KDR_S * n * n * n * n * (prevV_soma - V_K); // SCHWEIGHOFER // I_K_s I_K_s = G_K_S * pow(x_s, 4) * (prevV_soma - V_K); // Axon-soma interaction current I_as I_as = (G_INT / (1 - P2)) * (prevV_soma - prevV_axon); dVs_dt = (-(I_CaL + I_ds + I_as + I_Na_s + I_ls + I_Kdr_s + I_K_s)) / C_M; 
*chComps->newVSoma = DELTA * dVs_dt + prevV_soma; return; } void CompAxon(cellCompParams *cellCompParamsPtr){ struct channelParams chPrms; struct axonCurrVoltPrms chComps; // update somatic components // SCHWEIGHOFER: //Prepare pointers to inputs/outputs chPrms.v = &cellCompParamsPtr->prevCellState->axon.V_axon; chPrms.prevComp1 = &cellCompParamsPtr->prevCellState->axon.Sodium_h_a; chPrms.newComp1 = &cellCompParamsPtr->newCellState->axon.Sodium_h_a; chPrms.newComp2 = &cellCompParamsPtr->newCellState->axon.Sodium_m_a; //Compute AxonSodium(&chPrms); //Prepare pointers to inputs/outputs chPrms.v = &cellCompParamsPtr->prevCellState->axon.V_axon; chPrms.prevComp1 = &cellCompParamsPtr->prevCellState->axon.Potassium_x_a; chPrms.newComp1 = &cellCompParamsPtr->newCellState->axon.Potassium_x_a; //Compute AxonPotassium(&chPrms); //Get inputs chComps.vSoma = &cellCompParamsPtr->prevCellState->soma.V_soma; chComps.vAxon = &cellCompParamsPtr->prevCellState->axon.V_axon; chComps.newVAxon = &cellCompParamsPtr->newCellState->axon.V_axon; chComps.m_a = &cellCompParamsPtr->newCellState->axon.Sodium_m_a; chComps.h_a = &cellCompParamsPtr->newCellState->axon.Sodium_h_a; chComps.x_a = &cellCompParamsPtr->newCellState->axon.Potassium_x_a; AxonCurrVolt(&chComps); return; } void AxonSodium(struct channelParams *chPrms){ mod_prec m_inf_a, h_inf_a, tau_h_a, dh_dt_a, m_a_local, h_a_local; //Get inputs mod_prec prevV_axon = *chPrms->v; mod_prec prevSodium_h_a = *chPrms->prevComp1; // Update axonal Na components // NOTE: current has shortened inactivation to account for high // firing frequencies in axon hillock m_inf_a = 1 / (1 + (exp((-30 - prevV_axon)/ 5.5))); h_inf_a = 1 / (1 + (exp((-60 - prevV_axon)/-5.8))); tau_h_a = 1.5 * exp((-40 - prevV_axon)/33); dh_dt_a = (h_inf_a - prevSodium_h_a)/tau_h_a; m_a_local = m_inf_a; h_a_local = prevSodium_h_a + DELTA * dh_dt_a; //Put result *chPrms->newComp1 = h_a_local; *chPrms->newComp2 = m_a_local; return; } void AxonPotassium(struct channelParams *chPrms){ mod_prec alpha_x_a, beta_x_a, x_inf_a, tau_x_a, dx_dt_a, x_a_local; //Get inputs mod_prec prevV_axon = *chPrms->v; mod_prec prevPotassium_x_a = *chPrms->prevComp1; // D'ANGELO 2001 -- Voltage-dependent potassium alpha_x_a = 0.13 * (prevV_axon + 25) / (1 - exp(-(prevV_axon + 25) / 10)); beta_x_a = 1.69 * exp(-0.0125 * (prevV_axon + 35)); x_inf_a = alpha_x_a / (alpha_x_a + beta_x_a); tau_x_a = 1 / (alpha_x_a + beta_x_a); dx_dt_a = (x_inf_a - prevPotassium_x_a) / tau_x_a; x_a_local = 0.05 * dx_dt_a + prevPotassium_x_a; //Put result *chPrms->newComp1 = x_a_local; return; } void AxonCurrVolt(struct axonCurrVoltPrms *chComps){ //Local variable mod_prec I_Na_a, I_la, I_sa, I_K_a, dVa_dt; //Get inputs mod_prec prevV_soma = *chComps->vSoma; mod_prec prevV_axon = *chComps->vAxon; mod_prec m_a = *chComps->m_a; mod_prec h_a = *chComps->h_a; mod_prec x_a = *chComps->x_a; // AXONAL CURRENTS // Sodium I_Na_a = G_NA_A * m_a * m_a * m_a * h_a * (prevV_axon - V_NA); // Leak I_la = G_LA * (prevV_axon - V_L); // Soma-axon interaction current I_sa I_sa = (G_INT / P2) * (prevV_axon - prevV_soma); // Potassium (transient) I_K_a = G_K_A * pow(x_a, 4) * (prevV_axon - V_K); dVa_dt = (-(I_K_a + I_sa + I_la + I_Na_a)) / C_M; *chComps->newVAxon = DELTA * dVa_dt + prevV_axon; return; } void InitState(cellState **cellStatePtr){ int j, k; cellState initState; //Initial dendritic parameters initState.dend.V_dend = -60; initState.dend.Calcium_r = 0.0112788;// High-threshold calcium initState.dend.Potassium_s = 0.0049291;// Calcium-dependent potassium 
initState.dend.Hcurrent_q = 0.0337836;// H current initState.dend.Ca2Plus = 3.7152;// Calcium concentration initState.dend.I_CaH = 0.5;// High-threshold calcium current //Initial somatic parameters initState.soma.g_CaL = 0.68; //default arbitrary value but it should be randomized per cell initState.soma.V_soma = -60; initState.soma.Sodium_m = 1.0127807;// Sodium (artificial) initState.soma.Sodium_h = 0.3596066; initState.soma.Potassium_n = 0.2369847;// Potassium (delayed rectifier) initState.soma.Potassium_p = 0.2369847; initState.soma.Potassium_x_s = 0.1;// Potassium (voltage-dependent) initState.soma.Calcium_k = 0.7423159;// Low-threshold calcium initState.soma.Calcium_l = 0.0321349; // Initial axonal parameters initState.axon.V_axon = -60; //sisaza: Sodium_m_a doesn't have a state, therefore this assignment doesn'thave any effect initState.axon.Sodium_m_a = 0.003596066;// Sodium (thalamocortical) initState.axon.Sodium_h_a = 0.9; initState.axon.Potassium_x_a = 0.2369847;// Potassium (transient) //Copy init sate to all cell states for(j=0;j<IO_NETWORK_DIM1;j++){ for(k=0;k<IO_NETWORK_DIM2;k++){ memcpy(&cellStatePtr[j][k], &initState, sizeof(cellState)); } } return; } int ReadFileLine(char *iAppBuf, int iAppBufSize, FILE *pInFile, mod_prec *iAppArray){ //FIXME: make this function more robust char *strNumber; int i = 0; //Get one line if(fgets(iAppBuf, iAppBufSize, pInFile)){ //Convert the ASCII string of one element to a double precision floating point value strNumber = strtok(iAppBuf," "); i = 0; //printf("Line:\n"); while ((strNumber != NULL) && (i<IO_NETWORK_SIZE)){ iAppArray[i] = atof(strNumber);//atof() should change if using integers or fixed point //printf ("(%s) %0.2f ", strNumber, iAppArray[i]); strNumber = strtok(NULL, " "); i++; } //printf("i: %d\n", i); if(i<IO_NETWORK_SIZE){ //BUG: if only one element is missing but the line ends in a space, the error is not detected printf("Error: Input line doesn't have enough elements, only %d\n", i); exit(EXIT_FAILURE); } return 1;//success }else{ if(!feof(pInFile)){ printf("Error: Reading from input file didn't finish successfully\n"); exit(EXIT_FAILURE); } return 0;//end of file } } inline mod_prec min(mod_prec a, mod_prec b){ return (a < b) ? a : b; }
9a5079f81c2fbaaa545f188c991de2cc51bdf361.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>

const int N = 7;
const int blocksize = 7;

// Each thread shifts one character of the greeting by the matching offset.
__global__ void hello(char *a, int *b)
{
    a[threadIdx.x] += b[threadIdx.x];
}

int main()
{
    char a[N] = "Hello ";
    int b[N] = {15, 10, 6, 0, -11, 1, 0};

    char *ad;
    int *bd;
    const int csize = N*sizeof(char);
    const int isize = N*sizeof(int);

    printf("%s", a);

    hipMalloc( (void**)&ad, csize );
    hipMalloc( (void**)&bd, isize );
    hipMemcpy( ad, a, csize, hipMemcpyHostToDevice );
    hipMemcpy( bd, b, isize, hipMemcpyHostToDevice );

    dim3 dimBlock( blocksize, 1 );
    dim3 dimGrid( 1, 1 );
    hipLaunchKernelGGL(( hello), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd);
    hipMemcpy( a, ad, csize, hipMemcpyDeviceToHost );

    // Release both device buffers before exiting.
    hipFree( ad );
    hipFree( bd );

    printf("%s\n", a);
    return EXIT_SUCCESS;
}
9a5079f81c2fbaaa545f188c991de2cc51bdf361.cu
#include <stdio.h>
#include <stdlib.h>

const int N = 7;
const int blocksize = 7;

// Each thread shifts one character of the greeting by the matching offset.
__global__ void hello(char *a, int *b)
{
    a[threadIdx.x] += b[threadIdx.x];
}

int main()
{
    char a[N] = "Hello ";
    int b[N] = {15, 10, 6, 0, -11, 1, 0};

    char *ad;
    int *bd;
    const int csize = N*sizeof(char);
    const int isize = N*sizeof(int);

    printf("%s", a);

    cudaMalloc( (void**)&ad, csize );
    cudaMalloc( (void**)&bd, isize );
    cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice );
    cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice );

    dim3 dimBlock( blocksize, 1 );
    dim3 dimGrid( 1, 1 );
    hello<<<dimGrid, dimBlock>>>(ad, bd);
    cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost );

    // Release both device buffers before exiting.
    cudaFree( ad );
    cudaFree( bd );

    printf("%s\n", a);
    return EXIT_SUCCESS;
}
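Neither hello-world sample above checks the status of its runtime API calls. The following is a minimal, self-contained sketch of the same example with a simple status-checking macro added; the CHECK macro, its placement, and the launch-error check are illustrative assumptions, not part of either original file.

#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// Illustrative helper: abort with a readable message if a CUDA runtime call fails.
#define CHECK(call)                                                         \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                     \
                    cudaGetErrorString(err_), __FILE__, __LINE__);          \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

__global__ void hello(char *a, const int *b) { a[threadIdx.x] += b[threadIdx.x]; }

int main()
{
    const int N = 7;
    char a[N] = "Hello ";
    int b[N] = {15, 10, 6, 0, -11, 1, 0};
    char *ad;
    int *bd;

    CHECK(cudaMalloc((void**)&ad, N * sizeof(char)));
    CHECK(cudaMalloc((void**)&bd, N * sizeof(int)));
    CHECK(cudaMemcpy(ad, a, N * sizeof(char), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(bd, b, N * sizeof(int), cudaMemcpyHostToDevice));

    hello<<<1, N>>>(ad, bd);
    CHECK(cudaGetLastError());   // catches invalid launch configurations

    CHECK(cudaMemcpy(a, ad, N * sizeof(char), cudaMemcpyDeviceToHost));
    CHECK(cudaFree(ad));
    CHECK(cudaFree(bd));

    printf("%s\n", a);           // prints "World!"
    return EXIT_SUCCESS;
}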
79ad4fb23e7564c2fa21ce7a31694f179b8f1328.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cuda_toolkit/helper_math.h>
#include <quadmap/device_image.cuh>
#include <quadmap/texture_memory.cuh>
#include <ctime>

namespace quadmap {

// declare functions
void generate_gradient(DeviceImage<float> &image, DeviceImage<float2> &gradient_map);
__global__ void gradient_kernel(DeviceImage<float> *image_dev_ptr, DeviceImage<float2> *gradient_dev_ptr);

// define functions
void generate_gradient(DeviceImage<float> &image, DeviceImage<float2> &gradient_map)
{
  int width = gradient_map.width;   // appears to be a constant: 752
  int height = gradient_map.height; // appears to be a constant: 480
  // std::cout << "frame width = " << width << ", height = " << height << std::endl;
  dim3 block;
  dim3 grid;
  block.x = 16;
  block.y = 16;
  grid.x = (width + block.x - 1) / block.x;
  grid.y = (height + block.y - 1) / block.y;
  hipLaunchKernelGGL(( gradient_kernel), dim3(grid), dim3(block), 0, 0, image.dev_ptr, gradient_map.dev_ptr);
}

/**
 * @param image_dev_ptr
 * @param gradient_dev_ptr
 * note: the float2 type packs two values, x and y
 * solution: in OpenCL, use the float4 format and read the R and G channels, i.e. CL_RG
 */
__global__ void gradient_kernel(DeviceImage<float> *image_dev_ptr, DeviceImage<float2> *gradient_dev_ptr)
{
  const int x = threadIdx.x + blockIdx.x * blockDim.x;
  const int y = threadIdx.y + blockIdx.y * blockDim.y;
  const int width = gradient_dev_ptr->width;
  const int height = gradient_dev_ptr->height;

  if (x >= width - 1 || y >= height - 1 || x <= 0 || y <= 0)
    return;

  // gradient along the width (horizontal) direction
  float right_color = image_dev_ptr->atXY(x+1,y);
  float left_color = image_dev_ptr->atXY(x-1,y);
  // gradient along the height (vertical) direction
  float down_color = image_dev_ptr->atXY(x,y+1);
  float up_color = image_dev_ptr->atXY(x,y-1);

  gradient_dev_ptr->atXY(x, y) = make_float2((right_color - left_color)/2.0, (down_color - up_color)/2.0);
}

} // namespace quadmap
79ad4fb23e7564c2fa21ce7a31694f179b8f1328.cu
#include <cuda_toolkit/helper_math.h>
#include <quadmap/device_image.cuh>
#include <quadmap/texture_memory.cuh>
#include <ctime>

namespace quadmap {

// declare functions
void generate_gradient(DeviceImage<float> &image, DeviceImage<float2> &gradient_map);
__global__ void gradient_kernel(DeviceImage<float> *image_dev_ptr, DeviceImage<float2> *gradient_dev_ptr);

// define functions
void generate_gradient(DeviceImage<float> &image, DeviceImage<float2> &gradient_map)
{
  int width = gradient_map.width;   // appears to be a constant: 752
  int height = gradient_map.height; // appears to be a constant: 480
  // std::cout << "frame width = " << width << ", height = " << height << std::endl;
  dim3 block;
  dim3 grid;
  block.x = 16;
  block.y = 16;
  grid.x = (width + block.x - 1) / block.x;
  grid.y = (height + block.y - 1) / block.y;
  gradient_kernel<<<grid, block>>>(image.dev_ptr, gradient_map.dev_ptr);
}

/**
 * @param image_dev_ptr
 * @param gradient_dev_ptr
 * note: the float2 type packs two values, x and y
 * solution: in OpenCL, use the float4 format and read the R and G channels, i.e. CL_RG
 */
__global__ void gradient_kernel(DeviceImage<float> *image_dev_ptr, DeviceImage<float2> *gradient_dev_ptr)
{
  const int x = threadIdx.x + blockIdx.x * blockDim.x;
  const int y = threadIdx.y + blockIdx.y * blockDim.y;
  const int width = gradient_dev_ptr->width;
  const int height = gradient_dev_ptr->height;

  if (x >= width - 1 || y >= height - 1 || x <= 0 || y <= 0)
    return;

  // gradient along the width (horizontal) direction
  float right_color = image_dev_ptr->atXY(x+1,y);
  float left_color = image_dev_ptr->atXY(x-1,y);
  // gradient along the height (vertical) direction
  float down_color = image_dev_ptr->atXY(x,y+1);
  float up_color = image_dev_ptr->atXY(x,y-1);

  gradient_dev_ptr->atXY(x, y) = make_float2((right_color - left_color)/2.0, (down_color - up_color)/2.0);
}

} // namespace quadmap
0769f1f3e9a6f359f1979913f8aef2b4df12cadb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "binary_op.hpp"

namespace Shadow {

namespace Vision {

template <typename T>
__device__ T Binary(T a, T b, int operation) {
  switch (operation) {
    case BinaryOp::kAdd: return a + b;
    case BinaryOp::kSub: return a - b;
    case BinaryOp::kMul: return a * b;
    case BinaryOp::kDiv: return a / b;
    case BinaryOp::kPow: return powf(a, b);
    case BinaryOp::kMax: return fmaxf(a, b);
    case BinaryOp::kMin: return fminf(a, b);
    default: return 0;
  }
}

template <typename T>
__global__ void KernelBroadcastBinary(const T *in_data, const int *in_shape,
                                      const T *scalar_data, const int *scalar_shape,
                                      int operation, int num_axes, int count,
                                      const int *out_shape, T *out_data) {
  CUDA_KERNEL_LOOP(globalid, count) {
    int in_shape_acc[8], scalar_shape_acc[8];
    in_shape_acc[num_axes - 1] = 1, scalar_shape_acc[num_axes - 1] = 1;
    for (int n = num_axes - 1; n > 0; --n) {
      in_shape_acc[n - 1] = in_shape[n] * in_shape_acc[n];
      scalar_shape_acc[n - 1] = scalar_shape[n] * scalar_shape_acc[n];
    }
    int in_index = 0, scalar_index = 0, cc = globalid;
    for (int n = num_axes - 1; n >= 0; --n) {
      int dim = cc % out_shape[n];
      in_index += (dim % in_shape[n]) * in_shape_acc[n];
      scalar_index += (dim % scalar_shape[n]) * scalar_shape_acc[n];
      cc /= out_shape[n];
    }
    out_data[globalid] = Binary(in_data[in_index], scalar_data[scalar_index], operation);
  }
}

template <typename T>
void BroadcastBinary(const T *in_data, const int *in_shape, const T *scalar_data,
                     const int *scalar_shape, int operation, int num_axes, int count,
                     const int *out_shape, T *out_data, Context *context) {
  hipLaunchKernelGGL(( KernelBroadcastBinary<T>), dim3(GetBlocks(count)), dim3(NumThreads), 0,
                     hipStream_t(context->cuda_stream()),
                     in_data, in_shape, scalar_data, scalar_shape, operation,
                     num_axes, count, out_shape, out_data);
  CUDA_CHECK(hipPeekAtLastError());
}

template void BroadcastBinary(const float *, const int *, const float *, const int *,
                              int, int, int, const int *, float *, Context *);

}  // namespace Vision
}  // namespace Shadow
0769f1f3e9a6f359f1979913f8aef2b4df12cadb.cu
#include "binary_op.hpp" namespace Shadow { namespace Vision { template <typename T> __device__ T Binary(T a, T b, int operation) { switch (operation) { case BinaryOp::kAdd: return a + b; case BinaryOp::kSub: return a - b; case BinaryOp::kMul: return a * b; case BinaryOp::kDiv: return a / b; case BinaryOp::kPow: return powf(a, b); case BinaryOp::kMax: return fmaxf(a, b); case BinaryOp::kMin: return fminf(a, b); default: return 0; } } template <typename T> __global__ void KernelBroadcastBinary(const T *in_data, const int *in_shape, const T *scalar_data, const int *scalar_shape, int operation, int num_axes, int count, const int *out_shape, T *out_data) { CUDA_KERNEL_LOOP(globalid, count) { int in_shape_acc[8], scalar_shape_acc[8]; in_shape_acc[num_axes - 1] = 1, scalar_shape_acc[num_axes - 1] = 1; for (int n = num_axes - 1; n > 0; --n) { in_shape_acc[n - 1] = in_shape[n] * in_shape_acc[n]; scalar_shape_acc[n - 1] = scalar_shape[n] * scalar_shape_acc[n]; } int in_index = 0, scalar_index = 0, cc = globalid; for (int n = num_axes - 1; n >= 0; --n) { int dim = cc % out_shape[n]; in_index += (dim % in_shape[n]) * in_shape_acc[n]; scalar_index += (dim % scalar_shape[n]) * scalar_shape_acc[n]; cc /= out_shape[n]; } out_data[globalid] = Binary(in_data[in_index], scalar_data[scalar_index], operation); } } template <typename T> void BroadcastBinary(const T *in_data, const int *in_shape, const T *scalar_data, const int *scalar_shape, int operation, int num_axes, int count, const int *out_shape, T *out_data, Context *context) { KernelBroadcastBinary<T><<<GetBlocks(count), NumThreads, 0, cudaStream_t(context->cuda_stream())>>>( in_data, in_shape, scalar_data, scalar_shape, operation, num_axes, count, out_shape, out_data); CUDA_CHECK(cudaPeekAtLastError()); } template void BroadcastBinary(const float *, const int *, const float *, const int *, int, int, int, const int *, float *, Context *); } // namespace Vision } // namespace Shadow
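The broadcast kernel above maps each flat output index to an input index and a scalar index by walking the axes from innermost to outermost and wrapping size-1 axes with a modulo. A host-side sketch of that same index arithmetic is shown below, assuming row-major shapes of equal rank; the function name broadcast_indices is illustrative and not part of Shadow.

// Host-side sketch of the broadcast index mapping used in KernelBroadcastBinary.
#include <vector>

void broadcast_indices(const std::vector<int> &in_shape,
                       const std::vector<int> &scalar_shape,
                       const std::vector<int> &out_shape,
                       int flat_out_index, int &in_index, int &scalar_index)
{
    int num_axes = static_cast<int>(out_shape.size());
    std::vector<int> in_acc(num_axes), scalar_acc(num_axes);
    in_acc[num_axes - 1] = 1;
    scalar_acc[num_axes - 1] = 1;
    for (int n = num_axes - 1; n > 0; --n) {
        in_acc[n - 1] = in_shape[n] * in_acc[n];
        scalar_acc[n - 1] = scalar_shape[n] * scalar_acc[n];
    }
    in_index = 0;
    scalar_index = 0;
    int cc = flat_out_index;
    for (int n = num_axes - 1; n >= 0; --n) {
        int dim = cc % out_shape[n];
        in_index += (dim % in_shape[n]) * in_acc[n];             // size-1 axes wrap to 0
        scalar_index += (dim % scalar_shape[n]) * scalar_acc[n];
        cc /= out_shape[n];
    }
}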
a925dfa4cc91a21fb2a38c85a27a493a84dbef1f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../third_party/cuda/helper_cuda.h" #include "mem_op.h" #include "gpu_macros.h" #include "runtime.h" #include "GBuffers.h" GBuffers* alloc_buffers(int neuron_num, int synapse_num, int max_delay, real dt) { GBuffers *ret = (GBuffers*)malloc(sizeof(GBuffers)); memset(ret, 0, sizeof(GBuffers)); checkCudaErrors(hipMalloc((void**)&(ret->c_gNeuronInput), sizeof(real)*(neuron_num))); checkCudaErrors(hipMemset(ret->c_gNeuronInput, 0, sizeof(real)*(neuron_num))); checkCudaErrors(hipMalloc((void**)&(ret->c_gNeuronInput_I), sizeof(real)*(neuron_num))); checkCudaErrors(hipMemset(ret->c_gNeuronInput_I, 0, sizeof(real)*(neuron_num))); checkCudaErrors(hipMalloc((void**)&(ret->c_gFiredTable), sizeof(int)*((neuron_num)*(max_delay+1)))); checkCudaErrors(hipMemset(ret->c_gFiredTable, 0, sizeof(int)*((neuron_num)*(max_delay+1)))); checkCudaErrors(hipMalloc((void**)&(ret->c_gFiredTableSizes), sizeof(int)*(max_delay+1))); checkCudaErrors(hipMemset(ret->c_gFiredTableSizes, 0, sizeof(int)*(max_delay+1))); checkCudaErrors(hipMalloc((void**)&(ret->c_gActiveTable), sizeof(int)*(neuron_num))); checkCudaErrors(hipMemset(ret->c_gActiveTable, 0, sizeof(int)*(neuron_num))); //checkCudaErrors(hipMalloc((void**)&(ret->c_gSynapsesActiveTable), sizeof(int)*(synapse_num))); //checkCudaErrors(hipMemset(ret->c_gSynapsesActiveTable, 0, sizeof(int)*(synapse_num))); //checkCudaErrors(hipMalloc((void**)&(ret->c_gSynapsesLogTable), sizeof(int)*(synapse_num))); //checkCudaErrors(hipMemset(ret->c_gSynapsesLogTable, 0, sizeof(int)*(synapse_num))); ret->c_gLayerInput = gpuMalloc<int>(neuron_num); ret->c_gXInput = gpuMalloc<real>(neuron_num); ret->c_gFireCount = gpuMalloc<int>(neuron_num); int timeTableCap = max_delay+1; checkCudaErrors(hipMemcpyToSymbol(MAX_DELAY, &max_delay, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(gTimeTableCap, &timeTableCap, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(gFiredTableCap, &neuron_num, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(gSynapsesTableCap, &synapse_num, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(DT, &dt, sizeof(real))); //checkCudaErrors(hipMalloc((void**)&ret->c_gTimeTable, sizeof(int)*(max_delay+1))); //checkCudaErrors(hipMemset(ret->c_gTimeTable, 0, sizeof(int)*(max_delay+1))); checkCudaErrors(hipHostMalloc((void**)(&ret->c_neuronsFired), sizeof(int)*(neuron_num))); checkCudaErrors(hipHostMalloc((void**)(&ret->c_synapsesFired), sizeof(int)*(synapse_num))); hipLaunchKernelGGL(( init_buffers), dim3(1), dim3(1), 0, 0, /*ret->c_gTimeTable,*/ ret->c_gNeuronInput, ret->c_gNeuronInput_I, ret->c_gFiredTable, ret->c_gFiredTableSizes, ret->c_gActiveTable, ret->c_gSynapsesActiveTable, ret->c_gSynapsesLogTable); hipLaunchKernelGGL(( init_log_buffers), dim3(1), dim3(1), 0, 0, ret->c_gLayerInput, ret->c_gXInput, ret->c_gFireCount); return ret; } void init_buffers(GBuffers * buf) { hipLaunchKernelGGL(( init_buffers), dim3(1), dim3(1), 0, 0, /*buf->c_gTimeTable,*/ buf->c_gNeuronInput, buf->c_gNeuronInput_I, buf->c_gFiredTable, buf->c_gFiredTableSizes, buf->c_gActiveTable, buf->c_gSynapsesActiveTable, buf->c_gSynapsesLogTable); hipLaunchKernelGGL(( init_log_buffers), dim3(1), dim3(1), 0, 0, buf->c_gLayerInput, buf->c_gXInput, buf->c_gFireCount); } int free_buffers(GBuffers *buf) { checkCudaErrors(hipFree(buf->c_gNeuronInput)); checkCudaErrors(hipFree(buf->c_gNeuronInput_I)); checkCudaErrors(hipFree(buf->c_gFiredTable)); checkCudaErrors(hipFree(buf->c_gFiredTableSizes)); 
checkCudaErrors(hipFree(buf->c_gActiveTable)); checkCudaErrors(hipFree(buf->c_gSynapsesActiveTable)); checkCudaErrors(hipFree(buf->c_gSynapsesLogTable)); checkCudaErrors(hipHostFree(buf->c_neuronsFired)); checkCudaErrors(hipHostFree(buf->c_synapsesFired)); return 0; }
a925dfa4cc91a21fb2a38c85a27a493a84dbef1f.cu
#include "../third_party/cuda/helper_cuda.h" #include "mem_op.h" #include "gpu_macros.h" #include "runtime.h" #include "GBuffers.h" GBuffers* alloc_buffers(int neuron_num, int synapse_num, int max_delay, real dt) { GBuffers *ret = (GBuffers*)malloc(sizeof(GBuffers)); memset(ret, 0, sizeof(GBuffers)); checkCudaErrors(cudaMalloc((void**)&(ret->c_gNeuronInput), sizeof(real)*(neuron_num))); checkCudaErrors(cudaMemset(ret->c_gNeuronInput, 0, sizeof(real)*(neuron_num))); checkCudaErrors(cudaMalloc((void**)&(ret->c_gNeuronInput_I), sizeof(real)*(neuron_num))); checkCudaErrors(cudaMemset(ret->c_gNeuronInput_I, 0, sizeof(real)*(neuron_num))); checkCudaErrors(cudaMalloc((void**)&(ret->c_gFiredTable), sizeof(int)*((neuron_num)*(max_delay+1)))); checkCudaErrors(cudaMemset(ret->c_gFiredTable, 0, sizeof(int)*((neuron_num)*(max_delay+1)))); checkCudaErrors(cudaMalloc((void**)&(ret->c_gFiredTableSizes), sizeof(int)*(max_delay+1))); checkCudaErrors(cudaMemset(ret->c_gFiredTableSizes, 0, sizeof(int)*(max_delay+1))); checkCudaErrors(cudaMalloc((void**)&(ret->c_gActiveTable), sizeof(int)*(neuron_num))); checkCudaErrors(cudaMemset(ret->c_gActiveTable, 0, sizeof(int)*(neuron_num))); //checkCudaErrors(cudaMalloc((void**)&(ret->c_gSynapsesActiveTable), sizeof(int)*(synapse_num))); //checkCudaErrors(cudaMemset(ret->c_gSynapsesActiveTable, 0, sizeof(int)*(synapse_num))); //checkCudaErrors(cudaMalloc((void**)&(ret->c_gSynapsesLogTable), sizeof(int)*(synapse_num))); //checkCudaErrors(cudaMemset(ret->c_gSynapsesLogTable, 0, sizeof(int)*(synapse_num))); ret->c_gLayerInput = gpuMalloc<int>(neuron_num); ret->c_gXInput = gpuMalloc<real>(neuron_num); ret->c_gFireCount = gpuMalloc<int>(neuron_num); int timeTableCap = max_delay+1; checkCudaErrors(cudaMemcpyToSymbol(MAX_DELAY, &max_delay, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(gTimeTableCap, &timeTableCap, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(gFiredTableCap, &neuron_num, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(gSynapsesTableCap, &synapse_num, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(DT, &dt, sizeof(real))); //checkCudaErrors(cudaMalloc((void**)&ret->c_gTimeTable, sizeof(int)*(max_delay+1))); //checkCudaErrors(cudaMemset(ret->c_gTimeTable, 0, sizeof(int)*(max_delay+1))); checkCudaErrors(cudaMallocHost((void**)(&ret->c_neuronsFired), sizeof(int)*(neuron_num))); checkCudaErrors(cudaMallocHost((void**)(&ret->c_synapsesFired), sizeof(int)*(synapse_num))); init_buffers<<<1, 1, 0>>>(/*ret->c_gTimeTable,*/ ret->c_gNeuronInput, ret->c_gNeuronInput_I, ret->c_gFiredTable, ret->c_gFiredTableSizes, ret->c_gActiveTable, ret->c_gSynapsesActiveTable, ret->c_gSynapsesLogTable); init_log_buffers<<<1, 1, 0>>>(ret->c_gLayerInput, ret->c_gXInput, ret->c_gFireCount); return ret; } void init_buffers(GBuffers * buf) { init_buffers<<<1, 1, 0>>>(/*buf->c_gTimeTable,*/ buf->c_gNeuronInput, buf->c_gNeuronInput_I, buf->c_gFiredTable, buf->c_gFiredTableSizes, buf->c_gActiveTable, buf->c_gSynapsesActiveTable, buf->c_gSynapsesLogTable); init_log_buffers<<<1, 1, 0>>>(buf->c_gLayerInput, buf->c_gXInput, buf->c_gFireCount); } int free_buffers(GBuffers *buf) { checkCudaErrors(cudaFree(buf->c_gNeuronInput)); checkCudaErrors(cudaFree(buf->c_gNeuronInput_I)); checkCudaErrors(cudaFree(buf->c_gFiredTable)); checkCudaErrors(cudaFree(buf->c_gFiredTableSizes)); checkCudaErrors(cudaFree(buf->c_gActiveTable)); checkCudaErrors(cudaFree(buf->c_gSynapsesActiveTable)); checkCudaErrors(cudaFree(buf->c_gSynapsesLogTable)); checkCudaErrors(cudaFreeHost(buf->c_neuronsFired)); 
checkCudaErrors(cudaFreeHost(buf->c_synapsesFired)); return 0; }
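The allocation code above pairs cudaMemcpyToSymbol with device-side symbols (MAX_DELAY, DT, and friends) that are declared elsewhere in the project's headers. A self-contained sketch of that pattern, using a hypothetical __constant__ symbol, is shown below.

// Standalone sketch of the __constant__-symbol pattern; kMaxDelay is a hypothetical symbol,
// declared locally only to show how cudaMemcpyToSymbol pairs with a __constant__ variable.
#include <cuda_runtime.h>
#include <cstdio>

__constant__ int kMaxDelay;

__global__ void use_constant(int *out) { *out = kMaxDelay; }

int main() {
    int host_val = 8, result = 0;
    int *d_out = nullptr;
    cudaMemcpyToSymbol(kMaxDelay, &host_val, sizeof(int));  // host -> __constant__ memory
    cudaMalloc(&d_out, sizeof(int));
    use_constant<<<1, 1>>>(d_out);
    cudaMemcpy(&result, d_out, sizeof(int), cudaMemcpyDeviceToHost);
    printf("kMaxDelay on device = %d\n", result);
    cudaFree(d_out);
    return 0;
}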
6112317d41f81025b5a9bbf2ccdf54cef5a2fb3c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** ************************************************************************** * \file dct8x8_kernel1.cu * \brief Contains 1st CUDA implementations of DCT, IDCT and quantization routines, * used in JPEG internal data processing. Device code. * * This code implements first CUDA versions of forward and inverse Discrete Cosine * Transform to blocks of image pixels (of 8x8 size), as in JPEG standard. The data * processing is done using floating point representation. * The routine that performs quantization of coefficients can be found in * dct8x8_kernel_quantization.cu file. */ #pragma once #include "Common.h" /** * This unitary matrix performs discrete cosine transform of rows of the matrix to the left */ __constant__ float DCTv8matrix[] = { 0.3535533905932738f, 0.4903926402016152f, 0.4619397662556434f, 0.4157348061512726f, 0.3535533905932738f, 0.2777851165098011f, 0.1913417161825449f, 0.0975451610080642f, 0.3535533905932738f, 0.4157348061512726f, 0.1913417161825449f, -0.0975451610080641f, -0.3535533905932737f, -0.4903926402016152f, -0.4619397662556434f, -0.2777851165098011f, 0.3535533905932738f, 0.2777851165098011f, -0.1913417161825449f, -0.4903926402016152f, -0.3535533905932738f, 0.0975451610080642f, 0.4619397662556433f, 0.4157348061512727f, 0.3535533905932738f, 0.0975451610080642f, -0.4619397662556434f, -0.2777851165098011f, 0.3535533905932737f, 0.4157348061512727f, -0.1913417161825450f, -0.4903926402016153f, 0.3535533905932738f, -0.0975451610080641f, -0.4619397662556434f, 0.2777851165098009f, 0.3535533905932738f, -0.4157348061512726f, -0.1913417161825453f, 0.4903926402016152f, 0.3535533905932738f, -0.2777851165098010f, -0.1913417161825452f, 0.4903926402016153f, -0.3535533905932733f, -0.0975451610080649f, 0.4619397662556437f, -0.4157348061512720f, 0.3535533905932738f, -0.4157348061512727f, 0.1913417161825450f, 0.0975451610080640f, -0.3535533905932736f, 0.4903926402016152f, -0.4619397662556435f, 0.2777851165098022f, 0.3535533905932738f, -0.4903926402016152f, 0.4619397662556433f, -0.4157348061512721f, 0.3535533905932733f, -0.2777851165098008f, 0.1913417161825431f, -0.0975451610080625f }; // Temporary blocks __shared__ float CurBlockLocal1[BLOCK_SIZE2]; __shared__ float CurBlockLocal2[BLOCK_SIZE2]; /** ************************************************************************** * Performs 1st implementation of 8x8 block-wise Forward Discrete Cosine Transform of the given * image plane and outputs result to the array of coefficients. 
* * \param Dst [OUT] - Coefficients plane * \param ImgWidth [IN] - Stride of Dst * \param OffsetXBlocks [IN] - Offset along X in blocks from which to perform processing * \param OffsetYBlocks [IN] - Offset along Y in blocks from which to perform processing * * \return None */ __global__ void CUDAkernel1DCT(float *Dst, int ImgWidth, int OffsetXBlocks, int OffsetYBlocks) { // Block index const int bx = blockIdx.x + OffsetXBlocks; const int by = blockIdx.y + OffsetYBlocks; // Thread index (current coefficient) const int tx = threadIdx.x; const int ty = threadIdx.y; // Texture coordinates const float tex_x = (float)( (bx << BLOCK_SIZE_LOG2) + tx ) + 0.5f; const float tex_y = (float)( (by << BLOCK_SIZE_LOG2) + ty ) + 0.5f; //copy current image pixel to the first block CurBlockLocal1[ (ty << BLOCK_SIZE_LOG2) + tx ] = tex2D(TexSrc, tex_x, tex_y); //synchronize threads to make sure the block is copied __syncthreads(); //calculate the multiplication of DCTv8matrixT * A and place it in the second block float curelem = 0; int DCTv8matrixIndex = 0 * BLOCK_SIZE + ty; int CurBlockLocal1Index = 0 * BLOCK_SIZE + tx; #pragma unroll for (int i=0; i<BLOCK_SIZE; i++) { curelem += DCTv8matrix[DCTv8matrixIndex] * CurBlockLocal1[CurBlockLocal1Index]; DCTv8matrixIndex += BLOCK_SIZE; CurBlockLocal1Index += BLOCK_SIZE; } CurBlockLocal2[ (ty << BLOCK_SIZE_LOG2) + tx ] = curelem; //synchronize threads to make sure the first 2 matrices are multiplied and the result is stored in the second block __syncthreads(); //calculate the multiplication of (DCTv8matrixT * A) * DCTv8matrix and place it in the first block curelem = 0; int CurBlockLocal2Index = (ty << BLOCK_SIZE_LOG2) + 0; DCTv8matrixIndex = 0 * BLOCK_SIZE + tx; #pragma unroll for (int i=0; i<BLOCK_SIZE; i++) { curelem += CurBlockLocal2[CurBlockLocal2Index] * DCTv8matrix[DCTv8matrixIndex]; CurBlockLocal2Index += 1; DCTv8matrixIndex += BLOCK_SIZE; } CurBlockLocal1[ (ty << BLOCK_SIZE_LOG2) + tx ] = curelem; //synchronize threads to make sure the matrices are multiplied and the result is stored back in the first block __syncthreads(); //copy current coefficient to its place in the result array Dst[ FMUL(((by << BLOCK_SIZE_LOG2) + ty), ImgWidth) + ((bx << BLOCK_SIZE_LOG2) + tx) ] = CurBlockLocal1[ (ty << BLOCK_SIZE_LOG2) + tx ]; } /** ************************************************************************** * Performs 1st implementation of 8x8 block-wise Inverse Discrete Cosine Transform of the given * DCT coefficients plane and outputs result to the image array * * \param Dst [OUT] - Image plane * \param ImgWidth [IN] - Stride of Dst * \param OffsetXBlocks [IN] - Offset along X in blocks from which to perform processing * \param OffsetYBlocks [IN] - Offset along Y in blocks from which to perform processing * * \return None */ __global__ void CUDAkernel1IDCT(float *Dst, int ImgWidth, int OffsetXBlocks, int OffsetYBlocks) { // Block index int bx = blockIdx.x + OffsetXBlocks; int by = blockIdx.y + OffsetYBlocks; // Thread index (current image pixel) int tx = threadIdx.x; int ty = threadIdx.y; // Texture coordinates const float tex_x = (float)( (bx << BLOCK_SIZE_LOG2) + tx ) + 0.5f; const float tex_y = (float)( (by << BLOCK_SIZE_LOG2) + ty ) + 0.5f; //copy current image pixel to the first block CurBlockLocal1[ (ty << BLOCK_SIZE_LOG2) + tx ] = tex2D(TexSrc, tex_x, tex_y); //synchronize threads to make sure the block is copied __syncthreads(); //calculate the multiplication of DCTv8matrix * A and place it in the second block float curelem = 0; int DCTv8matrixIndex = (ty << 
BLOCK_SIZE_LOG2) + 0; int CurBlockLocal1Index = 0 * BLOCK_SIZE + tx; #pragma unroll for (int i=0; i<BLOCK_SIZE; i++) { curelem += DCTv8matrix[DCTv8matrixIndex] * CurBlockLocal1[CurBlockLocal1Index]; DCTv8matrixIndex += 1; CurBlockLocal1Index += BLOCK_SIZE; } CurBlockLocal2[ (ty << BLOCK_SIZE_LOG2) + tx ] = curelem; //synchronize threads to make sure the first 2 matrices are multiplied and the result is stored in the second block __syncthreads(); //calculate the multiplication of (DCTv8matrix * A) * DCTv8matrixT and place it in the first block curelem = 0; int CurBlockLocal2Index = (ty << BLOCK_SIZE_LOG2) + 0; DCTv8matrixIndex = (tx << BLOCK_SIZE_LOG2) + 0; #pragma unroll for (int i=0; i<BLOCK_SIZE; i++) { curelem += CurBlockLocal2[CurBlockLocal2Index] * DCTv8matrix[DCTv8matrixIndex]; CurBlockLocal2Index += 1; DCTv8matrixIndex += 1; } CurBlockLocal1[ (ty << BLOCK_SIZE_LOG2) + tx ] = curelem; //synchronize threads to make sure the matrices are multiplied and the result is stored back in the first block __syncthreads(); //copy current coefficient to its place in the result array Dst[ FMUL(((by << BLOCK_SIZE_LOG2) + ty), ImgWidth) + ((bx << BLOCK_SIZE_LOG2) + tx) ] = CurBlockLocal1[ (ty << BLOCK_SIZE_LOG2) + tx ]; }
6112317d41f81025b5a9bbf2ccdf54cef5a2fb3c.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** ************************************************************************** * \file dct8x8_kernel1.cu * \brief Contains 1st CUDA implementations of DCT, IDCT and quantization routines, * used in JPEG internal data processing. Device code. * * This code implements first CUDA versions of forward and inverse Discrete Cosine * Transform to blocks of image pixels (of 8x8 size), as in JPEG standard. The data * processing is done using floating point representation. * The routine that performs quantization of coefficients can be found in * dct8x8_kernel_quantization.cu file. */ #pragma once #include "Common.h" /** * This unitary matrix performs discrete cosine transform of rows of the matrix to the left */ __constant__ float DCTv8matrix[] = { 0.3535533905932738f, 0.4903926402016152f, 0.4619397662556434f, 0.4157348061512726f, 0.3535533905932738f, 0.2777851165098011f, 0.1913417161825449f, 0.0975451610080642f, 0.3535533905932738f, 0.4157348061512726f, 0.1913417161825449f, -0.0975451610080641f, -0.3535533905932737f, -0.4903926402016152f, -0.4619397662556434f, -0.2777851165098011f, 0.3535533905932738f, 0.2777851165098011f, -0.1913417161825449f, -0.4903926402016152f, -0.3535533905932738f, 0.0975451610080642f, 0.4619397662556433f, 0.4157348061512727f, 0.3535533905932738f, 0.0975451610080642f, -0.4619397662556434f, -0.2777851165098011f, 0.3535533905932737f, 0.4157348061512727f, -0.1913417161825450f, -0.4903926402016153f, 0.3535533905932738f, -0.0975451610080641f, -0.4619397662556434f, 0.2777851165098009f, 0.3535533905932738f, -0.4157348061512726f, -0.1913417161825453f, 0.4903926402016152f, 0.3535533905932738f, -0.2777851165098010f, -0.1913417161825452f, 0.4903926402016153f, -0.3535533905932733f, -0.0975451610080649f, 0.4619397662556437f, -0.4157348061512720f, 0.3535533905932738f, -0.4157348061512727f, 0.1913417161825450f, 0.0975451610080640f, -0.3535533905932736f, 0.4903926402016152f, -0.4619397662556435f, 0.2777851165098022f, 0.3535533905932738f, -0.4903926402016152f, 0.4619397662556433f, -0.4157348061512721f, 0.3535533905932733f, -0.2777851165098008f, 0.1913417161825431f, -0.0975451610080625f }; // Temporary blocks __shared__ float CurBlockLocal1[BLOCK_SIZE2]; __shared__ float CurBlockLocal2[BLOCK_SIZE2]; /** ************************************************************************** * Performs 1st implementation of 8x8 block-wise Forward Discrete Cosine Transform of the given * image plane and outputs result to the array of coefficients. 
* * \param Dst [OUT] - Coefficients plane * \param ImgWidth [IN] - Stride of Dst * \param OffsetXBlocks [IN] - Offset along X in blocks from which to perform processing * \param OffsetYBlocks [IN] - Offset along Y in blocks from which to perform processing * * \return None */ __global__ void CUDAkernel1DCT(float *Dst, int ImgWidth, int OffsetXBlocks, int OffsetYBlocks) { // Block index const int bx = blockIdx.x + OffsetXBlocks; const int by = blockIdx.y + OffsetYBlocks; // Thread index (current coefficient) const int tx = threadIdx.x; const int ty = threadIdx.y; // Texture coordinates const float tex_x = (float)( (bx << BLOCK_SIZE_LOG2) + tx ) + 0.5f; const float tex_y = (float)( (by << BLOCK_SIZE_LOG2) + ty ) + 0.5f; //copy current image pixel to the first block CurBlockLocal1[ (ty << BLOCK_SIZE_LOG2) + tx ] = tex2D(TexSrc, tex_x, tex_y); //synchronize threads to make sure the block is copied __syncthreads(); //calculate the multiplication of DCTv8matrixT * A and place it in the second block float curelem = 0; int DCTv8matrixIndex = 0 * BLOCK_SIZE + ty; int CurBlockLocal1Index = 0 * BLOCK_SIZE + tx; #pragma unroll for (int i=0; i<BLOCK_SIZE; i++) { curelem += DCTv8matrix[DCTv8matrixIndex] * CurBlockLocal1[CurBlockLocal1Index]; DCTv8matrixIndex += BLOCK_SIZE; CurBlockLocal1Index += BLOCK_SIZE; } CurBlockLocal2[ (ty << BLOCK_SIZE_LOG2) + tx ] = curelem; //synchronize threads to make sure the first 2 matrices are multiplied and the result is stored in the second block __syncthreads(); //calculate the multiplication of (DCTv8matrixT * A) * DCTv8matrix and place it in the first block curelem = 0; int CurBlockLocal2Index = (ty << BLOCK_SIZE_LOG2) + 0; DCTv8matrixIndex = 0 * BLOCK_SIZE + tx; #pragma unroll for (int i=0; i<BLOCK_SIZE; i++) { curelem += CurBlockLocal2[CurBlockLocal2Index] * DCTv8matrix[DCTv8matrixIndex]; CurBlockLocal2Index += 1; DCTv8matrixIndex += BLOCK_SIZE; } CurBlockLocal1[ (ty << BLOCK_SIZE_LOG2) + tx ] = curelem; //synchronize threads to make sure the matrices are multiplied and the result is stored back in the first block __syncthreads(); //copy current coefficient to its place in the result array Dst[ FMUL(((by << BLOCK_SIZE_LOG2) + ty), ImgWidth) + ((bx << BLOCK_SIZE_LOG2) + tx) ] = CurBlockLocal1[ (ty << BLOCK_SIZE_LOG2) + tx ]; } /** ************************************************************************** * Performs 1st implementation of 8x8 block-wise Inverse Discrete Cosine Transform of the given * DCT coefficients plane and outputs result to the image array * * \param Dst [OUT] - Image plane * \param ImgWidth [IN] - Stride of Dst * \param OffsetXBlocks [IN] - Offset along X in blocks from which to perform processing * \param OffsetYBlocks [IN] - Offset along Y in blocks from which to perform processing * * \return None */ __global__ void CUDAkernel1IDCT(float *Dst, int ImgWidth, int OffsetXBlocks, int OffsetYBlocks) { // Block index int bx = blockIdx.x + OffsetXBlocks; int by = blockIdx.y + OffsetYBlocks; // Thread index (current image pixel) int tx = threadIdx.x; int ty = threadIdx.y; // Texture coordinates const float tex_x = (float)( (bx << BLOCK_SIZE_LOG2) + tx ) + 0.5f; const float tex_y = (float)( (by << BLOCK_SIZE_LOG2) + ty ) + 0.5f; //copy current image pixel to the first block CurBlockLocal1[ (ty << BLOCK_SIZE_LOG2) + tx ] = tex2D(TexSrc, tex_x, tex_y); //synchronize threads to make sure the block is copied __syncthreads(); //calculate the multiplication of DCTv8matrix * A and place it in the second block float curelem = 0; int DCTv8matrixIndex = (ty << 
BLOCK_SIZE_LOG2) + 0; int CurBlockLocal1Index = 0 * BLOCK_SIZE + tx; #pragma unroll for (int i=0; i<BLOCK_SIZE; i++) { curelem += DCTv8matrix[DCTv8matrixIndex] * CurBlockLocal1[CurBlockLocal1Index]; DCTv8matrixIndex += 1; CurBlockLocal1Index += BLOCK_SIZE; } CurBlockLocal2[ (ty << BLOCK_SIZE_LOG2) + tx ] = curelem; //synchronize threads to make sure the first 2 matrices are multiplied and the result is stored in the second block __syncthreads(); //calculate the multiplication of (DCTv8matrix * A) * DCTv8matrixT and place it in the first block curelem = 0; int CurBlockLocal2Index = (ty << BLOCK_SIZE_LOG2) + 0; DCTv8matrixIndex = (tx << BLOCK_SIZE_LOG2) + 0; #pragma unroll for (int i=0; i<BLOCK_SIZE; i++) { curelem += CurBlockLocal2[CurBlockLocal2Index] * DCTv8matrix[DCTv8matrixIndex]; CurBlockLocal2Index += 1; DCTv8matrixIndex += 1; } CurBlockLocal1[ (ty << BLOCK_SIZE_LOG2) + tx ] = curelem; //synchronize threads to make sure the matrices are multiplied and the result is stored back in the first block __syncthreads(); //copy current coefficient to its place in the result array Dst[ FMUL(((by << BLOCK_SIZE_LOG2) + ty), ImgWidth) + ((bx << BLOCK_SIZE_LOG2) + tx) ] = CurBlockLocal1[ (ty << BLOCK_SIZE_LOG2) + tx ]; }
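CUDAkernel1DCT above computes the separable 8x8 DCT as two matrix products, out = M^T * A * M, where M is DCTv8matrix. A CPU reference of the same two-stage product is sketched below for validating the kernel on a single block; it assumes BLOCK_SIZE == 8 and row-major 8x8 arrays, and the helper name dct8x8_reference is illustrative.

// CPU reference for the two-stage product computed by CUDAkernel1DCT: out = M^T * A * M.
void dct8x8_reference(const float M[64], const float A[64], float out[64])
{
    float tmp[64]; // tmp = M^T * A
    for (int r = 0; r < 8; ++r)
        for (int c = 0; c < 8; ++c) {
            float acc = 0.f;
            for (int k = 0; k < 8; ++k)
                acc += M[k * 8 + r] * A[k * 8 + c]; // (M^T)[r][k] == M[k][r]
            tmp[r * 8 + c] = acc;
        }
    for (int r = 0; r < 8; ++r) // out = tmp * M
        for (int c = 0; c < 8; ++c) {
            float acc = 0.f;
            for (int k = 0; k < 8; ++k)
                acc += tmp[r * 8 + k] * M[k * 8 + c];
            out[r * 8 + c] = acc;
        }
}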
17633c86c8d08c02d514ad2335e1ad3eda42a562.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <driver_functions.h> #include <thrust/scan.h> #include <thrust/device_ptr.h> #include <thrust/device_malloc.h> #include <thrust/device_free.h> #include "CycleTimer.h" extern float toBW(int bytes, float sec); /* Helper function to round up to a power of 2. */ static inline int nextPow2(int n) { n--; n |= n >> 1; n |= n >> 2; n |= n >> 4; n |= n >> 8; n |= n >> 16; n++; return n; } __global__ void upsweep_kernel(int length, int twod, int twod1, int* device_result) { // compute overall index from position of thread in current block, // and given the block we are in int index = blockIdx.x * blockDim.x + threadIdx.x; if ((index < length) && (index%twod1 == 0)) { device_result[index + twod1 - 1] += device_result[index + twod - 1]; } if(index == length-1) { device_result[index] = 0; } } __global__ void downsweep_kernel(int length, int twod, int twod1, int* device_result) { int index = blockIdx.x * blockDim.x + threadIdx.x; if ((index < length) && (index%twod1 == 0)) { int t = device_result[index + twod - 1]; device_result[index + twod - 1] = device_result[index + twod1 - 1]; device_result[index + twod1 -1] += t; } } __global__ void print_kernel(int length, int* device_result) { for(int i = 0; i < length; i++) { printf("out[%d] = %d\n",i,device_result[i]); } } void exclusive_scan(int* device_start, int length, int* device_result) { //I am doing inplace update on device_result /* Fill in this function with your exclusive scan implementation. * You are passed the locations of the input and output in device memory, * but this is host code -- you will need to declare one or more CUDA * kernels (with the __global__ decorator) in order to actually run code * in parallel on the GPU. * Note you are given the real length of the array, but may assume that * both the input and the output arrays are sized to accommodate the next * power of 2 larger than the input. */ const int threadsPerBlock = 512; const int numBlocks = (length-1)/threadsPerBlock + 1; for (int twod = 1; twod < length ; twod *=2 ) { int twod1 = twod*2; hipLaunchKernelGGL(( upsweep_kernel), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, length, twod, twod1, device_result); hipError_t err1 = hipDeviceSynchronize(); if(err1 != hipSuccess) { fprintf(stderr,"%s in %s at line %d\n", hipGetErrorString(err1),__FILE__,__LINE__); exit(EXIT_FAILURE); } } for (int twod = length/2; twod >= 1; twod /= 2) { int twod1 = twod*2; hipLaunchKernelGGL(( downsweep_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, length, twod, twod1, device_result); hipError_t err2 = hipDeviceSynchronize(); if(err2 != hipSuccess) { fprintf(stderr,"%s in %s at line %d\n", hipGetErrorString(err2),__FILE__,__LINE__); exit(EXIT_FAILURE); } } } /* This function is a wrapper around the code you will write - it copies the * input to the GPU and times the invocation of the exclusive_scan() function * above. You should not modify it. */ double cudaScan(int* inarray, int* end, int* resultarray) { int* device_result; int* device_input; // We round the array sizes up to a power of 2, but elements after // the end of the original input are left uninitialized and not checked // for correctness. // You may have an easier time in your implementation if you assume the // array's length is a power of 2, but this will result in extra work on // non-power-of-2 inputs. 
int rounded_length = nextPow2(end - inarray); hipMalloc((void **)&device_result, sizeof(int) * rounded_length); hipMalloc((void **)&device_input, sizeof(int) * rounded_length); hipMemcpy(device_input, inarray, (end - inarray) * sizeof(int), hipMemcpyHostToDevice); // For convenience, both the input and output vectors on the device are // initialized to the input values. This means that you are free to simply // implement an in-place scan on the result vector if you wish. // If you do this, you will need to keep that fact in mind when calling // exclusive_scan from find_repeats. hipMemcpy(device_result, inarray, (end - inarray) * sizeof(int), hipMemcpyHostToDevice); double startTime = CycleTimer::currentSeconds(); //exclusive_scan(device_input, end - inarray, device_result); exclusive_scan(device_input, rounded_length, device_result); // Wait for any work left over to be completed. hipDeviceSynchronize(); double endTime = CycleTimer::currentSeconds(); double overallDuration = endTime - startTime; hipMemcpy(resultarray, device_result, (end - inarray) * sizeof(int), hipMemcpyDeviceToHost); return overallDuration; } /* Wrapper around the Thrust library's exclusive scan function * As above, copies the input onto the GPU and times only the execution * of the scan itself. * You are not expected to produce competitive performance to the * Thrust version. */ double cudaScanThrust(int* inarray, int* end, int* resultarray) { int length = end - inarray; thrust::device_ptr<int> d_input = thrust::device_malloc<int>(length); thrust::device_ptr<int> d_output = thrust::device_malloc<int>(length); hipMemcpy(d_input.get(), inarray, length * sizeof(int), hipMemcpyHostToDevice); double startTime = CycleTimer::currentSeconds(); thrust::exclusive_scan(d_input, d_input + length, d_output); // Wait for any work left over to be completed. hipDeviceSynchronize(); double endTime = CycleTimer::currentSeconds(); hipMemcpy(resultarray, d_output.get(), length * sizeof(int), hipMemcpyDeviceToHost); thrust::device_free(d_input); thrust::device_free(d_output); double overallDuration = endTime - startTime; return overallDuration; } __global__ void predicate_kernel(int* device_input, int length, int* predicate) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < length) { if(device_input[index] == device_input[index+1]) { predicate[index] = 1; } else { predicate[index] = 0; } } } __global__ void repeat_kernel(int* predicate, int length, int* device_output) { int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < length) { if(predicate[index] != predicate[index+1]) { device_output[predicate[index]] = index; } } } int find_repeats(int *device_input, int length, int *device_output) { /* Finds all pairs of adjacent repeated elements in the list, storing the * indices of the first element of each pair (in order) into device_result. * Returns the number of pairs found. * Your task is to implement this function. You will probably want to * make use of one or more calls to exclusive_scan(), as well as * additional CUDA kernel launches. * Note: As in the scan code, we ensure that allocated arrays are a power * of 2 in size, so you can use your exclusive_scan function with them if * it requires that. However, you must ensure that the results of * find_repeats are correct given the original length. 
*/ const int threadsPerBlock = 512; const int numBlocks = (length-1)/threadsPerBlock + 1; int *device_predicate; int rounded_length = nextPow2(length); hipError_t err3 = hipMalloc((void **)&device_predicate, sizeof(int) * rounded_length); if(err3 != hipSuccess) { fprintf(stderr,"%s in %s at line %d\n", hipGetErrorString(err3),__FILE__,__LINE__); exit(EXIT_FAILURE); } hipLaunchKernelGGL(( predicate_kernel), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, device_input, length, device_predicate); hipError_t err4 = hipDeviceSynchronize(); if(err4 != hipSuccess) { fprintf(stderr,"%s in %s at line %d\n", hipGetErrorString(err4),__FILE__,__LINE__); exit(EXIT_FAILURE); } exclusive_scan(device_input, rounded_length, device_predicate); hipLaunchKernelGGL(( repeat_kernel), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, device_predicate, length, device_output); // copy the repeat count back from device memory int ret_len = 0; hipMemcpy(&ret_len, device_predicate + length - 1, sizeof(int), hipMemcpyDeviceToHost); hipFree(device_predicate); return ret_len; } /* Timing wrapper around find_repeats. You should not modify this function. */ double cudaFindRepeats(int *input, int length, int *output, int *output_length) { int *device_input; int *device_output; int rounded_length = nextPow2(length); hipMalloc((void **)&device_input, rounded_length * sizeof(int)); hipMalloc((void **)&device_output, rounded_length * sizeof(int)); hipMemcpy(device_input, input, length * sizeof(int), hipMemcpyHostToDevice); double startTime = CycleTimer::currentSeconds(); int result = find_repeats(device_input, length, device_output); //int result = find_repeats(device_input, rounded_length, device_output); // Wait for any work left over to be completed. hipDeviceSynchronize(); double endTime = CycleTimer::currentSeconds(); *output_length = result; hipMemcpy(output, device_output, length * sizeof(int), hipMemcpyDeviceToHost); hipFree(device_input); hipFree(device_output); return endTime - startTime; } void printCudaInfo() { // for fun, just print out some stats on the machine int deviceCount = 0; hipError_t err = hipGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { hipDeviceProp_t deviceProps; hipGetDeviceProperties(&deviceProps, i); printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); }
17633c86c8d08c02d514ad2335e1ad3eda42a562.cu
#include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> #include <driver_functions.h> #include <thrust/scan.h> #include <thrust/device_ptr.h> #include <thrust/device_malloc.h> #include <thrust/device_free.h> #include "CycleTimer.h" extern float toBW(int bytes, float sec); /* Helper function to round up to a power of 2. */ static inline int nextPow2(int n) { n--; n |= n >> 1; n |= n >> 2; n |= n >> 4; n |= n >> 8; n |= n >> 16; n++; return n; } __global__ void upsweep_kernel(int length, int twod, int twod1, int* device_result) { // compute overall index from position of thread in current block, // and given the block we are in int index = blockIdx.x * blockDim.x + threadIdx.x; if ((index < length) && (index%twod1 == 0)) { device_result[index + twod1 - 1] += device_result[index + twod - 1]; } if(index == length-1) { device_result[index] = 0; } } __global__ void downsweep_kernel(int length, int twod, int twod1, int* device_result) { int index = blockIdx.x * blockDim.x + threadIdx.x; if ((index < length) && (index%twod1 == 0)) { int t = device_result[index + twod - 1]; device_result[index + twod - 1] = device_result[index + twod1 - 1]; device_result[index + twod1 -1] += t; } } __global__ void print_kernel(int length, int* device_result) { for(int i = 0; i < length; i++) { printf("out[%d] = %d\n",i,device_result[i]); } } void exclusive_scan(int* device_start, int length, int* device_result) { //I am doing inplace update on device_result /* Fill in this function with your exclusive scan implementation. * You are passed the locations of the input and output in device memory, * but this is host code -- you will need to declare one or more CUDA * kernels (with the __global__ decorator) in order to actually run code * in parallel on the GPU. * Note you are given the real length of the array, but may assume that * both the input and the output arrays are sized to accommodate the next * power of 2 larger than the input. */ const int threadsPerBlock = 512; const int numBlocks = (length-1)/threadsPerBlock + 1; for (int twod = 1; twod < length ; twod *=2 ) { int twod1 = twod*2; upsweep_kernel<<<numBlocks,threadsPerBlock>>>(length, twod, twod1, device_result); cudaError_t err1 = cudaDeviceSynchronize(); if(err1 != cudaSuccess) { fprintf(stderr,"%s in %s at line %d\n", cudaGetErrorString(err1),__FILE__,__LINE__); exit(EXIT_FAILURE); } } for (int twod = length/2; twod >= 1; twod /= 2) { int twod1 = twod*2; downsweep_kernel<<<numBlocks, threadsPerBlock>>>(length, twod, twod1, device_result); cudaError_t err2 = cudaDeviceSynchronize(); if(err2 != cudaSuccess) { fprintf(stderr,"%s in %s at line %d\n", cudaGetErrorString(err2),__FILE__,__LINE__); exit(EXIT_FAILURE); } } } /* This function is a wrapper around the code you will write - it copies the * input to the GPU and times the invocation of the exclusive_scan() function * above. You should not modify it. */ double cudaScan(int* inarray, int* end, int* resultarray) { int* device_result; int* device_input; // We round the array sizes up to a power of 2, but elements after // the end of the original input are left uninitialized and not checked // for correctness. // You may have an easier time in your implementation if you assume the // array's length is a power of 2, but this will result in extra work on // non-power-of-2 inputs. 
int rounded_length = nextPow2(end - inarray); cudaMalloc((void **)&device_result, sizeof(int) * rounded_length); cudaMalloc((void **)&device_input, sizeof(int) * rounded_length); cudaMemcpy(device_input, inarray, (end - inarray) * sizeof(int), cudaMemcpyHostToDevice); // For convenience, both the input and output vectors on the device are // initialized to the input values. This means that you are free to simply // implement an in-place scan on the result vector if you wish. // If you do this, you will need to keep that fact in mind when calling // exclusive_scan from find_repeats. cudaMemcpy(device_result, inarray, (end - inarray) * sizeof(int), cudaMemcpyHostToDevice); double startTime = CycleTimer::currentSeconds(); //exclusive_scan(device_input, end - inarray, device_result); exclusive_scan(device_input, rounded_length, device_result); // Wait for any work left over to be completed. cudaDeviceSynchronize(); double endTime = CycleTimer::currentSeconds(); double overallDuration = endTime - startTime; cudaMemcpy(resultarray, device_result, (end - inarray) * sizeof(int), cudaMemcpyDeviceToHost); return overallDuration; } /* Wrapper around the Thrust library's exclusive scan function * As above, copies the input onto the GPU and times only the execution * of the scan itself. * You are not expected to produce competitive performance to the * Thrust version. */ double cudaScanThrust(int* inarray, int* end, int* resultarray) { int length = end - inarray; thrust::device_ptr<int> d_input = thrust::device_malloc<int>(length); thrust::device_ptr<int> d_output = thrust::device_malloc<int>(length); cudaMemcpy(d_input.get(), inarray, length * sizeof(int), cudaMemcpyHostToDevice); double startTime = CycleTimer::currentSeconds(); thrust::exclusive_scan(d_input, d_input + length, d_output); // Wait for any work left over to be completed. cudaDeviceSynchronize(); double endTime = CycleTimer::currentSeconds(); cudaMemcpy(resultarray, d_output.get(), length * sizeof(int), cudaMemcpyDeviceToHost); thrust::device_free(d_input); thrust::device_free(d_output); double overallDuration = endTime - startTime; return overallDuration; } __global__ void predicate_kernel(int* device_input, int length, int* predicate) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < length) { if(device_input[index] == device_input[index+1]) { predicate[index] = 1; } else { predicate[index] = 0; } } } __global__ void repeat_kernel(int* predicate, int length, int* device_output) { int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < length) { if(predicate[index] != predicate[index+1]) { device_output[predicate[index]] = index; } } } int find_repeats(int *device_input, int length, int *device_output) { /* Finds all pairs of adjacent repeated elements in the list, storing the * indices of the first element of each pair (in order) into device_result. * Returns the number of pairs found. * Your task is to implement this function. You will probably want to * make use of one or more calls to exclusive_scan(), as well as * additional CUDA kernel launches. * Note: As in the scan code, we ensure that allocated arrays are a power * of 2 in size, so you can use your exclusive_scan function with them if * it requires that. However, you must ensure that the results of * find_repeats are correct given the original length. 
*/ const int threadsPerBlock = 512; const int numBlocks = (length-1)/threadsPerBlock + 1; int *device_predicate; int rounded_length = nextPow2(length); cudaError_t err3 = cudaMalloc((void **)&device_predicate, sizeof(int) * rounded_length); if(err3 != cudaSuccess) { fprintf(stderr,"%s in %s at line %d\n", cudaGetErrorString(err3),__FILE__,__LINE__); exit(EXIT_FAILURE); } predicate_kernel<<<numBlocks,threadsPerBlock>>>(device_input, length, device_predicate); cudaError_t err4 = cudaDeviceSynchronize(); if(err4 != cudaSuccess) { fprintf(stderr,"%s in %s at line %d\n", cudaGetErrorString(err4),__FILE__,__LINE__); exit(EXIT_FAILURE); } exclusive_scan(device_input, rounded_length, device_predicate); repeat_kernel<<<numBlocks,threadsPerBlock>>>(device_predicate, length, device_output); // copy the repeat count back from device memory int ret_len = 0; cudaMemcpy(&ret_len, device_predicate + length - 1, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(device_predicate); return ret_len; } /* Timing wrapper around find_repeats. You should not modify this function. */ double cudaFindRepeats(int *input, int length, int *output, int *output_length) { int *device_input; int *device_output; int rounded_length = nextPow2(length); cudaMalloc((void **)&device_input, rounded_length * sizeof(int)); cudaMalloc((void **)&device_output, rounded_length * sizeof(int)); cudaMemcpy(device_input, input, length * sizeof(int), cudaMemcpyHostToDevice); double startTime = CycleTimer::currentSeconds(); int result = find_repeats(device_input, length, device_output); //int result = find_repeats(device_input, rounded_length, device_output); // Wait for any work left over to be completed. cudaDeviceSynchronize(); double endTime = CycleTimer::currentSeconds(); *output_length = result; cudaMemcpy(output, device_output, length * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(device_input); cudaFree(device_output); return endTime - startTime; } void printCudaInfo() { // for fun, just print out some stats on the machine int deviceCount = 0; cudaError_t err = cudaGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { cudaDeviceProp deviceProps; cudaGetDeviceProperties(&deviceProps, i); printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); }
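exclusive_scan above implements the work-efficient upsweep/downsweep scan on the GPU; a trivial sequential reference is sketched below for checking results on small inputs (the helper name exclusive_scan_cpu is illustrative, not part of the assignment harness). In find_repeats, the scanned predicate gives each detected pair its slot in the output array, and the final element of the scan holds the pair count.

// Sequential reference for exclusive_scan(): element i receives the sum of elements 0..i-1.
#include <vector>

std::vector<int> exclusive_scan_cpu(const std::vector<int> &in)
{
    std::vector<int> out(in.size(), 0);
    int running = 0;
    for (size_t i = 0; i < in.size(); ++i) {
        out[i] = running;
        running += in[i];
    }
    return out;
}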
1cad9640d765b61f33091a4769cf514c78c91c7b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <float.h> #include <ATen/ATen.h> #include <THH/THHAtomics.cuh> #include "limits.cuh" using namespace at; // fix for pytorch<=0.4.1 #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) #define THREADS_PER_BLOCK 1024 inline int GET_BLOCKS(const int N) { int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; int max_block_num = 65000; return min(optimal_block_num, max_block_num); } //type-safe sign template <typename scalar_t> __device__ scalar_t sgn(scalar_t val) { return (scalar_t(0) < val) - (val < scalar_t(0)); } // Overflow and Underflow clamp template <typename scalar_t> __device__ scalar_t clamp(const scalar_t n, const scalar_t lower, const scalar_t upper) { const scalar_t tmp = abs(n); const scalar_t result = max(lower, min(tmp, upper)); return result * sgn(n); } template <typename scalar_t> __global__ void SoftPool1dForward(const int nthreads, const scalar_t *bottom_input, const int batches, const int channels, const int dim, const int kernel_d, const int stride_d, scalar_t *output_data){ int pooled_dim = dim/stride_d; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pd = index % pooled_dim; int c = (index / pooled_dim) % channels; int n = index / pooled_dim / channels; const int offset = (n * channels + c) * dim; const scalar_t *offset_bottom_input = bottom_input + offset; scalar_t mask_sum = 0.; output_data[index] = 0.; const scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); const scalar_t zero = 0.; for(int id=0; id<kernel_d; id++){ const int d_offset = pd*stride_d + id - kernel_d/2; if(d_offset >= dim || d_offset < 0)continue; const int offset = d_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int id=0; id<kernel_d; id++){ const int d_offset = pd*stride_d + id - kernel_d/2; if(d_offset >= dim || d_offset < 0)continue; const int offset = d_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) 0 <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset] <= FLT_MAX scalar_t weighted_inp = offset_bottom_input[offset] * mask; weighted_inp = clamp(weighted_inp, zero, upper); // Overflow check (F.) 
0 <= sum[(e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset]] <= FLT_MAX output_data[index] += weighted_inp; output_data[index] = clamp(output_data[index], zero, upper); } } } template <typename scalar_t> __global__ void SoftPool2dForward(const int nthreads, const scalar_t *bottom_input, const int batches, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, scalar_t *output_data){ int pooled_height = height/stride_h; int pooled_width = width/stride_w; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const int offset = (n * channels + c) * height * width; const scalar_t *offset_bottom_input = bottom_input + offset; scalar_t mask_sum = 0.; output_data[index] = 0.; const scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); const scalar_t zero = 0.; for(int iy=0; iy<kernel_h; iy++){ const int y_offset = ph*stride_h + iy - kernel_h/2; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = pw*stride_w + ix - kernel_w/2; if(x_offset >= width || x_offset < 0)continue; const int offset = y_offset*width + x_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int iy=0; iy<kernel_h; iy++){ const int y_offset = ph*stride_h + iy - kernel_h/2; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = pw*stride_w + ix - kernel_w/2; if(x_offset >= width || x_offset < 0)continue; const int offset = y_offset*width + x_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) 0 <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset] <= FLT_MAX scalar_t weighted_inp = offset_bottom_input[offset] * mask; weighted_inp = clamp(weighted_inp, zero, upper); // Overflow check (F.) 
0 <= sum[(e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset]] <= FLT_MAX output_data[index] += weighted_inp; output_data[index] = clamp(output_data[index], zero, upper); } } } } template <typename scalar_t> __global__ void SoftPool3dForward(const int nthreads, const scalar_t *bottom_input, const int batches, const int channels, const int depth, const int height, const int width, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, scalar_t *output_data){ int pooled_depth = depth/stride_d; int pooled_height = height/stride_h; int pooled_width = width/stride_w; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int pd = (index / pooled_width / pooled_height) % pooled_depth; int c = (index / pooled_width / pooled_height / pooled_depth) % channels; int n = index / pooled_width / pooled_height / pooled_depth / channels; const int offset = (n * channels + c) * depth * height * width; const scalar_t *offset_bottom_input = bottom_input + offset; scalar_t mask_sum = 0.; output_data[index] = 0.; const scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); const scalar_t zero = 0.; for(int id=0; id<kernel_d; id++){ const int d_offset = pd*stride_d + id - kernel_d/2; if(d_offset >= depth || d_offset < 0)continue; for(int iy=0; iy<kernel_h; iy++){ const int y_offset = ph*stride_h + iy - kernel_h/2; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = pw*stride_w + ix - kernel_w/2; if(x_offset >= width || x_offset < 0)continue; const int offset = d_offset*height + y_offset*width + x_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } } } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int id=0; id<kernel_d; id++){ const int d_offset = pd*stride_d + id - kernel_d/2; if(d_offset >= depth || d_offset < 0)continue; for(int iy=0; iy<kernel_h; iy++){ const int y_offset = ph*stride_h + iy - kernel_h/2; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = pw*stride_w + ix - kernel_w/2; if(x_offset >= width || x_offset < 0)continue; const int offset = d_offset*height + y_offset*width + x_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) 0 <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset] <= FLT_MAX scalar_t weighted_inp = offset_bottom_input[offset] * mask; weighted_inp = clamp(weighted_inp, zero, upper); // Overflow check (F.) 
0 <= sum[(e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset]] <= FLT_MAX output_data[index] += weighted_inp; output_data[index] = clamp(output_data[index], zero, upper); } } } } } int SoftPool1dForwardLauncher(const at::Tensor input, const int batches, const int channels, const int dim, const int kernel_d, const int stride_d, at::Tensor output){ const int output_size = batches * dim/stride_d * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool1dLauncherForward", ([&] { const scalar_t *bottom_input = input.data_ptr<scalar_t>(); scalar_t *output_data = output.data_ptr<scalar_t>(); hipLaunchKernelGGL(( SoftPool1dForward<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0, output_size, bottom_input, batches, channels, dim, kernel_d, stride_d, output_data); }) ); hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err)); exit(-1); } return 1; } int SoftPool2dForwardLauncher(const at::Tensor input, const int batches, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, at::Tensor output){ const int output_size = batches * height/stride_h * width/stride_w * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool2dLauncherForward", ([&] { const scalar_t *bottom_input = input.data_ptr<scalar_t>(); scalar_t *output_data = output.data_ptr<scalar_t>(); hipLaunchKernelGGL(( SoftPool2dForward<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0, output_size, bottom_input, batches, channels, height, width, kernel_h, kernel_w, stride_h, stride_w, output_data); }) ); hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err)); exit(-1); } return 1; } int SoftPool3dForwardLauncher(const at::Tensor input, const int batches, const int channels, const int depth, const int height, const int width, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, at::Tensor output){ const int output_size = batches * depth/stride_d * height/stride_h * width/stride_w * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool3dLauncherForward", ([&] { const scalar_t *bottom_input = input.data_ptr<scalar_t>(); scalar_t *output_data = output.data_ptr<scalar_t>(); hipLaunchKernelGGL(( SoftPool3dForward<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0, output_size, bottom_input, batches, channels, depth, height, width, kernel_d, kernel_h, kernel_w, stride_d, stride_h, stride_w, output_data); }) ); hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err)); exit(-1); } return 1; } template <typename scalar_t> __global__ void SoftPool1dBackward(const int nthreads, const scalar_t *diff_output, const scalar_t *data_input, const int batches, const int channels, const int dim, const int kernel_d, const int stride_d, scalar_t *diff_input){ int pooled_dim = dim/stride_d; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pd = index % pooled_dim; int c = (index / pooled_dim) % channels; int n = index / pooled_dim / channels; const int offset0 = (n * channels + c) * dim; const scalar_t *offset_data_input = data_input + offset0; const scalar_t diff_output_index = diff_output[index]; scalar_t *offset_diff_input = diff_input + offset0; const int 
base_d = pd*stride_d - kernel_d/2; scalar_t mask_sum = 0.; const scalar_t zero = 0.; const scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); for(int id=0; id<kernel_d; id++){ const int d_offset = base_d + id; if(d_offset >= dim || d_offset < 0)continue; const int offset = d_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_data_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int id=0; id<kernel_d; id++){ const int d_offset = base_d + id; if(d_offset >= dim || d_offset < 0)continue; const int offset = d_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_data_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) 0 <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * grad <= FLT_MAX scalar_t weighted_grad = diff_output_index * mask; weighted_grad = clamp(weighted_grad, zero, upper); atomicAdd(offset_diff_input+offset, weighted_grad); } } } template <typename scalar_t> __global__ void SoftPool2dBackward(const int nthreads, const scalar_t *diff_output, const scalar_t *data_input, const int batches, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, scalar_t *diff_input){ int pooled_height = height/stride_h; int pooled_width = width/stride_w; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const int offset0 = (n * channels + c) * height * width; const scalar_t *offset_data_input = data_input + offset0; const scalar_t diff_output_index = diff_output[index]; scalar_t *offset_diff_input = diff_input + offset0; const int base_y = ph*stride_h - kernel_h/2; const int base_x = pw*stride_w - kernel_w/2; scalar_t mask_sum = 0.; scalar_t zero = 0.; const scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); for(int iy=0; iy<kernel_h; iy++){ const int y_offset = base_y + iy; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = base_x + ix; if(x_offset >= width || x_offset < 0)continue; const int offset = y_offset*width + x_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_data_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int iy=0; iy<kernel_h; iy++){ const int y_offset = base_y + iy; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = base_x + ix; if(x_offset >= width || x_offset < 0)continue; const int offset = y_offset*width + x_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_data_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) 0 <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 
0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * grad <= FLT_MAX scalar_t weighted_grad = diff_output_index * mask; weighted_grad = clamp(weighted_grad, zero, upper); atomicAdd(offset_diff_input+offset, weighted_grad); } } } } template <typename scalar_t> __global__ void SoftPool3dBackward(const int nthreads, const scalar_t *diff_output, const scalar_t *data_input, const int batches, const int channels, const int depth, const int height, const int width, const int kernel_d, const int kernel_h, const int kernel_w , const int stride_d, const int stride_h, const int stride_w, scalar_t *diff_input){ int pooled_depth = depth/stride_d; int pooled_height = width/stride_h; int pooled_width = width/stride_w; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int pd = (index / pooled_width / pooled_height) % pooled_depth; int c = (index / pooled_width / pooled_height / pooled_depth) % channels; int n = index / pooled_width / pooled_height / pooled_depth / channels; const int offset0 = (n * channels + c) * depth * height * width; const scalar_t *offset_data_input = data_input + offset0; const scalar_t diff_output_index = diff_output[index]; scalar_t *offset_diff_input = diff_input + offset0; const int base_d = pd*stride_d - kernel_d/2; const int base_y = ph*stride_h - kernel_h/2; const int base_x = pw*stride_w - kernel_w/2; scalar_t mask_sum = 0.; scalar_t zero = 0.; const scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); for(int id=0; id<kernel_d; id++){ const int d_offset = base_d + id; if(d_offset >= depth || d_offset < 0)continue; for(int iy=0; iy<kernel_h; iy++){ const int y_offset = base_y + iy; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = base_x + ix; if(x_offset >= width || x_offset < 0)continue; const int offset = d_offset*height + y_offset*width + x_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_data_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } } } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int id=0; id<kernel_d; id++){ const int d_offset = base_d + id; if(d_offset >= depth || d_offset < 0)continue; for(int iy=0; iy<kernel_h; iy++){ const int y_offset = base_y + iy; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = base_x + ix; if(x_offset >= width || x_offset < 0)continue; const int offset = d_offset*height + y_offset*width + x_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_data_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) 0 <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 
0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * grad <= FLT_MAX scalar_t weighted_grad = diff_output_index * mask; weighted_grad = clamp(weighted_grad, zero, upper); atomicAdd(offset_diff_input+offset, weighted_grad); } } } } } int SoftPool1dBackwardLauncher(const at::Tensor output_grad, const at::Tensor input, const int batches, const int channels, const int dim, const int kernel_d, const int stride_d, at::Tensor input_grad){ const int output_size = batches * dim/stride_d * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool1dLauncherBackward", ([&] { scalar_t *diff_input = input_grad.data_ptr<scalar_t>(); const scalar_t *diff_output = output_grad.data_ptr<scalar_t>(); const scalar_t *data_input = input.data_ptr<scalar_t>(); hipLaunchKernelGGL(( SoftPool1dBackward<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0, output_size, diff_output, data_input, batches, channels, dim, kernel_d, stride_d, diff_input); } ) ); hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err)); exit(-1); } return 1; } int SoftPool2dBackwardLauncher(const at::Tensor output_grad, const at::Tensor input, const int batches, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, at::Tensor input_grad){ const int output_size = batches * height/stride_h * width/stride_w * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool2dLauncherBackward", ([&] { scalar_t *diff_input = input_grad.data_ptr<scalar_t>(); const scalar_t *diff_output = output_grad.data_ptr<scalar_t>(); const scalar_t *data_input = input.data_ptr<scalar_t>(); hipLaunchKernelGGL(( SoftPool2dBackward<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0, output_size, diff_output, data_input, batches, channels, height, width, kernel_h, kernel_w, stride_h, stride_w, diff_input); } ) ); hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err)); exit(-1); } return 1; } int SoftPool3dBackwardLauncher(const at::Tensor output_grad, const at::Tensor input, const int batches, const int channels, const int depth, const int height, const int width, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, at::Tensor input_grad){ const int output_size = batches * depth/stride_d * height/stride_h * width/stride_w * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool3dLauncherBackward", ([&] { scalar_t *diff_input = input_grad.data_ptr<scalar_t>(); const scalar_t *diff_output = output_grad.data_ptr<scalar_t>(); const scalar_t *data_input = input.data_ptr<scalar_t>(); hipLaunchKernelGGL(( SoftPool3dBackward<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0, output_size, diff_output, data_input, batches, channels, depth, height, width, kernel_d, kernel_h, kernel_w, stride_d, stride_h, stride_w, diff_input); } ) ); hipError_t err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err)); exit(-1); } return 1; }
1cad9640d765b61f33091a4769cf514c78c91c7b.cu
#include <float.h> #include <ATen/ATen.h> #include <THC/THCAtomics.cuh> #include "limits.cuh" using namespace at; // fix for pytorch<=0.4.1 #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) #define THREADS_PER_BLOCK 1024 inline int GET_BLOCKS(const int N) { int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; int max_block_num = 65000; return min(optimal_block_num, max_block_num); } //type-safe sign template <typename scalar_t> __device__ scalar_t sgn(scalar_t val) { return (scalar_t(0) < val) - (val < scalar_t(0)); } // Overflow and Underflow clamp template <typename scalar_t> __device__ scalar_t clamp(const scalar_t n, const scalar_t lower, const scalar_t upper) { const scalar_t tmp = abs(n); const scalar_t result = max(lower, min(tmp, upper)); return result * sgn(n); } template <typename scalar_t> __global__ void SoftPool1dForward(const int nthreads, const scalar_t *bottom_input, const int batches, const int channels, const int dim, const int kernel_d, const int stride_d, scalar_t *output_data){ int pooled_dim = dim/stride_d; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pd = index % pooled_dim; int c = (index / pooled_dim) % channels; int n = index / pooled_dim / channels; const int offset = (n * channels + c) * dim; const scalar_t *offset_bottom_input = bottom_input + offset; scalar_t mask_sum = 0.; output_data[index] = 0.; const scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); const scalar_t zero = 0.; for(int id=0; id<kernel_d; id++){ const int d_offset = pd*stride_d + id - kernel_d/2; if(d_offset >= dim || d_offset < 0)continue; const int offset = d_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int id=0; id<kernel_d; id++){ const int d_offset = pd*stride_d + id - kernel_d/2; if(d_offset >= dim || d_offset < 0)continue; const int offset = d_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) 0 <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset] <= FLT_MAX scalar_t weighted_inp = offset_bottom_input[offset] * mask; weighted_inp = clamp(weighted_inp, zero, upper); // Overflow check (F.) 
0 <= sum[(e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset]] <= FLT_MAX output_data[index] += weighted_inp; output_data[index] = clamp(output_data[index], zero, upper); } } } template <typename scalar_t> __global__ void SoftPool2dForward(const int nthreads, const scalar_t *bottom_input, const int batches, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, scalar_t *output_data){ int pooled_height = height/stride_h; int pooled_width = width/stride_w; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const int offset = (n * channels + c) * height * width; const scalar_t *offset_bottom_input = bottom_input + offset; scalar_t mask_sum = 0.; output_data[index] = 0.; const scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); const scalar_t zero = 0.; for(int iy=0; iy<kernel_h; iy++){ const int y_offset = ph*stride_h + iy - kernel_h/2; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = pw*stride_w + ix - kernel_w/2; if(x_offset >= width || x_offset < 0)continue; const int offset = y_offset*width + x_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int iy=0; iy<kernel_h; iy++){ const int y_offset = ph*stride_h + iy - kernel_h/2; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = pw*stride_w + ix - kernel_w/2; if(x_offset >= width || x_offset < 0)continue; const int offset = y_offset*width + x_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) 0 <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset] <= FLT_MAX scalar_t weighted_inp = offset_bottom_input[offset] * mask; weighted_inp = clamp(weighted_inp, zero, upper); // Overflow check (F.) 
0 <= sum[(e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset]] <= FLT_MAX output_data[index] += weighted_inp; output_data[index] = clamp(output_data[index], zero, upper); } } } } template <typename scalar_t> __global__ void SoftPool3dForward(const int nthreads, const scalar_t *bottom_input, const int batches, const int channels, const int depth, const int height, const int width, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, scalar_t *output_data){ int pooled_depth = depth/stride_d; int pooled_height = height/stride_h; int pooled_width = width/stride_w; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int pd = (index / pooled_width / pooled_height) % pooled_depth; int c = (index / pooled_width / pooled_height / pooled_depth) % channels; int n = index / pooled_width / pooled_height / pooled_depth / channels; const int offset = (n * channels + c) * depth * height * width; const scalar_t *offset_bottom_input = bottom_input + offset; scalar_t mask_sum = 0.; output_data[index] = 0.; const scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); const scalar_t zero = 0.; for(int id=0; id<kernel_d; id++){ const int d_offset = pd*stride_d + id - kernel_d/2; if(d_offset >= depth || d_offset < 0)continue; for(int iy=0; iy<kernel_h; iy++){ const int y_offset = ph*stride_h + iy - kernel_h/2; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = pw*stride_w + ix - kernel_w/2; if(x_offset >= width || x_offset < 0)continue; const int offset = d_offset*height + y_offset*width + x_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } } } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int id=0; id<kernel_d; id++){ const int d_offset = pd*stride_d + id - kernel_d/2; if(d_offset >= depth || d_offset < 0)continue; for(int iy=0; iy<kernel_h; iy++){ const int y_offset = ph*stride_h + iy - kernel_h/2; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = pw*stride_w + ix - kernel_w/2; if(x_offset >= width || x_offset < 0)continue; const int offset = d_offset*height + y_offset*width + x_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_bottom_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) 0 <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset] <= FLT_MAX scalar_t weighted_inp = offset_bottom_input[offset] * mask; weighted_inp = clamp(weighted_inp, zero, upper); // Overflow check (F.) 
0 <= sum[(e^{inp[offset]}/sum{e^{inp[offset]}}) * inp[offset]] <= FLT_MAX output_data[index] += weighted_inp; output_data[index] = clamp(output_data[index], zero, upper); } } } } } int SoftPool1dForwardLauncher(const at::Tensor input, const int batches, const int channels, const int dim, const int kernel_d, const int stride_d, at::Tensor output){ const int output_size = batches * dim/stride_d * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool1dLauncherForward", ([&] { const scalar_t *bottom_input = input.data_ptr<scalar_t>(); scalar_t *output_data = output.data_ptr<scalar_t>(); SoftPool1dForward<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( output_size, bottom_input, batches, channels, dim, kernel_d, stride_d, output_data); }) ); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; } int SoftPool2dForwardLauncher(const at::Tensor input, const int batches, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, at::Tensor output){ const int output_size = batches * height/stride_h * width/stride_w * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool2dLauncherForward", ([&] { const scalar_t *bottom_input = input.data_ptr<scalar_t>(); scalar_t *output_data = output.data_ptr<scalar_t>(); SoftPool2dForward<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( output_size, bottom_input, batches, channels, height, width, kernel_h, kernel_w, stride_h, stride_w, output_data); }) ); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; } int SoftPool3dForwardLauncher(const at::Tensor input, const int batches, const int channels, const int depth, const int height, const int width, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, at::Tensor output){ const int output_size = batches * depth/stride_d * height/stride_h * width/stride_w * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool3dLauncherForward", ([&] { const scalar_t *bottom_input = input.data_ptr<scalar_t>(); scalar_t *output_data = output.data_ptr<scalar_t>(); SoftPool3dForward<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( output_size, bottom_input, batches, channels, depth, height, width, kernel_d, kernel_h, kernel_w, stride_d, stride_h, stride_w, output_data); }) ); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; } template <typename scalar_t> __global__ void SoftPool1dBackward(const int nthreads, const scalar_t *diff_output, const scalar_t *data_input, const int batches, const int channels, const int dim, const int kernel_d, const int stride_d, scalar_t *diff_input){ int pooled_dim = dim/stride_d; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pd = index % pooled_dim; int c = (index / pooled_dim) % channels; int n = index / pooled_dim / channels; const int offset0 = (n * channels + c) * dim; const scalar_t *offset_data_input = data_input + offset0; const scalar_t diff_output_index = diff_output[index]; scalar_t *offset_diff_input = diff_input + offset0; const int base_d = pd*stride_d - kernel_d/2; scalar_t mask_sum = 0.; const scalar_t zero = 0.; const 
scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); for(int id=0; id<kernel_d; id++){ const int d_offset = base_d + id; if(d_offset >= dim || d_offset < 0)continue; const int offset = d_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_data_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int id=0; id<kernel_d; id++){ const int d_offset = base_d + id; if(d_offset >= dim || d_offset < 0)continue; const int offset = d_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_data_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) 0 <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * grad <= FLT_MAX scalar_t weighted_grad = diff_output_index * mask; weighted_grad = clamp(weighted_grad, zero, upper); atomicAdd(offset_diff_input+offset, weighted_grad); } } } template <typename scalar_t> __global__ void SoftPool2dBackward(const int nthreads, const scalar_t *diff_output, const scalar_t *data_input, const int batches, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, scalar_t *diff_input){ int pooled_height = height/stride_h; int pooled_width = width/stride_w; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const int offset0 = (n * channels + c) * height * width; const scalar_t *offset_data_input = data_input + offset0; const scalar_t diff_output_index = diff_output[index]; scalar_t *offset_diff_input = diff_input + offset0; const int base_y = ph*stride_h - kernel_h/2; const int base_x = pw*stride_w - kernel_w/2; scalar_t mask_sum = 0.; scalar_t zero = 0.; const scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); for(int iy=0; iy<kernel_h; iy++){ const int y_offset = base_y + iy; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = base_x + ix; if(x_offset >= width || x_offset < 0)continue; const int offset = y_offset*width + x_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_data_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int iy=0; iy<kernel_h; iy++){ const int y_offset = base_y + iy; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = base_x + ix; if(x_offset >= width || x_offset < 0)continue; const int offset = y_offset*width + x_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_data_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) 0 <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 
0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * grad <= FLT_MAX scalar_t weighted_grad = diff_output_index * mask; weighted_grad = clamp(weighted_grad, zero, upper); atomicAdd(offset_diff_input+offset, weighted_grad); } } } } template <typename scalar_t> __global__ void SoftPool3dBackward(const int nthreads, const scalar_t *diff_output, const scalar_t *data_input, const int batches, const int channels, const int depth, const int height, const int width, const int kernel_d, const int kernel_h, const int kernel_w , const int stride_d, const int stride_h, const int stride_w, scalar_t *diff_input){ int pooled_depth = depth/stride_d; int pooled_height = width/stride_h; int pooled_width = width/stride_w; CUDA_1D_KERNEL_LOOP(index, nthreads) { int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int pd = (index / pooled_width / pooled_height) % pooled_depth; int c = (index / pooled_width / pooled_height / pooled_depth) % channels; int n = index / pooled_width / pooled_height / pooled_depth / channels; const int offset0 = (n * channels + c) * depth * height * width; const scalar_t *offset_data_input = data_input + offset0; const scalar_t diff_output_index = diff_output[index]; scalar_t *offset_diff_input = diff_input + offset0; const int base_d = pd*stride_d - kernel_d/2; const int base_y = ph*stride_h - kernel_h/2; const int base_x = pw*stride_w - kernel_w/2; scalar_t mask_sum = 0.; scalar_t zero = 0.; const scalar_t upper = n_limits<scalar_t>::max(); const scalar_t lower = n_limits<scalar_t>::min(); for(int id=0; id<kernel_d; id++){ const int d_offset = base_d + id; if(d_offset >= depth || d_offset < 0)continue; for(int iy=0; iy<kernel_h; iy++){ const int y_offset = base_y + iy; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = base_x + ix; if(x_offset >= width || x_offset < 0)continue; const int offset = d_offset*height + y_offset*width + x_offset; // (Over/Under)flow check (A.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_data_input[offset]); mask = clamp(mask, zero, upper); mask_sum += mask; } } } // Overflow check (B.) FLT_MIN <= sum{e^{inp[offset]}} <= FLT_MAX mask_sum = clamp(mask_sum, lower, upper); for(int id=0; id<kernel_d; id++){ const int d_offset = base_d + id; if(d_offset >= depth || d_offset < 0)continue; for(int iy=0; iy<kernel_h; iy++){ const int y_offset = base_y + iy; if(y_offset >= height || y_offset < 0)continue; for(int ix=0; ix<kernel_w; ix++){ const int x_offset = base_x + ix; if(x_offset >= width || x_offset < 0)continue; const int offset = d_offset*height + y_offset*width + x_offset; // (Over/Under)flow check (C.) 0 <= e^{inp[offset]} <= FLT_MAX scalar_t mask = exp(offset_data_input[offset]); mask = clamp(mask, zero, upper); // Underflow check (D.) 0 <= e^{inp[offset]}/sum{e^{inp[offset]}} <= 1 mask /= mask_sum; mask = clamp(mask, zero, upper); // Underflow check (E.) 
0 <= (e^{inp[offset]}/sum{e^{inp[offset]}}) * grad <= FLT_MAX scalar_t weighted_grad = diff_output_index * mask; weighted_grad = clamp(weighted_grad, zero, upper); atomicAdd(offset_diff_input+offset, weighted_grad); } } } } } int SoftPool1dBackwardLauncher(const at::Tensor output_grad, const at::Tensor input, const int batches, const int channels, const int dim, const int kernel_d, const int stride_d, at::Tensor input_grad){ const int output_size = batches * dim/stride_d * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool1dLauncherBackward", ([&] { scalar_t *diff_input = input_grad.data_ptr<scalar_t>(); const scalar_t *diff_output = output_grad.data_ptr<scalar_t>(); const scalar_t *data_input = input.data_ptr<scalar_t>(); SoftPool1dBackward<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( output_size, diff_output, data_input, batches, channels, dim, kernel_d, stride_d, diff_input); } ) ); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; } int SoftPool2dBackwardLauncher(const at::Tensor output_grad, const at::Tensor input, const int batches, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, at::Tensor input_grad){ const int output_size = batches * height/stride_h * width/stride_w * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool2dLauncherBackward", ([&] { scalar_t *diff_input = input_grad.data_ptr<scalar_t>(); const scalar_t *diff_output = output_grad.data_ptr<scalar_t>(); const scalar_t *data_input = input.data_ptr<scalar_t>(); SoftPool2dBackward<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( output_size, diff_output, data_input, batches, channels, height, width, kernel_h, kernel_w, stride_h, stride_w, diff_input); } ) ); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; } int SoftPool3dBackwardLauncher(const at::Tensor output_grad, const at::Tensor input, const int batches, const int channels, const int depth, const int height, const int width, const int kernel_d, const int kernel_h, const int kernel_w, const int stride_d, const int stride_h, const int stride_w, at::Tensor input_grad){ const int output_size = batches * depth/stride_d * height/stride_h * width/stride_w * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "SoftPool3dLauncherBackward", ([&] { scalar_t *diff_input = input_grad.data_ptr<scalar_t>(); const scalar_t *diff_output = output_grad.data_ptr<scalar_t>(); const scalar_t *data_input = input.data_ptr<scalar_t>(); SoftPool3dBackward<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( output_size, diff_output, data_input, batches, channels, depth, height, width, kernel_d, kernel_h, kernel_w, stride_d, stride_h, stride_w, diff_input); } ) ); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err)); exit(-1); } return 1; }
f0d7a43dd3b0004b3f97a311800450a0bdc098f9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <string> #include <vector> #include <hip/hiprtc.h> #include <jitify.hpp> #include "types.hpp.jit" #include "timestamps.hpp.jit" #include "operation.h.jit" #include "traits.h.jit" const char* kernel = R"***( // #include <cstdint> // #include <type_traits> #include <cudf/types.hpp> #include <simt/limits> #include <cudf/wrappers/timestamps.hpp> // problematic // #include "operation.h" template<int N, typename T> __global__ void kernel(T* data) {} )***"; int main(void) { const std::vector<std::string> headers{ cudf_types_hpp, cudf_wrappers_timestamps_hpp, operation_h, traits_h }; static jitify::JitCache kernel_cache; jitify::Program program = kernel_cache.program(kernel, headers, { "-std=c++14", "-D__CUDACC_RTC__", "-D__CHAR_BIT__=" + std::to_string(__CHAR_BIT__), // define libcudacxx jitify guards "-D_LIBCUDACXX_HAS_NO_CTIME", "-D_LIBCUDACXX_HAS_NO_WCHAR", "-D_LIBCUDACXX_HAS_NO_CFLOAT", "-D_LIBCUDACXX_HAS_NO_STDINT", "-D_LIBCUDACXX_HAS_NO_CSTDDEF", "-D_LIBCUDACXX_HAS_NO_CLIMITS", "-D_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS", "-I/home/ptaylor/dev/rapids/jitify-libcu++-test/thirdparty/libcudacxx/include", }); }
f0d7a43dd3b0004b3f97a311800450a0bdc098f9.cu
#include <string> #include <vector> #include <nvrtc.h> #include <jitify.hpp> #include "types.hpp.jit" #include "timestamps.hpp.jit" #include "operation.h.jit" #include "traits.h.jit" const char* kernel = R"***( // #include <cstdint> // #include <type_traits> #include <cudf/types.hpp> #include <simt/limits> #include <cudf/wrappers/timestamps.hpp> // problematic // #include "operation.h" template<int N, typename T> __global__ void kernel(T* data) {} )***"; int main(void) { const std::vector<std::string> headers{ cudf_types_hpp, cudf_wrappers_timestamps_hpp, operation_h, traits_h }; static jitify::JitCache kernel_cache; jitify::Program program = kernel_cache.program(kernel, headers, { "-std=c++14", "-D__CUDACC_RTC__", "-D__CHAR_BIT__=" + std::to_string(__CHAR_BIT__), // define libcudacxx jitify guards "-D_LIBCUDACXX_HAS_NO_CTIME", "-D_LIBCUDACXX_HAS_NO_WCHAR", "-D_LIBCUDACXX_HAS_NO_CFLOAT", "-D_LIBCUDACXX_HAS_NO_STDINT", "-D_LIBCUDACXX_HAS_NO_CSTDDEF", "-D_LIBCUDACXX_HAS_NO_CLIMITS", "-D_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS", "-I/home/ptaylor/dev/rapids/jitify-libcu++-test/thirdparty/libcudacxx/include", }); }
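The program above is compiled but never launched; a hypothetical continuation of main() using jitify's launch chain might look like the sketch below. The buffer size, template arguments, and synchronization are assumptions added for illustration and are not part of the original file.

// Assumed continuation of main(): instantiate kernel<64, float> and launch it.
float* d_data = nullptr;
cudaMalloc(&d_data, 64 * sizeof(float));
using jitify::reflection::type_of;
program.kernel("kernel")
    .instantiate(64, type_of(*d_data))  // -> kernel<64, float>
    .configure(dim3(1), dim3(64))       // grid, block
    .launch(d_data);
cudaDeviceSynchronize();
cudaFree(d_data);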
01fd77caf311c1724a5d3d57ad4764d3c8b30dcd.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/primitive/include/permute.h" #include "oneflow/core/primitive/common/permute_impl.h" #include "oneflow/core/stream/cuda_stream_context.h" #include <hip/hip_runtime.h> namespace oneflow { namespace primitive { namespace permute { namespace internal { namespace { constexpr int32_t kMov4TileSize = 32; constexpr int32_t kMov2TileSize = 64; constexpr int32_t kBlockRows = 8; template<size_t num_dims, size_t movement_size, typename IndexType> __global__ void PermuteKernel(PermuteKernelParams<num_dims, IndexType> params) { using T = typename std::aligned_storage<movement_size, movement_size>::type; const T* src = reinterpret_cast<const T*>(params.src); T* dst = reinterpret_cast<T*>(params.dst); IndexType src_index[num_dims]; IndexType dst_index[num_dims]; CUDA_1D_KERNEL_LOOP_T(IndexType, i, params.count) { params.dst_index_helper.OffsetToNdIndex(i, dst_index); #pragma unroll for (size_t dim = 0; dim < num_dims; ++dim) { src_index[params.permutation[dim]] = dst_index[dim]; } IndexType src_offset = params.src_index_helper.NdIndexToOffset(src_index); dst[i] = src[src_offset]; } } // (B, X, Y) -> (B, Y, X) // refer from https://developer.nvidia.com/blog/efficient-matrix-transpose-cuda-cc/ template<size_t num_dims, size_t movement_size, size_t tile_size, typename IndexType> __global__ void BatchTransposeKernel(const void* src_ptr, void* dst_ptr, IndexType H, IndexType W, IndexType num_tile_rows, IndexType num_tile_cols, int32_t block_nums) { using T = typename std::aligned_storage<movement_size, movement_size>::type; __shared__ T tile[tile_size][tile_size + 1]; // To avoid bank conflict. const T* src = reinterpret_cast<const T*>(src_ptr); T* dst = reinterpret_cast<T*>(dst_ptr); IndexType batch_num_tile = num_tile_rows * num_tile_cols; for (int i = blockIdx.x, step = gridDim.x; i < block_nums; i += step) { const IndexType batch_index = i / batch_num_tile; // the index of batch. const IndexType flatten_index = i - batch_index * batch_num_tile; // equal to i % (num_tile_rows*num_tile_cols). the // flatten index of tile in a batch. const IndexType row_index = flatten_index / num_tile_cols; // the row index of tile in a batch. const IndexType col_index = flatten_index - row_index * num_tile_cols; // equal to k % num_tile_cols. the col index of tile in a batch. const IndexType offset = batch_index * H * W; IndexType x = col_index * tile_size + threadIdx.x; IndexType y = row_index * tile_size + threadIdx.y; if (x < W) { IndexType y_range = ((tile_size - threadIdx.y) < (H - y)) ? (tile_size - threadIdx.y) : (H - y); #pragma unroll // each thread process 4 elements. // `i < y_range` equals to: `threadIdx.y + i < tile_size && y + i < H`. 
for (int i = 0; i < y_range; i += kBlockRows) { tile[threadIdx.y + i][threadIdx.x] = src[offset + (y + i) * W + x]; } } __syncthreads(); x = row_index * tile_size + threadIdx.x; y = col_index * tile_size + threadIdx.y; if (x < H) { IndexType x_range = ((tile_size - threadIdx.y) < (W - y)) ? (tile_size - threadIdx.y) : (W - y); #pragma unroll // `i < x_range` equals to: `threadIdx.y + i < tile_size && y + i < W`. for (int i = 0; i < x_range; i += kBlockRows) { dst[offset + (y + i) * H + x] = tile[threadIdx.x][threadIdx.y + i]; } } __syncthreads(); } } /* Here is a Movementsie=2 version of Batch Transpose. When the H W can be divided by 2. we can read data use movementsize=4, and write back as movementsize=2. */ template<size_t num_dims, size_t tile_size, typename IndexType> __global__ void BatchTransposeMovement2Kernel(const void* src_ptr, void* dst_ptr, IndexType rows, IndexType cols, IndexType num_tile_rows, IndexType num_tile_cols, int32_t block_nums) { static_assert(tile_size % 2 == 0); using T_MOV2 = typename std::aligned_storage<2, 2>::type; using T_MOV4 = typename std::aligned_storage<4, 4>::type; const T_MOV4* src = reinterpret_cast<const T_MOV4*>(src_ptr); T_MOV4* dst = reinterpret_cast<T_MOV4*>(dst_ptr); // Use union structure to process Load and Store. __shared__ union { T_MOV2 tile_m2[tile_size][tile_size + 2]; // half [64][66] T_MOV4 tile_m4[tile_size][tile_size / 2 + 1]; // half2 [64][33] } tile_mem; IndexType batch_num_tile = num_tile_rows * num_tile_cols; for (int i = blockIdx.x, step = gridDim.x; i < block_nums; i += step) { const IndexType batch_index = i / batch_num_tile; // the index of batch. const IndexType flatten_index = i - batch_index * batch_num_tile; // equal to i%(num_tile_rows*num_tile_cols). the flatten // index of tile in a batch. const IndexType row_index = flatten_index / num_tile_cols; // the row index of tile in a batch. const IndexType col_index = flatten_index - row_index * num_tile_cols; // equal to k % num_tile_cols. the col index of tile in a batch. const IndexType offset = batch_index * rows * cols; IndexType x = col_index * tile_size + threadIdx.x * 2; // cause each thread process a half2 element, we // need to multiply 2 for threadIdx.x. IndexType y = row_index * tile_size + threadIdx.y; if (x < cols) { // each thread process 4 elements. IndexType y_range = ((tile_size - threadIdx.y) < (rows - y)) ? (tile_size - threadIdx.y) : (rows - y); #pragma unroll // `i < y_range` equals to: `threadIdx.y + i < tile_size && y + i < rows`. for (int i = 0; i < y_range; i += kBlockRows) { // each thread load a half2. tile_mem.tile_m4[threadIdx.y + i][threadIdx.x] = src[(offset + (y + i) * cols + x) / 2]; } } __syncthreads(); x = row_index * tile_size + threadIdx.x * 2; // cause each thread process a half2 element, we // need to multiply 2 for threadIdx.x. y = col_index * tile_size + threadIdx.y; if (x < rows) { IndexType x_range = ((tile_size - threadIdx.y) < (cols - y)) ? (tile_size - threadIdx.y) : (cols - y); #pragma unroll // `i < x_range` equals to: `threadIdx.y + i < tile_size && y + i < cols`. for (int i = 0; i < x_range; i += kBlockRows) { /* When write back as column, it cannot be stored as half2 directly. So we split as 2 half elements, and write back separately. 
*/ union { T_MOV4 m4; T_MOV2 m2[2]; } tmp_storage; tmp_storage.m2[0] = tile_mem.tile_m2[threadIdx.x * 2][threadIdx.y + i]; tmp_storage.m2[1] = tile_mem.tile_m2[threadIdx.x * 2 + 1][threadIdx.y + i]; dst[(offset + (y + i) * rows + x) / 2] = tmp_storage.m4; } } __syncthreads(); } } template<size_t num_dims, size_t movement_size, size_t tile_size, typename IndexType> void LaunchBatchTransposeKernel(hipStream_t& cuda_stream, const PermuteKernelParams<num_dims, IndexType>& params, const IndexType& num_batches, const IndexType& rows, const IndexType& cols) { IndexType num_tile_rows = (rows + tile_size - 1) / tile_size; IndexType num_tile_cols = (cols + tile_size - 1) / tile_size; const int32_t block_nums = num_batches * num_tile_rows * num_tile_cols; int32_t checked_block_nums = ::min(block_nums, kCudaMaxBlocksNum); if (tile_size == kMov2TileSize) { const int32_t half2_thread = tile_size / 2; // cause each thread process two half elements. hipLaunchKernelGGL(( BatchTransposeMovement2Kernel<num_dims, kMov2TileSize, IndexType>) , dim3(checked_block_nums), dim3(dim3(half2_thread, kBlockRows)), 0, cuda_stream, params.src, params.dst, rows, cols, num_tile_rows, num_tile_cols, block_nums); // Set threads num as 32x8 cause each threads // process 4 elements to 32x32 share memory. } else { hipLaunchKernelGGL(( BatchTransposeKernel<num_dims, movement_size, tile_size, IndexType>) , dim3(checked_block_nums), dim3(dim3(tile_size, kBlockRows)), 0, cuda_stream, params.src, params.dst, rows, cols, num_tile_rows, num_tile_cols, block_nums); } } template<size_t tile_size, typename IndexType> bool CheckIfGreaterEqualThanTileSize(const IndexType& rows, const IndexType& cols) { if (rows < tile_size || cols < tile_size) { return false; } return true; } template<size_t num_dims, size_t tile_size, typename IndexType> bool CheckLaunchBatchTranspose(const int* permutation, const IndexType& num_batches, const IndexType& rows, const IndexType& cols) { if (CheckIfGreaterEqualThanTileSize<tile_size, IndexType>(rows, cols)) { if (num_batches == 1) { // 2d tensor case: (0, 1) -> (1, 0) return true; } else if (num_dims == 3 && permutation[2] == 1 && permutation[1] == 2) { // 3d tensor case: (0, 1, 2) -> (0, 2, 1) return true; } else { return false; } } return false; } template<typename IndexType, size_t movement_size> bool CheckUseMov2(const IndexType& rows, const IndexType& cols, const void* src, void* dst) { auto src_ptr = reinterpret_cast<std::uintptr_t>(src); auto dst_ptr = reinterpret_cast<std::uintptr_t>(dst); return (movement_size == 2) && (rows % 2 == 0) && (cols % 2 == 0) && (src_ptr % 4 == 0) && (dst_ptr % 4 == 0); ; } template<size_t num_dims, typename IndexType> void InferBatchTransposeShape(const int64_t* src_dims, IndexType* num_batches, IndexType* rows, IndexType* cols) { if (num_dims == 2) { *num_batches = 1; *rows = src_dims[0]; *cols = src_dims[1]; } else { *num_batches = src_dims[0]; *rows = src_dims[1]; *cols = src_dims[2]; } } template<size_t num_dims, size_t movement_size, typename IndexType> void LaunchKernel(StreamContext* stream_ctx, const int64_t* src_dims, const void* src, const int* permutation, void* dst, size_t count) { PermuteKernelParams<num_dims, IndexType> params = MakePermuteParams<num_dims, IndexType>(src_dims, src, permutation, dst, count); hipStream_t cuda_stream = CHECK_NOTNULL(dynamic_cast<CudaStreamContext*>(stream_ctx))->cuda_stream(); if (num_dims == 2 || num_dims == 3) { IndexType num_batches; IndexType rows; IndexType cols; InferBatchTransposeShape<num_dims, IndexType>(src_dims, 
&num_batches, &rows, &cols); if (CheckLaunchBatchTranspose<num_dims, kMov4TileSize>(params.permutation, num_batches, rows, cols)) { if (CheckUseMov2<IndexType, movement_size>(rows, cols, src, dst)) { LaunchBatchTransposeKernel<num_dims, 2, kMov2TileSize, IndexType>(cuda_stream, params, num_batches, rows, cols); } else { LaunchBatchTransposeKernel<num_dims, movement_size, kMov4TileSize, IndexType>( cuda_stream, params, num_batches, rows, cols); } } else { hipLaunchKernelGGL(( PermuteKernel<num_dims, movement_size, IndexType>) , dim3(BlocksNum4ThreadsNum(params.count)), dim3(kCudaThreadsNumPerBlock), 0, cuda_stream, params); } } else { hipLaunchKernelGGL(( PermuteKernel<num_dims, movement_size, IndexType>) , dim3(BlocksNum4ThreadsNum(params.count)), dim3(kCudaThreadsNumPerBlock), 0, cuda_stream, params); } } class PermuteImpl : public Permute { public: OF_DISALLOW_COPY_AND_MOVE(PermuteImpl); PermuteImpl() = default; ~PermuteImpl() override = default; using Permute::Launch; void Launch(StreamContext* stream_ctx, DataType data_type, size_t num_dims, const int64_t* src_dims, const void* src, const int* permutation, void* dst) override { SimplifyThenLaunch(stream_ctx, data_type, num_dims, src_dims, src, permutation, dst); } }; class PermuteFactoryImpl : public PermuteFactory { public: OF_DISALLOW_COPY_AND_MOVE(PermuteFactoryImpl); PermuteFactoryImpl() = default; ~PermuteFactoryImpl() override = default; std::unique_ptr<Permute> New(size_t max_num_dims) override { if (max_num_dims <= kMaxNumDims) { return std::unique_ptr<Permute>(new PermuteImpl()); } else { return nullptr; } } }; REGISTER_PRIMITIVE_FACTORY(DeviceType::kGPU, PermuteFactory, PermuteFactoryImpl); } // namespace } // namespace internal } // namespace permute } // namespace primitive } // namespace oneflow
01fd77caf311c1724a5d3d57ad4764d3c8b30dcd.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/primitive/include/permute.h" #include "oneflow/core/primitive/common/permute_impl.h" #include "oneflow/core/stream/cuda_stream_context.h" #include <cuda_runtime.h> namespace oneflow { namespace primitive { namespace permute { namespace internal { namespace { constexpr int32_t kMov4TileSize = 32; constexpr int32_t kMov2TileSize = 64; constexpr int32_t kBlockRows = 8; template<size_t num_dims, size_t movement_size, typename IndexType> __global__ void PermuteKernel(PermuteKernelParams<num_dims, IndexType> params) { using T = typename std::aligned_storage<movement_size, movement_size>::type; const T* src = reinterpret_cast<const T*>(params.src); T* dst = reinterpret_cast<T*>(params.dst); IndexType src_index[num_dims]; IndexType dst_index[num_dims]; CUDA_1D_KERNEL_LOOP_T(IndexType, i, params.count) { params.dst_index_helper.OffsetToNdIndex(i, dst_index); #pragma unroll for (size_t dim = 0; dim < num_dims; ++dim) { src_index[params.permutation[dim]] = dst_index[dim]; } IndexType src_offset = params.src_index_helper.NdIndexToOffset(src_index); dst[i] = src[src_offset]; } } // (B, X, Y) -> (B, Y, X) // refer from https://developer.nvidia.com/blog/efficient-matrix-transpose-cuda-cc/ template<size_t num_dims, size_t movement_size, size_t tile_size, typename IndexType> __global__ void BatchTransposeKernel(const void* src_ptr, void* dst_ptr, IndexType H, IndexType W, IndexType num_tile_rows, IndexType num_tile_cols, int32_t block_nums) { using T = typename std::aligned_storage<movement_size, movement_size>::type; __shared__ T tile[tile_size][tile_size + 1]; // To avoid bank conflict. const T* src = reinterpret_cast<const T*>(src_ptr); T* dst = reinterpret_cast<T*>(dst_ptr); IndexType batch_num_tile = num_tile_rows * num_tile_cols; for (int i = blockIdx.x, step = gridDim.x; i < block_nums; i += step) { const IndexType batch_index = i / batch_num_tile; // the index of batch. const IndexType flatten_index = i - batch_index * batch_num_tile; // equal to i % (num_tile_rows*num_tile_cols). the // flatten index of tile in a batch. const IndexType row_index = flatten_index / num_tile_cols; // the row index of tile in a batch. const IndexType col_index = flatten_index - row_index * num_tile_cols; // equal to k % num_tile_cols. the col index of tile in a batch. const IndexType offset = batch_index * H * W; IndexType x = col_index * tile_size + threadIdx.x; IndexType y = row_index * tile_size + threadIdx.y; if (x < W) { IndexType y_range = ((tile_size - threadIdx.y) < (H - y)) ? (tile_size - threadIdx.y) : (H - y); #pragma unroll // each thread process 4 elements. // `i < y_range` equals to: `threadIdx.y + i < tile_size && y + i < H`. 
for (int i = 0; i < y_range; i += kBlockRows) { tile[threadIdx.y + i][threadIdx.x] = src[offset + (y + i) * W + x]; } } __syncthreads(); x = row_index * tile_size + threadIdx.x; y = col_index * tile_size + threadIdx.y; if (x < H) { IndexType x_range = ((tile_size - threadIdx.y) < (W - y)) ? (tile_size - threadIdx.y) : (W - y); #pragma unroll // `i < x_range` equals to: `threadIdx.y + i < tile_size && y + i < W`. for (int i = 0; i < x_range; i += kBlockRows) { dst[offset + (y + i) * H + x] = tile[threadIdx.x][threadIdx.y + i]; } } __syncthreads(); } } /* Here is a Movementsie=2 version of Batch Transpose. When the H W can be divided by 2. we can read data use movementsize=4, and write back as movementsize=2. */ template<size_t num_dims, size_t tile_size, typename IndexType> __global__ void BatchTransposeMovement2Kernel(const void* src_ptr, void* dst_ptr, IndexType rows, IndexType cols, IndexType num_tile_rows, IndexType num_tile_cols, int32_t block_nums) { static_assert(tile_size % 2 == 0); using T_MOV2 = typename std::aligned_storage<2, 2>::type; using T_MOV4 = typename std::aligned_storage<4, 4>::type; const T_MOV4* src = reinterpret_cast<const T_MOV4*>(src_ptr); T_MOV4* dst = reinterpret_cast<T_MOV4*>(dst_ptr); // Use union structure to process Load and Store. __shared__ union { T_MOV2 tile_m2[tile_size][tile_size + 2]; // half [64][66] T_MOV4 tile_m4[tile_size][tile_size / 2 + 1]; // half2 [64][33] } tile_mem; IndexType batch_num_tile = num_tile_rows * num_tile_cols; for (int i = blockIdx.x, step = gridDim.x; i < block_nums; i += step) { const IndexType batch_index = i / batch_num_tile; // the index of batch. const IndexType flatten_index = i - batch_index * batch_num_tile; // equal to i%(num_tile_rows*num_tile_cols). the flatten // index of tile in a batch. const IndexType row_index = flatten_index / num_tile_cols; // the row index of tile in a batch. const IndexType col_index = flatten_index - row_index * num_tile_cols; // equal to k % num_tile_cols. the col index of tile in a batch. const IndexType offset = batch_index * rows * cols; IndexType x = col_index * tile_size + threadIdx.x * 2; // cause each thread process a half2 element, we // need to multiply 2 for threadIdx.x. IndexType y = row_index * tile_size + threadIdx.y; if (x < cols) { // each thread process 4 elements. IndexType y_range = ((tile_size - threadIdx.y) < (rows - y)) ? (tile_size - threadIdx.y) : (rows - y); #pragma unroll // `i < y_range` equals to: `threadIdx.y + i < tile_size && y + i < rows`. for (int i = 0; i < y_range; i += kBlockRows) { // each thread load a half2. tile_mem.tile_m4[threadIdx.y + i][threadIdx.x] = src[(offset + (y + i) * cols + x) / 2]; } } __syncthreads(); x = row_index * tile_size + threadIdx.x * 2; // cause each thread process a half2 element, we // need to multiply 2 for threadIdx.x. y = col_index * tile_size + threadIdx.y; if (x < rows) { IndexType x_range = ((tile_size - threadIdx.y) < (cols - y)) ? (tile_size - threadIdx.y) : (cols - y); #pragma unroll // `i < x_range` equals to: `threadIdx.y + i < tile_size && y + i < cols`. for (int i = 0; i < x_range; i += kBlockRows) { /* When write back as column, it cannot be stored as half2 directly. So we split as 2 half elements, and write back separately. 
*/ union { T_MOV4 m4; T_MOV2 m2[2]; } tmp_storage; tmp_storage.m2[0] = tile_mem.tile_m2[threadIdx.x * 2][threadIdx.y + i]; tmp_storage.m2[1] = tile_mem.tile_m2[threadIdx.x * 2 + 1][threadIdx.y + i]; dst[(offset + (y + i) * rows + x) / 2] = tmp_storage.m4; } } __syncthreads(); } } template<size_t num_dims, size_t movement_size, size_t tile_size, typename IndexType> void LaunchBatchTransposeKernel(cudaStream_t& cuda_stream, const PermuteKernelParams<num_dims, IndexType>& params, const IndexType& num_batches, const IndexType& rows, const IndexType& cols) { IndexType num_tile_rows = (rows + tile_size - 1) / tile_size; IndexType num_tile_cols = (cols + tile_size - 1) / tile_size; const int32_t block_nums = num_batches * num_tile_rows * num_tile_cols; int32_t checked_block_nums = std::min(block_nums, kCudaMaxBlocksNum); if (tile_size == kMov2TileSize) { const int32_t half2_thread = tile_size / 2; // cause each thread process two half elements. BatchTransposeMovement2Kernel<num_dims, kMov2TileSize, IndexType> <<<checked_block_nums, dim3(half2_thread, kBlockRows), 0, cuda_stream>>>( params.src, params.dst, rows, cols, num_tile_rows, num_tile_cols, block_nums); // Set threads num as 32x8 cause each threads // process 4 elements to 32x32 share memory. } else { BatchTransposeKernel<num_dims, movement_size, tile_size, IndexType> <<<checked_block_nums, dim3(tile_size, kBlockRows), 0, cuda_stream>>>( params.src, params.dst, rows, cols, num_tile_rows, num_tile_cols, block_nums); } } template<size_t tile_size, typename IndexType> bool CheckIfGreaterEqualThanTileSize(const IndexType& rows, const IndexType& cols) { if (rows < tile_size || cols < tile_size) { return false; } return true; } template<size_t num_dims, size_t tile_size, typename IndexType> bool CheckLaunchBatchTranspose(const int* permutation, const IndexType& num_batches, const IndexType& rows, const IndexType& cols) { if (CheckIfGreaterEqualThanTileSize<tile_size, IndexType>(rows, cols)) { if (num_batches == 1) { // 2d tensor case: (0, 1) -> (1, 0) return true; } else if (num_dims == 3 && permutation[2] == 1 && permutation[1] == 2) { // 3d tensor case: (0, 1, 2) -> (0, 2, 1) return true; } else { return false; } } return false; } template<typename IndexType, size_t movement_size> bool CheckUseMov2(const IndexType& rows, const IndexType& cols, const void* src, void* dst) { auto src_ptr = reinterpret_cast<std::uintptr_t>(src); auto dst_ptr = reinterpret_cast<std::uintptr_t>(dst); return (movement_size == 2) && (rows % 2 == 0) && (cols % 2 == 0) && (src_ptr % 4 == 0) && (dst_ptr % 4 == 0); ; } template<size_t num_dims, typename IndexType> void InferBatchTransposeShape(const int64_t* src_dims, IndexType* num_batches, IndexType* rows, IndexType* cols) { if (num_dims == 2) { *num_batches = 1; *rows = src_dims[0]; *cols = src_dims[1]; } else { *num_batches = src_dims[0]; *rows = src_dims[1]; *cols = src_dims[2]; } } template<size_t num_dims, size_t movement_size, typename IndexType> void LaunchKernel(StreamContext* stream_ctx, const int64_t* src_dims, const void* src, const int* permutation, void* dst, size_t count) { PermuteKernelParams<num_dims, IndexType> params = MakePermuteParams<num_dims, IndexType>(src_dims, src, permutation, dst, count); cudaStream_t cuda_stream = CHECK_NOTNULL(dynamic_cast<CudaStreamContext*>(stream_ctx))->cuda_stream(); if (num_dims == 2 || num_dims == 3) { IndexType num_batches; IndexType rows; IndexType cols; InferBatchTransposeShape<num_dims, IndexType>(src_dims, &num_batches, &rows, &cols); if 
(CheckLaunchBatchTranspose<num_dims, kMov4TileSize>(params.permutation, num_batches, rows, cols)) { if (CheckUseMov2<IndexType, movement_size>(rows, cols, src, dst)) { LaunchBatchTransposeKernel<num_dims, 2, kMov2TileSize, IndexType>(cuda_stream, params, num_batches, rows, cols); } else { LaunchBatchTransposeKernel<num_dims, movement_size, kMov4TileSize, IndexType>( cuda_stream, params, num_batches, rows, cols); } } else { PermuteKernel<num_dims, movement_size, IndexType> <<<BlocksNum4ThreadsNum(params.count), kCudaThreadsNumPerBlock, 0, cuda_stream>>>(params); } } else { PermuteKernel<num_dims, movement_size, IndexType> <<<BlocksNum4ThreadsNum(params.count), kCudaThreadsNumPerBlock, 0, cuda_stream>>>(params); } } class PermuteImpl : public Permute { public: OF_DISALLOW_COPY_AND_MOVE(PermuteImpl); PermuteImpl() = default; ~PermuteImpl() override = default; using Permute::Launch; void Launch(StreamContext* stream_ctx, DataType data_type, size_t num_dims, const int64_t* src_dims, const void* src, const int* permutation, void* dst) override { SimplifyThenLaunch(stream_ctx, data_type, num_dims, src_dims, src, permutation, dst); } }; class PermuteFactoryImpl : public PermuteFactory { public: OF_DISALLOW_COPY_AND_MOVE(PermuteFactoryImpl); PermuteFactoryImpl() = default; ~PermuteFactoryImpl() override = default; std::unique_ptr<Permute> New(size_t max_num_dims) override { if (max_num_dims <= kMaxNumDims) { return std::unique_ptr<Permute>(new PermuteImpl()); } else { return nullptr; } } }; REGISTER_PRIMITIVE_FACTORY(DeviceType::kGPU, PermuteFactory, PermuteFactoryImpl); } // namespace } // namespace internal } // namespace permute } // namespace primitive } // namespace oneflow
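As a stripped-down illustration of the shared-memory tiling that BatchTransposeKernel builds on (the +1 padding column is what avoids shared-memory bank conflicts), here is a self-contained single-matrix transpose. It is a simplified sketch rather than OneFlow's primitive; the kernel and constant names are illustrative, and the 32x8 thread shape mirrors the kBlockRows convention above.

#include <cuda_runtime.h>

constexpr int kTile = 32;
constexpr int kRowsPerBlock = 8;

// Minimal row-major transpose: dst (cols x rows) = src (rows x cols) transposed.
__global__ void TransposeTile(const float* src, float* dst, int rows, int cols) {
  __shared__ float tile[kTile][kTile + 1];  // +1 column avoids bank conflicts
  int x = blockIdx.x * kTile + threadIdx.x;
  int y = blockIdx.y * kTile + threadIdx.y;
  for (int i = 0; i < kTile; i += kRowsPerBlock) {
    if (x < cols && y + i < rows) { tile[threadIdx.y + i][threadIdx.x] = src[(y + i) * cols + x]; }
  }
  __syncthreads();
  x = blockIdx.y * kTile + threadIdx.x;  // swap block coordinates for the write
  y = blockIdx.x * kTile + threadIdx.y;
  for (int i = 0; i < kTile; i += kRowsPerBlock) {
    if (x < rows && y + i < cols) { dst[(y + i) * rows + x] = tile[threadIdx.x][threadIdx.y + i]; }
  }
}

// Example launch: TransposeTile<<<dim3((cols + 31) / 32, (rows + 31) / 32), dim3(32, 8)>>>(src, dst, rows, cols);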
c22687b889845d28c3cc9412d1ef3ff4b4648868.hip
// !!! This is a file automatically generated by hipify!!! #include <common/cuda/texture_reference.hpp> #include <hip/hip_runtime_api.h> #include <hip/hip_vector_types.h> #include <hip/channel_descriptor.h> #include <type_traits> #ifndef __HIPCC__ #define __HIPCC__ #endif #include <hip/hip_texture_types.h> namespace common { namespace cuda { // Texture references must be defined outside all function bodies (global scope), must be declared explicitly, are compiled with NVCC, and do not support 3-component vector types texture<uchar4, hipTextureType2D, hipReadModeElementType> texRef_2d_uchar4; texture<float4, hipTextureType2D, hipReadModeElementType> texRef_2d_float4; hipError_t cuda_get_texture_reference_2d_uchar4(const textureReference ** texref) { return hipGetTextureReference(texref, &texRef_2d_uchar4); } hipError_t cuda_get_texture_reference_2d_float4(const textureReference ** texref) { return hipGetTextureReference(texref, &texRef_2d_float4); } }// namespace cuda } // namespace common
c22687b889845d28c3cc9412d1ef3ff4b4648868.cu
#include <common/cuda/texture_reference.hpp> #include <cuda_runtime_api.h> #include <vector_types.h> #include <channel_descriptor.h> #include <type_traits> #ifndef __CUDACC__ #define __CUDACC__ #endif #include <cuda_texture_types.h> namespace common { namespace cuda { // Texture references must be defined outside all function bodies (global scope), must be declared explicitly, are compiled with NVCC, and do not support 3-component vector types texture<uchar4, cudaTextureType2D, cudaReadModeElementType> texRef_2d_uchar4; texture<float4, cudaTextureType2D, cudaReadModeElementType> texRef_2d_float4; cudaError_t cuda_get_texture_reference_2d_uchar4(const textureReference ** texref) { return cudaGetTextureReference(texref, &texRef_2d_uchar4); } cudaError_t cuda_get_texture_reference_2d_float4(const textureReference ** texref) { return cudaGetTextureReference(texref, &texRef_2d_float4); } }// namespace cuda } // namespace common
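For context, here is a minimal sketch of how a caller might bind and sample one of these legacy 2D texture references (the texture-reference API is deprecated and removed in CUDA 12.x). The array, coordinates, and names below are illustrative and not taken from the files above.

#include <cuda_runtime.h>

// Illustrative only: declare, bind, sample, and unbind a 2D float4 texture reference.
texture<float4, cudaTextureType2D, cudaReadModeElementType> texRef_demo;

__global__ void SampleOne(float4* out, float x, float y) {
  *out = tex2D(texRef_demo, x, y);  // unnormalized texel coordinates by default
}

void BindAndSample(cudaArray_t arr, float4* d_out) {
  cudaChannelFormatDesc desc = cudaCreateChannelDesc<float4>();
  cudaBindTextureToArray(texRef_demo, arr, desc);
  SampleOne<<<1, 1>>>(d_out, 0.5f, 0.5f);
  cudaDeviceSynchronize();
  cudaUnbindTexture(texRef_demo);
}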
0a6457fcd4e92ab1c8146e9c9290ab3ed9d223f9.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include "options.h" #include "gpu.h" #include "matrix.h" using namespace std; int main(int argc, char *argv[]) { srand(time(NULL)); Options opt(argc, argv); if (opt.errorMode()) { cout << opt.errorPrint() << endl; return 1; } if (opt.helpMode()) { cout << opt.helpPrint() << endl; return 0; } if (opt.versionMode()) { cout << opt.versionPrint() << endl; return 0; } if (opt.debugMode()) opt.infoPrint(); unsigned long m = opt.getM(); unsigned long n = opt.getN(); unsigned long p = opt.getP(); int blockSizeX = opt.getSizeX(); int blockSizeY = opt.getSizeY(); hipPitchedPtr A = makeMatrix(m, n); hipPitchedPtr B = makeMatrix(n, p); if (opt.debugMode()) { printMatrix(A); printMatrix(B); } hipPitchedPtr resultGpu = makeMatrix(0,0); hipPitchedPtr resultHost = makeMatrix(0,0); Gpu gpu(2, 0); if (opt.debugMode()) { gpu.infoDevices(); cout << "My Device" << endl; gpu.infoMyDevice(); } gpu.startTime(); if (opt.usePadding()) resultGpu = gpu.matmulPaddingGPU(A, B, blockSizeX, blockSizeY); else if (opt.sharedMemory()) resultGpu = gpu.matmulSharedTaskGPU(A, B, blockSizeX, blockSizeY); else if (opt.fastMode()) resultGpu = gpu.matmulFastGPU(A, B, opt); else resultGpu = gpu.matmulEasyGPU(A, B, blockSizeX, blockSizeY); cout << m << " " << gpu.getTime() << endl; if (opt.debugMode()) printMatrix(resultGpu); if (opt.checkResult()) { resultHost = matmulHOST(A, B); if (opt.debugMode()) printMatrix(resultHost); if (compareMatrix(resultHost, resultGpu)) cout << "GPU and HOST result is equal" << endl; else cout << "Error: GPU not equal HOST" << endl; } delete [] (Type *)resultHost.ptr; delete [] (Type *)resultGpu.ptr; delete [] (Type *)A.ptr; delete [] (Type *)B.ptr; return 0; }
0a6457fcd4e92ab1c8146e9c9290ab3ed9d223f9.cu
#include <iostream> #include "options.h" #include "gpu.h" #include "matrix.h" using namespace std; int main(int argc, char *argv[]) { srand(time(NULL)); Options opt(argc, argv); if (opt.errorMode()) { cout << opt.errorPrint() << endl; return 1; } if (opt.helpMode()) { cout << opt.helpPrint() << endl; return 0; } if (opt.versionMode()) { cout << opt.versionPrint() << endl; return 0; } if (opt.debugMode()) opt.infoPrint(); unsigned long m = opt.getM(); unsigned long n = opt.getN(); unsigned long p = opt.getP(); int blockSizeX = opt.getSizeX(); int blockSizeY = opt.getSizeY(); cudaPitchedPtr A = makeMatrix(m, n); cudaPitchedPtr B = makeMatrix(n, p); if (opt.debugMode()) { printMatrix(A); printMatrix(B); } cudaPitchedPtr resultGpu = makeMatrix(0,0); cudaPitchedPtr resultHost = makeMatrix(0,0); Gpu gpu(2, 0); if (opt.debugMode()) { gpu.infoDevices(); cout << "My Device" << endl; gpu.infoMyDevice(); } gpu.startTime(); if (opt.usePadding()) resultGpu = gpu.matmulPaddingGPU(A, B, blockSizeX, blockSizeY); else if (opt.sharedMemory()) resultGpu = gpu.matmulSharedTaskGPU(A, B, blockSizeX, blockSizeY); else if (opt.fastMode()) resultGpu = gpu.matmulFastGPU(A, B, opt); else resultGpu = gpu.matmulEasyGPU(A, B, blockSizeX, blockSizeY); cout << m << " " << gpu.getTime() << endl; if (opt.debugMode()) printMatrix(resultGpu); if (opt.checkResult()) { resultHost = matmulHOST(A, B); if (opt.debugMode()) printMatrix(resultHost); if (compareMatrix(resultHost, resultGpu)) cout << "GPU and HOST result is equal" << endl; else cout << "Error: GPU not equal HOST" << endl; } delete [] (Type *)resultHost.ptr; delete [] (Type *)resultGpu.ptr; delete [] (Type *)A.ptr; delete [] (Type *)B.ptr; return 0; }
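// Hedged sketch, not from the original sources: the kernels behind matmulEasyGPU /
// matmulSharedTaskGPU / matmulPaddingGPU live in gpu.cu and are not shown here. The kernel
// below only illustrates how a naive matrix multiply could index cudaPitchedPtr operands,
// assuming row-major float elements (the code above casts buffers to Type*, assumed float)
// and pitch giving the row stride in bytes. The name matmulNaiveSketch is illustrative.
#include <cuda_runtime.h>

__global__ void matmulNaiveSketch(cudaPitchedPtr A, cudaPitchedPtr B, cudaPitchedPtr C,
                                  unsigned long m, unsigned long n, unsigned long p)
{
    unsigned long row = blockIdx.y * blockDim.y + threadIdx.y;  // row of A and C
    unsigned long col = blockIdx.x * blockDim.x + threadIdx.x;  // column of B and C
    if (row >= m || col >= p) return;

    const float *a = static_cast<const float *>(A.ptr);
    const float *b = static_cast<const float *>(B.ptr);
    float       *c = static_cast<float *>(C.ptr);
    size_t lda = A.pitch / sizeof(float);  // pitch is in bytes; convert to elements per row
    size_t ldb = B.pitch / sizeof(float);
    size_t ldc = C.pitch / sizeof(float);

    float acc = 0.0f;
    for (unsigned long k = 0; k < n; ++k)
        acc += a[row * lda + k] * b[k * ldb + col];
    c[row * ldc + col] = acc;
}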
8b4678a3aa3794788f5f96ffc84a0b0ebe235f7f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const Dtype* label_cpu = bottom[1]->cpu_data(); // LOG(INFO) << "LABEL FORM LOSS:" << label_cpu[0]; const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. 
Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); if (normalize_) { Dtype count; caffe_gpu_asum(nthreads, counts, &count); loss /= count; } else { loss /= outer_num_; } top[0]->mutable_cpu_data()[0] = loss; if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); const Dtype loss_weight = top[0]->cpu_diff()[0]; if (normalize_) { Dtype count; caffe_gpu_asum(nthreads, counts, &count); caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff); } else { caffe_gpu_scal(prob_.count(), loss_weight / outer_num_, bottom_diff); } } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
8b4678a3aa3794788f5f96ffc84a0b0ebe235f7f.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> __global__ void SoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const Dtype* label_cpu = bottom[1]->cpu_data(); // LOG(INFO) << "LABEL FORM LOSS:" << label_cpu[0]; const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); if (normalize_) { Dtype count; caffe_gpu_asum(nthreads, counts, &count); loss /= count; } else { loss /= outer_num_; } top[0]->mutable_cpu_data()[0] = loss; if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; } } } template <typename Dtype> void SoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = 
bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); const Dtype loss_weight = top[0]->cpu_diff()[0]; if (normalize_) { Dtype count; caffe_gpu_asum(nthreads, counts, &count); caffe_gpu_scal(prob_.count(), loss_weight / count, bottom_diff); } else { caffe_gpu_scal(prob_.count(), loss_weight / outer_num_, bottom_diff); } } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossLayer); } // namespace caffe
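// Host-side reference sketch (not Caffe code) of the arithmetic SoftmaxLossForwardGPU
// performs above, with parameter names mirroring the kernel arguments. It accumulates
// -log(max(prob, FLT_MIN)) over non-ignored elements and then applies the same
// normalization the Forward_gpu path uses. The count guard via std::max(count, 1) is an
// assumption added for the sketch; the GPU path divides by the accumulated count directly.
#include <algorithm>
#include <cfloat>
#include <cmath>

float softmax_loss_forward_reference(const float* prob, const float* label,
                                     int outer_num, int inner_num, int dim,
                                     bool has_ignore_label, int ignore_label,
                                     bool normalize) {
  double loss = 0.0;
  int count = 0;
  for (int n = 0; n < outer_num; ++n) {
    for (int s = 0; s < inner_num; ++s) {
      const int label_value = static_cast<int>(label[n * inner_num + s]);
      if (has_ignore_label && label_value == ignore_label) continue;
      loss += -std::log(std::max(prob[n * dim + label_value * inner_num + s], FLT_MIN));
      ++count;
    }
  }
  return normalize ? static_cast<float>(loss / std::max(count, 1))
                   : static_cast<float>(loss / outer_num);
}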
08eb220be99682964b7274b1054c9ebe2888f805.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cstdlib> #include <cmath> #include <vector> #include <string> #include "solver.h" using namespace std; typedef unsigned char uchar; int num_train = 128, num_test = 500; int reverseInt(int n) { int bytes = 4; unsigned char ch[bytes]; for (int i = 0; i < bytes; i++) { ch[i] = (n >> i * 8) & 255; } int p = 0; for (int i = 0; i < bytes; i++) { p += (int) ch[i] << (bytes - i - 1) * 8; } return p; } void readMNIST(vector<vector<uchar> > &train_images, vector<vector<uchar> > &test_images, vector<uchar> &train_labels, vector<uchar> &test_labels) { string filename_train_images = "data/train-images.idx3-ubyte"; string filename_train_labels = "data/train-labels.idx1-ubyte"; string filename_test_images = "data/t10k-images.idx3-ubyte"; string filename_test_labels = "data/t10k-labels.idx1-ubyte"; // read train/test images for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_images; else filename = filename_test_images; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_images = 0, n_rows = 0, n_cols = 0; f.read((char *) &magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *) &n_images, sizeof(n_images)); n_images = reverseInt(n_images); f.read((char *) &n_rows, sizeof(n_rows)); n_rows = reverseInt(n_rows); f.read((char *) &n_cols, sizeof(n_cols)); n_cols = reverseInt(n_cols); for (int k = 0; k < n_images; k++) { vector<uchar> temp; temp.reserve(n_rows * n_cols); for (int j = 0; j < n_rows * n_cols; j++) { uchar t = 0; f.read((char *)&t, sizeof(t)); temp.push_back(t); } if (i == 0) train_images.push_back(temp); else test_images.push_back(temp); } f.close(); } // read train/test labels for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_labels; else filename = filename_test_labels; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_labels = 0; f.read((char *) &magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *) &n_labels, sizeof(n_labels)); n_labels = reverseInt(n_labels); for (int k = 0; k < n_labels; k++) { uchar t = 0; f.read((char *)&t, sizeof(t)); if (i == 0) train_labels.push_back(t); else test_labels.push_back(t); } f.close(); } } void printTimes(vector<float> &time, string filename); void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag, vector<vector<float> > &bwd_vdnn_lag, string filename); void printComputationTransferTimes(vector<vector<float> > &fwd_times, vector<vector<float> >&bwd_times, bool computation, string filename); int main(int argc, char *argv[]) { // int num_train = 100 * batch_size, num_val = batch_size; // void *X_train = malloc(num_train * input_channels * sizeof(float)); // int *y_train = (int *)malloc(num_train * sizeof(int)); // void *X_val = malloc(num_val * input_channels * sizeof(float)); // int *y_val = (int *)malloc(num_val * sizeof(int)); // for (int i = 0; i < num_train; i++) { // for (int j = 0; j < input_channels; j++) // ((float *)X_train)[i * input_channels + j] = (rand() % 1000) * 1.0 / 1000; // y_train[i] = 0; // } // for (int i = 0; i < num_val; i++) { // for (int j = 0; j < input_channels; j++) // ((float *)X_val)[i * input_channels + j] = (rand() % 1000) * 1.0 / 1000; // y_val[i] = rand() % 2; // } // int rows = 28, 
cols = 28, channels = 1; // vector<vector<uchar> > train_images, test_images; // vector<uchar> train_labels, test_labels; // readMNIST(train_images, test_images, train_labels, test_labels); // float *f_train_images, *f_train_labels, *f_test_images, *f_test_labels; float *f_train_images, *f_test_images; int *f_train_labels, *f_test_labels; int rows = 227, cols = 227, channels = 3; int input_size = rows * cols * channels; // f_train_images = (float *)malloc(num_train * input_size * sizeof(float)); // f_train_labels = (int *)malloc(num_train * sizeof(int)); checkCudaErrors(hipHostMalloc(&f_train_images, num_train * input_size * sizeof(float))); checkCudaErrors(hipHostMalloc(&f_train_labels, num_train * sizeof(int))); f_test_images = (float *)malloc(num_test * input_size * sizeof(float)); f_test_labels = (int *)malloc(num_test * sizeof(int)); float *mean_image; mean_image = (float *)malloc(input_size * sizeof(float)); for (int i = 0; i < input_size; i++) { mean_image[i] = 0; for (int k = 0; k < num_train; k++) { mean_image[i] += f_train_images[k * input_size + i]; } mean_image[i] /= num_train; } for (int i = 0; i < num_train; i++) { for (int j = 0; j < input_size; j++) { f_train_images[i * input_size + j] -= mean_image[j]; } } for (int i = 0; i < num_test; i++) { for (int j = 0; j < input_size; j++) { f_test_images[i * input_size + j] -= mean_image[j]; } } // int input_channels = rows * cols * channels * 3, hidden_channels1 = 50, hidden_channels2 = 100, output_channels = 10; // vector<LayerSpecifier> layer_specifier; // ConvDescriptor layer0; // LayerSpecifier temp; // layer0.initializeValues(1, 3, 3, 3, rows, cols, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // ActivationDescriptor layer0_actv; // layer0_actv.initializeValues(RELU, 3, rows, cols); // temp.initPointer(ACTV); // *((ActivationDescriptor *)temp.params) = layer0_actv; // layer_specifier.push_back(temp); // BatchNormDescriptor layer0_bn; // for (int i = 0; i < 200; i++) { // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows, cols); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // layer0.initializeValues(3, 3, 3, 3, rows, cols, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // layer0_actv.initializeValues(RELU, 3, rows, cols); // temp.initPointer(ACTV); // *((ActivationDescriptor *)temp.params) = layer0_actv; // layer_specifier.push_back(temp); // } // PoolingDescriptor layer0_pool; // layer0_pool.initializeValues(3, 2, 2, rows, cols, 0, 0, 2, 2, POOLING_MAX); // temp.initPointer(POOLING); // *((PoolingDescriptor *)temp.params) = layer0_pool; // layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols / 2); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // // DropoutDescriptor layer0_dropout; // // layer0_dropout.initializeValues(0.2, 3, rows / 2, cols / 2); // // temp.initPointer(DROPOUT); // // *((DropoutDescriptor *)temp.params) = layer0_dropout; // // layer_specifier.push_back(temp); // layer0.initializeValues(3, 3, 3, 3, rows / 2, cols / 2, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // layer0_actv.initializeValues(RELU, 3, rows / 2, cols / 2); // temp.initPointer(ACTV); // 
*((ActivationDescriptor *)temp.params) = layer0_actv; // layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols / 2); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // FCDescriptor layer1; // layer1.initializeValues(input_channels, hidden_channels1); // temp.initPointer(FULLY_CONNECTED); // *((FCDescriptor *)(temp.params)) = layer1; // layer_specifier.push_back(temp); // temp.initPointer(ACTV); // ActivationDescriptor layer1_actv; // layer1_actv.initializeValues(RELU, hidden_channels1, 1, 1); // *((ActivationDescriptor *)temp.params) = layer1_actv; // layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_PER_ACTIVATION, 1e-5, 0.1, hidden_channels1, 1, 1); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // temp.initPointer(FULLY_CONNECTED); // FCDescriptor layer2; // layer2.initializeValues(hidden_channels1, output_channels); // *((FCDescriptor *)temp.params) = layer2; // layer_specifier.push_back(temp); // // temp.initPointer(FULLY_CONNECTED); // // FCDescriptor layer3; // // layer3.initializeValues(hidden_channels2, output_channels); // // *((FCDescriptor *)temp.params) = layer3; // // layer_specifier.push_back(temp); // temp.initPointer(SOFTMAX); // SoftmaxDescriptor smax; // smax.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, output_channels, 1, 1); // *((SoftmaxDescriptor *)(temp.params)) = smax; // layer_specifier.push_back(temp); // AlexNet vector<LayerSpecifier> layer_specifier; { ConvDescriptor layer0; layer0.initializeValues(3, 96, 11, 11, 227, 227, 0, 0, 4, 4, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer0; layer_specifier.push_back(temp); } { PoolingDescriptor layer1; layer1.initializeValues(96, 3, 3, 55, 55, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer1; layer_specifier.push_back(temp); } { ConvDescriptor layer2; layer2.initializeValues(96, 256, 5, 5, 27, 27, 2, 2, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer2; layer_specifier.push_back(temp); } { PoolingDescriptor layer3; layer3.initializeValues(256, 3, 3, 27, 27, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer3; layer_specifier.push_back(temp); } { ConvDescriptor layer4; layer4.initializeValues(256, 384, 3, 3, 13, 13, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer4; layer_specifier.push_back(temp); } { ConvDescriptor layer5; layer5.initializeValues(384, 384, 3, 3, 13, 13, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer5; layer_specifier.push_back(temp); } { ConvDescriptor layer6; layer6.initializeValues(384, 256, 3, 3, 13, 13, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer6; layer_specifier.push_back(temp); } { PoolingDescriptor layer7; layer7.initializeValues(256, 3, 3, 13, 13, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer7; layer_specifier.push_back(temp); } { FCDescriptor layer8; layer8.initializeValues(9216, 4096, RELU); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = 
layer8; layer_specifier.push_back(temp); } { FCDescriptor layer9; layer9.initializeValues(4096, 4096, RELU); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = layer9; layer_specifier.push_back(temp); } { FCDescriptor layer10; layer10.initializeValues(4096, 1000); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = layer10; layer_specifier.push_back(temp); } { SoftmaxDescriptor layer11; layer11.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, 1000, 1, 1); LayerSpecifier temp; temp.initPointer(SOFTMAX); *((SoftmaxDescriptor *)temp.params) = layer11; layer_specifier.push_back(temp); } vDNNConvAlgo vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL; vDNNType vdnn_type = vDNN_DYN; string filename("vdnn_dyn"); if (argc == 3) { filename.assign("vdnn"); // argv[1] - layers to offload, argv[2] - conv algo to use if (strcmp(argv[1], "dyn") == 0) { vdnn_type = vDNN_DYN; filename.append("_dyn"); } else if (strcmp(argv[1], "conv") == 0) { vdnn_type = vDNN_CONV; filename.append("_conv"); } else if (strcmp(argv[1], "all") == 0) { vdnn_type = vDNN_ALL; filename.append("_all"); } else { printf("invalid argument.. using vdnn dynamic\n"); filename.assign("vdnn_dyn"); } if ((strcmp(argv[1], "conv") == 0 or strcmp(argv[1], "all") == 0)) { if (strcmp(argv[2], "p") == 0) { vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL; filename.append("_p"); } else if (strcmp(argv[2], "m") == 0) { vdnn_conv_algo = vDNN_MEMORY_OPTIMAL; filename.append("_m"); } else { printf("invalid argument.. using vdnn dynamic\n"); filename.assign("vdnn_dyn"); } } } int batch_size = 128; long long dropout_seed = 1; float softmax_eps = 1e-8; float init_std_dev = 0.1; NeuralNet net(layer_specifier, DATA_FLOAT, batch_size, TENSOR_NCHW, dropout_seed, softmax_eps, init_std_dev, vdnn_type, vdnn_conv_algo, SGD); int num_epoch = 1000; double learning_rate = 1e-3; double learning_rate_decay = 0.9; Solver solver(&net, (void *)f_train_images, f_train_labels, (void *)f_train_images, f_train_labels, num_epoch, SGD, learning_rate, learning_rate_decay, num_train, num_train); vector<float> loss; vector<float> time; vector<vector<float> > fwd_vdnn_lag, bwd_vdnn_lag; solver.getTrainTime(loss, time, 100, fwd_vdnn_lag, bwd_vdnn_lag); printTimes(time, filename); printvDNNLag(fwd_vdnn_lag, bwd_vdnn_lag, filename); vector<vector<float> > fwd_computation_time, bwd_computation_time; solver.getComputationTime(1, fwd_computation_time, bwd_computation_time); vector<vector<float> > fwd_transfer_time, bwd_transfer_time; solver.getTransferTime(1, fwd_transfer_time, bwd_transfer_time); printComputationTransferTimes(fwd_computation_time, bwd_computation_time, true, filename); printComputationTransferTimes(fwd_transfer_time, bwd_transfer_time, false, filename); } void printTimes(vector<float> &time, string filename) { float mean_time = 0.0; float std_dev = 0.0; int N = time.size(); for (int i = 0; i < N; i++) { mean_time += time[i]; } mean_time /= N; for (int i = 0; i < N; i++) { std_dev += pow(time[i] - mean_time, 2); } std_dev /= N; std_dev = pow(std_dev, 0.5); cout << "Average time: " << mean_time << endl; cout << "Standard deviation: " << std_dev << endl; filename.append(".dat"); fstream f; f.open(filename.c_str(), ios_base::out); for (int i = 0; i < N; i++) { f << time[i] << endl; } f << "mean_time: " << mean_time << endl; f << "standard_deviation: " << std_dev << endl; f.close(); filename.append(".bin"); fstream f_bin; f_bin.open(filename.c_str(), ios_base::out); f_bin.write((char *)&N, sizeof(N)); 
for (int i = 0; i < N; i++) { f_bin.write((char *)&time[i], sizeof(time[i])); } f_bin.close(); } void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag, vector<vector<float> > &bwd_vdnn_lag, string filename) { filename.append("_lag.dat"); fstream f; f.open(filename.c_str(), ios_base::out); int N = fwd_vdnn_lag.size(); for (int i = 0; i < N; i++) { for (int j = 0; j < fwd_vdnn_lag[i].size(); j++) { f << "fwd" << j << ": " << fwd_vdnn_lag[i][j] << endl; } for (int j = 0; j < bwd_vdnn_lag[i].size(); j++) { f << "bwd" << j << ": " << bwd_vdnn_lag[i][j] << endl; } f << endl; } f.close(); } void printComputationTransferTimes(vector<vector<float> > &fwd_times, vector<vector<float> >&bwd_times, bool computation, string filename) { if (computation) filename.append("_compute_time.dat"); else filename.append("_transfer_time.dat"); fstream f; f.open(filename.c_str(), ios_base::out); int N = fwd_times.size(); for (int i = 0; i < N; i++) { for (int j = 0; j < fwd_times[i].size(); j++) { f << "fwd" << j << ": " << fwd_times[i][j] << endl; } for (int j = 0; j < bwd_times[i].size(); j++) { f << "bwd" << j << ": " << bwd_times[i][j] << endl; } f << endl; } f.close(); }
08eb220be99682964b7274b1054c9ebe2888f805.cu
#include <iostream> #include <cstdlib> #include <cmath> #include <vector> #include <string> #include "solver.h" using namespace std; typedef unsigned char uchar; int num_train = 128, num_test = 500; int reverseInt(int n) { int bytes = 4; unsigned char ch[bytes]; for (int i = 0; i < bytes; i++) { ch[i] = (n >> i * 8) & 255; } int p = 0; for (int i = 0; i < bytes; i++) { p += (int) ch[i] << (bytes - i - 1) * 8; } return p; } void readMNIST(vector<vector<uchar> > &train_images, vector<vector<uchar> > &test_images, vector<uchar> &train_labels, vector<uchar> &test_labels) { string filename_train_images = "data/train-images.idx3-ubyte"; string filename_train_labels = "data/train-labels.idx1-ubyte"; string filename_test_images = "data/t10k-images.idx3-ubyte"; string filename_test_labels = "data/t10k-labels.idx1-ubyte"; // read train/test images for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_images; else filename = filename_test_images; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_images = 0, n_rows = 0, n_cols = 0; f.read((char *) &magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *) &n_images, sizeof(n_images)); n_images = reverseInt(n_images); f.read((char *) &n_rows, sizeof(n_rows)); n_rows = reverseInt(n_rows); f.read((char *) &n_cols, sizeof(n_cols)); n_cols = reverseInt(n_cols); for (int k = 0; k < n_images; k++) { vector<uchar> temp; temp.reserve(n_rows * n_cols); for (int j = 0; j < n_rows * n_cols; j++) { uchar t = 0; f.read((char *)&t, sizeof(t)); temp.push_back(t); } if (i == 0) train_images.push_back(temp); else test_images.push_back(temp); } f.close(); } // read train/test labels for (int i = 0; i < 2; i++) { string filename; if (i == 0) filename = filename_train_labels; else filename = filename_test_labels; ifstream f(filename.c_str(), ios::binary); if (!f.is_open()) printf("Cannot read MNIST from %s\n", filename.c_str()); // read metadata int magic_number = 0, n_labels = 0; f.read((char *) &magic_number, sizeof(magic_number)); magic_number = reverseInt(magic_number); f.read((char *) &n_labels, sizeof(n_labels)); n_labels = reverseInt(n_labels); for (int k = 0; k < n_labels; k++) { uchar t = 0; f.read((char *)&t, sizeof(t)); if (i == 0) train_labels.push_back(t); else test_labels.push_back(t); } f.close(); } } void printTimes(vector<float> &time, string filename); void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag, vector<vector<float> > &bwd_vdnn_lag, string filename); void printComputationTransferTimes(vector<vector<float> > &fwd_times, vector<vector<float> >&bwd_times, bool computation, string filename); int main(int argc, char *argv[]) { // int num_train = 100 * batch_size, num_val = batch_size; // void *X_train = malloc(num_train * input_channels * sizeof(float)); // int *y_train = (int *)malloc(num_train * sizeof(int)); // void *X_val = malloc(num_val * input_channels * sizeof(float)); // int *y_val = (int *)malloc(num_val * sizeof(int)); // for (int i = 0; i < num_train; i++) { // for (int j = 0; j < input_channels; j++) // ((float *)X_train)[i * input_channels + j] = (rand() % 1000) * 1.0 / 1000; // y_train[i] = 0; // } // for (int i = 0; i < num_val; i++) { // for (int j = 0; j < input_channels; j++) // ((float *)X_val)[i * input_channels + j] = (rand() % 1000) * 1.0 / 1000; // y_val[i] = rand() % 2; // } // int rows = 28, cols = 28, channels = 1; // vector<vector<uchar> > 
train_images, test_images; // vector<uchar> train_labels, test_labels; // readMNIST(train_images, test_images, train_labels, test_labels); // float *f_train_images, *f_train_labels, *f_test_images, *f_test_labels; float *f_train_images, *f_test_images; int *f_train_labels, *f_test_labels; int rows = 227, cols = 227, channels = 3; int input_size = rows * cols * channels; // f_train_images = (float *)malloc(num_train * input_size * sizeof(float)); // f_train_labels = (int *)malloc(num_train * sizeof(int)); checkCudaErrors(cudaMallocHost(&f_train_images, num_train * input_size * sizeof(float))); checkCudaErrors(cudaMallocHost(&f_train_labels, num_train * sizeof(int))); f_test_images = (float *)malloc(num_test * input_size * sizeof(float)); f_test_labels = (int *)malloc(num_test * sizeof(int)); float *mean_image; mean_image = (float *)malloc(input_size * sizeof(float)); for (int i = 0; i < input_size; i++) { mean_image[i] = 0; for (int k = 0; k < num_train; k++) { mean_image[i] += f_train_images[k * input_size + i]; } mean_image[i] /= num_train; } for (int i = 0; i < num_train; i++) { for (int j = 0; j < input_size; j++) { f_train_images[i * input_size + j] -= mean_image[j]; } } for (int i = 0; i < num_test; i++) { for (int j = 0; j < input_size; j++) { f_test_images[i * input_size + j] -= mean_image[j]; } } // int input_channels = rows * cols * channels * 3, hidden_channels1 = 50, hidden_channels2 = 100, output_channels = 10; // vector<LayerSpecifier> layer_specifier; // ConvDescriptor layer0; // LayerSpecifier temp; // layer0.initializeValues(1, 3, 3, 3, rows, cols, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // ActivationDescriptor layer0_actv; // layer0_actv.initializeValues(RELU, 3, rows, cols); // temp.initPointer(ACTV); // *((ActivationDescriptor *)temp.params) = layer0_actv; // layer_specifier.push_back(temp); // BatchNormDescriptor layer0_bn; // for (int i = 0; i < 200; i++) { // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows, cols); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // layer0.initializeValues(3, 3, 3, 3, rows, cols, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // layer0_actv.initializeValues(RELU, 3, rows, cols); // temp.initPointer(ACTV); // *((ActivationDescriptor *)temp.params) = layer0_actv; // layer_specifier.push_back(temp); // } // PoolingDescriptor layer0_pool; // layer0_pool.initializeValues(3, 2, 2, rows, cols, 0, 0, 2, 2, POOLING_MAX); // temp.initPointer(POOLING); // *((PoolingDescriptor *)temp.params) = layer0_pool; // layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols / 2); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // // DropoutDescriptor layer0_dropout; // // layer0_dropout.initializeValues(0.2, 3, rows / 2, cols / 2); // // temp.initPointer(DROPOUT); // // *((DropoutDescriptor *)temp.params) = layer0_dropout; // // layer_specifier.push_back(temp); // layer0.initializeValues(3, 3, 3, 3, rows / 2, cols / 2, 1, 1, 1, 1); // temp.initPointer(CONV); // *((ConvDescriptor *)temp.params) = layer0; // layer_specifier.push_back(temp); // layer0_actv.initializeValues(RELU, 3, rows / 2, cols / 2); // temp.initPointer(ACTV); // *((ActivationDescriptor *)temp.params) = layer0_actv; // 
layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_SPATIAL, 1e-5, 0.1, 3, rows / 2, cols / 2); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // FCDescriptor layer1; // layer1.initializeValues(input_channels, hidden_channels1); // temp.initPointer(FULLY_CONNECTED); // *((FCDescriptor *)(temp.params)) = layer1; // layer_specifier.push_back(temp); // temp.initPointer(ACTV); // ActivationDescriptor layer1_actv; // layer1_actv.initializeValues(RELU, hidden_channels1, 1, 1); // *((ActivationDescriptor *)temp.params) = layer1_actv; // layer_specifier.push_back(temp); // layer0_bn.initializeValues(BATCHNORM_PER_ACTIVATION, 1e-5, 0.1, hidden_channels1, 1, 1); // temp.initPointer(BATCHNORM); // *((BatchNormDescriptor *)temp.params) = layer0_bn; // layer_specifier.push_back(temp); // temp.initPointer(FULLY_CONNECTED); // FCDescriptor layer2; // layer2.initializeValues(hidden_channels1, output_channels); // *((FCDescriptor *)temp.params) = layer2; // layer_specifier.push_back(temp); // // temp.initPointer(FULLY_CONNECTED); // // FCDescriptor layer3; // // layer3.initializeValues(hidden_channels2, output_channels); // // *((FCDescriptor *)temp.params) = layer3; // // layer_specifier.push_back(temp); // temp.initPointer(SOFTMAX); // SoftmaxDescriptor smax; // smax.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, output_channels, 1, 1); // *((SoftmaxDescriptor *)(temp.params)) = smax; // layer_specifier.push_back(temp); // AlexNet vector<LayerSpecifier> layer_specifier; { ConvDescriptor layer0; layer0.initializeValues(3, 96, 11, 11, 227, 227, 0, 0, 4, 4, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer0; layer_specifier.push_back(temp); } { PoolingDescriptor layer1; layer1.initializeValues(96, 3, 3, 55, 55, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer1; layer_specifier.push_back(temp); } { ConvDescriptor layer2; layer2.initializeValues(96, 256, 5, 5, 27, 27, 2, 2, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer2; layer_specifier.push_back(temp); } { PoolingDescriptor layer3; layer3.initializeValues(256, 3, 3, 27, 27, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer3; layer_specifier.push_back(temp); } { ConvDescriptor layer4; layer4.initializeValues(256, 384, 3, 3, 13, 13, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer4; layer_specifier.push_back(temp); } { ConvDescriptor layer5; layer5.initializeValues(384, 384, 3, 3, 13, 13, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer5; layer_specifier.push_back(temp); } { ConvDescriptor layer6; layer6.initializeValues(384, 256, 3, 3, 13, 13, 1, 1, 1, 1, RELU); LayerSpecifier temp; temp.initPointer(CONV); *((ConvDescriptor *)temp.params) = layer6; layer_specifier.push_back(temp); } { PoolingDescriptor layer7; layer7.initializeValues(256, 3, 3, 13, 13, 0, 0, 2, 2, POOLING_MAX); LayerSpecifier temp; temp.initPointer(POOLING); *((PoolingDescriptor *)temp.params) = layer7; layer_specifier.push_back(temp); } { FCDescriptor layer8; layer8.initializeValues(9216, 4096, RELU); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = layer8; layer_specifier.push_back(temp); } { FCDescriptor 
layer9; layer9.initializeValues(4096, 4096, RELU); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = layer9; layer_specifier.push_back(temp); } { FCDescriptor layer10; layer10.initializeValues(4096, 1000); LayerSpecifier temp; temp.initPointer(FULLY_CONNECTED); *((FCDescriptor *)temp.params) = layer10; layer_specifier.push_back(temp); } { SoftmaxDescriptor layer11; layer11.initializeValues(SOFTMAX_ACCURATE, SOFTMAX_MODE_INSTANCE, 1000, 1, 1); LayerSpecifier temp; temp.initPointer(SOFTMAX); *((SoftmaxDescriptor *)temp.params) = layer11; layer_specifier.push_back(temp); } vDNNConvAlgo vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL; vDNNType vdnn_type = vDNN_DYN; string filename("vdnn_dyn"); if (argc == 3) { filename.assign("vdnn"); // argv[1] - layers to offload, argv[2] - conv algo to use if (strcmp(argv[1], "dyn") == 0) { vdnn_type = vDNN_DYN; filename.append("_dyn"); } else if (strcmp(argv[1], "conv") == 0) { vdnn_type = vDNN_CONV; filename.append("_conv"); } else if (strcmp(argv[1], "all") == 0) { vdnn_type = vDNN_ALL; filename.append("_all"); } else { printf("invalid argument.. using vdnn dynamic\n"); filename.assign("vdnn_dyn"); } if ((strcmp(argv[1], "conv") == 0 or strcmp(argv[1], "all") == 0)) { if (strcmp(argv[2], "p") == 0) { vdnn_conv_algo = vDNN_PERFORMANCE_OPTIMAL; filename.append("_p"); } else if (strcmp(argv[2], "m") == 0) { vdnn_conv_algo = vDNN_MEMORY_OPTIMAL; filename.append("_m"); } else { printf("invalid argument.. using vdnn dynamic\n"); filename.assign("vdnn_dyn"); } } } int batch_size = 128; long long dropout_seed = 1; float softmax_eps = 1e-8; float init_std_dev = 0.1; NeuralNet net(layer_specifier, DATA_FLOAT, batch_size, TENSOR_NCHW, dropout_seed, softmax_eps, init_std_dev, vdnn_type, vdnn_conv_algo, SGD); int num_epoch = 1000; double learning_rate = 1e-3; double learning_rate_decay = 0.9; Solver solver(&net, (void *)f_train_images, f_train_labels, (void *)f_train_images, f_train_labels, num_epoch, SGD, learning_rate, learning_rate_decay, num_train, num_train); vector<float> loss; vector<float> time; vector<vector<float> > fwd_vdnn_lag, bwd_vdnn_lag; solver.getTrainTime(loss, time, 100, fwd_vdnn_lag, bwd_vdnn_lag); printTimes(time, filename); printvDNNLag(fwd_vdnn_lag, bwd_vdnn_lag, filename); vector<vector<float> > fwd_computation_time, bwd_computation_time; solver.getComputationTime(1, fwd_computation_time, bwd_computation_time); vector<vector<float> > fwd_transfer_time, bwd_transfer_time; solver.getTransferTime(1, fwd_transfer_time, bwd_transfer_time); printComputationTransferTimes(fwd_computation_time, bwd_computation_time, true, filename); printComputationTransferTimes(fwd_transfer_time, bwd_transfer_time, false, filename); } void printTimes(vector<float> &time, string filename) { float mean_time = 0.0; float std_dev = 0.0; int N = time.size(); for (int i = 0; i < N; i++) { mean_time += time[i]; } mean_time /= N; for (int i = 0; i < N; i++) { std_dev += pow(time[i] - mean_time, 2); } std_dev /= N; std_dev = pow(std_dev, 0.5); cout << "Average time: " << mean_time << endl; cout << "Standard deviation: " << std_dev << endl; filename.append(".dat"); fstream f; f.open(filename.c_str(), ios_base::out); for (int i = 0; i < N; i++) { f << time[i] << endl; } f << "mean_time: " << mean_time << endl; f << "standard_deviation: " << std_dev << endl; f.close(); filename.append(".bin"); fstream f_bin; f_bin.open(filename.c_str(), ios_base::out); f_bin.write((char *)&N, sizeof(N)); for (int i = 0; i < N; i++) { f_bin.write((char *)&time[i], 
sizeof(time[i])); } f_bin.close(); } void printvDNNLag(vector<vector<float> > &fwd_vdnn_lag, vector<vector<float> > &bwd_vdnn_lag, string filename) { filename.append("_lag.dat"); fstream f; f.open(filename.c_str(), ios_base::out); int N = fwd_vdnn_lag.size(); for (int i = 0; i < N; i++) { for (int j = 0; j < fwd_vdnn_lag[i].size(); j++) { f << "fwd" << j << ": " << fwd_vdnn_lag[i][j] << endl; } for (int j = 0; j < bwd_vdnn_lag[i].size(); j++) { f << "bwd" << j << ": " << bwd_vdnn_lag[i][j] << endl; } f << endl; } f.close(); } void printComputationTransferTimes(vector<vector<float> > &fwd_times, vector<vector<float> >&bwd_times, bool computation, string filename) { if (computation) filename.append("_compute_time.dat"); else filename.append("_transfer_time.dat"); fstream f; f.open(filename.c_str(), ios_base::out); int N = fwd_times.size(); for (int i = 0; i < N; i++) { for (int j = 0; j < fwd_times[i].size(); j++) { f << "fwd" << j << ": " << fwd_times[i][j] << endl; } for (int j = 0; j < bwd_times[i].size(); j++) { f << "bwd" << j << ": " << bwd_times[i][j] << endl; } f << endl; } f.close(); }
7fb99182dda048579196655d2dd417c1bb00f248.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include "MDSystem_interface.h" #include "common.h" #include "BoxGeometry.h" #include "MDSystem.h" #include "RandomGenerator.h" #include "Auxiliary.h" #include "NeighborList_interface.h" #include"Statistic.h" #include "Integrator_interface.h" #include "InteractionEngine_interface.h" #include "tmp.h" #include "Reshuffle_interface.h" #include "Displacement_interface.h" #include "AssignRCut.h" #include "Topology.h" #include "SystemBondedInteraction.h" #include "BondInteraction.h" #include "NonBondedInteraction.h" #include "PressureCorrection.h" #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <fftw3.h> // #define NThreadsPerBlockCell 32 // #define NThreadsPerBlockAtom 4 #define NThreadsPerBlockCell 96 #define NThreadsPerBlockAtom 96 #include "DensityProfile.h" int main(int argc, char * argv[]) { IndexType nstep = 10000; IndexType confFeq = 2000; IndexType thermoFeq = 100; ScalorType dt = 0.005; ScalorType rcut = 5.0; ScalorType nlistExten = 0.49; ScalorType refT = 1.10; ScalorType tauT = 1.0; char * filename; IndexType densityProfileSamplingFeq = 40; IndexType rcutAssignFeq = 40; IndexType rcutUpdateFeq = 2000; IndexType rcutNumRefine = 0; double refh = 1.0; double rcmin = 03.0; double rcmax = 10.0; double rcstep = 0.5; double targetPrec = 0.020; if (argc != 4){ printf ("Usage:\n%s conf.gro nstep device\n", argv[0]); return 1; } if (argc != 1){ nstep = atoi(argv[2]); filename = argv[1]; } printf ("# setting device to %d\n", atoi(argv[3])); hipSetDevice (atoi(argv[3])); checkCUDAError ("set device"); MDSystem sys; sys.initConfig(filename); Topology::System sysTop; Topology::Molecule mol; mol.pushAtom (Topology::Atom (1.0, 0.0, 0)); LennardJones6_12Parameter ljparam; ljparam.reinit (1.f, 1.f, 0.f, rcmax); sysTop.addNonBondedInteraction (Topology::NonBondedInteraction(0, 0, ljparam)); sysTop.addMolecules (mol, sys.hdata.numAtom); sys.initTopology (sysTop); sys.initDeviceData (); DensityProfile_PiecewiseConst dp; printf ("# init DensityProfile_PiecewiseConst\n"); dp.reinit (sys.box.size.x, sys.box.size.y, sys.box.size.z, refh); AdaptRCut arc; printf ("# init AdaptRCut\n"); arc.reinit (rcmin, rcmax, rcstep, dp); AssignRCut assign_rcut; printf ("# init AssignRCut\n"); assign_rcut.reinit (sys, arc, NThreadsPerBlockAtom); assign_rcut.uniform (rcut); assign_rcut.print_x ("rcut.x.out"); assign_rcut.assign (sys); PressureCorrection pc (arc, dp); ScalorType pcxx, pcyy, pczz; pcxx = pcyy = pczz = 0.; SystemNonBondedInteraction sysNbInter; sysNbInter.reinit (sysTop); ScalorType energyCorr = sysNbInter.energyCorrection (); ScalorType pressureCorr = sysNbInter.pressureCorrection (); ScalorType rlist = rcmax + nlistExten; CellList clist (sys, rlist, NThreadsPerBlockCell, NThreadsPerBlockAtom); CellList clist_resh (sys, rcmin, NThreadsPerBlockCell, NThreadsPerBlockAtom); NeighborList nlist (sysNbInter, sys, rlist, nlistExten, NThreadsPerBlockAtom, 4.f); sys.normalizeDeviceData (); clist.rebuild (sys, NULL); clist_resh.rebuild (sys, NULL); nlist.rebuild (sys, clist, NULL); Displacement_max disp (sys, NThreadsPerBlockAtom); disp.recordCoord (sys); MDStatistic st(sys); TranslationalFreedomRemover tfremover (sys, NThreadsPerBlockAtom); InteractionEngine inter (sys, NThreadsPerBlockAtom); inter.registNonBondedInteraction (sysNbInter); MDTimer timer; unsigned i; ScalorType seed = 1; RandomGenerator_MT19937::init_genrand (seed); VelocityVerlet inte_vv (sys, NThreadsPerBlockAtom); VelocityRescale inte_vr (sys, 
NThreadsPerBlockAtom, refT, 0.1); NoseHoover_Chains2 nhc; nhc.reinit (sys, NThreadsPerBlockAtom, refT, tauT); Reshuffle resh (sys); timer.tic(mdTimeTotal); if (resh.calIndexTable (clist_resh, &timer)){ sys.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer); clist.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer); clist_resh.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer); nlist.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer); disp.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer); } printf ("# prepare ok, start to run\n"); sys.recoverDeviceData (&timer); sys.updateHostFromRecovered (&timer); sys.writeHostDataGro ("confstart.gro", 0, 0.f, &timer); dp.init_write ("density.dtj"); assign_rcut.init_write ("rcut.rtj"); printf ("# prepare ok, start to run\n"); printf ("#* 1 2 3 4 5 6 7 8 9 10 11 12 13 14\n"); printf ("#* nstep time nonBondedE kineticE temperature totalE NHC_Hamiltonian pressureXX pressureYY pressureZZ s_tension pcxx pcyy tc\n"); try{ sys.initWriteXtc ("traj.xtc"); sys.recoverDeviceData (&timer); sys.updateHostFromRecovered (&timer); sys.writeHostDataXtc (0, 0*dt, &timer); for (i = 0; i < nstep; ++i){ if (i%1 == 0){ tfremover.remove (sys, &timer); } nhc.operator_L (0.5 * dt, sys, &timer); inte_vv.step1 (sys, dt, &timer); st.clearDevice(); inter.clearInteraction (sys); ScalorType maxdr = disp.calMaxDisplacemant (sys, &timer); if (maxdr > nlistExten * 0.5){ // printf ("# Rebuild at step %09i ... ", i+1); // fflush(stdout); // rebuild sys.normalizeDeviceData (&timer); disp.recordCoord (sys); clist.rebuild (sys, &timer); clist_resh.rebuild (sys, &timer); nlist.rebuild (sys, clist, &timer); // printf ("done\n"); // fflush(stdout); } inter.applyNonBondedInteraction (sys, nlist, st, NULL, &timer); if ((i) % rcutAssignFeq == 0){ timer.tic (mdTimeAdaptRCut); assign_rcut.assign (sys); timer.toc (mdTimeAdaptRCut); } inte_vv.step2 (sys, dt, &timer); if ((i+1) % thermoFeq == 0){ nhc.operator_L (0.5 * dt, sys, st, &timer); } else { nhc.operator_L (0.5 * dt, sys, &timer); } if ((i+1) % thermoFeq == 0){ timer.tic (mdTimeDataIO); st.updateHost (); ScalorType px = st.pressureXX (sys.box); ScalorType py = st.pressureYY (sys.box); ScalorType pz = st.pressureZZ (sys.box); printf ("%09d %05e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.2e\n", (i+1), (i+1) * dt, st.nonBondedEnergy(), st.kineticEnergy(), st.kineticEnergy() * 2. / 3. 
/ (double (sys.hdata.numAtom) - 3.), st.nonBondedEnergy() + st.kineticEnergy(), st.nonBondedEnergy() + st.kineticEnergy() + nhc.HamiltonianContribution (), px, py, pz, (px - (py + pz) * 0.5) * sys.box.size.x * 0.5, pcxx, pcyy, (pcxx - (pcyy + pczz) * 0.5) * sys.box.size.x * 0.5, double (nlist.calSumNeighbor ()) ); fflush(stdout); timer.toc (mdTimeDataIO); } if ((i+1) % densityProfileSamplingFeq == 0) { timer.tic (mdTimeDensityProfile); sys.updateHostFromDevice (NULL); dp.deposite (sys.hdata.coord, sys.hdata.numAtom); timer.toc (mdTimeDensityProfile); } if ((i+1) % rcutUpdateFeq == 0) { // printf ("# update rcut\n"); timer.tic (mdTimeDensityProfile); dp.calculate (); dp.print_x ("density.x.out"); timer.toc (mdTimeDensityProfile); timer.tic (mdTimeAdaptRCut); arc.calError (dp); arc.calRCut (targetPrec); for (IndexType jj = 0; jj < rcutNumRefine; ++jj){ arc.refineRCut (); } arc.print_x ("error.x.out"); assign_rcut.getRCut (arc); assign_rcut.print_x ("rcut.x.out"); pc.correction (arc, dp); pcxx = pc.pxx; pcyy = pc.pyy; pczz = pc.pzz; timer.toc (mdTimeAdaptRCut); if (i != nstep - 1) dp.clearData (); } if ((i+1) % confFeq == 0){ // printf ("write conf\n"); sys.recoverDeviceData (&timer); sys.updateHostFromRecovered (&timer); sys.writeHostDataXtc (i+1, (i+1)*dt, &timer); dp.write ((i+1) * dt); assign_rcut.write ((i+1) * dt); } if ((i+1) % 100 == 0){ if (resh.calIndexTable (clist_resh, &timer)){ sys.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer); clist.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer); clist_resh.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer); nlist.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer); disp.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer); } } } sys.endWriteXtc(); sys.recoverDeviceData (&timer); sys.updateHostFromRecovered (&timer); sys.writeHostDataGro ("confout.gro", nstep, nstep*dt, &timer); timer.toc(mdTimeTotal); timer.printRecord (stderr); } catch (MDExcptCuda & e){ // resh.recoverMDDataToHost (sys, &timer); // sys.writeHostDataXtc (i+1, (i+1)*dt, &timer); timer.toc(mdTimeTotal); timer.printRecord (stderr); return 1; } catch (MDException &e){ fprintf (stderr, "%s\n", e.what()); return 1; } dp.end_write(); assign_rcut.end_write(); dp.save ("density.save"); arc.save_rc ("rcut.save"); arc.print_error_avg (dp, "a.error.x.out"); arc.print_rc_avg ("a.rcut.x.out"); return 0; }
7fb99182dda048579196655d2dd417c1bb00f248.cu
#include <stdio.h> #include "MDSystem_interface.h" #include "common.h" #include "BoxGeometry.h" #include "MDSystem.h" #include "RandomGenerator.h" #include "Auxiliary.h" #include "NeighborList_interface.h" #include"Statistic.h" #include "Integrator_interface.h" #include "InteractionEngine_interface.h" #include "tmp.h" #include "Reshuffle_interface.h" #include "Displacement_interface.h" #include "AssignRCut.h" #include "Topology.h" #include "SystemBondedInteraction.h" #include "BondInteraction.h" #include "NonBondedInteraction.h" #include "PressureCorrection.h" #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <fftw3.h> // #define NThreadsPerBlockCell 32 // #define NThreadsPerBlockAtom 4 #define NThreadsPerBlockCell 96 #define NThreadsPerBlockAtom 96 #include "DensityProfile.h" int main(int argc, char * argv[]) { IndexType nstep = 10000; IndexType confFeq = 2000; IndexType thermoFeq = 100; ScalorType dt = 0.005; ScalorType rcut = 5.0; ScalorType nlistExten = 0.49; ScalorType refT = 1.10; ScalorType tauT = 1.0; char * filename; IndexType densityProfileSamplingFeq = 40; IndexType rcutAssignFeq = 40; IndexType rcutUpdateFeq = 2000; IndexType rcutNumRefine = 0; double refh = 1.0; double rcmin = 03.0; double rcmax = 10.0; double rcstep = 0.5; double targetPrec = 0.020; if (argc != 4){ printf ("Usage:\n%s conf.gro nstep device\n", argv[0]); return 1; } if (argc != 1){ nstep = atoi(argv[2]); filename = argv[1]; } printf ("# setting device to %d\n", atoi(argv[3])); cudaSetDevice (atoi(argv[3])); checkCUDAError ("set device"); MDSystem sys; sys.initConfig(filename); Topology::System sysTop; Topology::Molecule mol; mol.pushAtom (Topology::Atom (1.0, 0.0, 0)); LennardJones6_12Parameter ljparam; ljparam.reinit (1.f, 1.f, 0.f, rcmax); sysTop.addNonBondedInteraction (Topology::NonBondedInteraction(0, 0, ljparam)); sysTop.addMolecules (mol, sys.hdata.numAtom); sys.initTopology (sysTop); sys.initDeviceData (); DensityProfile_PiecewiseConst dp; printf ("# init DensityProfile_PiecewiseConst\n"); dp.reinit (sys.box.size.x, sys.box.size.y, sys.box.size.z, refh); AdaptRCut arc; printf ("# init AdaptRCut\n"); arc.reinit (rcmin, rcmax, rcstep, dp); AssignRCut assign_rcut; printf ("# init AssignRCut\n"); assign_rcut.reinit (sys, arc, NThreadsPerBlockAtom); assign_rcut.uniform (rcut); assign_rcut.print_x ("rcut.x.out"); assign_rcut.assign (sys); PressureCorrection pc (arc, dp); ScalorType pcxx, pcyy, pczz; pcxx = pcyy = pczz = 0.; SystemNonBondedInteraction sysNbInter; sysNbInter.reinit (sysTop); ScalorType energyCorr = sysNbInter.energyCorrection (); ScalorType pressureCorr = sysNbInter.pressureCorrection (); ScalorType rlist = rcmax + nlistExten; CellList clist (sys, rlist, NThreadsPerBlockCell, NThreadsPerBlockAtom); CellList clist_resh (sys, rcmin, NThreadsPerBlockCell, NThreadsPerBlockAtom); NeighborList nlist (sysNbInter, sys, rlist, nlistExten, NThreadsPerBlockAtom, 4.f); sys.normalizeDeviceData (); clist.rebuild (sys, NULL); clist_resh.rebuild (sys, NULL); nlist.rebuild (sys, clist, NULL); Displacement_max disp (sys, NThreadsPerBlockAtom); disp.recordCoord (sys); MDStatistic st(sys); TranslationalFreedomRemover tfremover (sys, NThreadsPerBlockAtom); InteractionEngine inter (sys, NThreadsPerBlockAtom); inter.registNonBondedInteraction (sysNbInter); MDTimer timer; unsigned i; ScalorType seed = 1; RandomGenerator_MT19937::init_genrand (seed); VelocityVerlet inte_vv (sys, NThreadsPerBlockAtom); VelocityRescale inte_vr (sys, NThreadsPerBlockAtom, refT, 0.1); NoseHoover_Chains2 nhc; 
nhc.reinit (sys, NThreadsPerBlockAtom, refT, tauT); Reshuffle resh (sys); timer.tic(mdTimeTotal); if (resh.calIndexTable (clist_resh, &timer)){ sys.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer); clist.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer); clist_resh.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer); nlist.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer); disp.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer); } printf ("# prepare ok, start to run\n"); sys.recoverDeviceData (&timer); sys.updateHostFromRecovered (&timer); sys.writeHostDataGro ("confstart.gro", 0, 0.f, &timer); dp.init_write ("density.dtj"); assign_rcut.init_write ("rcut.rtj"); printf ("# prepare ok, start to run\n"); printf ("#* 1 2 3 4 5 6 7 8 9 10 11 12 13 14\n"); printf ("#* nstep time nonBondedE kineticE temperature totalE NHC_Hamiltonian pressureXX pressureYY pressureZZ s_tension pcxx pcyy tc\n"); try{ sys.initWriteXtc ("traj.xtc"); sys.recoverDeviceData (&timer); sys.updateHostFromRecovered (&timer); sys.writeHostDataXtc (0, 0*dt, &timer); for (i = 0; i < nstep; ++i){ if (i%1 == 0){ tfremover.remove (sys, &timer); } nhc.operator_L (0.5 * dt, sys, &timer); inte_vv.step1 (sys, dt, &timer); st.clearDevice(); inter.clearInteraction (sys); ScalorType maxdr = disp.calMaxDisplacemant (sys, &timer); if (maxdr > nlistExten * 0.5){ // printf ("# Rebuild at step %09i ... ", i+1); // fflush(stdout); // rebuild sys.normalizeDeviceData (&timer); disp.recordCoord (sys); clist.rebuild (sys, &timer); clist_resh.rebuild (sys, &timer); nlist.rebuild (sys, clist, &timer); // printf ("done\n"); // fflush(stdout); } inter.applyNonBondedInteraction (sys, nlist, st, NULL, &timer); if ((i) % rcutAssignFeq == 0){ timer.tic (mdTimeAdaptRCut); assign_rcut.assign (sys); timer.toc (mdTimeAdaptRCut); } inte_vv.step2 (sys, dt, &timer); if ((i+1) % thermoFeq == 0){ nhc.operator_L (0.5 * dt, sys, st, &timer); } else { nhc.operator_L (0.5 * dt, sys, &timer); } if ((i+1) % thermoFeq == 0){ timer.tic (mdTimeDataIO); st.updateHost (); ScalorType px = st.pressureXX (sys.box); ScalorType py = st.pressureYY (sys.box); ScalorType pz = st.pressureZZ (sys.box); printf ("%09d %05e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.5e %.2e\n", (i+1), (i+1) * dt, st.nonBondedEnergy(), st.kineticEnergy(), st.kineticEnergy() * 2. / 3. 
/ (double (sys.hdata.numAtom) - 3.), st.nonBondedEnergy() + st.kineticEnergy(), st.nonBondedEnergy() + st.kineticEnergy() + nhc.HamiltonianContribution (), px, py, pz, (px - (py + pz) * 0.5) * sys.box.size.x * 0.5, pcxx, pcyy, (pcxx - (pcyy + pczz) * 0.5) * sys.box.size.x * 0.5, double (nlist.calSumNeighbor ()) ); fflush(stdout); timer.toc (mdTimeDataIO); } if ((i+1) % densityProfileSamplingFeq == 0) { timer.tic (mdTimeDensityProfile); sys.updateHostFromDevice (NULL); dp.deposite (sys.hdata.coord, sys.hdata.numAtom); timer.toc (mdTimeDensityProfile); } if ((i+1) % rcutUpdateFeq == 0) { // printf ("# update rcut\n"); timer.tic (mdTimeDensityProfile); dp.calculate (); dp.print_x ("density.x.out"); timer.toc (mdTimeDensityProfile); timer.tic (mdTimeAdaptRCut); arc.calError (dp); arc.calRCut (targetPrec); for (IndexType jj = 0; jj < rcutNumRefine; ++jj){ arc.refineRCut (); } arc.print_x ("error.x.out"); assign_rcut.getRCut (arc); assign_rcut.print_x ("rcut.x.out"); pc.correction (arc, dp); pcxx = pc.pxx; pcyy = pc.pyy; pczz = pc.pzz; timer.toc (mdTimeAdaptRCut); if (i != nstep - 1) dp.clearData (); } if ((i+1) % confFeq == 0){ // printf ("write conf\n"); sys.recoverDeviceData (&timer); sys.updateHostFromRecovered (&timer); sys.writeHostDataXtc (i+1, (i+1)*dt, &timer); dp.write ((i+1) * dt); assign_rcut.write ((i+1) * dt); } if ((i+1) % 100 == 0){ if (resh.calIndexTable (clist_resh, &timer)){ sys.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer); clist.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer); clist_resh.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer); nlist.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer); disp.reshuffle (resh.indexTable, sys.hdata.numAtom, &timer); } } } sys.endWriteXtc(); sys.recoverDeviceData (&timer); sys.updateHostFromRecovered (&timer); sys.writeHostDataGro ("confout.gro", nstep, nstep*dt, &timer); timer.toc(mdTimeTotal); timer.printRecord (stderr); } catch (MDExcptCuda & e){ // resh.recoverMDDataToHost (sys, &timer); // sys.writeHostDataXtc (i+1, (i+1)*dt, &timer); timer.toc(mdTimeTotal); timer.printRecord (stderr); return 1; } catch (MDException &e){ fprintf (stderr, "%s\n", e.what()); return 1; } dp.end_write(); assign_rcut.end_write(); dp.save ("density.save"); arc.save_rc ("rcut.save"); arc.print_error_avg (dp, "a.error.x.out"); arc.print_rc_avg ("a.rcut.x.out"); return 0; }
826fe7bee2d2a77e350c308627d66469459d2823.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <iostream> //#include <hip/hip_cooperative_groups.h> #include <math.h> #include <string.h> #include <sstream> #include <fstream> //#include <bits/stdc++.h> //#include <stdlib.h> //#include <time.h> using namespace std; //using namespace cooperative_groups; /***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/ //#define N 128 #define C 256 #define H 15 #define W 15 #define R 3 #define S 3 #define M 384 #define E 13 #define F 13 #define U 1 __global__ void red_ch(float* d_r, float* d_o, int num_ch, int num_img, int num_wt) { //printf("gpu2 started\n"); float red_sum = 0; int row = threadIdx.y; int col = threadIdx.x; for(int i=0; i<num_ch; i++) { red_sum += d_o[i*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] ; } d_r[blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] = red_sum; } __global__ void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch) {//printf("gpu started\n"); __shared__ float s_w[R*S]; __shared__ float s_i[H*W]; int row = threadIdx.y; int col = threadIdx.x; if(row*width+col<R*S) { s_w[row*width+col] = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(row*width+col)]; } //__syncthreads();//printf("wt done\n"); //if(row*width+col<(H*W+1)/2) { int s_i_idx = row*blockDim.x+col; s_i[s_i_idx] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx]; //s_i[s_i_idx+169] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx+169]; //s_i[s_i_idx+338] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx+338]; //s_i[s_i_idx+507] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx+507]; if(s_i_idx+169 < H*W) s_i[s_i_idx+169] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx+169]; } __syncthreads(); //printf("ip_done\n"); float prod = 0; if((row<height) && (col<width))//earlier it was num_wt*height & num_img*width { for (int i=0; i<wt_width; i++){ for (int j=0; j<wt_width; j++){ float ip = s_i[(stride*row+i)*ip_height+(stride*col+j)]; prod += ip*s_w[i*wt_width+j]; __syncthreads(); } } if(prod>=0) d_o[blockIdx.z*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] = prod; if(row*width+col<R*S){ s_w[(row*width+col)] = 0; __syncthreads(); } } } void element_wise_mmul(float* output, float* input, float* weight, int batch_size) { int x,y,i,j,m,n,k; for(n=0; n<batch_size; n++){ for (m=0 ; m<M; m++){ for (x=0; x<F; x++){ for(y=0; y<E; y++){ // OP[x][y] = 0; // adding bias to output for (i=0; i<R; i++){ for (j=0; j<S; j++){ for(k=0; k<C; k++){ float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)]; float wt = weight[m*C*R*S+k*R*S+i*S+j]; float prod = ip*wt; if(prod>=0) output[n*E*F*M+m*E*F+x*E+y] += prod; //OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j]; }} } } } } } } int main(int argc, char* argv[]) { int batch_size = atoi(argv[1]); /*************INITALIZING MATRICES*********************************/ float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float)); //float IP[H][W]; float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float)); //float OP[F][E]; float *OPG = (float*) 
malloc(batch_size*M*F*E*sizeof(float)); float *WT = (float*) malloc(M*C*R*S*sizeof(float)); //float WT[R][S]; float* d_o; float* d_i; float* d_w; float* d_r; //clock_t cpu_start, gpu_start, cpu_end, gpu_end; //int a,b,c,d; int c,d,m,n,k; /*INITIALIZING WEIGHT MATRIX*/ for (m=0; m<M; m++){ for(k=0;k<C;k++){ for (c=0; c<R; c++){ for(d=0; d<S; d++){ //WT[c][d] = 2.0; //WT[m*C*R*S+k*R*S+c*S+d] = (int)k+1; WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(float)(RAND_MAX+1.0); } } } } /*INITIALIZING OUTPUT MATRIX*/ for (n=0; n<batch_size;n++){ for (m=0; m<M; m++){ for (c=0; c<F; c++){ for(d=0; d<E; d++){ //OP[c][d] = 0; OP[n*M*F*E+m*F*E+c*E+d] = 0; } } } } /*INITIALIZING INPUT MATRIX*/ for (n=0; n<batch_size; n++){ for(k=0;k<C;k++){ for (c=0; c<H; c++){ for(d=0; d<W; d++){ // IP[c][d] = (a+b+c+d); if ((c==0) || (d==0) || (c==14) || (d==14)) IP[n*C*H*W+k*H*W+c*W+d] = 0; else IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0); } } } } if(hipSuccess != hipMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float))) { printf("error in d_i malloc\n"); } hipMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), hipMemcpyHostToDevice); if(hipSuccess != hipMalloc((void**) &d_w, M*C*R*S*sizeof(float))) { printf("error in d_w malloc\n"); } hipMemcpy(d_w, WT, M*C*R*S*sizeof(float), hipMemcpyHostToDevice); if(hipSuccess != hipMalloc((void**) &d_o,C*batch_size*M*E*F*sizeof(float))) { printf("error in d_o malloc\n"); } if(hipSuccess != hipMalloc((void**) &d_r,batch_size*M*E*F*sizeof(float))) { printf("error in d_r malloc\n"); } //cpu_start = clock(); //element_wise_mmul(OP, IP, WT, batch_size); printf("cpu done\n"); //cpu_end = clock(); dim3 dimGrid(batch_size,384,256); dim3 dimBlock(13,13,1); dim3 dimGridRed(batch_size,384,1); dim3 dimBlockRed(13,13,1); //int op_height = 3; int op_width = 3; int stride = 1; int ip_height = 4;int wt_height = 2; int num_wt = 96; int num_img = 1; int num_ch = 384; //gpu_start = clock();hipLaunchKernelGGL(( ew_gpu_mmul), dim3(dimGrid), dim3(dimBlock), 0, 0, d_o,d_i,d_w,13,13,1,15,3,384,batch_size,256); hipDeviceSynchronize();hipLaunchKernelGGL(( red_ch), dim3(dimGridRed), dim3(dimBlockRed), 0, 0, d_r,d_o,256,batch_size,384); //gpu_end = clock(); //void *kernelArgs[] = {(void *)&d_o, (void *)&d_i, (void *)&d_w,(void *)&op_height, (void *)&op_width, (void *)&stride, (void *)&ip_height,(void *)&wt_height, (void *)&num_wt, (void *)&num_img, (void *)&num_ch }; //hipLaunchCooperativeKernel((void*)ew_gpu_mmul,dimGrid,dimBlock,kernelArgs,0,NULL); //hipDeviceSynchronize(); hipMemcpy(OPG,d_r,batch_size*M*E*F*sizeof(float), hipMemcpyDeviceToHost); /**print outputs**/ //int e,f,g,h; int g,h,s,u; float max_error = 0; string filename = "layer_3_"+to_string(batch_size); ifstream fin(filename.c_str()); string line ; //for (t=0;t<C;t++){ for (u=0;u<batch_size;u++){ for (s=0;s<M;s++){ for (g=0; g<F; g++){ for(h=0; h<E; h++){ getline(fin,line); float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str())); //float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]); if(error > max_error) max_error = error; // printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h); // printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h); // printf("the output from GPU is %f for index,%d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h); } } } } fin.close(); printf("max error is %f\n", max_error); //} //cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl; //cout<<"time taken by gpu call is 
"<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl; hipFree(d_o); hipFree(d_i); hipFree(d_w); hipFree(d_r); free(OPG); free(IP); free(WT); free(OP); return 0; }
826fe7bee2d2a77e350c308627d66469459d2823.cu
#include <stdio.h> #include <iostream> //#include <cooperative_groups.h> #include <math.h> #include <string.h> #include <sstream> #include <fstream> //#include <bits/stdc++.h> //#include <stdlib.h> //#include <time.h> using namespace std; //using namespace cooperative_groups; /***DEFINING THE DEFINES FOR THE ARRAY INDICES****************************/ //#define N 128 #define C 256 #define H 15 #define W 15 #define R 3 #define S 3 #define M 384 #define E 13 #define F 13 #define U 1 __global__ void red_ch(float* d_r, float* d_o, int num_ch, int num_img, int num_wt) { //printf("gpu2 started\n"); float red_sum = 0; int row = threadIdx.y; int col = threadIdx.x; for(int i=0; i<num_ch; i++) { red_sum += d_o[i*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] ; } d_r[blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] = red_sum; } __global__ void ew_gpu_mmul(float* d_o, float* d_i, float* d_w, int width, int height, int stride, int ip_height, int wt_width, int num_wt,int num_img, int num_ch) {//printf("gpu started\n"); __shared__ float s_w[R*S]; __shared__ float s_i[H*W]; int row = threadIdx.y; int col = threadIdx.x; if(row*width+col<R*S) { s_w[row*width+col] = d_w[blockIdx.y*num_ch*wt_width*wt_width+blockIdx.z*wt_width*wt_width+(row*width+col)]; } //__syncthreads();//printf("wt done\n"); //if(row*width+col<(H*W+1)/2) { int s_i_idx = row*blockDim.x+col; s_i[s_i_idx] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx]; //s_i[s_i_idx+169] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx+169]; //s_i[s_i_idx+338] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx+338]; //s_i[s_i_idx+507] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx+507]; if(s_i_idx+169 < H*W) s_i[s_i_idx+169] = d_i[blockIdx.x*num_ch*ip_height*ip_height+blockIdx.z*ip_height*ip_height+s_i_idx+169]; } __syncthreads(); //printf("ip_done\n"); float prod = 0; if((row<height) && (col<width))//earlier it was num_wt*height & num_img*width { for (int i=0; i<wt_width; i++){ for (int j=0; j<wt_width; j++){ float ip = s_i[(stride*row+i)*ip_height+(stride*col+j)]; prod += ip*s_w[i*wt_width+j]; __syncthreads(); } } if(prod>=0) d_o[blockIdx.z*(num_wt*num_img*blockDim.x*blockDim.y)+blockIdx.x*num_wt*blockDim.x*blockDim.y+blockIdx.y*blockDim.x*blockDim.y+row*blockDim.x+col] = prod; if(row*width+col<R*S){ s_w[(row*width+col)] = 0; __syncthreads(); } } } void element_wise_mmul(float* output, float* input, float* weight, int batch_size) { int x,y,i,j,m,n,k; for(n=0; n<batch_size; n++){ for (m=0 ; m<M; m++){ for (x=0; x<F; x++){ for(y=0; y<E; y++){ // OP[x][y] = 0; // adding bias to output for (i=0; i<R; i++){ for (j=0; j<S; j++){ for(k=0; k<C; k++){ float ip = input[n*C*H*W+k*H*W+(U*x+i)*H+(U*y+j)]; float wt = weight[m*C*R*S+k*R*S+i*S+j]; float prod = ip*wt; if(prod>=0) output[n*E*F*M+m*E*F+x*E+y] += prod; //OP[x][y] += IP[U*x+i][U*y+j]*WT[i][j]; }} } } } } } } int main(int argc, char* argv[]) { int batch_size = atoi(argv[1]); /*************INITALIZING MATRICES*********************************/ float *IP = (float*) malloc(batch_size*C*H*W*sizeof(float)); //float IP[H][W]; float *OP = (float*) malloc(batch_size*M*F*E*sizeof(float)); //float OP[F][E]; float *OPG = (float*) malloc(batch_size*M*F*E*sizeof(float)); float *WT = (float*) malloc(M*C*R*S*sizeof(float)); //float WT[R][S]; float* d_o; float* d_i; 
float* d_w; float* d_r; //clock_t cpu_start, gpu_start, cpu_end, gpu_end; //int a,b,c,d; int c,d,m,n,k; /*INITIALIZING WEIGHT MATRIX*/ for (m=0; m<M; m++){ for(k=0;k<C;k++){ for (c=0; c<R; c++){ for(d=0; d<S; d++){ //WT[c][d] = 2.0; //WT[m*C*R*S+k*R*S+c*S+d] = (int)k+1; WT[m*C*R*S+k*R*S+c*S+d] = (float)rand()/(float)(RAND_MAX+1.0); } } } } /*INITIALIZING OUTPUT MATRIX*/ for (n=0; n<batch_size;n++){ for (m=0; m<M; m++){ for (c=0; c<F; c++){ for(d=0; d<E; d++){ //OP[c][d] = 0; OP[n*M*F*E+m*F*E+c*E+d] = 0; } } } } /*INITIALIZING INPUT MATRIX*/ for (n=0; n<batch_size; n++){ for(k=0;k<C;k++){ for (c=0; c<H; c++){ for(d=0; d<W; d++){ // IP[c][d] = (a+b+c+d); if ((c==0) || (d==0) || (c==14) || (d==14)) IP[n*C*H*W+k*H*W+c*W+d] = 0; else IP[n*C*H*W+k*H*W+c*W+d] = (float)rand()/(RAND_MAX+1.0); } } } } if(cudaSuccess != cudaMalloc((void**) &d_i,batch_size*C*H*W*sizeof(float))) { printf("error in d_i malloc\n"); } cudaMemcpy(d_i, IP, batch_size*C*H*W*sizeof(float), cudaMemcpyHostToDevice); if(cudaSuccess != cudaMalloc((void**) &d_w, M*C*R*S*sizeof(float))) { printf("error in d_w malloc\n"); } cudaMemcpy(d_w, WT, M*C*R*S*sizeof(float), cudaMemcpyHostToDevice); if(cudaSuccess != cudaMalloc((void**) &d_o,C*batch_size*M*E*F*sizeof(float))) { printf("error in d_o malloc\n"); } if(cudaSuccess != cudaMalloc((void**) &d_r,batch_size*M*E*F*sizeof(float))) { printf("error in d_r malloc\n"); } //cpu_start = clock(); //element_wise_mmul(OP, IP, WT, batch_size); printf("cpu done\n"); //cpu_end = clock(); dim3 dimGrid(batch_size,384,256); dim3 dimBlock(13,13,1); dim3 dimGridRed(batch_size,384,1); dim3 dimBlockRed(13,13,1); //int op_height = 3; int op_width = 3; int stride = 1; int ip_height = 4;int wt_height = 2; int num_wt = 96; int num_img = 1; int num_ch = 384; //gpu_start = clock(); ew_gpu_mmul<<<dimGrid, dimBlock>>>(d_o,d_i,d_w,13,13,1,15,3,384,batch_size,256); cudaDeviceSynchronize(); red_ch<<<dimGridRed, dimBlockRed>>>(d_r,d_o,256,batch_size,384); //gpu_end = clock(); //void *kernelArgs[] = {(void *)&d_o, (void *)&d_i, (void *)&d_w,(void *)&op_height, (void *)&op_width, (void *)&stride, (void *)&ip_height,(void *)&wt_height, (void *)&num_wt, (void *)&num_img, (void *)&num_ch }; //cudaLaunchCooperativeKernel((void*)ew_gpu_mmul,dimGrid,dimBlock,kernelArgs,0,NULL); //cudaDeviceSynchronize(); cudaMemcpy(OPG,d_r,batch_size*M*E*F*sizeof(float), cudaMemcpyDeviceToHost); /**print outputs**/ //int e,f,g,h; int g,h,s,u; float max_error = 0; string filename = "layer_3_"+to_string(batch_size); ifstream fin(filename.c_str()); string line ; //for (t=0;t<C;t++){ for (u=0;u<batch_size;u++){ for (s=0;s<M;s++){ for (g=0; g<F; g++){ for(h=0; h<E; h++){ getline(fin,line); float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-atof(line.c_str())); //float error = abs(OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h]); if(error > max_error) max_error = error; // printf("the output is %f for index %d, %d,%d,%d.\n",OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h); // printf("diff CPU and GPU is %f for index %d,%d,%d,%d.\n", OPG[u*M*F*E+s*E*F+g*E+h]-OP[u*M*F*E+s*E*F+g*E+h],u,s,g,h); // printf("the output from GPU is %f for index,%d,%d,%d,%d.\n",OPG[u*M*F*E+s*E*F+g*E+h],u,s,g,h); } } } } fin.close(); printf("max error is %f\n", max_error); //} //cout<<"time taken by cpu call is "<<((double)(cpu_end-cpu_start))/CLOCKS_PER_SEC<<"secs"<<endl; //cout<<"time taken by gpu call is "<<((double)(gpu_end-gpu_start))/CLOCKS_PER_SEC<<"secs"<<endl; cudaFree(d_o); cudaFree(d_i); cudaFree(d_w); cudaFree(d_r); free(OPG); free(IP); free(WT); free(OP); return 0; }
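One observation on the CUDA version above (commentary, not in the original source): the cudaMalloc calls are checked, but the kernel launches and cudaMemcpy calls are not, so a launch failure would only show up as a wrong max_error. A small optional helper of the kind sketched below, wrapped around cudaDeviceSynchronize() after each launch, would surface such failures; checkCuda is a hypothetical name, not an API of the original code.

// Sketch: report the CUDA runtime error string for a failed call.
#include <cstdio>
#include <cuda_runtime.h>
static void checkCuda (cudaError_t err, const char * what)
{
  if (err != cudaSuccess) {
    fprintf (stderr, "%s failed: %s\n", what, cudaGetErrorString (err));
  }
}
// usage after a launch: checkCuda (cudaDeviceSynchronize (), "ew_gpu_mmul");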
ed6d5e2d86ffc706474adb53f5e8fac295dedc36.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _GPU_JOIN_KERNEL_CU__ #define _GPU_JOIN_KERNEL_CU__ #include <stdio.h> #include <stdlib.h> #include "../../domain/GpuMetaEvent.h" #include "../../main/GpuProcessor.h" #include "../../domain/GpuProcessorContext.h" #include "../../buffer/GpuStreamEventBuffer.h" #include "../../buffer/GpuWindowEventBuffer.h" #include "../../buffer/GpuRawByteBuffer.h" #include "../../buffer/GpuIntBuffer.h" #include "../../domain/GpuKernelDataTypes.h" #include "../../join/GpuJoinProcessor.h" #include "../../join/GpuJoinKernel.h" #include "../../util/GpuCudaHelper.h" #include "../../join/GpuJoinKernelCore.h" #include "../../filter/GpuFilterProcessor.h" #include "../../util/GpuUtils.h" namespace SiddhiGpu { #define THREADS_PER_BLOCK 128 #define MY_KERNEL_MAX_THREADS THREADS_PER_BLOCK #define MY_KERNEL_MIN_BLOCKS 8 // process batch of events in one stream of join processor __global__ void __launch_bounds__(MY_KERNEL_MAX_THREADS, MY_KERNEL_MIN_BLOCKS) ProcessEventsJoinLeftTriggerAllOn( char * _pInputEventBuffer, // input events buffer GpuKernelMetaEvent * _pInputMetaEvent, // Meta event for input events int _iInputNumberOfEvents, // Number of events in input buffer char * _pEventWindowBuffer, // Event window buffer of this stream int _iWindowLength, // Length of current events window int _iRemainingCount, // Remaining free slots in Window buffer GpuKernelMetaEvent * _pOtherStreamMetaEvent, // Meta event for other stream char * _pOtherEventWindowBuffer, // Event window buffer of other stream int _iOtherWindowLength, // Length of current events window of other stream int _iOtherRemainingCount, // Remaining free slots in Window buffer of other stream GpuKernelFilter * _pOnCompareFilter, // OnCompare filter buffer - pre-copied at initialization uint64_t _iWithInTime, // WithIn time in milliseconds GpuKernelMetaEvent * _pOutputStreamMetaEvent, // Meta event for output stream char * _pResultsBuffer, // Resulting events buffer for this stream AttributeMappings * _pOutputAttribMappings, // Output event attribute mappings int _iEventsPerBlock // number of events allocated per block ) { // avoid out of bound threads if(threadIdx.x >= _iEventsPerBlock || threadIdx.y > 0 || blockIdx.y > 0) return; if((blockIdx.x == _iInputNumberOfEvents / _iEventsPerBlock) && // last thread block (threadIdx.x >= _iInputNumberOfEvents % _iEventsPerBlock)) // extra threads { return; } // get assigned event int iEventIdx = (blockIdx.x * _iEventsPerBlock) + threadIdx.x; // get in event starting position char * pInEventBuffer = _pInputEventBuffer + (_pInputMetaEvent->i_SizeOfEventInBytes * iEventIdx); // output to results buffer [in event, expired event] // {other stream event size * other window size} * 2 (for in/exp) int iOutputSegmentSize = _pOutputStreamMetaEvent->i_SizeOfEventInBytes * _iOtherWindowLength * 2; char * pResultsInEventBufferSegment = _pResultsBuffer + (iOutputSegmentSize * iEventIdx); char * pResultsExpiredEventBufferSegment = pResultsInEventBufferSegment + (iOutputSegmentSize / 2); char * pExpiredEventBuffer = NULL; GpuEvent * pExpiredEvent = NULL; GpuEvent * pInEvent = (GpuEvent*) pInEventBuffer; // calculate in/expired event pair for this event if(iEventIdx >= _iRemainingCount) { if(iEventIdx < _iWindowLength) { // in window buffer char * pExpiredOutEventInWindowBuffer = _pEventWindowBuffer + (_pInputMetaEvent->i_SizeOfEventInBytes * (iEventIdx - _iRemainingCount)); GpuEvent * pWindowEvent = (GpuEvent*) 
pExpiredOutEventInWindowBuffer; if(pWindowEvent->i_Type != GpuEvent::NONE) // if window event is filled { pExpiredEventBuffer = pExpiredOutEventInWindowBuffer; } else { // no expiring event } } else { // in input event buffer char * pExpiredOutEventInInputBuffer = _pInputEventBuffer + (_pInputMetaEvent->i_SizeOfEventInBytes * (iEventIdx - _iWindowLength)); pExpiredEventBuffer = pExpiredOutEventInInputBuffer; } } else { // [NULL,inEvent] // no expiring event } // get all matching event for in event from other window buffer and copy them to output event buffer // for each events in other window int iOtherWindowFillCount = _iOtherWindowLength - _iOtherRemainingCount; int iMatchedCount = 0; for(int i=0; i<iOtherWindowFillCount; ++i) { // get other window event char * pOtherWindowEventBuffer = _pOtherEventWindowBuffer + (_pOtherStreamMetaEvent->i_SizeOfEventInBytes * i); GpuEvent * pOtherWindowEvent = (GpuEvent*) pOtherWindowEventBuffer; // get buffer position for in event matching results char * pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; if(pInEvent->i_Sequence > pOtherWindowEvent->i_Sequence && (pInEvent->i_Timestamp - pOtherWindowEvent->i_Timestamp) <= _iWithInTime) { ExpressionEvalParameters mExpressionParam; mExpressionParam.p_OnCompare = _pOnCompareFilter; mExpressionParam.a_Meta[0] = _pInputMetaEvent; mExpressionParam.a_Event[0] = pInEventBuffer; mExpressionParam.a_Meta[1] = _pOtherStreamMetaEvent; mExpressionParam.a_Event[1] = pOtherWindowEventBuffer; mExpressionParam.i_CurrentIndex = 0; bool bOnCompareMatched = Evaluate(mExpressionParam); if(bOnCompareMatched) { // copy output event to buffer - map attributes from input streams to output stream pResultInMatchingEvent->i_Type = GpuEvent::CURRENT; pResultInMatchingEvent->i_Sequence = pInEvent->i_Sequence; pResultInMatchingEvent->i_Timestamp = pInEvent->i_Timestamp; for(int m=0; m < _pOutputAttribMappings->i_MappingCount; ++m) { int iFromStreamIndex = _pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::STREAM_INDEX]; int iFromAttrib = _pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::ATTRIBUTE_INDEX]; int iTo = _pOutputAttribMappings->p_Mappings[m].to; memcpy( pResultInMatchingEventBuffer + _pOutputStreamMetaEvent->p_Attributes[iTo].i_Position, // to mExpressionParam.a_Event[iFromStreamIndex] + mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Position, // from mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Length // size ); } iMatchedCount++; } } else { // cannot continue, last result event for this segment pResultInMatchingEvent->i_Type = GpuEvent::RESET; break; } } if(iMatchedCount < iOtherWindowFillCount || iOtherWindowFillCount == 0) { char * pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; pResultInMatchingEvent->i_Type = GpuEvent::RESET; } if(pExpiredEventBuffer != NULL) { pExpiredEvent = (GpuEvent*) pExpiredEventBuffer; iMatchedCount = 0; // for each events in other window for(int i=0; i<iOtherWindowFillCount; ++i) { // get other window event char * pOtherWindowEventBuffer = _pOtherEventWindowBuffer + (_pOtherStreamMetaEvent->i_SizeOfEventInBytes * i); GpuEvent * pOtherWindowEvent = (GpuEvent*) pOtherWindowEventBuffer; // get buffer position for expire 
event matching results char * pResultExpireMatchingEventBuffer = pResultsExpiredEventBufferSegment + (_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultExpireMatchingEvent = (GpuEvent*) pResultExpireMatchingEventBuffer; if(pExpiredEvent->i_Sequence < pOtherWindowEvent->i_Sequence && (pOtherWindowEvent->i_Timestamp - pExpiredEvent->i_Timestamp) <= _iWithInTime) { ExpressionEvalParameters mExpressionParam; mExpressionParam.p_OnCompare = _pOnCompareFilter; mExpressionParam.a_Meta[0] = _pInputMetaEvent; mExpressionParam.a_Event[0] = pExpiredEventBuffer; mExpressionParam.a_Meta[1] = _pOtherStreamMetaEvent; mExpressionParam.a_Event[1] = pOtherWindowEventBuffer; mExpressionParam.i_CurrentIndex = 0; bool bOnCompareMatched = Evaluate(mExpressionParam); if(bOnCompareMatched) { // copy output event to buffer - map attributes from input streams to output stream pResultExpireMatchingEvent->i_Type = GpuEvent::EXPIRED; pResultExpireMatchingEvent->i_Sequence = pExpiredEvent->i_Sequence; pResultExpireMatchingEvent->i_Timestamp = pExpiredEvent->i_Timestamp; for(int m=0; m < _pOutputAttribMappings->i_MappingCount; ++m) { int iFromStreamIndex = _pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::STREAM_INDEX]; int iFromAttrib = _pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::ATTRIBUTE_INDEX]; int iTo = _pOutputAttribMappings->p_Mappings[m].to; memcpy( pResultExpireMatchingEventBuffer + _pOutputStreamMetaEvent->p_Attributes[iTo].i_Position, // to mExpressionParam.a_Event[iFromStreamIndex] + mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Position, // from mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Length // size ); } iMatchedCount++; } } else { // cannot continue, last result event for this segment pResultExpireMatchingEvent->i_Type = GpuEvent::RESET; break; } } if(iMatchedCount < iOtherWindowFillCount || iOtherWindowFillCount == 0) { char * pResultExpireMatchingEventBuffer = pResultsExpiredEventBufferSegment + (_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultExpireMatchingEvent = (GpuEvent*) pResultExpireMatchingEventBuffer; pResultExpireMatchingEvent->i_Type = GpuEvent::RESET; } } } //__global__ //void ProcessEventsJoinLeftTriggerCurrentOn( // char * _pInputEventBuffer, // input events buffer // GpuKernelMetaEvent * _pInputMetaEvent, // Meta event for input events // int _iInputNumberOfEvents, // Number of events in input buffer // char * _pEventWindowBuffer, // Event window buffer of this stream // int _iWindowLength, // Length of current events window // int _iRemainingCount, // Remaining free slots in Window buffer // GpuKernelMetaEvent * _pOtherStreamMetaEvent, // Meta event for other stream // char * _pOtherEventWindowBuffer, // Event window buffer of other stream // int _iOtherWindowLength, // Length of current events window of other stream // int _iOtherRemainingCount, // Remaining free slots in Window buffer of other stream // GpuKernelFilter * _pOnCompareFilter, // OnCompare filter buffer - pre-copied at initialization // uint64_t _iWithInTime, // WithIn time in milliseconds // GpuKernelMetaEvent * _pOutputStreamMetaEvent, // Meta event for output stream // char * _pResultsBuffer, // Resulting events buffer for this stream // AttributeMappings * _pOutputAttribMappings, // Output event attribute mappings // int _iEventsPerBlock, // number of events allocated per block // int _iWorkSize // Number of events in window process by this kernel //) __global__ void 
__launch_bounds__(MY_KERNEL_MAX_THREADS, MY_KERNEL_MIN_BLOCKS) ProcessEventsJoinLeftTriggerCurrentOn( JoinKernelParameters * _pParameters, int _iInputNumberOfEvents, // Number of events in input buffer int _iRemainingCount, // Remaining free slots in Window buffer int _iOtherRemainingCount // Remaining free slots in Window buffer of other stream ) { // avoid out of bound threads if(threadIdx.x >= _pParameters->i_EventsPerBlock || threadIdx.y > 0 || blockIdx.y > 0) return; int iWorkerCount = ceil((float)_pParameters->i_OtherWindowLength / _pParameters->i_WorkSize); if((blockIdx.x == (_iInputNumberOfEvents * iWorkerCount) / _pParameters->i_EventsPerBlock) && // last thread block (threadIdx.x >= (_iInputNumberOfEvents * iWorkerCount) % _pParameters->i_EventsPerBlock)) // extra threads { return; } extern __shared__ char p_SharedInputEventBuffer[]; // get assigned event int iGlobalThreadIdx = (blockIdx.x * _pParameters->i_EventsPerBlock) + threadIdx.x; // get in buffer index int iInEventIndex = iGlobalThreadIdx / iWorkerCount; int iWindowStartEventIndex = (iGlobalThreadIdx % iWorkerCount) * _pParameters->i_WorkSize; // get in event starting position // char * pInEventBuffer = _pParameters->p_InputEventBuffer + (_pParameters->p_InputMetaEvent->i_SizeOfEventInBytes * iInEventIndex); char * pSharedInEventBuffer = p_SharedInputEventBuffer + (_pParameters->p_InputMetaEvent->i_SizeOfEventInBytes * (threadIdx.x / iWorkerCount)); if(threadIdx.x % iWorkerCount == 0) { memcpy(pSharedInEventBuffer, _pParameters->p_InputEventBuffer + (_pParameters->p_InputMetaEvent->i_SizeOfEventInBytes * iInEventIndex), _pParameters->p_InputMetaEvent->i_SizeOfEventInBytes); } __syncthreads(); // output to results buffer [in event, expired event] // {other stream event size * other window size} * 2 (for in/exp) // int iOutputSegmentSize = _pOutputStreamMetaEvent->i_SizeOfEventInBytes * _iOtherWindowLength; char * pResultsInEventBufferSegment = _pParameters->p_ResultsBuffer + (iGlobalThreadIdx * _pParameters->i_WorkSize * _pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes); // + (iWindowStartEventIndex * _pOutputStreamMetaEvent->i_SizeOfEventInBytes); GpuEvent * pInEvent = (GpuEvent*) pSharedInEventBuffer; // memset(pResultsInEventBufferSegment, 0, _pParameters->i_WorkSize * _pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes); // get all matching event for in event from other window buffer and copy them to output event buffer // for each events in other window int iOtherWindowFillCount = _pParameters->i_OtherWindowLength - _iOtherRemainingCount; if(iWindowStartEventIndex < iOtherWindowFillCount) { int iWindowEndEventIndex = min(iWindowStartEventIndex + _pParameters->i_WorkSize, iOtherWindowFillCount); int iMatchedCount = 0; // get buffer position for in event matching results char * pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; for(int i=iWindowStartEventIndex; i<iWindowEndEventIndex; ++i) { // get other window event char * pOtherWindowEventBuffer = _pParameters->p_OtherEventWindowBuffer + (_pParameters->p_OtherStreamMetaEvent->i_SizeOfEventInBytes * i); GpuEvent * pOtherWindowEvent = (GpuEvent*) pOtherWindowEventBuffer; if(pInEvent->i_Sequence > pOtherWindowEvent->i_Sequence && (pInEvent->i_Timestamp - pOtherWindowEvent->i_Timestamp) <= _pParameters->i_WithInTime) { ExpressionEvalParameters mExpressionParam; 
mExpressionParam.p_OnCompare = _pParameters->p_OnCompareFilter; mExpressionParam.a_Meta[0] = _pParameters->p_InputMetaEvent; mExpressionParam.a_Event[0] = pSharedInEventBuffer; mExpressionParam.a_Meta[1] = _pParameters->p_OtherStreamMetaEvent; mExpressionParam.a_Event[1] = pOtherWindowEventBuffer; mExpressionParam.i_CurrentIndex = 0; bool bOnCompareMatched = Evaluate(mExpressionParam); if(bOnCompareMatched) { // copy output event to buffer - map attributes from input streams to output stream // pResultInMatchingEvent->i_Type = GpuEvent::CURRENT; pResultInMatchingEvent->i_Sequence = pInEvent->i_Sequence; pResultInMatchingEvent->i_Timestamp = pInEvent->i_Timestamp; #pragma __unroll__ for(int m=0; m < _pParameters->p_OutputAttribMappings->i_MappingCount; ++m) { int iFromStreamIndex = _pParameters->p_OutputAttribMappings->p_Mappings[m].from[AttributeMapping::STREAM_INDEX]; int iFromAttrib = _pParameters->p_OutputAttribMappings->p_Mappings[m].from[AttributeMapping::ATTRIBUTE_INDEX]; int iTo = _pParameters->p_OutputAttribMappings->p_Mappings[m].to; memcpy( pResultInMatchingEventBuffer + _pParameters->p_OutputStreamMetaEvent->p_Attributes[iTo].i_Position, // to mExpressionParam.a_Event[iFromStreamIndex] + mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Position, // from mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Length // size ); } iMatchedCount++; pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; } } else { pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; // cannot continue, last result event for this segment pResultInMatchingEvent->i_Type = GpuEvent::RESET; break; } } if(iMatchedCount < (iWindowEndEventIndex - iWindowStartEventIndex)) { char * pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; pResultInMatchingEvent->i_Type = GpuEvent::RESET; } } else { GpuEvent * pResultInMatchingEvent = (GpuEvent*) pResultsInEventBufferSegment; pResultInMatchingEvent->i_Type = GpuEvent::RESET; } } __global__ void __launch_bounds__(MY_KERNEL_MAX_THREADS, MY_KERNEL_MIN_BLOCKS) ProcessEventsJoinLeftTriggerExpiredOn( char * _pInputEventBuffer, // input events buffer GpuKernelMetaEvent * _pInputMetaEvent, // Meta event for input events int _iInputNumberOfEvents, // Number of events in input buffer char * _pEventWindowBuffer, // Event window buffer of this stream int _iWindowLength, // Length of current events window int _iRemainingCount, // Remaining free slots in Window buffer GpuKernelMetaEvent * _pOtherStreamMetaEvent, // Meta event for other stream char * _pOtherEventWindowBuffer, // Event window buffer of other stream int _iOtherWindowLength, // Length of current events window of other stream int _iOtherRemainingCount, // Remaining free slots in Window buffer of other stream GpuKernelFilter * _pOnCompareFilter, // OnCompare filter buffer - pre-copied at initialization uint64_t _iWithInTime, // WithIn time in milliseconds GpuKernelMetaEvent * _pOutputStreamMetaEvent, // Meta event for output stream char * _pResultsBuffer, // Resulting events buffer for this stream AttributeMappings * 
_pOutputAttribMappings, // Output event attribute mappings int _iEventsPerBlock // number of events allocated per block ) { // avoid out of bound threads if(threadIdx.x >= _iEventsPerBlock || threadIdx.y > 0 || blockIdx.y > 0) return; if((blockIdx.x == _iInputNumberOfEvents / _iEventsPerBlock) && // last thread block (threadIdx.x >= _iInputNumberOfEvents % _iEventsPerBlock)) // extra threads { return; } // get assigned event int iEventIdx = (blockIdx.x * _iEventsPerBlock) + threadIdx.x; // output to results buffer [in event, expired event] // {other stream event size * other window size} * 2 (for in/exp) int iOutputSegmentSize = _pOutputStreamMetaEvent->i_SizeOfEventInBytes * _iOtherWindowLength; char * pResultsExpiredEventBufferSegment = _pResultsBuffer + (iOutputSegmentSize * iEventIdx); char * pExpiredEventBuffer = NULL; GpuEvent * pExpiredEvent = NULL; // calculate in/expired event pair for this event if(iEventIdx >= _iRemainingCount) { if(iEventIdx < _iWindowLength) { // in window buffer char * pExpiredOutEventInWindowBuffer = _pEventWindowBuffer + (_pInputMetaEvent->i_SizeOfEventInBytes * (iEventIdx - _iRemainingCount)); GpuEvent * pWindowEvent = (GpuEvent*) pExpiredOutEventInWindowBuffer; if(pWindowEvent->i_Type != GpuEvent::NONE) // if window event is filled { pExpiredEventBuffer = pExpiredOutEventInWindowBuffer; } else { // no expiring event } } else { // in input event buffer char * pExpiredOutEventInInputBuffer = _pInputEventBuffer + (_pInputMetaEvent->i_SizeOfEventInBytes * (iEventIdx - _iWindowLength)); pExpiredEventBuffer = pExpiredOutEventInInputBuffer; } } else { // [NULL,inEvent] // no expiring event } if(pExpiredEventBuffer != NULL) { pExpiredEvent = (GpuEvent*) pExpiredEventBuffer; // for each events in other window // get all matching event for in event from other window buffer and copy them to output event buffer int iOtherWindowFillCount = _iOtherWindowLength - _iOtherRemainingCount; int iMatchedCount = 0; // for each events in other window for(int i=0; i<iOtherWindowFillCount; ++i) { // get other window event char * pOtherWindowEventBuffer = _pOtherEventWindowBuffer + (_pOtherStreamMetaEvent->i_SizeOfEventInBytes * i); GpuEvent * pOtherWindowEvent = (GpuEvent*) pOtherWindowEventBuffer; // get buffer position for expire event matching results char * pResultExpireMatchingEventBuffer = pResultsExpiredEventBufferSegment + (_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultExpireMatchingEvent = (GpuEvent*) pResultExpireMatchingEventBuffer; if(pExpiredEvent->i_Sequence < pOtherWindowEvent->i_Sequence && (pOtherWindowEvent->i_Timestamp - pExpiredEvent->i_Timestamp) <= _iWithInTime) { ExpressionEvalParameters mExpressionParam; mExpressionParam.p_OnCompare = _pOnCompareFilter; mExpressionParam.a_Meta[0] = _pInputMetaEvent; mExpressionParam.a_Event[0] = pExpiredEventBuffer; mExpressionParam.a_Meta[1] = _pOtherStreamMetaEvent; mExpressionParam.a_Event[1] = pOtherWindowEventBuffer; mExpressionParam.i_CurrentIndex = 0; bool bOnCompareMatched = Evaluate(mExpressionParam); if(bOnCompareMatched) { // copy output event to buffer - map attributes from input streams to output stream pResultExpireMatchingEvent->i_Type = GpuEvent::EXPIRED; pResultExpireMatchingEvent->i_Sequence = pExpiredEvent->i_Sequence; pResultExpireMatchingEvent->i_Timestamp = pExpiredEvent->i_Timestamp; for(int m=0; m < _pOutputAttribMappings->i_MappingCount; ++m) { int iFromStreamIndex = _pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::STREAM_INDEX]; int iFromAttrib = 
_pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::ATTRIBUTE_INDEX]; int iTo = _pOutputAttribMappings->p_Mappings[m].to; memcpy( pResultExpireMatchingEventBuffer + _pOutputStreamMetaEvent->p_Attributes[iTo].i_Position, // to mExpressionParam.a_Event[iFromStreamIndex] + mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Position, // from mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Length // size ); } iMatchedCount++; } } else { // cannot continue, last result event for this segment pResultExpireMatchingEvent->i_Type = GpuEvent::RESET; break; } } if(iMatchedCount < iOtherWindowFillCount || iOtherWindowFillCount == 0) { char * pResultExpireMatchingEventBuffer = pResultsExpiredEventBufferSegment + (_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultExpireMatchingEvent = (GpuEvent*) pResultExpireMatchingEventBuffer; pResultExpireMatchingEvent->i_Type = GpuEvent::RESET; } } } __global__ void __launch_bounds__(MY_KERNEL_MAX_THREADS, MY_KERNEL_MIN_BLOCKS) ProcessEventsJoinRightTriggerAllOn( char * _pInputEventBuffer, // input events buffer GpuKernelMetaEvent * _pInputMetaEvent, // Meta event for input events int _iInputNumberOfEvents, // Number of events in input buffer char * _pEventWindowBuffer, // Event window buffer of this stream int _iWindowLength, // Length of current events window int _iRemainingCount, // Remaining free slots in Window buffer GpuKernelMetaEvent * _pOtherStreamMetaEvent, // Meta event for other stream char * _pOtherEventWindowBuffer, // Event window buffer of other stream int _iOtherWindowLength, // Length of current events window of other stream int _iOtherRemainingCount, // Remaining free slots in Window buffer of other stream GpuKernelFilter * _pOnCompareFilter, // OnCompare filter buffer - pre-copied at initialization uint64_t _iWithInTime, // WithIn time in milliseconds GpuKernelMetaEvent * _pOutputStreamMetaEvent, // Meta event for output stream char * _pResultsBuffer, // Resulting events buffer for this stream AttributeMappings * _pOutputAttribMappings, // Output event attribute mappings int _iEventsPerBlock // number of events allocated per block ) { // avoid out of bound threads if(threadIdx.x >= _iEventsPerBlock || threadIdx.y > 0 || blockIdx.y > 0) return; if((blockIdx.x == _iInputNumberOfEvents / _iEventsPerBlock) && // last thread block (threadIdx.x >= _iInputNumberOfEvents % _iEventsPerBlock)) // extra threads { return; } // get assigned event int iEventIdx = (blockIdx.x * _iEventsPerBlock) + threadIdx.x; // get in event starting position char * pInEventBuffer = _pInputEventBuffer + (_pInputMetaEvent->i_SizeOfEventInBytes * iEventIdx); // output to results buffer [in event, expired event] // {other stream event size * other window size} * 2 (for in/exp) int iOutputSegmentSize = _pOutputStreamMetaEvent->i_SizeOfEventInBytes * _iOtherWindowLength * 2; char * pResultsInEventBufferSegment = _pResultsBuffer + (iOutputSegmentSize * iEventIdx); char * pResultsExpiredEventBufferSegment = pResultsInEventBufferSegment + (iOutputSegmentSize / 2); char * pExpiredEventBuffer = NULL; GpuEvent * pExpiredEvent = NULL; GpuEvent * pInEvent = (GpuEvent*) pInEventBuffer; // calculate in/expired event pair for this event if(iEventIdx >= _iRemainingCount) { if(iEventIdx < _iWindowLength) { // in window buffer char * pExpiredOutEventInWindowBuffer = _pEventWindowBuffer + (_pInputMetaEvent->i_SizeOfEventInBytes * (iEventIdx - _iRemainingCount)); GpuEvent * pWindowEvent = (GpuEvent*) 
pExpiredOutEventInWindowBuffer; if(pWindowEvent->i_Type != GpuEvent::NONE) // if window event is filled { pExpiredEventBuffer = pExpiredOutEventInWindowBuffer; } else { // no expiring event } } else { // in input event buffer char * pExpiredOutEventInInputBuffer = _pInputEventBuffer + (_pInputMetaEvent->i_SizeOfEventInBytes * (iEventIdx - _iWindowLength)); pExpiredEventBuffer = pExpiredOutEventInInputBuffer; } } else { // [NULL,inEvent] // no expiring event } // get all matching event for in event from other window buffer and copy them to output event buffer // for each events in other window int iOtherWindowFillCount = _iOtherWindowLength - _iOtherRemainingCount; int iMatchedCount = 0; for(int i=0; i<iOtherWindowFillCount; ++i) { // get other window event char * pOtherWindowEventBuffer = _pOtherEventWindowBuffer + (_pOtherStreamMetaEvent->i_SizeOfEventInBytes * i); GpuEvent * pOtherWindowEvent = (GpuEvent*) pOtherWindowEventBuffer; // get buffer position for in event matching results char * pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; if(pInEvent->i_Sequence > pOtherWindowEvent->i_Sequence && (pInEvent->i_Timestamp - pOtherWindowEvent->i_Timestamp) <= _iWithInTime) { ExpressionEvalParameters mExpressionParam; mExpressionParam.p_OnCompare = _pOnCompareFilter; mExpressionParam.a_Meta[0] = _pOtherStreamMetaEvent; mExpressionParam.a_Event[0] = pOtherWindowEventBuffer; mExpressionParam.a_Meta[1] = _pInputMetaEvent; mExpressionParam.a_Event[1] = pInEventBuffer; mExpressionParam.i_CurrentIndex = 0; bool bOnCompareMatched = Evaluate(mExpressionParam); if(bOnCompareMatched) { // copy output event to buffer - map attributes from input streams to output stream pResultInMatchingEvent->i_Type = GpuEvent::CURRENT; pResultInMatchingEvent->i_Sequence = pInEvent->i_Sequence; pResultInMatchingEvent->i_Timestamp = pInEvent->i_Timestamp; for(int m=0; m < _pOutputAttribMappings->i_MappingCount; ++m) { int iFromStreamIndex = _pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::STREAM_INDEX]; int iFromAttrib = _pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::ATTRIBUTE_INDEX]; int iTo = _pOutputAttribMappings->p_Mappings[m].to; memcpy( pResultInMatchingEventBuffer + _pOutputStreamMetaEvent->p_Attributes[iTo].i_Position, // to mExpressionParam.a_Event[iFromStreamIndex] + mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Position, // from mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Length // size ); } iMatchedCount++; } } else { // cannot continue, last result event for this segment pResultInMatchingEvent->i_Type = GpuEvent::RESET; break; } if(iMatchedCount < iOtherWindowFillCount || iOtherWindowFillCount == 0) { char * pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; pResultInMatchingEvent->i_Type = GpuEvent::RESET; } } if(pExpiredEventBuffer != NULL) { pExpiredEvent = (GpuEvent*) pExpiredEventBuffer; iMatchedCount = 0; // for each events in other window for(int i=0; i<iOtherWindowFillCount; ++i) { // get other window event char * pOtherWindowEventBuffer = _pOtherEventWindowBuffer + (_pOtherStreamMetaEvent->i_SizeOfEventInBytes * i); GpuEvent * pOtherWindowEvent = (GpuEvent*) pOtherWindowEventBuffer; // get buffer position for expire 
event matching results char * pResultExpireMatchingEventBuffer = pResultsExpiredEventBufferSegment + (_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultExpireMatchingEvent = (GpuEvent*) pResultExpireMatchingEventBuffer; if(pExpiredEvent->i_Sequence < pOtherWindowEvent->i_Sequence && (pOtherWindowEvent->i_Timestamp - pExpiredEvent->i_Timestamp) <= _iWithInTime) { ExpressionEvalParameters mExpressionParam; mExpressionParam.p_OnCompare = _pOnCompareFilter; mExpressionParam.a_Meta[0] = _pOtherStreamMetaEvent; mExpressionParam.a_Event[0] = pOtherWindowEventBuffer; mExpressionParam.a_Meta[1] = _pInputMetaEvent; mExpressionParam.a_Event[1] = pExpiredEventBuffer; mExpressionParam.i_CurrentIndex = 0; bool bOnCompareMatched = Evaluate(mExpressionParam); if(bOnCompareMatched) { // copy output event to buffer - map attributes from input streams to output stream pResultExpireMatchingEvent->i_Type = GpuEvent::EXPIRED; pResultExpireMatchingEvent->i_Sequence = pExpiredEvent->i_Sequence; pResultExpireMatchingEvent->i_Timestamp = pExpiredEvent->i_Timestamp; for(int m=0; m < _pOutputAttribMappings->i_MappingCount; ++m) { int iFromStreamIndex = _pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::STREAM_INDEX]; int iFromAttrib = _pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::ATTRIBUTE_INDEX]; int iTo = _pOutputAttribMappings->p_Mappings[m].to; memcpy( pResultExpireMatchingEventBuffer + _pOutputStreamMetaEvent->p_Attributes[iTo].i_Position, // to mExpressionParam.a_Event[iFromStreamIndex] + mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Position, // from mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Length // size ); } iMatchedCount++; } } else { // cannot continue, last result event for this segment pResultExpireMatchingEvent->i_Type = GpuEvent::RESET; break; } } if(iMatchedCount < iOtherWindowFillCount || iOtherWindowFillCount == 0) { char * pResultExpireMatchingEventBuffer = pResultsExpiredEventBufferSegment + (_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultExpireMatchingEvent = (GpuEvent*) pResultExpireMatchingEventBuffer; pResultExpireMatchingEvent->i_Type = GpuEvent::RESET; } } } //__global__ //void ProcessEventsJoinRightTriggerCurrentOn( // char * _pInputEventBuffer, // input events buffer // GpuKernelMetaEvent * _pInputMetaEvent, // Meta event for input events // int _iInputNumberOfEvents, // Number of events in input buffer // char * _pEventWindowBuffer, // Event window buffer of this stream // int _iWindowLength, // Length of current events window // int _iRemainingCount, // Remaining free slots in Window buffer // GpuKernelMetaEvent * _pOtherStreamMetaEvent, // Meta event for other stream // char * _pOtherEventWindowBuffer, // Event window buffer of other stream // int _iOtherWindowLength, // Length of current events window of other stream // int _iOtherRemainingCount, // Remaining free slots in Window buffer of other stream // GpuKernelFilter * _pOnCompareFilter, // OnCompare filter buffer - pre-copied at initialization // uint64_t _iWithInTime, // WithIn time in milliseconds // GpuKernelMetaEvent * _pOutputStreamMetaEvent, // Meta event for output stream // char * _pResultsBuffer, // Resulting events buffer for this stream // AttributeMappings * _pOutputAttribMappings, // Output event attribute mappings // int _iEventsPerBlock, // number of events allocated per block // int _iWorkSize // Number of events in window process by this kernel //) __global__ void 
__launch_bounds__(MY_KERNEL_MAX_THREADS, MY_KERNEL_MIN_BLOCKS) ProcessEventsJoinRightTriggerCurrentOn( JoinKernelParameters * _pParameters, int _iInputNumberOfEvents, // Number of events in input buffer int _iRemainingCount, // Remaining free slots in Window buffer int _iOtherRemainingCount // Remaining free slots in Window buffer of other stream ) { // avoid out of bound threads if(threadIdx.x >= _pParameters->i_EventsPerBlock || threadIdx.y > 0 || blockIdx.y > 0) return; int iWorkerCount = ceil((float)_pParameters->i_OtherWindowLength / _pParameters->i_WorkSize); if((blockIdx.x == (_iInputNumberOfEvents * iWorkerCount) / _pParameters->i_EventsPerBlock) && // last thread block (threadIdx.x >= (_iInputNumberOfEvents * iWorkerCount) % _pParameters->i_EventsPerBlock)) // extra threads { return; } extern __shared__ char p_SharedInputEventBuffer[]; // get assigned event int iGlobalThreadIdx = (blockIdx.x * _pParameters->i_EventsPerBlock) + threadIdx.x; // get in buffer index int iInEventIndex = iGlobalThreadIdx / iWorkerCount; int iWindowStartEventIndex = (iGlobalThreadIdx % iWorkerCount) * _pParameters->i_WorkSize; // get in event starting position // char * pInEventBuffer = _pParameters->p_InputEventBuffer + (_pParameters->p_InputMetaEvent->i_SizeOfEventInBytes * iInEventIndex); char * pSharedInEventBuffer = p_SharedInputEventBuffer + (_pParameters->p_InputMetaEvent->i_SizeOfEventInBytes * (threadIdx.x / iWorkerCount)); if(threadIdx.x % iWorkerCount == 0) { memcpy(pSharedInEventBuffer, _pParameters->p_InputEventBuffer + (_pParameters->p_InputMetaEvent->i_SizeOfEventInBytes * iInEventIndex), _pParameters->p_InputMetaEvent->i_SizeOfEventInBytes); } __syncthreads(); // output to results buffer [in event, expired event] // {other stream event size * other window size} * 2 (for in/exp) // int iOutputSegmentSizePerEvent = _pOutputStreamMetaEvent->i_SizeOfEventInBytes * _iOtherWindowLength; char * pResultsInEventBufferSegment = _pParameters->p_ResultsBuffer + (iGlobalThreadIdx * _pParameters->i_WorkSize * _pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes); // + (iWindowStartEventIndex * _pOutputStreamMetaEvent->i_SizeOfEventInBytes); GpuEvent * pInEvent = (GpuEvent*) pSharedInEventBuffer; // memset(pResultsInEventBufferSegment, 0, _pParameters->i_WorkSize * _pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes); // get all matching event for in event from other window buffer and copy them to output event buffer // for each events in other window int iOtherWindowFillCount = _pParameters->i_OtherWindowLength - _iOtherRemainingCount; if(iWindowStartEventIndex < iOtherWindowFillCount) { int iWindowEndEventIndex = min(iWindowStartEventIndex + _pParameters->i_WorkSize, iOtherWindowFillCount); int iMatchedCount = 0; // get buffer position for in event matching results char * pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; for(int i=iWindowStartEventIndex; i<iWindowEndEventIndex; ++i) { // get other window event char * pOtherWindowEventBuffer = _pParameters->p_OtherEventWindowBuffer + (_pParameters->p_OtherStreamMetaEvent->i_SizeOfEventInBytes * i); GpuEvent * pOtherWindowEvent = (GpuEvent*) pOtherWindowEventBuffer; if(pInEvent->i_Sequence > pOtherWindowEvent->i_Sequence && (pInEvent->i_Timestamp - pOtherWindowEvent->i_Timestamp) <= _pParameters->i_WithInTime) { ExpressionEvalParameters mExpressionParam; 
mExpressionParam.p_OnCompare = _pParameters->p_OnCompareFilter; mExpressionParam.a_Meta[0] = _pParameters->p_OtherStreamMetaEvent; mExpressionParam.a_Event[0] = pOtherWindowEventBuffer; mExpressionParam.a_Meta[1] = _pParameters->p_InputMetaEvent; mExpressionParam.a_Event[1] = pSharedInEventBuffer; mExpressionParam.i_CurrentIndex = 0; bool bOnCompareMatched = Evaluate(mExpressionParam); if(bOnCompareMatched) { // copy output event to buffer - map attributes from input streams to output stream pResultInMatchingEvent->i_Type = GpuEvent::CURRENT; pResultInMatchingEvent->i_Sequence = pInEvent->i_Sequence; pResultInMatchingEvent->i_Timestamp = pInEvent->i_Timestamp; #pragma __unroll__ for(int m=0; m < _pParameters->p_OutputAttribMappings->i_MappingCount; ++m) { int iFromStreamIndex = _pParameters->p_OutputAttribMappings->p_Mappings[m].from[AttributeMapping::STREAM_INDEX]; int iFromAttrib = _pParameters->p_OutputAttribMappings->p_Mappings[m].from[AttributeMapping::ATTRIBUTE_INDEX]; int iTo = _pParameters->p_OutputAttribMappings->p_Mappings[m].to; memcpy( pResultInMatchingEventBuffer + _pParameters->p_OutputStreamMetaEvent->p_Attributes[iTo].i_Position, // to mExpressionParam.a_Event[iFromStreamIndex] + mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Position, // from mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Length // size ); } iMatchedCount++; pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; } } else { pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; // cannot continue, last result event for this segment pResultInMatchingEvent->i_Type = GpuEvent::RESET; break; } } if(iMatchedCount < (iWindowEndEventIndex - iWindowStartEventIndex)) { char * pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; pResultInMatchingEvent->i_Type = GpuEvent::RESET; } } else { GpuEvent * pResultInMatchingEvent = (GpuEvent*) pResultsInEventBufferSegment; pResultInMatchingEvent->i_Type = GpuEvent::RESET; } } __global__ void __launch_bounds__(MY_KERNEL_MAX_THREADS, MY_KERNEL_MIN_BLOCKS) ProcessEventsJoinRightTriggerExpireOn( char * _pInputEventBuffer, // input events buffer GpuKernelMetaEvent * _pInputMetaEvent, // Meta event for input events int _iInputNumberOfEvents, // Number of events in input buffer char * _pEventWindowBuffer, // Event window buffer of this stream int _iWindowLength, // Length of current events window int _iRemainingCount, // Remaining free slots in Window buffer GpuKernelMetaEvent * _pOtherStreamMetaEvent, // Meta event for other stream char * _pOtherEventWindowBuffer, // Event window buffer of other stream int _iOtherWindowLength, // Length of current events window of other stream int _iOtherRemainingCount, // Remaining free slots in Window buffer of other stream GpuKernelFilter * _pOnCompareFilter, // OnCompare filter buffer - pre-copied at initialization uint64_t _iWithInTime, // WithIn time in milliseconds GpuKernelMetaEvent * _pOutputStreamMetaEvent, // Meta event for output stream char * _pResultsBuffer, // Resulting events buffer for this stream AttributeMappings * 
_pOutputAttribMappings, // Output event attribute mappings int _iEventsPerBlock // number of events allocated per block ) { // avoid out of bound threads if(threadIdx.x >= _iEventsPerBlock || threadIdx.y > 0 || blockIdx.y > 0) return; if((blockIdx.x == _iInputNumberOfEvents / _iEventsPerBlock) && // last thread block (threadIdx.x >= _iInputNumberOfEvents % _iEventsPerBlock)) // extra threads { return; } // get assigned event int iEventIdx = (blockIdx.x * _iEventsPerBlock) + threadIdx.x; // output to results buffer [in event, expired event] // {other stream event size * other window size} * 2 (for in/exp) int iOutputSegmentSize = _pOutputStreamMetaEvent->i_SizeOfEventInBytes * _iOtherWindowLength; char * pResultsExpiredEventBufferSegment = _pResultsBuffer + (iOutputSegmentSize * iEventIdx); char * pExpiredEventBuffer = NULL; GpuEvent * pExpiredEvent = NULL; // calculate in/expired event pair for this event if(iEventIdx >= _iRemainingCount) { if(iEventIdx < _iWindowLength) { // in window buffer char * pExpiredOutEventInWindowBuffer = _pEventWindowBuffer + (_pInputMetaEvent->i_SizeOfEventInBytes * (iEventIdx - _iRemainingCount)); GpuEvent * pWindowEvent = (GpuEvent*) pExpiredOutEventInWindowBuffer; if(pWindowEvent->i_Type != GpuEvent::NONE) // if window event is filled { pExpiredEventBuffer = pExpiredOutEventInWindowBuffer; } else { // no expiring event } } else { // in input event buffer char * pExpiredOutEventInInputBuffer = _pInputEventBuffer + (_pInputMetaEvent->i_SizeOfEventInBytes * (iEventIdx - _iWindowLength)); pExpiredEventBuffer = pExpiredOutEventInInputBuffer; } } else { // [NULL,inEvent] // no expiring event } if(pExpiredEventBuffer != NULL) { // get all matching event for in event from other window buffer and copy them to output event buffer pExpiredEvent = (GpuEvent*) pExpiredEventBuffer; // for each events in other window int iOtherWindowFillCount = _iOtherWindowLength - _iOtherRemainingCount; int iMatchedCount = 0; // for each events in other window for(int i=0; i<iOtherWindowFillCount; ++i) { // get other window event char * pOtherWindowEventBuffer = _pOtherEventWindowBuffer + (_pOtherStreamMetaEvent->i_SizeOfEventInBytes * i); GpuEvent * pOtherWindowEvent = (GpuEvent*) pOtherWindowEventBuffer; // get buffer position for expire event matching results char * pResultExpireMatchingEventBuffer = pResultsExpiredEventBufferSegment + (_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultExpireMatchingEvent = (GpuEvent*) pResultExpireMatchingEventBuffer; if(pExpiredEvent->i_Sequence < pOtherWindowEvent->i_Sequence && (pOtherWindowEvent->i_Timestamp - pExpiredEvent->i_Timestamp) <= _iWithInTime) { ExpressionEvalParameters mExpressionParam; mExpressionParam.p_OnCompare = _pOnCompareFilter; mExpressionParam.a_Meta[0] = _pOtherStreamMetaEvent; mExpressionParam.a_Event[0] = pOtherWindowEventBuffer; mExpressionParam.a_Meta[1] = _pInputMetaEvent; mExpressionParam.a_Event[1] = pExpiredEventBuffer; mExpressionParam.i_CurrentIndex = 0; bool bOnCompareMatched = Evaluate(mExpressionParam); if(bOnCompareMatched) { // copy output event to buffer - map attributes from input streams to output stream pResultExpireMatchingEvent->i_Type = GpuEvent::EXPIRED; pResultExpireMatchingEvent->i_Sequence = pExpiredEvent->i_Sequence; pResultExpireMatchingEvent->i_Timestamp = pExpiredEvent->i_Timestamp; for(int m=0; m < _pOutputAttribMappings->i_MappingCount; ++m) { int iFromStreamIndex = _pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::STREAM_INDEX]; int iFromAttrib = 
_pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::ATTRIBUTE_INDEX]; int iTo = _pOutputAttribMappings->p_Mappings[m].to; memcpy( pResultExpireMatchingEventBuffer + _pOutputStreamMetaEvent->p_Attributes[iTo].i_Position, // to mExpressionParam.a_Event[iFromStreamIndex] + mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Position, // from mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Length // size ); } iMatchedCount++; } } else { // cannot continue, last result event for this segment pResultExpireMatchingEvent->i_Type = GpuEvent::RESET; break; } } if(iMatchedCount < iOtherWindowFillCount || iOtherWindowFillCount == 0) { char * pResultExpireMatchingEventBuffer = pResultsExpiredEventBufferSegment + (_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultExpireMatchingEvent = (GpuEvent*) pResultExpireMatchingEventBuffer; pResultExpireMatchingEvent->i_Type = GpuEvent::RESET; } } } __global__ void __launch_bounds__(MY_KERNEL_MAX_THREADS, MY_KERNEL_MIN_BLOCKS) JoinSetWindowState( char * _pInputEventBuffer, // original input events buffer int _iNumberOfEvents, // Number of events in input buffer (matched + not matched) char * _pEventWindowBuffer, // Event window buffer int _iWindowLength, // Length of current events window int _iRemainingCount, // Remaining free slots in Window buffer int _iMaxEventCount, // used for setting results array int _iSizeOfEvent, // Size of an event int _iEventsPerBlock // number of events allocated per block ) { // avoid out of bound threads if(threadIdx.x >= _iEventsPerBlock || threadIdx.y > 0 || blockIdx.y > 0) return; if((blockIdx.x == _iNumberOfEvents / _iEventsPerBlock) && // last thread block (threadIdx.x >= _iNumberOfEvents % _iEventsPerBlock)) // extra threads { return; } // get assigned event int iEventIdx = (blockIdx.x * _iEventsPerBlock) + threadIdx.x; // get in event starting position char * pInEventBuffer = _pInputEventBuffer + (_iSizeOfEvent * iEventIdx); if(_iNumberOfEvents < _iWindowLength) { int iWindowPositionShift = _iWindowLength - _iNumberOfEvents; if(_iRemainingCount < _iNumberOfEvents) { int iExitEventCount = _iNumberOfEvents - _iRemainingCount; // calculate start and end window buffer positions int iStart = iEventIdx + iWindowPositionShift; int iEnd = iStart; int iPrevToEnd = iEnd; while(iEnd >= 0) { char * pDestinationEventBuffer = _pEventWindowBuffer + (_iSizeOfEvent * iEnd); GpuEvent * pDestinationEvent = (GpuEvent*) pDestinationEventBuffer; if(pDestinationEvent->i_Type != GpuEvent::NONE) // there is an event in destination position { iPrevToEnd = iEnd; iEnd -= iExitEventCount; } else { break; } } iEnd = (iEnd < 0 ? 
iPrevToEnd : iEnd); // work back from end while copying events while(iEnd < iStart) { char * pDestinationEventBuffer = _pEventWindowBuffer + (_iSizeOfEvent * iEnd); GpuEvent * pDestinationEvent = (GpuEvent*) pDestinationEventBuffer; char * pSourceEventBuffer = _pEventWindowBuffer + (_iSizeOfEvent * (iEnd + iExitEventCount)); memcpy(pDestinationEventBuffer, pSourceEventBuffer, _iSizeOfEvent); pDestinationEvent->i_Type = GpuEvent::EXPIRED; iEnd += iExitEventCount; } // iEnd == iStart if(iStart >= 0) { char * pDestinationEventBuffer = _pEventWindowBuffer + (_iSizeOfEvent * iStart); GpuEvent * pDestinationEvent = (GpuEvent*) pDestinationEventBuffer; memcpy(pDestinationEventBuffer, pInEventBuffer, _iSizeOfEvent); pDestinationEvent->i_Type = GpuEvent::EXPIRED; } } else { // just copy event to window iWindowPositionShift -= (_iRemainingCount - _iNumberOfEvents); char * pWindowEventBuffer = _pEventWindowBuffer + (_iSizeOfEvent * (iEventIdx + iWindowPositionShift)); memcpy(pWindowEventBuffer, pInEventBuffer, _iSizeOfEvent); GpuEvent * pExpiredEvent = (GpuEvent*) pWindowEventBuffer; pExpiredEvent->i_Type = GpuEvent::EXPIRED; } } else { int iWindowPositionShift = _iNumberOfEvents - _iWindowLength; if(iEventIdx >= iWindowPositionShift) { char * pWindowEventBuffer = _pEventWindowBuffer + (_iSizeOfEvent * (iEventIdx - iWindowPositionShift)); memcpy(pWindowEventBuffer, pInEventBuffer, _iSizeOfEvent); GpuEvent * pExpiredEvent = (GpuEvent*) pWindowEventBuffer; pExpiredEvent->i_Type = GpuEvent::EXPIRED; } } } // ====================================================================================================================== GpuJoinKernel::GpuJoinKernel(GpuProcessor * _pProc, GpuProcessorContext * _pLeftContext, GpuProcessorContext * _pRightContext, int _iThreadBlockSize, int _iLeftWindowSize, int _iRightWindowSize, FILE * _fpLeftLog, FILE * _fpRightLog) : GpuKernel(_pProc, _pLeftContext->GetDeviceId(), _iThreadBlockSize, _fpLeftLog), p_LeftContext(_pLeftContext), p_RightContext(_pRightContext), i_LeftInputBufferIndex(0), i_RightInputBufferIndex(0), p_LeftInputEventBuffer(NULL), p_RightInputEventBuffer(NULL), p_LeftWindowEventBuffer(NULL), p_RightWindowEventBuffer(NULL), p_LeftResultEventBuffer(NULL), p_RightResultEventBuffer(NULL), p_DeviceOnCompareFilter(NULL), p_DeviceParametersLeft(NULL), p_DeviceParametersRight(NULL), i_LeftStreamWindowSize(_iLeftWindowSize), i_RightStreamWindowSize(_iRightWindowSize), // i_LeftRemainingCount(_iLeftWindowSize), // i_RightRemainingCount(_iRightWindowSize), i_LeftNumEventPerSegment(0), i_RightNumEventPerSegment(0), b_LeftFirstKernel(true), b_RightFirstKernel(true), b_LeftDeviceSet(false), b_RightDeviceSet(false), i_LeftThreadWorkSize(_iRightWindowSize), i_RightThreadWorkSize(_iLeftWindowSize), i_LeftThreadWorkerCount(0), i_RightThreadWorkerCount(0), i_InitializedStreamCount(0), fp_LeftLog(_fpLeftLog), fp_RightLog(_fpRightLog) { p_JoinProcessor = (GpuJoinProcessor*) _pProc; pthread_mutex_init(&mtx_Lock, NULL); } GpuJoinKernel::~GpuJoinKernel() { fprintf(fp_LeftLog, "[GpuJoinKernel] destroy\n"); fflush(fp_LeftLog); fprintf(fp_RightLog, "[GpuJoinKernel] destroy\n"); fflush(fp_RightLog); CUDA_CHECK_RETURN(hipFree(p_DeviceOnCompareFilter)); p_DeviceOnCompareFilter = NULL; if(p_DeviceOutputAttributeMapping) { CUDA_CHECK_RETURN(hipFree(p_DeviceOutputAttributeMapping)); p_DeviceOutputAttributeMapping = NULL; } if(p_DeviceParametersLeft) { CUDA_CHECK_RETURN(hipFree(p_DeviceParametersLeft)); p_DeviceParametersLeft = NULL; } if(p_DeviceParametersRight) { 
CUDA_CHECK_RETURN(hipFree(p_DeviceParametersRight)); p_DeviceParametersRight = NULL; } pthread_mutex_destroy(&mtx_Lock); } bool GpuJoinKernel::Initialize(int _iStreamIndex, GpuMetaEvent * _pMetaEvent, int _iInputEventBufferSize) { if(_iStreamIndex == 0) { fprintf(fp_LeftLog, "[GpuJoinKernel] Initialize : StreamIndex=%d LeftTrigger=%d RightTrigger=%d CurrentOn=%d ExpireOn=%d\n", _iStreamIndex, p_JoinProcessor->GetLeftTrigger(), p_JoinProcessor->GetRightTrigger(), p_JoinProcessor->GetCurrentOn(), p_JoinProcessor->GetExpiredOn()); fflush(fp_LeftLog); // set input event buffer fprintf(fp_LeftLog, "[GpuJoinKernel] Left InputEventBufferIndex=%d\n", i_LeftInputBufferIndex); fflush(fp_LeftLog); p_LeftInputEventBuffer = (GpuStreamEventBuffer*) p_LeftContext->GetEventBuffer(i_LeftInputBufferIndex); p_LeftInputEventBuffer->Print(); // left event window p_LeftWindowEventBuffer = new GpuWindowEventBuffer("LeftWindowEventBuffer", p_LeftContext->GetDeviceId(), _pMetaEvent, fp_LeftLog); p_LeftWindowEventBuffer->CreateEventBuffer(i_LeftStreamWindowSize); fprintf(fp_LeftLog, "[GpuJoinKernel] Created device left window buffer : Length=%d Size=%d bytes\n", i_LeftStreamWindowSize, p_LeftWindowEventBuffer->GetEventBufferSizeInBytes()); fflush(fp_LeftLog); fprintf(fp_LeftLog, "[GpuJoinKernel] initialize left window buffer data \n"); fflush(fp_LeftLog); p_LeftWindowEventBuffer->Print(); p_LeftWindowEventBuffer->ResetHostEventBuffer(0); char * pLeftHostWindowBuffer = p_LeftWindowEventBuffer->GetHostEventBuffer(); char * pCurrentEvent; for(int i=0; i<i_LeftStreamWindowSize; ++i) { pCurrentEvent = pLeftHostWindowBuffer + (_pMetaEvent->i_SizeOfEventInBytes * i); GpuEvent * pGpuEvent = (GpuEvent*) pCurrentEvent; pGpuEvent->i_Type = GpuEvent::NONE; } p_LeftWindowEventBuffer->CopyToDevice(false); p_LeftWindowEventBuffer->Sync(0, false); i_InitializedStreamCount++; GpuUtils::PrintThreadInfo("GpuJoinKernel", fp_LeftLog); } else if(_iStreamIndex == 1) { fprintf(fp_RightLog, "[GpuJoinKernel] Initialize : StreamIndex=%d LeftTrigger=%d RightTrigger=%d CurrentOn=%d ExpireOn=%d\n", _iStreamIndex, p_JoinProcessor->GetLeftTrigger(), p_JoinProcessor->GetRightTrigger(), p_JoinProcessor->GetCurrentOn(), p_JoinProcessor->GetExpiredOn()); fflush(fp_RightLog); fprintf(fp_RightLog, "[GpuJoinKernel] Right InputEventBufferIndex=%d\n", i_RightInputBufferIndex); fflush(fp_RightLog); p_RightInputEventBuffer = (GpuStreamEventBuffer*) p_RightContext->GetEventBuffer(i_RightInputBufferIndex); p_RightInputEventBuffer->Print(); // right event window p_RightWindowEventBuffer = new GpuWindowEventBuffer("RightWindowEventBuffer", p_RightContext->GetDeviceId(), _pMetaEvent, fp_RightLog); p_RightWindowEventBuffer->CreateEventBuffer(i_RightStreamWindowSize); fprintf(fp_RightLog, "[GpuJoinKernel] Created device right window buffer : Length=%d Size=%d bytes\n", i_RightStreamWindowSize, p_RightWindowEventBuffer->GetEventBufferSizeInBytes()); fflush(fp_RightLog); fprintf(fp_RightLog, "[GpuJoinKernel] initialize right window buffer data \n"); fflush(fp_RightLog); p_RightWindowEventBuffer->Print(); p_RightWindowEventBuffer->ResetHostEventBuffer(0); char * pRightHostWindowBuffer = p_RightWindowEventBuffer->GetHostEventBuffer(); char * pCurrentEvent; for(int i=0; i<i_RightStreamWindowSize; ++i) { pCurrentEvent = pRightHostWindowBuffer + (_pMetaEvent->i_SizeOfEventInBytes * i); GpuEvent * pGpuEvent = (GpuEvent*) pCurrentEvent; pGpuEvent->i_Type = GpuEvent::NONE; } p_RightWindowEventBuffer->CopyToDevice(false); p_RightWindowEventBuffer->Sync(0, false); 
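// Note: per-stream initialization above only prepares this stream's input and window buffers.
// The shared pieces - the result event buffers, the device copy of the OnCompare filter, the output
// attribute mappings and the device-side JoinKernelParameters structs - are created further below only
// once both streams have initialized (i_InitializedStreamCount == 2), because each side's result
// segment is sized from the other stream's window length.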
i_InitializedStreamCount++; GpuUtils::PrintThreadInfo("GpuJoinKernel", fp_RightLog); } if(i_InitializedStreamCount == 2) { fprintf(fp_LeftLog, "[GpuJoinKernel] StreamId=%d Creating result event buffer\n", _iStreamIndex); fflush(fp_LeftLog); fprintf(fp_RightLog, "[GpuJoinKernel] StreamId=%d Creating result event buffer\n", _iStreamIndex); fflush(fp_RightLog); p_LeftResultEventBuffer = new GpuStreamEventBuffer("JoinLeftResultEventBuffer", p_LeftContext->GetDeviceId(), p_OutputStreamMeta, fp_LeftLog); if(p_JoinProcessor->GetLeftTrigger()) { int iEventCount = 0; if(p_JoinProcessor->GetCurrentOn()) { iEventCount += i_RightStreamWindowSize * p_LeftInputEventBuffer->GetMaxEventCount(); i_LeftNumEventPerSegment = i_RightStreamWindowSize; } if(p_JoinProcessor->GetExpiredOn()) { iEventCount += i_RightStreamWindowSize * p_LeftInputEventBuffer->GetMaxEventCount(); i_LeftNumEventPerSegment += i_RightStreamWindowSize; } p_LeftResultEventBuffer->CreateEventBuffer(iEventCount); fprintf(fp_LeftLog, "[GpuJoinKernel] LeftResultEventBuffer created : Size=%d bytes\n", p_LeftResultEventBuffer->GetEventBufferSizeInBytes()); fflush(fp_LeftLog); } p_LeftResultEventBuffer->Print(); p_RightResultEventBuffer = new GpuStreamEventBuffer("JoinRightResultEventBuffer", p_RightContext->GetDeviceId(), p_OutputStreamMeta, fp_RightLog); if(p_JoinProcessor->GetRightTrigger()) { int iEventCount = 0; if(p_JoinProcessor->GetCurrentOn()) { iEventCount += i_LeftStreamWindowSize * p_RightInputEventBuffer->GetMaxEventCount(); i_RightNumEventPerSegment = i_LeftStreamWindowSize; } if(p_JoinProcessor->GetExpiredOn()) { iEventCount += i_LeftStreamWindowSize * p_RightInputEventBuffer->GetMaxEventCount(); i_RightNumEventPerSegment += i_LeftStreamWindowSize; } p_RightResultEventBuffer->CreateEventBuffer(iEventCount); fprintf(fp_RightLog, "[GpuJoinKernel] RightResultEventBuffer created : Size=%d bytes\n", p_RightResultEventBuffer->GetEventBufferSizeInBytes()); fflush(fp_RightLog); } p_RightResultEventBuffer->Print(); fprintf(fp_LeftLog, "[GpuJoinKernel] Copying OnCompare filter to device \n"); fflush(fp_LeftLog); fprintf(fp_RightLog, "[GpuJoinKernel] Copying OnCompare filter to device \n"); fflush(fp_RightLog); CUDA_CHECK_RETURN(hipMalloc( (void**) &p_DeviceOnCompareFilter, sizeof(GpuKernelFilter))); GpuKernelFilter * apHostFilters = (GpuKernelFilter *) malloc(sizeof(GpuKernelFilter)); apHostFilters->i_NodeCount = p_JoinProcessor->i_NodeCount; apHostFilters->ap_ExecutorNodes = NULL; CUDA_CHECK_RETURN(hipMalloc( (void**) &apHostFilters->ap_ExecutorNodes, sizeof(ExecutorNode) * p_JoinProcessor->i_NodeCount)); CUDA_CHECK_RETURN(hipMemcpy( apHostFilters->ap_ExecutorNodes, p_JoinProcessor->ap_ExecutorNodes, sizeof(ExecutorNode) * p_JoinProcessor->i_NodeCount, hipMemcpyHostToDevice)); CUDA_CHECK_RETURN(hipMemcpy( p_DeviceOnCompareFilter, apHostFilters, sizeof(GpuKernelFilter), hipMemcpyHostToDevice)); CUDA_CHECK_RETURN(hipPeekAtLastError()); CUDA_CHECK_RETURN(hipDeviceSynchronize()); free(apHostFilters); apHostFilters = NULL; // copy Output mappings if(p_HostOutputAttributeMapping) { fprintf(fp_LeftLog, "[GpuJoinKernel] Copying AttributeMappings to device \n"); fflush(fp_LeftLog); fprintf(fp_RightLog, "[GpuJoinKernel] Copying AttributeMappings to device \n"); fflush(fp_RightLog); fprintf(fp_LeftLog, "[GpuJoinKernel] AttributeMapCount : %d \n", p_HostOutputAttributeMapping->i_MappingCount); fprintf(fp_RightLog, "[GpuJoinKernel] AttributeMapCount : %d \n", p_HostOutputAttributeMapping->i_MappingCount); for(int c=0; 
c<p_HostOutputAttributeMapping->i_MappingCount; ++c) { fprintf(fp_LeftLog, "[GpuJoinKernel] Map : Form [Stream=%d, Attrib=%d] To [Attrib=%d] \n", p_HostOutputAttributeMapping->p_Mappings[c].from[AttributeMapping::STREAM_INDEX], p_HostOutputAttributeMapping->p_Mappings[c].from[AttributeMapping::ATTRIBUTE_INDEX], p_HostOutputAttributeMapping->p_Mappings[c].to); fprintf(fp_RightLog, "[GpuJoinKernel] Map : Form [Stream=%d, Attrib=%d] To [Attrib=%d] \n", p_HostOutputAttributeMapping->p_Mappings[c].from[AttributeMapping::STREAM_INDEX], p_HostOutputAttributeMapping->p_Mappings[c].from[AttributeMapping::ATTRIBUTE_INDEX], p_HostOutputAttributeMapping->p_Mappings[c].to); } CUDA_CHECK_RETURN(hipMalloc( (void**) &p_DeviceOutputAttributeMapping, sizeof(AttributeMappings))); AttributeMappings * pHostMappings = (AttributeMappings*) malloc(sizeof(AttributeMappings)); pHostMappings->i_MappingCount = p_HostOutputAttributeMapping->i_MappingCount; pHostMappings->p_Mappings = NULL; CUDA_CHECK_RETURN(hipMalloc( (void**) &pHostMappings->p_Mappings, sizeof(AttributeMapping) * p_HostOutputAttributeMapping->i_MappingCount)); CUDA_CHECK_RETURN(hipMemcpy( pHostMappings->p_Mappings, p_HostOutputAttributeMapping->p_Mappings, sizeof(AttributeMapping) * p_HostOutputAttributeMapping->i_MappingCount, hipMemcpyHostToDevice)); CUDA_CHECK_RETURN(hipMemcpy( p_DeviceOutputAttributeMapping, pHostMappings, sizeof(AttributeMappings), hipMemcpyHostToDevice)); CUDA_CHECK_RETURN(hipPeekAtLastError()); CUDA_CHECK_RETURN(hipDeviceSynchronize()); free(pHostMappings); pHostMappings = NULL; } if(p_JoinProcessor->GetThreadWorkSize() != 0) { i_LeftThreadWorkSize = p_JoinProcessor->GetThreadWorkSize(); i_RightThreadWorkSize = p_JoinProcessor->GetThreadWorkSize(); } if(i_LeftThreadWorkSize >= i_RightStreamWindowSize) { i_LeftThreadWorkSize = i_RightStreamWindowSize; } if(i_RightThreadWorkSize >= i_LeftStreamWindowSize) { i_RightThreadWorkSize = i_LeftStreamWindowSize; } i_LeftThreadWorkerCount = ceil((float)i_RightStreamWindowSize / i_LeftThreadWorkSize); i_RightThreadWorkerCount = ceil((float)i_LeftStreamWindowSize / i_RightThreadWorkSize); fprintf(fp_LeftLog, "[GpuJoinKernel] LeftThreadWorkSize=%d RightThreadWorkSize=%d\n", i_LeftThreadWorkSize, i_RightThreadWorkSize); fflush(fp_LeftLog); fprintf(fp_RightLog, "[GpuJoinKernel] LeftThreadWorkSize=%d RightThreadWorkSize=%d\n", i_LeftThreadWorkSize, i_RightThreadWorkSize); fflush(fp_RightLog); fprintf(fp_LeftLog, "[GpuJoinKernel] LeftThreadWorkCount=%d RightThreadWorkCount=%d\n", i_LeftThreadWorkerCount, i_RightThreadWorkerCount); fflush(fp_LeftLog); fprintf(fp_RightLog, "[GpuJoinKernel] LeftThreadWorkCount=%d RightThreadWorkCount=%d\n", i_LeftThreadWorkerCount, i_RightThreadWorkerCount); fflush(fp_RightLog); CUDA_CHECK_RETURN(hipMalloc((void**) &p_DeviceParametersLeft, sizeof(JoinKernelParameters))); JoinKernelParameters * pHostParameters = (JoinKernelParameters*) malloc(sizeof(JoinKernelParameters)); pHostParameters->p_InputEventBuffer = p_LeftInputEventBuffer->GetDeviceEventBuffer(); pHostParameters->p_InputMetaEvent = p_LeftInputEventBuffer->GetDeviceMetaEvent(); pHostParameters->p_EventWindowBuffer = p_LeftWindowEventBuffer->GetDeviceEventBuffer(); pHostParameters->i_WindowLength = i_LeftStreamWindowSize; pHostParameters->p_OtherStreamMetaEvent = p_RightInputEventBuffer->GetDeviceMetaEvent(); pHostParameters->p_OtherEventWindowBuffer = p_RightWindowEventBuffer->GetReadOnlyDeviceEventBuffer(); pHostParameters->i_OtherWindowLength = i_RightStreamWindowSize; pHostParameters->p_OnCompareFilter 
= p_DeviceOnCompareFilter; pHostParameters->i_WithInTime = p_JoinProcessor->GetWithInTimeMilliSeconds(); pHostParameters->p_OutputStreamMetaEvent = p_LeftResultEventBuffer->GetDeviceMetaEvent(); pHostParameters->p_ResultsBuffer = p_LeftResultEventBuffer->GetDeviceEventBuffer(); pHostParameters->p_OutputAttribMappings = p_DeviceOutputAttributeMapping; pHostParameters->i_EventsPerBlock = i_ThreadBlockSize; pHostParameters->i_WorkSize = i_LeftThreadWorkSize; CUDA_CHECK_RETURN(hipMemcpy( p_DeviceParametersLeft, pHostParameters, sizeof(JoinKernelParameters), hipMemcpyHostToDevice)); free(pHostParameters); pHostParameters = NULL; CUDA_CHECK_RETURN(hipMalloc((void**) &p_DeviceParametersRight, sizeof(JoinKernelParameters))); pHostParameters = (JoinKernelParameters*) malloc(sizeof(JoinKernelParameters)); pHostParameters->p_InputEventBuffer = p_RightInputEventBuffer->GetDeviceEventBuffer(); pHostParameters->p_InputMetaEvent = p_RightInputEventBuffer->GetDeviceMetaEvent(); pHostParameters->p_EventWindowBuffer = p_RightWindowEventBuffer->GetDeviceEventBuffer(); pHostParameters->i_WindowLength = i_RightStreamWindowSize; pHostParameters->p_OtherStreamMetaEvent = p_LeftInputEventBuffer->GetDeviceMetaEvent(); pHostParameters->p_OtherEventWindowBuffer = p_LeftWindowEventBuffer->GetReadOnlyDeviceEventBuffer(); pHostParameters->i_OtherWindowLength = i_LeftStreamWindowSize; pHostParameters->p_OnCompareFilter = p_DeviceOnCompareFilter; pHostParameters->i_WithInTime = p_JoinProcessor->GetWithInTimeMilliSeconds(); pHostParameters->p_OutputStreamMetaEvent = p_RightResultEventBuffer->GetDeviceMetaEvent(); pHostParameters->p_ResultsBuffer = p_RightResultEventBuffer->GetDeviceEventBuffer(); pHostParameters->p_OutputAttribMappings = p_DeviceOutputAttributeMapping; pHostParameters->i_EventsPerBlock = i_ThreadBlockSize; pHostParameters->i_WorkSize = i_RightThreadWorkSize; CUDA_CHECK_RETURN(hipMemcpy( p_DeviceParametersRight, pHostParameters, sizeof(JoinKernelParameters), hipMemcpyHostToDevice)); free(pHostParameters); pHostParameters = NULL; fprintf(fp_LeftLog, "[GpuJoinKernel] Initialization complete\n"); fflush(fp_LeftLog); fprintf(fp_RightLog, "[GpuJoinKernel] Initialization complete\n"); fflush(fp_RightLog); } return true; } void GpuJoinKernel::Process(int _iStreamIndex, int & _iNumEvents) { if(_iStreamIndex == 0) { ProcessLeftStream(_iStreamIndex, _iNumEvents); } else if(_iStreamIndex == 1) { ProcessRightStream(_iStreamIndex, _iNumEvents); } } void GpuJoinKernel::ProcessLeftStream(int _iStreamIndex, int & _iNumEvents) { #if GPU_DEBUG >= GPU_DEBUG_LEVEL_DEBUG fprintf(fp_LeftLog, "[GpuJoinKernel] ProcessLeftStream : StreamIndex=%d EventCount=%d\n", _iStreamIndex, _iNumEvents); GpuUtils::PrintThreadInfo("GpuJoinKernel::ProcessLeftStream", fp_LeftLog); fflush(fp_LeftLog); #endif if(!b_LeftDeviceSet) { GpuCudaHelper::SelectDevice(i_DeviceId, "GpuJoinKernel::Left", fp_LeftLog); b_LeftDeviceSet = true; } #ifdef KERNEL_TIME sdkStartTimer(&p_StopWatch); #endif if(b_LeftFirstKernel) { p_LeftInputEventBuffer->CopyToDevice(true); } // call entry kernel int numBlocksX = ceil((float)_iNumEvents * i_LeftThreadWorkerCount / (float)i_ThreadBlockSize); int numBlocksY = 1; dim3 numBlocks = dim3(numBlocksX, numBlocksY); dim3 numThreads = dim3(i_ThreadBlockSize, 1); #if GPU_DEBUG >= GPU_DEBUG_LEVEL_INFO fprintf(fp_LeftLog, "[GpuJoinKernel] ProcessLeftStream : Invoke kernel Blocks(%d,%d) Threads(%d,%d)\n", numBlocksX, numBlocksY, i_ThreadBlockSize, 1); fprintf(fp_LeftLog, "[GpuJoinKernel] ProcessLeftStream : NumEvents=%d 
LeftWindow=(%d/%d) RightWindow=(%d/%d) WithIn=%llu\n", _iNumEvents, p_LeftWindowEventBuffer->GetRemainingCount(), i_LeftStreamWindowSize, p_RightWindowEventBuffer->GetRemainingCount(), i_RightStreamWindowSize, p_JoinProcessor->GetWithInTimeMilliSeconds()); fflush(fp_LeftLog); #endif #if GPU_DEBUG >= GPU_DEBUG_LEVEL_TRACE GpuUtils::PrintByteBuffer(p_LeftInputEventBuffer->GetHostEventBuffer(), _iNumEvents, p_LeftInputEventBuffer->GetHostMetaEvent(), "GpuJoinKernel:LeftInputBuffer", fp_LeftLog); p_LeftWindowEventBuffer->CopyToHost(false); GpuUtils::PrintByteBuffer(p_LeftWindowEventBuffer->GetHostEventBuffer(), (i_LeftStreamWindowSize - p_LeftWindowEventBuffer->GetRemainingCount()), p_LeftWindowEventBuffer->GetHostMetaEvent(), "GpuJoinKernel:LeftWindowBuffer", fp_LeftLog); p_RightWindowEventBuffer->CopyToHost(false); GpuUtils::PrintByteBuffer(p_RightWindowEventBuffer->GetHostEventBuffer(), (i_RightStreamWindowSize - p_RightWindowEventBuffer->GetRemainingCount()), p_RightWindowEventBuffer->GetHostMetaEvent(), "GpuJoinKernel:RightWindowBuffer", fp_LeftLog); fflush(fp_LeftLog); #endif // char * _pInputEventBuffer, // input events buffer // GpuKernelMetaEvent * _pInputMetaEvent, // Meta event for input events // int _iInputNumberOfEvents, // Number of events in input buffer // char * _pEventWindowBuffer, // Event window buffer of this stream // int _iWindowLength, // Length of current events window // int _iRemainingCount, // Remaining free slots in Window buffer // GpuKernelMetaEvent * _pOtherStreamMetaEvent, // Meta event for other stream // char * _pOtherEventWindowBuffer, // Event window buffer of other stream // int _iOtherWindowLength, // Length of current events window of other stream // int _iOtherRemainingCount, // Remaining free slots in Window buffer of other stream // GpuKernelFilter * _pOnCompareFilter, // OnCompare filter buffer - pre-copied at initialization // int _iWithInTime, // WithIn time in milliseconds // GpuKernelMetaEvent * _pOutputStreamMetaEvent, // Meta event for output stream // char * _pResultsBuffer, // Resulting events buffer for this stream // AttributeMappings * _pOutputAttribMappings, // Output event attribute mappings // int _iEventsPerBlock // number of events allocated per block if(p_JoinProcessor->GetLeftTrigger()) { if(p_JoinProcessor->GetCurrentOn() && p_JoinProcessor->GetExpiredOn()) { hipLaunchKernelGGL(( ProcessEventsJoinLeftTriggerAllOn), dim3(numBlocks), dim3(numThreads), 0, 0, p_LeftInputEventBuffer->GetDeviceEventBuffer(), p_LeftInputEventBuffer->GetDeviceMetaEvent(), _iNumEvents, p_LeftWindowEventBuffer->GetDeviceEventBuffer(), i_LeftStreamWindowSize, p_LeftWindowEventBuffer->GetRemainingCount(), p_RightInputEventBuffer->GetDeviceMetaEvent(), p_RightWindowEventBuffer->GetReadOnlyDeviceEventBuffer(), i_RightStreamWindowSize, p_RightWindowEventBuffer->GetRemainingCount(), p_DeviceOnCompareFilter, p_JoinProcessor->GetWithInTimeMilliSeconds(), p_LeftResultEventBuffer->GetDeviceMetaEvent(), p_LeftResultEventBuffer->GetDeviceEventBuffer(), p_DeviceOutputAttributeMapping, i_ThreadBlockSize ); } else if(p_JoinProcessor->GetCurrentOn()) { int iSharedSize = (i_ThreadBlockSize * p_LeftInputEventBuffer->GetHostMetaEvent()->i_SizeOfEventInBytes / i_LeftThreadWorkerCount); hipLaunchKernelGGL(( ProcessEventsJoinLeftTriggerCurrentOn), dim3(numBlocks), dim3(numThreads), iSharedSize, 0, p_DeviceParametersLeft, _iNumEvents, p_LeftWindowEventBuffer->GetRemainingCount(), p_RightWindowEventBuffer->GetRemainingCount() ); } else if(p_JoinProcessor->GetExpiredOn()) { 
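// expired-only output: this path launches with the full explicit argument list and no dynamic shared
// memory, unlike the CurrentOn path above which stages input events in shared memory and passes the
// pre-built p_DeviceParametersLeft struct.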
hipLaunchKernelGGL(( ProcessEventsJoinLeftTriggerExpiredOn), dim3(numBlocks), dim3(numThreads), 0, 0, p_LeftInputEventBuffer->GetDeviceEventBuffer(), p_LeftInputEventBuffer->GetDeviceMetaEvent(), _iNumEvents, p_LeftWindowEventBuffer->GetDeviceEventBuffer(), i_LeftStreamWindowSize, p_LeftWindowEventBuffer->GetRemainingCount(), p_RightInputEventBuffer->GetDeviceMetaEvent(), p_RightWindowEventBuffer->GetReadOnlyDeviceEventBuffer(), i_RightStreamWindowSize, p_RightWindowEventBuffer->GetRemainingCount(), p_DeviceOnCompareFilter, p_JoinProcessor->GetWithInTimeMilliSeconds(), p_LeftResultEventBuffer->GetDeviceMetaEvent(), p_LeftResultEventBuffer->GetDeviceEventBuffer(), p_DeviceOutputAttributeMapping, i_ThreadBlockSize ); } } if(b_LastKernel) { p_LeftResultEventBuffer->CopyToHost(true); #if GPU_DEBUG >= GPU_DEBUG_LEVEL_DEBUG fprintf(fp_LeftLog, "[GpuJoinKernel] Results copied \n"); fflush(fp_LeftLog); #endif } numBlocksX = ceil((float)_iNumEvents / (float)i_ThreadBlockSize); numBlocks = dim3(numBlocksX, numBlocksY); // we need to synchronize processing of JoinKernel as only one batch of events can be there at a time // pthread_mutex_lock(&mtx_Lock); // char * _pInputEventBuffer, // original input events buffer // int _iNumberOfEvents, // Number of events in input buffer (matched + not matched) // char * _pEventWindowBuffer, // Event window buffer // int _iWindowLength, // Length of current events window // int _iRemainingCount, // Remaining free slots in Window buffer // int _iMaxEventCount, // used for setting results array // int _iSizeOfEvent, // Size of an event // int _iEventsPerBlock // number of events allocated per block hipLaunchKernelGGL(( JoinSetWindowState), dim3(numBlocks), dim3(numThreads), 0, 0, p_LeftInputEventBuffer->GetDeviceEventBuffer(), _iNumEvents, p_LeftWindowEventBuffer->GetDeviceEventBuffer(), i_LeftStreamWindowSize, p_LeftWindowEventBuffer->GetRemainingCount(), p_LeftInputEventBuffer->GetMaxEventCount(), p_LeftInputEventBuffer->GetHostMetaEvent()->i_SizeOfEventInBytes, i_ThreadBlockSize ); CUDA_CHECK_RETURN(hipPeekAtLastError()); CUDA_CHECK_RETURN(hipDeviceSynchronize()); p_LeftWindowEventBuffer->Sync(_iNumEvents, true); #if GPU_DEBUG >= GPU_DEBUG_LEVEL_INFO fprintf(fp_LeftLog, "[GpuJoinKernel] Kernel complete \n"); fflush(fp_LeftLog); #endif #ifdef KERNEL_TIME sdkStopTimer(&p_StopWatch); float fElapsed = sdkGetTimerValue(&p_StopWatch); fprintf(fp_LeftLog, "[GpuJoinKernel] Stats : Elapsed=%f ms\n", fElapsed); fflush(fp_LeftLog); lst_ElapsedTimes.push_back(fElapsed); sdkResetTimer(&p_StopWatch); #endif // if(_iNumEvents > i_LeftRemainingCount) // { // i_LeftRemainingCount = 0; // } // else // { // i_LeftRemainingCount -= _iNumEvents; // } // pthread_mutex_unlock(&mtx_Lock); if(!p_JoinProcessor->GetLeftTrigger()) { _iNumEvents = 0; } else { _iNumEvents = _iNumEvents * i_LeftNumEventPerSegment; } #if GPU_DEBUG >= GPU_DEBUG_LEVEL_DEBUG GpuUtils::PrintByteBuffer(p_LeftResultEventBuffer->GetHostEventBuffer(), _iNumEvents, p_LeftResultEventBuffer->GetHostMetaEvent(), "GpuJoinKernel:LeftResultEventBuffer", fp_LeftLog); fflush(fp_LeftLog); #endif #if GPU_DEBUG >= GPU_DEBUG_LEVEL_TRACE p_LeftWindowEventBuffer->CopyToHost(true); CUDA_CHECK_RETURN(hipDeviceSynchronize()); GpuUtils::PrintByteBuffer(p_LeftWindowEventBuffer->GetHostEventBuffer(), (i_LeftStreamWindowSize - p_LeftWindowEventBuffer->GetRemainingCount()), p_LeftWindowEventBuffer->GetHostMetaEvent(), "GpuJoinKernel:LeftWindowBuffer", fp_LeftLog); fflush(fp_LeftLog); #endif } void GpuJoinKernel::ProcessRightStream(int 
_iStreamIndex, int & _iNumEvents) { #if GPU_DEBUG >= GPU_DEBUG_LEVEL_DEBUG fprintf(fp_RightLog, "[GpuJoinKernel] ProcessRightStream : StreamIndex=%d EventCount=%d\n", _iStreamIndex, _iNumEvents); GpuUtils::PrintThreadInfo("GpuJoinKernel::ProcessRightStream", fp_RightLog); fflush(fp_RightLog); #endif if(!b_RightDeviceSet) { GpuCudaHelper::SelectDevice(i_DeviceId, "GpuJoinKernel::Right", fp_RightLog); b_RightDeviceSet = true; } #ifdef KERNEL_TIME sdkStartTimer(&p_StopWatch); #endif if(b_RightFirstKernel) { p_RightInputEventBuffer->CopyToDevice(true); } // call entry kernel int numBlocksX = ceil((float)_iNumEvents * i_RightThreadWorkerCount / (float)i_ThreadBlockSize); int numBlocksY = 1; dim3 numBlocks = dim3(numBlocksX, numBlocksY); dim3 numThreads = dim3(i_ThreadBlockSize, 1); #if GPU_DEBUG >= GPU_DEBUG_LEVEL_INFO fprintf(fp_RightLog, "[GpuJoinKernel] ProcessRightStream : Invoke kernel Blocks(%d,%d) Threads(%d,%d)\n", numBlocksX, numBlocksY, i_ThreadBlockSize, 1); fprintf(fp_RightLog, "[GpuJoinKernel] ProcessRightStream : NumEvents=%d LeftWindow=(%d/%d) RightWindow=(%d/%d) WithIn=%llu\n", _iNumEvents, p_LeftWindowEventBuffer->GetRemainingCount(), i_LeftStreamWindowSize, p_RightWindowEventBuffer->GetRemainingCount(), i_RightStreamWindowSize, p_JoinProcessor->GetWithInTimeMilliSeconds()); fflush(fp_RightLog); #endif #if GPU_DEBUG >= GPU_DEBUG_LEVEL_TRACE GpuUtils::PrintByteBuffer(p_RightInputEventBuffer->GetHostEventBuffer(), _iNumEvents, p_RightInputEventBuffer->GetHostMetaEvent(), "GpuJoinKernel:RightInputBuffer", fp_RightLog); p_LeftWindowEventBuffer->CopyToHost(false); GpuUtils::PrintByteBuffer(p_LeftWindowEventBuffer->GetHostEventBuffer(), (i_LeftStreamWindowSize - p_LeftWindowEventBuffer->GetRemainingCount()), p_LeftWindowEventBuffer->GetHostMetaEvent(), "GpuJoinKernel:LeftWindowBuffer", fp_RightLog); p_RightWindowEventBuffer->CopyToHost(false); GpuUtils::PrintByteBuffer(p_RightWindowEventBuffer->GetHostEventBuffer(), (i_RightStreamWindowSize - p_RightWindowEventBuffer->GetRemainingCount()), p_RightWindowEventBuffer->GetHostMetaEvent(), "GpuJoinKernel:RightWindowBuffer", fp_RightLog); fflush(fp_RightLog); #endif // char * _pInputEventBuffer, // input events buffer // GpuKernelMetaEvent * _pInputMetaEvent, // Meta event for input events // int _iInputNumberOfEvents, // Number of events in input buffer // char * _pEventWindowBuffer, // Event window buffer of this stream // int _iWindowLength, // Length of current events window // int _iRemainingCount, // Remaining free slots in Window buffer // GpuKernelMetaEvent * _pOtherStreamMetaEvent, // Meta event for other stream // char * _pOtherEventWindowBuffer, // Event window buffer of other stream // int _iOtherWindowLength, // Length of current events window of other stream // int _iOtherRemainingCount, // Remaining free slots in Window buffer of other stream // GpuKernelFilter * _pOnCompareFilter, // OnCompare filter buffer - pre-copied at initialization // int _iWithInTime, // WithIn time in milliseconds // GpuKernelMetaEvent * _pOutputStreamMetaEvent, // Meta event for output stream // char * _pResultsBuffer, // Resulting events buffer for this stream // AttributeMappings * _pOutputAttribMappings, // Output event attribute mappings // int _iEventsPerBlock // number of events allocated per block if(p_JoinProcessor->GetRightTrigger()) { if(p_JoinProcessor->GetCurrentOn() && p_JoinProcessor->GetExpiredOn()) { hipLaunchKernelGGL(( ProcessEventsJoinRightTriggerAllOn), dim3(numBlocks), dim3(numThreads), 0, 0, 
p_RightInputEventBuffer->GetDeviceEventBuffer(), p_RightInputEventBuffer->GetDeviceMetaEvent(), _iNumEvents, p_RightWindowEventBuffer->GetDeviceEventBuffer(), i_RightStreamWindowSize, p_RightWindowEventBuffer->GetRemainingCount(), p_LeftInputEventBuffer->GetDeviceMetaEvent(), p_LeftWindowEventBuffer->GetReadOnlyDeviceEventBuffer(), i_LeftStreamWindowSize, p_LeftWindowEventBuffer->GetRemainingCount(), p_DeviceOnCompareFilter, p_JoinProcessor->GetWithInTimeMilliSeconds(), p_RightResultEventBuffer->GetDeviceMetaEvent(), p_RightResultEventBuffer->GetDeviceEventBuffer(), p_DeviceOutputAttributeMapping, i_ThreadBlockSize ); } else if(p_JoinProcessor->GetCurrentOn()) { int iSharedSize = (i_ThreadBlockSize * p_RightInputEventBuffer->GetHostMetaEvent()->i_SizeOfEventInBytes / i_RightThreadWorkerCount); hipLaunchKernelGGL(( ProcessEventsJoinRightTriggerCurrentOn), dim3(numBlocks), dim3(numThreads), iSharedSize, 0, p_DeviceParametersRight, _iNumEvents, p_RightWindowEventBuffer->GetRemainingCount(), p_LeftWindowEventBuffer->GetRemainingCount() ); } else if(p_JoinProcessor->GetExpiredOn()) { hipLaunchKernelGGL(( ProcessEventsJoinRightTriggerExpireOn), dim3(numBlocks), dim3(numThreads), 0, 0, p_RightInputEventBuffer->GetDeviceEventBuffer(), p_RightInputEventBuffer->GetDeviceMetaEvent(), _iNumEvents, p_RightWindowEventBuffer->GetDeviceEventBuffer(), i_RightStreamWindowSize, p_RightWindowEventBuffer->GetRemainingCount(), p_LeftInputEventBuffer->GetDeviceMetaEvent(), p_LeftWindowEventBuffer->GetReadOnlyDeviceEventBuffer(), i_LeftStreamWindowSize, p_LeftWindowEventBuffer->GetRemainingCount(), p_DeviceOnCompareFilter, p_JoinProcessor->GetWithInTimeMilliSeconds(), p_RightResultEventBuffer->GetDeviceMetaEvent(), p_RightResultEventBuffer->GetDeviceEventBuffer(), p_DeviceOutputAttributeMapping, i_ThreadBlockSize ); } } if(b_LastKernel) { p_RightResultEventBuffer->CopyToHost(true); #if GPU_DEBUG >= GPU_DEBUG_LEVEL_DEBUG fprintf(fp_RightLog, "[GpuJoinKernel] Results copied \n"); fflush(fp_RightLog); #endif } numBlocksX = ceil((float)_iNumEvents / (float)i_ThreadBlockSize); numBlocks = dim3(numBlocksX, numBlocksY); // we need to synchronize processing of JoinKernel as only one batch of events can be there at a time // pthread_mutex_lock(&mtx_Lock); // char * _pInputEventBuffer, // original input events buffer // int _iNumberOfEvents, // Number of events in input buffer (matched + not matched) // char * _pEventWindowBuffer, // Event window buffer // int _iWindowLength, // Length of current events window // int _iRemainingCount, // Remaining free slots in Window buffer // int _iMaxEventCount, // used for setting results array // int _iSizeOfEvent, // Size of an event // int _iEventsPerBlock // number of events allocated per block hipLaunchKernelGGL(( JoinSetWindowState), dim3(numBlocks), dim3(numThreads), 0, 0, p_RightInputEventBuffer->GetDeviceEventBuffer(), _iNumEvents, p_RightWindowEventBuffer->GetDeviceEventBuffer(), i_RightStreamWindowSize, p_RightWindowEventBuffer->GetRemainingCount(), p_RightInputEventBuffer->GetMaxEventCount(), p_RightInputEventBuffer->GetHostMetaEvent()->i_SizeOfEventInBytes, i_ThreadBlockSize ); CUDA_CHECK_RETURN(hipPeekAtLastError()); CUDA_CHECK_RETURN(hipDeviceSynchronize()); p_RightWindowEventBuffer->Sync(_iNumEvents, true); #if GPU_DEBUG >= GPU_DEBUG_LEVEL_INFO fprintf(fp_RightLog, "[GpuJoinKernel] Kernel complete \n"); fflush(fp_RightLog); #endif #ifdef KERNEL_TIME sdkStopTimer(&p_StopWatch); float fElapsed = sdkGetTimerValue(&p_StopWatch); fprintf(fp_RightLog, "[GpuJoinKernel] Stats : 
Elapsed=%f ms\n", fElapsed); fflush(fp_RightLog); lst_ElapsedTimes.push_back(fElapsed); sdkResetTimer(&p_StopWatch); #endif // if(_iNumEvents > i_RightRemainingCount) // { // i_RightRemainingCount = 0; // } // else // { // i_RightRemainingCount -= _iNumEvents; // } // pthread_mutex_unlock(&mtx_Lock); if(!p_JoinProcessor->GetRightTrigger()) { _iNumEvents = 0; } else { _iNumEvents = _iNumEvents * i_RightNumEventPerSegment; } #if GPU_DEBUG >= GPU_DEBUG_LEVEL_DEBUG GpuUtils::PrintByteBuffer(p_RightResultEventBuffer->GetHostEventBuffer(), _iNumEvents, p_RightResultEventBuffer->GetHostMetaEvent(), "GpuJoinKernel:RightResultEventBuffer", fp_RightLog); fflush(fp_RightLog); #endif #if GPU_DEBUG >= GPU_DEBUG_LEVEL_TRACE p_RightWindowEventBuffer->CopyToHost(true); CUDA_CHECK_RETURN(hipDeviceSynchronize()); GpuUtils::PrintByteBuffer(p_RightWindowEventBuffer->GetHostEventBuffer(), (i_RightStreamWindowSize - p_RightWindowEventBuffer->GetRemainingCount()), p_RightWindowEventBuffer->GetHostMetaEvent(), "GpuJoinKernel:RightWindowBuffer", fp_RightLog); fflush(fp_RightLog); #endif } char * GpuJoinKernel::GetResultEventBuffer() { return NULL; } int GpuJoinKernel::GetResultEventBufferSize() { return 0; } char * GpuJoinKernel::GetLeftResultEventBuffer() { return p_LeftResultEventBuffer->GetHostEventBuffer(); } int GpuJoinKernel::GetLeftResultEventBufferSize() { return p_LeftResultEventBuffer->GetEventBufferSizeInBytes(); } char * GpuJoinKernel::GetRightResultEventBuffer() { return p_RightResultEventBuffer->GetHostEventBuffer(); } int GpuJoinKernel::GetRightResultEventBufferSize() { return p_RightResultEventBuffer->GetEventBufferSizeInBytes(); } } #endif
ed6d5e2d86ffc706474adb53f5e8fac295dedc36.cu
#ifndef _GPU_JOIN_KERNEL_CU__ #define _GPU_JOIN_KERNEL_CU__ #include <stdio.h> #include <stdlib.h> #include "../../domain/GpuMetaEvent.h" #include "../../main/GpuProcessor.h" #include "../../domain/GpuProcessorContext.h" #include "../../buffer/GpuStreamEventBuffer.h" #include "../../buffer/GpuWindowEventBuffer.h" #include "../../buffer/GpuRawByteBuffer.h" #include "../../buffer/GpuIntBuffer.h" #include "../../domain/GpuKernelDataTypes.h" #include "../../join/GpuJoinProcessor.h" #include "../../join/GpuJoinKernel.h" #include "../../util/GpuCudaHelper.h" #include "../../join/GpuJoinKernelCore.h" #include "../../filter/GpuFilterProcessor.h" #include "../../util/GpuUtils.h" namespace SiddhiGpu { #define THREADS_PER_BLOCK 128 #define MY_KERNEL_MAX_THREADS THREADS_PER_BLOCK #define MY_KERNEL_MIN_BLOCKS 8 // process batch of events in one stream of join processor __global__ void __launch_bounds__(MY_KERNEL_MAX_THREADS, MY_KERNEL_MIN_BLOCKS) ProcessEventsJoinLeftTriggerAllOn( char * _pInputEventBuffer, // input events buffer GpuKernelMetaEvent * _pInputMetaEvent, // Meta event for input events int _iInputNumberOfEvents, // Number of events in input buffer char * _pEventWindowBuffer, // Event window buffer of this stream int _iWindowLength, // Length of current events window int _iRemainingCount, // Remaining free slots in Window buffer GpuKernelMetaEvent * _pOtherStreamMetaEvent, // Meta event for other stream char * _pOtherEventWindowBuffer, // Event window buffer of other stream int _iOtherWindowLength, // Length of current events window of other stream int _iOtherRemainingCount, // Remaining free slots in Window buffer of other stream GpuKernelFilter * _pOnCompareFilter, // OnCompare filter buffer - pre-copied at initialization uint64_t _iWithInTime, // WithIn time in milliseconds GpuKernelMetaEvent * _pOutputStreamMetaEvent, // Meta event for output stream char * _pResultsBuffer, // Resulting events buffer for this stream AttributeMappings * _pOutputAttribMappings, // Output event attribute mappings int _iEventsPerBlock // number of events allocated per block ) { // avoid out of bound threads if(threadIdx.x >= _iEventsPerBlock || threadIdx.y > 0 || blockIdx.y > 0) return; if((blockIdx.x == _iInputNumberOfEvents / _iEventsPerBlock) && // last thread block (threadIdx.x >= _iInputNumberOfEvents % _iEventsPerBlock)) // extra threads { return; } // get assigned event int iEventIdx = (blockIdx.x * _iEventsPerBlock) + threadIdx.x; // get in event starting position char * pInEventBuffer = _pInputEventBuffer + (_pInputMetaEvent->i_SizeOfEventInBytes * iEventIdx); // output to results buffer [in event, expired event] // {other stream event size * other window size} * 2 (for in/exp) int iOutputSegmentSize = _pOutputStreamMetaEvent->i_SizeOfEventInBytes * _iOtherWindowLength * 2; char * pResultsInEventBufferSegment = _pResultsBuffer + (iOutputSegmentSize * iEventIdx); char * pResultsExpiredEventBufferSegment = pResultsInEventBufferSegment + (iOutputSegmentSize / 2); char * pExpiredEventBuffer = NULL; GpuEvent * pExpiredEvent = NULL; GpuEvent * pInEvent = (GpuEvent*) pInEventBuffer; // calculate in/expired event pair for this event if(iEventIdx >= _iRemainingCount) { if(iEventIdx < _iWindowLength) { // in window buffer char * pExpiredOutEventInWindowBuffer = _pEventWindowBuffer + (_pInputMetaEvent->i_SizeOfEventInBytes * (iEventIdx - _iRemainingCount)); GpuEvent * pWindowEvent = (GpuEvent*) pExpiredOutEventInWindowBuffer; if(pWindowEvent->i_Type != GpuEvent::NONE) // if window event is filled { 
pExpiredEventBuffer = pExpiredOutEventInWindowBuffer; } else { // no expiring event } } else { // in input event buffer char * pExpiredOutEventInInputBuffer = _pInputEventBuffer + (_pInputMetaEvent->i_SizeOfEventInBytes * (iEventIdx - _iWindowLength)); pExpiredEventBuffer = pExpiredOutEventInInputBuffer; } } else { // [NULL,inEvent] // no expiring event } // get all matching event for in event from other window buffer and copy them to output event buffer // for each events in other window int iOtherWindowFillCount = _iOtherWindowLength - _iOtherRemainingCount; int iMatchedCount = 0; for(int i=0; i<iOtherWindowFillCount; ++i) { // get other window event char * pOtherWindowEventBuffer = _pOtherEventWindowBuffer + (_pOtherStreamMetaEvent->i_SizeOfEventInBytes * i); GpuEvent * pOtherWindowEvent = (GpuEvent*) pOtherWindowEventBuffer; // get buffer position for in event matching results char * pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; if(pInEvent->i_Sequence > pOtherWindowEvent->i_Sequence && (pInEvent->i_Timestamp - pOtherWindowEvent->i_Timestamp) <= _iWithInTime) { ExpressionEvalParameters mExpressionParam; mExpressionParam.p_OnCompare = _pOnCompareFilter; mExpressionParam.a_Meta[0] = _pInputMetaEvent; mExpressionParam.a_Event[0] = pInEventBuffer; mExpressionParam.a_Meta[1] = _pOtherStreamMetaEvent; mExpressionParam.a_Event[1] = pOtherWindowEventBuffer; mExpressionParam.i_CurrentIndex = 0; bool bOnCompareMatched = Evaluate(mExpressionParam); if(bOnCompareMatched) { // copy output event to buffer - map attributes from input streams to output stream pResultInMatchingEvent->i_Type = GpuEvent::CURRENT; pResultInMatchingEvent->i_Sequence = pInEvent->i_Sequence; pResultInMatchingEvent->i_Timestamp = pInEvent->i_Timestamp; for(int m=0; m < _pOutputAttribMappings->i_MappingCount; ++m) { int iFromStreamIndex = _pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::STREAM_INDEX]; int iFromAttrib = _pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::ATTRIBUTE_INDEX]; int iTo = _pOutputAttribMappings->p_Mappings[m].to; memcpy( pResultInMatchingEventBuffer + _pOutputStreamMetaEvent->p_Attributes[iTo].i_Position, // to mExpressionParam.a_Event[iFromStreamIndex] + mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Position, // from mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Length // size ); } iMatchedCount++; } } else { // cannot continue, last result event for this segment pResultInMatchingEvent->i_Type = GpuEvent::RESET; break; } } if(iMatchedCount < iOtherWindowFillCount || iOtherWindowFillCount == 0) { char * pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; pResultInMatchingEvent->i_Type = GpuEvent::RESET; } if(pExpiredEventBuffer != NULL) { pExpiredEvent = (GpuEvent*) pExpiredEventBuffer; iMatchedCount = 0; // for each events in other window for(int i=0; i<iOtherWindowFillCount; ++i) { // get other window event char * pOtherWindowEventBuffer = _pOtherEventWindowBuffer + (_pOtherStreamMetaEvent->i_SizeOfEventInBytes * i); GpuEvent * pOtherWindowEvent = (GpuEvent*) pOtherWindowEventBuffer; // get buffer position for expire event matching results char * pResultExpireMatchingEventBuffer = pResultsExpiredEventBufferSegment + 
(_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultExpireMatchingEvent = (GpuEvent*) pResultExpireMatchingEventBuffer; if(pExpiredEvent->i_Sequence < pOtherWindowEvent->i_Sequence && (pOtherWindowEvent->i_Timestamp - pExpiredEvent->i_Timestamp) <= _iWithInTime) { ExpressionEvalParameters mExpressionParam; mExpressionParam.p_OnCompare = _pOnCompareFilter; mExpressionParam.a_Meta[0] = _pInputMetaEvent; mExpressionParam.a_Event[0] = pExpiredEventBuffer; mExpressionParam.a_Meta[1] = _pOtherStreamMetaEvent; mExpressionParam.a_Event[1] = pOtherWindowEventBuffer; mExpressionParam.i_CurrentIndex = 0; bool bOnCompareMatched = Evaluate(mExpressionParam); if(bOnCompareMatched) { // copy output event to buffer - map attributes from input streams to output stream pResultExpireMatchingEvent->i_Type = GpuEvent::EXPIRED; pResultExpireMatchingEvent->i_Sequence = pExpiredEvent->i_Sequence; pResultExpireMatchingEvent->i_Timestamp = pExpiredEvent->i_Timestamp; for(int m=0; m < _pOutputAttribMappings->i_MappingCount; ++m) { int iFromStreamIndex = _pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::STREAM_INDEX]; int iFromAttrib = _pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::ATTRIBUTE_INDEX]; int iTo = _pOutputAttribMappings->p_Mappings[m].to; memcpy( pResultExpireMatchingEventBuffer + _pOutputStreamMetaEvent->p_Attributes[iTo].i_Position, // to mExpressionParam.a_Event[iFromStreamIndex] + mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Position, // from mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Length // size ); } iMatchedCount++; } } else { // cannot continue, last result event for this segment pResultExpireMatchingEvent->i_Type = GpuEvent::RESET; break; } } if(iMatchedCount < iOtherWindowFillCount || iOtherWindowFillCount == 0) { char * pResultExpireMatchingEventBuffer = pResultsExpiredEventBufferSegment + (_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultExpireMatchingEvent = (GpuEvent*) pResultExpireMatchingEventBuffer; pResultExpireMatchingEvent->i_Type = GpuEvent::RESET; } } } //__global__ //void ProcessEventsJoinLeftTriggerCurrentOn( // char * _pInputEventBuffer, // input events buffer // GpuKernelMetaEvent * _pInputMetaEvent, // Meta event for input events // int _iInputNumberOfEvents, // Number of events in input buffer // char * _pEventWindowBuffer, // Event window buffer of this stream // int _iWindowLength, // Length of current events window // int _iRemainingCount, // Remaining free slots in Window buffer // GpuKernelMetaEvent * _pOtherStreamMetaEvent, // Meta event for other stream // char * _pOtherEventWindowBuffer, // Event window buffer of other stream // int _iOtherWindowLength, // Length of current events window of other stream // int _iOtherRemainingCount, // Remaining free slots in Window buffer of other stream // GpuKernelFilter * _pOnCompareFilter, // OnCompare filter buffer - pre-copied at initialization // uint64_t _iWithInTime, // WithIn time in milliseconds // GpuKernelMetaEvent * _pOutputStreamMetaEvent, // Meta event for output stream // char * _pResultsBuffer, // Resulting events buffer for this stream // AttributeMappings * _pOutputAttribMappings, // Output event attribute mappings // int _iEventsPerBlock, // number of events allocated per block // int _iWorkSize // Number of events in window process by this kernel //) __global__ void __launch_bounds__(MY_KERNEL_MAX_THREADS, MY_KERNEL_MIN_BLOCKS) ProcessEventsJoinLeftTriggerCurrentOn( 
JoinKernelParameters * _pParameters, int _iInputNumberOfEvents, // Number of events in input buffer int _iRemainingCount, // Remaining free slots in Window buffer int _iOtherRemainingCount // Remaining free slots in Window buffer of other stream ) { // avoid out of bound threads if(threadIdx.x >= _pParameters->i_EventsPerBlock || threadIdx.y > 0 || blockIdx.y > 0) return; int iWorkerCount = ceil((float)_pParameters->i_OtherWindowLength / _pParameters->i_WorkSize); if((blockIdx.x == (_iInputNumberOfEvents * iWorkerCount) / _pParameters->i_EventsPerBlock) && // last thread block (threadIdx.x >= (_iInputNumberOfEvents * iWorkerCount) % _pParameters->i_EventsPerBlock)) // extra threads { return; } extern __shared__ char p_SharedInputEventBuffer[]; // get assigned event int iGlobalThreadIdx = (blockIdx.x * _pParameters->i_EventsPerBlock) + threadIdx.x; // get in buffer index int iInEventIndex = iGlobalThreadIdx / iWorkerCount; int iWindowStartEventIndex = (iGlobalThreadIdx % iWorkerCount) * _pParameters->i_WorkSize; // get in event starting position // char * pInEventBuffer = _pParameters->p_InputEventBuffer + (_pParameters->p_InputMetaEvent->i_SizeOfEventInBytes * iInEventIndex); char * pSharedInEventBuffer = p_SharedInputEventBuffer + (_pParameters->p_InputMetaEvent->i_SizeOfEventInBytes * (threadIdx.x / iWorkerCount)); if(threadIdx.x % iWorkerCount == 0) { memcpy(pSharedInEventBuffer, _pParameters->p_InputEventBuffer + (_pParameters->p_InputMetaEvent->i_SizeOfEventInBytes * iInEventIndex), _pParameters->p_InputMetaEvent->i_SizeOfEventInBytes); } __syncthreads(); // output to results buffer [in event, expired event] // {other stream event size * other window size} * 2 (for in/exp) // int iOutputSegmentSize = _pOutputStreamMetaEvent->i_SizeOfEventInBytes * _iOtherWindowLength; char * pResultsInEventBufferSegment = _pParameters->p_ResultsBuffer + (iGlobalThreadIdx * _pParameters->i_WorkSize * _pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes); // + (iWindowStartEventIndex * _pOutputStreamMetaEvent->i_SizeOfEventInBytes); GpuEvent * pInEvent = (GpuEvent*) pSharedInEventBuffer; // memset(pResultsInEventBufferSegment, 0, _pParameters->i_WorkSize * _pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes); // get all matching event for in event from other window buffer and copy them to output event buffer // for each events in other window int iOtherWindowFillCount = _pParameters->i_OtherWindowLength - _iOtherRemainingCount; if(iWindowStartEventIndex < iOtherWindowFillCount) { int iWindowEndEventIndex = min(iWindowStartEventIndex + _pParameters->i_WorkSize, iOtherWindowFillCount); int iMatchedCount = 0; // get buffer position for in event matching results char * pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; for(int i=iWindowStartEventIndex; i<iWindowEndEventIndex; ++i) { // get other window event char * pOtherWindowEventBuffer = _pParameters->p_OtherEventWindowBuffer + (_pParameters->p_OtherStreamMetaEvent->i_SizeOfEventInBytes * i); GpuEvent * pOtherWindowEvent = (GpuEvent*) pOtherWindowEventBuffer; if(pInEvent->i_Sequence > pOtherWindowEvent->i_Sequence && (pInEvent->i_Timestamp - pOtherWindowEvent->i_Timestamp) <= _pParameters->i_WithInTime) { ExpressionEvalParameters mExpressionParam; mExpressionParam.p_OnCompare = _pParameters->p_OnCompareFilter; mExpressionParam.a_Meta[0] = _pParameters->p_InputMetaEvent; 
mExpressionParam.a_Event[0] = pSharedInEventBuffer; mExpressionParam.a_Meta[1] = _pParameters->p_OtherStreamMetaEvent; mExpressionParam.a_Event[1] = pOtherWindowEventBuffer; mExpressionParam.i_CurrentIndex = 0; bool bOnCompareMatched = Evaluate(mExpressionParam); if(bOnCompareMatched) { // copy output event to buffer - map attributes from input streams to output stream pResultInMatchingEvent->i_Type = GpuEvent::CURRENT; pResultInMatchingEvent->i_Sequence = pInEvent->i_Sequence; pResultInMatchingEvent->i_Timestamp = pInEvent->i_Timestamp;
#pragma unroll
for(int m=0; m < _pParameters->p_OutputAttribMappings->i_MappingCount; ++m) { int iFromStreamIndex = _pParameters->p_OutputAttribMappings->p_Mappings[m].from[AttributeMapping::STREAM_INDEX]; int iFromAttrib = _pParameters->p_OutputAttribMappings->p_Mappings[m].from[AttributeMapping::ATTRIBUTE_INDEX]; int iTo = _pParameters->p_OutputAttribMappings->p_Mappings[m].to; memcpy( pResultInMatchingEventBuffer + _pParameters->p_OutputStreamMetaEvent->p_Attributes[iTo].i_Position, // to mExpressionParam.a_Event[iFromStreamIndex] + mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Position, // from mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Length // size ); } iMatchedCount++; pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; } } else { pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; // cannot continue, last result event for this segment pResultInMatchingEvent->i_Type = GpuEvent::RESET; break; } } if(iMatchedCount < (iWindowEndEventIndex - iWindowStartEventIndex)) { char * pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; pResultInMatchingEvent->i_Type = GpuEvent::RESET; } } else { GpuEvent * pResultInMatchingEvent = (GpuEvent*) pResultsInEventBufferSegment; pResultInMatchingEvent->i_Type = GpuEvent::RESET; } } __global__ void __launch_bounds__(MY_KERNEL_MAX_THREADS, MY_KERNEL_MIN_BLOCKS) ProcessEventsJoinLeftTriggerExpiredOn( char * _pInputEventBuffer, // input events buffer GpuKernelMetaEvent * _pInputMetaEvent, // Meta event for input events int _iInputNumberOfEvents, // Number of events in input buffer char * _pEventWindowBuffer, // Event window buffer of this stream int _iWindowLength, // Length of current events window int _iRemainingCount, // Remaining free slots in Window buffer GpuKernelMetaEvent * _pOtherStreamMetaEvent, // Meta event for other stream char * _pOtherEventWindowBuffer, // Event window buffer of other stream int _iOtherWindowLength, // Length of current events window of other stream int _iOtherRemainingCount, // Remaining free slots in Window buffer of other stream GpuKernelFilter * _pOnCompareFilter, // OnCompare filter buffer - pre-copied at initialization uint64_t _iWithInTime, // WithIn time in milliseconds GpuKernelMetaEvent * _pOutputStreamMetaEvent, // Meta event for output stream char * _pResultsBuffer, // Resulting events buffer for this stream AttributeMappings * _pOutputAttribMappings, // Output event attribute mappings int _iEventsPerBlock // number of events allocated per block ) { 
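// One thread per input event: determine which event (if any) falls out of this stream's window as a
// result of the new batch, join that expired event against the other stream's window within the WithIn
// time, and emit EXPIRED-typed events into this thread's result segment; a RESET event marks the end
// of the segment.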
// avoid out of bound threads if(threadIdx.x >= _iEventsPerBlock || threadIdx.y > 0 || blockIdx.y > 0) return; if((blockIdx.x == _iInputNumberOfEvents / _iEventsPerBlock) && // last thread block (threadIdx.x >= _iInputNumberOfEvents % _iEventsPerBlock)) // extra threads { return; } // get assigned event int iEventIdx = (blockIdx.x * _iEventsPerBlock) + threadIdx.x; // output to results buffer [in event, expired event] // {other stream event size * other window size} * 2 (for in/exp) int iOutputSegmentSize = _pOutputStreamMetaEvent->i_SizeOfEventInBytes * _iOtherWindowLength; char * pResultsExpiredEventBufferSegment = _pResultsBuffer + (iOutputSegmentSize * iEventIdx); char * pExpiredEventBuffer = NULL; GpuEvent * pExpiredEvent = NULL; // calculate in/expired event pair for this event if(iEventIdx >= _iRemainingCount) { if(iEventIdx < _iWindowLength) { // in window buffer char * pExpiredOutEventInWindowBuffer = _pEventWindowBuffer + (_pInputMetaEvent->i_SizeOfEventInBytes * (iEventIdx - _iRemainingCount)); GpuEvent * pWindowEvent = (GpuEvent*) pExpiredOutEventInWindowBuffer; if(pWindowEvent->i_Type != GpuEvent::NONE) // if window event is filled { pExpiredEventBuffer = pExpiredOutEventInWindowBuffer; } else { // no expiring event } } else { // in input event buffer char * pExpiredOutEventInInputBuffer = _pInputEventBuffer + (_pInputMetaEvent->i_SizeOfEventInBytes * (iEventIdx - _iWindowLength)); pExpiredEventBuffer = pExpiredOutEventInInputBuffer; } } else { // [NULL,inEvent] // no expiring event } if(pExpiredEventBuffer != NULL) { pExpiredEvent = (GpuEvent*) pExpiredEventBuffer; // for each events in other window // get all matching event for in event from other window buffer and copy them to output event buffer int iOtherWindowFillCount = _iOtherWindowLength - _iOtherRemainingCount; int iMatchedCount = 0; // for each events in other window for(int i=0; i<iOtherWindowFillCount; ++i) { // get other window event char * pOtherWindowEventBuffer = _pOtherEventWindowBuffer + (_pOtherStreamMetaEvent->i_SizeOfEventInBytes * i); GpuEvent * pOtherWindowEvent = (GpuEvent*) pOtherWindowEventBuffer; // get buffer position for expire event matching results char * pResultExpireMatchingEventBuffer = pResultsExpiredEventBufferSegment + (_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultExpireMatchingEvent = (GpuEvent*) pResultExpireMatchingEventBuffer; if(pExpiredEvent->i_Sequence < pOtherWindowEvent->i_Sequence && (pOtherWindowEvent->i_Timestamp - pExpiredEvent->i_Timestamp) <= _iWithInTime) { ExpressionEvalParameters mExpressionParam; mExpressionParam.p_OnCompare = _pOnCompareFilter; mExpressionParam.a_Meta[0] = _pInputMetaEvent; mExpressionParam.a_Event[0] = pExpiredEventBuffer; mExpressionParam.a_Meta[1] = _pOtherStreamMetaEvent; mExpressionParam.a_Event[1] = pOtherWindowEventBuffer; mExpressionParam.i_CurrentIndex = 0; bool bOnCompareMatched = Evaluate(mExpressionParam); if(bOnCompareMatched) { // copy output event to buffer - map attributes from input streams to output stream pResultExpireMatchingEvent->i_Type = GpuEvent::EXPIRED; pResultExpireMatchingEvent->i_Sequence = pExpiredEvent->i_Sequence; pResultExpireMatchingEvent->i_Timestamp = pExpiredEvent->i_Timestamp; for(int m=0; m < _pOutputAttribMappings->i_MappingCount; ++m) { int iFromStreamIndex = _pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::STREAM_INDEX]; int iFromAttrib = _pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::ATTRIBUTE_INDEX]; int iTo = 
_pOutputAttribMappings->p_Mappings[m].to; memcpy( pResultExpireMatchingEventBuffer + _pOutputStreamMetaEvent->p_Attributes[iTo].i_Position, // to mExpressionParam.a_Event[iFromStreamIndex] + mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Position, // from mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Length // size ); } iMatchedCount++; } } else { // cannot continue, last result event for this segment pResultExpireMatchingEvent->i_Type = GpuEvent::RESET; break; } } if(iMatchedCount < iOtherWindowFillCount || iOtherWindowFillCount == 0) { char * pResultExpireMatchingEventBuffer = pResultsExpiredEventBufferSegment + (_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultExpireMatchingEvent = (GpuEvent*) pResultExpireMatchingEventBuffer; pResultExpireMatchingEvent->i_Type = GpuEvent::RESET; } } } __global__ void __launch_bounds__(MY_KERNEL_MAX_THREADS, MY_KERNEL_MIN_BLOCKS) ProcessEventsJoinRightTriggerAllOn( char * _pInputEventBuffer, // input events buffer GpuKernelMetaEvent * _pInputMetaEvent, // Meta event for input events int _iInputNumberOfEvents, // Number of events in input buffer char * _pEventWindowBuffer, // Event window buffer of this stream int _iWindowLength, // Length of current events window int _iRemainingCount, // Remaining free slots in Window buffer GpuKernelMetaEvent * _pOtherStreamMetaEvent, // Meta event for other stream char * _pOtherEventWindowBuffer, // Event window buffer of other stream int _iOtherWindowLength, // Length of current events window of other stream int _iOtherRemainingCount, // Remaining free slots in Window buffer of other stream GpuKernelFilter * _pOnCompareFilter, // OnCompare filter buffer - pre-copied at initialization uint64_t _iWithInTime, // WithIn time in milliseconds GpuKernelMetaEvent * _pOutputStreamMetaEvent, // Meta event for output stream char * _pResultsBuffer, // Resulting events buffer for this stream AttributeMappings * _pOutputAttribMappings, // Output event attribute mappings int _iEventsPerBlock // number of events allocated per block ) { // avoid out of bound threads if(threadIdx.x >= _iEventsPerBlock || threadIdx.y > 0 || blockIdx.y > 0) return; if((blockIdx.x == _iInputNumberOfEvents / _iEventsPerBlock) && // last thread block (threadIdx.x >= _iInputNumberOfEvents % _iEventsPerBlock)) // extra threads { return; } // get assigned event int iEventIdx = (blockIdx.x * _iEventsPerBlock) + threadIdx.x; // get in event starting position char * pInEventBuffer = _pInputEventBuffer + (_pInputMetaEvent->i_SizeOfEventInBytes * iEventIdx); // output to results buffer [in event, expired event] // {other stream event size * other window size} * 2 (for in/exp) int iOutputSegmentSize = _pOutputStreamMetaEvent->i_SizeOfEventInBytes * _iOtherWindowLength * 2; char * pResultsInEventBufferSegment = _pResultsBuffer + (iOutputSegmentSize * iEventIdx); char * pResultsExpiredEventBufferSegment = pResultsInEventBufferSegment + (iOutputSegmentSize / 2); char * pExpiredEventBuffer = NULL; GpuEvent * pExpiredEvent = NULL; GpuEvent * pInEvent = (GpuEvent*) pInEventBuffer; // calculate in/expired event pair for this event if(iEventIdx >= _iRemainingCount) { if(iEventIdx < _iWindowLength) { // in window buffer char * pExpiredOutEventInWindowBuffer = _pEventWindowBuffer + (_pInputMetaEvent->i_SizeOfEventInBytes * (iEventIdx - _iRemainingCount)); GpuEvent * pWindowEvent = (GpuEvent*) pExpiredOutEventInWindowBuffer; if(pWindowEvent->i_Type != GpuEvent::NONE) // if window event is filled 
{ pExpiredEventBuffer = pExpiredOutEventInWindowBuffer; } else { // no expiring event } } else { // in input event buffer char * pExpiredOutEventInInputBuffer = _pInputEventBuffer + (_pInputMetaEvent->i_SizeOfEventInBytes * (iEventIdx - _iWindowLength)); pExpiredEventBuffer = pExpiredOutEventInInputBuffer; } } else { // [NULL,inEvent] // no expiring event } // get all matching event for in event from other window buffer and copy them to output event buffer // for each events in other window int iOtherWindowFillCount = _iOtherWindowLength - _iOtherRemainingCount; int iMatchedCount = 0; for(int i=0; i<iOtherWindowFillCount; ++i) { // get other window event char * pOtherWindowEventBuffer = _pOtherEventWindowBuffer + (_pOtherStreamMetaEvent->i_SizeOfEventInBytes * i); GpuEvent * pOtherWindowEvent = (GpuEvent*) pOtherWindowEventBuffer; // get buffer position for in event matching results char * pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; if(pInEvent->i_Sequence > pOtherWindowEvent->i_Sequence && (pInEvent->i_Timestamp - pOtherWindowEvent->i_Timestamp) <= _iWithInTime) { ExpressionEvalParameters mExpressionParam; mExpressionParam.p_OnCompare = _pOnCompareFilter; mExpressionParam.a_Meta[0] = _pOtherStreamMetaEvent; mExpressionParam.a_Event[0] = pOtherWindowEventBuffer; mExpressionParam.a_Meta[1] = _pInputMetaEvent; mExpressionParam.a_Event[1] = pInEventBuffer; mExpressionParam.i_CurrentIndex = 0; bool bOnCompareMatched = Evaluate(mExpressionParam); if(bOnCompareMatched) { // copy output event to buffer - map attributes from input streams to output stream pResultInMatchingEvent->i_Type = GpuEvent::CURRENT; pResultInMatchingEvent->i_Sequence = pInEvent->i_Sequence; pResultInMatchingEvent->i_Timestamp = pInEvent->i_Timestamp; for(int m=0; m < _pOutputAttribMappings->i_MappingCount; ++m) { int iFromStreamIndex = _pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::STREAM_INDEX]; int iFromAttrib = _pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::ATTRIBUTE_INDEX]; int iTo = _pOutputAttribMappings->p_Mappings[m].to; memcpy( pResultInMatchingEventBuffer + _pOutputStreamMetaEvent->p_Attributes[iTo].i_Position, // to mExpressionParam.a_Event[iFromStreamIndex] + mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Position, // from mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Length // size ); } iMatchedCount++; } } else { // cannot continue, last result event for this segment pResultInMatchingEvent->i_Type = GpuEvent::RESET; break; } if(iMatchedCount < iOtherWindowFillCount || iOtherWindowFillCount == 0) { char * pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; pResultInMatchingEvent->i_Type = GpuEvent::RESET; } } if(pExpiredEventBuffer != NULL) { pExpiredEvent = (GpuEvent*) pExpiredEventBuffer; iMatchedCount = 0; // for each events in other window for(int i=0; i<iOtherWindowFillCount; ++i) { // get other window event char * pOtherWindowEventBuffer = _pOtherEventWindowBuffer + (_pOtherStreamMetaEvent->i_SizeOfEventInBytes * i); GpuEvent * pOtherWindowEvent = (GpuEvent*) pOtherWindowEventBuffer; // get buffer position for expire event matching results char * pResultExpireMatchingEventBuffer = pResultsExpiredEventBufferSegment + 
(_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultExpireMatchingEvent = (GpuEvent*) pResultExpireMatchingEventBuffer; if(pExpiredEvent->i_Sequence < pOtherWindowEvent->i_Sequence && (pOtherWindowEvent->i_Timestamp - pExpiredEvent->i_Timestamp) <= _iWithInTime) { ExpressionEvalParameters mExpressionParam; mExpressionParam.p_OnCompare = _pOnCompareFilter; mExpressionParam.a_Meta[0] = _pOtherStreamMetaEvent; mExpressionParam.a_Event[0] = pOtherWindowEventBuffer; mExpressionParam.a_Meta[1] = _pInputMetaEvent; mExpressionParam.a_Event[1] = pExpiredEventBuffer; mExpressionParam.i_CurrentIndex = 0; bool bOnCompareMatched = Evaluate(mExpressionParam); if(bOnCompareMatched) { // copy output event to buffer - map attributes from input streams to output stream pResultExpireMatchingEvent->i_Type = GpuEvent::EXPIRED; pResultExpireMatchingEvent->i_Sequence = pExpiredEvent->i_Sequence; pResultExpireMatchingEvent->i_Timestamp = pExpiredEvent->i_Timestamp; for(int m=0; m < _pOutputAttribMappings->i_MappingCount; ++m) { int iFromStreamIndex = _pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::STREAM_INDEX]; int iFromAttrib = _pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::ATTRIBUTE_INDEX]; int iTo = _pOutputAttribMappings->p_Mappings[m].to; memcpy( pResultExpireMatchingEventBuffer + _pOutputStreamMetaEvent->p_Attributes[iTo].i_Position, // to mExpressionParam.a_Event[iFromStreamIndex] + mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Position, // from mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Length // size ); } iMatchedCount++; } } else { // cannot continue, last result event for this segment pResultExpireMatchingEvent->i_Type = GpuEvent::RESET; break; } } if(iMatchedCount < iOtherWindowFillCount || iOtherWindowFillCount == 0) { char * pResultExpireMatchingEventBuffer = pResultsExpiredEventBufferSegment + (_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultExpireMatchingEvent = (GpuEvent*) pResultExpireMatchingEventBuffer; pResultExpireMatchingEvent->i_Type = GpuEvent::RESET; } } } //__global__ //void ProcessEventsJoinRightTriggerCurrentOn( // char * _pInputEventBuffer, // input events buffer // GpuKernelMetaEvent * _pInputMetaEvent, // Meta event for input events // int _iInputNumberOfEvents, // Number of events in input buffer // char * _pEventWindowBuffer, // Event window buffer of this stream // int _iWindowLength, // Length of current events window // int _iRemainingCount, // Remaining free slots in Window buffer // GpuKernelMetaEvent * _pOtherStreamMetaEvent, // Meta event for other stream // char * _pOtherEventWindowBuffer, // Event window buffer of other stream // int _iOtherWindowLength, // Length of current events window of other stream // int _iOtherRemainingCount, // Remaining free slots in Window buffer of other stream // GpuKernelFilter * _pOnCompareFilter, // OnCompare filter buffer - pre-copied at initialization // uint64_t _iWithInTime, // WithIn time in milliseconds // GpuKernelMetaEvent * _pOutputStreamMetaEvent, // Meta event for output stream // char * _pResultsBuffer, // Resulting events buffer for this stream // AttributeMappings * _pOutputAttribMappings, // Output event attribute mappings // int _iEventsPerBlock, // number of events allocated per block // int _iWorkSize // Number of events in window process by this kernel //) __global__ void __launch_bounds__(MY_KERNEL_MAX_THREADS, MY_KERNEL_MIN_BLOCKS) ProcessEventsJoinRightTriggerCurrentOn( 
JoinKernelParameters * _pParameters, int _iInputNumberOfEvents, // Number of events in input buffer int _iRemainingCount, // Remaining free slots in Window buffer int _iOtherRemainingCount // Remaining free slots in Window buffer of other stream ) { // avoid out of bound threads if(threadIdx.x >= _pParameters->i_EventsPerBlock || threadIdx.y > 0 || blockIdx.y > 0) return; int iWorkerCount = ceil((float)_pParameters->i_OtherWindowLength / _pParameters->i_WorkSize); if((blockIdx.x == (_iInputNumberOfEvents * iWorkerCount) / _pParameters->i_EventsPerBlock) && // last thread block (threadIdx.x >= (_iInputNumberOfEvents * iWorkerCount) % _pParameters->i_EventsPerBlock)) // extra threads { return; } extern __shared__ char p_SharedInputEventBuffer[]; // get assigned event int iGlobalThreadIdx = (blockIdx.x * _pParameters->i_EventsPerBlock) + threadIdx.x; // get in buffer index int iInEventIndex = iGlobalThreadIdx / iWorkerCount; int iWindowStartEventIndex = (iGlobalThreadIdx % iWorkerCount) * _pParameters->i_WorkSize; // get in event starting position // char * pInEventBuffer = _pParameters->p_InputEventBuffer + (_pParameters->p_InputMetaEvent->i_SizeOfEventInBytes * iInEventIndex); char * pSharedInEventBuffer = p_SharedInputEventBuffer + (_pParameters->p_InputMetaEvent->i_SizeOfEventInBytes * (threadIdx.x / iWorkerCount)); if(threadIdx.x % iWorkerCount == 0) { memcpy(pSharedInEventBuffer, _pParameters->p_InputEventBuffer + (_pParameters->p_InputMetaEvent->i_SizeOfEventInBytes * iInEventIndex), _pParameters->p_InputMetaEvent->i_SizeOfEventInBytes); } __syncthreads(); // output to results buffer [in event, expired event] // {other stream event size * other window size} * 2 (for in/exp) // int iOutputSegmentSizePerEvent = _pOutputStreamMetaEvent->i_SizeOfEventInBytes * _iOtherWindowLength; char * pResultsInEventBufferSegment = _pParameters->p_ResultsBuffer + (iGlobalThreadIdx * _pParameters->i_WorkSize * _pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes); // + (iWindowStartEventIndex * _pOutputStreamMetaEvent->i_SizeOfEventInBytes); GpuEvent * pInEvent = (GpuEvent*) pSharedInEventBuffer; // memset(pResultsInEventBufferSegment, 0, _pParameters->i_WorkSize * _pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes); // get all matching event for in event from other window buffer and copy them to output event buffer // for each events in other window int iOtherWindowFillCount = _pParameters->i_OtherWindowLength - _iOtherRemainingCount; if(iWindowStartEventIndex < iOtherWindowFillCount) { int iWindowEndEventIndex = min(iWindowStartEventIndex + _pParameters->i_WorkSize, iOtherWindowFillCount); int iMatchedCount = 0; // get buffer position for in event matching results char * pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; for(int i=iWindowStartEventIndex; i<iWindowEndEventIndex; ++i) { // get other window event char * pOtherWindowEventBuffer = _pParameters->p_OtherEventWindowBuffer + (_pParameters->p_OtherStreamMetaEvent->i_SizeOfEventInBytes * i); GpuEvent * pOtherWindowEvent = (GpuEvent*) pOtherWindowEventBuffer; if(pInEvent->i_Sequence > pOtherWindowEvent->i_Sequence && (pInEvent->i_Timestamp - pOtherWindowEvent->i_Timestamp) <= _pParameters->i_WithInTime) { ExpressionEvalParameters mExpressionParam; mExpressionParam.p_OnCompare = _pParameters->p_OnCompareFilter; mExpressionParam.a_Meta[0] = 
_pParameters->p_OtherStreamMetaEvent; mExpressionParam.a_Event[0] = pOtherWindowEventBuffer; mExpressionParam.a_Meta[1] = _pParameters->p_InputMetaEvent; mExpressionParam.a_Event[1] = pSharedInEventBuffer; mExpressionParam.i_CurrentIndex = 0; bool bOnCompareMatched = Evaluate(mExpressionParam); if(bOnCompareMatched) { // copy output event to buffer - map attributes from input streams to output stream pResultInMatchingEvent->i_Type = GpuEvent::CURRENT; pResultInMatchingEvent->i_Sequence = pInEvent->i_Sequence; pResultInMatchingEvent->i_Timestamp = pInEvent->i_Timestamp; #pragma __unroll__ for(int m=0; m < _pParameters->p_OutputAttribMappings->i_MappingCount; ++m) { int iFromStreamIndex = _pParameters->p_OutputAttribMappings->p_Mappings[m].from[AttributeMapping::STREAM_INDEX]; int iFromAttrib = _pParameters->p_OutputAttribMappings->p_Mappings[m].from[AttributeMapping::ATTRIBUTE_INDEX]; int iTo = _pParameters->p_OutputAttribMappings->p_Mappings[m].to; memcpy( pResultInMatchingEventBuffer + _pParameters->p_OutputStreamMetaEvent->p_Attributes[iTo].i_Position, // to mExpressionParam.a_Event[iFromStreamIndex] + mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Position, // from mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Length // size ); } iMatchedCount++; pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; } } else { pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; // cannot continue, last result event for this segment pResultInMatchingEvent->i_Type = GpuEvent::RESET; break; } } if(iMatchedCount < (iWindowEndEventIndex - iWindowStartEventIndex)) { char * pResultInMatchingEventBuffer = pResultsInEventBufferSegment + (_pParameters->p_OutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultInMatchingEvent = (GpuEvent*) pResultInMatchingEventBuffer; pResultInMatchingEvent->i_Type = GpuEvent::RESET; } } else { GpuEvent * pResultInMatchingEvent = (GpuEvent*) pResultsInEventBufferSegment; pResultInMatchingEvent->i_Type = GpuEvent::RESET; } } __global__ void __launch_bounds__(MY_KERNEL_MAX_THREADS, MY_KERNEL_MIN_BLOCKS) ProcessEventsJoinRightTriggerExpireOn( char * _pInputEventBuffer, // input events buffer GpuKernelMetaEvent * _pInputMetaEvent, // Meta event for input events int _iInputNumberOfEvents, // Number of events in input buffer char * _pEventWindowBuffer, // Event window buffer of this stream int _iWindowLength, // Length of current events window int _iRemainingCount, // Remaining free slots in Window buffer GpuKernelMetaEvent * _pOtherStreamMetaEvent, // Meta event for other stream char * _pOtherEventWindowBuffer, // Event window buffer of other stream int _iOtherWindowLength, // Length of current events window of other stream int _iOtherRemainingCount, // Remaining free slots in Window buffer of other stream GpuKernelFilter * _pOnCompareFilter, // OnCompare filter buffer - pre-copied at initialization uint64_t _iWithInTime, // WithIn time in milliseconds GpuKernelMetaEvent * _pOutputStreamMetaEvent, // Meta event for output stream char * _pResultsBuffer, // Resulting events buffer for this stream AttributeMappings * _pOutputAttribMappings, // Output event attribute mappings int _iEventsPerBlock // number of 
events allocated per block ) { // avoid out of bound threads if(threadIdx.x >= _iEventsPerBlock || threadIdx.y > 0 || blockIdx.y > 0) return; if((blockIdx.x == _iInputNumberOfEvents / _iEventsPerBlock) && // last thread block (threadIdx.x >= _iInputNumberOfEvents % _iEventsPerBlock)) // extra threads { return; } // get assigned event int iEventIdx = (blockIdx.x * _iEventsPerBlock) + threadIdx.x; // output to results buffer [in event, expired event] // {other stream event size * other window size} * 2 (for in/exp) int iOutputSegmentSize = _pOutputStreamMetaEvent->i_SizeOfEventInBytes * _iOtherWindowLength; char * pResultsExpiredEventBufferSegment = _pResultsBuffer + (iOutputSegmentSize * iEventIdx); char * pExpiredEventBuffer = NULL; GpuEvent * pExpiredEvent = NULL; // calculate in/expired event pair for this event if(iEventIdx >= _iRemainingCount) { if(iEventIdx < _iWindowLength) { // in window buffer char * pExpiredOutEventInWindowBuffer = _pEventWindowBuffer + (_pInputMetaEvent->i_SizeOfEventInBytes * (iEventIdx - _iRemainingCount)); GpuEvent * pWindowEvent = (GpuEvent*) pExpiredOutEventInWindowBuffer; if(pWindowEvent->i_Type != GpuEvent::NONE) // if window event is filled { pExpiredEventBuffer = pExpiredOutEventInWindowBuffer; } else { // no expiring event } } else { // in input event buffer char * pExpiredOutEventInInputBuffer = _pInputEventBuffer + (_pInputMetaEvent->i_SizeOfEventInBytes * (iEventIdx - _iWindowLength)); pExpiredEventBuffer = pExpiredOutEventInInputBuffer; } } else { // [NULL,inEvent] // no expiring event } if(pExpiredEventBuffer != NULL) { // get all matching event for in event from other window buffer and copy them to output event buffer pExpiredEvent = (GpuEvent*) pExpiredEventBuffer; // for each events in other window int iOtherWindowFillCount = _iOtherWindowLength - _iOtherRemainingCount; int iMatchedCount = 0; // for each events in other window for(int i=0; i<iOtherWindowFillCount; ++i) { // get other window event char * pOtherWindowEventBuffer = _pOtherEventWindowBuffer + (_pOtherStreamMetaEvent->i_SizeOfEventInBytes * i); GpuEvent * pOtherWindowEvent = (GpuEvent*) pOtherWindowEventBuffer; // get buffer position for expire event matching results char * pResultExpireMatchingEventBuffer = pResultsExpiredEventBufferSegment + (_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultExpireMatchingEvent = (GpuEvent*) pResultExpireMatchingEventBuffer; if(pExpiredEvent->i_Sequence < pOtherWindowEvent->i_Sequence && (pOtherWindowEvent->i_Timestamp - pExpiredEvent->i_Timestamp) <= _iWithInTime) { ExpressionEvalParameters mExpressionParam; mExpressionParam.p_OnCompare = _pOnCompareFilter; mExpressionParam.a_Meta[0] = _pOtherStreamMetaEvent; mExpressionParam.a_Event[0] = pOtherWindowEventBuffer; mExpressionParam.a_Meta[1] = _pInputMetaEvent; mExpressionParam.a_Event[1] = pExpiredEventBuffer; mExpressionParam.i_CurrentIndex = 0; bool bOnCompareMatched = Evaluate(mExpressionParam); if(bOnCompareMatched) { // copy output event to buffer - map attributes from input streams to output stream pResultExpireMatchingEvent->i_Type = GpuEvent::EXPIRED; pResultExpireMatchingEvent->i_Sequence = pExpiredEvent->i_Sequence; pResultExpireMatchingEvent->i_Timestamp = pExpiredEvent->i_Timestamp; for(int m=0; m < _pOutputAttribMappings->i_MappingCount; ++m) { int iFromStreamIndex = _pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::STREAM_INDEX]; int iFromAttrib = _pOutputAttribMappings->p_Mappings[m].from[AttributeMapping::ATTRIBUTE_INDEX]; int iTo = 
_pOutputAttribMappings->p_Mappings[m].to; memcpy( pResultExpireMatchingEventBuffer + _pOutputStreamMetaEvent->p_Attributes[iTo].i_Position, // to mExpressionParam.a_Event[iFromStreamIndex] + mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Position, // from mExpressionParam.a_Meta[iFromStreamIndex]->p_Attributes[iFromAttrib].i_Length // size ); } iMatchedCount++; } } else { // cannot continue, last result event for this segment pResultExpireMatchingEvent->i_Type = GpuEvent::RESET; break; } } if(iMatchedCount < iOtherWindowFillCount || iOtherWindowFillCount == 0) { char * pResultExpireMatchingEventBuffer = pResultsExpiredEventBufferSegment + (_pOutputStreamMetaEvent->i_SizeOfEventInBytes * iMatchedCount); GpuEvent * pResultExpireMatchingEvent = (GpuEvent*) pResultExpireMatchingEventBuffer; pResultExpireMatchingEvent->i_Type = GpuEvent::RESET; } } } __global__ void __launch_bounds__(MY_KERNEL_MAX_THREADS, MY_KERNEL_MIN_BLOCKS) JoinSetWindowState( char * _pInputEventBuffer, // original input events buffer int _iNumberOfEvents, // Number of events in input buffer (matched + not matched) char * _pEventWindowBuffer, // Event window buffer int _iWindowLength, // Length of current events window int _iRemainingCount, // Remaining free slots in Window buffer int _iMaxEventCount, // used for setting results array int _iSizeOfEvent, // Size of an event int _iEventsPerBlock // number of events allocated per block ) { // avoid out of bound threads if(threadIdx.x >= _iEventsPerBlock || threadIdx.y > 0 || blockIdx.y > 0) return; if((blockIdx.x == _iNumberOfEvents / _iEventsPerBlock) && // last thread block (threadIdx.x >= _iNumberOfEvents % _iEventsPerBlock)) // extra threads { return; } // get assigned event int iEventIdx = (blockIdx.x * _iEventsPerBlock) + threadIdx.x; // get in event starting position char * pInEventBuffer = _pInputEventBuffer + (_iSizeOfEvent * iEventIdx); if(_iNumberOfEvents < _iWindowLength) { int iWindowPositionShift = _iWindowLength - _iNumberOfEvents; if(_iRemainingCount < _iNumberOfEvents) { int iExitEventCount = _iNumberOfEvents - _iRemainingCount; // calculate start and end window buffer positions int iStart = iEventIdx + iWindowPositionShift; int iEnd = iStart; int iPrevToEnd = iEnd; while(iEnd >= 0) { char * pDestinationEventBuffer = _pEventWindowBuffer + (_iSizeOfEvent * iEnd); GpuEvent * pDestinationEvent = (GpuEvent*) pDestinationEventBuffer; if(pDestinationEvent->i_Type != GpuEvent::NONE) // there is an event in destination position { iPrevToEnd = iEnd; iEnd -= iExitEventCount; } else { break; } } iEnd = (iEnd < 0 ? 
iPrevToEnd : iEnd); // work back from end while copying events while(iEnd < iStart) { char * pDestinationEventBuffer = _pEventWindowBuffer + (_iSizeOfEvent * iEnd); GpuEvent * pDestinationEvent = (GpuEvent*) pDestinationEventBuffer; char * pSourceEventBuffer = _pEventWindowBuffer + (_iSizeOfEvent * (iEnd + iExitEventCount)); memcpy(pDestinationEventBuffer, pSourceEventBuffer, _iSizeOfEvent); pDestinationEvent->i_Type = GpuEvent::EXPIRED; iEnd += iExitEventCount; } // iEnd == iStart if(iStart >= 0) { char * pDestinationEventBuffer = _pEventWindowBuffer + (_iSizeOfEvent * iStart); GpuEvent * pDestinationEvent = (GpuEvent*) pDestinationEventBuffer; memcpy(pDestinationEventBuffer, pInEventBuffer, _iSizeOfEvent); pDestinationEvent->i_Type = GpuEvent::EXPIRED; } } else { // just copy event to window iWindowPositionShift -= (_iRemainingCount - _iNumberOfEvents); char * pWindowEventBuffer = _pEventWindowBuffer + (_iSizeOfEvent * (iEventIdx + iWindowPositionShift)); memcpy(pWindowEventBuffer, pInEventBuffer, _iSizeOfEvent); GpuEvent * pExpiredEvent = (GpuEvent*) pWindowEventBuffer; pExpiredEvent->i_Type = GpuEvent::EXPIRED; } } else { int iWindowPositionShift = _iNumberOfEvents - _iWindowLength; if(iEventIdx >= iWindowPositionShift) { char * pWindowEventBuffer = _pEventWindowBuffer + (_iSizeOfEvent * (iEventIdx - iWindowPositionShift)); memcpy(pWindowEventBuffer, pInEventBuffer, _iSizeOfEvent); GpuEvent * pExpiredEvent = (GpuEvent*) pWindowEventBuffer; pExpiredEvent->i_Type = GpuEvent::EXPIRED; } } } // ====================================================================================================================== GpuJoinKernel::GpuJoinKernel(GpuProcessor * _pProc, GpuProcessorContext * _pLeftContext, GpuProcessorContext * _pRightContext, int _iThreadBlockSize, int _iLeftWindowSize, int _iRightWindowSize, FILE * _fpLeftLog, FILE * _fpRightLog) : GpuKernel(_pProc, _pLeftContext->GetDeviceId(), _iThreadBlockSize, _fpLeftLog), p_LeftContext(_pLeftContext), p_RightContext(_pRightContext), i_LeftInputBufferIndex(0), i_RightInputBufferIndex(0), p_LeftInputEventBuffer(NULL), p_RightInputEventBuffer(NULL), p_LeftWindowEventBuffer(NULL), p_RightWindowEventBuffer(NULL), p_LeftResultEventBuffer(NULL), p_RightResultEventBuffer(NULL), p_DeviceOnCompareFilter(NULL), p_DeviceParametersLeft(NULL), p_DeviceParametersRight(NULL), i_LeftStreamWindowSize(_iLeftWindowSize), i_RightStreamWindowSize(_iRightWindowSize), // i_LeftRemainingCount(_iLeftWindowSize), // i_RightRemainingCount(_iRightWindowSize), i_LeftNumEventPerSegment(0), i_RightNumEventPerSegment(0), b_LeftFirstKernel(true), b_RightFirstKernel(true), b_LeftDeviceSet(false), b_RightDeviceSet(false), i_LeftThreadWorkSize(_iRightWindowSize), i_RightThreadWorkSize(_iLeftWindowSize), i_LeftThreadWorkerCount(0), i_RightThreadWorkerCount(0), i_InitializedStreamCount(0), fp_LeftLog(_fpLeftLog), fp_RightLog(_fpRightLog) { p_JoinProcessor = (GpuJoinProcessor*) _pProc; pthread_mutex_init(&mtx_Lock, NULL); } GpuJoinKernel::~GpuJoinKernel() { fprintf(fp_LeftLog, "[GpuJoinKernel] destroy\n"); fflush(fp_LeftLog); fprintf(fp_RightLog, "[GpuJoinKernel] destroy\n"); fflush(fp_RightLog); CUDA_CHECK_RETURN(cudaFree(p_DeviceOnCompareFilter)); p_DeviceOnCompareFilter = NULL; if(p_DeviceOutputAttributeMapping) { CUDA_CHECK_RETURN(cudaFree(p_DeviceOutputAttributeMapping)); p_DeviceOutputAttributeMapping = NULL; } if(p_DeviceParametersLeft) { CUDA_CHECK_RETURN(cudaFree(p_DeviceParametersLeft)); p_DeviceParametersLeft = NULL; } if(p_DeviceParametersRight) { 
CUDA_CHECK_RETURN(cudaFree(p_DeviceParametersRight)); p_DeviceParametersRight = NULL; } pthread_mutex_destroy(&mtx_Lock); } bool GpuJoinKernel::Initialize(int _iStreamIndex, GpuMetaEvent * _pMetaEvent, int _iInputEventBufferSize) { if(_iStreamIndex == 0) { fprintf(fp_LeftLog, "[GpuJoinKernel] Initialize : StreamIndex=%d LeftTrigger=%d RightTrigger=%d CurrentOn=%d ExpireOn=%d\n", _iStreamIndex, p_JoinProcessor->GetLeftTrigger(), p_JoinProcessor->GetRightTrigger(), p_JoinProcessor->GetCurrentOn(), p_JoinProcessor->GetExpiredOn()); fflush(fp_LeftLog); // set input event buffer fprintf(fp_LeftLog, "[GpuJoinKernel] Left InpuEventBufferIndex=%d\n", i_LeftInputBufferIndex); fflush(fp_LeftLog); p_LeftInputEventBuffer = (GpuStreamEventBuffer*) p_LeftContext->GetEventBuffer(i_LeftInputBufferIndex); p_LeftInputEventBuffer->Print(); // left event window p_LeftWindowEventBuffer = new GpuWindowEventBuffer("LeftWindowEventBuffer", p_LeftContext->GetDeviceId(), _pMetaEvent, fp_LeftLog); p_LeftWindowEventBuffer->CreateEventBuffer(i_LeftStreamWindowSize); fprintf(fp_LeftLog, "[GpuJoinKernel] Created device left window buffer : Length=%d Size=%d bytes\n", i_LeftStreamWindowSize, p_LeftWindowEventBuffer->GetEventBufferSizeInBytes()); fflush(fp_LeftLog); fprintf(fp_LeftLog, "[GpuJoinKernel] initialize left window buffer data \n"); fflush(fp_LeftLog); p_LeftWindowEventBuffer->Print(); p_LeftWindowEventBuffer->ResetHostEventBuffer(0); char * pLeftHostWindowBuffer = p_LeftWindowEventBuffer->GetHostEventBuffer(); char * pCurrentEvent; for(int i=0; i<i_LeftStreamWindowSize; ++i) { pCurrentEvent = pLeftHostWindowBuffer + (_pMetaEvent->i_SizeOfEventInBytes * i); GpuEvent * pGpuEvent = (GpuEvent*) pCurrentEvent; pGpuEvent->i_Type = GpuEvent::NONE; } p_LeftWindowEventBuffer->CopyToDevice(false); p_LeftWindowEventBuffer->Sync(0, false); i_InitializedStreamCount++; GpuUtils::PrintThreadInfo("GpuJoinKernel", fp_LeftLog); } else if(_iStreamIndex == 1) { fprintf(fp_RightLog, "[GpuJoinKernel] Initialize : StreamIndex=%d LeftTrigger=%d RightTrigger=%d CurrentOn=%d ExpireOn=%d\n", _iStreamIndex, p_JoinProcessor->GetLeftTrigger(), p_JoinProcessor->GetRightTrigger(), p_JoinProcessor->GetCurrentOn(), p_JoinProcessor->GetExpiredOn()); fflush(fp_RightLog); fprintf(fp_RightLog, "[GpuJoinKernel] Right InpuEventBufferIndex=%d\n", i_RightInputBufferIndex); fflush(fp_RightLog); p_RightInputEventBuffer = (GpuStreamEventBuffer*) p_RightContext->GetEventBuffer(i_RightInputBufferIndex); p_RightInputEventBuffer->Print(); // right event window p_RightWindowEventBuffer = new GpuWindowEventBuffer("RightWindowEventBuffer", p_RightContext->GetDeviceId(), _pMetaEvent, fp_RightLog); p_RightWindowEventBuffer->CreateEventBuffer(i_RightStreamWindowSize); fprintf(fp_RightLog, "[GpuJoinKernel] Created device right window buffer : Length=%d Size=%d bytes\n", i_RightStreamWindowSize, p_RightWindowEventBuffer->GetEventBufferSizeInBytes()); fflush(fp_RightLog); fprintf(fp_RightLog, "[GpuJoinKernel] initialize right window buffer data \n"); fflush(fp_RightLog); p_RightWindowEventBuffer->Print(); p_RightWindowEventBuffer->ResetHostEventBuffer(0); char * pRightHostWindowBuffer = p_RightWindowEventBuffer->GetHostEventBuffer(); char * pCurrentEvent; for(int i=0; i<i_RightStreamWindowSize; ++i) { pCurrentEvent = pRightHostWindowBuffer + (_pMetaEvent->i_SizeOfEventInBytes * i); GpuEvent * pGpuEvent = (GpuEvent*) pCurrentEvent; pGpuEvent->i_Type = GpuEvent::NONE; } p_RightWindowEventBuffer->CopyToDevice(false); p_RightWindowEventBuffer->Sync(0, false); 
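// Note: both stream branches above pre-stamp every slot of their window buffer with
// GpuEvent::NONE so the device kernels can distinguish an unfilled slot from a real
// event. The state shared by the two streams - the result buffers, the OnCompare
// filter, the output attribute mappings and the per-stream JoinKernelParameters
// blocks - is only built once both streams have initialised, i.e. when
// i_InitializedStreamCount reaches 2 below.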
i_InitializedStreamCount++; GpuUtils::PrintThreadInfo("GpuJoinKernel", fp_RightLog); } if(i_InitializedStreamCount == 2) { fprintf(fp_LeftLog, "[GpuJoinKernel] StreamId=%d Creating result event buffer\n", _iStreamIndex); fflush(fp_LeftLog); fprintf(fp_RightLog, "[GpuJoinKernel] StreamId=%d Creating result event buffer\n", _iStreamIndex); fflush(fp_RightLog); p_LeftResultEventBuffer = new GpuStreamEventBuffer("JoinLeftResultEventBuffer", p_LeftContext->GetDeviceId(), p_OutputStreamMeta, fp_LeftLog); if(p_JoinProcessor->GetLeftTrigger()) { int iEventCount = 0; if(p_JoinProcessor->GetCurrentOn()) { iEventCount += i_RightStreamWindowSize * p_LeftInputEventBuffer->GetMaxEventCount(); i_LeftNumEventPerSegment = i_RightStreamWindowSize; } if(p_JoinProcessor->GetExpiredOn()) { iEventCount += i_RightStreamWindowSize * p_LeftInputEventBuffer->GetMaxEventCount(); i_LeftNumEventPerSegment += i_RightStreamWindowSize; } p_LeftResultEventBuffer->CreateEventBuffer(iEventCount); fprintf(fp_LeftLog, "[GpuJoinKernel] LeftResultEventBuffer created : Size=%d bytes\n", p_LeftResultEventBuffer->GetEventBufferSizeInBytes()); fflush(fp_LeftLog); } p_LeftResultEventBuffer->Print(); p_RightResultEventBuffer = new GpuStreamEventBuffer("JoinRightResultEventBuffer", p_RightContext->GetDeviceId(), p_OutputStreamMeta, fp_RightLog); if(p_JoinProcessor->GetRightTrigger()) { int iEventCount = 0; if(p_JoinProcessor->GetCurrentOn()) { iEventCount += i_LeftStreamWindowSize * p_RightInputEventBuffer->GetMaxEventCount(); i_RightNumEventPerSegment = i_LeftStreamWindowSize; } if(p_JoinProcessor->GetExpiredOn()) { iEventCount += i_LeftStreamWindowSize * p_RightInputEventBuffer->GetMaxEventCount(); i_RightNumEventPerSegment += i_LeftStreamWindowSize; } p_RightResultEventBuffer->CreateEventBuffer(iEventCount); fprintf(fp_RightLog, "[GpuJoinKernel] RightResultEventBuffer created : Size=%d bytes\n", p_RightResultEventBuffer->GetEventBufferSizeInBytes()); fflush(fp_RightLog); } p_RightResultEventBuffer->Print(); fprintf(fp_LeftLog, "[GpuJoinKernel] Copying OnCompare filter to device \n"); fflush(fp_LeftLog); fprintf(fp_RightLog, "[GpuJoinKernel] Copying OnCompare filter to device \n"); fflush(fp_RightLog); CUDA_CHECK_RETURN(cudaMalloc( (void**) &p_DeviceOnCompareFilter, sizeof(GpuKernelFilter))); GpuKernelFilter * apHostFilters = (GpuKernelFilter *) malloc(sizeof(GpuKernelFilter)); apHostFilters->i_NodeCount = p_JoinProcessor->i_NodeCount; apHostFilters->ap_ExecutorNodes = NULL; CUDA_CHECK_RETURN(cudaMalloc( (void**) &apHostFilters->ap_ExecutorNodes, sizeof(ExecutorNode) * p_JoinProcessor->i_NodeCount)); CUDA_CHECK_RETURN(cudaMemcpy( apHostFilters->ap_ExecutorNodes, p_JoinProcessor->ap_ExecutorNodes, sizeof(ExecutorNode) * p_JoinProcessor->i_NodeCount, cudaMemcpyHostToDevice)); CUDA_CHECK_RETURN(cudaMemcpy( p_DeviceOnCompareFilter, apHostFilters, sizeof(GpuKernelFilter), cudaMemcpyHostToDevice)); CUDA_CHECK_RETURN(cudaPeekAtLastError()); CUDA_CHECK_RETURN(cudaThreadSynchronize()); free(apHostFilters); apHostFilters = NULL; // copy Output mappings if(p_HostOutputAttributeMapping) { fprintf(fp_LeftLog, "[GpuJoinKernel] Copying AttributeMappings to device \n"); fflush(fp_LeftLog); fprintf(fp_RightLog, "[GpuJoinKernel] Copying AttributeMappings to device \n"); fflush(fp_RightLog); fprintf(fp_LeftLog, "[GpuJoinKernel] AttributeMapCount : %d \n", p_HostOutputAttributeMapping->i_MappingCount); fprintf(fp_RightLog, "[GpuJoinKernel] AttributeMapCount : %d \n", p_HostOutputAttributeMapping->i_MappingCount); for(int c=0; 
c<p_HostOutputAttributeMapping->i_MappingCount; ++c) { fprintf(fp_LeftLog, "[GpuJoinKernel] Map : Form [Stream=%d, Attrib=%d] To [Attrib=%d] \n", p_HostOutputAttributeMapping->p_Mappings[c].from[AttributeMapping::STREAM_INDEX], p_HostOutputAttributeMapping->p_Mappings[c].from[AttributeMapping::ATTRIBUTE_INDEX], p_HostOutputAttributeMapping->p_Mappings[c].to); fprintf(fp_RightLog, "[GpuJoinKernel] Map : Form [Stream=%d, Attrib=%d] To [Attrib=%d] \n", p_HostOutputAttributeMapping->p_Mappings[c].from[AttributeMapping::STREAM_INDEX], p_HostOutputAttributeMapping->p_Mappings[c].from[AttributeMapping::ATTRIBUTE_INDEX], p_HostOutputAttributeMapping->p_Mappings[c].to); } CUDA_CHECK_RETURN(cudaMalloc( (void**) &p_DeviceOutputAttributeMapping, sizeof(AttributeMappings))); AttributeMappings * pHostMappings = (AttributeMappings*) malloc(sizeof(AttributeMappings)); pHostMappings->i_MappingCount = p_HostOutputAttributeMapping->i_MappingCount; pHostMappings->p_Mappings = NULL; CUDA_CHECK_RETURN(cudaMalloc( (void**) &pHostMappings->p_Mappings, sizeof(AttributeMapping) * p_HostOutputAttributeMapping->i_MappingCount)); CUDA_CHECK_RETURN(cudaMemcpy( pHostMappings->p_Mappings, p_HostOutputAttributeMapping->p_Mappings, sizeof(AttributeMapping) * p_HostOutputAttributeMapping->i_MappingCount, cudaMemcpyHostToDevice)); CUDA_CHECK_RETURN(cudaMemcpy( p_DeviceOutputAttributeMapping, pHostMappings, sizeof(AttributeMappings), cudaMemcpyHostToDevice)); CUDA_CHECK_RETURN(cudaPeekAtLastError()); CUDA_CHECK_RETURN(cudaThreadSynchronize()); free(pHostMappings); pHostMappings = NULL; } if(p_JoinProcessor->GetThreadWorkSize() != 0) { i_LeftThreadWorkSize = p_JoinProcessor->GetThreadWorkSize(); i_RightThreadWorkSize = p_JoinProcessor->GetThreadWorkSize(); } if(i_LeftThreadWorkSize >= i_RightStreamWindowSize) { i_LeftThreadWorkSize = i_RightStreamWindowSize; } if(i_RightThreadWorkSize >= i_LeftStreamWindowSize) { i_RightThreadWorkSize = i_LeftStreamWindowSize; } i_LeftThreadWorkerCount = ceil((float)i_RightStreamWindowSize / i_LeftThreadWorkSize); i_RightThreadWorkerCount = ceil((float)i_LeftStreamWindowSize / i_RightThreadWorkSize); fprintf(fp_LeftLog, "[GpuJoinKernel] LeftThreadWorkSize=%d RightThreadWorkSize=%d\n", i_LeftThreadWorkSize, i_RightThreadWorkSize); fflush(fp_LeftLog); fprintf(fp_RightLog, "[GpuJoinKernel] LeftThreadWorkSize=%d RightThreadWorkSize=%d\n", i_LeftThreadWorkSize, i_RightThreadWorkSize); fflush(fp_RightLog); fprintf(fp_LeftLog, "[GpuJoinKernel] LeftThreadWorkCount=%d RightThreadWorkCount=%d\n", i_LeftThreadWorkerCount, i_RightThreadWorkerCount); fflush(fp_LeftLog); fprintf(fp_RightLog, "[GpuJoinKernel] LeftThreadWorkCount=%d RightThreadWorkCount=%d\n", i_LeftThreadWorkerCount, i_RightThreadWorkerCount); fflush(fp_RightLog); CUDA_CHECK_RETURN(cudaMalloc((void**) &p_DeviceParametersLeft, sizeof(JoinKernelParameters))); JoinKernelParameters * pHostParameters = (JoinKernelParameters*) malloc(sizeof(JoinKernelParameters)); pHostParameters->p_InputEventBuffer = p_LeftInputEventBuffer->GetDeviceEventBuffer(); pHostParameters->p_InputMetaEvent = p_LeftInputEventBuffer->GetDeviceMetaEvent(); pHostParameters->p_EventWindowBuffer = p_LeftWindowEventBuffer->GetDeviceEventBuffer(); pHostParameters->i_WindowLength = i_LeftStreamWindowSize; pHostParameters->p_OtherStreamMetaEvent = p_RightInputEventBuffer->GetDeviceMetaEvent(); pHostParameters->p_OtherEventWindowBuffer = p_RightWindowEventBuffer->GetReadOnlyDeviceEventBuffer(); pHostParameters->i_OtherWindowLength = i_RightStreamWindowSize; 
pHostParameters->p_OnCompareFilter = p_DeviceOnCompareFilter; pHostParameters->i_WithInTime = p_JoinProcessor->GetWithInTimeMilliSeconds(); pHostParameters->p_OutputStreamMetaEvent = p_LeftResultEventBuffer->GetDeviceMetaEvent(); pHostParameters->p_ResultsBuffer = p_LeftResultEventBuffer->GetDeviceEventBuffer(); pHostParameters->p_OutputAttribMappings = p_DeviceOutputAttributeMapping; pHostParameters->i_EventsPerBlock = i_ThreadBlockSize; pHostParameters->i_WorkSize = i_LeftThreadWorkSize; CUDA_CHECK_RETURN(cudaMemcpy( p_DeviceParametersLeft, pHostParameters, sizeof(JoinKernelParameters), cudaMemcpyHostToDevice)); free(pHostParameters); pHostParameters = NULL; CUDA_CHECK_RETURN(cudaMalloc((void**) &p_DeviceParametersRight, sizeof(JoinKernelParameters))); pHostParameters = (JoinKernelParameters*) malloc(sizeof(JoinKernelParameters)); pHostParameters->p_InputEventBuffer = p_RightInputEventBuffer->GetDeviceEventBuffer(); pHostParameters->p_InputMetaEvent = p_RightInputEventBuffer->GetDeviceMetaEvent(); pHostParameters->p_EventWindowBuffer = p_RightWindowEventBuffer->GetDeviceEventBuffer(); pHostParameters->i_WindowLength = i_RightStreamWindowSize; pHostParameters->p_OtherStreamMetaEvent = p_LeftInputEventBuffer->GetDeviceMetaEvent(); pHostParameters->p_OtherEventWindowBuffer = p_LeftWindowEventBuffer->GetReadOnlyDeviceEventBuffer(); pHostParameters->i_OtherWindowLength = i_LeftStreamWindowSize; pHostParameters->p_OnCompareFilter = p_DeviceOnCompareFilter; pHostParameters->i_WithInTime = p_JoinProcessor->GetWithInTimeMilliSeconds(); pHostParameters->p_OutputStreamMetaEvent = p_RightResultEventBuffer->GetDeviceMetaEvent(); pHostParameters->p_ResultsBuffer = p_RightResultEventBuffer->GetDeviceEventBuffer(); pHostParameters->p_OutputAttribMappings = p_DeviceOutputAttributeMapping; pHostParameters->i_EventsPerBlock = i_ThreadBlockSize; pHostParameters->i_WorkSize = i_RightThreadWorkSize; CUDA_CHECK_RETURN(cudaMemcpy( p_DeviceParametersRight, pHostParameters, sizeof(JoinKernelParameters), cudaMemcpyHostToDevice)); free(pHostParameters); pHostParameters = NULL; fprintf(fp_LeftLog, "[GpuJoinKernel] Initialization complete\n"); fflush(fp_LeftLog); fprintf(fp_RightLog, "[GpuJoinKernel] Initialization complete\n"); fflush(fp_RightLog); } return true; } void GpuJoinKernel::Process(int _iStreamIndex, int & _iNumEvents) { if(_iStreamIndex == 0) { ProcessLeftStream(_iStreamIndex, _iNumEvents); } else if(_iStreamIndex == 1) { ProcessRightStream(_iStreamIndex, _iNumEvents); } } void GpuJoinKernel::ProcessLeftStream(int _iStreamIndex, int & _iNumEvents) { #if GPU_DEBUG >= GPU_DEBUG_LEVEL_DEBUG fprintf(fp_LeftLog, "[GpuJoinKernel] ProcessLeftStream : StreamIndex=%d EventCount=%d\n", _iStreamIndex, _iNumEvents); GpuUtils::PrintThreadInfo("GpuJoinKernel::ProcessLeftStream", fp_LeftLog); fflush(fp_LeftLog); #endif if(!b_LeftDeviceSet) { GpuCudaHelper::SelectDevice(i_DeviceId, "GpuJoinKernel::Left", fp_LeftLog); b_LeftDeviceSet = true; } #ifdef KERNEL_TIME sdkStartTimer(&p_StopWatch); #endif if(b_LeftFirstKernel) { p_LeftInputEventBuffer->CopyToDevice(true); } // call entry kernel int numBlocksX = ceil((float)_iNumEvents * i_LeftThreadWorkerCount / (float)i_ThreadBlockSize); int numBlocksY = 1; dim3 numBlocks = dim3(numBlocksX, numBlocksY); dim3 numThreads = dim3(i_ThreadBlockSize, 1); #if GPU_DEBUG >= GPU_DEBUG_LEVEL_INFO fprintf(fp_LeftLog, "[GpuJoinKernel] ProcessLeftStream : Invoke kernel Blocks(%d,%d) Threads(%d,%d)\n", numBlocksX, numBlocksY, i_ThreadBlockSize, 1); fprintf(fp_LeftLog, "[GpuJoinKernel] 
ProcessLeftStream : NumEvents=%d LeftWindow=(%d/%d) RightWindow=(%d/%d) WithIn=%llu\n", _iNumEvents, p_LeftWindowEventBuffer->GetRemainingCount(), i_LeftStreamWindowSize, p_RightWindowEventBuffer->GetRemainingCount(), i_RightStreamWindowSize, p_JoinProcessor->GetWithInTimeMilliSeconds()); fflush(fp_LeftLog); #endif #if GPU_DEBUG >= GPU_DEBUG_LEVEL_TRACE GpuUtils::PrintByteBuffer(p_LeftInputEventBuffer->GetHostEventBuffer(), _iNumEvents, p_LeftInputEventBuffer->GetHostMetaEvent(), "GpuJoinKernel:LeftInputBuffer", fp_LeftLog); p_LeftWindowEventBuffer->CopyToHost(false); GpuUtils::PrintByteBuffer(p_LeftWindowEventBuffer->GetHostEventBuffer(), (i_LeftStreamWindowSize - p_LeftWindowEventBuffer->GetRemainingCount()), p_LeftWindowEventBuffer->GetHostMetaEvent(), "GpuJoinKernel:LeftWindowBuffer", fp_LeftLog); p_RightWindowEventBuffer->CopyToHost(false); GpuUtils::PrintByteBuffer(p_RightWindowEventBuffer->GetHostEventBuffer(), (i_RightStreamWindowSize - p_RightWindowEventBuffer->GetRemainingCount()), p_RightWindowEventBuffer->GetHostMetaEvent(), "GpuJoinKernel:RightWindowBuffer", fp_LeftLog); fflush(fp_LeftLog); #endif // char * _pInputEventBuffer, // input events buffer // GpuKernelMetaEvent * _pInputMetaEvent, // Meta event for input events // int _iInputNumberOfEvents, // Number of events in input buffer // char * _pEventWindowBuffer, // Event window buffer of this stream // int _iWindowLength, // Length of current events window // int _iRemainingCount, // Remaining free slots in Window buffer // GpuKernelMetaEvent * _pOtherStreamMetaEvent, // Meta event for other stream // char * _pOtherEventWindowBuffer, // Event window buffer of other stream // int _iOtherWindowLength, // Length of current events window of other stream // int _iOtherRemainingCount, // Remaining free slots in Window buffer of other stream // GpuKernelFilter * _pOnCompareFilter, // OnCompare filter buffer - pre-copied at initialization // int _iWithInTime, // WithIn time in milliseconds // GpuKernelMetaEvent * _pOutputStreamMetaEvent, // Meta event for output stream // char * _pResultsBuffer, // Resulting events buffer for this stream // AttributeMappings * _pOutputAttribMappings, // Output event attribute mappings // int _iEventsPerBlock // number of events allocated per block if(p_JoinProcessor->GetLeftTrigger()) { if(p_JoinProcessor->GetCurrentOn() && p_JoinProcessor->GetExpiredOn()) { ProcessEventsJoinLeftTriggerAllOn<<<numBlocks, numThreads>>>( p_LeftInputEventBuffer->GetDeviceEventBuffer(), p_LeftInputEventBuffer->GetDeviceMetaEvent(), _iNumEvents, p_LeftWindowEventBuffer->GetDeviceEventBuffer(), i_LeftStreamWindowSize, p_LeftWindowEventBuffer->GetRemainingCount(), p_RightInputEventBuffer->GetDeviceMetaEvent(), p_RightWindowEventBuffer->GetReadOnlyDeviceEventBuffer(), i_RightStreamWindowSize, p_RightWindowEventBuffer->GetRemainingCount(), p_DeviceOnCompareFilter, p_JoinProcessor->GetWithInTimeMilliSeconds(), p_LeftResultEventBuffer->GetDeviceMetaEvent(), p_LeftResultEventBuffer->GetDeviceEventBuffer(), p_DeviceOutputAttributeMapping, i_ThreadBlockSize ); } else if(p_JoinProcessor->GetCurrentOn()) { int iSharedSize = (i_ThreadBlockSize * p_LeftInputEventBuffer->GetHostMetaEvent()->i_SizeOfEventInBytes / i_LeftThreadWorkerCount); ProcessEventsJoinLeftTriggerCurrentOn<<<numBlocks, numThreads, iSharedSize>>>( p_DeviceParametersLeft, _iNumEvents, p_LeftWindowEventBuffer->GetRemainingCount(), p_RightWindowEventBuffer->GetRemainingCount() ); } else if(p_JoinProcessor->GetExpiredOn()) { 
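// Expired-only trigger: for each incoming left-stream event the kernel locates the
// event being pushed out of the left window (for event indexes past the window length
// the expiring event comes from the input batch itself), joins it against the right
// window and emits GpuEvent::EXPIRED results into that event's segment. Only the
// CurrentOn branch above consumes the thread-worker-count factor and stages input
// events in shared memory; the kernel launched here is written for one thread per
// input event.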
ProcessEventsJoinLeftTriggerExpiredOn<<<numBlocks, numThreads>>>( p_LeftInputEventBuffer->GetDeviceEventBuffer(), p_LeftInputEventBuffer->GetDeviceMetaEvent(), _iNumEvents, p_LeftWindowEventBuffer->GetDeviceEventBuffer(), i_LeftStreamWindowSize, p_LeftWindowEventBuffer->GetRemainingCount(), p_RightInputEventBuffer->GetDeviceMetaEvent(), p_RightWindowEventBuffer->GetReadOnlyDeviceEventBuffer(), i_RightStreamWindowSize, p_RightWindowEventBuffer->GetRemainingCount(), p_DeviceOnCompareFilter, p_JoinProcessor->GetWithInTimeMilliSeconds(), p_LeftResultEventBuffer->GetDeviceMetaEvent(), p_LeftResultEventBuffer->GetDeviceEventBuffer(), p_DeviceOutputAttributeMapping, i_ThreadBlockSize ); } } if(b_LastKernel) { p_LeftResultEventBuffer->CopyToHost(true); #if GPU_DEBUG >= GPU_DEBUG_LEVEL_DEBUG fprintf(fp_LeftLog, "[GpuJoinKernel] Results copied \n"); fflush(fp_LeftLog); #endif } numBlocksX = ceil((float)_iNumEvents / (float)i_ThreadBlockSize); numBlocks = dim3(numBlocksX, numBlocksY); // we need to synchronize processing of JoinKernel as only one batch of events can be there at a time // pthread_mutex_lock(&mtx_Lock); // char * _pInputEventBuffer, // original input events buffer // int _iNumberOfEvents, // Number of events in input buffer (matched + not matched) // char * _pEventWindowBuffer, // Event window buffer // int _iWindowLength, // Length of current events window // int _iRemainingCount, // Remaining free slots in Window buffer // int _iMaxEventCount, // used for setting results array // int _iSizeOfEvent, // Size of an event // int _iEventsPerBlock // number of events allocated per block JoinSetWindowState<<<numBlocks, numThreads>>>( p_LeftInputEventBuffer->GetDeviceEventBuffer(), _iNumEvents, p_LeftWindowEventBuffer->GetDeviceEventBuffer(), i_LeftStreamWindowSize, p_LeftWindowEventBuffer->GetRemainingCount(), p_LeftInputEventBuffer->GetMaxEventCount(), p_LeftInputEventBuffer->GetHostMetaEvent()->i_SizeOfEventInBytes, i_ThreadBlockSize ); CUDA_CHECK_RETURN(cudaPeekAtLastError()); CUDA_CHECK_RETURN(cudaThreadSynchronize()); p_LeftWindowEventBuffer->Sync(_iNumEvents, true); #if GPU_DEBUG >= GPU_DEBUG_LEVEL_INFO fprintf(fp_LeftLog, "[GpuJoinKernel] Kernel complete \n"); fflush(fp_LeftLog); #endif #ifdef KERNEL_TIME sdkStopTimer(&p_StopWatch); float fElapsed = sdkGetTimerValue(&p_StopWatch); fprintf(fp_LeftLog, "[GpuJoinKernel] Stats : Elapsed=%f ms\n", fElapsed); fflush(fp_LeftLog); lst_ElapsedTimes.push_back(fElapsed); sdkResetTimer(&p_StopWatch); #endif // if(_iNumEvents > i_LeftRemainingCount) // { // i_LeftRemainingCount = 0; // } // else // { // i_LeftRemainingCount -= _iNumEvents; // } // pthread_mutex_unlock(&mtx_Lock); if(!p_JoinProcessor->GetLeftTrigger()) { _iNumEvents = 0; } else { _iNumEvents = _iNumEvents * i_LeftNumEventPerSegment; } #if GPU_DEBUG >= GPU_DEBUG_LEVEL_DEBUG GpuUtils::PrintByteBuffer(p_LeftResultEventBuffer->GetHostEventBuffer(), _iNumEvents, p_LeftResultEventBuffer->GetHostMetaEvent(), "GpuJoinKernel:LeftResultEventBuffer", fp_LeftLog); fflush(fp_LeftLog); #endif #if GPU_DEBUG >= GPU_DEBUG_LEVEL_TRACE p_LeftWindowEventBuffer->CopyToHost(true); CUDA_CHECK_RETURN(cudaThreadSynchronize()); GpuUtils::PrintByteBuffer(p_LeftWindowEventBuffer->GetHostEventBuffer(), (i_LeftStreamWindowSize - p_LeftWindowEventBuffer->GetRemainingCount()), p_LeftWindowEventBuffer->GetHostMetaEvent(), "GpuJoinKernel:LeftWindowBuffer", fp_LeftLog); fflush(fp_LeftLog); #endif } void GpuJoinKernel::ProcessRightStream(int _iStreamIndex, int & _iNumEvents) { #if GPU_DEBUG >= GPU_DEBUG_LEVEL_DEBUG 
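// ProcessRightStream mirrors ProcessLeftStream with the stream roles swapped: the
// right-stream input batch is joined against the left window (using
// p_DeviceParametersRight for the CurrentOn variant), results are written to
// p_RightResultEventBuffer, and the right window state is then updated with
// JoinSetWindowState in the same way.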
fprintf(fp_RightLog, "[GpuJoinKernel] ProcessRightStream : StreamIndex=%d EventCount=%d\n", _iStreamIndex, _iNumEvents); GpuUtils::PrintThreadInfo("GpuJoinKernel::ProcessRightStream", fp_RightLog); fflush(fp_RightLog); #endif if(!b_RightDeviceSet) { GpuCudaHelper::SelectDevice(i_DeviceId, "GpuJoinKernel::Right", fp_RightLog); b_RightDeviceSet = true; } #ifdef KERNEL_TIME sdkStartTimer(&p_StopWatch); #endif if(b_RightFirstKernel) { p_RightInputEventBuffer->CopyToDevice(true); } // call entry kernel int numBlocksX = ceil((float)_iNumEvents * i_RightThreadWorkerCount / (float)i_ThreadBlockSize); int numBlocksY = 1; dim3 numBlocks = dim3(numBlocksX, numBlocksY); dim3 numThreads = dim3(i_ThreadBlockSize, 1); #if GPU_DEBUG >= GPU_DEBUG_LEVEL_INFO fprintf(fp_RightLog, "[GpuJoinKernel] ProcessRightStream : Invoke kernel Blocks(%d,%d) Threads(%d,%d)\n", numBlocksX, numBlocksY, i_ThreadBlockSize, 1); fprintf(fp_RightLog, "[GpuJoinKernel] ProcessRightStream : NumEvents=%d LeftWindow=(%d/%d) RightWindow=(%d/%d) WithIn=%llu\n", _iNumEvents, p_LeftWindowEventBuffer->GetRemainingCount(), i_LeftStreamWindowSize, p_RightWindowEventBuffer->GetRemainingCount(), i_RightStreamWindowSize, p_JoinProcessor->GetWithInTimeMilliSeconds()); fflush(fp_RightLog); #endif #if GPU_DEBUG >= GPU_DEBUG_LEVEL_TRACE GpuUtils::PrintByteBuffer(p_RightInputEventBuffer->GetHostEventBuffer(), _iNumEvents, p_RightInputEventBuffer->GetHostMetaEvent(), "GpuJoinKernel:RightInputBuffer", fp_RightLog); p_LeftWindowEventBuffer->CopyToHost(false); GpuUtils::PrintByteBuffer(p_LeftWindowEventBuffer->GetHostEventBuffer(), (i_LeftStreamWindowSize - p_LeftWindowEventBuffer->GetRemainingCount()), p_LeftWindowEventBuffer->GetHostMetaEvent(), "GpuJoinKernel:LeftWindowBuffer", fp_RightLog); p_RightWindowEventBuffer->CopyToHost(false); GpuUtils::PrintByteBuffer(p_RightWindowEventBuffer->GetHostEventBuffer(), (i_RightStreamWindowSize - p_RightWindowEventBuffer->GetRemainingCount()), p_RightWindowEventBuffer->GetHostMetaEvent(), "GpuJoinKernel:RightWindowBuffer", fp_RightLog); fflush(fp_RightLog); #endif // char * _pInputEventBuffer, // input events buffer // GpuKernelMetaEvent * _pInputMetaEvent, // Meta event for input events // int _iInputNumberOfEvents, // Number of events in input buffer // char * _pEventWindowBuffer, // Event window buffer of this stream // int _iWindowLength, // Length of current events window // int _iRemainingCount, // Remaining free slots in Window buffer // GpuKernelMetaEvent * _pOtherStreamMetaEvent, // Meta event for other stream // char * _pOtherEventWindowBuffer, // Event window buffer of other stream // int _iOtherWindowLength, // Length of current events window of other stream // int _iOtherRemainingCount, // Remaining free slots in Window buffer of other stream // GpuKernelFilter * _pOnCompareFilter, // OnCompare filter buffer - pre-copied at initialization // int _iWithInTime, // WithIn time in milliseconds // GpuKernelMetaEvent * _pOutputStreamMetaEvent, // Meta event for output stream // char * _pResultsBuffer, // Resulting events buffer for this stream // AttributeMappings * _pOutputAttribMappings, // Output event attribute mappings // int _iEventsPerBlock // number of events allocated per block if(p_JoinProcessor->GetRightTrigger()) { if(p_JoinProcessor->GetCurrentOn() && p_JoinProcessor->GetExpiredOn()) { ProcessEventsJoinRightTriggerAllOn<<<numBlocks, numThreads>>>( p_RightInputEventBuffer->GetDeviceEventBuffer(), p_RightInputEventBuffer->GetDeviceMetaEvent(), _iNumEvents, 
p_RightWindowEventBuffer->GetDeviceEventBuffer(), i_RightStreamWindowSize, p_RightWindowEventBuffer->GetRemainingCount(), p_LeftInputEventBuffer->GetDeviceMetaEvent(), p_LeftWindowEventBuffer->GetReadOnlyDeviceEventBuffer(), i_LeftStreamWindowSize, p_LeftWindowEventBuffer->GetRemainingCount(), p_DeviceOnCompareFilter, p_JoinProcessor->GetWithInTimeMilliSeconds(), p_RightResultEventBuffer->GetDeviceMetaEvent(), p_RightResultEventBuffer->GetDeviceEventBuffer(), p_DeviceOutputAttributeMapping, i_ThreadBlockSize ); } else if(p_JoinProcessor->GetCurrentOn()) { int iSharedSize = (i_ThreadBlockSize * p_RightInputEventBuffer->GetHostMetaEvent()->i_SizeOfEventInBytes / i_RightThreadWorkerCount); ProcessEventsJoinRightTriggerCurrentOn<<<numBlocks, numThreads, iSharedSize>>>( p_DeviceParametersRight, _iNumEvents, p_RightWindowEventBuffer->GetRemainingCount(), p_LeftWindowEventBuffer->GetRemainingCount() ); } else if(p_JoinProcessor->GetExpiredOn()) { ProcessEventsJoinRightTriggerExpireOn<<<numBlocks, numThreads>>>( p_RightInputEventBuffer->GetDeviceEventBuffer(), p_RightInputEventBuffer->GetDeviceMetaEvent(), _iNumEvents, p_RightWindowEventBuffer->GetDeviceEventBuffer(), i_RightStreamWindowSize, p_RightWindowEventBuffer->GetRemainingCount(), p_LeftInputEventBuffer->GetDeviceMetaEvent(), p_LeftWindowEventBuffer->GetReadOnlyDeviceEventBuffer(), i_LeftStreamWindowSize, p_LeftWindowEventBuffer->GetRemainingCount(), p_DeviceOnCompareFilter, p_JoinProcessor->GetWithInTimeMilliSeconds(), p_RightResultEventBuffer->GetDeviceMetaEvent(), p_RightResultEventBuffer->GetDeviceEventBuffer(), p_DeviceOutputAttributeMapping, i_ThreadBlockSize ); } } if(b_LastKernel) { p_RightResultEventBuffer->CopyToHost(true); #if GPU_DEBUG >= GPU_DEBUG_LEVEL_DEBUG fprintf(fp_RightLog, "[GpuJoinKernel] Results copied \n"); fflush(fp_RightLog); #endif } numBlocksX = ceil((float)_iNumEvents / (float)i_ThreadBlockSize); numBlocks = dim3(numBlocksX, numBlocksY); // we need to synchronize processing of JoinKernel as only one batch of events can be there at a time // pthread_mutex_lock(&mtx_Lock); // char * _pInputEventBuffer, // original input events buffer // int _iNumberOfEvents, // Number of events in input buffer (matched + not matched) // char * _pEventWindowBuffer, // Event window buffer // int _iWindowLength, // Length of current events window // int _iRemainingCount, // Remaining free slots in Window buffer // int _iMaxEventCount, // used for setting results array // int _iSizeOfEvent, // Size of an event // int _iEventsPerBlock // number of events allocated per block JoinSetWindowState<<<numBlocks, numThreads>>>( p_RightInputEventBuffer->GetDeviceEventBuffer(), _iNumEvents, p_RightWindowEventBuffer->GetDeviceEventBuffer(), i_RightStreamWindowSize, p_RightWindowEventBuffer->GetRemainingCount(), p_RightInputEventBuffer->GetMaxEventCount(), p_RightInputEventBuffer->GetHostMetaEvent()->i_SizeOfEventInBytes, i_ThreadBlockSize ); CUDA_CHECK_RETURN(cudaPeekAtLastError()); CUDA_CHECK_RETURN(cudaThreadSynchronize()); p_RightWindowEventBuffer->Sync(_iNumEvents, true); #if GPU_DEBUG >= GPU_DEBUG_LEVEL_INFO fprintf(fp_RightLog, "[GpuJoinKernel] Kernel complete \n"); fflush(fp_RightLog); #endif #ifdef KERNEL_TIME sdkStopTimer(&p_StopWatch); float fElapsed = sdkGetTimerValue(&p_StopWatch); fprintf(fp_RightLog, "[GpuJoinKernel] Stats : Elapsed=%f ms\n", fElapsed); fflush(fp_RightLog); lst_ElapsedTimes.push_back(fElapsed); sdkResetTimer(&p_StopWatch); #endif // if(_iNumEvents > i_RightRemainingCount) // { // i_RightRemainingCount = 0; // } // 
else // { // i_RightRemainingCount -= _iNumEvents; // } // pthread_mutex_unlock(&mtx_Lock); if(!p_JoinProcessor->GetRightTrigger()) { _iNumEvents = 0; } else { _iNumEvents = _iNumEvents * i_RightNumEventPerSegment; } #if GPU_DEBUG >= GPU_DEBUG_LEVEL_DEBUG GpuUtils::PrintByteBuffer(p_RightResultEventBuffer->GetHostEventBuffer(), _iNumEvents, p_RightResultEventBuffer->GetHostMetaEvent(), "GpuJoinKernel:RightResultEventBuffer", fp_RightLog); fflush(fp_RightLog); #endif #if GPU_DEBUG >= GPU_DEBUG_LEVEL_TRACE p_RightWindowEventBuffer->CopyToHost(true); CUDA_CHECK_RETURN(cudaThreadSynchronize()); GpuUtils::PrintByteBuffer(p_RightWindowEventBuffer->GetHostEventBuffer(), (i_RightStreamWindowSize - p_RightWindowEventBuffer->GetRemainingCount()), p_RightWindowEventBuffer->GetHostMetaEvent(), "GpuJoinKernel:RightWindowBuffer", fp_RightLog); fflush(fp_RightLog); #endif } char * GpuJoinKernel::GetResultEventBuffer() { return NULL; } int GpuJoinKernel::GetResultEventBufferSize() { return 0; } char * GpuJoinKernel::GetLeftResultEventBuffer() { return p_LeftResultEventBuffer->GetHostEventBuffer(); } int GpuJoinKernel::GetLeftResultEventBufferSize() { return p_LeftResultEventBuffer->GetEventBufferSizeInBytes(); } char * GpuJoinKernel::GetRightResultEventBuffer() { return p_RightResultEventBuffer->GetHostEventBuffer(); } int GpuJoinKernel::GetRightResultEventBufferSize() { return p_RightResultEventBuffer->GetEventBufferSizeInBytes(); } } #endif
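/*
   Illustrative sketch (not part of the original file): how a host-side consumer could
   walk the results written by the join kernels above. The kernels emit the output
   buffer as a sequence of fixed-size groups of output events; within each group the
   matches are packed from the front and the first unused slot is stamped
   GpuEvent::RESET (a group may also begin with RESET when there were no matches). The
   group size and the number of groups per input event depend on the trigger mode
   (e.g. one group of "other window length" slots per event for the expired-only
   kernels, workerCount groups of i_WorkSize slots per event for the CurrentOn kernels,
   and a current half followed by an expired half for the AllOn kernels). The function
   name and parameters below are hypothetical.

   static int CountJoinedEvents(const char * pResults, int iGroupCount,
                                int iSlotsPerGroup, int iOutputEventSize)
   {
       int iTotal = 0;
       for(int g = 0; g < iGroupCount; ++g)
       {
           // start of the g-th RESET-terminated group of output slots
           const char * pGroup = pResults + ((size_t)g * iSlotsPerGroup * iOutputEventSize);
           for(int s = 0; s < iSlotsPerGroup; ++s)
           {
               const GpuEvent * pEvent = (const GpuEvent *)(pGroup + (s * iOutputEventSize));
               if(pEvent->i_Type == GpuEvent::RESET)
                   break;              // first unused slot - skip to the next group
               ++iTotal;               // a CURRENT or EXPIRED joined event
           }
       }
       return iTotal;
   }
*/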
f2ce91296edd7af9a4e680fd6eb10dab41152f25.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from ztrtri_diag_batched.cu normal z -> d, Fri Sep 11 18:29:22 2015 @author Peng Du @author Tingxing Dong @author Mark Gates @author Azzam Haidar File named dtrtri_diag.cu to avoid name conflict with src/dtrtri.o in the library. The actual kernels are in dtrtri_lower.cu and dtrtri_upper.cu */ #include "common_magma.h" #include "dtrtri.cuh" /** Purpose ------- DTRTRI_DIAG inverts the NB x NB diagonal blocks of a triangular matrix. This routine is used in dtrsm. Arguments ---------- @param[in] uplo magma_uplo_t. On entry, uplo specifies whether the matrix A is an upper or lower triangular matrix as follows: - = MagmaUpper: A is an upper triangular matrix. - = MagmaLower: A is a lower triangular matrix. @param[in] diag magma_diag_t. On entry, diag specifies whether or not A is unit triangular as follows: - = MagmaUnit: A is assumed to be unit triangular. - = MagmaNonUnit: A is not assumed to be unit triangular. @param[in] n INTEGER. On entry, n specifies the order of the matrix A. N >= 0. @param[in] dA_array Array of pointers, dimension (batchCount). Each is a DOUBLE_PRECISION array A of dimension ( ldda, n ) The triangular matrix A. \n If UPLO = 'U', the leading N-by-N upper triangular part of A contains the upper triangular matrix, and the strictly lower triangular part of A is not referenced. \n If UPLO = 'L', the leading N-by-N lower triangular part of A contains the lower triangular matrix, and the strictly upper triangular part of A is not referenced. \n If DIAG = 'U', the diagonal elements of A are also not referenced and are assumed to be 1. @param[in] ldda INTEGER. The leading dimension of each array A. LDDA >= max(1,N). @param[out] dinvA_array Array of pointers, dimension (batchCount). Each is a DOUBLE_PRECISION array dinvA of dimension (NB, ceil(n/NB)*NB), where NB = 128. On exit, contains inverses of the NB-by-NB diagonal blocks of A. @param[in] resetozero INTEGER If not zero, each array dinvA will be reset to all zeros @param[in] batchCount INTEGER The number of matrices to operate on. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_dblas3 ********************************************************************/ extern "C" void magmablas_dtrtri_diag_batched( magma_uplo_t uplo, magma_diag_t diag, magma_int_t n, double const * const *dA_array, magma_int_t ldda, double **dinvA_array, magma_int_t resetozero, magma_int_t batchCount, magma_queue_t queue) { magma_int_t info = 0; if (uplo != MagmaLower && uplo != MagmaUpper) info = -1; else if (diag != MagmaNonUnit && diag != MagmaUnit) info = -2; else if (n < 0) info = -3; else if (ldda < n) info = -5; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info } int nblocks = magma_ceildiv( n, IB ); if ( resetozero ) { magmablas_dlaset_batched(MagmaFull, magma_roundup( n, NB ), NB, MAGMA_D_ZERO, MAGMA_D_ZERO, dinvA_array, magma_roundup( n, NB ), batchCount, queue); } // if someone want to use cudamemset he need to set the whole vectors // of initial size otherwise it is a bug and thus need to have dinvA_length // in input parameter and has been tested and was slower. 
//was not the largest size computed by the high API getrf_batched then it is bug and need to use magmablas_dlaset_batched if ( uplo == MagmaLower ) { // invert diagonal IB x IB inner blocks dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid hipLaunchKernelGGL(( dtrtri_diag_lower_kernel_batched), dim3(diaggrid), dim3(IB), 0, queue , diag, n, dA_array, ldda, dinvA_array ); // build up NB x NB blocks (assuming IB=16 here): // use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads; // then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads; // then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads; // then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads. for( int jb=IB; jb < NB; jb *= 2 ) { int kb = jb*2; int npages = magma_ceildiv( n, kb ); dim3 threads( (jb <= 32 ? jb/4 : 16), 4 ); dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x //printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages ); switch (jb) { case 16: hipLaunchKernelGGL(( triple_dgemm16_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages ); hipLaunchKernelGGL(( triple_dgemm16_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages ); break; case 32: hipLaunchKernelGGL(( triple_dgemm32_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages ); hipLaunchKernelGGL(( triple_dgemm32_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages ); break; case 64: hipLaunchKernelGGL(( triple_dgemm64_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages ); hipLaunchKernelGGL(( triple_dgemm64_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages ); break; default: hipLaunchKernelGGL(( triple_dgemm_above64_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages ); hipLaunchKernelGGL(( triple_dgemm_above64_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages ); hipLaunchKernelGGL(( triple_dgemm_above64_part3_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages ); break; } if ( kb >= n ) break; } } else { dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid hipLaunchKernelGGL(( dtrtri_diag_upper_kernel_batched), dim3(diaggrid), dim3(IB), 0, queue , diag, n, dA_array, ldda, dinvA_array ); // update the inverse up to the size of IB for( int jb=IB; jb < NB; jb *= 2 ) { int kb = jb*2; int npages = magma_ceildiv( n, kb ); dim3 threads( (jb <= 32 ? 
jb/4 : 16), 4 ); dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x switch (jb) { case 16: hipLaunchKernelGGL(( triple_dgemm16_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages ); hipLaunchKernelGGL(( triple_dgemm16_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages ); break; case 32: hipLaunchKernelGGL(( triple_dgemm32_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages ); hipLaunchKernelGGL(( triple_dgemm32_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages ); break; case 64: hipLaunchKernelGGL(( triple_dgemm64_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages ); hipLaunchKernelGGL(( triple_dgemm64_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages ); break; default: hipLaunchKernelGGL(( triple_dgemm_above64_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages ); hipLaunchKernelGGL(( triple_dgemm_above64_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages ); hipLaunchKernelGGL(( triple_dgemm_above64_part3_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , n, dA_array, ldda, dinvA_array, jb, npages ); break; } if ( kb >= n ) break; } } }
f2ce91296edd7af9a4e680fd6eb10dab41152f25.cu
/* -- MAGMA (version 1.7.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2015 @generated from ztrtri_diag_batched.cu normal z -> d, Fri Sep 11 18:29:22 2015 @author Peng Du @author Tingxing Dong @author Mark Gates @author Azzam Haidar File named dtrtri_diag.cu to avoid name conflict with src/dtrtri.o in the library. The actual kernels are in dtrtri_lower.cu and dtrtri_upper.cu */ #include "common_magma.h" #include "dtrtri.cuh" /** Purpose ------- DTRTRI_DIAG inverts the NB x NB diagonal blocks of a triangular matrix. This routine is used in dtrsm. Arguments ---------- @param[in] uplo magma_uplo_t. On entry, uplo specifies whether the matrix A is an upper or lower triangular matrix as follows: - = MagmaUpper: A is an upper triangular matrix. - = MagmaLower: A is a lower triangular matrix. @param[in] diag magma_diag_t. On entry, diag specifies whether or not A is unit triangular as follows: - = MagmaUnit: A is assumed to be unit triangular. - = MagmaNonUnit: A is not assumed to be unit triangular. @param[in] n INTEGER. On entry, n specifies the order of the matrix A. N >= 0. @param[in] dA_array Array of pointers, dimension (batchCount). Each is a DOUBLE_PRECISION array A of dimension ( ldda, n ) The triangular matrix A. \n If UPLO = 'U', the leading N-by-N upper triangular part of A contains the upper triangular matrix, and the strictly lower triangular part of A is not referenced. \n If UPLO = 'L', the leading N-by-N lower triangular part of A contains the lower triangular matrix, and the strictly upper triangular part of A is not referenced. \n If DIAG = 'U', the diagonal elements of A are also not referenced and are assumed to be 1. @param[in] ldda INTEGER. The leading dimension of each array A. LDDA >= max(1,N). @param[out] dinvA_array Array of pointers, dimension (batchCount). Each is a DOUBLE_PRECISION array dinvA of dimension (NB, ceil(n/NB)*NB), where NB = 128. On exit, contains inverses of the NB-by-NB diagonal blocks of A. @param[in] resetozero INTEGER If not zero, each array dinvA will be reset to all zeros @param[in] batchCount INTEGER The number of matrices to operate on. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_dblas3 ********************************************************************/ extern "C" void magmablas_dtrtri_diag_batched( magma_uplo_t uplo, magma_diag_t diag, magma_int_t n, double const * const *dA_array, magma_int_t ldda, double **dinvA_array, magma_int_t resetozero, magma_int_t batchCount, magma_queue_t queue) { magma_int_t info = 0; if (uplo != MagmaLower && uplo != MagmaUpper) info = -1; else if (diag != MagmaNonUnit && diag != MagmaUnit) info = -2; else if (n < 0) info = -3; else if (ldda < n) info = -5; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info } int nblocks = magma_ceildiv( n, IB ); if ( resetozero ) { magmablas_dlaset_batched(MagmaFull, magma_roundup( n, NB ), NB, MAGMA_D_ZERO, MAGMA_D_ZERO, dinvA_array, magma_roundup( n, NB ), batchCount, queue); } // if someone want to use cudamemset he need to set the whole vectors // of initial size otherwise it is a bug and thus need to have dinvA_length // in input parameter and has been tested and was slower. 
//was not the largest size computed by the high API getrf_batched then it is bug and need to use magmablas_dlaset_batched if ( uplo == MagmaLower ) { // invert diagonal IB x IB inner blocks dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid dtrtri_diag_lower_kernel_batched<<< diaggrid, IB, 0, queue >>>( diag, n, dA_array, ldda, dinvA_array ); // build up NB x NB blocks (assuming IB=16 here): // use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads; // then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads; // then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads; // then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads. for( int jb=IB; jb < NB; jb *= 2 ) { int kb = jb*2; int npages = magma_ceildiv( n, kb ); dim3 threads( (jb <= 32 ? jb/4 : 16), 4 ); dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x //printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages ); switch (jb) { case 16: triple_dgemm16_part1_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages ); triple_dgemm16_part2_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages ); break; case 32: triple_dgemm32_part1_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages ); triple_dgemm32_part2_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages ); break; case 64: triple_dgemm64_part1_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages ); triple_dgemm64_part2_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages ); break; default: triple_dgemm_above64_part1_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages ); triple_dgemm_above64_part2_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages ); triple_dgemm_above64_part3_lower_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages ); break; } if ( kb >= n ) break; } } else { dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid dtrtri_diag_upper_kernel_batched<<< diaggrid, IB, 0, queue >>>( diag, n, dA_array, ldda, dinvA_array ); // update the inverse up to the size of IB for( int jb=IB; jb < NB; jb *= 2 ) { int kb = jb*2; int npages = magma_ceildiv( n, kb ); dim3 threads( (jb <= 32 ? 
jb/4 : 16), 4 ); dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x switch (jb) { case 16: triple_dgemm16_part1_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages ); triple_dgemm16_part2_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages ); break; case 32: triple_dgemm32_part1_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages ); triple_dgemm32_part2_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages ); break; case 64: triple_dgemm64_part1_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages ); triple_dgemm64_part2_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages ); break; default: triple_dgemm_above64_part1_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages ); triple_dgemm_above64_part2_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages ); triple_dgemm_above64_part3_upper_kernel_batched<<< grid, threads, 0, queue >>>( n, dA_array, ldda, dinvA_array, jb, npages ); break; } if ( kb >= n ) break; } } }
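The doubling loop in magmablas_dtrtri_diag_batched builds NB x NB inverses out of IB x IB diagonal blocks, and its comments spell out the thread and grid shapes used at each step. The host-only sketch below reproduces just that launch-shape computation so the progression is easy to inspect. IB = 16 and NB = 128 follow the comments in the file; n and batchCount are arbitrary example values, not anything prescribed by MAGMA.

#include <stdio.h>

// Integer ceiling division, mirroring magma_ceildiv.
static int ceildiv(int a, int b) { return (a + b - 1) / b; }

int main(void)
{
    const int IB = 16, NB = 128;   // block sizes stated in the file's comments
    const int n = 300;             // example matrix order (assumption)
    const int batchCount = 4;      // example batch size (assumption)

    for (int jb = IB; jb < NB; jb *= 2) {
        int kb     = jb * 2;
        int npages = ceildiv(n, kb);
        int tx     = (jb <= 32 ? jb / 4 : 16);
        int ty     = 4;
        int gx     = jb / (tx * ty);
        int gy     = npages * (jb / 16);
        printf("jb=%3d: threads=(%2d,%d)  grid=(%d,%3d,%d)\n",
               jb, tx, ty, gx, gy, batchCount);
        if (kb >= n) break;
    }
    return 0;
}

For n = 300 this prints the 4 x 4, 8 x 4, and 16 x 4 thread shapes described in the kernel comments, with the grid's y extent carrying npages times the per-page block count.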
a1065cc8796fab59671bf39bf92483722b588ee8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<stdlib.h> #include"error_check.h" #include"gpu_timer.h" #define DTYPE double #define DTYPE_FORMAT "%lf" #define BLOCK_SIZE 32 float time_cost_gpu = -1, time_cost_cpu = -1; hipEvent_t gpu_start, gpu_stop, cpu_start, cpu_stop; DTYPE partialSum(DTYPE *vector, int n) { DTYPE temp = 0; for (int i = 0; i < n; i++) { temp += vector[i]; } return temp; } /* * Todo: * reduction kernel in which the threads are mapped to data with stride 2 */ __global__ void kernel_reduction_shm_non_consecutive(DTYPE *input, DTYPE *output, int n) { int tid = threadIdx.x, offset = blockIdx.x*blockDim.x; int i = offset + tid; __shared__ DTYPE sdata[BLOCK_SIZE]; sdata[tid] = input[i]; __syncthreads(); for(int s = 1; s < blockDim.x && tid*2 + s < BLOCK_SIZE; s<<=1){ //thread sdata[tid*2] += sdata[tid*2+s]; __syncthreads(); } if(tid == 0) output[blockIdx.x] = sdata[0]; } /* * Todo: * reduction kernel in which the threads are consecutively mapped to data */ __global__ void kernel_reduction_shm_consecutive(DTYPE *input, DTYPE *output, int n) { int tid = threadIdx.x, offset = blockIdx.x*blockDim.x; int i = offset + tid; __shared__ DTYPE sdata[BLOCK_SIZE]; sdata[tid] = input[i]; __syncthreads(); for(int s = BLOCK_SIZE/2; s >= 1 && tid+s < BLOCK_SIZE; s>>=1){ sdata[tid] += sdata[tid+s]; __syncthreads(); } if(tid == 0) output[blockIdx.x] = sdata[0]; } /* * Todo: * Wrapper function that utilizes cpu computation to sum the reduced results from blocks */ DTYPE gpu_reduction_cpu(DTYPE *input, int n, void (*kernel)(DTYPE *input, DTYPE *output, int n)) { int MEM_SIZE = sizeof(DTYPE) * n; DTYPE *in = nullptr, *out = nullptr, *output = nullptr; CHECK(hipMalloc((void**)&in, MEM_SIZE)); CHECK(hipMalloc((void**)&out, MEM_SIZE)); output = (DTYPE*)malloc(MEM_SIZE); CHECK(hipMemcpy(in, input, MEM_SIZE, hipMemcpyHostToDevice)); int grid = ceil((double)n/BLOCK_SIZE); hipEventRecord(gpu_start); hipLaunchKernelGGL(( kernel), dim3(grid), dim3(BLOCK_SIZE), 0, 0, in, out, n); hipEventRecord(gpu_stop); hipEventSynchronize(gpu_stop); hipEventElapsedTime(&time_cost_gpu, gpu_start, gpu_stop); printf("Time cost (GPU):%f ms \n", time_cost_gpu); CHECK(hipMemcpy(output, out, MEM_SIZE, hipMemcpyDeviceToHost)); CHECK(hipFree(in)); CHECK(hipFree(out)); DTYPE sum = 0; for(int i = 0; i < grid; i += 1){ sum += output[i]; } free(output); return sum; } DTYPE* test_data_gen(int n) { srand(time(0)); DTYPE *data = (DTYPE *) malloc(n * sizeof(DTYPE)); for (int i = 0; i < n; i++) { data[i] = 1.0 * (rand() % RAND_MAX) / RAND_MAX; } return data; } void test(int n, DTYPE (*reduction)(DTYPE *input, int n, void (*kernel)(DTYPE *input, DTYPE *output, int n)), void (*kernel)(DTYPE *input, DTYPE *output, int n)) { DTYPE computed_result, computed_result_gpu; DTYPE *vector_input; vector_input = test_data_gen(n); printf("---------------------------\n"); hipEventCreate(&gpu_start); hipEventCreate(&gpu_stop); hipEventCreate(&cpu_start); hipEventCreate(&cpu_stop); ///cpu hipEventRecord(cpu_start); computed_result = partialSum(vector_input, n); hipEventRecord(cpu_stop); hipEventSynchronize(cpu_stop); hipEventElapsedTime(&time_cost_cpu, cpu_start, cpu_stop); printf("Time cost (CPU):%f ms \n", time_cost_cpu); /// ///gpu computed_result_gpu = reduction(vector_input, n, kernel); /// printf("[%d] Computed sum (CPU): ", n); printf(DTYPE_FORMAT, computed_result); printf(" GPU result:"); printf(DTYPE_FORMAT, computed_result_gpu); if (abs(computed_result_gpu - computed_result) < 
1e-3) { printf(" PASSED! \n"); } else { printf(" FAILED! \n"); } printf("\n"); free(vector_input); } int main(int argc, char **argv) { int n_arr[] = {1, 7, 585, 5000, 300001, 1<<20}; for(int i=0; i<sizeof(n_arr)/sizeof(int); i++) { test(n_arr[i], gpu_reduction_cpu, kernel_reduction_shm_non_consecutive); test(n_arr[i], gpu_reduction_cpu, kernel_reduction_shm_consecutive); } return 0; }
a1065cc8796fab59671bf39bf92483722b588ee8.cu
#include<stdio.h> #include<stdlib.h> #include"error_check.h" #include"gpu_timer.h" #define DTYPE double #define DTYPE_FORMAT "%lf" #define BLOCK_SIZE 32 float time_cost_gpu = -1, time_cost_cpu = -1; cudaEvent_t gpu_start, gpu_stop, cpu_start, cpu_stop; DTYPE partialSum(DTYPE *vector, int n) { DTYPE temp = 0; for (int i = 0; i < n; i++) { temp += vector[i]; } return temp; } /* * Todo: * reduction kernel in which the threads are mapped to data with stride 2 */ __global__ void kernel_reduction_shm_non_consecutive(DTYPE *input, DTYPE *output, int n) { int tid = threadIdx.x, offset = blockIdx.x*blockDim.x; int i = offset + tid; __shared__ DTYPE sdata[BLOCK_SIZE]; sdata[tid] = input[i]; __syncthreads(); for(int s = 1; s < blockDim.x && tid*2 + s < BLOCK_SIZE; s<<=1){ //主要防无关thread多加 sdata[tid*2] += sdata[tid*2+s]; __syncthreads(); } if(tid == 0) output[blockIdx.x] = sdata[0]; } /* * Todo: * reduction kernel in which the threads are consecutively mapped to data */ __global__ void kernel_reduction_shm_consecutive(DTYPE *input, DTYPE *output, int n) { int tid = threadIdx.x, offset = blockIdx.x*blockDim.x; int i = offset + tid; __shared__ DTYPE sdata[BLOCK_SIZE]; sdata[tid] = input[i]; __syncthreads(); for(int s = BLOCK_SIZE/2; s >= 1 && tid+s < BLOCK_SIZE; s>>=1){ sdata[tid] += sdata[tid+s]; __syncthreads(); } if(tid == 0) output[blockIdx.x] = sdata[0]; } /* * Todo: * Wrapper function that utilizes cpu computation to sum the reduced results from blocks */ DTYPE gpu_reduction_cpu(DTYPE *input, int n, void (*kernel)(DTYPE *input, DTYPE *output, int n)) { int MEM_SIZE = sizeof(DTYPE) * n; DTYPE *in = nullptr, *out = nullptr, *output = nullptr; CHECK(cudaMalloc((void**)&in, MEM_SIZE)); CHECK(cudaMalloc((void**)&out, MEM_SIZE)); output = (DTYPE*)malloc(MEM_SIZE); CHECK(cudaMemcpy(in, input, MEM_SIZE, cudaMemcpyHostToDevice)); int grid = ceil((double)n/BLOCK_SIZE); cudaEventRecord(gpu_start); kernel<<<grid, BLOCK_SIZE>>>(in, out, n); cudaEventRecord(gpu_stop); cudaEventSynchronize(gpu_stop); cudaEventElapsedTime(&time_cost_gpu, gpu_start, gpu_stop); printf("Time cost (GPU):%f ms \n", time_cost_gpu); CHECK(cudaMemcpy(output, out, MEM_SIZE, cudaMemcpyDeviceToHost)); CHECK(cudaFree(in)); CHECK(cudaFree(out)); DTYPE sum = 0; for(int i = 0; i < grid; i += 1){ sum += output[i]; } free(output); return sum; } DTYPE* test_data_gen(int n) { srand(time(0)); DTYPE *data = (DTYPE *) malloc(n * sizeof(DTYPE)); for (int i = 0; i < n; i++) { data[i] = 1.0 * (rand() % RAND_MAX) / RAND_MAX; } return data; } void test(int n, DTYPE (*reduction)(DTYPE *input, int n, void (*kernel)(DTYPE *input, DTYPE *output, int n)), void (*kernel)(DTYPE *input, DTYPE *output, int n)) { DTYPE computed_result, computed_result_gpu; DTYPE *vector_input; vector_input = test_data_gen(n); printf("---------------------------\n"); cudaEventCreate(&gpu_start); cudaEventCreate(&gpu_stop); cudaEventCreate(&cpu_start); cudaEventCreate(&cpu_stop); ///cpu cudaEventRecord(cpu_start); computed_result = partialSum(vector_input, n); cudaEventRecord(cpu_stop); cudaEventSynchronize(cpu_stop); cudaEventElapsedTime(&time_cost_cpu, cpu_start, cpu_stop); printf("Time cost (CPU):%f ms \n", time_cost_cpu); /// ///gpu computed_result_gpu = reduction(vector_input, n, kernel); /// printf("[%d] Computed sum (CPU): ", n); printf(DTYPE_FORMAT, computed_result); printf(" GPU result:"); printf(DTYPE_FORMAT, computed_result_gpu); if (abs(computed_result_gpu - computed_result) < 1e-3) { printf(" PASSED! \n"); } else { printf(" FAILED! 
\n"); } printf("\n"); free(vector_input); } int main(int argc, char **argv) { int n_arr[] = {1, 7, 585, 5000, 300001, 1<<20}; for(int i=0; i<sizeof(n_arr)/sizeof(int); i++) { test(n_arr[i], gpu_reduction_cpu, kernel_reduction_shm_non_consecutive); test(n_arr[i], gpu_reduction_cpu, kernel_reduction_shm_consecutive); } return 0; }
3f22ba6ed0cbdc0fdf293af3da33d38378787bb9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright (c) 2017 by Contributors * \file roi_align.cu * \brief roi align operator * \author Yuchen Guo, Zehao Shi */ #include "./roi_align-inl.h" #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <algorithm> #include <vector> namespace mshadow { namespace cuda { template<typename Dtype> __global__ void ROIAlignForwardKernel(const int count, const Dtype* bottom_data, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, Dtype* argmax_data) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; if (roi_batch_ind < 0) { top_data[index] = 0; argmax_data[index] = 0; continue; } Dtype roi_start_w = (bottom_rois[1]) * spatial_scale; Dtype roi_start_h = (bottom_rois[2]) * spatial_scale; Dtype roi_end_w = (bottom_rois[3]) * spatial_scale; Dtype roi_end_h = (bottom_rois[4]) * spatial_scale; // Force malformed ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, static_cast<Dtype>(1)); Dtype roi_height = max(roi_end_h - roi_start_h, static_cast<Dtype>(1)); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); Dtype hstart = static_cast<Dtype>((ph) * bin_size_h); Dtype wstart = static_cast<Dtype>((pw) * bin_size_w); Dtype hend = static_cast<Dtype>((ph + 1) * bin_size_h); Dtype wend = static_cast<Dtype>((pw + 1) * bin_size_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, static_cast<Dtype>(0)), static_cast<Dtype>(height)); hend = min(max(hend + roi_start_h, static_cast<Dtype>(0)), static_cast<Dtype>(height)); wstart = min(max(wstart + roi_start_w, static_cast<Dtype>(0)), static_cast<Dtype>(width)); wend = min(max(wend + roi_start_w, static_cast<Dtype>(0)), static_cast<Dtype>(width)); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; int bottom_index = 0; bottom_data += (roi_batch_ind * channels + c) * height * width; Dtype h_stride = (hend - hstart)/3.0; Dtype w_stride = (wend - wstart)/3.0; for (Dtype h = hstart+h_stride; h <= hend-h_stride+0.01; h += max(h_stride, 0.01)) { for (Dtype w = wstart+w_stride; w <= wend-w_stride+0.01; w += max(w_stride, 0.01)) { bottom_index ++; int hlow = min(max(static_cast<int>(floor(h)), 0), height-1); int hhigh = min(max(static_cast<int>(ceil(h)), 0), height-1); int wleft = min(max(static_cast<int>(floor(w)), 0), width-1); int wright = min(max(static_cast<int>(ceil(w)), 0), width-1); int topleft = hlow * width + wleft; int topright = hlow * width + wright; int bottomleft = hhigh * width + wleft; int bottomright = hhigh * width + wright; Dtype alpha = (hlow == hhigh) ? static_cast<Dtype>(0.5) : (h - hlow) / (hhigh - hlow); Dtype beta = (wleft == wright) ? 
static_cast<Dtype>(0.5) : (w - wleft) / (wright - wleft); Dtype value = (1 - alpha) * (1 - beta) * bottom_data[topleft] + alpha * (1 - beta) * bottom_data[bottomleft] + (1 - alpha) * beta * bottom_data[topright] + alpha * beta * bottom_data[bottomright]; if (value > maxval) { maxval = value; maxidx = bottom_index; } } } top_data[index] = maxval; argmax_data[index] = (Dtype)maxidx; } } template<typename Dtype> inline void ROIAlignForward(const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data, const Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx, const float spatial_scale) { const Dtype *bottom_data = data.dptr_; const Dtype *bottom_rois = bbox.dptr_; Dtype *top_data = out.dptr_; Dtype *argmax_data = max_idx.dptr_; const int count = out.shape_.Size(); const int channels = data.size(1); const int height = data.size(2); const int width = data.size(3); const int pooled_height = out.size(2); const int pooled_width = out.size(3); const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 dimGrid(kMaxGridNum, (gridSize + kMaxGridNum - 1) / kMaxGridNum); dim3 dimBlock(kMaxThreadsPerBlock); CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Forward"); hipStream_t stream = Stream<gpu>::GetStream(out.stream_); hipLaunchKernelGGL(( ROIAlignForwardKernel<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream, count, bottom_data, spatial_scale, channels, height, width, pooled_height, pooled_width, bottom_rois, top_data, argmax_data); } template<typename Dtype> __global__ void ROIAlignBackwardAccKernel(const int count, const Dtype* top_diff, const Dtype* argmax_data, const int num_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } Dtype roi_start_w = (offset_bottom_rois[1]) * spatial_scale; Dtype roi_start_h = (offset_bottom_rois[2]) * spatial_scale; Dtype roi_end_w = (offset_bottom_rois[3]) * spatial_scale; Dtype roi_end_h = (offset_bottom_rois[4]) * spatial_scale; // Skip if ROI doesn't include (h, w) const bool in_roi = (w > roi_start_w - 1.0 && w < roi_end_w + 1.0 && h > roi_start_h - 1.0 && h < roi_end_h + 1.0); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + offset; const Dtype* offset_argmax_data = argmax_data + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, static_cast<Dtype>(1)); Dtype roi_height = max(roi_end_h - roi_start_h, static_cast<Dtype>(1)); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); for (int ph = 0; ph < pooled_height; ++ph) { for (int pw = 
0; pw < pooled_width; ++pw) { Dtype hstart = static_cast<Dtype>((ph) * bin_size_h); Dtype wstart = static_cast<Dtype>((pw) * bin_size_w); Dtype hend = static_cast<Dtype>((ph + 1) * bin_size_h); Dtype wend = static_cast<Dtype>((pw + 1) * bin_size_w); hstart = min(max(hstart + roi_start_h, static_cast<Dtype>(0)), static_cast<Dtype>(height)); hend = min(max(hend + roi_start_h, static_cast<Dtype>(0)), static_cast<Dtype>(height)); wstart = min(max(wstart + roi_start_w, static_cast<Dtype>(0)), static_cast<Dtype>(width)); wend = min(max(wend + roi_start_w, static_cast<Dtype>(0)), static_cast<Dtype>(width)); bool in_bin = (w > wstart - 1.0 && w < wend + 1.0 && h > hstart - 1.0 && h < hend + 1.0); if (!in_bin) { continue; } const int pool_index = ph * pooled_width + pw; int bottom_index = 0; Dtype h_stride = (hend - hstart)/3.0; Dtype w_stride = (wend - wstart)/3.0; for (Dtype rh = hstart+h_stride; rh <= hend-h_stride+0.01; rh += max(h_stride, 0.01)) { for (Dtype rw = wstart+w_stride; rw <= wend-w_stride+0.01; rw += max(w_stride, 0.01)) { bottom_index ++; if (offset_argmax_data[pool_index] != bottom_index) continue; // compute the integer coordinates around (h, w) for bilinear interpolation int hlow = min(max(static_cast<int>(floor(rh)), 0), height-1); int hhigh = min(max(static_cast<int>(ceil(rh)), 0), height-1); int wleft = min(max(static_cast<int>(floor(rw)), 0), width-1); int wright = min(max(static_cast<int>(ceil(rw)), 0), width-1); if (h != hlow && h != hhigh && w != wleft && w != wright) // (w, h) is not around (rw, rh) continue; Dtype alpha = (hlow == hhigh) ? static_cast<Dtype>(0.5) : (rh - hlow) / (hhigh - hlow); Dtype beta = (wleft == wright) ? static_cast<Dtype>(0.5) : (rw - wleft) / (wright - wleft); if (h == hlow && w == wleft) gradient += offset_top_diff[pool_index] * (1 - alpha) * (1 - beta); else if (h == hlow && w == wright) gradient += offset_top_diff[pool_index] * (1 - alpha) * beta; else if (h == hhigh && w == wleft) gradient += offset_top_diff[pool_index] * alpha * (1 - beta); else if (h == hhigh && w == wright) gradient += offset_top_diff[pool_index] * alpha * beta; } } } } } bottom_diff[index] += gradient; } } template<typename Dtype> inline void ROIAlignBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad, const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx, const float spatial_scale) { const Dtype *top_diff = out_grad.dptr_; const Dtype *bottom_rois = bbox.dptr_; Dtype *bottom_diff = in_grad.dptr_; Dtype *argmax_data = max_idx.dptr_; const int count = in_grad.shape_.Size(); const int num_rois = bbox.size(0); const int channels = in_grad.size(1); const int height = in_grad.size(2); const int width = in_grad.size(3); const int pooled_height = out_grad.size(2); const int pooled_width = out_grad.size(3); const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 dimGrid(kMaxGridNum, (gridSize + kMaxGridNum - 1) / kMaxGridNum); dim3 dimBlock(kMaxThreadsPerBlock); CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Backward"); hipStream_t stream = Stream<gpu>::GetStream(in_grad.stream_); hipLaunchKernelGGL(( ROIAlignBackwardAccKernel<Dtype>), dim3(dimGrid), dim3(dimBlock), 0, stream, count, top_diff, argmax_data, num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, bottom_diff, bottom_rois); } } // namespace cuda template<typename Dtype> inline void ROIAlignForward(const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data, const Tensor<gpu, 2, Dtype> &bbox, const 
Tensor<gpu, 4, Dtype> &max_idx, const float spatial_scale) { cuda::ROIAlignForward(out, data, bbox, max_idx, spatial_scale); } template<typename Dtype> inline void ROIAlignBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad, const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx, const float spatial_scale) { cuda::ROIAlignBackwardAcc(in_grad, out_grad, bbox, max_idx, spatial_scale); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(ROIAlignParam param, int dtype) { Operator* op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new ROIAlignOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
3f22ba6ed0cbdc0fdf293af3da33d38378787bb9.cu
/*! * Copyright (c) 2017 by Contributors * \file roi_align.cu * \brief roi align operator * \author Yuchen Guo, Zehao Shi */ #include "./roi_align-inl.h" #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include <algorithm> #include <vector> namespace mshadow { namespace cuda { template<typename Dtype> __global__ void ROIAlignForwardKernel(const int count, const Dtype* bottom_data, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const Dtype* bottom_rois, Dtype* top_data, Dtype* argmax_data) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; bottom_rois += n * 5; int roi_batch_ind = bottom_rois[0]; if (roi_batch_ind < 0) { top_data[index] = 0; argmax_data[index] = 0; continue; } Dtype roi_start_w = (bottom_rois[1]) * spatial_scale; Dtype roi_start_h = (bottom_rois[2]) * spatial_scale; Dtype roi_end_w = (bottom_rois[3]) * spatial_scale; Dtype roi_end_h = (bottom_rois[4]) * spatial_scale; // Force malformed ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, static_cast<Dtype>(1)); Dtype roi_height = max(roi_end_h - roi_start_h, static_cast<Dtype>(1)); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); Dtype hstart = static_cast<Dtype>((ph) * bin_size_h); Dtype wstart = static_cast<Dtype>((pw) * bin_size_w); Dtype hend = static_cast<Dtype>((ph + 1) * bin_size_h); Dtype wend = static_cast<Dtype>((pw + 1) * bin_size_w); // Add roi offsets and clip to input boundaries hstart = min(max(hstart + roi_start_h, static_cast<Dtype>(0)), static_cast<Dtype>(height)); hend = min(max(hend + roi_start_h, static_cast<Dtype>(0)), static_cast<Dtype>(height)); wstart = min(max(wstart + roi_start_w, static_cast<Dtype>(0)), static_cast<Dtype>(width)); wend = min(max(wend + roi_start_w, static_cast<Dtype>(0)), static_cast<Dtype>(width)); bool is_empty = (hend <= hstart) || (wend <= wstart); // Define an empty pooling region to be zero Dtype maxval = is_empty ? 0 : -FLT_MAX; // If nothing is pooled, argmax = -1 causes nothing to be backprop'd int maxidx = -1; int bottom_index = 0; bottom_data += (roi_batch_ind * channels + c) * height * width; Dtype h_stride = (hend - hstart)/3.0; Dtype w_stride = (wend - wstart)/3.0; for (Dtype h = hstart+h_stride; h <= hend-h_stride+0.01; h += max(h_stride, 0.01)) { for (Dtype w = wstart+w_stride; w <= wend-w_stride+0.01; w += max(w_stride, 0.01)) { bottom_index ++; int hlow = min(max(static_cast<int>(floor(h)), 0), height-1); int hhigh = min(max(static_cast<int>(ceil(h)), 0), height-1); int wleft = min(max(static_cast<int>(floor(w)), 0), width-1); int wright = min(max(static_cast<int>(ceil(w)), 0), width-1); int topleft = hlow * width + wleft; int topright = hlow * width + wright; int bottomleft = hhigh * width + wleft; int bottomright = hhigh * width + wright; Dtype alpha = (hlow == hhigh) ? static_cast<Dtype>(0.5) : (h - hlow) / (hhigh - hlow); Dtype beta = (wleft == wright) ? 
static_cast<Dtype>(0.5) : (w - wleft) / (wright - wleft); Dtype value = (1 - alpha) * (1 - beta) * bottom_data[topleft] + alpha * (1 - beta) * bottom_data[bottomleft] + (1 - alpha) * beta * bottom_data[topright] + alpha * beta * bottom_data[bottomright]; if (value > maxval) { maxval = value; maxidx = bottom_index; } } } top_data[index] = maxval; argmax_data[index] = (Dtype)maxidx; } } template<typename Dtype> inline void ROIAlignForward(const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data, const Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx, const float spatial_scale) { const Dtype *bottom_data = data.dptr_; const Dtype *bottom_rois = bbox.dptr_; Dtype *top_data = out.dptr_; Dtype *argmax_data = max_idx.dptr_; const int count = out.shape_.Size(); const int channels = data.size(1); const int height = data.size(2); const int width = data.size(3); const int pooled_height = out.size(2); const int pooled_width = out.size(3); const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 dimGrid(kMaxGridNum, (gridSize + kMaxGridNum - 1) / kMaxGridNum); dim3 dimBlock(kMaxThreadsPerBlock); CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Forward"); cudaStream_t stream = Stream<gpu>::GetStream(out.stream_); ROIAlignForwardKernel<Dtype><<<dimGrid, dimBlock, 0, stream>>>( count, bottom_data, spatial_scale, channels, height, width, pooled_height, pooled_width, bottom_rois, top_data, argmax_data); } template<typename Dtype> __global__ void ROIAlignBackwardAccKernel(const int count, const Dtype* top_diff, const Dtype* argmax_data, const int num_rois, const float spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, Dtype* bottom_diff, const Dtype* bottom_rois) { for (int index = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x; index < count; index += blockDim.x * gridDim.x * gridDim.y) { // (n, c, h, w) coords in bottom data int w = index % width; int h = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; Dtype gradient = 0; // Accumulate gradient over all ROIs that pooled this element for (int roi_n = 0; roi_n < num_rois; ++roi_n) { const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5; int roi_batch_ind = offset_bottom_rois[0]; // Skip if ROI's batch index doesn't match n if (n != roi_batch_ind) { continue; } Dtype roi_start_w = (offset_bottom_rois[1]) * spatial_scale; Dtype roi_start_h = (offset_bottom_rois[2]) * spatial_scale; Dtype roi_end_w = (offset_bottom_rois[3]) * spatial_scale; Dtype roi_end_h = (offset_bottom_rois[4]) * spatial_scale; // Skip if ROI doesn't include (h, w) const bool in_roi = (w > roi_start_w - 1.0 && w < roi_end_w + 1.0 && h > roi_start_h - 1.0 && h < roi_end_h + 1.0); if (!in_roi) { continue; } int offset = (roi_n * channels + c) * pooled_height * pooled_width; const Dtype* offset_top_diff = top_diff + offset; const Dtype* offset_argmax_data = argmax_data + offset; // Compute feasible set of pooled units that could have pooled // this bottom unit // Force malformed ROIs to be 1x1 Dtype roi_width = max(roi_end_w - roi_start_w, static_cast<Dtype>(1)); Dtype roi_height = max(roi_end_h - roi_start_h, static_cast<Dtype>(1)); Dtype bin_size_h = static_cast<Dtype>(roi_height) / static_cast<Dtype>(pooled_height); Dtype bin_size_w = static_cast<Dtype>(roi_width) / static_cast<Dtype>(pooled_width); for (int ph = 0; ph < pooled_height; ++ph) { for (int pw = 0; pw < pooled_width; ++pw) 
{ Dtype hstart = static_cast<Dtype>((ph) * bin_size_h); Dtype wstart = static_cast<Dtype>((pw) * bin_size_w); Dtype hend = static_cast<Dtype>((ph + 1) * bin_size_h); Dtype wend = static_cast<Dtype>((pw + 1) * bin_size_w); hstart = min(max(hstart + roi_start_h, static_cast<Dtype>(0)), static_cast<Dtype>(height)); hend = min(max(hend + roi_start_h, static_cast<Dtype>(0)), static_cast<Dtype>(height)); wstart = min(max(wstart + roi_start_w, static_cast<Dtype>(0)), static_cast<Dtype>(width)); wend = min(max(wend + roi_start_w, static_cast<Dtype>(0)), static_cast<Dtype>(width)); bool in_bin = (w > wstart - 1.0 && w < wend + 1.0 && h > hstart - 1.0 && h < hend + 1.0); if (!in_bin) { continue; } const int pool_index = ph * pooled_width + pw; int bottom_index = 0; Dtype h_stride = (hend - hstart)/3.0; Dtype w_stride = (wend - wstart)/3.0; for (Dtype rh = hstart+h_stride; rh <= hend-h_stride+0.01; rh += max(h_stride, 0.01)) { for (Dtype rw = wstart+w_stride; rw <= wend-w_stride+0.01; rw += max(w_stride, 0.01)) { bottom_index ++; if (offset_argmax_data[pool_index] != bottom_index) continue; // compute the integer coordinates around (h, w) for bilinear interpolation int hlow = min(max(static_cast<int>(floor(rh)), 0), height-1); int hhigh = min(max(static_cast<int>(ceil(rh)), 0), height-1); int wleft = min(max(static_cast<int>(floor(rw)), 0), width-1); int wright = min(max(static_cast<int>(ceil(rw)), 0), width-1); if (h != hlow && h != hhigh && w != wleft && w != wright) // (w, h) is not around (rw, rh) continue; Dtype alpha = (hlow == hhigh) ? static_cast<Dtype>(0.5) : (rh - hlow) / (hhigh - hlow); Dtype beta = (wleft == wright) ? static_cast<Dtype>(0.5) : (rw - wleft) / (wright - wleft); if (h == hlow && w == wleft) gradient += offset_top_diff[pool_index] * (1 - alpha) * (1 - beta); else if (h == hlow && w == wright) gradient += offset_top_diff[pool_index] * (1 - alpha) * beta; else if (h == hhigh && w == wleft) gradient += offset_top_diff[pool_index] * alpha * (1 - beta); else if (h == hhigh && w == wright) gradient += offset_top_diff[pool_index] * alpha * beta; } } } } } bottom_diff[index] += gradient; } } template<typename Dtype> inline void ROIAlignBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad, const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx, const float spatial_scale) { const Dtype *top_diff = out_grad.dptr_; const Dtype *bottom_rois = bbox.dptr_; Dtype *bottom_diff = in_grad.dptr_; Dtype *argmax_data = max_idx.dptr_; const int count = in_grad.shape_.Size(); const int num_rois = bbox.size(0); const int channels = in_grad.size(1); const int height = in_grad.size(2); const int width = in_grad.size(3); const int pooled_height = out_grad.size(2); const int pooled_width = out_grad.size(3); const int gridSize = (count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock; dim3 dimGrid(kMaxGridNum, (gridSize + kMaxGridNum - 1) / kMaxGridNum); dim3 dimBlock(kMaxThreadsPerBlock); CheckLaunchParam(dimGrid, dimBlock, "ROIPooling Backward"); cudaStream_t stream = Stream<gpu>::GetStream(in_grad.stream_); ROIAlignBackwardAccKernel<Dtype><<<dimGrid, dimBlock, 0, stream>>>( count, top_diff, argmax_data, num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, bottom_diff, bottom_rois); } } // namespace cuda template<typename Dtype> inline void ROIAlignForward(const Tensor<gpu, 4, Dtype> &out, const Tensor<gpu, 4, Dtype> &data, const Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx, const float spatial_scale) { 
cuda::ROIAlignForward(out, data, bbox, max_idx, spatial_scale); } template<typename Dtype> inline void ROIAlignBackwardAcc(const Tensor<gpu, 4, Dtype> &in_grad, const Tensor<gpu, 4, Dtype> &out_grad, const Tensor<gpu, 2, Dtype> &bbox, const Tensor<gpu, 4, Dtype> &max_idx, const float spatial_scale) { cuda::ROIAlignBackwardAcc(in_grad, out_grad, bbox, max_idx, spatial_scale); } } // namespace mshadow namespace mxnet { namespace op { template<> Operator* CreateOp<gpu>(ROIAlignParam param, int dtype) { Operator* op = NULL; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { op = new ROIAlignOp<gpu, DType>(param); }); return op; } } // namespace op } // namespace mxnet
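ROIAlignForwardKernel samples each pooling sub-cell with bilinear interpolation: it clamps the four neighbouring integer coordinates, derives the vertical weight alpha and the horizontal weight beta, and blends the four corner values. The host-side helper below restates that single sampling step on its own. It mirrors the kernel's arithmetic for illustration and is not part of the operator.

#include <algorithm>
#include <cmath>

// Bilinear sample of data (a height x width row-major plane) at fractional
// coordinates (h, w), using the same clamping and weights as the kernel.
template <typename Dtype>
Dtype bilinear_sample(const Dtype *data, int height, int width, Dtype h, Dtype w)
{
    int hlow   = std::min(std::max((int)std::floor(h), 0), height - 1);
    int hhigh  = std::min(std::max((int)std::ceil(h),  0), height - 1);
    int wleft  = std::min(std::max((int)std::floor(w), 0), width  - 1);
    int wright = std::min(std::max((int)std::ceil(w),  0), width  - 1);

    Dtype alpha = (hlow == hhigh)   ? (Dtype)0.5 : (h - hlow) / (hhigh - hlow);
    Dtype beta  = (wleft == wright) ? (Dtype)0.5 : (w - wleft) / (wright - wleft);

    Dtype topleft     = data[hlow  * width + wleft];
    Dtype topright    = data[hlow  * width + wright];
    Dtype bottomleft  = data[hhigh * width + wleft];
    Dtype bottomright = data[hhigh * width + wright];

    return (1 - alpha) * (1 - beta) * topleft
         + alpha       * (1 - beta) * bottomleft
         + (1 - alpha) * beta       * topright
         + alpha       * beta       * bottomright;
}

For example, bilinear_sample(plane, H, W, (float)2.3, (float)4.7) returns the value the forward kernel would use for a sample point at row 2.3, column 4.7 of one channel's feature plane (plane, H, and W here are hypothetical caller-side names).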