Dataset schema (one row per hipify-translated file pair; string lengths are min/max over the split):

  hip_filename    string, 5 to 84 chars
  hip_content     string, 79 to 9.69M chars
  cuda_filename   string, 4 to 83 chars
  cuda_content    string, 19 to 9.69M chars
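For orientation, here is a minimal sketch of how rows with this schema could be loaded and inspected. It assumes the pairs are published as a Hugging Face dataset readable with the `datasets` library; the path "user/hipify-pairs" is a placeholder, not the real dataset name.

    # Minimal sketch, assuming the rows are served by the Hugging Face
    # `datasets` library; "user/hipify-pairs" is a placeholder path.
    from datasets import load_dataset

    ds = load_dataset("user/hipify-pairs", split="train")
    row = ds[0]
    print(row["hip_filename"], row["cuda_filename"])           # paired file names
    print(len(row["hip_content"]), len(row["cuda_content"]))   # sizes in chars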
f0b9e6bcd2f9bfddf5efbaf61af63f6c95eb9d4f.hip
// !!! This is a file automatically generated by hipify!!!

////////////////////////////////////////////////////////////////////
//                                                                //
// standard headers plus new one defining tridiagonal solvers     //
//                                                                //
////////////////////////////////////////////////////////////////////

#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>

#include "trid.h"
#include "utilities.h"

#define COLS 16

////////////////////////////////////////////////////////////////////
//                                                                //
// error-checking utility                                         //
//                                                                //
////////////////////////////////////////////////////////////////////

#define cudaSafeCall(err)  __cudaSafeCall(err,__FILE__,__LINE__)

inline void __cudaSafeCall(hipError_t err, const char *file, const int line){
  if(hipSuccess != err) {
    printf("%s(%i) : cudaSafeCall() Runtime API error : %d %s.\n",
           file, line, err, hipGetErrorString(err) );
    exit(-1);
  }
}

////////////////////////////////////////////////////////////////////
//                                                                //
// explicit Black-Scholes finite difference kernels               //
//                                                                //
////////////////////////////////////////////////////////////////////

//
// linear extrapolation b.c.
//

template <int pad_left, int pad_total, typename REAL>
__global__ void BS_bc1(int NX, int NY, int NZ, REAL *u1)
{
  int t, i, j, k, indg, IOFF, JOFF, KOFF;

  t    = threadIdx.x + blockIdx.x*blockDim.x;
  IOFF = 1;
  JOFF = NX+pad_total;
  KOFF = (NX+pad_total)*(NY+2);

  if (t<NX*NY) {
    i = t%NX;
    j = t/NX;
    k = NZ;
    indg = (i+pad_left) + (j+1)*JOFF + (k+1)*KOFF;
    u1[indg] = 2.0f*u1[indg-KOFF] - u1[indg-2*KOFF];
  }
  else if (t<NX*NY + NY*NZ) {
    t = t - NX*NY;
    j = t%NY;
    k = t/NY;
    i = NX;
    indg = (i+pad_left) + (j+1)*JOFF + (k+1)*KOFF;
    u1[indg] = 2.0f*u1[indg-IOFF] - u1[indg-2*IOFF];
  }
  else if (t<NX*NY + NY*NZ + NZ*NX) {
    t = t - NX*NY - NY*NZ;
    k = t%NZ;
    i = t/NZ;
    j = NY;
    indg = (i+pad_left) + (j+1)*JOFF + (k+1)*KOFF;
    u1[indg] = 2.0f*u1[indg-JOFF] - u1[indg-2*JOFF];
  }
}

//
// explicit solvers
//

template <int pad_left, int pad_total, typename REAL>
__global__ void BS_explicit1(int NX, int NY, int NZ, REAL dS,
                             REAL c1_1, REAL c1_2, REAL c1_3,
                             REAL c2_1, REAL c2_2, REAL c2_3, REAL c3,
                             REAL c12, REAL c13, REAL c23,
                             const REAL* __restrict__ u1,
                                   REAL* __restrict__ u2)
{
  REAL S1, S2, S3, t12, t13, t23;
  int  i, j, k, indg, active, IOFF, JOFF, KOFF;

  i = threadIdx.x + blockIdx.x*blockDim.x;
  j = threadIdx.y + blockIdx.y*blockDim.y;

  indg = (i+pad_left) + (j+1)*(NX+pad_total) + (NX+pad_total)*(NY+2);
  IOFF = 1;
  JOFF = NX+pad_total;
  KOFF = (NX+pad_total)*(NY+2);

  active = (i<NX) && (j<NY);

  if (active) {
    for (k=0; k<NZ; k++) {
      S1 = ((REAL) i)*dS;
      S2 = ((REAL) j)*dS;
      S3 = ((REAL) k)*dS;

      t12 = c12*S1*S2;
      t13 = c13*S1*S3;
      t23 = c23*S2*S3;

      u2[indg] = t23 * u1[indg-KOFF-JOFF]
               + t13 * u1[indg-KOFF-IOFF]
               + (c1_3*S3*S3 - c2_3*S3 - t13 - t23) * u1[indg-KOFF]
               + t12 * u1[indg-JOFF-IOFF]
               + (c1_2*S2*S2 - c2_2*S2 - t12 - t23) * u1[indg-JOFF]
               + (c1_1*S1*S1 - c2_1*S1 - t12 - t13) * u1[indg-IOFF]
               + (1.0f - c3 - 2.0f*( c1_1*S1*S1 + c1_2*S2*S2 + c1_3*S3*S3
                                   - t12 - t13 - t23 ) ) * u1[indg]
               + (c1_1*S1*S1 + c2_1*S1 - t12 - t13) * u1[indg+IOFF]
               + (c1_2*S2*S2 + c2_2*S2 - t12 - t23) * u1[indg+JOFF]
               + t12 * u1[indg+JOFF+IOFF]
               + (c1_3*S3*S3 + c2_3*S3 - t13 - t23) * u1[indg+KOFF]
               + t13 * u1[indg+KOFF+IOFF]
               + t23 * u1[indg+KOFF+JOFF];

      indg += KOFF;
    }
  }
}

template <int pad_left, int pad_total, typename REAL>
__global__ void BS_explicit2(int NX, int NY, int NZ, REAL dS,
                             REAL c1_1, REAL c1_2, REAL c1_3,
                             REAL c2_1, REAL c2_2, REAL c2_3, REAL c3,
                             REAL c12, REAL c13, REAL c23,
                             const REAL* __restrict__ u1,
                                   REAL* __restrict__ u2)
{
  REAL S1, S2, S3, t12, t13, t23;
  REAL u1_mm, u1_om, u1_mo, u1_m, u1_oo, u1_po, u1_op, u1_pp, u;
  int  i, j, k, indg, active, IOFF, JOFF, KOFF;

  i = threadIdx.x + blockIdx.x*blockDim.x;
  j = threadIdx.y + blockIdx.y*blockDim.y;

  indg = (i+pad_left) + (j+1)*(NX+pad_total) + (NX+pad_total)*(NY+2);
  IOFF = 1;
  JOFF = NX+pad_total;
  KOFF = (NX+pad_total)*(NY+2);

  active = (i<NX) && (j<NY);

  if (active) {
    u1_om = u1[indg-KOFF-JOFF];
    u1_mo = u1[indg-KOFF-IOFF];
    u1_m  = u1[indg-KOFF];
    u1_oo = u1[indg];
    u1_po = u1[indg+IOFF];
    u1_op = u1[indg+JOFF];

    for (k=0; k<NZ; k++) {
      S1 = ((REAL) i)*dS;
      S2 = ((REAL) j)*dS;
      S3 = ((REAL) k)*dS;

      t12 = c12*S1*S2;
      t13 = c13*S1*S3;
      t23 = c23*S2*S3;

      u = t23 * u1_om
        + t13 * u1_mo
        + (c1_3*S3*S3 - c2_3*S3 - t13 - t23) * u1_m;

      u1_mm = u1[indg-JOFF-IOFF];
      u1_om = u1[indg-JOFF];
      u1_mo = u1[indg-IOFF];
      u1_pp = u1[indg+IOFF+JOFF];

      u = u + t12 * u1_mm
            + (c1_2*S2*S2 - c2_2*S2 - t12 - t23) * u1_om
            + (c1_1*S1*S1 - c2_1*S1 - t12 - t13) * u1_mo
            + (1.0f - c3 - 2.0f*( c1_1*S1*S1 + c1_2*S2*S2 + c1_3*S3*S3
                                - t12 - t13 - t23 ) ) * u1_oo
            + (c1_1*S1*S1 + c2_1*S1 - t12 - t13) * u1_po
            + (c1_2*S2*S2 + c2_2*S2 - t12 - t23) * u1_op
            + t12 * u1_pp;

      indg += KOFF;

      u1_m  = u1_oo;
      u1_oo = u1[indg];
      u1_po = u1[indg+IOFF];
      u1_op = u1[indg+JOFF];

      u = u + (c1_3*S3*S3 + c2_3*S3 - t13 - t23) * u1_oo
            + t13 * u1_po
            + t23 * u1_op;

      u2[indg-KOFF] = u;
    }
  }
}

template <int pad_left, int pad_total, typename REAL, typename REAL2>
__launch_bounds__(256, 3)  // (max 256 threads per block, min 3 blocks per SMX)
__global__ void BS_explicit3(int NX, int NY, int NZ, REAL dS,
                             REAL c1_1, REAL c1_2, REAL c1_3,
                             REAL c2_1, REAL c2_2, REAL c2_3, REAL c3,
                             REAL c12, REAL c13, REAL c23,
                             const REAL2 * __restrict__ u1,
                                   REAL2 * __restrict__ u2)
{
  REAL  S1m, S1p, S2, S3, t12m, t12p, t13m, t13p, t23;
  int   i, j, k, indg, active, JOFF, KOFF;
  REAL2 u1_mm, u1_om, u1_pm, u1_mp, u1_op, u1_pp, u;
  REAL  u1_om_w, u1_mm_w, u1_pm_z, u1_op_z;

  i = threadIdx.x - 1 + blockIdx.x*(blockDim.x-2);
  j = threadIdx.y     + blockIdx.y*blockDim.y;

  JOFF = (NX+pad_total)/2;
  KOFF = JOFF*(NY+2);
  indg = i + pad_left/2 + (j+1)*JOFF;

  active = (i<=NX/2) && (j<NY);

  if (active) {
    u1_mm = u1[indg-JOFF];
    u1_om = u1[indg     ];
    u1_pm = u1[indg+JOFF];
    indg += KOFF;
    u1_mp = u1[indg-JOFF];
    u1_op = u1[indg     ];
    u1_pp = u1[indg+JOFF];

    u1_om_w = __shfl_up  (u1_om.y,1);
    u1_op_z = __shfl_down(u1_op.x,1);

    for (k=0; k<NZ; k++) {
      S1m = ((REAL) (2*i  ))*dS;
      S1p = ((REAL) (2*i+1))*dS;
      S2  = ((REAL) j)*dS;
      S3  = ((REAL) k)*dS;

      t12m = c12*S2*S1m;
      t12p = c12*S2*S1p;
      t13m = c13*S3*S1m;
      t13p = c13*S3*S1p;
      t23  = c23*S2*S3;

      u.x = t23  * u1_mm.x
          + t13m * u1_om_w
          + (c1_3*S3*S3 - c2_3*S3 - t13m - t23) * u1_om.x;
      u.y = t23  * u1_mm.y
          + t13p * u1_om.x
          + (c1_3*S3*S3 - c2_3*S3 - t13p - t23) * u1_om.y;

      u1_mm = u1_mp;
      u1_om = u1_op;
      u1_pm = u1_pp;

      u1_mm_w = __shfl_up  (u1_mm.y,1);
   // u1_mm_z = __shfl_down(u1_mm.x,1);
      u1_om_w = __shfl_up  (u1_om.y,1);
   // u1_om_z = __shfl_down(u1_om.x,1);  == u1_op_z
   // u1_pm_w = __shfl_up  (u1_pm.y,1);
      u1_pm_z = __shfl_down(u1_pm.x,1);

      u.x = u.x + t12m * u1_mm_w
                + (c1_2*S2*S2 - c2_2*S2 - t12m - t23 ) * u1_mm.x
                + (c1_1*S1m*S1m - c2_1*S1m - t12m - t13m) * u1_om_w
                + (1.0f - c3 - 2.0f*( c1_1*S1m*S1m + c1_2*S2*S2 + c1_3*S3*S3
                                    - t12m - t13m - t23 ) ) * u1_om.x
                + (c1_1*S1m*S1m + c2_1*S1m - t12m - t13m) * u1_om.y
                + (c1_2*S2*S2 + c2_2*S2 - t12m - t23 ) * u1_pm.x
                + t12m * u1_pm.y;

      u.y = u.y + t12p * u1_mm.x
                + (c1_2*S2*S2 - c2_2*S2 - t12p - t23 ) * u1_mm.y
                + (c1_1*S1p*S1p - c2_1*S1p - t12p - t13p) * u1_om.x
                + (1.0f - c3 - 2.0f*( c1_1*S1p*S1p + c1_2*S2*S2 + c1_3*S3*S3
                                    - t12p - t13p - t23 ) ) * u1_om.y
                + (c1_1*S1p*S1p + c2_1*S1p - t12p - t13p) * u1_op_z
                + (c1_2*S2*S2 + c2_2*S2 - t12p - t23 ) * u1_pm.y
                + t12p * u1_pm_z;

      indg += KOFF;

      u1_mp = u1[indg-JOFF];
      u1_op = u1[indg     ];
      u1_pp = u1[indg+JOFF];

      u1_op_z = __shfl_down(u1_op.x,1);

      u.x = u.x + (c1_3*S3*S3 + c2_3*S3 - t13m - t23) * u1_op.x
                + t13m * u1_op.y
                + t23  * u1_pp.x;
      u.y = u.y + (c1_3*S3*S3 + c2_3*S3 - t13p - t23) * u1_op.y
                + t13p * u1_op_z
                + t23  * u1_pp.y;

      if (threadIdx.x>0 && threadIdx.x<blockDim.x-1 && i<NX/2)
        u2[indg-KOFF] = u;
    }
  }
}

////////////////////////////////////////////////////////////////////
//                                                                //
// implicit Black-Scholes finite difference kernels               //
//                                                                //
////////////////////////////////////////////////////////////////////

template <int pad_left, int pad_total, typename REAL>
__global__ void BS_implicit2_rhs(int NX, int NY, int NZ, REAL dS,
                                 REAL c1_1, REAL c1_2, REAL c1_3,
                                 REAL c2_1, REAL c2_2, REAL c2_3, REAL c3,
                                 REAL c12, REAL c13, REAL c23,
                                 const REAL* __restrict__ u1,
                                       REAL* __restrict__ u2)
{
  REAL S1, S2, S3, t12, t13, t23;
  REAL u1_mm, u1_om, u1_mo, u1_m, u1_oo, u1_po, u1_op, u1_pp, u;
  int  i, j, k, indg, active, IOFF, JOFF, KOFF;

  i = threadIdx.x + blockIdx.x*blockDim.x;
  j = threadIdx.y + blockIdx.y*blockDim.y;

  indg = (i+pad_left) + (j+1)*(NX+pad_total) + (NX+pad_total)*(NY+2);
  IOFF = 1;
  JOFF = NX+pad_total;
  KOFF = (NX+pad_total)*(NY+2);

  active = (i<NX) && (j<NY);

  if (active) {
    u1_om = u1[indg-KOFF-JOFF];
    u1_mo = u1[indg-KOFF-IOFF];
    u1_m  = u1[indg-KOFF];
    u1_oo = u1[indg];
    u1_po = u1[indg+IOFF];
    u1_op = u1[indg+JOFF];

    for (k=0; k<NZ; k++) {
      S1 = ((REAL) i)*dS;
      S2 = ((REAL) j)*dS;
      S3 = ((REAL) k)*dS;

      t12 = c12*S1*S2;
      t13 = c13*S1*S3;
      t23 = c23*S2*S3;

      u = t23 * u1_om
        + t13 * u1_mo
        + (c1_3*S3*S3 - c2_3*S3 - t13 - t23) * u1_m;

      u1_mm = u1[indg-JOFF-IOFF];
      u1_om = u1[indg-JOFF];
      u1_mo = u1[indg-IOFF];
      u1_pp = u1[indg+IOFF+JOFF];

      u = u + t12 * u1_mm
            + (c1_2*S2*S2 - c2_2*S2 - t12 - t23) * u1_om
            + (c1_1*S1*S1 - c2_1*S1 - t12 - t13) * u1_mo
            + ( - c3 - 2.0f*( c1_1*S1*S1 + c1_2*S2*S2 + c1_3*S3*S3
                            - t12 - t13 - t23 ) ) * u1_oo
            + (c1_1*S1*S1 + c2_1*S1 - t12 - t13) * u1_po
            + (c1_2*S2*S2 + c2_2*S2 - t12 - t23) * u1_op
            + t12 * u1_pp;

      indg += KOFF;

      u1_m  = u1_oo;
      u1_oo = u1[indg];
      u1_po = u1[indg+IOFF];
      u1_op = u1[indg+JOFF];

      u = u + (c1_3*S3*S3 + c2_3*S3 - t13 - t23) * u1_oo
            + t13 * u1_po
            + t23 * u1_op;

      u2[indg-KOFF] = u;
    }
  }
}

//
// solves tridiagonal equations in x-direction, and increments solution
//

template <int pad_left, int pad_total, typename REAL>
__global__ void BS_implicit2_x(int NX, int NY, int NZ, REAL dS,
                               REAL c1, REAL c2, REAL c3,
                                     REAL* __restrict__ u,
                               const REAL* __restrict__ rhs )
{
  volatile __shared__ REAL smem[(256+8)*4];
  REAL S, lambda, gamma, a[8], b[8], c[8], d[8];
  int  j, k, tid;

  tid = threadIdx.x;
  j   = threadIdx.y;
  k   = blockIdx.x;

  rhs = rhs + pad_left + (j+1)*(NX+pad_total) + (k+1)*(NX+pad_total)*(NY+2);
  u   = u   + pad_left + (j+1)*(NX+pad_total) + (k+1)*(NX+pad_total)*(NY+2);

  for ( ; j<NY; j=j+4) {
    for (int i=0; i<8; i++) {
      S      = (8*tid+i) * dS;
      lambda = c1*S*S;
      gamma  = c2*S;
      a[i] = - ( lambda - gamma );
      b[i] = 1.0f + c3 + 2.0f*lambda;
      c[i] = - ( lambda + gamma );
    }

    if (tid==31) {
      a[7] = + 2.0f*gamma;
      b[7] = 1.0f + c3 - 2.0f*gamma;
      c[7] = 0.0f;
    }

    int off = threadIdx.y*(256+8);
    loadDataIntoRegisters_contig<8,32>(tid,256,d,smem+off,rhs,(REAL)0.0);

    trid_warp<8>(a,b,c,d);

    incDataFromRegisters_contig<8,32>(tid,256,d,smem+off,u);

    rhs = rhs + 4*(NX+pad_total);  // increment pointers for next line
    u   = u   + 4*(NX+pad_total);
  }
}

//
// solves tridiagonal equations in y-direction
//

template <int pad_left, int pad_total, typename REAL>
__global__ void BS_implicit2_y(int NX, int NY, int NZ, REAL dS,
                               REAL c1, REAL c2, REAL c3,
                               REAL* __restrict__ u )
{
  __shared__ REAL s1[33*COLS], s2[33*COLS];
  REAL S, lambda, gamma, a[8], b[8], c[8], d[8];
  int  i, j, k, tid, ind1, ind2;

  tid  = threadIdx.x + threadIdx.y*COLS;
  ind1 = tid + (tid/32);
  ind2 = (tid/32) + (tid%32)*COLS;
  ind2 += ind2 / 32;

  i = threadIdx.x;
  j = 8*threadIdx.y;
  k = blockIdx.x;

  u = u + i + pad_left + (j+1)*(NX+pad_total) + (k+1)*(NX+pad_total)*(NY+2);

  for (i=threadIdx.x; i<NX; i=i+COLS) {
    for (int n=0; n<8; n++) {
      S      = (j+n) * dS;
      lambda = c1*S*S;
      gamma  = c2*S;
      a[n] = - ( lambda - gamma );
      b[n] = 1.0f + 2.0f*lambda;
      c[n] = - ( lambda + gamma );
      d[n] = u[n*(NX+pad_total)];
    }

    if (threadIdx.y==31) {
      a[7] = + 2.0f*gamma;
      b[7] = 1.0f - 2.0f*gamma;
      c[7] = 0.0f;
    }

    trid_warp_part1<8>(a,b,c,d);

    s1[ind1] = a[0];
    s2[ind1] = a[7];
    __syncthreads();
    a[0] = s1[ind2];
    a[7] = s2[ind2];
    __syncthreads();
    s1[ind1] = c[0];
    s2[ind1] = c[7];
    __syncthreads();
    c[0] = s1[ind2];
    c[7] = s2[ind2];
    __syncthreads();
    s1[ind1] = d[0];
    s2[ind1] = d[7];
    __syncthreads();
    d[0] = s1[ind2];
    d[7] = s2[ind2];

    trid2_warp(a[0],c[0],d[0],a[7],c[7],d[7]);

    s1[ind2] = d[0];
    s2[ind2] = d[7];
    __syncthreads();
    d[0] = s1[ind1];
    d[7] = s2[ind1];

    for (int n=1; n<7; n++)
      d[n] = d[n] - a[n]*d[0] - c[n]*d[7];

    for (int n=0; n<8; n++)
      u[n*(NX+pad_total)] = d[n];

    u = u + COLS;  // increment pointers for next lines
  }
}

//
// similar to BS_implicit2_y but solving in z-direction
//

template <int pad_left, int pad_total, typename REAL>
__global__ void BS_implicit2_z(int NX, int NY, int NZ, REAL dS,
                               REAL c1, REAL c2, REAL c3,
                               REAL* __restrict__ u )
{
  __shared__ REAL s1[33*COLS], s2[33*COLS];
  REAL S, lambda, gamma, a[8], b[8], c[8], d[8];
  int  i, j, k, tid, ind1, ind2;

  tid  = threadIdx.x + threadIdx.y*COLS;
  ind1 = tid + (tid/32);
  ind2 = (tid/32) + (tid%32)*COLS;
  ind2 += ind2 / 32;

  i = threadIdx.x;
  j = blockIdx.x;      // swapping j, k in these two lines
  k = 8*threadIdx.y;   // is one difference from implicit2_y

  u = u + i + pad_left + (j+1)*(NX+pad_total) + (k+1)*(NX+pad_total)*(NY+2);

  for (i=threadIdx.x; i<NX; i=i+COLS) {
    for (int n=0; n<8; n++) {
      S      = (k+n) * dS;  // changing j to k here is another
      lambda = c1*S*S;
      gamma  = c2*S;
      a[n] = - ( lambda - gamma );
      b[n] = 1.0f + 2.0f*lambda;
      c[n] = - ( lambda + gamma );
      d[n] = u[n*(NX+pad_total)*(NY+2)];  // and a different offset here ...
    }

    if (threadIdx.y==31) {
      a[7] = + 2.0f*gamma;
      b[7] = 1.0f - 2.0f*gamma;
      c[7] = 0.0f;
    }

    trid_warp_part1<8>(a,b,c,d);

    s1[ind1] = a[0];
    s2[ind1] = a[7];
    __syncthreads();
    a[0] = s1[ind2];
    a[7] = s2[ind2];
    __syncthreads();
    s1[ind1] = c[0];
    s2[ind1] = c[7];
    __syncthreads();
    c[0] = s1[ind2];
    c[7] = s2[ind2];
    __syncthreads();
    s1[ind1] = d[0];
    s2[ind1] = d[7];
    __syncthreads();
    d[0] = s1[ind2];
    d[7] = s2[ind2];

    trid2_warp(a[0],c[0],d[0],a[7],c[7],d[7]);

    s1[ind2] = d[0];
    s2[ind2] = d[7];
    __syncthreads();
    d[0] = s1[ind1];
    d[7] = s2[ind1];

    for (int n=1; n<7; n++)
      d[n] = d[n] - a[n]*d[0] - c[n]*d[7];

    for (int n=0; n<8; n++)
      u[n*(NX+pad_total)*(NY+2)] = d[n];  // ... and here

    u = u + COLS;  // increment pointers for next lines
  }
}

////////////////////////////////////////////////////////////////////
//                                                                //
// main code to test all solvers for single & double precision    //
//                                                                //
////////////////////////////////////////////////////////////////////

int main(int argc, char **argv) {

  int    NX=256, NY=256, NZ=256, N, imid;
  float  *u_h, *u1_d, *u2_d, *foo_d;
  double *U_h, *U1_d, *U2_d, *Foo_d, val, err;
  int    pad_left, pad_total;

  // initialise CUDA timing

  float milli;
  hipEvent_t start, stop;
  hipEventCreate(&start);
  hipEventCreate(&stop);

  // allocate memory for arrays

  int prod = (NX+32)*(NY+2)*(NZ+2)+2;
  u_h = (float *)malloc(prod*sizeof(float));
  // U_h = (double *)malloc(prod*sizeof(double));
  cudaSafeCall(hipMalloc((void **)&u1_d, (prod+1)*sizeof(float)));
  // cudaSafeCall(hipMalloc((void **)&U1_d, (prod+1)*sizeof(double)));
  cudaSafeCall(hipMalloc((void **)&u2_d, (prod+1)*sizeof(float)));
  // cudaSafeCall(hipMalloc((void **)&U2_d, (prod+1)*sizeof(double)));

  // execute kernels

  for (int prec=0; prec<1; prec++) {
    if (prec==0) {
      printf("\nsingle precision performance tests \n");
      cudaSafeCall(hipDeviceSetSharedMemConfig(hipSharedMemBankSizeFourByte));
    }
    else {
      printf("\ndouble precision performance tests \n");
      cudaSafeCall(hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte));
    }

    printf("---------------------------------- \n");
    printf(" method exec time GFinsts GFlops value at strike \n");

    for (int pass=0; pass<4; pass++) {
      pad_left  = 32;
      pad_total = 32;

      if (pass<3) {
        N = 500;
        cudaSafeCall(hipDeviceSetCacheConfig(hipFuncCachePreferL1));
      }
      else {
        N = 100;
        //hipDeviceSetCacheConfig(hipFuncCachePreferShared);
        cudaSafeCall(hipFuncSetCacheConfig(BS_implicit2_x<32,32,double>,
                                           hipFuncCachePreferShared));
        cudaSafeCall(hipFuncSetCacheConfig(BS_implicit2_y<32,32,double>,
                                           hipFuncCachePreferL1));
        cudaSafeCall(hipFuncSetCacheConfig(BS_implicit2_z<32,32,double>,
                                           hipFuncCachePreferL1));
        cudaSafeCall(hipFuncSetCacheConfig(BS_implicit2_rhs<32,32,double>,
                                           hipFuncCachePreferL1));
        cudaSafeCall(hipFuncSetCacheConfig(BS_implicit2_x<32,32,float>,
                                           hipFuncCachePreferShared));
        cudaSafeCall(hipFuncSetCacheConfig(BS_implicit2_y<32,32,float>,
                                           hipFuncCachePreferL1));
        cudaSafeCall(hipFuncSetCacheConfig(BS_implicit2_z<32,32,float>,
                                           hipFuncCachePreferL1));
        cudaSafeCall(hipFuncSetCacheConfig(BS_implicit2_rhs<32,32,float>,
                                           hipFuncCachePreferL1));
      }

      double Smax=200.0, K=100.0, r=0.05, sigma=0.2, T=0.05;

      double dS = Smax / 255.0;
      double dt = T / ( (double) N);

      double C1 = 0.5*dt*sigma*sigma / (dS*dS);
      double C2 = 0.5*dt*r / dS;
      double C3 = r*dt;

      float c1=C1, c2=C2, c3=C3, ds=dS;

      // initialise array (call on minimum of 3 assets) and copy over

      for (int i=-1; i<NX; i++) {
        for (int j=-1; j<NY; j++) {
          for (int k=-1; k<NZ; k++) {
            int indg = (i+pad_left) + (j+1)*(NX+pad_total)
                     + (k+1)*(NX+pad_total)*(NY+2);
            // U_h[indg] = fmax(0.0, fmin(i*dS, fmin(j*dS,k*dS)) - K);
            U_h[indg] = fmax(0.0, i*dS-K);
            u_h[indg] = U_h[indg];
          }
        }
      }

      if (prec==0) {
        cudaSafeCall(hipMemcpy(u1_d,u_h, prod*sizeof(float) ,hipMemcpyHostToDevice));
        cudaSafeCall(hipMemcpy(u2_d,u_h, prod*sizeof(float) ,hipMemcpyHostToDevice));
      }
      else {
        cudaSafeCall(hipMemcpy(U1_d,U_h, prod*sizeof(double),hipMemcpyHostToDevice));
        cudaSafeCall(hipMemcpy(U2_d,U_h, prod*sizeof(double),hipMemcpyHostToDevice));
      }

      // now do main computation

      int BLOCK_X = 64;
      int BLOCK_Y = 4;

      int bc_threads = BLOCK_X*BLOCK_Y;
      int bc_blocks  = 1 + (NX*NY + NY*NZ + NZ*NX - 1) / bc_threads;

      int bx = 1 + (NX-1)/BLOCK_X;
      int by = 1 + (NY-1)/BLOCK_Y;

      if (pass==2) {
        BLOCK_X = 32;
        BLOCK_Y = 8;
        bx = 1 + (NX/2-1)/(BLOCK_X-2);
        by = 1 + (NY-1)/BLOCK_Y;
      }

      dim3 threads(BLOCK_X,BLOCK_Y);
      dim3 blocks(bx,by);

      hipEventRecord(start);

      for (int n=1; n<=N; n++) {
        if (prec==0) {
          hipLaunchKernelGGL(( BS_bc1<32,32>), dim3(bc_blocks), dim3(bc_threads), 0, 0, NX,NY,NZ, u1_d);

          if (pass==0)
            hipLaunchKernelGGL(( BS_explicit1<32,32>), dim3(blocks), dim3(threads), 0, 0, NX,NY,NZ, ds,
                               c1,c1,c1, c2,c2,c2, c3, 0.0f,0.0f,0.0f, u1_d, u2_d);
          else if (pass==1)
            hipLaunchKernelGGL(( BS_explicit2<32,32>), dim3(blocks), dim3(threads), 0, 0, NX,NY,NZ, ds,
                               c1,c1,c1, c2,c2,c2, c3, 0.0f,0.0f,0.0f, u1_d, u2_d);
          else if (pass==2)
            hipLaunchKernelGGL(( BS_explicit3<32,32>), dim3(blocks), dim3(threads), 0, 0, NX,NY,NZ, ds,
                               c1,c1,c1, c2,c2,c2, c3, 0.0f,0.0f,0.0f,
                               (float2*)(u1_d), (float2*)(u2_d));
          else if (pass==3) {
            hipLaunchKernelGGL(( BS_implicit2_rhs<32,32>), dim3(blocks), dim3(threads), 0, 0, NX,NY,NZ, ds,
                               c1,c1,c1, c2,c2,c2, c3, 0.0f,0.0f,0.0f, u1_d, u2_d);
            hipLaunchKernelGGL(( BS_implicit2_y<32,32>), dim3(NZ), dim3(dim3(COLS,32)), 0, 0, NX,NY,NZ, ds, c1,c2,c3, u2_d);
            hipLaunchKernelGGL(( BS_implicit2_z<32,32>), dim3(NY), dim3(dim3(COLS,32)), 0, 0, NX,NY,NZ, ds, c1,c2,c3, u2_d);
            hipLaunchKernelGGL(( BS_implicit2_x<32,32>), dim3(NZ), dim3(dim3(32,4)), 0, 0, NX,NY,NZ, ds, c1,c2,c3, u1_d, u2_d);
          }

          if (pass<3) {foo_d=u1_d; u1_d=u2_d; u2_d=foo_d;}  // swap u1, u2 pointers
        }
        else {
          hipLaunchKernelGGL(( BS_bc1<32,32>), dim3(bc_blocks), dim3(bc_threads), 0, 0, NX,NY,NZ, U1_d);

          if (pass==0)
            hipLaunchKernelGGL(( BS_explicit1<32,32>), dim3(blocks), dim3(threads), 0, 0, NX,NY,NZ, dS,
                               C1,C1,C1, C2,C2,C2, C3, 0.0,0.0,0.0, U1_d, U2_d);
          else if (pass==1)
            hipLaunchKernelGGL(( BS_explicit2<32,32>), dim3(blocks), dim3(threads), 0, 0, NX,NY,NZ, dS,
                               C1,C1,C1, C2,C2,C2, C3, 0.0,0.0,0.0, U1_d, U2_d);
          else if (pass==2)
            hipLaunchKernelGGL(( BS_explicit3<32,32>), dim3(blocks), dim3(threads), 0, 0, NX,NY,NZ, dS,
                               C1,C1,C1, C2,C2,C2, C3, 0.0,0.0,0.0,
                               (double2*)(U1_d), (double2*)(U2_d));
          else if (pass==3) {
            hipLaunchKernelGGL(( BS_implicit2_rhs<32,32>), dim3(blocks), dim3(threads), 0, 0, NX,NY,NZ, dS,
                               C1,C1,C1, C2,C2,C2, C3, 0.0,0.0,0.0, U1_d, U2_d);
            hipLaunchKernelGGL(( BS_implicit2_y<32,32>), dim3(NZ), dim3(dim3(COLS,32)), 0, 0, NX,NY,NZ, dS, C1,C2,C3, U2_d);
            hipLaunchKernelGGL(( BS_implicit2_z<32,32>), dim3(NY), dim3(dim3(COLS,32)), 0, 0, NX,NY,NZ, dS, C1,C2,C3, U2_d);
            hipLaunchKernelGGL(( BS_implicit2_x<32,32>), dim3(NZ), dim3(dim3(32,4)), 0, 0, NX,NY,NZ, dS, C1,C2,C3, U1_d, U2_d);
          }

          if (pass<3) {Foo_d=U1_d; U1_d=U2_d; U2_d=Foo_d;}  // swap U1, U2 pointers
        }
      }

      cudaSafeCall(hipEventRecord(stop));
      cudaSafeCall(hipEventSynchronize(stop));
      cudaSafeCall(hipEventElapsedTime(&milli, start, stop));

      // imid = (NX/2+1) + (NY/2+1)*(NX+2) + (NZ/2+1)*(NX+2)*(NY+2);
      imid = (NX/2+pad_left) + (NY/2+1)*(NX+pad_total)
           + (NZ/2+1)*(NX+pad_total)*(NY+2);

      if (prec==0) {
        cudaSafeCall(hipMemcpy(u_h,u1_d,prod*sizeof(float), hipMemcpyDeviceToHost));
        for (int i=0; i<NX; i++) {
          val = u_h[i+pad_left+(NX+pad_total)+(NX+pad_total)*(NY+2)];
          err = 0.0;
          for (int j=0; j<NY; j++) {
            for (int k=0; k<NZ; k++) {
              int ind = i+pad_left + (j+1)*(NX+pad_total)
                      + (k+1)*(NX+pad_total)*(NY+2);
              err = fmax(err,fabs(val-u_h[ind]));
              // if (i==NX/2 && k==NX/2) printf(" %d %f \n",j,u_h[ind]-u_h[imid]);
            }
          }
          if (err > 1e-2) printf(" %d %f \n",i,err);
        }
        val = u_h[imid];
      }
      else {
        cudaSafeCall(hipMemcpy(U_h,U1_d,prod*sizeof(double), hipMemcpyDeviceToHost));
        for (int i=0; i<NX; i++) {
          val = u_h[i+pad_left+(NX+pad_total)+(NX+pad_total)*(NY+2)];
          err = 0.0;
          for (int j=0; j<NY; j++) {
            for (int k=0; k<NZ; k++) {
              int ind = i+pad_left + (j+1)*(NX+pad_total)
                      + (k+1)*(NX+pad_total)*(NY+2);
              err = fmax(err,fabs(val-u_h[ind]));
            }
          }
          if (err > 1e-8) printf(" %d %f \n",i,err);
        }
        val = U_h[imid];
      }

      if (pass<3)
        printf("explicit%d %9.0f %38.6f \n",pass+1,milli,val);
      else
        printf("implicit%d %9.0f %38.6f \n",pass-1,milli,val);
    }
  }

  // CUDA exit -- needed to flush printf write buffer

  cudaSafeCall(hipDeviceSynchronize());
  cudaSafeCall(hipDeviceReset());

  return 0;
}
f0b9e6bcd2f9bfddf5efbaf61af63f6c95eb9d4f.cu
////////////////////////////////////////////////////////////////////
//                                                                //
// standard headers plus new one defining tridiagonal solvers     //
//                                                                //
////////////////////////////////////////////////////////////////////

#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

#include "trid.h"
#include "utilities.h"

#define COLS 16

////////////////////////////////////////////////////////////////////
//                                                                //
// error-checking utility                                         //
//                                                                //
////////////////////////////////////////////////////////////////////

#define cudaSafeCall(err)  __cudaSafeCall(err,__FILE__,__LINE__)

inline void __cudaSafeCall(cudaError err, const char *file, const int line){
  if(cudaSuccess != err) {
    printf("%s(%i) : cudaSafeCall() Runtime API error : %d %s.\n",
           file, line, err, cudaGetErrorString(err) );
    exit(-1);
  }
}

////////////////////////////////////////////////////////////////////
//                                                                //
// explicit Black-Scholes finite difference kernels               //
//                                                                //
////////////////////////////////////////////////////////////////////

//
// linear extrapolation b.c.
//

template <int pad_left, int pad_total, typename REAL>
__global__ void BS_bc1(int NX, int NY, int NZ, REAL *u1)
{
  int t, i, j, k, indg, IOFF, JOFF, KOFF;

  t    = threadIdx.x + blockIdx.x*blockDim.x;
  IOFF = 1;
  JOFF = NX+pad_total;
  KOFF = (NX+pad_total)*(NY+2);

  if (t<NX*NY) {
    i = t%NX;
    j = t/NX;
    k = NZ;
    indg = (i+pad_left) + (j+1)*JOFF + (k+1)*KOFF;
    u1[indg] = 2.0f*u1[indg-KOFF] - u1[indg-2*KOFF];
  }
  else if (t<NX*NY + NY*NZ) {
    t = t - NX*NY;
    j = t%NY;
    k = t/NY;
    i = NX;
    indg = (i+pad_left) + (j+1)*JOFF + (k+1)*KOFF;
    u1[indg] = 2.0f*u1[indg-IOFF] - u1[indg-2*IOFF];
  }
  else if (t<NX*NY + NY*NZ + NZ*NX) {
    t = t - NX*NY - NY*NZ;
    k = t%NZ;
    i = t/NZ;
    j = NY;
    indg = (i+pad_left) + (j+1)*JOFF + (k+1)*KOFF;
    u1[indg] = 2.0f*u1[indg-JOFF] - u1[indg-2*JOFF];
  }
}

//
// explicit solvers
//

template <int pad_left, int pad_total, typename REAL>
__global__ void BS_explicit1(int NX, int NY, int NZ, REAL dS,
                             REAL c1_1, REAL c1_2, REAL c1_3,
                             REAL c2_1, REAL c2_2, REAL c2_3, REAL c3,
                             REAL c12, REAL c13, REAL c23,
                             const REAL* __restrict__ u1,
                                   REAL* __restrict__ u2)
{
  REAL S1, S2, S3, t12, t13, t23;
  int  i, j, k, indg, active, IOFF, JOFF, KOFF;

  i = threadIdx.x + blockIdx.x*blockDim.x;
  j = threadIdx.y + blockIdx.y*blockDim.y;

  indg = (i+pad_left) + (j+1)*(NX+pad_total) + (NX+pad_total)*(NY+2);
  IOFF = 1;
  JOFF = NX+pad_total;
  KOFF = (NX+pad_total)*(NY+2);

  active = (i<NX) && (j<NY);

  if (active) {
    for (k=0; k<NZ; k++) {
      S1 = ((REAL) i)*dS;
      S2 = ((REAL) j)*dS;
      S3 = ((REAL) k)*dS;

      t12 = c12*S1*S2;
      t13 = c13*S1*S3;
      t23 = c23*S2*S3;

      u2[indg] = t23 * u1[indg-KOFF-JOFF]
               + t13 * u1[indg-KOFF-IOFF]
               + (c1_3*S3*S3 - c2_3*S3 - t13 - t23) * u1[indg-KOFF]
               + t12 * u1[indg-JOFF-IOFF]
               + (c1_2*S2*S2 - c2_2*S2 - t12 - t23) * u1[indg-JOFF]
               + (c1_1*S1*S1 - c2_1*S1 - t12 - t13) * u1[indg-IOFF]
               + (1.0f - c3 - 2.0f*( c1_1*S1*S1 + c1_2*S2*S2 + c1_3*S3*S3
                                   - t12 - t13 - t23 ) ) * u1[indg]
               + (c1_1*S1*S1 + c2_1*S1 - t12 - t13) * u1[indg+IOFF]
               + (c1_2*S2*S2 + c2_2*S2 - t12 - t23) * u1[indg+JOFF]
               + t12 * u1[indg+JOFF+IOFF]
               + (c1_3*S3*S3 + c2_3*S3 - t13 - t23) * u1[indg+KOFF]
               + t13 * u1[indg+KOFF+IOFF]
               + t23 * u1[indg+KOFF+JOFF];

      indg += KOFF;
    }
  }
}

template <int pad_left, int pad_total, typename REAL>
__global__ void BS_explicit2(int NX, int NY, int NZ, REAL dS,
                             REAL c1_1, REAL c1_2, REAL c1_3,
                             REAL c2_1, REAL c2_2, REAL c2_3, REAL c3,
                             REAL c12, REAL c13, REAL c23,
                             const REAL* __restrict__ u1,
                                   REAL* __restrict__ u2)
{
  REAL S1, S2, S3, t12, t13, t23;
  REAL u1_mm, u1_om, u1_mo, u1_m, u1_oo, u1_po, u1_op, u1_pp, u;
  int  i, j, k, indg, active, IOFF, JOFF, KOFF;

  i = threadIdx.x + blockIdx.x*blockDim.x;
  j = threadIdx.y + blockIdx.y*blockDim.y;

  indg = (i+pad_left) + (j+1)*(NX+pad_total) + (NX+pad_total)*(NY+2);
  IOFF = 1;
  JOFF = NX+pad_total;
  KOFF = (NX+pad_total)*(NY+2);

  active = (i<NX) && (j<NY);

  if (active) {
    u1_om = u1[indg-KOFF-JOFF];
    u1_mo = u1[indg-KOFF-IOFF];
    u1_m  = u1[indg-KOFF];
    u1_oo = u1[indg];
    u1_po = u1[indg+IOFF];
    u1_op = u1[indg+JOFF];

    for (k=0; k<NZ; k++) {
      S1 = ((REAL) i)*dS;
      S2 = ((REAL) j)*dS;
      S3 = ((REAL) k)*dS;

      t12 = c12*S1*S2;
      t13 = c13*S1*S3;
      t23 = c23*S2*S3;

      u = t23 * u1_om
        + t13 * u1_mo
        + (c1_3*S3*S3 - c2_3*S3 - t13 - t23) * u1_m;

      u1_mm = u1[indg-JOFF-IOFF];
      u1_om = u1[indg-JOFF];
      u1_mo = u1[indg-IOFF];
      u1_pp = u1[indg+IOFF+JOFF];

      u = u + t12 * u1_mm
            + (c1_2*S2*S2 - c2_2*S2 - t12 - t23) * u1_om
            + (c1_1*S1*S1 - c2_1*S1 - t12 - t13) * u1_mo
            + (1.0f - c3 - 2.0f*( c1_1*S1*S1 + c1_2*S2*S2 + c1_3*S3*S3
                                - t12 - t13 - t23 ) ) * u1_oo
            + (c1_1*S1*S1 + c2_1*S1 - t12 - t13) * u1_po
            + (c1_2*S2*S2 + c2_2*S2 - t12 - t23) * u1_op
            + t12 * u1_pp;

      indg += KOFF;

      u1_m  = u1_oo;
      u1_oo = u1[indg];
      u1_po = u1[indg+IOFF];
      u1_op = u1[indg+JOFF];

      u = u + (c1_3*S3*S3 + c2_3*S3 - t13 - t23) * u1_oo
            + t13 * u1_po
            + t23 * u1_op;

      u2[indg-KOFF] = u;
    }
  }
}

template <int pad_left, int pad_total, typename REAL, typename REAL2>
__launch_bounds__(256, 3)  // (max 256 threads per block, min 3 blocks per SMX)
__global__ void BS_explicit3(int NX, int NY, int NZ, REAL dS,
                             REAL c1_1, REAL c1_2, REAL c1_3,
                             REAL c2_1, REAL c2_2, REAL c2_3, REAL c3,
                             REAL c12, REAL c13, REAL c23,
                             const REAL2 * __restrict__ u1,
                                   REAL2 * __restrict__ u2)
{
  REAL  S1m, S1p, S2, S3, t12m, t12p, t13m, t13p, t23;
  int   i, j, k, indg, active, JOFF, KOFF;
  REAL2 u1_mm, u1_om, u1_pm, u1_mp, u1_op, u1_pp, u;
  REAL  u1_om_w, u1_mm_w, u1_pm_z, u1_op_z;

  i = threadIdx.x - 1 + blockIdx.x*(blockDim.x-2);
  j = threadIdx.y     + blockIdx.y*blockDim.y;

  JOFF = (NX+pad_total)/2;
  KOFF = JOFF*(NY+2);
  indg = i + pad_left/2 + (j+1)*JOFF;

  active = (i<=NX/2) && (j<NY);

  if (active) {
    u1_mm = u1[indg-JOFF];
    u1_om = u1[indg     ];
    u1_pm = u1[indg+JOFF];
    indg += KOFF;
    u1_mp = u1[indg-JOFF];
    u1_op = u1[indg     ];
    u1_pp = u1[indg+JOFF];

    u1_om_w = __shfl_up  (u1_om.y,1);
    u1_op_z = __shfl_down(u1_op.x,1);

    for (k=0; k<NZ; k++) {
      S1m = ((REAL) (2*i  ))*dS;
      S1p = ((REAL) (2*i+1))*dS;
      S2  = ((REAL) j)*dS;
      S3  = ((REAL) k)*dS;

      t12m = c12*S2*S1m;
      t12p = c12*S2*S1p;
      t13m = c13*S3*S1m;
      t13p = c13*S3*S1p;
      t23  = c23*S2*S3;

      u.x = t23  * u1_mm.x
          + t13m * u1_om_w
          + (c1_3*S3*S3 - c2_3*S3 - t13m - t23) * u1_om.x;
      u.y = t23  * u1_mm.y
          + t13p * u1_om.x
          + (c1_3*S3*S3 - c2_3*S3 - t13p - t23) * u1_om.y;

      u1_mm = u1_mp;
      u1_om = u1_op;
      u1_pm = u1_pp;

      u1_mm_w = __shfl_up  (u1_mm.y,1);
   // u1_mm_z = __shfl_down(u1_mm.x,1);
      u1_om_w = __shfl_up  (u1_om.y,1);
   // u1_om_z = __shfl_down(u1_om.x,1);  == u1_op_z
   // u1_pm_w = __shfl_up  (u1_pm.y,1);
      u1_pm_z = __shfl_down(u1_pm.x,1);

      u.x = u.x + t12m * u1_mm_w
                + (c1_2*S2*S2 - c2_2*S2 - t12m - t23 ) * u1_mm.x
                + (c1_1*S1m*S1m - c2_1*S1m - t12m - t13m) * u1_om_w
                + (1.0f - c3 - 2.0f*( c1_1*S1m*S1m + c1_2*S2*S2 + c1_3*S3*S3
                                    - t12m - t13m - t23 ) ) * u1_om.x
                + (c1_1*S1m*S1m + c2_1*S1m - t12m - t13m) * u1_om.y
                + (c1_2*S2*S2 + c2_2*S2 - t12m - t23 ) * u1_pm.x
                + t12m * u1_pm.y;

      u.y = u.y + t12p * u1_mm.x
                + (c1_2*S2*S2 - c2_2*S2 - t12p - t23 ) * u1_mm.y
                + (c1_1*S1p*S1p - c2_1*S1p - t12p - t13p) * u1_om.x
                + (1.0f - c3 - 2.0f*( c1_1*S1p*S1p + c1_2*S2*S2 + c1_3*S3*S3
                                    - t12p - t13p - t23 ) ) * u1_om.y
                + (c1_1*S1p*S1p + c2_1*S1p - t12p - t13p) * u1_op_z
                + (c1_2*S2*S2 + c2_2*S2 - t12p - t23 ) * u1_pm.y
                + t12p * u1_pm_z;

      indg += KOFF;

      u1_mp = u1[indg-JOFF];
      u1_op = u1[indg     ];
      u1_pp = u1[indg+JOFF];

      u1_op_z = __shfl_down(u1_op.x,1);

      u.x = u.x + (c1_3*S3*S3 + c2_3*S3 - t13m - t23) * u1_op.x
                + t13m * u1_op.y
                + t23  * u1_pp.x;
      u.y = u.y + (c1_3*S3*S3 + c2_3*S3 - t13p - t23) * u1_op.y
                + t13p * u1_op_z
                + t23  * u1_pp.y;

      if (threadIdx.x>0 && threadIdx.x<blockDim.x-1 && i<NX/2)
        u2[indg-KOFF] = u;
    }
  }
}

////////////////////////////////////////////////////////////////////
//                                                                //
// implicit Black-Scholes finite difference kernels               //
//                                                                //
////////////////////////////////////////////////////////////////////

template <int pad_left, int pad_total, typename REAL>
__global__ void BS_implicit2_rhs(int NX, int NY, int NZ, REAL dS,
                                 REAL c1_1, REAL c1_2, REAL c1_3,
                                 REAL c2_1, REAL c2_2, REAL c2_3, REAL c3,
                                 REAL c12, REAL c13, REAL c23,
                                 const REAL* __restrict__ u1,
                                       REAL* __restrict__ u2)
{
  REAL S1, S2, S3, t12, t13, t23;
  REAL u1_mm, u1_om, u1_mo, u1_m, u1_oo, u1_po, u1_op, u1_pp, u;
  int  i, j, k, indg, active, IOFF, JOFF, KOFF;

  i = threadIdx.x + blockIdx.x*blockDim.x;
  j = threadIdx.y + blockIdx.y*blockDim.y;

  indg = (i+pad_left) + (j+1)*(NX+pad_total) + (NX+pad_total)*(NY+2);
  IOFF = 1;
  JOFF = NX+pad_total;
  KOFF = (NX+pad_total)*(NY+2);

  active = (i<NX) && (j<NY);

  if (active) {
    u1_om = u1[indg-KOFF-JOFF];
    u1_mo = u1[indg-KOFF-IOFF];
    u1_m  = u1[indg-KOFF];
    u1_oo = u1[indg];
    u1_po = u1[indg+IOFF];
    u1_op = u1[indg+JOFF];

    for (k=0; k<NZ; k++) {
      S1 = ((REAL) i)*dS;
      S2 = ((REAL) j)*dS;
      S3 = ((REAL) k)*dS;

      t12 = c12*S1*S2;
      t13 = c13*S1*S3;
      t23 = c23*S2*S3;

      u = t23 * u1_om
        + t13 * u1_mo
        + (c1_3*S3*S3 - c2_3*S3 - t13 - t23) * u1_m;

      u1_mm = u1[indg-JOFF-IOFF];
      u1_om = u1[indg-JOFF];
      u1_mo = u1[indg-IOFF];
      u1_pp = u1[indg+IOFF+JOFF];

      u = u + t12 * u1_mm
            + (c1_2*S2*S2 - c2_2*S2 - t12 - t23) * u1_om
            + (c1_1*S1*S1 - c2_1*S1 - t12 - t13) * u1_mo
            + ( - c3 - 2.0f*( c1_1*S1*S1 + c1_2*S2*S2 + c1_3*S3*S3
                            - t12 - t13 - t23 ) ) * u1_oo
            + (c1_1*S1*S1 + c2_1*S1 - t12 - t13) * u1_po
            + (c1_2*S2*S2 + c2_2*S2 - t12 - t23) * u1_op
            + t12 * u1_pp;

      indg += KOFF;

      u1_m  = u1_oo;
      u1_oo = u1[indg];
      u1_po = u1[indg+IOFF];
      u1_op = u1[indg+JOFF];

      u = u + (c1_3*S3*S3 + c2_3*S3 - t13 - t23) * u1_oo
            + t13 * u1_po
            + t23 * u1_op;

      u2[indg-KOFF] = u;
    }
  }
}

//
// solves tridiagonal equations in x-direction, and increments solution
//

template <int pad_left, int pad_total, typename REAL>
__global__ void BS_implicit2_x(int NX, int NY, int NZ, REAL dS,
                               REAL c1, REAL c2, REAL c3,
                                     REAL* __restrict__ u,
                               const REAL* __restrict__ rhs )
{
  volatile __shared__ REAL smem[(256+8)*4];
  REAL S, lambda, gamma, a[8], b[8], c[8], d[8];
  int  j, k, tid;

  tid = threadIdx.x;
  j   = threadIdx.y;
  k   = blockIdx.x;

  rhs = rhs + pad_left + (j+1)*(NX+pad_total) + (k+1)*(NX+pad_total)*(NY+2);
  u   = u   + pad_left + (j+1)*(NX+pad_total) + (k+1)*(NX+pad_total)*(NY+2);

  for ( ; j<NY; j=j+4) {
    for (int i=0; i<8; i++) {
      S      = (8*tid+i) * dS;
      lambda = c1*S*S;
      gamma  = c2*S;
      a[i] = - ( lambda - gamma );
      b[i] = 1.0f + c3 + 2.0f*lambda;
      c[i] = - ( lambda + gamma );
    }

    if (tid==31) {
      a[7] = + 2.0f*gamma;
      b[7] = 1.0f + c3 - 2.0f*gamma;
      c[7] = 0.0f;
    }

    int off = threadIdx.y*(256+8);
    loadDataIntoRegisters_contig<8,32>(tid,256,d,smem+off,rhs,(REAL)0.0);

    trid_warp<8>(a,b,c,d);

    incDataFromRegisters_contig<8,32>(tid,256,d,smem+off,u);

    rhs = rhs + 4*(NX+pad_total);  // increment pointers for next line
    u   = u   + 4*(NX+pad_total);
  }
}

//
// solves tridiagonal equations in y-direction
//

template <int pad_left, int pad_total, typename REAL>
__global__ void BS_implicit2_y(int NX, int NY, int NZ, REAL dS,
                               REAL c1, REAL c2, REAL c3,
                               REAL* __restrict__ u )
{
  __shared__ REAL s1[33*COLS], s2[33*COLS];
  REAL S, lambda, gamma, a[8], b[8], c[8], d[8];
  int  i, j, k, tid, ind1, ind2;

  tid  = threadIdx.x + threadIdx.y*COLS;
  ind1 = tid + (tid/32);
  ind2 = (tid/32) + (tid%32)*COLS;
  ind2 += ind2 / 32;

  i = threadIdx.x;
  j = 8*threadIdx.y;
  k = blockIdx.x;

  u = u + i + pad_left + (j+1)*(NX+pad_total) + (k+1)*(NX+pad_total)*(NY+2);

  for (i=threadIdx.x; i<NX; i=i+COLS) {
    for (int n=0; n<8; n++) {
      S      = (j+n) * dS;
      lambda = c1*S*S;
      gamma  = c2*S;
      a[n] = - ( lambda - gamma );
      b[n] = 1.0f + 2.0f*lambda;
      c[n] = - ( lambda + gamma );
      d[n] = u[n*(NX+pad_total)];
    }

    if (threadIdx.y==31) {
      a[7] = + 2.0f*gamma;
      b[7] = 1.0f - 2.0f*gamma;
      c[7] = 0.0f;
    }

    trid_warp_part1<8>(a,b,c,d);

    s1[ind1] = a[0];
    s2[ind1] = a[7];
    __syncthreads();
    a[0] = s1[ind2];
    a[7] = s2[ind2];
    __syncthreads();
    s1[ind1] = c[0];
    s2[ind1] = c[7];
    __syncthreads();
    c[0] = s1[ind2];
    c[7] = s2[ind2];
    __syncthreads();
    s1[ind1] = d[0];
    s2[ind1] = d[7];
    __syncthreads();
    d[0] = s1[ind2];
    d[7] = s2[ind2];

    trid2_warp(a[0],c[0],d[0],a[7],c[7],d[7]);

    s1[ind2] = d[0];
    s2[ind2] = d[7];
    __syncthreads();
    d[0] = s1[ind1];
    d[7] = s2[ind1];

    for (int n=1; n<7; n++)
      d[n] = d[n] - a[n]*d[0] - c[n]*d[7];

    for (int n=0; n<8; n++)
      u[n*(NX+pad_total)] = d[n];

    u = u + COLS;  // increment pointers for next lines
  }
}

//
// similar to BS_implicit2_y but solving in z-direction
//

template <int pad_left, int pad_total, typename REAL>
__global__ void BS_implicit2_z(int NX, int NY, int NZ, REAL dS,
                               REAL c1, REAL c2, REAL c3,
                               REAL* __restrict__ u )
{
  __shared__ REAL s1[33*COLS], s2[33*COLS];
  REAL S, lambda, gamma, a[8], b[8], c[8], d[8];
  int  i, j, k, tid, ind1, ind2;

  tid  = threadIdx.x + threadIdx.y*COLS;
  ind1 = tid + (tid/32);
  ind2 = (tid/32) + (tid%32)*COLS;
  ind2 += ind2 / 32;

  i = threadIdx.x;
  j = blockIdx.x;      // swapping j, k in these two lines
  k = 8*threadIdx.y;   // is one difference from implicit2_y

  u = u + i + pad_left + (j+1)*(NX+pad_total) + (k+1)*(NX+pad_total)*(NY+2);

  for (i=threadIdx.x; i<NX; i=i+COLS) {
    for (int n=0; n<8; n++) {
      S      = (k+n) * dS;  // changing j to k here is another
      lambda = c1*S*S;
      gamma  = c2*S;
      a[n] = - ( lambda - gamma );
      b[n] = 1.0f + 2.0f*lambda;
      c[n] = - ( lambda + gamma );
      d[n] = u[n*(NX+pad_total)*(NY+2)];  // and a different offset here ...
    }

    if (threadIdx.y==31) {
      a[7] = + 2.0f*gamma;
      b[7] = 1.0f - 2.0f*gamma;
      c[7] = 0.0f;
    }

    trid_warp_part1<8>(a,b,c,d);

    s1[ind1] = a[0];
    s2[ind1] = a[7];
    __syncthreads();
    a[0] = s1[ind2];
    a[7] = s2[ind2];
    __syncthreads();
    s1[ind1] = c[0];
    s2[ind1] = c[7];
    __syncthreads();
    c[0] = s1[ind2];
    c[7] = s2[ind2];
    __syncthreads();
    s1[ind1] = d[0];
    s2[ind1] = d[7];
    __syncthreads();
    d[0] = s1[ind2];
    d[7] = s2[ind2];

    trid2_warp(a[0],c[0],d[0],a[7],c[7],d[7]);

    s1[ind2] = d[0];
    s2[ind2] = d[7];
    __syncthreads();
    d[0] = s1[ind1];
    d[7] = s2[ind1];

    for (int n=1; n<7; n++)
      d[n] = d[n] - a[n]*d[0] - c[n]*d[7];

    for (int n=0; n<8; n++)
      u[n*(NX+pad_total)*(NY+2)] = d[n];  // ... and here

    u = u + COLS;  // increment pointers for next lines
  }
}

////////////////////////////////////////////////////////////////////
//                                                                //
// main code to test all solvers for single & double precision    //
//                                                                //
////////////////////////////////////////////////////////////////////

int main(int argc, char **argv) {

  int    NX=256, NY=256, NZ=256, N, imid;
  float  *u_h, *u1_d, *u2_d, *foo_d;
  double *U_h, *U1_d, *U2_d, *Foo_d, val, err;
  int    pad_left, pad_total;

  // initialise CUDA timing

  float milli;
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  // allocate memory for arrays

  int prod = (NX+32)*(NY+2)*(NZ+2)+2;
  u_h = (float *)malloc(prod*sizeof(float));
  // U_h = (double *)malloc(prod*sizeof(double));
  cudaSafeCall(cudaMalloc((void **)&u1_d, (prod+1)*sizeof(float)));
  // cudaSafeCall(cudaMalloc((void **)&U1_d, (prod+1)*sizeof(double)));
  cudaSafeCall(cudaMalloc((void **)&u2_d, (prod+1)*sizeof(float)));
  // cudaSafeCall(cudaMalloc((void **)&U2_d, (prod+1)*sizeof(double)));

  // execute kernels

  for (int prec=0; prec<1; prec++) {
    if (prec==0) {
      printf("\nsingle precision performance tests \n");
      cudaSafeCall(cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeFourByte));
    }
    else {
      printf("\ndouble precision performance tests \n");
      cudaSafeCall(cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte));
    }

    printf("---------------------------------- \n");
    printf(" method exec time GFinsts GFlops value at strike \n");

    for (int pass=0; pass<4; pass++) {
      pad_left  = 32;
      pad_total = 32;

      if (pass<3) {
        N = 500;
        cudaSafeCall(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1));
      }
      else {
        N = 100;
        //cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
        cudaSafeCall(cudaFuncSetCacheConfig(BS_implicit2_x<32,32,double>,
                                            cudaFuncCachePreferShared));
        cudaSafeCall(cudaFuncSetCacheConfig(BS_implicit2_y<32,32,double>,
                                            cudaFuncCachePreferL1));
        cudaSafeCall(cudaFuncSetCacheConfig(BS_implicit2_z<32,32,double>,
                                            cudaFuncCachePreferL1));
        cudaSafeCall(cudaFuncSetCacheConfig(BS_implicit2_rhs<32,32,double>,
                                            cudaFuncCachePreferL1));
        cudaSafeCall(cudaFuncSetCacheConfig(BS_implicit2_x<32,32,float>,
                                            cudaFuncCachePreferShared));
        cudaSafeCall(cudaFuncSetCacheConfig(BS_implicit2_y<32,32,float>,
                                            cudaFuncCachePreferL1));
        cudaSafeCall(cudaFuncSetCacheConfig(BS_implicit2_z<32,32,float>,
                                            cudaFuncCachePreferL1));
        cudaSafeCall(cudaFuncSetCacheConfig(BS_implicit2_rhs<32,32,float>,
                                            cudaFuncCachePreferL1));
      }

      double Smax=200.0, K=100.0, r=0.05, sigma=0.2, T=0.05;

      double dS = Smax / 255.0;
      double dt = T / ( (double) N);

      double C1 = 0.5*dt*sigma*sigma / (dS*dS);
      double C2 = 0.5*dt*r / dS;
      double C3 = r*dt;

      float c1=C1, c2=C2, c3=C3, ds=dS;

      // initialise array (call on minimum of 3 assets) and copy over

      for (int i=-1; i<NX; i++) {
        for (int j=-1; j<NY; j++) {
          for (int k=-1; k<NZ; k++) {
            int indg = (i+pad_left) + (j+1)*(NX+pad_total)
                     + (k+1)*(NX+pad_total)*(NY+2);
            // U_h[indg] = fmax(0.0, fmin(i*dS, fmin(j*dS,k*dS)) - K);
            U_h[indg] = fmax(0.0, i*dS-K);
            u_h[indg] = U_h[indg];
          }
        }
      }

      if (prec==0) {
        cudaSafeCall(cudaMemcpy(u1_d,u_h, prod*sizeof(float) ,cudaMemcpyHostToDevice));
        cudaSafeCall(cudaMemcpy(u2_d,u_h, prod*sizeof(float) ,cudaMemcpyHostToDevice));
      }
      else {
        cudaSafeCall(cudaMemcpy(U1_d,U_h, prod*sizeof(double),cudaMemcpyHostToDevice));
        cudaSafeCall(cudaMemcpy(U2_d,U_h, prod*sizeof(double),cudaMemcpyHostToDevice));
      }

      // now do main computation

      int BLOCK_X = 64;
      int BLOCK_Y = 4;

      int bc_threads = BLOCK_X*BLOCK_Y;
      int bc_blocks  = 1 + (NX*NY + NY*NZ + NZ*NX - 1) / bc_threads;

      int bx = 1 + (NX-1)/BLOCK_X;
      int by = 1 + (NY-1)/BLOCK_Y;

      if (pass==2) {
        BLOCK_X = 32;
        BLOCK_Y = 8;
        bx = 1 + (NX/2-1)/(BLOCK_X-2);
        by = 1 + (NY-1)/BLOCK_Y;
      }

      dim3 threads(BLOCK_X,BLOCK_Y);
      dim3 blocks(bx,by);

      cudaEventRecord(start);

      for (int n=1; n<=N; n++) {
        if (prec==0) {
          BS_bc1<32,32><<<bc_blocks, bc_threads>>>(NX,NY,NZ, u1_d);

          if (pass==0)
            BS_explicit1<32,32><<<blocks, threads>>>(NX,NY,NZ, ds,
                 c1,c1,c1, c2,c2,c2, c3, 0.0f,0.0f,0.0f, u1_d, u2_d);
          else if (pass==1)
            BS_explicit2<32,32><<<blocks, threads>>>(NX,NY,NZ, ds,
                 c1,c1,c1, c2,c2,c2, c3, 0.0f,0.0f,0.0f, u1_d, u2_d);
          else if (pass==2)
            BS_explicit3<32,32><<<blocks, threads>>>(NX,NY,NZ, ds,
                 c1,c1,c1, c2,c2,c2, c3, 0.0f,0.0f,0.0f,
                 (float2*)(u1_d), (float2*)(u2_d));
          else if (pass==3) {
            BS_implicit2_rhs<32,32><<<blocks, threads>>>(NX,NY,NZ, ds,
                 c1,c1,c1, c2,c2,c2, c3, 0.0f,0.0f,0.0f, u1_d, u2_d);
            BS_implicit2_y<32,32><<<NZ, dim3(COLS,32)>>>(NX,NY,NZ, ds, c1,c2,c3, u2_d);
            BS_implicit2_z<32,32><<<NY, dim3(COLS,32)>>>(NX,NY,NZ, ds, c1,c2,c3, u2_d);
            BS_implicit2_x<32,32><<<NZ, dim3(32,4)>>>(NX,NY,NZ, ds, c1,c2,c3, u1_d, u2_d);
          }

          if (pass<3) {foo_d=u1_d; u1_d=u2_d; u2_d=foo_d;}  // swap u1, u2 pointers
        }
        else {
          BS_bc1<32,32><<<bc_blocks, bc_threads>>>(NX,NY,NZ, U1_d);

          if (pass==0)
            BS_explicit1<32,32><<<blocks, threads>>>(NX,NY,NZ, dS,
                 C1,C1,C1, C2,C2,C2, C3, 0.0,0.0,0.0, U1_d, U2_d);
          else if (pass==1)
            BS_explicit2<32,32><<<blocks, threads>>>(NX,NY,NZ, dS,
                 C1,C1,C1, C2,C2,C2, C3, 0.0,0.0,0.0, U1_d, U2_d);
          else if (pass==2)
            BS_explicit3<32,32><<<blocks, threads>>>(NX,NY,NZ, dS,
                 C1,C1,C1, C2,C2,C2, C3, 0.0,0.0,0.0,
                 (double2*)(U1_d), (double2*)(U2_d));
          else if (pass==3) {
            BS_implicit2_rhs<32,32><<<blocks, threads>>>(NX,NY,NZ, dS,
                 C1,C1,C1, C2,C2,C2, C3, 0.0,0.0,0.0, U1_d, U2_d);
            BS_implicit2_y<32,32><<<NZ, dim3(COLS,32)>>>(NX,NY,NZ, dS, C1,C2,C3, U2_d);
            BS_implicit2_z<32,32><<<NY, dim3(COLS,32)>>>(NX,NY,NZ, dS, C1,C2,C3, U2_d);
            BS_implicit2_x<32,32><<<NZ, dim3(32,4)>>>(NX,NY,NZ, dS, C1,C2,C3, U1_d, U2_d);
          }

          if (pass<3) {Foo_d=U1_d; U1_d=U2_d; U2_d=Foo_d;}  // swap U1, U2 pointers
        }
      }

      cudaSafeCall(cudaEventRecord(stop));
      cudaSafeCall(cudaEventSynchronize(stop));
      cudaSafeCall(cudaEventElapsedTime(&milli, start, stop));

      // imid = (NX/2+1) + (NY/2+1)*(NX+2) + (NZ/2+1)*(NX+2)*(NY+2);
      imid = (NX/2+pad_left) + (NY/2+1)*(NX+pad_total)
           + (NZ/2+1)*(NX+pad_total)*(NY+2);

      if (prec==0) {
        cudaSafeCall(cudaMemcpy(u_h,u1_d,prod*sizeof(float), cudaMemcpyDeviceToHost));
        for (int i=0; i<NX; i++) {
          val = u_h[i+pad_left+(NX+pad_total)+(NX+pad_total)*(NY+2)];
          err = 0.0;
          for (int j=0; j<NY; j++) {
            for (int k=0; k<NZ; k++) {
              int ind = i+pad_left + (j+1)*(NX+pad_total)
                      + (k+1)*(NX+pad_total)*(NY+2);
              err = fmax(err,fabs(val-u_h[ind]));
              // if (i==NX/2 && k==NX/2) printf(" %d %f \n",j,u_h[ind]-u_h[imid]);
            }
          }
          if (err > 1e-2) printf(" %d %f \n",i,err);
        }
        val = u_h[imid];
      }
      else {
        cudaSafeCall(cudaMemcpy(U_h,U1_d,prod*sizeof(double), cudaMemcpyDeviceToHost));
        for (int i=0; i<NX; i++) {
          val = u_h[i+pad_left+(NX+pad_total)+(NX+pad_total)*(NY+2)];
          err = 0.0;
          for (int j=0; j<NY; j++) {
            for (int k=0; k<NZ; k++) {
              int ind = i+pad_left + (j+1)*(NX+pad_total)
                      + (k+1)*(NX+pad_total)*(NY+2);
              err = fmax(err,fabs(val-u_h[ind]));
            }
          }
          if (err > 1e-8) printf(" %d %f \n",i,err);
        }
        val = U_h[imid];
      }

      if (pass<3)
        printf("explicit%d %9.0f %38.6f \n",pass+1,milli,val);
      else
        printf("implicit%d %9.0f %38.6f \n",pass-1,milli,val);
    }
  }

  // CUDA exit -- needed to flush printf write buffer

  cudaSafeCall(cudaThreadSynchronize());
  cudaSafeCall(cudaDeviceReset());

  return 0;
}
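The two files above form one row of the dataset: a .cu source and the .hip output hipify produced from it, differing only in API spellings (cuda.h becomes hip/hip_runtime.h, cudaMalloc becomes hipMalloc, kernel<<<...>>> launches become hipLaunchKernelGGL, and so on). A minimal sketch of surfacing those rewrites with a line diff, assuming cuda_src and hip_src hold the two content strings from a row:

    # Minimal sketch: diff one (cuda_content, hip_content) pair to list the
    # hipify rewrites; cuda_src and hip_src are assumed inputs.
    import difflib

    def show_rewrites(cuda_src, hip_src, limit=40):
        diff = difflib.unified_diff(cuda_src.splitlines(), hip_src.splitlines(),
                                    fromfile="cuda", tofile="hip", lineterm="")
        for n, line in enumerate(diff):
            if n >= limit:   # cap the output for large files
                break
            print(line)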
462a6d68c6c41c3272f3bd254d7cd4a89eeeb7cc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/cuda/include/hl_base.h"
#include "paddle/function/RowConvOp.h"

namespace paddle {

template <int BLOCK_H, int BLOCK_W>
__global__ void KeRowConv(real* y,
                          const real* x,
                          const real* w,
                          const int* starts,
                          const int height,
                          const int width,
                          const int numSeq,
                          const int context) {
  const int tidx = threadIdx.x;
  const int tidy = threadIdx.y;
  const int blky = blockDim.y;
  const int gidx = blockIdx.x * blockDim.x;

  __shared__ real sw[BLOCK_H][BLOCK_W];

  for (int i = tidy; i < context; i += blky) {
    sw[i][tidx] = gidx + tidx < width ? w[i * width + gidx + tidx] : 0.0;
  }

  __syncthreads();

  for (int i = 0; i < numSeq; ++i) {
    const int start = starts[i];
    const int end = starts[i + 1];
    const int steps = end - start;
    for (int j = tidy; j < steps; j += blky) {
      real sum = 0;
      int off = (start + j) * width;
      for (int t = 0; t < context; ++t) {
        if ((start + j + t) < end) {
          int xoff = off + t * width;
          real xVal = gidx + tidx < width ? x[xoff + gidx + tidx] : 0.0;
          sum += sw[t][tidx] * xVal;
        }
      }
      if (gidx + tidx < width) {
        y[off + gidx + tidx] += sum;
      }
    }
  }
}

__global__ void KeRowConv2(real* y,
                           const real* x,
                           const real* w,
                           const int* starts,
                           const int height,
                           const int width,
                           const int numSeq,
                           const int context) {
  const int tidx = threadIdx.x;
  const int tidy = threadIdx.y;
  const int blky = blockDim.y;
  const int gidx = blockIdx.x * blockDim.x;

  for (int i = 0; i < numSeq; ++i) {
    const int start = starts[i];
    const int end = starts[i + 1];
    const int steps = end - start;
    for (int j = tidy; j < steps; j += blky) {
      int off = (start + j) * width;
      real sum = 0;
      for (int t = 0; t < context && (start + j + t) < end; ++t) {
        int xoff = off + t * width;
        real xd = gidx + tidx < width ? x[xoff + gidx + tidx] : 0.0;
        real wd = gidx + tidx < width ? w[t * width + gidx + tidx] : 0.0;
        sum += wd * xd;
      }
      if (gidx + tidx < width) {
        y[off + gidx + tidx] += sum;
      }
    }
  }
}

template <>
void RowConv<DEVICE_TYPE_GPU>(GpuMatrix& out,  // NOLINT
                              const GpuMatrix& in,
                              const GpuMatrix& filter,
                              const GpuIVector& seq) {
  const size_t numSeq = seq.getSize() - 1;
  const size_t contextLength = filter.getHeight();
  const size_t height = in.getHeight();
  const size_t width = in.getWidth();

  real* y = out.getData();
  const real* x = in.getData();
  const real* w = filter.getData();
  const int* starts = seq.getData();

  dim3 dimBlock(32, 32);
  dim3 dimGrid(DIVUP(width, dimBlock.x), 1);

  if (contextLength <= 32) {
    hipLaunchKernelGGL(( KeRowConv<32, 32>), dim3(dimGrid), dim3(dimBlock), 0, STREAM_DEFAULT,
        y, x, w, starts, height, width, numSeq, contextLength);
  } else {
    hipLaunchKernelGGL(( KeRowConv2), dim3(dimGrid), dim3(dimBlock), 0, STREAM_DEFAULT,
        y, x, w, starts, height, width, numSeq, contextLength);
  }
  CHECK_SYNC("RowConv");
}

template <int BLOCK_H, int BLOCK_W, int CONTEXT>
__global__ void KeRowConvBwWeight(real* dw,
                                  const real* x,
                                  const real* dy,
                                  const int* starts,
                                  const int height,
                                  const int width,
                                  const int numSeq,
                                  const int context) {
  const int tidx = threadIdx.x;
  const int tidy = threadIdx.y;
  const int blky = blockDim.y;
  const int gidx = blockIdx.x * blockDim.x;

  __shared__ real sh_x[BLOCK_W][BLOCK_H];
  __shared__ real sh_dy[BLOCK_W][BLOCK_H + CONTEXT - 1];
  __shared__ real sh_dw[CONTEXT][BLOCK_W];

  if (tidy < context) {
    sh_dw[tidy][tidx] = 0.0;
  }
  __syncthreads();

  // NOTE(zcd): temporary solution
  unsigned mask = 0u;
  CREATE_SHFL_MASK(mask, true);

  for (int i = 0; i < numSeq; ++i) {
    const int start = starts[i];
    const int end = starts[i + 1];
    const int steps = end - start;
    const int size = ((steps + BLOCK_H - 1) / BLOCK_H) * BLOCK_H;
    for (int j = tidy; j < size; j += BLOCK_H) {
      int xoff = gidx + tidx;
      int yoff = start + j;

      // transpose
      sh_x[tidx][tidy] =
          (xoff < width && yoff < end) ? x[yoff * width + xoff] : 0.0;
      sh_dy[tidx][tidy + context - 1] =
          (xoff < width && yoff < end) ? dy[yoff * width + xoff] : 0.0;
      __syncthreads();
      if (tidy < (context - 1)) {
        yoff = yoff - context + 1;
        sh_dy[tidx][tidy] =
            (xoff < width && yoff >= start) ? dy[yoff * width + xoff] : 0.0;
      }
      __syncthreads();

      for (int t = 0; t < context; t++) {
        real val = sh_x[tidy][tidx] * sh_dy[tidy][tidx + context - 1 - t];

        __syncthreads();
        // warp size and blockDim.x is 32.
        for (int offset = 16; offset > 0; offset /= 2)
          val += __shfl_down_sync(mask, val, offset);

        __syncthreads();
        if (tidx == 0) {
          sh_dw[t][tidy] += val;
        }
        __syncthreads();
      }
    }
  }

  for (int t = tidy; (t < context) && ((gidx + tidx) < width); t += blky) {
    dw[t * width + gidx + tidx] += sh_dw[t][tidx];
  }
}

template <int BLOCK_H, int BLOCK_W>
__global__ void KeRowConvBwWeight2(real* dw,
                                   const real* x,
                                   const real* dy,
                                   const int* starts,
                                   const int height,
                                   const int width,
                                   const int numSeq,
                                   const int context) {
  const int tidx = threadIdx.x;
  const int tidy = threadIdx.y;
  const int gidx = blockIdx.x * blockDim.x;

  __shared__ real sh_x[BLOCK_H][BLOCK_W];
  __shared__ real sh_dy[BLOCK_H][BLOCK_W];

  // NOTE(zcd): temporary solution
  unsigned mask = 0u;
  CREATE_SHFL_MASK(mask, true);

  for (int i = 0; i < numSeq; ++i) {
    const int start = starts[i];
    const int end = starts[i + 1];
    const int steps = end - start;
    const int size = ((steps + BLOCK_H - 1) / BLOCK_H) * BLOCK_H;

    for (int j = tidy; j < size; j += BLOCK_H) {
      int xoff = gidx + tidx;
      int yoff = start + j;

      // transpose
      sh_x[tidx][tidy] =
          (xoff < width && yoff < end) ? x[yoff * width + xoff] : 0.0;
      __syncthreads();

      for (int t = 0; t < context; t++) {
        sh_dy[tidx][tidy] =
            (xoff < width && (yoff - t) >= start && yoff - t < end)
                ? dy[(yoff - t) * width + xoff]
                : 0.0;
        __syncthreads();

        real val = sh_x[tidy][tidx] * sh_dy[tidy][tidx];

        __syncthreads();
        // warp size and blockDim.x is 32.
        for (int offset = 16; offset > 0; offset /= 2)
          val += __shfl_down_sync(mask, val, offset);

        __syncthreads();
        if (tidx == 0 && (gidx + tidy) < width) {
          dw[t * width + gidx + tidy] += val;
        }
      }
    }
  }
}

template <int BLOCK_H, int BLOCK_W>
__global__ void KeRowConvBwData(real* dx,
                                const real* w,
                                const real* dy,
                                const int* starts,
                                const int height,
                                const int width,
                                const int numSeq,
                                const int context) {
  const int tidx = threadIdx.x;
  const int tidy = threadIdx.y;
  const int blky = blockDim.y;
  const int gidx = blockIdx.x * blockDim.x;

  __shared__ real sw[BLOCK_H][BLOCK_W];

  for (int i = tidy; i < context; i += blky) {
    sw[i][tidx] = gidx + tidx < width ? w[i * width + gidx + tidx] : 0.0;
  }

  __syncthreads();

  for (int i = 0; i < numSeq; ++i) {
    const int start = starts[i];
    const int end = starts[i + 1];
    const int steps = end - start;
    for (int j = tidy; j < steps; j += blky) {
      real sum = 0;
      int off = (start + j) * width;
      for (int t = 0; t < context && (j - t) >= 0; ++t) {
        int dyOff = off - t * width;
        real dyVal = gidx + tidx < width ? dy[dyOff + gidx + tidx] : 0.0;
        sum += sw[t][tidx] * dyVal;
      }
      if (gidx + tidx < width) {
        dx[off + gidx + tidx] += sum;
      }
    }
  }
}

__global__ void KeRowConvBwData2(real* dx,
                                 const real* w,
                                 const real* dy,
                                 const int* starts,
                                 const int height,
                                 const int width,
                                 const int numSeq,
                                 const int context) {
  const int tidx = threadIdx.x;
  const int tidy = threadIdx.y;
  const int blky = blockDim.y;
  const int gidx = blockIdx.x * blockDim.x;

  for (int i = 0; i < numSeq; ++i) {
    const int start = starts[i];
    const int end = starts[i + 1];
    const int steps = end - start;
    for (int j = tidy; j < steps; j += blky) {
      real sum = 0;
      int off = (start + j) * width;
      for (int t = 0; t < context && (j - t) >= 0; ++t) {
        int dyOff = off - t * width;
        real dyVal = gidx + tidx < width ? dy[dyOff + gidx + tidx] : 0.0;
        real wVal = gidx + tidx < width ? w[t * width + gidx + tidx] : 0.0;
        sum += wVal * dyVal;
      }
      if (gidx + tidx < width) {
        dx[off + gidx + tidx] += sum;
      }
    }
  }
}

template <>
void RowConvGrad<DEVICE_TYPE_GPU>(const GpuMatrix& outG,
                                  const GpuMatrix& in,
                                  const GpuMatrix& filter,
                                  GpuMatrix& inG,      // NOLINT
                                  GpuMatrix& filterG,  // NOLINT
                                  const GpuIVector& seq) {
  const size_t numSeq = seq.getSize() - 1;
  const size_t contextLength = filter.getHeight();
  const size_t height = in.getHeight();
  const size_t width = in.getWidth();

  const real* dy = outG.getData();
  const real* x = in.getData();
  const real* w = filter.getData();
  const int* starts = seq.getData();

  if (filterG) {
    dim3 dimBlock(32, 32);
    dim3 dimGrid(DIVUP(width, dimBlock.x), 1);
    real* dw = filterG.getData();
    if (contextLength <= 32) {
      hipLaunchKernelGGL(( KeRowConvBwWeight<32, 32, 32>), dim3(dimGrid), dim3(dimBlock), 0, STREAM_DEFAULT,
          dw, x, dy, starts, height, width, numSeq, contextLength);
    } else {
      hipLaunchKernelGGL(( KeRowConvBwWeight2<32, 32>), dim3(dimGrid), dim3(dimBlock), 0, STREAM_DEFAULT,
          dw, x, dy, starts, height, width, numSeq, contextLength);
    }
  }

  if (inG) {
    real* dx = inG.getData();
    dim3 dimBlock2(32, 32);
    dim3 dimGrid2(DIVUP(width, dimBlock2.x), 1);
    if (contextLength <= 64) {
      hipLaunchKernelGGL(( KeRowConvBwData<32, 64>), dim3(dimGrid2), dim3(dimBlock2), 0, STREAM_DEFAULT,
          dx, w, dy, starts, height, width, numSeq, contextLength);
    } else {
      hipLaunchKernelGGL(( KeRowConvBwData2), dim3(dimGrid2), dim3(dimBlock2), 0, STREAM_DEFAULT,
          dx, w, dy, starts, height, width, numSeq, contextLength);
    }
  }
  CHECK_SYNC("RowConvGrad");
}

}  // namespace paddle
462a6d68c6c41c3272f3bd254d7cd4a89eeeb7cc.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/cuda/include/hl_base.h" #include "paddle/function/RowConvOp.h" namespace paddle { template <int BLOCK_H, int BLOCK_W> __global__ void KeRowConv(real* y, const real* x, const real* w, const int* starts, const int height, const int width, const int numSeq, const int context) { const int tidx = threadIdx.x; const int tidy = threadIdx.y; const int blky = blockDim.y; const int gidx = blockIdx.x * blockDim.x; __shared__ real sw[BLOCK_H][BLOCK_W]; for (int i = tidy; i < context; i += blky) { sw[i][tidx] = gidx + tidx < width ? w[i * width + gidx + tidx] : 0.0; } __syncthreads(); for (int i = 0; i < numSeq; ++i) { const int start = starts[i]; const int end = starts[i + 1]; const int steps = end - start; for (int j = tidy; j < steps; j += blky) { real sum = 0; int off = (start + j) * width; for (int t = 0; t < context; ++t) { if ((start + j + t) < end) { int xoff = off + t * width; real xVal = gidx + tidx < width ? x[xoff + gidx + tidx] : 0.0; sum += sw[t][tidx] * xVal; } } if (gidx + tidx < width) { y[off + gidx + tidx] += sum; } } } } __global__ void KeRowConv2(real* y, const real* x, const real* w, const int* starts, const int height, const int width, const int numSeq, const int context) { const int tidx = threadIdx.x; const int tidy = threadIdx.y; const int blky = blockDim.y; const int gidx = blockIdx.x * blockDim.x; for (int i = 0; i < numSeq; ++i) { const int start = starts[i]; const int end = starts[i + 1]; const int steps = end - start; for (int j = tidy; j < steps; j += blky) { int off = (start + j) * width; real sum = 0; for (int t = 0; t < context && (start + j + t) < end; ++t) { int xoff = off + t * width; real xd = gidx + tidx < width ? x[xoff + gidx + tidx] : 0.0; real wd = gidx + tidx < width ? 
w[t * width + gidx + tidx] : 0.0; sum += wd * xd; } if (gidx + tidx < width) { y[off + gidx + tidx] += sum; } } } } template <> void RowConv<DEVICE_TYPE_GPU>(GpuMatrix& out, // NOLINT const GpuMatrix& in, const GpuMatrix& filter, const GpuIVector& seq) { const size_t numSeq = seq.getSize() - 1; const size_t contextLength = filter.getHeight(); const size_t height = in.getHeight(); const size_t width = in.getWidth(); real* y = out.getData(); const real* x = in.getData(); const real* w = filter.getData(); const int* starts = seq.getData(); dim3 dimBlock(32, 32); dim3 dimGrid(DIVUP(width, dimBlock.x), 1); if (contextLength <= 32) { KeRowConv<32, 32><<<dimGrid, dimBlock, 0, STREAM_DEFAULT>>>( y, x, w, starts, height, width, numSeq, contextLength); } else { KeRowConv2<<<dimGrid, dimBlock, 0, STREAM_DEFAULT>>>( y, x, w, starts, height, width, numSeq, contextLength); } CHECK_SYNC("RowConv"); } template <int BLOCK_H, int BLOCK_W, int CONTEXT> __global__ void KeRowConvBwWeight(real* dw, const real* x, const real* dy, const int* starts, const int height, const int width, const int numSeq, const int context) { const int tidx = threadIdx.x; const int tidy = threadIdx.y; const int blky = blockDim.y; const int gidx = blockIdx.x * blockDim.x; __shared__ real sh_x[BLOCK_W][BLOCK_H]; __shared__ real sh_dy[BLOCK_W][BLOCK_H + CONTEXT - 1]; __shared__ real sh_dw[CONTEXT][BLOCK_W]; if (tidy < context) { sh_dw[tidy][tidx] = 0.0; } __syncthreads(); // NOTE(zcd): temporary solution unsigned mask = 0u; CREATE_SHFL_MASK(mask, true); for (int i = 0; i < numSeq; ++i) { const int start = starts[i]; const int end = starts[i + 1]; const int steps = end - start; const int size = ((steps + BLOCK_H - 1) / BLOCK_H) * BLOCK_H; for (int j = tidy; j < size; j += BLOCK_H) { int xoff = gidx + tidx; int yoff = start + j; // transpose sh_x[tidx][tidy] = (xoff < width && yoff < end) ? x[yoff * width + xoff] : 0.0; sh_dy[tidx][tidy + context - 1] = (xoff < width && yoff < end) ? dy[yoff * width + xoff] : 0.0; __syncthreads(); if (tidy < (context - 1)) { yoff = yoff - context + 1; sh_dy[tidx][tidy] = (xoff < width && yoff >= start) ? dy[yoff * width + xoff] : 0.0; } __syncthreads(); for (int t = 0; t < context; t++) { real val = sh_x[tidy][tidx] * sh_dy[tidy][tidx + context - 1 - t]; __syncthreads(); // warp size and blockDim.x is 32. for (int offset = 16; offset > 0; offset /= 2) val += __shfl_down_sync(mask, val, offset); __syncthreads(); if (tidx == 0) { sh_dw[t][tidy] += val; } __syncthreads(); } } } for (int t = tidy; (t < context) && ((gidx + tidx) < width); t += blky) { dw[t * width + gidx + tidx] += sh_dw[t][tidx]; } } template <int BLOCK_H, int BLOCK_W> __global__ void KeRowConvBwWeight2(real* dw, const real* x, const real* dy, const int* starts, const int height, const int width, const int numSeq, const int context) { const int tidx = threadIdx.x; const int tidy = threadIdx.y; const int gidx = blockIdx.x * blockDim.x; __shared__ real sh_x[BLOCK_H][BLOCK_W]; __shared__ real sh_dy[BLOCK_H][BLOCK_W]; // NOTE(zcd): temporary solution unsigned mask = 0u; CREATE_SHFL_MASK(mask, true); for (int i = 0; i < numSeq; ++i) { const int start = starts[i]; const int end = starts[i + 1]; const int steps = end - start; const int size = ((steps + BLOCK_H - 1) / BLOCK_H) * BLOCK_H; for (int j = tidy; j < size; j += BLOCK_H) { int xoff = gidx + tidx; int yoff = start + j; // transpose sh_x[tidx][tidy] = (xoff < width && yoff < end) ? 
x[yoff * width + xoff] : 0.0; __syncthreads(); for (int t = 0; t < context; t++) { sh_dy[tidx][tidy] = (xoff < width && (yoff - t) >= start && yoff - t < end) ? dy[(yoff - t) * width + xoff] : 0.0; __syncthreads(); real val = sh_x[tidy][tidx] * sh_dy[tidy][tidx]; __syncthreads(); // warp size and blockDim.x is 32. for (int offset = 16; offset > 0; offset /= 2) val += __shfl_down_sync(mask, val, offset); __syncthreads(); if (tidx == 0 && (gidx + tidy) < width) { dw[t * width + gidx + tidy] += val; } } } } } template <int BLOCK_H, int BLOCK_W> __global__ void KeRowConvBwData(real* dx, const real* w, const real* dy, const int* starts, const int height, const int width, const int numSeq, const int context) { const int tidx = threadIdx.x; const int tidy = threadIdx.y; const int blky = blockDim.y; const int gidx = blockIdx.x * blockDim.x; __shared__ real sw[BLOCK_H][BLOCK_W]; for (int i = tidy; i < context; i += blky) { sw[i][tidx] = gidx + tidx < width ? w[i * width + gidx + tidx] : 0.0; } __syncthreads(); for (int i = 0; i < numSeq; ++i) { const int start = starts[i]; const int end = starts[i + 1]; const int steps = end - start; for (int j = tidy; j < steps; j += blky) { real sum = 0; int off = (start + j) * width; for (int t = 0; t < context && (j - t) >= 0; ++t) { int dyOff = off - t * width; real dyVal = gidx + tidx < width ? dy[dyOff + gidx + tidx] : 0.0; sum += sw[t][tidx] * dyVal; } if (gidx + tidx < width) { dx[off + gidx + tidx] += sum; } } } } __global__ void KeRowConvBwData2(real* dx, const real* w, const real* dy, const int* starts, const int height, const int width, const int numSeq, const int context) { const int tidx = threadIdx.x; const int tidy = threadIdx.y; const int blky = blockDim.y; const int gidx = blockIdx.x * blockDim.x; for (int i = 0; i < numSeq; ++i) { const int start = starts[i]; const int end = starts[i + 1]; const int steps = end - start; for (int j = tidy; j < steps; j += blky) { real sum = 0; int off = (start + j) * width; for (int t = 0; t < context && (j - t) >= 0; ++t) { int dyOff = off - t * width; real dyVal = gidx + tidx < width ? dy[dyOff + gidx + tidx] : 0.0; real wVal = gidx + tidx < width ? 
w[t * width + gidx + tidx] : 0.0; sum += wVal * dyVal; } if (gidx + tidx < width) { dx[off + gidx + tidx] += sum; } } } } template <> void RowConvGrad<DEVICE_TYPE_GPU>(const GpuMatrix& outG, const GpuMatrix& in, const GpuMatrix& filter, GpuMatrix& inG, // NOLINT GpuMatrix& filterG, // NOLINT const GpuIVector& seq) { const size_t numSeq = seq.getSize() - 1; const size_t contextLength = filter.getHeight(); const size_t height = in.getHeight(); const size_t width = in.getWidth(); const real* dy = outG.getData(); const real* x = in.getData(); const real* w = filter.getData(); const int* starts = seq.getData(); if (filterG) { dim3 dimBlock(32, 32); dim3 dimGrid(DIVUP(width, dimBlock.x), 1); real* dw = filterG.getData(); if (contextLength <= 32) { KeRowConvBwWeight<32, 32, 32><<<dimGrid, dimBlock, 0, STREAM_DEFAULT>>>( dw, x, dy, starts, height, width, numSeq, contextLength); } else { KeRowConvBwWeight2<32, 32><<<dimGrid, dimBlock, 0, STREAM_DEFAULT>>>( dw, x, dy, starts, height, width, numSeq, contextLength); } } if (inG) { real* dx = inG.getData(); dim3 dimBlock2(32, 32); dim3 dimGrid2(DIVUP(width, dimBlock2.x), 1); if (contextLength <= 64) { KeRowConvBwData<32, 64><<<dimGrid2, dimBlock2, 0, STREAM_DEFAULT>>>( dx, w, dy, starts, height, width, numSeq, contextLength); } else { KeRowConvBwData2<<<dimGrid2, dimBlock2, 0, STREAM_DEFAULT>>>( dx, w, dy, starts, height, width, numSeq, contextLength); } } CHECK_SYNC("RowConvGrad"); } } // namespace paddle
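For reference, the forward kernels above (KeRowConv / KeRowConv2) compute a lookahead row convolution: within each sequence, output row r accumulates the next `context` input rows, one filter row per lookahead step, independently per feature column. A minimal host-side sketch of that computation, assuming row-major height x width data and the `starts` layout used above (the function name and signature are mine, not part of the Paddle API):

#include <cstddef>
#include <vector>

// Reference forward pass: y[r][d] += sum over t < context, with r+t inside the
// same sequence, of w[t][d] * x[r+t][d].
void rowConvRef(std::vector<float>& y, const std::vector<float>& x,
                const std::vector<float>& w, const std::vector<int>& starts,
                int width, int context) {
  for (std::size_t s = 0; s + 1 < starts.size(); ++s) {      // each sequence
    for (int r = starts[s]; r < starts[s + 1]; ++r) {        // each time step
      for (int d = 0; d < width; ++d) {                      // each feature column
        float sum = 0.0f;
        for (int t = 0; t < context && r + t < starts[s + 1]; ++t)
          sum += w[t * width + d] * x[(r + t) * width + d];  // lookahead window
        y[r * width + d] += sum;
      }
    }
  }
}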
421374cbbe0c6a01e8d4eef6d6c7fdd823d5ab59.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2015-2022 by XGBoost Contributors * \file regression_obj.cu * \brief Definition of single-value regression and classification objectives. * \author Tianqi Chen, Kailong Chen */ #include <dmlc/omp.h> #include <xgboost/logging.h> #include <xgboost/objective.h> #include <cmath> #include <memory> #include <vector> #include "xgboost/host_device_vector.h" #include "xgboost/json.h" #include "xgboost/parameter.h" #include "xgboost/span.h" #include "../common/transform.h" #include "../common/common.h" #include "../common/threading_utils.h" #include "./regression_loss.h" namespace xgboost { namespace obj { #if defined(XGBOOST_USE_CUDA) DMLC_REGISTRY_FILE_TAG(regression_obj_gpu); #endif // defined(XGBOOST_USE_CUDA) struct RegLossParam : public XGBoostParameter<RegLossParam> { float scale_pos_weight; // declare parameters DMLC_DECLARE_PARAMETER(RegLossParam) { DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f) .describe("Scale the weight of positive examples by this factor"); } }; template<typename Loss> class RegLossObj : public ObjFunction { protected: HostDeviceVector<float> additional_input_; public: // 0 - label_correct flag, 1 - scale_pos_weight, 2 - is_null_weight RegLossObj(): additional_input_(3) {} void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } struct ObjInfo Task() const override { return Loss::Info(); } uint32_t Targets(MetaInfo const& info) const override { // Multi-target regression. return ::max(static_cast<size_t>(1), info.labels.Shape(1)); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair>* out_gpair) override { CHECK_EQ(preds.Size(), info.labels.Size()) << " " << "labels are not correctly provided" << "preds.size=" << preds.Size() << ", label.size=" << info.labels.Size() << ", " << "Loss: " << Loss::Name(); size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; additional_input_.HostVector().begin()[0] = 1; // Fill the label_correct flag bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), info.labels.Shape(0)) << "Number of weights should be equal to number of data points."; } auto scale_pos_weight = param_.scale_pos_weight; additional_input_.HostVector().begin()[1] = scale_pos_weight; additional_input_.HostVector().begin()[2] = is_null_weight; const size_t nthreads = ctx_->Threads(); bool on_device = device >= 0; // On CPU we run the transformation each thread processing a contigious block of data // for better performance. const size_t n_data_blocks = ::max(static_cast<size_t>(1), (on_device ? 
ndata : nthreads)); const size_t block_size = ndata / n_data_blocks + !!(ndata % n_data_blocks); auto const n_targets = ::max(info.labels.Shape(1), static_cast<size_t>(1)); common::Transform<>::Init( [block_size, ndata, n_targets] XGBOOST_DEVICE( size_t data_block_idx, common::Span<float> _additional_input, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { const bst_float* preds_ptr = _preds.data(); const bst_float* labels_ptr = _labels.data(); const bst_float* weights_ptr = _weights.data(); GradientPair* out_gpair_ptr = _out_gpair.data(); const size_t begin = data_block_idx*block_size; const size_t end = ::min(ndata, begin + block_size); const float _scale_pos_weight = _additional_input[1]; const bool _is_null_weight = _additional_input[2]; for (size_t idx = begin; idx < end; ++idx) { bst_float p = Loss::PredTransform(preds_ptr[idx]); bst_float w = _is_null_weight ? 1.0f : weights_ptr[idx / n_targets]; bst_float label = labels_ptr[idx]; if (label == 1.0f) { w *= _scale_pos_weight; } if (!Loss::CheckLabel(label)) { // If there is an incorrect label, the host code will know. _additional_input[0] = 0; } out_gpair_ptr[idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w, Loss::SecondOrderGradient(p, label) * w); } }, common::Range{0, static_cast<int64_t>(n_data_blocks)}, nthreads, device) .Eval(&additional_input_, out_gpair, &preds, info.labels.Data(), &info.weights_); auto const flag = additional_input_.HostVector().begin()[0]; if (flag == 0) { LOG(FATAL) << Loss::LabelErrorMsg(); } } public: const char* DefaultEvalMetric() const override { return Loss::DefaultEvalMetric(); } void PredTransform(HostDeviceVector<float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) { _preds[_idx] = Loss::PredTransform(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } float ProbToMargin(float base_score) const override { return Loss::ProbToMargin(base_score); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String(Loss::Name()); out["reg_loss_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["reg_loss_param"], &param_); } protected: RegLossParam param_; }; // register the objective functions DMLC_REGISTER_PARAMETER(RegLossParam); XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegression, LinearSquareLoss::Name()) .describe("Regression with squared error.") .set_body([]() { return new RegLossObj<LinearSquareLoss>(); }); XGBOOST_REGISTER_OBJECTIVE(SquareLogError, SquaredLogError::Name()) .describe("Regression with root mean squared logarithmic error.") .set_body([]() { return new RegLossObj<SquaredLogError>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRegression, LogisticRegression::Name()) .describe("Logistic regression for probability regression task.") .set_body([]() { return new RegLossObj<LogisticRegression>(); }); XGBOOST_REGISTER_OBJECTIVE(PseudoHuberError, PseudoHuberError::Name()) .describe("Regression Pseudo Huber error.") .set_body([]() { return new RegLossObj<PseudoHuberError>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticClassification, LogisticClassification::Name()) .describe("Logistic regression for binary classification task.") .set_body([]() { return new RegLossObj<LogisticClassification>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRaw, LogisticRaw::Name()) .describe("Logistic 
regression for classification, output score " "before logistic transformation.") .set_body([]() { return new RegLossObj<LogisticRaw>(); }); // Deprecated functions XGBOOST_REGISTER_OBJECTIVE(LinearRegression, "reg:linear") .describe("Regression with squared error.") .set_body([]() { LOG(WARNING) << "reg:linear is now deprecated in favor of reg:squarederror."; return new RegLossObj<LinearSquareLoss>(); }); // End deprecated // declare parameter struct PoissonRegressionParam : public XGBoostParameter<PoissonRegressionParam> { float max_delta_step; DMLC_DECLARE_PARAMETER(PoissonRegressionParam) { DMLC_DECLARE_FIELD(max_delta_step).set_lower_bound(0.0f).set_default(0.7f) .describe("Maximum delta step we allow each weight estimation to be." \ " This parameter is required for possion regression."); } }; // poisson regression for count class PoissonRegression : public ObjFunction { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } struct ObjInfo Task() const override { return {ObjInfo::kRegression, false}; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } bst_float max_delta_step = param_.max_delta_step; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair{(expf(p) - y) * w, expf(p + max_delta_step) * w}; }, common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval( &label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "PoissonRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return "poisson-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("count:poisson"); out["poisson_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["poisson_regression_param"], &param_); } private: PoissonRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(PoissonRegressionParam); XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson") .describe("Poisson regression for count data.") .set_body([]() { return new PoissonRegression(); }); // cox regression for survival data (negative values mean they are censored) class CoxRegression : public ObjFunction { public: void Configure( const std::vector<std::pair<std::string, std::string> >&) override {} struct ObjInfo Task() const override { return {ObjInfo::kRegression, false}; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const auto& preds_h = preds.HostVector(); out_gpair->Resize(preds_h.size()); auto& gpair = out_gpair->HostVector(); const std::vector<size_t> &label_order = info.LabelAbsSort(); const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*) const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } // pre-compute a sum double exp_p_sum = 0; // we use double because we might need the precision with large datasets for (omp_ulong i = 0; i < ndata; ++i) { exp_p_sum += ::exp(preds_h[label_order[i]]); } // start calculating grad and hess const auto& labels = info.labels.HostView(); double r_k = 0; double s_k = 0; double last_exp_p = 0.0; double last_abs_y = 0.0; double accumulated_sum = 0; for (omp_ulong i = 0; i < ndata; ++i) { // NOLINT(*) const size_t ind = label_order[i]; const double p = preds_h[ind]; const double exp_p = ::exp(p); const double w = info.GetWeight(ind); const double y = labels(ind); const double abs_y = std::abs(y); // only update the denominator after we move forward in time (labels are sorted) // this is Breslow's method for ties accumulated_sum += 
last_exp_p; if (last_abs_y < abs_y) { exp_p_sum -= accumulated_sum; accumulated_sum = 0; } else { CHECK(last_abs_y <= abs_y) << "CoxRegression: labels must be in sorted order, " << "MetaInfo::LabelArgsort failed!"; } if (y > 0) { r_k += 1.0/exp_p_sum; s_k += 1.0/(exp_p_sum*exp_p_sum); } const double grad = exp_p*r_k - static_cast<bst_float>(y > 0); const double hess = exp_p*r_k - exp_p*exp_p * s_k; gpair.at(ind) = GradientPair(grad * w, hess * w); last_abs_y = abs_y; last_exp_p = exp_p; } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { std::vector<bst_float> &preds = io_preds->HostVector(); const long ndata = static_cast<long>(preds.size()); // NOLINT(*) common::ParallelFor(ndata, ctx_->Threads(), [&](long j) { // NOLINT(*) preds[j] = ::exp(preds[j]); }); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return "cox-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("survival:cox"); } void LoadConfig(Json const&) override {} }; // register the objective function XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox") .describe("Cox regression for censored survival data (negative labels are considered censored).") .set_body([]() { return new CoxRegression(); }); // gamma regression class GammaRegression : public ObjFunction { public: void Configure( const std::vector<std::pair<std::string, std::string> >&) override {} struct ObjInfo Task() const override { return {ObjInfo::kRegression, false}; } void GetGradient(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); auto device = ctx_->gpu_id; out_gpair->Resize(ndata); label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y <= 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w); }, common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval( &label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "GammaRegression: label must be positive."; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return "gamma-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:gamma"); } void LoadConfig(Json const&) override {} private: HostDeviceVector<int> label_correct_; }; // register the objective functions XGBOOST_REGISTER_OBJECTIVE(GammaRegression, "reg:gamma") .describe("Gamma regression for severity data.") .set_body([]() { return new GammaRegression(); }); // declare parameter struct TweedieRegressionParam : public XGBoostParameter<TweedieRegressionParam> { float tweedie_variance_power; DMLC_DECLARE_PARAMETER(TweedieRegressionParam) { DMLC_DECLARE_FIELD(tweedie_variance_power).set_range(1.0f, 2.0f).set_default(1.5f) .describe("Tweedie variance power. Must be between in range [1, 2)."); } }; // tweedie regression class TweedieRegression : public ObjFunction { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); std::ostringstream os; os << "tweedie-nloglik@" << param_.tweedie_variance_power; metric_ = os.str(); } struct ObjInfo Task() const override { return {ObjInfo::kRegression, false}; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } const float rho = param_.tweedie_variance_power; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } bst_float grad = -y * expf((1 - rho) * p) + expf((2 - rho) * p); bst_float hess = -y * (1 - rho) * \ ::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p); _out_gpair[_idx] = GradientPair(grad * w, hess * w); }, common::Range{0, static_cast<int64_t>(ndata), 1}, this->ctx_->Threads(), device) .Eval(&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "TweedieRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return ::log(base_score); } const char* DefaultEvalMetric() const override { return metric_.c_str(); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:tweedie"); out["tweedie_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["tweedie_regression_param"], &param_); } private: std::string metric_; TweedieRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(TweedieRegressionParam); XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie") .describe("Tweedie regression for insurance data.") .set_body([]() { return new TweedieRegression(); }); } // namespace obj } // namespace xgboost
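For reference, the gradient/hessian pairs implemented by the GetGradient bodies above, written out directly from the code (log link throughout; p is the raw prediction, y the label, w the instance weight):

\[
\begin{aligned}
\text{Poisson:}\quad & g = (e^{p} - y)\,w, & h &= e^{\,p + \delta_{\max}}\,w \\
\text{Gamma:}\quad & g = (1 - y\,e^{-p})\,w, & h &= y\,e^{-p}\,w \\
\text{Tweedie:}\quad & g = \bigl(e^{(2-\rho)p} - y\,e^{(1-\rho)p}\bigr)\,w, & h &= \bigl((2-\rho)\,e^{(2-\rho)p} - y\,(1-\rho)\,e^{(1-\rho)p}\bigr)\,w
\end{aligned}
\]

with \(\delta_{\max}\) = max_delta_step and \(\rho\) = tweedie_variance_power.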
421374cbbe0c6a01e8d4eef6d6c7fdd823d5ab59.cu
/*! * Copyright 2015-2022 by XGBoost Contributors * \file regression_obj.cu * \brief Definition of single-value regression and classification objectives. * \author Tianqi Chen, Kailong Chen */ #include <dmlc/omp.h> #include <xgboost/logging.h> #include <xgboost/objective.h> #include <cmath> #include <memory> #include <vector> #include "xgboost/host_device_vector.h" #include "xgboost/json.h" #include "xgboost/parameter.h" #include "xgboost/span.h" #include "../common/transform.h" #include "../common/common.h" #include "../common/threading_utils.h" #include "./regression_loss.h" namespace xgboost { namespace obj { #if defined(XGBOOST_USE_CUDA) DMLC_REGISTRY_FILE_TAG(regression_obj_gpu); #endif // defined(XGBOOST_USE_CUDA) struct RegLossParam : public XGBoostParameter<RegLossParam> { float scale_pos_weight; // declare parameters DMLC_DECLARE_PARAMETER(RegLossParam) { DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f) .describe("Scale the weight of positive examples by this factor"); } }; template<typename Loss> class RegLossObj : public ObjFunction { protected: HostDeviceVector<float> additional_input_; public: // 0 - label_correct flag, 1 - scale_pos_weight, 2 - is_null_weight RegLossObj(): additional_input_(3) {} void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } struct ObjInfo Task() const override { return Loss::Info(); } uint32_t Targets(MetaInfo const& info) const override { // Multi-target regression. return std::max(static_cast<size_t>(1), info.labels.Shape(1)); } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair>* out_gpair) override { CHECK_EQ(preds.Size(), info.labels.Size()) << " " << "labels are not correctly provided" << "preds.size=" << preds.Size() << ", label.size=" << info.labels.Size() << ", " << "Loss: " << Loss::Name(); size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; additional_input_.HostVector().begin()[0] = 1; // Fill the label_correct flag bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), info.labels.Shape(0)) << "Number of weights should be equal to number of data points."; } auto scale_pos_weight = param_.scale_pos_weight; additional_input_.HostVector().begin()[1] = scale_pos_weight; additional_input_.HostVector().begin()[2] = is_null_weight; const size_t nthreads = ctx_->Threads(); bool on_device = device >= 0; // On CPU we run the transformation each thread processing a contigious block of data // for better performance. const size_t n_data_blocks = std::max(static_cast<size_t>(1), (on_device ? 
ndata : nthreads)); const size_t block_size = ndata / n_data_blocks + !!(ndata % n_data_blocks); auto const n_targets = std::max(info.labels.Shape(1), static_cast<size_t>(1)); common::Transform<>::Init( [block_size, ndata, n_targets] XGBOOST_DEVICE( size_t data_block_idx, common::Span<float> _additional_input, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { const bst_float* preds_ptr = _preds.data(); const bst_float* labels_ptr = _labels.data(); const bst_float* weights_ptr = _weights.data(); GradientPair* out_gpair_ptr = _out_gpair.data(); const size_t begin = data_block_idx*block_size; const size_t end = std::min(ndata, begin + block_size); const float _scale_pos_weight = _additional_input[1]; const bool _is_null_weight = _additional_input[2]; for (size_t idx = begin; idx < end; ++idx) { bst_float p = Loss::PredTransform(preds_ptr[idx]); bst_float w = _is_null_weight ? 1.0f : weights_ptr[idx / n_targets]; bst_float label = labels_ptr[idx]; if (label == 1.0f) { w *= _scale_pos_weight; } if (!Loss::CheckLabel(label)) { // If there is an incorrect label, the host code will know. _additional_input[0] = 0; } out_gpair_ptr[idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w, Loss::SecondOrderGradient(p, label) * w); } }, common::Range{0, static_cast<int64_t>(n_data_blocks)}, nthreads, device) .Eval(&additional_input_, out_gpair, &preds, info.labels.Data(), &info.weights_); auto const flag = additional_input_.HostVector().begin()[0]; if (flag == 0) { LOG(FATAL) << Loss::LabelErrorMsg(); } } public: const char* DefaultEvalMetric() const override { return Loss::DefaultEvalMetric(); } void PredTransform(HostDeviceVector<float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) { _preds[_idx] = Loss::PredTransform(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } float ProbToMargin(float base_score) const override { return Loss::ProbToMargin(base_score); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String(Loss::Name()); out["reg_loss_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["reg_loss_param"], &param_); } protected: RegLossParam param_; }; // register the objective functions DMLC_REGISTER_PARAMETER(RegLossParam); XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegression, LinearSquareLoss::Name()) .describe("Regression with squared error.") .set_body([]() { return new RegLossObj<LinearSquareLoss>(); }); XGBOOST_REGISTER_OBJECTIVE(SquareLogError, SquaredLogError::Name()) .describe("Regression with root mean squared logarithmic error.") .set_body([]() { return new RegLossObj<SquaredLogError>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRegression, LogisticRegression::Name()) .describe("Logistic regression for probability regression task.") .set_body([]() { return new RegLossObj<LogisticRegression>(); }); XGBOOST_REGISTER_OBJECTIVE(PseudoHuberError, PseudoHuberError::Name()) .describe("Regression Pseudo Huber error.") .set_body([]() { return new RegLossObj<PseudoHuberError>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticClassification, LogisticClassification::Name()) .describe("Logistic regression for binary classification task.") .set_body([]() { return new RegLossObj<LogisticClassification>(); }); XGBOOST_REGISTER_OBJECTIVE(LogisticRaw, LogisticRaw::Name()) 
.describe("Logistic regression for classification, output score " "before logistic transformation.") .set_body([]() { return new RegLossObj<LogisticRaw>(); }); // Deprecated functions XGBOOST_REGISTER_OBJECTIVE(LinearRegression, "reg:linear") .describe("Regression with squared error.") .set_body([]() { LOG(WARNING) << "reg:linear is now deprecated in favor of reg:squarederror."; return new RegLossObj<LinearSquareLoss>(); }); // End deprecated // declare parameter struct PoissonRegressionParam : public XGBoostParameter<PoissonRegressionParam> { float max_delta_step; DMLC_DECLARE_PARAMETER(PoissonRegressionParam) { DMLC_DECLARE_FIELD(max_delta_step).set_lower_bound(0.0f).set_default(0.7f) .describe("Maximum delta step we allow each weight estimation to be." \ " This parameter is required for possion regression."); } }; // poisson regression for count class PoissonRegression : public ObjFunction { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); } struct ObjInfo Task() const override { return {ObjInfo::kRegression, false}; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; size_t const ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } bst_float max_delta_step = param_.max_delta_step; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair{(expf(p) - y) * w, expf(p + max_delta_step) * w}; }, common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval( &label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "PoissonRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return "poisson-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("count:poisson"); out["poisson_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["poisson_regression_param"], &param_); } private: PoissonRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(PoissonRegressionParam); XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson") .describe("Poisson regression for count data.") .set_body([]() { return new PoissonRegression(); }); // cox regression for survival data (negative values mean they are censored) class CoxRegression : public ObjFunction { public: void Configure( const std::vector<std::pair<std::string, std::string> >&) override {} struct ObjInfo Task() const override { return {ObjInfo::kRegression, false}; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const auto& preds_h = preds.HostVector(); out_gpair->Resize(preds_h.size()); auto& gpair = out_gpair->HostVector(); const std::vector<size_t> &label_order = info.LabelAbsSort(); const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*) const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } // pre-compute a sum double exp_p_sum = 0; // we use double because we might need the precision with large datasets for (omp_ulong i = 0; i < ndata; ++i) { exp_p_sum += std::exp(preds_h[label_order[i]]); } // start calculating grad and hess const auto& labels = info.labels.HostView(); double r_k = 0; double s_k = 0; double last_exp_p = 0.0; double last_abs_y = 0.0; double accumulated_sum = 0; for (omp_ulong i = 0; i < ndata; ++i) { // NOLINT(*) const size_t ind = label_order[i]; const double p = preds_h[ind]; const double exp_p = std::exp(p); const double w = info.GetWeight(ind); const double y = labels(ind); const double abs_y = std::abs(y); // only update the denominator after we move forward in time (labels are sorted) // this is Breslow's method for ties accumulated_sum += 
last_exp_p; if (last_abs_y < abs_y) { exp_p_sum -= accumulated_sum; accumulated_sum = 0; } else { CHECK(last_abs_y <= abs_y) << "CoxRegression: labels must be in sorted order, " << "MetaInfo::LabelArgsort failed!"; } if (y > 0) { r_k += 1.0/exp_p_sum; s_k += 1.0/(exp_p_sum*exp_p_sum); } const double grad = exp_p*r_k - static_cast<bst_float>(y > 0); const double hess = exp_p*r_k - exp_p*exp_p * s_k; gpair.at(ind) = GradientPair(grad * w, hess * w); last_abs_y = abs_y; last_exp_p = exp_p; } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { std::vector<bst_float> &preds = io_preds->HostVector(); const long ndata = static_cast<long>(preds.size()); // NOLINT(*) common::ParallelFor(ndata, ctx_->Threads(), [&](long j) { // NOLINT(*) preds[j] = std::exp(preds[j]); }); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return "cox-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("survival:cox"); } void LoadConfig(Json const&) override {} }; // register the objective function XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox") .describe("Cox regression for censored survival data (negative labels are considered censored).") .set_body([]() { return new CoxRegression(); }); // gamma regression class GammaRegression : public ObjFunction { public: void Configure( const std::vector<std::pair<std::string, std::string> >&) override {} struct ObjInfo Task() const override { return {ObjInfo::kRegression, false}; } void GetGradient(const HostDeviceVector<bst_float> &preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); auto device = ctx_->gpu_id; out_gpair->Resize(ndata); label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y <= 0.0f) { _label_correct[0] = 0; } _out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w); }, common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval( &label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "GammaRegression: label must be positive."; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } void EvalTransform(HostDeviceVector<bst_float> *io_preds) override { PredTransform(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return "gamma-nloglik"; } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:gamma"); } void LoadConfig(Json const&) override {} private: HostDeviceVector<int> label_correct_; }; // register the objective functions XGBOOST_REGISTER_OBJECTIVE(GammaRegression, "reg:gamma") .describe("Gamma regression for severity data.") .set_body([]() { return new GammaRegression(); }); // declare parameter struct TweedieRegressionParam : public XGBoostParameter<TweedieRegressionParam> { float tweedie_variance_power; DMLC_DECLARE_PARAMETER(TweedieRegressionParam) { DMLC_DECLARE_FIELD(tweedie_variance_power).set_range(1.0f, 2.0f).set_default(1.5f) .describe("Tweedie variance power. Must be between in range [1, 2)."); } }; // tweedie regression class TweedieRegression : public ObjFunction { public: // declare functions void Configure(const std::vector<std::pair<std::string, std::string> >& args) override { param_.UpdateAllowUnknown(args); std::ostringstream os; os << "tweedie-nloglik@" << param_.tweedie_variance_power; metric_ = os.str(); } struct ObjInfo Task() const override { return {ObjInfo::kRegression, false}; } void GetGradient(const HostDeviceVector<bst_float>& preds, const MetaInfo &info, int, HostDeviceVector<GradientPair> *out_gpair) override { CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty"; CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided"; const size_t ndata = preds.Size(); out_gpair->Resize(ndata); auto device = ctx_->gpu_id; label_correct_.Resize(1); label_correct_.Fill(1); const bool is_null_weight = info.weights_.Size() == 0; if (!is_null_weight) { CHECK_EQ(info.weights_.Size(), ndata) << "Number of weights should be equal to number of data points."; } const float rho = param_.tweedie_variance_power; common::Transform<>::Init( [=] XGBOOST_DEVICE(size_t _idx, common::Span<int> _label_correct, common::Span<GradientPair> _out_gpair, common::Span<const bst_float> _preds, common::Span<const bst_float> _labels, common::Span<const bst_float> _weights) { bst_float p = _preds[_idx]; bst_float w = is_null_weight ? 
1.0f : _weights[_idx]; bst_float y = _labels[_idx]; if (y < 0.0f) { _label_correct[0] = 0; } bst_float grad = -y * expf((1 - rho) * p) + expf((2 - rho) * p); bst_float hess = -y * (1 - rho) * \ std::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p); _out_gpair[_idx] = GradientPair(grad * w, hess * w); }, common::Range{0, static_cast<int64_t>(ndata), 1}, this->ctx_->Threads(), device) .Eval(&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_); // copy "label correct" flags back to host std::vector<int>& label_correct_h = label_correct_.HostVector(); for (auto const flag : label_correct_h) { if (flag == 0) { LOG(FATAL) << "TweedieRegression: label must be nonnegative"; } } } void PredTransform(HostDeviceVector<bst_float> *io_preds) const override { common::Transform<>::Init( [] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) { _preds[_idx] = expf(_preds[_idx]); }, common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(), io_preds->DeviceIdx()) .Eval(io_preds); } bst_float ProbToMargin(bst_float base_score) const override { return std::log(base_score); } const char* DefaultEvalMetric() const override { return metric_.c_str(); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["name"] = String("reg:tweedie"); out["tweedie_regression_param"] = ToJson(param_); } void LoadConfig(Json const& in) override { FromJson(in["tweedie_regression_param"], &param_); } private: std::string metric_; TweedieRegressionParam param_; HostDeviceVector<int> label_correct_; }; // register the objective functions DMLC_REGISTER_PARAMETER(TweedieRegressionParam); XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie") .describe("Tweedie regression for insurance data.") .set_body([]() { return new TweedieRegression(); }); } // namespace obj } // namespace xgboost
13a033aac912318b3f1ec6c19168d5cd9f542f2f.hip
// !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <cassert>
#include <chrono>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <map>
#include <numeric>
#include <random>
#include <sys/time.h>
#include <valarray>

#include <hip/hip_runtime_api.h>

#include "range.hpp"
#include "utils.hpp"

#define blockSize 128   // bytes per compression block. Power of 2 ?
#define BLOCK_SIZE 1024 // Thread Block size

// GPU decompression is still a stub: the kernel only computes a global index.
__global__ void decompress_kernel(int *devX, int *devY) {
  int i = threadIdx.x + blockDim.x * blockIdx.x;
  (void)i;
}

// Reinterpret base_size bits at t as a signed integer widened to long.
long getVals(char *t, int base_size) {
  long a;
  if (base_size == 16)
    a = *((int16_t *)t);
  else if (base_size == 32)
    a = *((int32_t *)t);
  else
    a = *((long *)t);
  return a;
}

void initArray(int size, int16_t *isCompressed) {
  for (int i = 0; i < size; i++) {
    isCompressed[i] = 0;
  }
}

// Base-delta compression: for each 128-byte block, store one base value plus
// narrow unsigned deltas. Returns the number of payload bytes written.
int bdCompress(char *c, int len, char *compressed, int16_t *isCompressed,
               long *baseVals) {
  int bytesCopied = 0;
  int blkCounter = 0;
  int i;
  int size_index;
  int size_array[3];
  size_array[0] = 16;
  size_array[1] = 32;
  size_array[2] = 64;
  int offset = 0;
  if (offset + blockSize > len) { // input shorter than one block: store raw
    baseVals[blkCounter] = 0;
    isCompressed[blkCounter] = offset - len; // negative flag = raw tail of |flag| bytes
    memcpy(&compressed[bytesCopied], &c[offset], len - offset);
    bytesCopied += len - offset;
    return bytesCopied;
  }
  while (offset + blockSize <= len) // Don't want to compress if length remaining is less than block
  {
    // Treat the block as 16-, 32- or 64-bit values in turn. Use the block
    // minimum as the base so unsigned deltas can be used. If the deltas fit
    // in uint8 take that, else try uint16, then uint32; if nothing fits,
    // leave the block uncompressed and move on to the next block.
    int minBytesUsed = -1;
    int minCompressed = 0;
    int minDivision = 64;
    long minVal = 0;
    for (size_index = 0; size_index < 3; size_index++) {
      int base_size = size_array[size_index];
      int numPtrs = (blockSize * 8) / (size_array[size_index]);
      long ptrArray[numPtrs];
      for (i = 0; i < numPtrs; i++) {
        ptrArray[i] = getVals((char *)&c[offset + i * (base_size / 8)], base_size);
      }
      bool flag = false;
      long minValue = 0;
      for (i = 0; i < numPtrs; i++) {
        if (flag == false) {
          flag = true;
          minValue = ptrArray[i];
        } else if (ptrArray[i] < minValue) {
          minValue = ptrArray[i];
        }
      }
      long range = 0;
      flag = false;
      for (i = 0; i < numPtrs; i++) {
        if (flag == false) {
          range = ptrArray[i] - minValue;
          flag = true;
        } else if (ptrArray[i] - minValue > range) {
          range = ptrArray[i] - minValue;
        }
      }
      printf("Final range is %ld , Size is %d , Min Val is %ld , Num ptrs is %d\n",
             range, size_array[size_index], minValue, numPtrs);
      if (range < pow(2, sizeof(uint8_t) * 8)) // compress into uint8
      {
        if (minBytesUsed == -1 || 8 * numPtrs < minBytesUsed) {
          minBytesUsed = 8 * numPtrs;
          minCompressed = 8;
          minDivision = base_size;
          minVal = minValue;
          printf("8: Range : %ld , minValue : %ld , min_div:%d , numPtrs:%d\n",
                 range, minValue, base_size, numPtrs);
        }
      } else if ((range < pow(2, sizeof(uint16_t) * 8)) && (base_size > 16)) {
        if (minBytesUsed == -1 || 16 * numPtrs < minBytesUsed) {
          minBytesUsed = 16 * numPtrs;
          minCompressed = 16;
          minDivision = base_size;
          minVal = minValue;
          printf("16: Range : %ld , minValue : %ld , min_div:%d , numPtrs:%d\n",
                 range, minValue, base_size, numPtrs);
        }
      } else if ((range < pow(2, sizeof(uint32_t) * 8)) && (base_size > 32)) {
        if (minBytesUsed == -1 || 32 * numPtrs < minBytesUsed) {
          minBytesUsed = 32 * numPtrs;
          minCompressed = 32;
          minDivision = base_size;
          minVal = minValue;
          printf("32: Range : %ld , minValue : %ld , min_div:%d , numPtrs:%d\n",
                 range, minValue, base_size, numPtrs);
        }
      }
      printf("Bytes used so far :%d\n", minBytesUsed);
    }
    if (minBytesUsed >= 0) // some delta width fit: emit deltas, record base
    {
      int numPtrs = (blockSize * 8 / minDivision);
      long ptrArray[numPtrs];
      int div_off = 3, compressed_off = 0;
      for (i = 0; i < numPtrs; i++) {
        ptrArray[i] = getVals(&c[offset + (i * minDivision / 8)], minDivision);
        div_off = 3; // 64-bit source chunks
        if (minDivision == 16)
          div_off = 1;
        else if (minDivision == 32)
          div_off = 2;
        if (minCompressed == 32) {
          uint32_t a = ptrArray[i] - minVal;
          memcpy(&compressed[bytesCopied], &a, sizeof(uint32_t));
          compressed_off = 3;
        } else if (minCompressed == 16) {
          uint16_t a = ptrArray[i] - minVal;
          memcpy(&compressed[bytesCopied], &a, sizeof(uint16_t));
          compressed_off = 2;
        } else if (minCompressed == 8) {
          uint8_t a = ptrArray[i] - minVal;
          memcpy(&compressed[bytesCopied], &a, sizeof(uint8_t));
          compressed_off = 1;
        }
        bytesCopied += (minCompressed / 8);
      }
      // Pack source-chunk width and delta width into one flag; decompress
      // recovers them as flag/4 and flag%4.
      isCompressed[blkCounter] = 4 * div_off + compressed_off;
      baseVals[blkCounter] = minVal;
    } else { // nothing fit: copy the block through uncompressed
      baseVals[blkCounter] = minVal;
      isCompressed[blkCounter] = 0;
      memcpy(&compressed[bytesCopied], &c[offset], blockSize);
      bytesCopied += blockSize;
    }
    offset += blockSize;
    blkCounter++;
    if (offset + blockSize > len) { // raw tail shorter than one block
      baseVals[blkCounter] = 0;
      isCompressed[blkCounter] = offset - len;
      memcpy(&compressed[bytesCopied], &c[offset], len - offset);
      bytesCopied += len - offset;
      break;
    }
  }
  return bytesCopied;
}

// Inverse of bdCompress. Returns the number of bytes written to decompressed.
// Deltas are read back through the signed getVals, which is fine as long as
// they stay below the signed maximum of the delta width.
int decompress(char *compressed, char *decompressed, int bytesCopied,
               long *baseVals, int16_t *isCompressed, int numBlocks) {
  int offset_compressed = 0;
  int offset_decompressed = 0;
  for (int i = 0; i < numBlocks; i++) // decompress every block
  {
    if (isCompressed[i] == 0) { // block was stored raw
      memcpy(&decompressed[offset_decompressed], &compressed[offset_compressed], blockSize);
      offset_compressed += blockSize;
      offset_decompressed += blockSize;
      continue;
    }
    if (isCompressed[i] < 0) { // raw tail of |flag| bytes, always the last block
      int bytes_to_copy = -1 * isCompressed[i];
      memcpy(&decompressed[offset_decompressed], &compressed[offset_compressed], bytes_to_copy);
      offset_compressed += bytes_to_copy;
      offset_decompressed += bytes_to_copy;
      break;
    }
    // If code reaches this point then actual compression has taken place:
    // flag = 4*div_off + compressed_off (see bdCompress).
    int chunk_size;
    int compressed_size;
    if ((isCompressed[i] / 4) == 1)
      chunk_size = 2;
    else if ((isCompressed[i] / 4) == 2)
      chunk_size = 4;
    else
      chunk_size = 8;
    if (isCompressed[i] % 4 == 1)
      compressed_size = 1;
    else if (isCompressed[i] % 4 == 2)
      compressed_size = 2;
    else
      compressed_size = 4;
    int numPtrs = blockSize / chunk_size;
    for (int j = 0; j < numPtrs; ++j) {
      long compressed_val = getVals((char *)&compressed[offset_compressed], compressed_size * 8);
      compressed_val += baseVals[i]; // add the base back onto the delta
      if (chunk_size == 2) {
        int16_t num = compressed_val;
        memcpy(&decompressed[offset_decompressed], &num, chunk_size);
      } else if (chunk_size == 4) {
        int32_t num = compressed_val;
        memcpy(&decompressed[offset_decompressed], &num, chunk_size);
      } else {
        long num = compressed_val;
        memcpy(&decompressed[offset_decompressed], &num, chunk_size);
      }
      offset_compressed += compressed_size;
      offset_decompressed += chunk_size;
    }
  }
  return offset_decompressed;
}

int main(int argc, char **argv) {
  // get start time
  const auto start = now();

  const int longArraySize = 512;
  long testArray[longArraySize];
  for (int i = 0; i < longArraySize; i++)
    testArray[i] = 100 * i;
  const int numBlocks = (((longArraySize * sizeof(long)) - 1) / blockSize) + 1; // ceiling
  long baseVals[numBlocks];
  int16_t isCompressed[numBlocks];
  char *compressed = (char *)malloc(100000);
  initArray(numBlocks, isCompressed);
  int bytesCopied = bdCompress((char *)testArray, longArraySize * sizeof(long),
                               compressed, isCompressed, baseVals);
  printf("Length , Bytes copied : %zu , %d\n",
         longArraySize * sizeof(long), bytesCopied);
  int bytesAfterCompress = numBlocks * (sizeof(long) + sizeof(int16_t)) + bytesCopied;
  int bytesBeforeCompress = longArraySize * sizeof(long);
  float compression_ratio = ((float)bytesAfterCompress) / ((float)bytesBeforeCompress);
  for (int i = 0; i < numBlocks; i++) {
    printf("Base value , compressed info , Ratio : %ld , %d , %f\n",
           baseVals[i], isCompressed[i], compression_ratio);
  }
  printf("\n");

  char *decompressed = (char *)malloc(100000);
  int bytes = decompress(compressed, decompressed, bytesCopied, baseVals,
                         isCompressed, numBlocks);
  printf("Bytes after decompression : %d\n", bytes);
  // memcmp rather than a string compare: the data is binary and full of zero bytes.
  bool t = (bytes == (int)(longArraySize * sizeof(long))) &&
           (memcmp((char *)testArray, decompressed, bytes) == 0);
  if (t)
    printf("Successful \n");

  // get end time
  const auto end = now();
  // get elapsed time in milliseconds (reused for the later timings below)
  auto elapsed = std::chrono::duration<double, std::milli>(end - start).count();
  std::cout << "Compression & Decompression time = " << elapsed << " milliseconds.";

  // Transfer compacted to GPU
  // ----------------------------------------
  int *devX;
  int *devY;
  check_success(hipMalloc(&devX, bytesAfterCompress));
  check_success(hipMalloc(&devY, bytesBeforeCompress));
  // Ship the compressed payload; a complete version would also copy the
  // baseVals / isCompressed metadata for device-side decompression.
  check_success(hipMemcpy(devX, compressed, bytesAfterCompress, hipMemcpyHostToDevice));
  const auto transferCPU_GPU = now();
  // get elapsed time in milliseconds
  elapsed = std::chrono::duration<double, std::milli>(transferCPU_GPU - end).count();
  std::cout << "Transfer CPU to GPU time = " << elapsed << " milliseconds.";

  // Decompress in GPU
  // ----------------------------------------
  int x = ceil(bytesAfterCompress / 1024.0);
  dim3 DimGrid(x, 1, 1);
  dim3 DimBlock(BLOCK_SIZE, 1, 1);
  hipLaunchKernelGGL((decompress_kernel), dim3(DimGrid), dim3(DimBlock), 0, 0, devX, devY);
  hipDeviceSynchronize();
  const auto Decompress_GPU = now();
  // get elapsed time in milliseconds
  elapsed = std::chrono::duration<double, std::milli>(Decompress_GPU - transferCPU_GPU).count();
  std::cout << "De-Compression in GPU time = " << elapsed << " milliseconds.";

  // Free Device Memory
  // ----------------------------------------
  check_success(hipFree(devX));
  check_success(hipFree(devY));
  free(compressed);
  free(decompressed);
  return 0;
}
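A worked example of the flag encoding shared by bdCompress and decompress above. The numbers follow from the test data in main (testArray[i] = 100*i): one 128-byte block holds 16 longs whose range is 100*15 = 1500, which misses uint8 but fits uint16, read at 64-bit granularity, so div_off = 3 and compressed_off = 2, giving flag = 4*3 + 2 = 14. The standalone program is my illustration, not part of the original file:

#include <cstdint>
#include <cstdio>

int main() {
  int16_t flag = 14; // 4*div_off + compressed_off, as emitted by bdCompress
  int chunk_size      = (flag / 4 == 1) ? 2 : (flag / 4 == 2) ? 4 : 8; // original value width
  int compressed_size = (flag % 4 == 1) ? 1 : (flag % 4 == 2) ? 2 : 4; // stored delta width
  // 128-byte block -> 16 eight-byte values, stored as 16 uint16 deltas = 32 bytes
  printf("%d values, %d bytes each -> %d bytes\n",
         128 / chunk_size, compressed_size, (128 / chunk_size) * compressed_size);
  return 0;
}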
13a033aac912318b3f1ec6c19168d5cd9f542f2f.cu
#include <algorithm>
#include <cassert>
#include <chrono>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <iostream>
#include <map>
#include <numeric>
#include <random>
#include <sys/time.h>
#include <valarray>

#include <cuda_profiler_api.h>

#include "range.hpp"
#include "utils.hpp"

#define blockSize 128   // compression block size in bytes (power of 2)
#define BLOCK_SIZE 1024 // thread block size

// GPU decompression is still a stub; a device-side sketch follows this file.
__global__ void decompress_kernel(int *devX, int *devY) {
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    (void)i; // not implemented yet
}

// Read one signed value of base_size bits (16/32/64) from t.
long getVals(char *t, int base_size) {
    long a;
    if (base_size == 16)
        a = *((int16_t *)t);
    else if (base_size == 32)
        a = *((int32_t *)t);
    else
        a = *((long *)t);
    return a;
}

// Deltas are stored unsigned, so they must also be read back unsigned;
// getVals would sign-extend a delta such as 0xFFFF.
long getDelta(char *t, int csize) {
    if (csize == 1) return *((uint8_t *)t);
    if (csize == 2) return *((uint16_t *)t);
    return *((uint32_t *)t);
}

void initArray(int size, int16_t *isCompressed) {
    for (int i = 0; i < size; i++) isCompressed[i] = 0;
}

int bdCompress(char *c, int len, char *compressed, int16_t *isCompressed, long *baseVals) {
    int bytesCopied = 0;
    int blkCounter = 0;
    int size_array[3] = {16, 32, 64};
    int offset = 0;
    int i, size_index;

    if (offset + blockSize > len) { // input shorter than one block: store it verbatim
        baseVals[blkCounter] = 0;
        isCompressed[blkCounter] = offset - len; // negative marker: verbatim tail
        memcpy(&compressed[bytesCopied], &c[offset], len - offset);
        bytesCopied += len - offset;
        return bytesCopied;
    }
    while (offset + blockSize <= len) { // don't compress a tail shorter than one block
        // Interpret the block as 16-, 32- or 64-bit values, take the minimum as the
        // base, and try to store the deltas as uint8/uint16/uint32. If no
        // interpretation fits even in uint32, the block is stored uncompressed.
        int minBytesUsed = -1; // was commented out in the original, but is used below
        int minCompressed = 0, minDivision = 0;
        long minVal = 0;
        for (size_index = 0; size_index < 3; size_index++) {
            int base_size = size_array[size_index];
            int numPtrs = (blockSize * 8) / base_size;
            long ptrArray[numPtrs];
            for (i = 0; i < numPtrs; i++)
                ptrArray[i] = getVals(&c[offset + i * (base_size / 8)], base_size);
            long minValue = ptrArray[0];
            for (i = 1; i < numPtrs; i++)
                if (ptrArray[i] < minValue) minValue = ptrArray[i];
            long range = 0;
            for (i = 0; i < numPtrs; i++)
                if (ptrArray[i] - minValue > range) range = ptrArray[i] - minValue;
            printf("Final range is %ld , Size is %d, Min Val is %ld , Num ptrs is %d\n",
                   range, base_size, minValue, numPtrs);
            int bits = 0; // smallest delta width that holds the range, if any
            if (range < pow(2, sizeof(uint8_t) * 8))
                bits = 8;
            else if (range < pow(2, sizeof(uint16_t) * 8) && base_size > 16)
                bits = 16;
            else if (range < pow(2, sizeof(uint32_t) * 8) && base_size > 32)
                bits = 32;
            if (bits != 0 && (minBytesUsed == -1 || bits * numPtrs < minBytesUsed)) {
                minBytesUsed = bits * numPtrs;
                minCompressed = bits;
                minDivision = base_size;
                minVal = minValue;
            }
        }
        if (minBytesUsed >= 0) { // compress: store deltas from the base value
            int numPtrs = (blockSize * 8) / minDivision;
            int div_off = 3, compressed_off = 0; // 1,2,3 encode 16/32/64-bit chunks
            if (minDivision == 16) div_off = 1;
            else if (minDivision == 32) div_off = 2;
            for (i = 0; i < numPtrs; i++) {
                long val = getVals(&c[offset + i * (minDivision / 8)], minDivision);
                if (minCompressed == 32) {
                    uint32_t a = val - minVal;
                    memcpy(&compressed[bytesCopied], &a, sizeof(uint32_t));
                    compressed_off = 3;
                } else if (minCompressed == 16) {
                    uint16_t a = val - minVal;
                    memcpy(&compressed[bytesCopied], &a, sizeof(uint16_t));
                    compressed_off = 2;
                } else {
                    uint8_t a = val - minVal;
                    memcpy(&compressed[bytesCopied], &a, sizeof(uint8_t));
                    compressed_off = 1;
                }
                bytesCopied += (minCompressed / 8);
            }
            // pack chunk width and delta width into one flag
            isCompressed[blkCounter] = 4 * div_off + compressed_off;
            baseVals[blkCounter] = minVal;
        } else { // incompressible block: store it verbatim
            baseVals[blkCounter] = 0; // base value is unused when isCompressed == 0
            isCompressed[blkCounter] = 0;
            memcpy(&compressed[bytesCopied], &c[offset], blockSize);
            bytesCopied += blockSize;
        }
        offset += blockSize;
        blkCounter++;
        if (offset + blockSize > len) { // partial tail block: store it verbatim
            baseVals[blkCounter] = 0;
            isCompressed[blkCounter] = offset - len;
            memcpy(&compressed[bytesCopied], &c[offset], len - offset);
            bytesCopied += len - offset;
            break;
        }
    }
    return bytesCopied;
}

int decompress(char *compressed, char *decompressed, int bytesCopied,
               long *baseVals, int16_t *isCompressed, int numBlocks) {
    int offset_compressed = 0;
    int offset_decompressed = 0;
    for (int i = 0; i < numBlocks; i++) { // decompress every block
        if (isCompressed[i] == 0) { // stored verbatim
            memcpy(&decompressed[offset_decompressed], &compressed[offset_compressed], blockSize);
            offset_compressed += blockSize;
            offset_decompressed += blockSize;
            continue;
        }
        if (isCompressed[i] < 0) { // verbatim tail block
            int bytes_to_copy = -1 * isCompressed[i];
            memcpy(&decompressed[offset_decompressed], &compressed[offset_compressed], bytes_to_copy);
            offset_compressed += bytes_to_copy;
            offset_decompressed += bytes_to_copy;
            break;
        }
        // If the code reaches this point, actual compression has taken place.
        int chunk_size;      // width of the original values
        int compressed_size; // width of the stored deltas
        if (isCompressed[i] / 4 == 1)      chunk_size = 2;
        else if (isCompressed[i] / 4 == 2) chunk_size = 4;
        else                               chunk_size = 8;
        if (isCompressed[i] % 4 == 1)      compressed_size = 1;
        else if (isCompressed[i] % 4 == 2) compressed_size = 2;
        else                               compressed_size = 4;
        int numPtrs = blockSize / chunk_size;
        for (int j = 0; j < numPtrs; ++j) {
            long compressed_val = getDelta(&compressed[offset_compressed], compressed_size);
            compressed_val += baseVals[i];
            if (chunk_size == 2) {
                int16_t num = compressed_val;
                memcpy(&decompressed[offset_decompressed], &num, chunk_size);
            } else if (chunk_size == 4) {
                int32_t num = compressed_val;
                memcpy(&decompressed[offset_decompressed], &num, chunk_size);
            } else {
                long num = compressed_val;
                memcpy(&decompressed[offset_decompressed], &num, chunk_size);
            }
            offset_compressed += compressed_size;
            offset_decompressed += chunk_size;
        }
    }
    return offset_decompressed;
}

int main(int argc, char **argv) {
    const auto start = now(); // get start time

    const int longArraySize = 512;
    long testArray[longArraySize];
    for (int i = 0; i < longArraySize; i++) testArray[i] = 100 * i;

    int numBlocks = (((longArraySize * sizeof(long)) - 1) / blockSize) + 1; // ceiling
    long baseVals[numBlocks];
    int16_t isCompressed[numBlocks];
    char *compressed = (char *)malloc(100000);
    initArray(numBlocks, isCompressed);

    int bytesCopied = bdCompress((char *)testArray, longArraySize * sizeof(long),
                                 compressed, isCompressed, baseVals);
    printf("Length , Bytes copied : %zu , %d\n", longArraySize * sizeof(long), bytesCopied);

    int bytesAfterCompress = numBlocks * (sizeof(long) + sizeof(int16_t)) + bytesCopied;
    int bytesBeforeCompress = longArraySize * sizeof(long);
    float compression_ratio = ((float)bytesAfterCompress) / ((float)bytesBeforeCompress);
    for (int i = 0; i < numBlocks; i++) {
        printf("Base value , compressed info , Ratio : %ld , %d , %f\n",
               baseVals[i], isCompressed[i], compression_ratio);
    }
    printf("\n");

    char *decompressed = (char *)malloc(100000);
    int bytes = decompress(compressed, decompressed, bytesCopied, baseVals, isCompressed, numBlocks);
    printf("Bytes after decompression : %d\n", bytes);
    // memcmp rather than strncmp: the data is binary and contains zero bytes
    bool t = (bytes == (int)(longArraySize * sizeof(long))) &&
             (memcmp((char *)testArray, decompressed, bytes) == 0);
    if (t) printf("Successful \n");

    const auto end = now(); // get end time
    // elapsed time in milliseconds (must not be const; it is reused below)
    double elapsed = std::chrono::duration<double, std::milli>(end - start).count();
    std::cout << "Compression & Decompression time = " << elapsed << " milliseconds.";

    // Transfer compacted data to GPU
    // ----------------------------------------
    int *devX;
    int *devY;
    check_success(cudaMalloc(&devX, bytesAfterCompress));
    check_success(cudaMalloc(&devY, bytesBeforeCompress));
    // The source argument was missing here; the compressed stream is what gets
    // uploaded (a real GPU decompressor would also need baseVals/isCompressed).
    check_success(cudaMemcpy(devX, compressed, bytesCopied, cudaMemcpyHostToDevice));
    const auto transferCPU_GPU = now();
    elapsed = std::chrono::duration<double, std::milli>(transferCPU_GPU - end).count();
    std::cout << "Transfer CPU to GPU time = " << elapsed << " milliseconds.";

    // Decompress on GPU
    // ----------------------------------------
    int x = ceil(bytesAfterCompress / 1024.0);
    dim3 DimGrid(x, 1, 1);
    dim3 DimBlock(BLOCK_SIZE, 1, 1);
    decompress_kernel<<<DimGrid, DimBlock>>>(devX, devY);
    cudaDeviceSynchronize();
    const auto Decompress_GPU = now();
    elapsed = std::chrono::duration<double, std::milli>(Decompress_GPU - transferCPU_GPU).count();
    std::cout << "De-Compression in GPU time = " << elapsed << " milliseconds.";

    // Free device memory
    // ----------------------------------------
    check_success(cudaFree(devX));
    check_success(cudaFree(devY));
    return 0;
}
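The decompress_kernel stub above leaves the GPU path unimplemented. Below is a minimal sketch of what a device-side decoder for this format could look like. It assumes the host also uploads baseVals and isCompressed, and precomputes per-block byte offsets into the compressed and decompressed streams (inOff/outOff, hypothetical arrays not in the original, obtained by an exclusive scan over the per-block sizes) so blocks can be decoded independently; a little-endian layout is assumed, as in getVals.

#include <cstdint>

// Hypothetical kernel: one thread decodes one compressed block.
// The flag mirrors bdCompress: 0 -> verbatim block, < 0 -> verbatim tail of
// -flag bytes, otherwise chunk width = flag/4 and delta width = flag%4.
__global__ void bd_decompress_kernel(const char *compressed, char *decompressed,
                                     const long *baseVals, const int16_t *isCompressed,
                                     const int *inOff, const int *outOff,
                                     int numBlocks, int blockBytes) {
    int b = blockIdx.x * blockDim.x + threadIdx.x;
    if (b >= numBlocks) return;
    const char *src = compressed + inOff[b];
    char *dst = decompressed + outOff[b];
    int16_t flag = isCompressed[b];
    if (flag <= 0) { // stored verbatim (0) or trailing partial block (< 0)
        int n = (flag == 0) ? blockBytes : -flag;
        for (int t = 0; t < n; t++) dst[t] = src[t];
        return;
    }
    int chunk = (flag / 4 == 1) ? 2 : (flag / 4 == 2) ? 4 : 8; // original value width
    int csize = (flag % 4 == 1) ? 1 : (flag % 4 == 2) ? 2 : 4; // stored delta width
    for (int j = 0; j < blockBytes / chunk; j++) {
        long delta = 0; // assemble the unsigned delta byte by byte (little-endian)
        for (int byte = 0; byte < csize; byte++)
            delta |= (long)(unsigned char)src[j * csize + byte] << (8 * byte);
        long val = delta + baseVals[b];
        for (int byte = 0; byte < chunk; byte++) // write the reconstructed value
            dst[j * chunk + byte] = (char)(val >> (8 * byte));
    }
}

One thread per compressed block is the simplest correct mapping; a refinement would assign a warp per block, with each lane decoding one value.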
b6a7e6936874bddd55b6dc42935516bc76166071.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2017 Darius Rckert * Licensed under the MIT License. * See LICENSE file for more information. */ #include "saiga/config.h" #include "saiga/util/ArrayView.h" int main() { cout << "hello world" << endl; return 0; } #if 0 #include <thrust/device_vector.h> #include "saiga/cuda/cudaHelper.h" #include "saiga/cuda/device_helper.h" #include "saiga/cuda/event.h" #include "saiga/cuda/pinned_vector.h" #include "saiga/cuda/stream.h" #include "saiga/util/math.h" #include <iostream> #include <vector> using Saiga::ArrayView; using Saiga::CUDA::ThreadInfo; //#define LECTURE template <int K> class GLM_ALIGN(16) Element { public: vec4 data; HD inline void operator()() { for (int k = 0; k < K * 512; ++k) { data = data * data + data; } } }; template <typename T> __global__ static void process(ArrayView<T> data) { ThreadInfo<> ti; if (ti.thread_id >= data.size()) return; T e = data[ti.thread_id]; e(); data[ti.thread_id] = e; } #ifdef LECTURE template <int K> static void uploadProcessDownloadAsync(int N) { using T = Element<K>; thrust::host_vector<T> h_data(N); thrust::device_vector<T> d_data(N); { Saiga::CUDA::CudaScopedTimerPrint timer("process"); // Compute launch arguments const unsigned int BLOCK_SIZE = 128; const unsigned int BLOCKS = Saiga::CUDA::getBlockCount(N, BLOCK_SIZE); hipMemcpy(d_data.data().get(), h_data.data(), N * sizeof(T), hipMemcpyHostToDevice); hipLaunchKernelGGL(( process<T>), dim3(BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_data); hipMemcpy(h_data.data(), d_data.data().get(), N * sizeof(T), hipMemcpyDeviceToHost); } } int main(int argc, char* argv[]) { uploadProcessDownloadAsync<8>(1024 * 1024); cout << "Done." << endl; } #else template <int K> static void uploadProcessDownloadAsync(int N, int slices, int streamCount) { using T = Element<K>; Saiga::thrust::pinned_vector<T> h_data(N); // thrust::host_vector<T> h_data(N); thrust::device_vector<T> d_data(N); // size_t size = N * sizeof(T); SAIGA_ASSERT(N % slices == 0); int sliceN = N / slices; size_t slizeSize = sliceN * sizeof(T); // Create a separate stream for each slice for maximum parallelism std::vector<Saiga::CUDA::CudaStream> streams(streamCount); { // ArrayViews simplify slice creation ArrayView<T> vd(d_data); ArrayView<T> vh(h_data); Saiga::CUDA::CudaScopedTimerPrint tim("uploadProcessDownloadAsync " + std::to_string(slices)); for (int i = 0; i < slices; ++i) { // Pick current stream and slice auto& stream = streams[i % streamCount]; auto d_slice = vd.slice_n(i * sliceN, sliceN); auto h_slice = vh.slice_n(i * sliceN, sliceN); // Compute launch arguments const unsigned int BLOCK_SIZE = 128; const unsigned int BLOCKS = Saiga::CUDA::getBlockCount(sliceN, BLOCK_SIZE); hipMemcpyAsync(d_slice.data(), h_slice.data(), slizeSize, hipMemcpyHostToDevice, stream); hipLaunchKernelGGL(( process<T>), dim3(BLOCKS), dim3(BLOCK_SIZE), 0, stream, d_slice); hipMemcpyAsync(h_slice.data(), d_slice.data(), slizeSize, hipMemcpyDeviceToHost, stream); } } } int main(int argc, char* argv[]) { uploadProcessDownloadAsync<8>(1024 * 1024, 1, 1); uploadProcessDownloadAsync<8>(1024 * 1024, 2, 2); uploadProcessDownloadAsync<8>(1024 * 1024, 4, 4); uploadProcessDownloadAsync<8>(1024 * 1024, 8, 8); uploadProcessDownloadAsync<8>(1024 * 1024, 16, 16); uploadProcessDownloadAsync<8>(1024 * 1024, 64, 8); uploadProcessDownloadAsync<8>(1024 * 1024, 64, 64); cout << "Done." << endl; } #endif #endif
b6a7e6936874bddd55b6dc42935516bc76166071.cu
/** * Copyright (c) 2017 Darius Rückert * Licensed under the MIT License. * See LICENSE file for more information. */ #include "saiga/config.h" #include "saiga/util/ArrayView.h" int main() { cout << "hello world" << endl; return 0; } #if 0 #include <thrust/device_vector.h> #include "saiga/cuda/cudaHelper.h" #include "saiga/cuda/device_helper.h" #include "saiga/cuda/event.h" #include "saiga/cuda/pinned_vector.h" #include "saiga/cuda/stream.h" #include "saiga/util/math.h" #include <iostream> #include <vector> using Saiga::ArrayView; using Saiga::CUDA::ThreadInfo; //#define LECTURE template <int K> class GLM_ALIGN(16) Element { public: vec4 data; HD inline void operator()() { for (int k = 0; k < K * 512; ++k) { data = data * data + data; } } }; template <typename T> __global__ static void process(ArrayView<T> data) { ThreadInfo<> ti; if (ti.thread_id >= data.size()) return; T e = data[ti.thread_id]; e(); data[ti.thread_id] = e; } #ifdef LECTURE template <int K> static void uploadProcessDownloadAsync(int N) { using T = Element<K>; thrust::host_vector<T> h_data(N); thrust::device_vector<T> d_data(N); { Saiga::CUDA::CudaScopedTimerPrint timer("process"); // Compute launch arguments const unsigned int BLOCK_SIZE = 128; const unsigned int BLOCKS = Saiga::CUDA::getBlockCount(N, BLOCK_SIZE); cudaMemcpy(d_data.data().get(), h_data.data(), N * sizeof(T), cudaMemcpyHostToDevice); process<T><<<BLOCKS, BLOCK_SIZE, 0>>>(d_data); cudaMemcpy(h_data.data(), d_data.data().get(), N * sizeof(T), cudaMemcpyDeviceToHost); } } int main(int argc, char* argv[]) { uploadProcessDownloadAsync<8>(1024 * 1024); cout << "Done." << endl; } #else template <int K> static void uploadProcessDownloadAsync(int N, int slices, int streamCount) { using T = Element<K>; Saiga::thrust::pinned_vector<T> h_data(N); // thrust::host_vector<T> h_data(N); thrust::device_vector<T> d_data(N); // size_t size = N * sizeof(T); SAIGA_ASSERT(N % slices == 0); int sliceN = N / slices; size_t slizeSize = sliceN * sizeof(T); // Create a separate stream for each slice for maximum parallelism std::vector<Saiga::CUDA::CudaStream> streams(streamCount); { // ArrayViews simplify slice creation ArrayView<T> vd(d_data); ArrayView<T> vh(h_data); Saiga::CUDA::CudaScopedTimerPrint tim("uploadProcessDownloadAsync " + std::to_string(slices)); for (int i = 0; i < slices; ++i) { // Pick current stream and slice auto& stream = streams[i % streamCount]; auto d_slice = vd.slice_n(i * sliceN, sliceN); auto h_slice = vh.slice_n(i * sliceN, sliceN); // Compute launch arguments const unsigned int BLOCK_SIZE = 128; const unsigned int BLOCKS = Saiga::CUDA::getBlockCount(sliceN, BLOCK_SIZE); cudaMemcpyAsync(d_slice.data(), h_slice.data(), slizeSize, cudaMemcpyHostToDevice, stream); process<T><<<BLOCKS, BLOCK_SIZE, 0, stream>>>(d_slice); cudaMemcpyAsync(h_slice.data(), d_slice.data(), slizeSize, cudaMemcpyDeviceToHost, stream); } } } int main(int argc, char* argv[]) { uploadProcessDownloadAsync<8>(1024 * 1024, 1, 1); uploadProcessDownloadAsync<8>(1024 * 1024, 2, 2); uploadProcessDownloadAsync<8>(1024 * 1024, 4, 4); uploadProcessDownloadAsync<8>(1024 * 1024, 8, 8); uploadProcessDownloadAsync<8>(1024 * 1024, 16, 16); uploadProcessDownloadAsync<8>(1024 * 1024, 64, 8); uploadProcessDownloadAsync<8>(1024 * 1024, 64, 64); cout << "Done." << endl; } #endif #endif
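Both versions of this file make the same point: splitting one large upload/process/download into slices on independent streams lets copies and kernels overlap. The Saiga wrappers hide the plumbing, so here is a minimal raw-CUDA sketch of the same pattern under the same assumptions; pinned host memory is required for cudaMemcpyAsync to actually run asynchronously, and the scale kernel is a stand-in, not part of the library.

#include <cuda_runtime.h>

__global__ void scale(float *d, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) d[i] *= 2.0f;
}

int main() {
    const int N = 1 << 20, slices = 8, sliceN = N / slices;
    float *h, *d;
    cudaMallocHost(&h, N * sizeof(float)); // pinned: async copies can overlap
    cudaMalloc(&d, N * sizeof(float));
    cudaStream_t s[slices];
    for (int i = 0; i < slices; i++) cudaStreamCreate(&s[i]);
    for (int i = 0; i < slices; i++) {
        float *hs = h + i * sliceN, *ds = d + i * sliceN;
        size_t bytes = sliceN * sizeof(float);
        cudaMemcpyAsync(ds, hs, bytes, cudaMemcpyHostToDevice, s[i]);
        scale<<<(sliceN + 127) / 128, 128, 0, s[i]>>>(ds, sliceN);
        cudaMemcpyAsync(hs, ds, bytes, cudaMemcpyDeviceToHost, s[i]);
    }
    cudaDeviceSynchronize(); // wait for all slices
    for (int i = 0; i < slices; i++) cudaStreamDestroy(s[i]);
    cudaFreeHost(h);
    cudaFree(d);
    return 0;
}

Each slice's copy-in, kernel and copy-out are serialized within its own stream but free to overlap with other streams, which is exactly what the 64-slice configurations above exploit.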
test3.hip
// !!! This is a file automatically generated by hipify!!!
// Multiple blocks, multiple threads (no loop; the grid/block configuration replaces it)
#include <hip/hip_runtime.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctime>
//----------------------------------------------
// Vector-add compute kernel (GPU) **a function prefixed with __global__ is a kernel;
// a kernel can only return void**
__global__ void gpu_add(float* c, float* a, float* b, int n){
    int j=blockIdx.x*blockDim.x+threadIdx.x;
    c[j]=a[j]+b[j];
}
//----------------------------------------------
// Ordinary vector-add function (Host)
void host_add(float* c, float* a, float* b, int n){
    for(int k=0; k<n; k++){
        c[k]=a[k]+b[k];
    }
}
//----------------------------------------------
// Function for computing the error
double diff(float* a, float* b, int n){
    double s=0, r=0;
    for(int k=0; k<n; k++){
        double w=a[k]-b[k];
        s+=w*w;
        r+=a[k]*a[k];
    }
    return sqrt(s/r); // relative error
}
//----------------------------------------------
// Time function (returns milliseconds)
double ms_time(){
    return (double)clock()/CLOCKS_PER_SEC*1000.0;
}
//----------------------------------------------
// Main program
int main(){
    // Set the vector size
    int n=1024*1024;
    int size=n*sizeof(float);
    // Grid and block configuration
    int block=512; //blockDim (threads per block)
    int grid=n/block; //gridDim (blocks per grid)
    // Number of calls (to measure average performance)
    int loop=100;
    // Allocate host memory
    float *a,*b,*c,*d;
    a=(float*)malloc(size);
    b=(float*)malloc(size);
    c=(float*)malloc(size);
    d=(float*)malloc(size);
    // Fill the input vectors with random numbers
    srand(time(0));
    for(int k=0; k<n; k++){
        a[k]=(float)rand()/RAND_MAX*2-1;
        b[k]=(float)rand()/RAND_MAX*2-1;
    }
    // Allocate device memory
    float *ga,*gb,*gc;
    hipMalloc((void**)&ga, size);
    hipMalloc((void**)&gb, size);
    hipMalloc((void**)&gc, size);
    // Load vectors a,b into device memory
    hipMemcpy(ga, a, size, hipMemcpyHostToDevice);
    hipMemcpy(gb, b, size, hipMemcpyHostToDevice);
    //---- part 1 : measure accuracy --------
    // Call the kernel (GPU)
    hipLaunchKernelGGL(( gpu_add), dim3(grid), dim3(block), 0, 0, gc, ga, gb, n);
    // Call the ordinary function (Host)
    host_add(d, a, b, n);
    // Copy the result back to the host
    hipMemcpy(c, gc, size, hipMemcpyDeviceToHost);
    // Compare the difference between the two
    printf("vector add N(%d) elements, diff = %g\n", n, diff(c,d,n));
    //---- part 2 : measure performance --------
    // Measure GPU kernel performance
    double gpu_dt = ms_time();
    for(int w=0; w<loop; w++){
        hipLaunchKernelGGL(( gpu_add), dim3(grid), dim3(block), 0, 0, gc, ga, gb, n);
        hipDeviceSynchronize(); // make sure the kernel has run to completion
    }
    gpu_dt = (ms_time()-gpu_dt)/loop; // average time
    // Measure Host function performance
    double host_dt = ms_time();
    for(int w=0; w<loop; w++){
        host_add(d, a, b, n);
    }
    host_dt = (ms_time()-host_dt)/loop; // average time
    // Print the average execution times
    printf("host time: %g ms\n", host_dt);
    printf("gpu time: %g ms\n", gpu_dt);
    // Free host memory
    free(a); free(b); free(c); free(d);
    // Free device memory
    hipFree(ga); hipFree(gb); hipFree(gc);
    return 0;
}
test3.cu
// Multiple blocks, multiple threads (no loop; the grid/block configuration replaces it)
#include <cuda.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctime>
//----------------------------------------------
// Vector-add compute kernel (GPU) **a function prefixed with __global__ is a kernel;
// a kernel can only return void**
__global__ void gpu_add(float* c, float* a, float* b, int n){
    int j=blockIdx.x*blockDim.x+threadIdx.x;
    c[j]=a[j]+b[j];
}
//----------------------------------------------
// Ordinary vector-add function (Host)
void host_add(float* c, float* a, float* b, int n){
    for(int k=0; k<n; k++){
        c[k]=a[k]+b[k];
    }
}
//----------------------------------------------
// Function for computing the error
double diff(float* a, float* b, int n){
    double s=0, r=0;
    for(int k=0; k<n; k++){
        double w=a[k]-b[k];
        s+=w*w;
        r+=a[k]*a[k];
    }
    return sqrt(s/r); // relative error
}
//----------------------------------------------
// Time function (returns milliseconds)
double ms_time(){
    return (double)clock()/CLOCKS_PER_SEC*1000.0;
}
//----------------------------------------------
// Main program
int main(){
    // Set the vector size
    int n=1024*1024;
    int size=n*sizeof(float);
    // Grid and block configuration
    int block=512; //blockDim (threads per block)
    int grid=n/block; //gridDim (blocks per grid)
    // Number of calls (to measure average performance)
    int loop=100;
    // Allocate host memory
    float *a,*b,*c,*d;
    a=(float*)malloc(size);
    b=(float*)malloc(size);
    c=(float*)malloc(size);
    d=(float*)malloc(size);
    // Fill the input vectors with random numbers
    srand(time(0));
    for(int k=0; k<n; k++){
        a[k]=(float)rand()/RAND_MAX*2-1;
        b[k]=(float)rand()/RAND_MAX*2-1;
    }
    // Allocate device memory
    float *ga,*gb,*gc;
    cudaMalloc((void**)&ga, size);
    cudaMalloc((void**)&gb, size);
    cudaMalloc((void**)&gc, size);
    // Load vectors a,b into device memory
    cudaMemcpy(ga, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(gb, b, size, cudaMemcpyHostToDevice);
    //---- part 1 : measure accuracy --------
    // Call the kernel (GPU)
    gpu_add<<<grid, block>>>(gc, ga, gb, n);
    // Call the ordinary function (Host)
    host_add(d, a, b, n);
    // Copy the result back to the host
    cudaMemcpy(c, gc, size, cudaMemcpyDeviceToHost);
    // Compare the difference between the two
    printf("vector add N(%d) elements, diff = %g\n", n, diff(c,d,n));
    //---- part 2 : measure performance --------
    // Measure GPU kernel performance
    double gpu_dt = ms_time();
    for(int w=0; w<loop; w++){
        gpu_add<<<grid, block>>>(gc, ga, gb, n);
        cudaThreadSynchronize(); // make sure the kernel has run to completion
    }
    gpu_dt = (ms_time()-gpu_dt)/loop; // average time
    // Measure Host function performance
    double host_dt = ms_time();
    for(int w=0; w<loop; w++){
        host_add(d, a, b, n);
    }
    host_dt = (ms_time()-host_dt)/loop; // average time
    // Print the average execution times
    printf("host time: %g ms\n", host_dt);
    printf("gpu time: %g ms\n", gpu_dt);
    // Free host memory
    free(a); free(b); free(c); free(d);
    // Free device memory
    cudaFree(ga); cudaFree(gb); cudaFree(gc);
    return 0;
}
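test3 times the kernel with clock(), which only works because every launch is followed by a device-wide synchronize. A common alternative is to let the GPU timestamp the work itself with CUDA events. A minimal sketch follows; time_kernel is an illustrative name, and it reuses the gpu_add kernel and device pointers defined in the file above rather than standing fully on its own.

#include <cuda_runtime.h>

// Times `loop` launches of gpu_add with cudaEvent_t, which records
// timestamps on the GPU itself instead of relying on the host clock().
float time_kernel(float *gc, float *ga, float *gb, int n,
                  int grid, int block, int loop) {
    cudaEvent_t t0, t1;
    cudaEventCreate(&t0);
    cudaEventCreate(&t1);
    cudaEventRecord(t0);
    for (int w = 0; w < loop; w++)
        gpu_add<<<grid, block>>>(gc, ga, gb, n);
    cudaEventRecord(t1);
    cudaEventSynchronize(t1); // wait until t1 has actually been recorded
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, t0, t1); // elapsed time in milliseconds
    cudaEventDestroy(t0);
    cudaEventDestroy(t1);
    return ms / loop; // average time per launch
}

Because the events are recorded in the same stream as the launches, no per-iteration synchronize is needed; the single cudaEventSynchronize at the end suffices.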
7dacfe8f1c0c9c763e78a9adc7aeb83056d67e7e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "ProjHelperFun.cu.h" #include "Constants.h" #include "TridagPar.h" #include "../include/CudaUtilProj.cu.h" //#include "ProjHost.cu" #define EPSILON 0.0001 #define T 32 //#define GPU_INIT_TEST // tested OK //#define GPU_SETPAYOFF_TEST // tested OK //#define GPU_UPDATE_PARAMS_TEST // tested OK #define GPU_ROLLBACK_PART_1_TEST // tested ok #define GPU_ROLLBACK_PART_2_TEST //{{{KERNELS ------ __global__ void d_initTimeline( REAL* d_timeline, const unsigned numT, const REAL t){ unsigned gid = blockIdx.x*blockDim.x + threadIdx.x; if(gid < numT) { d_timeline[gid] = t*gid / (numT-1); } } __global__ void d_initNUM( REAL* d_num, unsigned int num_size, const REAL d, unsigned myIndex, const REAL s){ const unsigned long gid = blockIdx.x*blockDim.x + threadIdx.x; if(gid < num_size) { d_num[gid] = gid*d - myIndex*d + s; } } __global__ void d_initOperator( REAL* d_x, unsigned int x_size, REAL* d_dxx){ const unsigned long gid = blockIdx.x*blockDim.x + threadIdx.x; if(gid < x_size) { REAL dxl, dxu; if(gid == 0){ // lower boundary dxl = 0.0; dxu = d_x[1] - d_x[0]; d_dxx[0] = 0.0; d_dxx[1] = 0.0; d_dxx[2] = 0.0; d_dxx[3] = 0.0; }else if(gid == x_size-1){ // upper boundary dxl = d_x[x_size-1] - d_x[x_size-2]; dxu = 0.0; d_dxx[(x_size-1)*4+0] = 0.0; d_dxx[(x_size-1)*4+1] = 0.0; d_dxx[(x_size-1)*4+2] = 0.0; d_dxx[(x_size-1)*4+3] = 0.0; }else{ dxl = d_x[gid] - d_x[gid-1]; dxu = d_x[gid+1] - d_x[gid]; d_dxx[gid*4+0] = 2.0/dxl/(dxl+dxu); d_dxx[gid*4+1] = -2.0*(1.0/dxl + 1.0/dxu)/(dxl+dxu); d_dxx[gid*4+2] = 2.0/dxu/(dxl+dxu); d_dxx[gid*4+3] = 0.0; } } } __global__ void d_setPayoff(REAL* d_result, REAL* d_x, unsigned int x_size, unsigned int y_size, unsigned int z_size){ unsigned int x = blockDim.x*blockIdx.x + threadIdx.x; unsigned int y = blockDim.y*blockIdx.y + threadIdx.y; unsigned int z = blockDim.z*blockIdx.z + threadIdx.z; if(x < x_size && y < y_size && z < z_size){ d_result[z*y_size*x_size + y*x_size + x] = max(d_x[y]-(0.001*z), (REAL)0.0); } } __global__ void d_updateParams(REAL* d_varX, REAL* d_varY, REAL* d_x, REAL* d_y, REAL* d_timeline, unsigned int g, REAL alpha, REAL beta, REAL nu, unsigned int numX, unsigned int numY){ unsigned int i = blockDim.x*blockIdx.x + threadIdx.x; unsigned int j = blockDim.y*blockIdx.y + threadIdx.y; if(i >= numX || j >= numY) return; d_varX[i*numY+j] = exp(2.0*( beta*log(d_x[i]) + d_y[j] - 0.5*nu*nu*d_timeline[g])); d_varY[i*numY+j] = exp(2.0*( alpha*log(d_x[i]) + d_y[j] - 0.5*nu*nu*d_timeline[g])); } #define YX(k,j,i) ((k)*(numY)*(numX)+(j)*(numX)+(i)) #define XY(k,j,i) ((k)*(numY)*(numX)+(j)*(numY)+(i)) #define ZZ(k,j,i) (k*(numZ)*(numZ)+(j)*(numZ)+(i)) #define D4ID(j,i) ((j)*4+(i)) __global__ void d_explicit_xy_implicit_x(REAL* u, REAL* v, REAL* a, REAL* b, REAL* c, REAL* varX, REAL* varY, REAL* timeline, REAL* dxx, REAL* dyy, REAL* result, unsigned int g, unsigned numX, unsigned numY, unsigned outer, unsigned numZ){ unsigned int k = blockDim.z * blockIdx.z + threadIdx.z; //Outer unsigned int j = blockDim.y * blockIdx.y + threadIdx.y; //numY unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; //numX if(k >= outer || j >= numY || i >= numX) return; // explicit x u[YX(k,j,i)] = (1.0/(timeline[g+1]-timeline[g])) *result[XY(k,i,j)]; if(i > 0) { u[YX(k,j,i)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[D4ID(i,0)] ) * result[XY(k,i-1,j)]; } u[YX(k,j,i)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[D4ID(i,1)] ) * result[XY(k,i,j)]; if(i < numX-1) { u[YX(k,j,i)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[D4ID(i,2)] 
) * result[XY(k,i+1,j)]; } // explicit y ; RAW v, write u v[XY(k,i,j)] = 0.0; if(j > 0) { v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy[D4ID(j,0)] ) * result[XY(k,i,j-1)]; } v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy[D4ID(j,1)] ) * result[XY(k,i,j)]; if(j < numY-1) { v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy[D4ID(j,2)] ) * result[XY(k,i,j+1)]; } u[YX(k,j,i)] += v[XY(k,i,j)]; // implicit x // write a,b,c a[ZZ(k,j,i)] = - 0.5*(0.5*varX[XY(0,i,j)]*dxx[D4ID(i,0)]); b[ZZ(k,j,i)] = ( 1.0/(timeline[g+1]-timeline[g])) - 0.5*(0.5*varX[XY(0,i,j)]*dxx[D4ID(i,1)]); c[ZZ(k,j,i)] = - 0.5*(0.5*varX[XY(0,i,j)]*dxx[D4ID(i,2)]); } /* __global__ void d_implicit_x_tridag(REAL* u, R)(REAL* d_varX, REAL* d_varY, REAL* d_x, REAL* d_y, REAL* d_timeline, unsigned int g, REAL alpha, REAL beta, REAL nu, unsigned int numX, unsigned int numY){ unsigned int i = blockDim.x*blockIdx.x + threadIdx.x; unsigned int j = blockDim.y*blockIdx.y + threadIdx.y; if(i >= outer || j >= numY) return; d_varX[i*numY+j] = exp(2.0*( beta*log(d_x[i]) + d_y[j] - 0.5*nu*nu*d_timeline[g])); d_varY[i*numY+j] = exp(2.0*( alpha*log(d_x[i]) + d_y[j] - 0.5*nu*nu*d_timeline[g])); } */ __global__ void d_implicit_y(REAL* u, REAL* v, REAL* a, REAL* b, REAL* c, REAL* y, REAL* varY, REAL* timeline, REAL* dyy, unsigned int g, unsigned numX, unsigned numY, unsigned outer, unsigned numZ){ unsigned int k = blockDim.z * blockIdx.z + threadIdx.z; //Outer unsigned int i = blockDim.y * blockIdx.y + threadIdx.y; //numX unsigned int j = blockDim.x * blockIdx.x + threadIdx.x; //numY if(k >= outer || j >= numY || i >= numX) return; a[ZZ(k,i,j)] = - 0.5*(0.5*varY[XY(0,i,j)]*dyy[D4ID(j,0)]); b[ZZ(k,i,j)] = ( 1.0/(timeline[g+1]-timeline[g])) - 0.5*(0.5*varY[XY(0,i,j)]*dyy[D4ID(j,1)]); c[ZZ(k,i,j)] = - 0.5*(0.5*varY[XY(0,i,j)]*dyy[D4ID(j,2)]); y[ZZ(k,i,j)] = ( 1.0/(timeline[g+1]-timeline[g])) * u[YX(k,j,i)] - 0.5*v[XY(k,i,j)]; } //{{{ wrapper void initGrid_GPU( const REAL s0, const REAL alpha, const REAL nu,const REAL t, const unsigned numX, const unsigned numY, const unsigned numT, REAL* d_myX, REAL* d_myY, REAL* d_myTimeline, unsigned myXindex, unsigned myYindex) { const unsigned int BLOCK_SIZE = 256; unsigned int NUM_BLOCKS = ceil(numT / (float)BLOCK_SIZE); hipLaunchKernelGGL(( d_initTimeline), dim3(NUM_BLOCKS),dim3(BLOCK_SIZE), 0, 0, d_myTimeline, numT, t); NUM_BLOCKS = ceil(numX / (float)BLOCK_SIZE); const REAL stdX = 20.0*alpha*s0*sqrt(t); const REAL dx = stdX/numX; hipLaunchKernelGGL(( d_initNUM), dim3(NUM_BLOCKS),dim3(BLOCK_SIZE), 0, 0, d_myX, numX, dx, myXindex, s0); const REAL stdY = 10.0*nu*sqrt(t); const REAL dy = stdY/numY; const REAL logAlpha = log(alpha); NUM_BLOCKS = ceil(numY / (float)BLOCK_SIZE); hipLaunchKernelGGL(( d_initNUM), dim3(NUM_BLOCKS),dim3(BLOCK_SIZE), 0, 0, d_myY, numY, dy, myYindex, logAlpha); } void initOperator_GPU(REAL* d_x, unsigned int x_size, REAL* d_dxx){ const unsigned int BLOCK_SIZE = 256; unsigned int NUM_BLOCKS = ceil(x_size / (float)BLOCK_SIZE); hipLaunchKernelGGL(( d_initOperator), dim3(NUM_BLOCKS),dim3(BLOCK_SIZE), 0, 0, d_x, x_size, d_dxx); } // read a b c r, write u inline void tridag( const vector<REAL>& a, // size [n] const vector<REAL>& b, // size [n] const vector<REAL>& c, // size [n] const vector<REAL>& r, // size [n] const int n, vector<REAL>& u, // size [n] vector<REAL>& uu // size [n] temporary ) { int i; // int offset; REAL beta; u[0] = r[0]; uu[0] = b[0]; for(i=1; i<n; i++) { beta = a[i] / uu[i-1]; uu[i] = b[i] - beta*c[i-1]; u[i] = r[i] - beta*u[i-1]; } #if 1 // X) this is a backward recurrence u[n-1] = u[n-1] / 
uu[n-1]; for(i=n-2; i>=0; i--) { u[i] = (u[i] - c[i]*u[i+1]) / uu[i]; } #else // Hint: X) can be written smth like (once you make a non-constant) for(i=0; i<n; i++) a[i] = u[n-1-i]; a[0] = a[0] / uu[n-1]; for(i=1; i<n; i++) a[i] = (a[i] - c[n-1-i]*a[i-1]) / uu[n-1-i]; for(i=0; i<n; i++) u[i] = a[n-1-i]; #endif } void run_OrigCPU( const unsigned int& outer, const unsigned int& numX, const unsigned int& numY, const unsigned int& numT, const REAL& s0, const REAL& t, const REAL& alpha, const REAL& nu, const REAL& beta, REAL* res // [outer] RESULT ) { PrivGlobs globs(numX, numY, numT); initGrid (s0,alpha,nu,t, numX, numY, numT, globs); initOperator(globs.myX,globs.myDxx); initOperator(globs.myY,globs.myDyy); // array expansion on myResult (originally globs.myResult) from [numX][numY] to [outer][numX][numY] vector<vector<vector<REAL> > > myResult; myResult.resize(outer); #pragma omp parallel for default(shared) schedule(static) for(int i=0; i<outer; i++) { myResult[i].resize(numX); for(int j=0; j<numX; j++){ myResult[i][j].resize(numY); } } //myVarX myVarY: [numX][numY] vector<vector<REAL> > myVarX, myVarY; myVarX.resize(numX); myVarY.resize(numX); for(int i=0; i<numX; i++){ myVarX[i].resize(numY); myVarY[i].resize(numY); } unsigned numZ = max(numX, numY); // array expansion on a, b, c, y, yy, [outer][numZ][numZ] vector<vector<vector<REAL> > > a,b,c,y,yy; a.resize(outer); b.resize(outer); c.resize(outer); y.resize(outer); yy.resize(outer); #pragma omp parallel for default(shared) schedule(static) for(int i=0; i<outer; i++) { a[i].resize(numZ); b[i].resize(numZ); c[i].resize(numZ); y[i].resize(numZ); yy[i].resize(numZ); for(int j=0; j<numZ; j++){ a[i][j].resize(numZ); b[i][j].resize(numZ); c[i][j].resize(numZ); y[i][j].resize(numZ); yy[i][j].resize(numZ); } } // array expansion on u,v, u is [outer][numY][numX], v is [outer][numX][] vector<vector<vector<REAL> > > u,v; u.resize(outer); v.resize(outer); for(int k=0; k<outer; k++){ u[k].resize(numY); for(int i=0; i< numY; i++) u[k][i].resize(numX); v[k].resize(numX); for(int i=0; i< numX; i++) v[k][i].resize(numY); } // setPayoff(strike, globs); it's parallel so can be loop-distributed on the outmost loop // also need to do array expansion on globs.myResult, i.e. 
myResult #pragma omp parallel for default(shared) schedule(static) //Kernel-1: 3D for( unsigned k = 0; k < outer; ++ k ) { // outmost loop // modified setPayoff function below for(unsigned i=0;i<globs.myX.size();++i) { //REAL payoff = max(globs.myX[i]-strike, (REAL)0.0); // move this inside the loop to do privatization for(unsigned j=0;j<globs.myY.size();++j) // globs.myResult[i][j] = payoff; // note that payoff is just a scalar variables, myResult[k][i][j] = max(globs.myX[i]-(0.001*k), (REAL)0.0); } } //--- original code: // for(int i = globs.myTimeline.size()-2;i>=0;--i) // { // updateParams(i,alpha,beta,nu,globs); // rollback(i, globs); // } //--- use loop interchange and loop distribution //modified updateParams(g,alpha,beta,nu,globs); // Kernel-2: 3D for(int g = globs.myTimeline.size()-2;g>=0;--g) { // second outer loop, g #pragma omp parallel for default(shared) schedule(static) // Kernel-2: 2D for(unsigned i=0;i<globs.myX.size();++i){ for(unsigned j=0;j<globs.myY.size();++j) { myVarX[i][j] = exp(2.0*( beta*log(globs.myX[i]) + globs.myY[j] - 0.5*nu*nu*globs.myTimeline[g] ) ); myVarY[i][j] = exp(2.0*( alpha*log(globs.myX[i]) + globs.myY[j] - 0.5*nu*nu*globs.myTimeline[g] ) ); // nu*nu } } // rollback Part 1, write u,v, a, b, c #pragma omp parallel for default(shared) schedule(static) // Kernel-3: 3D for( unsigned k = 0; k < outer; ++ k ) { //outermost loop k, after interchanged //Par for(unsigned j=0;j<numY;j++) { // interchanged with the inner loop for(unsigned i=0;i<numX;i++) { // explicit x u[k][j][i] = (1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) *myResult[k][i][j]; if(i > 0) { u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][0] ) * myResult[k][i-1][j]; } u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][1] ) * myResult[k][i][j]; if(i < numX-1) { u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][2] ) * myResult[k][i+1][j]; } // explicit y ; RAW v, write u v[k][i][j] = 0.0; if(j > 0) { v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][0] ) * myResult[k][i][j-1]; } v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][1] ) * myResult[k][i][j]; if(j < numY-1) { v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][2] ) * myResult[k][i][j+1]; } u[k][j][i] += v[k][i][j]; // implicit x // write a,b,c a[k][j][i] = - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][0]); b[k][j][i] = ( 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][1]); c[k][j][i] = - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][2]); } } } //Part 2 : read a,b,c,u to write u #pragma omp parallel for default(shared) schedule(static) //kernel-4: 2D Kernel or can be merged with the last one to make a 2D kernel for( unsigned k = 0; k < outer; ++ k ) { //outermost loop distribution //Par for(unsigned j=0;j<numY;j++) { // Par tridagPar(a[k][j],b[k][j],c[k][j],u[k][j],numX,u[k][j],yy[k][j]); } } //Part 3, write a b c y reading from u,v // implicit y, #pragma omp parallel for default(shared) schedule(static) // Kernel-5: 3D for( unsigned k = 0; k < outer; ++ k ) { //outermost loop distribution //Par for(unsigned i=0;i<numX;i++) { for(unsigned j=0;j<numY;j++) { a[k][i][j] = - 0.5*(0.5*myVarY[i][j]*globs.myDyy[j][0]); b[k][i][j] = ( 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) - 0.5*(0.5*myVarY[i][j]*globs.myDyy[j][1]); c[k][i][j] = - 0.5*(0.5*myVarY[i][j]*globs.myDyy[j][2]); y[k][i][j] = ( 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) *u[k][j][i] - 0.5*v[k][i][j]; } } } //Part 4: write myResult reading from a b c y #pragma omp parallel for default(shared) schedule(static) //kernel-6 for( unsigned k = 0; k < 
outer; ++ k ) { //outermost loop distribution //Par for(unsigned i=0;i<numX;i++) { tridagPar(a[k][i],b[k][i],c[k][i],y[k][i],numY,myResult[k][i],yy[k][i]); } } } #pragma omp parallel for default(shared) schedule(static) for( unsigned k = 0; k < outer; ++ k ) //outermost loop k res[k] = myResult[k][globs.myXindex][globs.myYindex]; // myRes[0][k]; //// ---------- GPU version -------------------- //// // globs vars for gpu REAL *h_result; // the final result // GPU variables REAL *d_x, *d_y, *d_timeline, *d_dxx, *d_dyy; REAL *d_result, *d_varX, *d_varY; REAL *d_a, *d_b, *d_c, *d_yy, *d_yyy, *d_u, *d_v; // myXindex myYindex are scalars const REAL stdX = 20.0*alpha*s0*sqrt(t); const REAL dx = stdX/numX; unsigned myXindex = static_cast<unsigned>(s0/dx) % numX; // const REAL stdY = 10.0*nu*sqrt(t); // const REAL dy = stdY/numY; // const REAL logAlpha = log(alpha); unsigned myYindex = static_cast<unsigned>(numY/2.0); int memsize_X = numX * sizeof(REAL); int memsize_Y = numY * sizeof(REAL); int memsize_T = numT * sizeof(REAL); int memsize_XY = numX * numY * sizeof(REAL); int memsize_OXY = outer * numX * numY * sizeof (REAL); int memsize_OZZ = outer * numZ * numZ * sizeof (REAL); // CPU variables h_result = (REAL*) malloc (memsize_OXY); // GPU variables hipMalloc((void**)&d_result, memsize_OXY); //[outer][numX][numY] hipMalloc((void**)&d_varX, memsize_XY); //[numX][numY] hipMalloc((void**)&d_varY, memsize_XY); //[numX][numY] hipMalloc((void**)&d_x, memsize_X); //[numX] hipMalloc((void**)&d_y, memsize_Y); //[numY] hipMalloc((void**)&d_timeline, memsize_T); //[numT] hipMalloc((void**)&d_dxx, 4 * memsize_X); //[numX][4] hipMalloc((void**)&d_dyy, 4 * memsize_Y); //[numY][4] //a b c yy yyy: [outer][numZ][numZ] hipMalloc((void**)&d_a , memsize_OZZ); hipMalloc((void**)&d_b , memsize_OZZ); hipMalloc((void**)&d_c , memsize_OZZ); hipMalloc((void**)&d_yy , memsize_OZZ); //y in seq code hipMalloc((void**)&d_yyy, memsize_OZZ); //yy in seq code hipMalloc((void**)&d_u , memsize_OXY); //d_u : [outer][numY][numX] hipMalloc((void**)&d_v , memsize_OXY); //d_v : [outer][numX][numY] //GPU init initGrid_GPU(s0, alpha, nu,t, numX,numY, numT, d_x, d_y, d_timeline, myXindex, myYindex); initOperator_GPU( d_x, numX, d_dxx); initOperator_GPU( d_y, numY, d_dyy); // test GPU init #ifdef GPU_INIT_TEST REAL *h_x, *h_y, *h_timeline, *h_dxx, *h_dyy; h_x = (REAL *) malloc (memsize_X ); h_y = (REAL *) malloc (memsize_Y ); h_timeline = (REAL *) malloc (memsize_T ); h_dxx = (REAL *) malloc (4* memsize_X ); h_dyy = (REAL *) malloc (4* memsize_Y ); hipMemcpy( h_x , d_x , numX*sizeof(REAL) , hipMemcpyDeviceToHost); hipMemcpy( h_y , d_y , numY*sizeof(REAL) , hipMemcpyDeviceToHost); hipMemcpy( h_timeline , d_timeline, memsize_T , hipMemcpyDeviceToHost); hipMemcpy( h_dxx , d_dxx , numX*4*sizeof(REAL) , hipMemcpyDeviceToHost); hipMemcpy( h_dyy , d_dyy , numY*4*sizeof(REAL) , hipMemcpyDeviceToHost); bool valid = true; for(int i = 0; i < numX; i++){ if(abs(h_x[i]-globs.myX[i]) > EPSILON){ valid = false; printf("\n** invalid h_x %f %f**\n", h_x[i], globs.myX[i]); break; } } for(int i = 0; i < numY; i++){ if(abs(h_y[i]-globs.myY[i]) > EPSILON){ valid = false; printf("\n** invalid h_y **\n"); break; } } for(int i = 0; i < numT; i++){ if(abs(h_timeline[i]-globs.myTimeline[i]) > EPSILON){ valid = false; printf("\n** invalid h_timeline %d %d**\n", h_timeline[i], globs.myTimeline[i]); break; } } for(int i = 0; i < numX*4; i++){ if(abs(h_dxx[i]-globs.myDxx[i/4][i%4]) > EPSILON){ valid = false; printf("\n** Invalid h_dxx **\n"); break; } } for(int i = 0; 
i < numY*4; i++){ if(abs(h_dyy[i]-globs.myDyy[i/4][i%4]) > EPSILON){ valid = false; printf("\n** Invalid h_dyy **\n"); break; } } if(!valid){ printf("\n**Initialization did not validate**\n"); } free(h_x);free(h_y);free(h_timeline);free(h_dxx);free(h_dyy); #endif // GPU setPayoff dim3 block_3D(8, 8, 8); dim3 grid_3D_OXY(ceil(numY/8.0), ceil(numX/8.0), ceil(outer/8.0)); hipLaunchKernelGGL(( d_setPayoff), dim3(grid_3D_OXY), dim3(block_3D), 0, 0, d_result, d_x, numY, numX, outer); #ifdef GPU_SETPAYOFF_TEST hipMemcpy( h_result , d_result , memsize_OXY, hipMemcpyDeviceToHost); for(int k = 0; k < outer; k++) for(int i = 0; i < globs.myX.size(); i++) for(int j = 0; j < globs.myY.size(); j++){ REAL myResultTemp = max(globs.myX[i]-(0.001*k), (REAL)0.0); if(abs(h_result[k*numX*numY+i*numY+j]-myResultTemp) > EPSILON){ printf("\n**SetPayOff did not validate %f %f**\n", h_result[k*numX*numY+i*numY+j], myResultTemp); break; } } #endif //GPU updateParams int dimy = ceil( numY / T ); int dimx = ceil( numX / T ); dim3 block(T,T), grid(dimx,dimy); // Test only when g = 0 the last Timeline iteration hipLaunchKernelGGL(( d_updateParams), dim3(grid), dim3(block) , 0, 0, d_varX, d_varY, d_x, d_y, d_timeline, 0, alpha, beta, nu, numX, numY); #ifdef GPU_UPDATE_PARAMS_TEST // Test only when g = 0 the last Timeline iteration, i.e., //hipLaunchKernelGGL(( d_updateParams), dim3(grid), dim3(block) , 0, 0, d_varX, d_varY, d_x, d_y, d_timeline, 0, alpha, beta, nu, numX, numY); REAL *h_varX, *h_varY; h_varX = (REAL*) malloc (memsize_XY ); h_varY = (REAL*) malloc (memsize_XY ); hipMemcpy( h_varX , d_varX , memsize_XY , hipMemcpyDeviceToHost); hipMemcpy( h_varY , d_varY , memsize_XY , hipMemcpyDeviceToHost); for(int i = 0; i < numX*numY; i++){ if(abs(h_varX[i] - myVarX[i/numY][i%numY]) > EPSILON || abs(h_varY[i] - myVarY[i/numY][i%numY]) > EPSILON){ printf("\n**Update Params did not validate %f=%f and %f=%f**\n", h_varX[i], myVarX[i/numY][i%numY], h_varY[i], myVarY[i/numY][i%numY]); break; } } free(h_varX); free(h_varY); #endif // GPU rollback Part_1 // const dim3 block_3D(8, 8, 8); const dim3 grid_3D_OYX(ceil(numX/8.0), ceil(numY/8.0),ceil(outer/8.0) ); hipLaunchKernelGGL(( d_explicit_xy_implicit_x), dim3(grid_3D_OYX), dim3(block_3D), 0, 0, d_u,d_v,d_a,d_b,d_c, d_varX,d_varY,d_timeline,d_dxx,d_dyy,d_result, 0, // firstly only test the case that g = 0 numX, numY, outer, numZ); #ifdef GPU_ROLLBACK_PART_1_TEST REAL *h_a, *h_b, *h_c, // *h_yy, *h_yyy, *h_u, *h_v; h_a = (REAL *) malloc (memsize_OZZ ); h_b = (REAL *) malloc (memsize_OZZ ); h_c = (REAL *) malloc (memsize_OZZ ); // h_yy = (REAL *) malloc (memsize_OZZ ); // h_yyy = (REAL *) malloc (memsize_OZZ ); h_u = (REAL *) malloc (memsize_OXY ); h_v = (REAL *) malloc (memsize_OXY ); hipMemcpy( h_a , d_a , memsize_OZZ , hipMemcpyDeviceToHost); hipMemcpy( h_b , d_b , memsize_OZZ , hipMemcpyDeviceToHost); hipMemcpy( h_c , d_c , memsize_OZZ , hipMemcpyDeviceToHost); // hipMemcpy( h_yy , d_yy , memsize_OZZ , hipMemcpyDeviceToHost); // hipMemcpy( h_yyy , d_yyy , memsize_OZZ , hipMemcpyDeviceToHost); hipMemcpy( h_u , d_u , memsize_OXY , hipMemcpyDeviceToHost); hipMemcpy( h_v , d_v , memsize_OXY , hipMemcpyDeviceToHost); #pragma omp parallel for default(shared) schedule(static) //Kernel-1: 3D for( unsigned k = 0; k < outer; ++ k ) { // outmost loop // modified setPayoff function below for(unsigned i=0;i<globs.myX.size();++i) { //REAL payoff = max(globs.myX[i]-strike, (REAL)0.0); // move this inside the loop to do privatization for(unsigned j=0;j<globs.myY.size();++j) // 
globs.myResult[i][j] = payoff; // note that payoff is just a scalar variables, myResult[k][i][j] = max(globs.myX[i]-(0.001*k), (REAL)0.0); } } unsigned g = 0; for( unsigned k = 0; k < outer; ++ k ) { //outermost loop k, after interchanged //Par for(unsigned j=0;j<numY;j++) { // interchanged with the inner loop for(unsigned i=0;i<numX;i++) { // explicit x u[k][j][i] = (1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) *myResult[k][i][j]; if(i > 0) { u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][0] ) * myResult[k][i-1][j]; } u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][1] ) * myResult[k][i][j]; if(i < numX-1) { u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][2] ) * myResult[k][i+1][j]; } // explicit y ; RAW v, write u v[k][i][j] = 0.0; if(j > 0) { v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][0] ) * myResult[k][i][j-1]; } v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][1] ) * myResult[k][i][j]; if(j < numY-1) { v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][2] ) * myResult[k][i][j+1]; } u[k][j][i] += v[k][i][j]; // implicit x // write a,b,c a[k][j][i] = - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][0]); b[k][j][i] = ( 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][1]); c[k][j][i] = - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][2]); } } } bool valid = true; for(int iter = 0; iter < outer*numZ*numZ; iter ++){ unsigned k = iter /numZ/numZ; unsigned j = iter/ numZ; unsigned i = iter % numZ; if(i< numX && j < numY){ // REAL temp_a = - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][0]); if(abs(h_a[iter] - a[k][j][i]) > EPSILON || abs(h_b[iter] - b[k][j][i]) > EPSILON || abs(h_c[iter] - c[k][j][i]) > EPSILON){ valid = false; printf("\n** [h_a] k %d, j %d, i %d, : %f != %f **\n", k,j,i, h_a[iter], a[k][j][i]); printf("\n** [h_b] did not validate ! k %d, j %d, i %d, : %f != %f **\n", k,j,i, h_b[iter], b[k][j][i]); printf("\n** [h_c] did not validate ! k %d, j %d, i %d, : %f != %f **\n", k,j,i, h_c[iter], c[k][j][i]); } if (abs(h_u[iter] - u[k][j][i]) > EPSILON || abs(h_v[iter] - v[k][i][j]) > EPSILON ) { valid = false; printf("\n** [h_u] did not validate ! k %d, j %d, i %d, : %f != %f **\n", k,j,i, h_u[iter], u[k][j][i]); printf("\n** [h_v] did not validate ! k %d, j %d, i %d, : %f != %f **\n", k,j,i, h_v[iter], v[k][i][j]); break; } } } if(!valid){ printf("\n** GPU_ROLLBACK_PART1_TEST did not validate**\n"); } free(h_a); free(h_b);free(h_c); // free(h_yy);free(h_yyy); free(h_u); free(h_v); #endif hipFree(d_x); hipFree(d_y); hipFree(d_dxx);hipFree(d_dyy); hipFree(d_timeline); hipFree(d_result); hipFree(d_varX); hipFree(d_varY); hipFree(d_a); hipFree(d_b);hipFree(d_c); hipFree(d_yy);hipFree(d_yyy); hipFree(d_u); hipFree(d_v); free(h_result); //SHould perhaps be initialized on the gpu instead to save PCI bandwidth. Possibly negl /* * setPayOff: * INPUT: globs.myX * Output: myResult * * updateParams: * input: globs.myTimeline, globs.myX, globs.myY, alpha, beta, * output: myVarX, myVarY * * rollback-1: * input: globs.myTimeLine, myResult, * output: * * tridagPar: * * rollback-2: * input: * output: * */ // #endif }
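Parts 2 and 4 of the rollback (the tridagPar calls) still run on the CPU in this file. Below is a minimal sketch of a device-side solve for Part 2, written in plain CUDA launch syntax (hipify would translate it just as it did the kernels above). One thread per (outer, j) row runs the same sequential Thomas algorithm as tridag, using the flattened layout of the ZZ/YX macros; d_tridag_x is a name introduced here, REAL is assumed from Constants.h, and this is the straightforward per-system port, not the parallel-scan formulation of tridagPar.

// Solves, for each (k, j), the numX-long tridiagonal system along x.
// a,b,c and the scratch array stride by numZ; u is both right-hand side
// and solution (solved in place), laid out as [outer][numY][numX].
__global__ void d_tridag_x(const REAL *a, const REAL *b, const REAL *c,
                           REAL *u, REAL *scratch,
                           unsigned numX, unsigned numY,
                           unsigned outer, unsigned numZ) {
    unsigned k = blockDim.y * blockIdx.y + threadIdx.y; // outer
    unsigned j = blockDim.x * blockIdx.x + threadIdx.x; // numY
    if (k >= outer || j >= numY) return;
    const REAL *aa = &a[k * numZ * numZ + j * numZ];
    const REAL *bb = &b[k * numZ * numZ + j * numZ];
    const REAL *cc = &c[k * numZ * numZ + j * numZ];
    REAL *uu = &scratch[k * numZ * numZ + j * numZ];
    REAL *x  = &u[k * numY * numX + j * numX]; // row j of u[k]
    uu[0] = bb[0];
    for (unsigned i = 1; i < numX; i++) { // forward sweep
        REAL beta = aa[i] / uu[i - 1];
        uu[i] = bb[i] - beta * cc[i - 1];
        x[i]  = x[i] - beta * x[i - 1];
    }
    x[numX - 1] /= uu[numX - 1];          // backward substitution
    for (int i = numX - 2; i >= 0; i--)
        x[i] = (x[i] - cc[i] * x[i + 1]) / uu[i];
}

A launch such as d_tridag_x<<<dim3(ceil(numY/32.0), ceil(outer/8.0)), dim3(32, 8)>>>(d_a, d_b, d_c, d_u, d_yyy, numX, numY, outer, numZ) would then replace the Part 2 host loop.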
7dacfe8f1c0c9c763e78a9adc7aeb83056d67e7e.cu
#include "ProjHelperFun.cu.h" #include "Constants.h" #include "TridagPar.h" #include "../include/CudaUtilProj.cu.h" //#include "ProjHost.cu" #define EPSILON 0.0001 #define T 32 //#define GPU_INIT_TEST // tested OK //#define GPU_SETPAYOFF_TEST // tested OK //#define GPU_UPDATE_PARAMS_TEST // tested OK #define GPU_ROLLBACK_PART_1_TEST // tested ok #define GPU_ROLLBACK_PART_2_TEST //{{{KERNELS ------ __global__ void d_initTimeline( REAL* d_timeline, const unsigned numT, const REAL t){ unsigned gid = blockIdx.x*blockDim.x + threadIdx.x; if(gid < numT) { d_timeline[gid] = t*gid / (numT-1); } } __global__ void d_initNUM( REAL* d_num, unsigned int num_size, const REAL d, unsigned myIndex, const REAL s){ const unsigned long gid = blockIdx.x*blockDim.x + threadIdx.x; if(gid < num_size) { d_num[gid] = gid*d - myIndex*d + s; } } __global__ void d_initOperator( REAL* d_x, unsigned int x_size, REAL* d_dxx){ const unsigned long gid = blockIdx.x*blockDim.x + threadIdx.x; if(gid < x_size) { REAL dxl, dxu; if(gid == 0){ // lower boundary dxl = 0.0; dxu = d_x[1] - d_x[0]; d_dxx[0] = 0.0; d_dxx[1] = 0.0; d_dxx[2] = 0.0; d_dxx[3] = 0.0; }else if(gid == x_size-1){ // upper boundary dxl = d_x[x_size-1] - d_x[x_size-2]; dxu = 0.0; d_dxx[(x_size-1)*4+0] = 0.0; d_dxx[(x_size-1)*4+1] = 0.0; d_dxx[(x_size-1)*4+2] = 0.0; d_dxx[(x_size-1)*4+3] = 0.0; }else{ dxl = d_x[gid] - d_x[gid-1]; dxu = d_x[gid+1] - d_x[gid]; d_dxx[gid*4+0] = 2.0/dxl/(dxl+dxu); d_dxx[gid*4+1] = -2.0*(1.0/dxl + 1.0/dxu)/(dxl+dxu); d_dxx[gid*4+2] = 2.0/dxu/(dxl+dxu); d_dxx[gid*4+3] = 0.0; } } } __global__ void d_setPayoff(REAL* d_result, REAL* d_x, unsigned int x_size, unsigned int y_size, unsigned int z_size){ unsigned int x = blockDim.x*blockIdx.x + threadIdx.x; unsigned int y = blockDim.y*blockIdx.y + threadIdx.y; unsigned int z = blockDim.z*blockIdx.z + threadIdx.z; if(x < x_size && y < y_size && z < z_size){ d_result[z*y_size*x_size + y*x_size + x] = max(d_x[y]-(0.001*z), (REAL)0.0); } } __global__ void d_updateParams(REAL* d_varX, REAL* d_varY, REAL* d_x, REAL* d_y, REAL* d_timeline, unsigned int g, REAL alpha, REAL beta, REAL nu, unsigned int numX, unsigned int numY){ unsigned int i = blockDim.x*blockIdx.x + threadIdx.x; unsigned int j = blockDim.y*blockIdx.y + threadIdx.y; if(i >= numX || j >= numY) return; d_varX[i*numY+j] = exp(2.0*( beta*log(d_x[i]) + d_y[j] - 0.5*nu*nu*d_timeline[g])); d_varY[i*numY+j] = exp(2.0*( alpha*log(d_x[i]) + d_y[j] - 0.5*nu*nu*d_timeline[g])); } #define YX(k,j,i) ((k)*(numY)*(numX)+(j)*(numX)+(i)) #define XY(k,j,i) ((k)*(numY)*(numX)+(j)*(numY)+(i)) #define ZZ(k,j,i) (k*(numZ)*(numZ)+(j)*(numZ)+(i)) #define D4ID(j,i) ((j)*4+(i)) __global__ void d_explicit_xy_implicit_x(REAL* u, REAL* v, REAL* a, REAL* b, REAL* c, REAL* varX, REAL* varY, REAL* timeline, REAL* dxx, REAL* dyy, REAL* result, unsigned int g, unsigned numX, unsigned numY, unsigned outer, unsigned numZ){ unsigned int k = blockDim.z * blockIdx.z + threadIdx.z; //Outer unsigned int j = blockDim.y * blockIdx.y + threadIdx.y; //numY unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; //numX if(k >= outer || j >= numY || i >= numX) return; // explicit x u[YX(k,j,i)] = (1.0/(timeline[g+1]-timeline[g])) *result[XY(k,i,j)]; if(i > 0) { u[YX(k,j,i)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[D4ID(i,0)] ) * result[XY(k,i-1,j)]; } u[YX(k,j,i)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[D4ID(i,1)] ) * result[XY(k,i,j)]; if(i < numX-1) { u[YX(k,j,i)] += 0.5*( 0.5*varX[XY(0,i,j)]*dxx[D4ID(i,2)] ) * result[XY(k,i+1,j)]; } // explicit y ; RAW v, write u v[XY(k,i,j)] = 0.0; if(j > 0) 
{ v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy[D4ID(j,0)] ) * result[XY(k,i,j-1)]; } v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy[D4ID(j,1)] ) * result[XY(k,i,j)]; if(j < numY-1) { v[XY(k,i,j)] += ( 0.5*varY[XY(0,i,j)]*dyy[D4ID(j,2)] ) * result[XY(k,i,j+1)]; } u[YX(k,j,i)] += v[XY(k,i,j)]; // implicit x // write a,b,c a[ZZ(k,j,i)] = - 0.5*(0.5*varX[XY(0,i,j)]*dxx[D4ID(i,0)]); b[ZZ(k,j,i)] = ( 1.0/(timeline[g+1]-timeline[g])) - 0.5*(0.5*varX[XY(0,i,j)]*dxx[D4ID(i,1)]); c[ZZ(k,j,i)] = - 0.5*(0.5*varX[XY(0,i,j)]*dxx[D4ID(i,2)]); } /* __global__ void d_implicit_x_tridag(REAL* u, R)(REAL* d_varX, REAL* d_varY, REAL* d_x, REAL* d_y, REAL* d_timeline, unsigned int g, REAL alpha, REAL beta, REAL nu, unsigned int numX, unsigned int numY){ unsigned int i = blockDim.x*blockIdx.x + threadIdx.x; unsigned int j = blockDim.y*blockIdx.y + threadIdx.y; if(i >= outer || j >= numY) return; d_varX[i*numY+j] = exp(2.0*( beta*log(d_x[i]) + d_y[j] - 0.5*nu*nu*d_timeline[g])); d_varY[i*numY+j] = exp(2.0*( alpha*log(d_x[i]) + d_y[j] - 0.5*nu*nu*d_timeline[g])); } */ __global__ void d_implicit_y(REAL* u, REAL* v, REAL* a, REAL* b, REAL* c, REAL* y, REAL* varY, REAL* timeline, REAL* dyy, unsigned int g, unsigned numX, unsigned numY, unsigned outer, unsigned numZ){ unsigned int k = blockDim.z * blockIdx.z + threadIdx.z; //Outer unsigned int i = blockDim.y * blockIdx.y + threadIdx.y; //numX unsigned int j = blockDim.x * blockIdx.x + threadIdx.x; //numY if(k >= outer || j >= numY || i >= numX) return; a[ZZ(k,i,j)] = - 0.5*(0.5*varY[XY(0,i,j)]*dyy[D4ID(j,0)]); b[ZZ(k,i,j)] = ( 1.0/(timeline[g+1]-timeline[g])) - 0.5*(0.5*varY[XY(0,i,j)]*dyy[D4ID(j,1)]); c[ZZ(k,i,j)] = - 0.5*(0.5*varY[XY(0,i,j)]*dyy[D4ID(j,2)]); y[ZZ(k,i,j)] = ( 1.0/(timeline[g+1]-timeline[g])) * u[YX(k,j,i)] - 0.5*v[XY(k,i,j)]; } //{{{ wrapper void initGrid_GPU( const REAL s0, const REAL alpha, const REAL nu,const REAL t, const unsigned numX, const unsigned numY, const unsigned numT, REAL* d_myX, REAL* d_myY, REAL* d_myTimeline, unsigned myXindex, unsigned myYindex) { const unsigned int BLOCK_SIZE = 256; unsigned int NUM_BLOCKS = ceil(numT / (float)BLOCK_SIZE); d_initTimeline<<<NUM_BLOCKS,BLOCK_SIZE>>>(d_myTimeline, numT, t); NUM_BLOCKS = ceil(numX / (float)BLOCK_SIZE); const REAL stdX = 20.0*alpha*s0*sqrt(t); const REAL dx = stdX/numX; d_initNUM<<<NUM_BLOCKS,BLOCK_SIZE>>>(d_myX, numX, dx, myXindex, s0); const REAL stdY = 10.0*nu*sqrt(t); const REAL dy = stdY/numY; const REAL logAlpha = log(alpha); NUM_BLOCKS = ceil(numY / (float)BLOCK_SIZE); d_initNUM<<<NUM_BLOCKS,BLOCK_SIZE>>>(d_myY, numY, dy, myYindex, logAlpha); } void initOperator_GPU(REAL* d_x, unsigned int x_size, REAL* d_dxx){ const unsigned int BLOCK_SIZE = 256; unsigned int NUM_BLOCKS = ceil(x_size / (float)BLOCK_SIZE); d_initOperator<<<NUM_BLOCKS,BLOCK_SIZE>>>(d_x, x_size, d_dxx); } // read a b c r, write u inline void tridag( const vector<REAL>& a, // size [n] const vector<REAL>& b, // size [n] const vector<REAL>& c, // size [n] const vector<REAL>& r, // size [n] const int n, vector<REAL>& u, // size [n] vector<REAL>& uu // size [n] temporary ) { int i; // int offset; REAL beta; u[0] = r[0]; uu[0] = b[0]; for(i=1; i<n; i++) { beta = a[i] / uu[i-1]; uu[i] = b[i] - beta*c[i-1]; u[i] = r[i] - beta*u[i-1]; } #if 1 // X) this is a backward recurrence u[n-1] = u[n-1] / uu[n-1]; for(i=n-2; i>=0; i--) { u[i] = (u[i] - c[i]*u[i+1]) / uu[i]; } #else // Hint: X) can be written smth like (once you make a non-constant) for(i=0; i<n; i++) a[i] = u[n-1-i]; a[0] = a[0] / uu[n-1]; for(i=1; i<n; i++) a[i] = (a[i] - 
c[n-1-i]*a[i-1]) / uu[n-1-i]; for(i=0; i<n; i++) u[i] = a[n-1-i]; #endif } void run_OrigCPU( const unsigned int& outer, const unsigned int& numX, const unsigned int& numY, const unsigned int& numT, const REAL& s0, const REAL& t, const REAL& alpha, const REAL& nu, const REAL& beta, REAL* res // [outer] RESULT ) { PrivGlobs globs(numX, numY, numT); initGrid (s0,alpha,nu,t, numX, numY, numT, globs); initOperator(globs.myX,globs.myDxx); initOperator(globs.myY,globs.myDyy); // array expansion on myResult (originally globs.myResult) from [numX][numY] to [outer][numX][numY] vector<vector<vector<REAL> > > myResult; myResult.resize(outer); #pragma omp parallel for default(shared) schedule(static) for(int i=0; i<outer; i++) { myResult[i].resize(numX); for(int j=0; j<numX; j++){ myResult[i][j].resize(numY); } } //myVarX myVarY: [numX][numY] vector<vector<REAL> > myVarX, myVarY; myVarX.resize(numX); myVarY.resize(numX); for(int i=0; i<numX; i++){ myVarX[i].resize(numY); myVarY[i].resize(numY); } unsigned numZ = max(numX, numY); // array expansion on a, b, c, y, yy, [outer][numZ][numZ] vector<vector<vector<REAL> > > a,b,c,y,yy; a.resize(outer); b.resize(outer); c.resize(outer); y.resize(outer); yy.resize(outer); #pragma omp parallel for default(shared) schedule(static) for(int i=0; i<outer; i++) { a[i].resize(numZ); b[i].resize(numZ); c[i].resize(numZ); y[i].resize(numZ); yy[i].resize(numZ); for(int j=0; j<numZ; j++){ a[i][j].resize(numZ); b[i][j].resize(numZ); c[i][j].resize(numZ); y[i][j].resize(numZ); yy[i][j].resize(numZ); } } // array expansion on u,v, u is [outer][numY][numX], v is [outer][numX][] vector<vector<vector<REAL> > > u,v; u.resize(outer); v.resize(outer); for(int k=0; k<outer; k++){ u[k].resize(numY); for(int i=0; i< numY; i++) u[k][i].resize(numX); v[k].resize(numX); for(int i=0; i< numX; i++) v[k][i].resize(numY); } // setPayoff(strike, globs); it's parallel so can be loop-distributed on the outmost loop // also need to do array expansion on globs.myResult, i.e. 
myResult #pragma omp parallel for default(shared) schedule(static) //Kernel-1: 3D for( unsigned k = 0; k < outer; ++ k ) { // outmost loop // modified setPayoff function below for(unsigned i=0;i<globs.myX.size();++i) { //REAL payoff = max(globs.myX[i]-strike, (REAL)0.0); // move this inside the loop to do privatization for(unsigned j=0;j<globs.myY.size();++j) // globs.myResult[i][j] = payoff; // note that payoff is just a scalar variables, myResult[k][i][j] = max(globs.myX[i]-(0.001*k), (REAL)0.0); } } //--- original code: // for(int i = globs.myTimeline.size()-2;i>=0;--i) // { // updateParams(i,alpha,beta,nu,globs); // rollback(i, globs); // } //--- use loop interchange and loop distribution //modified updateParams(g,alpha,beta,nu,globs); // Kernel-2: 3D for(int g = globs.myTimeline.size()-2;g>=0;--g) { // second outer loop, g #pragma omp parallel for default(shared) schedule(static) // Kernel-2: 2D for(unsigned i=0;i<globs.myX.size();++i){ for(unsigned j=0;j<globs.myY.size();++j) { myVarX[i][j] = exp(2.0*( beta*log(globs.myX[i]) + globs.myY[j] - 0.5*nu*nu*globs.myTimeline[g] ) ); myVarY[i][j] = exp(2.0*( alpha*log(globs.myX[i]) + globs.myY[j] - 0.5*nu*nu*globs.myTimeline[g] ) ); // nu*nu } } // rollback Part 1, write u,v, a, b, c #pragma omp parallel for default(shared) schedule(static) // Kernel-3: 3D for( unsigned k = 0; k < outer; ++ k ) { //outermost loop k, after interchanged //Par for(unsigned j=0;j<numY;j++) { // interchanged with the inner loop for(unsigned i=0;i<numX;i++) { // explicit x u[k][j][i] = (1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) *myResult[k][i][j]; if(i > 0) { u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][0] ) * myResult[k][i-1][j]; } u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][1] ) * myResult[k][i][j]; if(i < numX-1) { u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][2] ) * myResult[k][i+1][j]; } // explicit y ; RAW v, write u v[k][i][j] = 0.0; if(j > 0) { v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][0] ) * myResult[k][i][j-1]; } v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][1] ) * myResult[k][i][j]; if(j < numY-1) { v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][2] ) * myResult[k][i][j+1]; } u[k][j][i] += v[k][i][j]; // implicit x // write a,b,c a[k][j][i] = - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][0]); b[k][j][i] = ( 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][1]); c[k][j][i] = - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][2]); } } } //Part 2 : read a,b,c,u to write u #pragma omp parallel for default(shared) schedule(static) //kernel-4: 2D Kernel or can be merged with the last one to make a 2D kernel for( unsigned k = 0; k < outer; ++ k ) { //outermost loop distribution //Par for(unsigned j=0;j<numY;j++) { // Par tridagPar(a[k][j],b[k][j],c[k][j],u[k][j],numX,u[k][j],yy[k][j]); } } //Part 3, write a b c y reading from u,v // implicit y, #pragma omp parallel for default(shared) schedule(static) // Kernel-5: 3D for( unsigned k = 0; k < outer; ++ k ) { //outermost loop distribution //Par for(unsigned i=0;i<numX;i++) { for(unsigned j=0;j<numY;j++) { a[k][i][j] = - 0.5*(0.5*myVarY[i][j]*globs.myDyy[j][0]); b[k][i][j] = ( 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) - 0.5*(0.5*myVarY[i][j]*globs.myDyy[j][1]); c[k][i][j] = - 0.5*(0.5*myVarY[i][j]*globs.myDyy[j][2]); y[k][i][j] = ( 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) *u[k][j][i] - 0.5*v[k][i][j]; } } } //Part 4: write myResult reading from a b c y #pragma omp parallel for default(shared) schedule(static) //kernel-6 for( unsigned k = 0; k < 
outer; ++ k ) { //outermost loop distribution //Par for(unsigned i=0;i<numX;i++) { tridagPar(a[k][i],b[k][i],c[k][i],y[k][i],numY,myResult[k][i],yy[k][i]); } } } #pragma omp parallel for default(shared) schedule(static) for( unsigned k = 0; k < outer; ++ k ) //outermost loop k res[k] = myResult[k][globs.myXindex][globs.myYindex]; // myRes[0][k]; //// ---------- GPU version -------------------- //// // globs vars for gpu REAL *h_result; // the final result // GPU variables REAL *d_x, *d_y, *d_timeline, *d_dxx, *d_dyy; REAL *d_result, *d_varX, *d_varY; REAL *d_a, *d_b, *d_c, *d_yy, *d_yyy, *d_u, *d_v; // myXindex myYindex are scalars const REAL stdX = 20.0*alpha*s0*sqrt(t); const REAL dx = stdX/numX; unsigned myXindex = static_cast<unsigned>(s0/dx) % numX; // const REAL stdY = 10.0*nu*sqrt(t); // const REAL dy = stdY/numY; // const REAL logAlpha = log(alpha); unsigned myYindex = static_cast<unsigned>(numY/2.0); int memsize_X = numX * sizeof(REAL); int memsize_Y = numY * sizeof(REAL); int memsize_T = numT * sizeof(REAL); int memsize_XY = numX * numY * sizeof(REAL); int memsize_OXY = outer * numX * numY * sizeof (REAL); int memsize_OZZ = outer * numZ * numZ * sizeof (REAL); // CPU variables h_result = (REAL*) malloc (memsize_OXY); // GPU variables cudaMalloc((void**)&d_result, memsize_OXY); //[outer][numX][numY] cudaMalloc((void**)&d_varX, memsize_XY); //[numX][numY] cudaMalloc((void**)&d_varY, memsize_XY); //[numX][numY] cudaMalloc((void**)&d_x, memsize_X); //[numX] cudaMalloc((void**)&d_y, memsize_Y); //[numY] cudaMalloc((void**)&d_timeline, memsize_T); //[numT] cudaMalloc((void**)&d_dxx, 4 * memsize_X); //[numX][4] cudaMalloc((void**)&d_dyy, 4 * memsize_Y); //[numY][4] //a b c yy yyy: [outer][numZ][numZ] cudaMalloc((void**)&d_a , memsize_OZZ); cudaMalloc((void**)&d_b , memsize_OZZ); cudaMalloc((void**)&d_c , memsize_OZZ); cudaMalloc((void**)&d_yy , memsize_OZZ); //y in seq code cudaMalloc((void**)&d_yyy, memsize_OZZ); //yy in seq code cudaMalloc((void**)&d_u , memsize_OXY); //d_u : [outer][numY][numX] cudaMalloc((void**)&d_v , memsize_OXY); //d_v : [outer][numX][numY] //GPU init initGrid_GPU(s0, alpha, nu,t, numX,numY, numT, d_x, d_y, d_timeline, myXindex, myYindex); initOperator_GPU( d_x, numX, d_dxx); initOperator_GPU( d_y, numY, d_dyy); // test GPU init #ifdef GPU_INIT_TEST REAL *h_x, *h_y, *h_timeline, *h_dxx, *h_dyy; h_x = (REAL *) malloc (memsize_X ); h_y = (REAL *) malloc (memsize_Y ); h_timeline = (REAL *) malloc (memsize_T ); h_dxx = (REAL *) malloc (4* memsize_X ); h_dyy = (REAL *) malloc (4* memsize_Y ); cudaMemcpy( h_x , d_x , numX*sizeof(REAL) , cudaMemcpyDeviceToHost); cudaMemcpy( h_y , d_y , numY*sizeof(REAL) , cudaMemcpyDeviceToHost); cudaMemcpy( h_timeline , d_timeline, memsize_T , cudaMemcpyDeviceToHost); cudaMemcpy( h_dxx , d_dxx , numX*4*sizeof(REAL) , cudaMemcpyDeviceToHost); cudaMemcpy( h_dyy , d_dyy , numY*4*sizeof(REAL) , cudaMemcpyDeviceToHost); bool valid = true; for(int i = 0; i < numX; i++){ if(abs(h_x[i]-globs.myX[i]) > EPSILON){ valid = false; printf("\n** invalid h_x %f %f**\n", h_x[i], globs.myX[i]); break; } } for(int i = 0; i < numY; i++){ if(abs(h_y[i]-globs.myY[i]) > EPSILON){ valid = false; printf("\n** invalid h_y **\n"); break; } } for(int i = 0; i < numT; i++){ if(abs(h_timeline[i]-globs.myTimeline[i]) > EPSILON){ valid = false; printf("\n** invalid h_timeline %d %d**\n", h_timeline[i], globs.myTimeline[i]); break; } } for(int i = 0; i < numX*4; i++){ if(abs(h_dxx[i]-globs.myDxx[i/4][i%4]) > EPSILON){ valid = false; printf("\n** Invalid h_dxx **\n"); 
break; } } for(int i = 0; i < numY*4; i++){ if(abs(h_dyy[i]-globs.myDyy[i/4][i%4]) > EPSILON){ valid = false; printf("\n** Invalid h_dyy **\n"); break; } } if(!valid){ printf("\n**Initialization did not validate**\n"); } free(h_x);free(h_y);free(h_timeline);free(h_dxx);free(h_dyy); #endif // GPU setPayoff dim3 block_3D(8, 8, 8); dim3 grid_3D_OXY(ceil(numY/8.0), ceil(numX/8.0), ceil(outer/8.0)); d_setPayoff<<<grid_3D_OXY, block_3D>>>(d_result, d_x, numY, numX, outer); #ifdef GPU_SETPAYOFF_TEST cudaMemcpy( h_result , d_result , memsize_OXY, cudaMemcpyDeviceToHost); for(int k = 0; k < outer; k++) for(int i = 0; i < globs.myX.size(); i++) for(int j = 0; j < globs.myY.size(); j++){ REAL myResultTemp = max(globs.myX[i]-(0.001*k), (REAL)0.0); if(abs(h_result[k*numX*numY+i*numY+j]-myResultTemp) > EPSILON){ printf("\n**SetPayOff did not validate %f %f**\n", h_result[k*numX*numY+i*numY+j], myResultTemp); break; } } #endif //GPU updateParams int dimy = ceil( numY / T ); int dimx = ceil( numX / T ); dim3 block(T,T), grid(dimx,dimy); // Test only when g = 0, the last Timeline iteration d_updateParams<<< grid, block >>>(d_varX, d_varY, d_x, d_y, d_timeline, 0, alpha, beta, nu, numX, numY); #ifdef GPU_UPDATE_PARAMS_TEST // Test only when g = 0, the last Timeline iteration, i.e., // d_updateParams<<< grid, block >>>(d_varX, d_varY, d_x, d_y, d_timeline, 0, alpha, beta, nu, numX, numY); REAL *h_varX, *h_varY; h_varX = (REAL*) malloc (memsize_XY ); h_varY = (REAL*) malloc (memsize_XY ); cudaMemcpy( h_varX , d_varX , memsize_XY , cudaMemcpyDeviceToHost); cudaMemcpy( h_varY , d_varY , memsize_XY , cudaMemcpyDeviceToHost); for(int i = 0; i < numX*numY; i++){ if(abs(h_varX[i] - myVarX[i/numY][i%numY]) > EPSILON || abs(h_varY[i] - myVarY[i/numY][i%numY]) > EPSILON){ printf("\n**Update Params did not validate %f=%f and %f=%f**\n", h_varX[i], myVarX[i/numY][i%numY], h_varY[i], myVarY[i/numY][i%numY]); break; } } free(h_varX); free(h_varY); #endif // GPU rollback Part_1 // const dim3 block_3D(8, 8, 8); const dim3 grid_3D_OYX(ceil(numX/8.0), ceil(numY/8.0),ceil(outer/8.0) ); d_explicit_xy_implicit_x<<<grid_3D_OYX, block_3D>>>(d_u,d_v,d_a,d_b,d_c, d_varX,d_varY,d_timeline,d_dxx,d_dyy,d_result, 0, // firstly only test the case that g = 0 numX, numY, outer, numZ); #ifdef GPU_ROLLBACK_PART_1_TEST REAL *h_a, *h_b, *h_c, // *h_yy, *h_yyy, *h_u, *h_v; h_a = (REAL *) malloc (memsize_OZZ ); h_b = (REAL *) malloc (memsize_OZZ ); h_c = (REAL *) malloc (memsize_OZZ ); // h_yy = (REAL *) malloc (memsize_OZZ ); // h_yyy = (REAL *) malloc (memsize_OZZ ); h_u = (REAL *) malloc (memsize_OXY ); h_v = (REAL *) malloc (memsize_OXY ); cudaMemcpy( h_a , d_a , memsize_OZZ , cudaMemcpyDeviceToHost); cudaMemcpy( h_b , d_b , memsize_OZZ , cudaMemcpyDeviceToHost); cudaMemcpy( h_c , d_c , memsize_OZZ , cudaMemcpyDeviceToHost); // cudaMemcpy( h_yy , d_yy , memsize_OZZ , cudaMemcpyDeviceToHost); // cudaMemcpy( h_yyy , d_yyy , memsize_OZZ , cudaMemcpyDeviceToHost); cudaMemcpy( h_u , d_u , memsize_OXY , cudaMemcpyDeviceToHost); cudaMemcpy( h_v , d_v , memsize_OXY , cudaMemcpyDeviceToHost); #pragma omp parallel for default(shared) schedule(static) //Kernel-1: 3D for( unsigned k = 0; k < outer; ++ k ) { // outmost loop // modified setPayoff function below for(unsigned i=0;i<globs.myX.size();++i) { //REAL payoff = max(globs.myX[i]-strike, (REAL)0.0); // move this inside the loop to do privatization for(unsigned j=0;j<globs.myY.size();++j) // globs.myResult[i][j] = payoff; // note that payoff is just a scalar variables, myResult[k][i][j] = 
max(globs.myX[i]-(0.001*k), (REAL)0.0); } } unsigned g = 0; for( unsigned k = 0; k < outer; ++ k ) { //outermost loop k, after interchanged //Par for(unsigned j=0;j<numY;j++) { // interchanged with the inner loop for(unsigned i=0;i<numX;i++) { // explicit x u[k][j][i] = (1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) *myResult[k][i][j]; if(i > 0) { u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][0] ) * myResult[k][i-1][j]; } u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][1] ) * myResult[k][i][j]; if(i < numX-1) { u[k][j][i] += 0.5*( 0.5*myVarX[i][j]*globs.myDxx[i][2] ) * myResult[k][i+1][j]; } // explicit y ; RAW v, write u v[k][i][j] = 0.0; if(j > 0) { v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][0] ) * myResult[k][i][j-1]; } v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][1] ) * myResult[k][i][j]; if(j < numY-1) { v[k][i][j] += ( 0.5*myVarY[i][j]*globs.myDyy[j][2] ) * myResult[k][i][j+1]; } u[k][j][i] += v[k][i][j]; // implicit x // write a,b,c a[k][j][i] = - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][0]); b[k][j][i] = ( 1.0/(globs.myTimeline[g+1]-globs.myTimeline[g])) - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][1]); c[k][j][i] = - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][2]); } } } bool valid = true; for(int iter = 0; iter < outer*numZ*numZ; iter ++){ unsigned k = iter /numZ/numZ; unsigned j = iter/ numZ; unsigned i = iter % numZ; if(i< numX && j < numY){ // REAL temp_a = - 0.5*(0.5*myVarX[i][j]*globs.myDxx[i][0]); if(abs(h_a[iter] - a[k][j][i]) > EPSILON || abs(h_b[iter] - b[k][j][i]) > EPSILON || abs(h_c[iter] - c[k][j][i]) > EPSILON){ valid = false; printf("\n** [h_a] k %d, j %d, i %d, : %f != %f **\n", k,j,i, h_a[iter], a[k][j][i]); printf("\n** [h_b] did not validate ! k %d, j %d, i %d, : %f != %f **\n", k,j,i, h_b[iter], b[k][j][i]); printf("\n** [h_c] did not validate ! k %d, j %d, i %d, : %f != %f **\n", k,j,i, h_c[iter], c[k][j][i]); } if (abs(h_u[iter] - u[k][j][i]) > EPSILON || abs(h_v[iter] - v[k][i][j]) > EPSILON ) { valid = false; printf("\n** [h_u] did not validate ! k %d, j %d, i %d, : %f != %f **\n", k,j,i, h_u[iter], u[k][j][i]); printf("\n** [h_v] did not validate ! k %d, j %d, i %d, : %f != %f **\n", k,j,i, h_v[iter], v[k][i][j]); break; } } } if(!valid){ printf("\n** GPU_ROLLBACK_PART1_TEST did not validate**\n"); } free(h_a); free(h_b);free(h_c); // free(h_yy);free(h_yyy); free(h_u); free(h_v); #endif cudaFree(d_x); cudaFree(d_y); cudaFree(d_dxx);cudaFree(d_dyy); cudaFree(d_timeline); cudaFree(d_result); cudaFree(d_varX); cudaFree(d_varY); cudaFree(d_a); cudaFree(d_b);cudaFree(d_c); cudaFree(d_yy);cudaFree(d_yyy); cudaFree(d_u); cudaFree(d_v); free(h_result); //SHould perhaps be initialized on the gpu instead to save PCI bandwidth. Possibly negl /* * setPayOff: * INPUT: globs.myX * Output: myResult * * updateParams: * input: globs.myTimeline, globs.myX, globs.myY, alpha, beta, * output: myVarX, myVarY * * rollback-1: * input: globs.myTimeLine, myResult, * output: * * tridagPar: * * rollback-2: * input: * output: * */ // #endif }
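The closing comment block sketches each kernel's dataflow but leaves tridagPar blank. For reference, a sequential Thomas-algorithm solver whose parameters mirror the tridagPar call in the OpenMP loop above (a, b, c, right-hand side, length, solution, scratch); the argument order is an assumption about that interface, not the project's actual implementation:

// Sequential Thomas algorithm for a tridiagonal system (reference sketch).
// Assumes b[0] != 0 and that the system needs no pivoting, as is typical
// for the diagonally dominant matrices produced by the implicit-x step.
template <typename REAL>
void tridag_seq(const REAL* a, const REAL* b, const REAL* c,
                const REAL* r, int n, REAL* u, REAL* gam) {
    REAL bet = b[0];
    u[0] = r[0] / bet;
    for (int j = 1; j < n; j++) {        // forward sweep: eliminate sub-diagonal
        gam[j] = c[j-1] / bet;
        bet    = b[j] - a[j] * gam[j];
        u[j]   = (r[j] - a[j] * u[j-1]) / bet;
    }
    for (int j = n - 2; j >= 0; j--)     // back substitution
        u[j] -= gam[j+1] * u[j+1];
}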
8bd4835d3bbbbb84842e2dcbf060cf56c656cd27.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * ****************************************************************************** * * * * * * This program and the accompanying materials are made available under the * * terms of the Apache License, Version 2.0 which is available at * * https://www.apache.org/licenses/LICENSE-2.0. * * * * See the NOTICE file distributed with this work for additional * * information regarding copyright ownership. * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * * License for the specific language governing permissions and limitations * * under the License. * * * * SPDX-License-Identifier: Apache-2.0 * ***************************************************************************** */ // // @author Yurii Shyrma ([email protected]), created on 20.04.2018 // #include<ops/declarable/helpers/transforms.h> #include <array/ResultSet.h> #include <helpers/ShapeUtils.h> #include <numeric> #include <array/NDArrayFactory.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void invertPermutationCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) { const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ Nd4jLong len, totalThreads; if (threadIdx.x == 0) { len = shape::length(xShapeInfo); totalThreads = gridDim.x * blockDim.x; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < len; i += totalThreads) { const auto xOffset = shape::getIndexOffset(i, xShapeInfo); const Nd4jLong index = x[xOffset]; const auto zOffset = shape::getIndexOffset(index, zShapeInfo); z[zOffset] = i; } } /////////////////////////////////////////////////////////////////// template<typename T> __host__ static void invertPermutationCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) { hipLaunchKernelGGL(( invertPermutationCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 1024, *stream, vx, xShapeInfo, vz, zShapeInfo); } //////////////////////////////////////////////////////////////////////// ND4J_LOCAL void invertPermutation(sd::LaunchContext* context, const NDArray& input, NDArray& output) { const int threadsPerBlock = MAX_NUM_THREADS; const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(context, "invertPermutation"); NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), invertPermutationCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo()), LIBND4J_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// template<typename T> __global__ static void traceCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint diagLen) { const auto x = reinterpret_cast<const 
T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ T sharedMem[CUDA_BLOCK_SIZE]; __shared__ int xRank, zRank; // xRank = zRank + 2 __shared__ Nd4jLong xLen, zLen; if (threadIdx.x == 0) { xRank = shape::rank(xShapeInfo); zRank = shape::rank(zShapeInfo); xLen = shape::length(xShapeInfo); zLen = shape::length(zShapeInfo); // corresponds to number of matrices } __syncthreads(); Nd4jLong coords[MAX_RANK]; for (uint m = blockIdx.x; m < zLen; m += gridDim.x) { // one block per each element of z, that is per each matrix shape::index2coords(m, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); sharedMem[threadIdx.x] = 0; for (uint i = threadIdx.x; i < diagLen; i += blockDim.x) { coords[zRank] = coords[zRank + 1] = i; const auto xOffset = shape::getOffset(xShapeInfo, coords); sharedMem[threadIdx.x] += x[xOffset]; } __syncthreads(); // aggregate sum for (Nd4jLong activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) { if (threadIdx.x < activeThreads) sharedMem[threadIdx.x] += sharedMem[threadIdx.x + activeThreads]; __syncthreads(); } if (threadIdx.x == 0) z[zOffset] = *sharedMem; __syncthreads(); } } /////////////////////////////////////////////////////////////////// template<typename T> static void traceCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const uint diagLen) { hipLaunchKernelGGL(( traceCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, diagLen); } /////////////////////////////////////////////////////////////////// ND4J_LOCAL void trace(sd::LaunchContext* context, const NDArray& input, NDArray& output) { PointersManager manager(context, "trace"); const uint diagLen = input.sizeAt(-1) < input.sizeAt(-2) ? input.sizeAt(-1) : input.sizeAt(-2); const int threadsPerBlock = CUDA_BLOCK_SIZE; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = 1024; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), traceCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), diagLen), LIBND4J_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void triuBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) { // x and z have same shapes const auto x = reinterpret_cast<const T*>(vx); // gradO auto z = reinterpret_cast<T*>(vz); // gradI __shared__ int rank, areSameOffsets; __shared__ Nd4jLong len, totalThreads; // xLen = zLen if (threadIdx.x == 0) { areSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo); rank = shape::rank(xShapeInfo); len = shape::length(zShapeInfo); totalThreads = gridDim.x * blockDim.x; } __syncthreads(); Nd4jLong coords[MAX_RANK]; const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < len; i += totalThreads) { shape::index2coords(i, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); if((coords[rank - 2] + diag > coords[rank - 1])) // row + diag > col z[zOffset] = 0; else z[zOffset] = x[areSameOffsets ? 
zOffset : shape::getOffset(xShapeInfo, coords)]; } } /////////////////////////////////////////////////////////////////// template<typename T> static void triuBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) { hipLaunchKernelGGL(( triuBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, diag); } /////////////////////////////////////////////////////////////////// ND4J_LOCAL void triuBP(sd::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int diagonal) { const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (gradO.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(int) * gradO.rankOf() + 128; PointersManager manager(context, "triuBP"); NDArray::prepareSpecialUse({&gradI}, {&gradO}); BUILD_SINGLE_SELECTOR(gradI.dataType(), triuBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.specialBuffer(), gradO.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), diagonal), LIBND4J_TYPES); NDArray::registerSpecialUse({&gradI}, {&gradO}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void tileBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) { // x and z have same shapes const auto x = reinterpret_cast<const T*>(vx); // gradO auto z = reinterpret_cast<T*>(vz); // gradI __shared__ int xRank, zRank; // xRank >= zRank __shared__ Nd4jLong numOfXOffsets, zLen, totalThreads; // xLen >= zLen if (threadIdx.x == 0) { xRank = shape::rank(zShapeInfo); zLen = shape::length(zShapeInfo); numOfXOffsets = shape::length(xShapeInfo) / zLen; totalThreads = gridDim.x * blockDim.x; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; int memBuff[MAX_RANK * 2]; auto xOffsets = globMem + tid * numOfXOffsets; for (Nd4jLong i = tid; i < zLen; i += totalThreads) { const auto zOffset = shape::getIndexOffset(i, zShapeInfo); shape::outerArrayOffsets(xOffsets, i, xShapeInfo, zShapeInfo, memBuff); z[zOffset] = x[xOffsets[0]]; // first offset for (Nd4jLong j = 1; j < numOfXOffsets; ++j) // rest offsets z[zOffset] += x[xOffsets[j]]; } } /////////////////////////////////////////////////////////////////// template<typename T> static void tileBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) { hipLaunchKernelGGL(( tileBPCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vz, zShapeInfo, globMem); } ////////////////////////////////////////////////////////////////////////// ND4J_LOCAL void tileBP(sd::LaunchContext * context, const NDArray& gradO /*input*/, NDArray& gradI /*output*/, const std::vector<Nd4jLong> reps) { NDArray memBuff('c', gradO.getShapeAsVector(), sd::DataType::INT64, context); // empty auxiliary array for storing device memory which will be used in kernel calculations const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(int) * 2 * gradO.rankOf() + 128; 
PointersManager manager(context, "tileBP"); NDArray::prepareSpecialUse({&gradI}, {&gradO, &memBuff}); BUILD_SINGLE_SELECTOR(gradI.dataType(), tileBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.specialBuffer(), gradO.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), reinterpret_cast<Nd4jLong*>(memBuff.specialBuffer())), FLOAT_TYPES); NDArray::registerSpecialUse({&gradI}, {&gradO, &memBuff}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// ND4J_LOCAL void eye(sd::LaunchContext * context, NDArray& output) { output.setIdentity(); } } } }
8bd4835d3bbbbb84842e2dcbf060cf56c656cd27.cu
/* * ****************************************************************************** * * * * * * This program and the accompanying materials are made available under the * * terms of the Apache License, Version 2.0 which is available at * * https://www.apache.org/licenses/LICENSE-2.0. * * * * See the NOTICE file distributed with this work for additional * * information regarding copyright ownership. * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * * License for the specific language governing permissions and limitations * * under the License. * * * * SPDX-License-Identifier: Apache-2.0 * ***************************************************************************** */ // // @author Yurii Shyrma ([email protected]), created on 20.04.2018 // #include<ops/declarable/helpers/transforms.h> #include <array/ResultSet.h> #include <helpers/ShapeUtils.h> #include <numeric> #include <array/NDArrayFactory.h> #include <helpers/TAD.h> #include <exceptions/cuda_exception.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void invertPermutationCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) { const T* x = reinterpret_cast<const T*>(vx); T* z = reinterpret_cast<T*>(vz); __shared__ Nd4jLong len, totalThreads; if (threadIdx.x == 0) { len = shape::length(xShapeInfo); totalThreads = gridDim.x * blockDim.x; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < len; i += totalThreads) { const auto xOffset = shape::getIndexOffset(i, xShapeInfo); const Nd4jLong index = x[xOffset]; const auto zOffset = shape::getIndexOffset(index, zShapeInfo); z[zOffset] = i; } } /////////////////////////////////////////////////////////////////// template<typename T> __host__ static void invertPermutationCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo) { invertPermutationCuda<T><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(vx, xShapeInfo, vz, zShapeInfo); } //////////////////////////////////////////////////////////////////////// ND4J_LOCAL void invertPermutation(sd::LaunchContext* context, const NDArray& input, NDArray& output) { const int threadsPerBlock = MAX_NUM_THREADS; const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; PointersManager manager(context, "invertPermutation"); NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), invertPermutationCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo()), LIBND4J_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// template<typename T> __global__ static void traceCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const uint diagLen) { const auto x = reinterpret_cast<const T*>(vx); auto z = reinterpret_cast<T*>(vz); __shared__ T sharedMem[CUDA_BLOCK_SIZE]; __shared__ int xRank, zRank; // 
xRank = zRank + 2 __shared__ Nd4jLong xLen, zLen; if (threadIdx.x == 0) { xRank = shape::rank(xShapeInfo); zRank = shape::rank(zShapeInfo); xLen = shape::length(xShapeInfo); zLen = shape::length(zShapeInfo); // corresponds to number of matrices } __syncthreads(); Nd4jLong coords[MAX_RANK]; for (uint m = blockIdx.x; m < zLen; m += gridDim.x) { // one block per each element of z, that is per each matrix shape::index2coords(m, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); sharedMem[threadIdx.x] = 0; for (uint i = threadIdx.x; i < diagLen; i += blockDim.x) { coords[zRank] = coords[zRank + 1] = i; const auto xOffset = shape::getOffset(xShapeInfo, coords); sharedMem[threadIdx.x] += x[xOffset]; } __syncthreads(); // aggregate sum for (Nd4jLong activeThreads = blockDim.x / 2; activeThreads > 0; activeThreads /= 2) { if (threadIdx.x < activeThreads) sharedMem[threadIdx.x] += sharedMem[threadIdx.x + activeThreads]; __syncthreads(); } if (threadIdx.x == 0) z[zOffset] = *sharedMem; __syncthreads(); } } /////////////////////////////////////////////////////////////////// template<typename T> static void traceCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo, const uint diagLen) { traceCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, diagLen); } /////////////////////////////////////////////////////////////////// ND4J_LOCAL void trace(sd::LaunchContext* context, const NDArray& input, NDArray& output) { PointersManager manager(context, "trace"); const uint diagLen = input.sizeAt(-1) < input.sizeAt(-2) ? input.sizeAt(-1) : input.sizeAt(-2); const int threadsPerBlock = CUDA_BLOCK_SIZE; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = 1024; NDArray::prepareSpecialUse({&output}, {&input}); BUILD_SINGLE_SELECTOR(input.dataType(), traceCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), diagLen), LIBND4J_TYPES); NDArray::registerSpecialUse({&output}, {&input}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void triuBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) { // x and z have same shapes const auto x = reinterpret_cast<const T*>(vx); // gradO auto z = reinterpret_cast<T*>(vz); // gradI __shared__ int rank, areSameOffsets; __shared__ Nd4jLong len, totalThreads; // xLen = zLen if (threadIdx.x == 0) { areSameOffsets = shape::haveSameShapeAndStrides(xShapeInfo, zShapeInfo); rank = shape::rank(xShapeInfo); len = shape::length(zShapeInfo); totalThreads = gridDim.x * blockDim.x; } __syncthreads(); Nd4jLong coords[MAX_RANK]; const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < len; i += totalThreads) { shape::index2coords(i, zShapeInfo, coords); const auto zOffset = shape::getOffset(zShapeInfo, coords); if((coords[rank - 2] + diag > coords[rank - 1])) // row + diag > col z[zOffset] = 0; else z[zOffset] = x[areSameOffsets ? 
zOffset : shape::getOffset(xShapeInfo, coords)]; } } /////////////////////////////////////////////////////////////////// template<typename T> static void triuBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int diag) { triuBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, diag); } /////////////////////////////////////////////////////////////////// ND4J_LOCAL void triuBP(sd::LaunchContext* context, const NDArray& input, const NDArray& gradO, NDArray& gradI, const int diagonal) { const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (gradO.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(int) * gradO.rankOf() + 128; PointersManager manager(context, "triuBP"); NDArray::prepareSpecialUse({&gradI}, {&gradO}); BUILD_SINGLE_SELECTOR(gradI.dataType(), triuBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.specialBuffer(), gradO.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), diagonal), LIBND4J_TYPES); NDArray::registerSpecialUse({&gradI}, {&gradO}); manager.synchronize(); } /////////////////////////////////////////////////////////////////// template<typename T> __global__ static void tileBPCuda(const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) { // x and z have same shapes const auto x = reinterpret_cast<const T*>(vx); // gradO auto z = reinterpret_cast<T*>(vz); // gradI __shared__ int xRank, zRank; // xRank >= zRank __shared__ Nd4jLong numOfXOffsets, zLen, totalThreads; // xLen >= zLen if (threadIdx.x == 0) { xRank = shape::rank(zShapeInfo); zLen = shape::length(zShapeInfo); numOfXOffsets = shape::length(xShapeInfo) / zLen; totalThreads = gridDim.x * blockDim.x; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; int memBuff[MAX_RANK * 2]; auto xOffsets = globMem + tid * numOfXOffsets; for (Nd4jLong i = tid; i < zLen; i += totalThreads) { const auto zOffset = shape::getIndexOffset(i, zShapeInfo); shape::outerArrayOffsets(xOffsets, i, xShapeInfo, zShapeInfo, memBuff); z[zOffset] = x[xOffsets[0]]; // first offset for (Nd4jLong j = 1; j < numOfXOffsets; ++j) // rest offsets z[zOffset] += x[xOffsets[j]]; } } /////////////////////////////////////////////////////////////////// template<typename T> static void tileBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* vz, const Nd4jLong* zShapeInfo, Nd4jLong* globMem) { tileBPCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vz, zShapeInfo, globMem); } ////////////////////////////////////////////////////////////////////////// ND4J_LOCAL void tileBP(sd::LaunchContext * context, const NDArray& gradO /*input*/, NDArray& gradI /*output*/, const std::vector<Nd4jLong> reps) { NDArray memBuff('c', gradO.getShapeAsVector(), sd::DataType::INT64, context); // empty auxiliary array for storing device memory which will be used in kernel calculations const int threadsPerBlock = MAX_NUM_THREADS / 4; const int blocksPerGrid = (gradI.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; const int sharedMem = threadsPerBlock * sizeof(int) * 2 * gradO.rankOf() + 128; PointersManager manager(context, "tileBP"); 
NDArray::prepareSpecialUse({&gradI}, {&gradO, &memBuff}); BUILD_SINGLE_SELECTOR(gradI.dataType(), tileBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), gradO.specialBuffer(), gradO.specialShapeInfo(), gradI.specialBuffer(), gradI.specialShapeInfo(), reinterpret_cast<Nd4jLong*>(memBuff.specialBuffer())), FLOAT_TYPES); NDArray::registerSpecialUse({&gradI}, {&gradO, &memBuff}); manager.synchronize(); } ////////////////////////////////////////////////////////////////////////// ND4J_LOCAL void eye(sd::LaunchContext * context, NDArray& output) { output.setIdentity(); } } } }
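traceCuda above loads each diagonal with a blockDim-strided loop and then folds sharedMem with a halving tree reduction. The same pattern in isolation (hypothetical names, fixed 256-thread block; the block size must stay a power of two for the halving loop to cover every element):

#define BLOCK 256

// One block reduces `len` values from `in` into out[blockIdx.x]:
// each thread accumulates a strided partial sum, then the block
// halves the active threads until sharedMem[0] holds the total.
__global__ void blockSum(const float* in, float* out, int len) {
    __shared__ float sharedMem[BLOCK];
    float acc = 0.f;
    for (int i = threadIdx.x; i < len; i += blockDim.x)
        acc += in[i];
    sharedMem[threadIdx.x] = acc;
    __syncthreads();
    for (int active = blockDim.x / 2; active > 0; active /= 2) {
        if (threadIdx.x < active)
            sharedMem[threadIdx.x] += sharedMem[threadIdx.x + active];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        out[blockIdx.x] = sharedMem[0];
}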
4_2d_1d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Babak Poursartip
// 09/14/2020
// Udemy Cuda
// unique index calculation
#include <cstdio>

// ===========================================
// 2d grid, 1d block
__global__ void unique_gid_calculation_2d(int *input) {
  int tid = threadIdx.x;
  int block_offset = blockIdx.x * blockDim.x;
  int row_offset = blockIdx.y * (blockDim.x * gridDim.x);
  int gid = row_offset + block_offset + tid;
  printf(" blockIdx.x: %3d, blockIdx.y: %3d, threadIdx.x: %3d, gid: %3d, "
         "value: %4d \n",
         blockIdx.x, blockIdx.y, tid, gid, input[gid]);
}

// ===========================================
int main() {
  printf(" starts ...");

  int array_size = 16;
  int array_byte_size = sizeof(int) * array_size;
  int h_data[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};

  // array on the host
  printf(" data on the host: \n");
  for (int i = 0; i < array_size; ++i)
    printf(" %d", h_data[i]);
  printf("\n\n");

  int *d_data; // array on the device
  hipMalloc((void **)&d_data, array_byte_size);
  hipMemcpy(d_data, h_data, array_byte_size, hipMemcpyHostToDevice);

  dim3 block(4);
  dim3 grid(2, 2);
  printf(" data on the device: \n");
  hipLaunchKernelGGL(( unique_gid_calculation_2d), dim3(grid), dim3(block), 0, 0, d_data);
  hipDeviceSynchronize();

  hipDeviceReset();
  printf(" finished.");
  return 0;
}
4_2d_1d.cu
// Babak Poursartip
// 09/14/2020
// Udemy Cuda
// unique index calculation
#include <cstdio>

// ===========================================
// 2d grid, 1d block
__global__ void unique_gid_calculation_2d(int *input) {
  int tid = threadIdx.x;
  int block_offset = blockIdx.x * blockDim.x;
  int row_offset = blockIdx.y * (blockDim.x * gridDim.x);
  int gid = row_offset + block_offset + tid;
  printf(" blockIdx.x: %3d, blockIdx.y: %3d, threadIdx.x: %3d, gid: %3d, "
         "value: %4d \n",
         blockIdx.x, blockIdx.y, tid, gid, input[gid]);
}

// ===========================================
int main() {
  printf(" starts ...");

  int array_size = 16;
  int array_byte_size = sizeof(int) * array_size;
  int h_data[] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};

  // array on the host
  printf(" data on the host: \n");
  for (int i = 0; i < array_size; ++i)
    printf(" %d", h_data[i]);
  printf("\n\n");

  int *d_data; // array on the device
  cudaMalloc((void **)&d_data, array_byte_size);
  cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice);

  dim3 block(4);
  dim3 grid(2, 2);
  printf(" data on the device: \n");
  unique_gid_calculation_2d<<<grid, block>>>(d_data);
  cudaDeviceSynchronize();

  cudaDeviceReset();
  printf(" finished.");
  return 0;
}
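The kernel above flattens a 2D grid of 1D blocks. As a companion sketch (not one of the original lesson files), the same calculation for a 2D grid of 2D blocks, where each block contributes blockDim.x * blockDim.y threads and blocks are numbered row by row:

// 2d grid, 2d block: a thread first gets an id within its block, then the
// block's row-major index scales it into a grid-wide unique id.
__global__ void unique_gid_calculation_2d_2d(int *input) {
  int tid = threadIdx.y * blockDim.x + threadIdx.x;   // id within the block
  int threads_per_block = blockDim.x * blockDim.y;
  int block_id = blockIdx.y * gridDim.x + blockIdx.x; // row-major block id
  int gid = block_id * threads_per_block + tid;
  printf(" gid: %3d, value: %4d \n", gid, input[gid]);
}

Launching it as unique_gid_calculation_2d_2d<<<dim3(2, 2), dim3(2, 2)>>>(d_data) covers the same 16 elements as the lesson's configuration.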
6bad496789e88f558846f2718ab95a63d896f06a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
#include "cpu_bitmap.h"

static const int DIM = 1000;

struct hipComplex {
  float r;
  float i;
  __device__ hipComplex(float a, float b) : r(a), i(b) {}
  __device__ float magnitude2() const { return r * r + i * i; }
  __device__ hipComplex operator*(const hipComplex &a) {
    return {r * a.r - i * a.i, i * a.r + r * a.i};
  }
  __device__ hipComplex operator+(const hipComplex &a) {
    return {r + a.r, i + a.i};
  }
};

__device__ int julia(int x, int y) {
  const float scale = 1.5f;
  const auto dim = static_cast<float>(DIM);
  float jx = scale * (dim / 2.f - float(x)) / (dim / 2.f);
  float jy = scale * (dim / 2.f - float(y)) / (dim / 2.f);

  hipComplex c(-0.8f, 0.156f);
  hipComplex a(jx, jy);

  for (int i = 0; i < 200; i++) {
    a = a * a + c;
    if (a.magnitude2() > 1000.f) {
      return 0;
    }
  }
  return 1;
}

__global__ void kernel(unsigned char *ptr) {
  int x = blockIdx.x;
  int y = blockIdx.y;
  int offset = x + y * gridDim.x;

  int julia_value = julia(x, y);
  ptr[offset * 4 + 0] = 255 * julia_value;
  ptr[offset * 4 + 1] = 0;
  ptr[offset * 4 + 2] = 0;
  ptr[offset * 4 + 3] = 255;
}

int main() {
  CPUBitmap bitmap(DIM, DIM);
  unsigned char *dev_bitmap;
  CHECK(hipMalloc(&dev_bitmap, bitmap.image_size()));

  dim3 grid(DIM, DIM);
  hipLaunchKernelGGL(( kernel), dim3(grid), dim3(1), 0, 0, dev_bitmap);
  CHECK(hipDeviceSynchronize());
  CHECK(hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost));

  bitmap.display_and_exit();
  hipFree(dev_bitmap);
  return 0;
}
6bad496789e88f558846f2718ab95a63d896f06a.cu
#include "common.h" #include "cpu_bitmap.h" static const int DIM = 1000; struct cuComplex { float r; float i; __device__ cuComplex(float a, float b) : r(a), i(b) {} __device__ float magnitude2() const { return r * r + i * i; } __device__ cuComplex operator*(const cuComplex &a) { return {r * a.r - i * a.i, i * a.r + r * a.i}; } __device__ cuComplex operator+(const cuComplex &a) { return {r + a.r, i + a.i}; } }; __device__ int julia(int x, int y) { const float scale = 1.5f; const auto dim = static_cast<float>(DIM); float jx = scale * (dim / 2.f - float(x)) / (dim / 2.f); float jy = scale * (dim / 2.f - float(y)) / (dim / 2.f); cuComplex c(-0.8f, 0.156f); cuComplex a(jx, jy); for (int i = 0; i < 200; i++) { a = a * a + c; if (a.magnitude2() > 1000.f) { return 0; } } return 1; } __global__ void kernel(unsigned char *ptr) { int x = blockIdx.x; int y = blockIdx.y; int offset = x + y * gridDim.x; int julia_value = julia(x, y); ptr[offset * 4 + 0] = 255 * julia_value; ptr[offset * 4 + 1] = 0; ptr[offset * 4 + 2] = 0; ptr[offset * 4 + 3] = 255; } int main() { CPUBitmap bitmap(DIM, DIM); unsigned char *dev_bitmap; CHECK(cudaMalloc(&dev_bitmap, bitmap.image_size())); dim3 grid(DIM, DIM); kernel<<<grid, 1>>>(dev_bitmap); CHECK(cudaDeviceSynchronize()); CHECK(cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost)); bitmap.display_and_exit(); cudaFree(dev_bitmap); return 0; }
2e0ddd963a276665260370070f36a45b357bdd39.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "funcao.h"

/*===================== KERNEL CUDA ====================================================================*/
__global__ void sepia(RGBA *d_buffer, int size){
    int id = blockIdx.x*blockDim.x + threadIdx.x;
    if (id >= size) return;
    // Read the original channels once so every output channel is computed
    // from the input pixel, not from channels already overwritten above.
    float r = d_buffer[id].r, g = d_buffer[id].g, b = d_buffer[id].b;
    d_buffer[id].r = (r * 0.393f) + (g * 0.769f) + (b * 0.189f);
    d_buffer[id].g = (r * 0.349f) + (g * 0.686f) + (b * 0.168f);
    d_buffer[id].b = (r * 0.272f) + (g * 0.534f) + (b * 0.131f);
}

__global__ void negative(RGBA *d_buffer, int size){
    int ix = blockIdx.x*blockDim.x + threadIdx.x;
    if (ix >= size) return;
    d_buffer[ix].r = 255 - d_buffer[ix].r;
    d_buffer[ix].g = 255 - d_buffer[ix].g;
    d_buffer[ix].b = 255 - d_buffer[ix].b;
}

__global__ void grayscale(RGBA *d_buffer, int size){
    int ix = blockIdx.x*blockDim.x + threadIdx.x;
    if (ix >= size) return;
    int luminosidade = d_buffer[ix].r + d_buffer[ix].g + d_buffer[ix].b;
    d_buffer[ix].r = luminosidade/3;
    d_buffer[ix].g = luminosidade/3;
    d_buffer[ix].b = luminosidade/3;
}
/*===================== KERNEL CUDA ====================================================================*/

int size = 150*100;
int quantidade = 5000;
char img_name[] = "150*100.png";

/*=============================== Envia ===============================*/
extern "C" void funcaoEnv1(){
    MPI_Datatype mpi_rgba_type = create_mpi_rgba();
    clock_t start, end;
    start = clock();
    PNG_DATA *arquivo = read_png_file(img_name);
    RGBA *pixels = translate_px_to_vec(arquivo);

    sendMessage("funcaoEnv1","funcaoProc1", MPI_RGBA, pixels, size);
    receiveMessage("funcaoEnv1","funcaoProc1", MPI_RGBA, pixels, size);
    //translate_vec_to_px(pixels,arquivo);
    //write_png_file("saida.png",arquivo);

    end = clock();
    printf("F1: %lf\n", (double)(end - start) / CLOCKS_PER_SEC);
    MPI_Type_free(&mpi_rgba_type);
}

/*============================== Processadores =====================================*/
void processa_vetor_pixel(RGBA *buffer, int size, int device, int op){
    hipSetDevice(device);
    RGBA *d_buffer;
    hipMalloc((void **)&d_buffer, sizeof(RGBA)*size);
    hipMemcpy(d_buffer, buffer, size*sizeof(RGBA), hipMemcpyHostToDevice);

    // Round the grid size up so pixels past the last full block of 512
    // threads are not silently skipped when size is not a multiple of 512.
    int blocks = (size + 511) / 512;
    int i;
    for(i = 0; i < 200; i++){
        if(op == 0){
            hipLaunchKernelGGL(( sepia), dim3(blocks), dim3(512), 0, 0, d_buffer, size);
        }else if(op == 1){
            hipLaunchKernelGGL(( negative), dim3(blocks), dim3(512), 0, 0, d_buffer, size);
        }else if(op == 2){
            hipLaunchKernelGGL(( grayscale), dim3(blocks), dim3(512), 0, 0, d_buffer, size);
        }
    }

    hipMemcpy(buffer, d_buffer, size*sizeof(RGBA), hipMemcpyDeviceToHost);
    hipFree(d_buffer);
}

extern "C" void funcaoProc1(){
    int i;
    RGBA *buffer = (RGBA*)malloc(sizeof(RGBA)*size);
    //printf("F1 %d\n",quantidade);
    receiveMessage("funcaoProc1","funcaoEnv1", MPI_RGBA, buffer, size);
    processa_vetor_pixel(buffer, size, 0, 0);
    sendMessage("funcaoProc1","funcaoProc2", MPI_RGBA, buffer, size);
    for(i = 0; i < quantidade-1; i++){
        buffer = (RGBA*)malloc(sizeof(RGBA)*size); // note: reallocating each iteration leaks the previous buffer
        receiveMessage("funcaoProc1","funcaoProc3", MPI_RGBA, buffer, size);
        processa_vetor_pixel(buffer, size, 0, 0);
        sendMessage("funcaoProc1","funcaoProc2", MPI_RGBA, buffer, size);
    }
    receiveMessage("funcaoProc1","funcaoProc3", MPI_RGBA, buffer, size);
    sendMessage("funcaoProc1","funcaoEnv1", MPI_RGBA, buffer, size);
}

extern "C" void funcaoProc2(){
    int i;
    //printf("F2 %d\n",quantidade);
    for(i = 0; i < quantidade; i++){
        RGBA *buffer = (RGBA*)malloc(sizeof(RGBA)*size);
        receiveMessage("funcaoProc2","funcaoProc1", MPI_RGBA, buffer, size);
        processa_vetor_pixel(buffer, size, 1, 1);
        sendMessage("funcaoProc2","funcaoProc3", MPI_RGBA, buffer, size);
    }
}

extern "C" void funcaoProc3(){
    int i;
    //printf("F3 %d\n",quantidade);
    for(i = 0; i < quantidade; i++){
        RGBA *buffer = (RGBA*)malloc(sizeof(RGBA)*size);
        receiveMessage("funcaoProc3","funcaoProc2", MPI_RGBA, buffer, size);
        processa_vetor_pixel(buffer, size, 0, 2);
        sendMessage("funcaoProc3","funcaoProc1", MPI_RGBA, buffer, size);
    }
}
2e0ddd963a276665260370070f36a45b357bdd39.cu
#include "funcao.h" /*===================== KERNEL CUDA ====================================================================*/ __global__ void sepia(RGBA *d_buffer){ int id = blockIdx.x*blockDim.x + threadIdx.x; d_buffer[id].r = (d_buffer[id].r * 0.393f) + (d_buffer[id].g * 0.769f) + (d_buffer[id].b * 0.189f); d_buffer[id].g = (d_buffer[id].r * 0.349f) + (d_buffer[id].g * 0.686f) + (d_buffer[id].b * 0.168f); d_buffer[id].b = (d_buffer[id].r * 0.272f) + (d_buffer[id].g * 0.534f) + (d_buffer[id].b * 0.131f); } __global__ void negative(RGBA *d_buffer){ int ix = blockIdx.x*blockDim.x + threadIdx.x; d_buffer[ix].r = 255 - d_buffer[ix].r; d_buffer[ix].g = 255 - d_buffer[ix].g; d_buffer[ix].b = 255 - d_buffer[ix].b; } __global__ void grayscale(RGBA *d_buffer){ int ix = blockIdx.x*blockDim.x + threadIdx.x; int luminosidade = d_buffer[ix].r + d_buffer[ix].g + d_buffer[ix].b; d_buffer[ix].r = luminosidade/3; d_buffer[ix].g = luminosidade/3; d_buffer[ix].b = luminosidade/3; } /*===================== KERNEL CUDA ====================================================================*/ int size = 150*100; int quantidade = 5000; char img_name[] = "150*100.png"; /*=============================== Envia ===============================*/ extern "C" void funcaoEnv1(){ MPI_Datatype mpi_rgba_type = create_mpi_rgba(); clock_t start,end; start=clock(); PNG_DATA *arquivo = read_png_file(img_name); RGBA *pixels = translate_px_to_vec(arquivo); sendMessage("funcaoEnv1","funcaoProc1", MPI_RGBA, pixels, size); receiveMessage("funcaoEnv1","funcaoProc1",MPI_RGBA,pixels,size); //translate_vec_to_px(pixels,arquivo); //write_png_file("saida.png",arquivo); end = clock(); printf("F1: %lf\n",(double)(end - start) / CLOCKS_PER_SEC); MPI_Type_free(&mpi_rgba_type); } /*============================== Processadores =====================================*/ void processa_vetor_pixel(RGBA *buffer, int size, int device, int op){ cudaSetDevice(device); RGBA *d_buffer; cudaMalloc((void **)&d_buffer,sizeof(RGBA)*size); cudaMemcpy(d_buffer, buffer,(size*sizeof(RGBA)),cudaMemcpyHostToDevice); int i; for(i=0; i<200; i++){ if(op==0){ sepia<<<size/512,512>>>(d_buffer); }else if (op == 1){ negative<<<size/512,512>>>(d_buffer); }else if (op == 2){ grayscale<<<size/512,512>>>(d_buffer); } } cudaMemcpy(buffer, d_buffer, (size*sizeof(RGBA)), cudaMemcpyDeviceToHost); cudaFree(d_buffer); } extern "C" void funcaoProc1(){ int i; RGBA *buffer = (RGBA*)malloc(sizeof(RGBA)*size); //printf("F1 %d\n",quantidade); receiveMessage("funcaoProc1","funcaoEnv1",MPI_RGBA,buffer,size); processa_vetor_pixel(buffer,size,0,0); sendMessage("funcaoProc1","funcaoProc2",MPI_RGBA,buffer,size); for( i=0;i < quantidade-1; i++){ buffer = (RGBA*)malloc(sizeof(RGBA)*size); receiveMessage("funcaoProc1","funcaoProc3",MPI_RGBA,buffer,size); processa_vetor_pixel(buffer,size,0,0); sendMessage("funcaoProc1","funcaoProc2",MPI_RGBA,buffer,size); } receiveMessage("funcaoProc1","funcaoProc3",MPI_RGBA,buffer,size); sendMessage("funcaoProc1","funcaoEnv1",MPI_RGBA,buffer,size); } extern "C" void funcaoProc2(){ int i; //printf("F2 %d\n",quantidade); for( i=0; i < quantidade; i++){ RGBA *buffer = (RGBA*)malloc(sizeof(RGBA)*size); receiveMessage("funcaoProc2","funcaoProc1",MPI_RGBA,buffer,size); processa_vetor_pixel(buffer,size,1,1); sendMessage("funcaoProc2","funcaoProc3",MPI_RGBA,buffer,size); } } extern "C" void funcaoProc3(){ int i; //printf("F3 %d\n",quantidade); for( i=0;i < quantidade; i++){ RGBA *buffer = (RGBA*)malloc(sizeof(RGBA)*size); 
receiveMessage("funcaoProc3","funcaoProc2",MPI_RGBA,buffer,size); processa_vetor_pixel(buffer,size,0,2); sendMessage("funcaoProc3","funcaoProc1",MPI_RGBA,buffer,size); } }
57260550ae3e2c7ec4587e5677f1146839156d42.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <string> #include <iostream> #define THREADS 512 #define N1 2 #define M1 3 #define N2 3 #define M2 2 template <typename T> __global__ void is_orthogonal_int(T* a, bool *t, int n1, int m1, int n2, int m2) { unsigned i = blockDim.x * blockIdx.x + threadIdx.x; unsigned j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= n1 || j >= m2) return; T temp = 0; for (int p = 0; p < M1; p++) { temp += a[i * M1 + p] * a[p + j * N2]; //if (i == 1 && j == 1) //printf("i = %li i_elem = %i\tj = %li j_elem = %i\tval1 = %f\tval2 = %f\n", i, i * M1 + p, j, p + j * N2, a[i * M1 + p], a[p + j * N2]); } //printf("i = %i\t j = %i\t res = %i\n", i, j, static_cast<int>(temp)); if ((i != j && temp != 0) || (i == j && temp != 1)) *t = false; } template <typename T> __global__ void multiply(T* a, T* b, T *c, int n1, int m1, int n2, int m2) { unsigned i = blockDim.x * blockIdx.x + threadIdx.x; unsigned j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= n1 || j >= m2) return; T temp = 0; for (int p = 0; p < m1; p++) { temp += a[i * m1 + p] * b[p * m2 + j]; //if (i == 0) //printf("i = %li i_elem = %i\tj = %li j_elem = %i\tval1 = %f\tval2 = %f\n", i, i * m1 + p, j, p * m2 + j, a[i * m1 + p], b[p * m2 + j]); } c[i * n1 + j] = temp; } template <typename T> void print_matrix(T* matrix, int n, int m, std::string const & msg) { printf("%s", msg.c_str()); for (int i = 0; i < n; i++) { printf("| "); for (int j = 0; j < m; j++) printf("%f ", matrix[i * m + j]); printf("|\n"); } printf("\n"); } template <typename T> void fill_matrix(T* matrix, int n, int m) { for (int i = 0; i < n; i++) for (int j = 0; j < m; j++) matrix[i * m + j] = static_cast<T>(i * n - j); } template <typename T> void fill_matrix2(T* matrix, int n, int m) { for (int i = 0; i < n; i++) for (int j = 0; j < m; j++) matrix[i * m + j] = static_cast<T>(i * n + j); } template <typename T> void fille_matrix(T* matrix, int n, int m) { for (int i = 0; i < n; i++) for (int j = 0; j < m; j++) matrix[i * m + j] = i == j ? 
1 : 0; } template <typename T> T* transpose(T* a, int n, int m) { T* t; hipHostMalloc(&t, sizeof(T) * n * m); for (int i = 0; i < m; i++) for (int j = 0; j < n; j++) t[i * n + j] = a[j * m + i]; return t; } template <typename T> T* mult(T* a_h, T* b_h, int n1, int m1, int n2, int m2) { hipError_t error; T * c_h, * a_d, * b_d, * c_d; if (m1 != n2) { printf("multiplication is impossible\n"); return nullptr; } hipHostMalloc(&c_h, sizeof(T) * n1 * m2); hipMalloc(&a_d, sizeof(T) * n1 * m1); hipMalloc(&b_d, sizeof(T) * n2 * m2); hipMalloc(&c_d, sizeof(T) * n1 * m2); //print_matrix(a_h, n1, m1); //print_matrix(b_h, n2, m2); hipMemcpy(a_d, a_h, sizeof(T) * n1 * m1, hipMemcpyHostToDevice); hipMemcpy(b_d, b_h, sizeof(T) * n2 * m2, hipMemcpyHostToDevice); dim3 threads(THREADS, THREADS); dim3 blocks(static_cast<unsigned>((ceil(n1 * 1.0 / THREADS))), static_cast<unsigned>((ceil(m2 * 1.0 / THREADS)))); multiply << <blocks, threads >> > (a_d, b_d, c_d, n1, m1, n2, m2); if ((error = hipGetLastError()) != hipSuccess || (error = hipDeviceSynchronize()) != hipSuccess) std::cout << "Cuda error in matrix multiplication" << std::endl << "Error description: " << hipGetErrorString(error) << std::endl << hipGetErrorName(error) << std::endl; hipMemcpy(c_h, c_d, sizeof(T) * n1 * m2, hipMemcpyDeviceToHost); hipFree(a_d); hipFree(b_d); hipFree(c_d); return c_h; } void test_multiply() { float* a_h, * b_h; hipHostMalloc(&a_h, sizeof(float) * N1 * M1); hipHostMalloc(&b_h, sizeof(float) * N2 * M2); fill_matrix(a_h, N1, M1); fill_matrix(b_h, N2, M2); print_matrix(a_h, N1, M1, "Matrix a\n"); print_matrix(b_h, N2, M2, "Matrix b\n"); float *c_h = mult(a_h, b_h, N1, M1, N2, M2); print_matrix(c_h, N1, M2, "MULTIPLICATION RESULT\n"); hipHostFree(a_h); hipHostFree(b_h); if (c_h) hipHostFree(c_h); } void ex0() { hipError_t error; bool* t_h, * t_d; float* a_h, *a_d; hipHostMalloc(&t_h, sizeof(bool)); hipHostMalloc(&a_h, sizeof(float) * N1 * M1); hipMalloc(&a_d, sizeof(float) * N1 * M1); hipMalloc(&t_d, sizeof(bool)); *t_h = true; fill_matrix(a_h, N1, M1); print_matrix(a_h, N1, M1, "Matrix a\n"); hipMemcpy(t_d, t_h, sizeof(bool), hipMemcpyHostToDevice); hipMemcpy(a_d, a_h, sizeof(float) * N1 * M1, hipMemcpyHostToDevice); dim3 threads(THREADS, THREADS); dim3 blocks(static_cast<unsigned>((ceil(N1 * 1.0 / THREADS))), static_cast<unsigned>((ceil(M1 * 1.0 / THREADS)))); is_orthogonal_int<float> << <blocks, threads >> > (a_d, t_d, N1, M1, M1, N1); if ((error = hipGetLastError()) != hipSuccess || (error = hipDeviceSynchronize()) != hipSuccess) std::cout << "Cuda error in orthogonalization" << std::endl << "Error description: " << hipGetErrorString(error) << std::endl << hipGetErrorName(error) << std::endl; hipMemcpy(t_h, t_d, sizeof(bool), hipMemcpyDeviceToHost); printf("ex 1: input matrix is %s", *t_h ? 
"orthogonal\n" : "NOT orthogonal\n"); float* at_h = transpose(a_h, N1, M1); print_matrix(at_h, M1, N1, "transpose matrix a\n"); float* ata_mult = mult(a_h, at_h, N1, M1, M1, N1); print_matrix(ata_mult, N1, N1, "aT * a multiplication\n"); hipHostFree(a_h); hipHostFree(at_h); hipHostFree(ata_mult); hipHostFree(t_h); hipFree(t_d); hipFree(a_d); } void ex1(int n) { float* a_h, * b_h; hipHostMalloc(&a_h, sizeof(float) * n * n); hipHostMalloc(&b_h, sizeof(float) * n * n); fill_matrix(a_h, n, n); fill_matrix2(b_h, n, n); print_matrix(a_h, n, n, "Matrix a\n"); print_matrix(b_h, n, n, "Matrix b\n"); float* ab_h = mult(a_h, b_h, n, n, n, n); float* ba_h = mult(b_h, a_h, n, n, n, n); bool ok = true; for (int i = 0; i < n && ok; i++) { for (int j = 0; j < n && ok; j++) if (ab_h[i * n + j] != ba_h[i * n + j]) ok = false; } printf("matrix a and matrix b are %s\n", ok ? "commutable" : "not commutable"); print_matrix(ab_h, n, n, "Matrix a * b\n"); print_matrix(ba_h, n, n, "Matrix b * a\n"); hipHostFree(a_h); hipHostFree(b_h); hipHostFree(ab_h); hipHostFree(ba_h); } template <typename T> void fillv(T a[][M1], int n, int m) { for (int i = 0; i < n; i++) for (int j = 0; j < m; j++) a[i][j] = static_cast<T>(i * n - j); } template <typename T> void print_matrixv(T a[][M1], int n, int m, std::string const & msg) { printf("%s", msg.c_str()); for (int i = 0; i < n; i++) { printf("| "); for (int j = 0; j < m; j++) printf("%f ", a[i][j]); printf("|\n"); } printf("\n"); } template <typename T> __global__ void sum(T* a, T* b, T* c, int n1, int m1, int n2, int m2) { unsigned i = blockDim.x * blockIdx.x + threadIdx.x; unsigned j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= n1 || j >= m2) return; c[i * m1 + j] = a[i * m1 + j] + b[i * m1 + j]; } void ex2() { hipError_t error; float a_h[N1][M1], b_h[N1][M1], *a_d, *b_d, *c_d, *c_h; fillv(a_h, N1, M1); fillv(b_h, N1, M1); hipMalloc(&a_d, sizeof(float) * N1 * M1); hipMalloc(&b_d, sizeof(float) * N1 * M1); hipMalloc(&c_d, sizeof(float) * N1 * M1); hipHostMalloc(&c_h, sizeof(float) * N1 * M1); print_matrixv(a_h, N1, M1, "MATRIX A\n"); print_matrixv(b_h, N1, M1, "MATRIX B\n"); hipMemcpy(a_d, a_h, sizeof(float) * N1 * M1, hipMemcpyHostToDevice); hipMemcpy(b_d, b_h, sizeof(float) * N1 * M1, hipMemcpyHostToDevice); dim3 threads(THREADS, THREADS); dim3 blocks(static_cast<unsigned>((ceil(N1 * 1.0 / THREADS))), static_cast<unsigned>((ceil(M1 * 1.0 / THREADS)))); sum << <blocks, threads >> > (a_d, b_d, c_d, N1, M1, N1, M1); if ((error = hipGetLastError()) != hipSuccess || (error = hipDeviceSynchronize()) != hipSuccess) std::cout << "Cuda error in matrix sum" << std::endl << "Error description: " << hipGetErrorString(error) << std::endl << hipGetErrorName(error) << std::endl; hipMemcpy(c_h, c_d, sizeof(float) * N1 * M1, hipMemcpyDeviceToHost); print_matrix(c_h, N1, M1, "SUM\n"); hipFree(a_d); hipFree(b_d); hipFree(c_d); hipFree(c_h); } int main() { //test_multiply(); ex0(); ex1(2); ex2(); return 0; }
57260550ae3e2c7ec4587e5677f1146839156d42.cu
 #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <string> #include <iostream> #define THREADS 512 #define N1 2 #define M1 3 #define N2 3 #define M2 2 template <typename T> __global__ void is_orthogonal_int(T* a, bool *t, int n1, int m1, int n2, int m2) { unsigned i = blockDim.x * blockIdx.x + threadIdx.x; unsigned j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= n1 || j >= m2) return; T temp = 0; for (int p = 0; p < M1; p++) { temp += a[i * M1 + p] * a[p + j * N2]; //if (i == 1 && j == 1) //printf("i = %li i_elem = %i\tj = %li j_elem = %i\tval1 = %f\tval2 = %f\n", i, i * M1 + p, j, p + j * N2, a[i * M1 + p], a[p + j * N2]); } //printf("i = %i\t j = %i\t res = %i\n", i, j, static_cast<int>(temp)); if ((i != j && temp != 0) || (i == j && temp != 1)) *t = false; } template <typename T> __global__ void multiply(T* a, T* b, T *c, int n1, int m1, int n2, int m2) { unsigned i = blockDim.x * blockIdx.x + threadIdx.x; unsigned j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= n1 || j >= m2) return; T temp = 0; for (int p = 0; p < m1; p++) { temp += a[i * m1 + p] * b[p * m2 + j]; //if (i == 0) //printf("i = %li i_elem = %i\tj = %li j_elem = %i\tval1 = %f\tval2 = %f\n", i, i * m1 + p, j, p * m2 + j, a[i * m1 + p], b[p * m2 + j]); } c[i * n1 + j] = temp; } template <typename T> void print_matrix(T* matrix, int n, int m, std::string const & msg) { printf("%s", msg.c_str()); for (int i = 0; i < n; i++) { printf("| "); for (int j = 0; j < m; j++) printf("%f ", matrix[i * m + j]); printf("|\n"); } printf("\n"); } template <typename T> void fill_matrix(T* matrix, int n, int m) { for (int i = 0; i < n; i++) for (int j = 0; j < m; j++) matrix[i * m + j] = static_cast<T>(i * n - j); } template <typename T> void fill_matrix2(T* matrix, int n, int m) { for (int i = 0; i < n; i++) for (int j = 0; j < m; j++) matrix[i * m + j] = static_cast<T>(i * n + j); } template <typename T> void fille_matrix(T* matrix, int n, int m) { for (int i = 0; i < n; i++) for (int j = 0; j < m; j++) matrix[i * m + j] = i == j ? 
1 : 0; } template <typename T> T* transpose(T* a, int n, int m) { T* t; cudaMallocHost(&t, sizeof(T) * n * m); for (int i = 0; i < m; i++) for (int j = 0; j < n; j++) t[i * n + j] = a[j * m + i]; return t; } template <typename T> T* mult(T* a_h, T* b_h, int n1, int m1, int n2, int m2) { cudaError_t error; T * c_h, * a_d, * b_d, * c_d; if (m1 != n2) { printf("multiplication is impossible\n"); return nullptr; } cudaMallocHost(&c_h, sizeof(T) * n1 * m2); cudaMalloc(&a_d, sizeof(T) * n1 * m1); cudaMalloc(&b_d, sizeof(T) * n2 * m2); cudaMalloc(&c_d, sizeof(T) * n1 * m2); //print_matrix(a_h, n1, m1); //print_matrix(b_h, n2, m2); cudaMemcpy(a_d, a_h, sizeof(T) * n1 * m1, cudaMemcpyHostToDevice); cudaMemcpy(b_d, b_h, sizeof(T) * n2 * m2, cudaMemcpyHostToDevice); dim3 threads(THREADS, THREADS); dim3 blocks(static_cast<unsigned>((ceil(n1 * 1.0 / THREADS))), static_cast<unsigned>((ceil(m2 * 1.0 / THREADS)))); multiply << <blocks, threads >> > (a_d, b_d, c_d, n1, m1, n2, m2); if ((error = cudaGetLastError()) != cudaSuccess || (error = cudaDeviceSynchronize()) != cudaSuccess) std::cout << "Cuda error in matrix multiplication" << std::endl << "Error description: " << cudaGetErrorString(error) << std::endl << cudaGetErrorName(error) << std::endl; cudaMemcpy(c_h, c_d, sizeof(T) * n1 * m2, cudaMemcpyDeviceToHost); cudaFree(a_d); cudaFree(b_d); cudaFree(c_d); return c_h; } void test_multiply() { float* a_h, * b_h; cudaMallocHost(&a_h, sizeof(float) * N1 * M1); cudaMallocHost(&b_h, sizeof(float) * N2 * M2); fill_matrix(a_h, N1, M1); fill_matrix(b_h, N2, M2); print_matrix(a_h, N1, M1, "Matrix a\n"); print_matrix(b_h, N2, M2, "Matrix b\n"); float *c_h = mult(a_h, b_h, N1, M1, N2, M2); print_matrix(c_h, N1, M2, "MULTIPLICATION RESULT\n"); cudaFreeHost(a_h); cudaFreeHost(b_h); if (c_h) cudaFreeHost(c_h); } void ex0() { cudaError_t error; bool* t_h, * t_d; float* a_h, *a_d; cudaMallocHost(&t_h, sizeof(bool)); cudaMallocHost(&a_h, sizeof(float) * N1 * M1); cudaMalloc(&a_d, sizeof(float) * N1 * M1); cudaMalloc(&t_d, sizeof(bool)); *t_h = true; fill_matrix(a_h, N1, M1); print_matrix(a_h, N1, M1, "Matrix a\n"); cudaMemcpy(t_d, t_h, sizeof(bool), cudaMemcpyHostToDevice); cudaMemcpy(a_d, a_h, sizeof(float) * N1 * M1, cudaMemcpyHostToDevice); dim3 threads(THREADS, THREADS); dim3 blocks(static_cast<unsigned>((ceil(N1 * 1.0 / THREADS))), static_cast<unsigned>((ceil(M1 * 1.0 / THREADS)))); is_orthogonal_int<float> << <blocks, threads >> > (a_d, t_d, N1, M1, M1, N1); if ((error = cudaGetLastError()) != cudaSuccess || (error = cudaDeviceSynchronize()) != cudaSuccess) std::cout << "Cuda error in orthogonalization" << std::endl << "Error description: " << cudaGetErrorString(error) << std::endl << cudaGetErrorName(error) << std::endl; cudaMemcpy(t_h, t_d, sizeof(bool), cudaMemcpyDeviceToHost); printf("ex 1: input matrix is %s", *t_h ? 
"orthogonal\n" : "NOT orthogonal\n"); float* at_h = transpose(a_h, N1, M1); print_matrix(at_h, M1, N1, "transpose matrix a\n"); float* ata_mult = mult(a_h, at_h, N1, M1, M1, N1); print_matrix(ata_mult, N1, N1, "aT * a multiplication\n"); cudaFreeHost(a_h); cudaFreeHost(at_h); cudaFreeHost(ata_mult); cudaFreeHost(t_h); cudaFree(t_d); cudaFree(a_d); } void ex1(int n) { float* a_h, * b_h; cudaMallocHost(&a_h, sizeof(float) * n * n); cudaMallocHost(&b_h, sizeof(float) * n * n); fill_matrix(a_h, n, n); fill_matrix2(b_h, n, n); print_matrix(a_h, n, n, "Matrix a\n"); print_matrix(b_h, n, n, "Matrix b\n"); float* ab_h = mult(a_h, b_h, n, n, n, n); float* ba_h = mult(b_h, a_h, n, n, n, n); bool ok = true; for (int i = 0; i < n && ok; i++) { for (int j = 0; j < n && ok; j++) if (ab_h[i * n + j] != ba_h[i * n + j]) ok = false; } printf("matrix a and matrix b are %s\n", ok ? "commutable" : "not commutable"); print_matrix(ab_h, n, n, "Matrix a * b\n"); print_matrix(ba_h, n, n, "Matrix b * a\n"); cudaFreeHost(a_h); cudaFreeHost(b_h); cudaFreeHost(ab_h); cudaFreeHost(ba_h); } template <typename T> void fillv(T a[][M1], int n, int m) { for (int i = 0; i < n; i++) for (int j = 0; j < m; j++) a[i][j] = static_cast<T>(i * n - j); } template <typename T> void print_matrixv(T a[][M1], int n, int m, std::string const & msg) { printf("%s", msg.c_str()); for (int i = 0; i < n; i++) { printf("| "); for (int j = 0; j < m; j++) printf("%f ", a[i][j]); printf("|\n"); } printf("\n"); } template <typename T> __global__ void sum(T* a, T* b, T* c, int n1, int m1, int n2, int m2) { unsigned i = blockDim.x * blockIdx.x + threadIdx.x; unsigned j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= n1 || j >= m2) return; c[i * m1 + j] = a[i * m1 + j] + b[i * m1 + j]; } void ex2() { cudaError_t error; float a_h[N1][M1], b_h[N1][M1], *a_d, *b_d, *c_d, *c_h; fillv(a_h, N1, M1); fillv(b_h, N1, M1); cudaMalloc(&a_d, sizeof(float) * N1 * M1); cudaMalloc(&b_d, sizeof(float) * N1 * M1); cudaMalloc(&c_d, sizeof(float) * N1 * M1); cudaMallocHost(&c_h, sizeof(float) * N1 * M1); print_matrixv(a_h, N1, M1, "MATRIX A\n"); print_matrixv(b_h, N1, M1, "MATRIX B\n"); cudaMemcpy(a_d, a_h, sizeof(float) * N1 * M1, cudaMemcpyHostToDevice); cudaMemcpy(b_d, b_h, sizeof(float) * N1 * M1, cudaMemcpyHostToDevice); dim3 threads(THREADS, THREADS); dim3 blocks(static_cast<unsigned>((ceil(N1 * 1.0 / THREADS))), static_cast<unsigned>((ceil(M1 * 1.0 / THREADS)))); sum << <blocks, threads >> > (a_d, b_d, c_d, N1, M1, N1, M1); if ((error = cudaGetLastError()) != cudaSuccess || (error = cudaDeviceSynchronize()) != cudaSuccess) std::cout << "Cuda error in matrix sum" << std::endl << "Error description: " << cudaGetErrorString(error) << std::endl << cudaGetErrorName(error) << std::endl; cudaMemcpy(c_h, c_d, sizeof(float) * N1 * M1, cudaMemcpyDeviceToHost); print_matrix(c_h, N1, M1, "SUM\n"); cudaFree(a_d); cudaFree(b_d); cudaFree(c_d); cudaFree(c_h); } int main() { //test_multiply(); ex0(); ex1(2); ex2(); return 0; }
f601169af07af9a91edcf2664169542d33c6af97.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/generator.h" #include "paddle/fluid/operators/dirichlet_op.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.h" #include "paddle/fluid/operators/reduce_ops/reduce_op.h" #include "paddle/fluid/operators/reduce_ops/reduce_sum_op.h" #include "paddle/fluid/platform/for_range.h" #ifdef PADDLE_WITH_CUDA #include <hiprand/hiprand_kernel.h> #endif #ifdef PADDLE_WITH_HIP #include <hiprand_kernel.h> #endif #if defined(PADDLE_WITH_CUDA) using COMPAT_RANDSTATEPHILOX4_32_10_T = hiprandStatePhilox4_32_10_t; #define COMPAT_RAND_INIT hiprand_init #define COMPAT_RAND_UNIFORM hiprand_uniform #define COMPAT_RAND_NORMAL hiprand_normal #elif defined(PADDLE_WITH_HIP) using COMPAT_RANDSTATEPHILOX4_32_10_T = hiprandStatePhilox4_32_10_t; #define COMPAT_RAND_INIT hiprand_init #define COMPAT_RAND_UNIFORM hiprand_uniform #define COMPAT_RAND_NORMAL hiprand_normal #endif namespace paddle { namespace operators { template <typename T> struct GammaCUDAFunctor { GammaCUDAFunctor(const T* alpha, T* gamma, uint64_t seed, uint64_t offset) : alpha_(alpha), gamma_(gamma), seed_(seed), offset_(offset) {} DEVICE void operator()(int64_t index) { // hiprand initialization COMPAT_RANDSTATEPHILOX4_32_10_T state; COMPAT_RAND_INIT(/*seed=*/seed_, /*subsequence=*/index, /*offset=*/offset_, &state); // sample auto uniform_lambda = [&state]() { return COMPAT_RAND_UNIFORM(&state); }; BaseSampler<T, decltype(uniform_lambda)> standard_uniform(uniform_lambda); auto normal_lambda = [&state]() { return COMPAT_RAND_NORMAL(&state); }; BaseSampler<T, decltype(normal_lambda)> standard_normal(normal_lambda); auto sample = sample_gamma<T, T, decltype(uniform_lambda), decltype(normal_lambda)>( alpha_[index], standard_uniform, standard_normal); gamma_[index] = ::max(std::numeric_limits<T>::min(), sample); } const T* alpha_; T* gamma_; const uint64_t seed_; const uint64_t offset_; }; template <typename T> struct DirichletSampler<platform::CUDADeviceContext, T> { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor* alpha, framework::Tensor* out) { auto& dev_ctx = ctx.device_context<platform::CUDADeviceContext>(); // init state, seed & offset for all threads int device_id = ctx.GetPlace().GetDeviceId(); auto p_gen = framework::GetDefaultCUDAGenerator(device_id); auto seed_and_offset = p_gen->IncrementOffset(10); // hard-coded offset auto seed = seed_and_offset.first; auto offset = seed_and_offset.second; // sample from K gamma distributions, where K=alpha.numel() framework::Tensor gamma_samples; gamma_samples.mutable_data<T>(alpha->dims(), dev_ctx.GetPlace()); GammaCUDAFunctor<T> gamma_functor(alpha->data<T>(), gamma_samples.data<T>(), seed, offset); platform::ForRange<platform::CUDADeviceContext> for_range(dev_ctx, out->numel()); for_range(gamma_functor); // normalize them into a simplex, along the last axis 
framework::Tensor gamma_sum; auto new_shape = gamma_samples.dims(); new_shape[new_shape.size() - 1] = 1; gamma_sum.mutable_data<T>(new_shape, dev_ctx.GetPlace()); ReduceKernelFunctor<platform::CUDADeviceContext, T, SumFunctor>( &gamma_samples, &gamma_sum, {new_shape.size() - 1}, true, false, ctx) .template apply<T>(); ElementwiseComputeEx<DivFunctor<T>, platform::CUDADeviceContext, T, T>( ctx, &gamma_samples, &gamma_sum, -1, DivFunctor<T>(), out); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( dirichlet, ops::DirichletKernel<paddle::platform::CUDADeviceContext, float>, ops::DirichletKernel<paddle::platform::CUDADeviceContext, double>);
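GammaCUDAFunctor above follows the standard counter-based RNG pattern: one seed for the whole op, the element index as the Philox subsequence, and an offset advanced by IncrementOffset so successive invocations continue the streams instead of replaying them. A minimal standalone sketch of that pattern, written with the plain CUDA spellings (the hipified file reaches the same calls through the COMPAT_* aliases; all names below are illustrative):

#include <cstdio>
#include <curand_kernel.h>

__global__ void draw(float* out, int n, unsigned long long seed, unsigned long long offset) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    curandStatePhilox4_32_10_t state;
    // Same seed everywhere; the subsequence (element index) separates the
    // per-element streams, and the offset advances within each stream.
    curand_init(seed, /*subsequence=*/static_cast<unsigned long long>(i), offset, &state);
    out[i] = curand_uniform(&state);
}

int main() {
    const int n = 8;
    float h[n], *d;
    cudaMalloc(&d, n * sizeof(float));
    draw<<<1, n>>>(d, n, /*seed=*/42ULL, /*offset=*/0ULL);
    cudaMemcpy(h, d, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) printf("u[%d] = %f\n", i, h[i]);
    cudaFree(d);
    return 0;
}

Because Philox is counter-based, re-running with the same (seed, subsequence, offset) triple reproduces the same draws, which is why the generator hands out a fresh offset per op rather than reseeding.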
f601169af07af9a91edcf2664169542d33c6af97.cu
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/generator.h" #include "paddle/fluid/operators/dirichlet_op.h" #include "paddle/fluid/operators/elementwise/elementwise_op_function.h" #include "paddle/fluid/operators/reduce_ops/reduce_op.h" #include "paddle/fluid/operators/reduce_ops/reduce_sum_op.h" #include "paddle/fluid/platform/for_range.h" #ifdef PADDLE_WITH_CUDA #include <curand_kernel.h> #endif #ifdef PADDLE_WITH_HIP #include <hiprand_kernel.h> #endif #if defined(PADDLE_WITH_CUDA) using COMPAT_RANDSTATEPHILOX4_32_10_T = curandStatePhilox4_32_10_t; #define COMPAT_RAND_INIT curand_init #define COMPAT_RAND_UNIFORM curand_uniform #define COMPAT_RAND_NORMAL curand_normal #elif defined(PADDLE_WITH_HIP) using COMPAT_RANDSTATEPHILOX4_32_10_T = hiprandStatePhilox4_32_10_t; #define COMPAT_RAND_INIT hiprand_init #define COMPAT_RAND_UNIFORM hiprand_uniform #define COMPAT_RAND_NORMAL hiprand_normal #endif namespace paddle { namespace operators { template <typename T> struct GammaCUDAFunctor { GammaCUDAFunctor(const T* alpha, T* gamma, uint64_t seed, uint64_t offset) : alpha_(alpha), gamma_(gamma), seed_(seed), offset_(offset) {} DEVICE void operator()(int64_t index) { // curand initialization COMPAT_RANDSTATEPHILOX4_32_10_T state; COMPAT_RAND_INIT(/*seed=*/seed_, /*subsequence=*/index, /*offset=*/offset_, &state); // sample auto uniform_lambda = [&state]() { return COMPAT_RAND_UNIFORM(&state); }; BaseSampler<T, decltype(uniform_lambda)> standard_uniform(uniform_lambda); auto normal_lambda = [&state]() { return COMPAT_RAND_NORMAL(&state); }; BaseSampler<T, decltype(normal_lambda)> standard_normal(normal_lambda); auto sample = sample_gamma<T, T, decltype(uniform_lambda), decltype(normal_lambda)>( alpha_[index], standard_uniform, standard_normal); gamma_[index] = std::max(std::numeric_limits<T>::min(), sample); } const T* alpha_; T* gamma_; const uint64_t seed_; const uint64_t offset_; }; template <typename T> struct DirichletSampler<platform::CUDADeviceContext, T> { void operator()(const framework::ExecutionContext& ctx, const framework::Tensor* alpha, framework::Tensor* out) { auto& dev_ctx = ctx.device_context<platform::CUDADeviceContext>(); // init state, seed & offset for all threads int device_id = ctx.GetPlace().GetDeviceId(); auto p_gen = framework::GetDefaultCUDAGenerator(device_id); auto seed_and_offset = p_gen->IncrementOffset(10); // hard-coded offset auto seed = seed_and_offset.first; auto offset = seed_and_offset.second; // sample from K gamma distributions, where K=alpha.numel() framework::Tensor gamma_samples; gamma_samples.mutable_data<T>(alpha->dims(), dev_ctx.GetPlace()); GammaCUDAFunctor<T> gamma_functor(alpha->data<T>(), gamma_samples.data<T>(), seed, offset); platform::ForRange<platform::CUDADeviceContext> for_range(dev_ctx, out->numel()); for_range(gamma_functor); // normalize them into a simplex, along the last axis framework::Tensor gamma_sum; auto new_shape = gamma_samples.dims(); 
new_shape[new_shape.size() - 1] = 1; gamma_sum.mutable_data<T>(new_shape, dev_ctx.GetPlace()); ReduceKernelFunctor<platform::CUDADeviceContext, T, SumFunctor>( &gamma_samples, &gamma_sum, {new_shape.size() - 1}, true, false, ctx) .template apply<T>(); ElementwiseComputeEx<DivFunctor<T>, platform::CUDADeviceContext, T, T>( ctx, &gamma_samples, &gamma_sum, -1, DivFunctor<T>(), out); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( dirichlet, ops::DirichletKernel<paddle::platform::CUDADeviceContext, float>, ops::DirichletKernel<paddle::platform::CUDADeviceContext, double>);
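The tail of the operator (the ReduceKernelFunctor over the last axis followed by the elementwise divide) is the classic gamma-to-Dirichlet construction: if g_k ~ Gamma(alpha_k, 1) independently, then (g_1, ..., g_K) / sum_k g_k is Dirichlet(alpha)-distributed. A host-only sketch of the same math using the C++ standard library (illustrative, no Paddle API):

#include <cstdio>
#include <random>

int main() {
    const double alpha[3] = {2.0, 3.0, 5.0};
    std::mt19937_64 rng(42);
    double g[3], sum = 0.0;
    for (int k = 0; k < 3; ++k) {
        std::gamma_distribution<double> gamma(alpha[k], 1.0); // shape alpha_k, scale 1
        g[k] = gamma(rng);
        sum += g[k];
    }
    for (int k = 0; k < 3; ++k)
        printf("x[%d] = %f\n", k, g[k] / sum); // components sum to 1
    return 0;
}

In expectation each component lands near alpha_k / sum(alpha), i.e. 0.2, 0.3 and 0.5 for this choice of alpha.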
e988b9fcfd6f729b92258d51e6f7eef7970a891c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "mpc_batch_norm_op.h" #include "core/common/paddle_tensor_impl.cu.h" namespace paddle { namespace operators { template <typename T> __global__ void cu_expand(T* dst, const T* src, int S, int N, int C, int sample_size) { int col = blockIdx.x * blockDim.x + threadIdx.x; while (col < S * N * C * sample_size) { int share = col / (N * C * sample_size); int nc = (col / sample_size) % (N * C); dst[col] = src[nc % C + share * C]; col += blockDim.x * gridDim.x; } } template <typename T> struct Expand<platform::CUDADeviceContext, T> { void operator()(const Tensor* input, Tensor* output, int S, int N, int C, int sample_size) { // Expand tensor into specified shape // input shape: {S, C} // output shape: {S, N, C, H, W}, sample_size = H * W const T* input_data = input->data<T>(); T* output_data = output->data<T>(); dim3 block_size = dim3(PFL_CUDA_THREAD_SIZE, 1); dim3 grid_size = dim3((S * N * C * sample_size + PFL_CUDA_THREAD_SIZE - 1) / PFL_CUDA_THREAD_SIZE, 1); hipLaunchKernelGGL(( cu_expand<T>), dim3(grid_size), dim3(block_size), 0, mpc::AbstractContext::_s_stream, output_data, input_data, S, N, C, sample_size); } }; template <typename T> __global__ void cu_compute_sum(T* dst, const T* src, int S, int N, int C, int sample_size) { int col = blockIdx.x * blockDim.x + threadIdx.x; while (col < S * C) { int s = col / C; int c = col % C; dst[col] = 0; for (int i = 0; i < N * sample_size; ++i) { int n = i / sample_size; int i_ = i % sample_size; dst[col] += src[s * N * C * sample_size + n * C * sample_size + c * sample_size + i_]; } col += blockDim.x * gridDim.x; } } template <typename T> struct ComputeSum<platform::CUDADeviceContext, T> { void operator()(const Tensor* input, int C, Tensor* sum, const framework::ExecutionContext &ctx) { // Compute the sum of each channel // input shape: {S, N, C, H, W} (H and W are optional) // output shape: {S, C} int S = input->dims()[0]; int N = input->dims()[1]; int sample_size = input->numel() / (S * N * C); dim3 block_size = dim3(PFL_CUDA_THREAD_SIZE, 1); dim3 grid_size = dim3((S * C + PFL_CUDA_THREAD_SIZE - 1) / PFL_CUDA_THREAD_SIZE, 1); hipLaunchKernelGGL(( cu_compute_sum<T>), dim3(grid_size), dim3(block_size), 0, mpc::AbstractContext::_s_stream, sum->data<T>(), input->data<T>(), S, N, C, sample_size); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( mpc_batch_norm, ops::MpcBatchNormKernel<paddle::platform::CUDADeviceContext, int64_t>); REGISTER_OP_CUDA_KERNEL( mpc_batch_norm_grad, ops::MpcBatchNormGradKernel<paddle::platform::CUDADeviceContext, int64_t>);
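Both kernels above use the grid-stride idiom: stepping `col` by `blockDim.x * gridDim.x` lets a fixed launch shape sweep an index space of any size, so the grid size becomes a tuning knob rather than a correctness requirement. A minimal standalone sketch of the idiom (names and sizes are illustrative):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void fill(float* dst, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    while (i < n) {                    // this thread handles i, i+stride, i+2*stride, ...
        dst[i] = static_cast<float>(i);
        i += blockDim.x * gridDim.x;   // stride = total number of launched threads
    }
}

int main() {
    const int n = 10000;
    float* d;
    cudaMalloc(&d, n * sizeof(float));
    fill<<<4, 256>>>(d, n);            // only 1024 threads, yet all 10000 elements are covered
    float last;
    cudaMemcpy(&last, d + n - 1, sizeof(float), cudaMemcpyDeviceToHost);
    printf("last element = %g (expect %d)\n", last, n - 1);
    cudaFree(d);
    return 0;
}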
e988b9fcfd6f729b92258d51e6f7eef7970a891c.cu
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/framework/op_registry.h" #include "mpc_batch_norm_op.h" #include "core/common/paddle_tensor_impl.cu.h" namespace paddle { namespace operators { template <typename T> __global__ void cu_expand(T* dst, const T* src, int S, int N, int C, int sample_size) { int col = blockIdx.x * blockDim.x + threadIdx.x; while (col < S * N * C * sample_size) { int share = col / (N * C * sample_size); int nc = (col / sample_size) % (N * C); dst[col] = src[nc % C + share * C]; col += blockDim.x * gridDim.x; } } template <typename T> struct Expand<platform::CUDADeviceContext, T> { void operator()(const Tensor* input, Tensor* output, int S, int N, int C, int sample_size) { // Expand tensor into specified shape // input shape: {S, C} // output shape: {S, N, C, H, W}, sample_size = H * W const T* input_data = input->data<T>(); T* output_data = output->data<T>(); dim3 block_size = dim3(PFL_CUDA_THREAD_SIZE, 1); dim3 grid_size = dim3((S * N * C * sample_size + PFL_CUDA_THREAD_SIZE - 1) / PFL_CUDA_THREAD_SIZE, 1); cu_expand<T><<<grid_size, block_size, 0, mpc::AbstractContext::_s_stream>>>( output_data, input_data, S, N, C, sample_size); } }; template <typename T> __global__ void cu_compute_sum(T* dst, const T* src, int S, int N, int C, int sample_size) { int col = blockIdx.x * blockDim.x + threadIdx.x; while (col < S * C) { int s = col / C; int c = col % C; dst[col] = 0; for (int i = 0; i < N * sample_size; ++i) { int n = i / sample_size; int i_ = i % sample_size; dst[col] += src[s * N * C * sample_size + n * C * sample_size + c * sample_size + i_]; } col += blockDim.x * gridDim.x; } } template <typename T> struct ComputeSum<platform::CUDADeviceContext, T> { void operator()(const Tensor* input, int C, Tensor* sum, const framework::ExecutionContext &ctx) { // Compute the sum of each channel // input shape: {S, N, C, H, W} (H and W are optional) // output shape: {S, C} int S = input->dims()[0]; int N = input->dims()[1]; int sample_size = input->numel() / (S * N * C); dim3 block_size = dim3(PFL_CUDA_THREAD_SIZE, 1); dim3 grid_size = dim3((S * C + PFL_CUDA_THREAD_SIZE - 1) / PFL_CUDA_THREAD_SIZE, 1); cu_compute_sum<T><<<grid_size, block_size, 0, mpc::AbstractContext::_s_stream>>>( sum->data<T>(), input->data<T>(), S, N, C, sample_size); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( mpc_batch_norm, ops::MpcBatchNormKernel<paddle::platform::CUDADeviceContext, int64_t>); REGISTER_OP_CUDA_KERNEL( mpc_batch_norm_grad, ops::MpcBatchNormGradKernel<paddle::platform::CUDADeviceContext, int64_t>);
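The subtle part of `cu_expand` is recovering the (share, channel) pair from a flat index into the {S, N, C, H*W} output. A small CPU reference (a sketch with arbitrary test shapes) replays the kernel's arithmetic and cross-checks it against explicit 4-D coordinates:

#include <cassert>
#include <cstdio>
#include <vector>

int main() {
    const int S = 2, N = 3, C = 4, sample_size = 5;
    std::vector<float> src(S * C), dst(S * N * C * sample_size);
    for (int i = 0; i < S * C; ++i) src[i] = static_cast<float>(i);

    // Same decomposition as cu_expand, just on the CPU.
    for (int col = 0; col < S * N * C * sample_size; ++col) {
        int share = col / (N * C * sample_size); // outermost (share) index
        int nc = (col / sample_size) % (N * C);  // flattened (n, c)
        dst[col] = src[nc % C + share * C];      // broadcast one value per channel
    }

    // Independent check against explicit {s, n, c, i} coordinates.
    for (int s = 0; s < S; ++s)
        for (int n = 0; n < N; ++n)
            for (int c = 0; c < C; ++c)
                for (int i = 0; i < sample_size; ++i) {
                    int col = ((s * N + n) * C + c) * sample_size + i;
                    assert(dst[col] == src[s * C + c]);
                }
    printf("expand indexing OK\n");
    return 0;
}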
add99c19dd23f9d7386f1f35922e2b5609841bd5.hip
// !!! This is a file automatically generated by hipify!!! /*------------------------------------------------------------------------- * * CUDA function for backprojection using FDK weights for CBCT * * * CODE by Ander Biguri * Optimized and modified by RB * --------------------------------------------------------------------------- * --------------------------------------------------------------------------- * Copyright (c) 2015, University of Bath and CERN- European Organization for * Nuclear Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* --------------------------------------------------------------------------- * * Contact: [email protected] * Codes : https://github.com/CERN/TIGRE * --------------------------------------------------------------------------- */ #define PI_2 1.57079632679489661923 #include <algorithm> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include "voxel_backprojection.hpp" #include "TIGRE_common.hpp" #include <math.h> #include "GpuIds.hpp" // https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ mexPrintf("%s \n",msg);\ mexErrMsgIdAndTxt("CBCT:CUDA:Atb",hipGetErrorString(__err));\ } \ } while (0) #define MAXTREADS 1024 /*GEOMETRY DEFINITION * * Detector plane, behind * |-----------------------------| * | | * | | * | | * | | * | +--------+ | * | / /| | * A Z | / / |*D | * | | +--------+ | | * | | | | | | * | | | *O | + | * *--->y | | | / | * / | | |/ | * V X | +--------+ | * |-----------------------------| * * *S * * * * * **/ void CreateTexture(const GpuIds& gpuids,float* projectiondata,Geometry geo,hipArray** d_cuArrTex,unsigned int nangles, hipTextureObject_t *texImage,hipStream_t* stream, int nStreamDevice,bool allocate); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // The optimal values of two constants obtained by RB on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were: // PROJ_PER_KERNEL = 32 or 16 (very similar times) // VOXELS_PER_THREAD = 8 // Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code. // (e.g. 16.2 s vs. ~62 s). const int PROJ_PER_KERNEL = 32; // Number of 2D projections to be analyzed by a single thread. This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck. const int VOXELS_PER_THREAD = 8; // Number of voxels to be computed by s single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck. // We have PROJ_PER_KERNEL projections and we need 6 parameters for each projection: // deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec // So we need to keep PROJ_PER_KERNEL*6 values in our deltas array FOR EACH CALL to our main kernel // (they will be updated in the main loop before each kernel call). 
__constant__ Point3D projParamsArrayDev[6*PROJ_PER_KERNEL]; // Dev means it is on device // We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above) // Point3D projParamsArrayHost[6*PROJ_PER_KERNEL]; // Host means it is host memory // Now we also need to store sinAlpha and cosAlpha for each projection (two floats per projection) __constant__ float projSinCosArrayDev[5*PROJ_PER_KERNEL]; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// //______________________________________________________________________________ // // Function: kernelPixelBackprojectionFDK // // Description: Main FDK backprojection kernel //______________________________________________________________________________ __global__ void kernelPixelBackprojectionFDK(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections, hipTextureObject_t tex) { // Old kernel call signature: // kernelPixelBackprojectionFDK<<<grid,block>>>(geo,dimage,i,deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec,sinalpha,cosalpha); // We just read in most of the params from the constant memory instead of getting them from the param list. // This is because we now have MANY params, since single kernel processes more than one projection! /* __global__ void kernelPixelBackprojectionFDK(const Geometry geo, * float* image, * const int indAlpha, * const Point3D deltaX , * const Point3D deltaY, * const Point3D deltaZ, * const Point3D xyzOrigin, * const Point3D xyzOffset, * const Point3D uv0Offset, * const float sinalpha, * const float cosalpha){ */ unsigned long long indY = blockIdx.y * blockDim.y + threadIdx.y; unsigned long long indX = blockIdx.x * blockDim.x + threadIdx.x; // unsigned long startIndZ = blockIdx.z * blockDim.z + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle unsigned long long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle //Make sure we don't go out of bounds if (indX>=geo.nVoxelX || indY>=geo.nVoxelY || startIndZ>=geo.nVoxelZ) return; // We'll keep a local auxiliary array of values of a column of voxels that this thread will update float voxelColumn[VOXELS_PER_THREAD]; // First we need to copy the curent 3D volume values from the column to our auxiliary array so that we can then // work on them (update them by computing values from multiple projections) locally - avoiding main memory reads/writes unsigned long colIdx; #pragma unroll for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++) { unsigned long long indZ = startIndZ + colIdx; // If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't // be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the updated values go back to the main volume) if(indZ>=geo.nVoxelZ) break; // break the loop. 
unsigned long long idx =indZ*(unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY+indY*(unsigned long long)geo.nVoxelX + indX; voxelColumn[colIdx] = image[idx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one) // We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory. } // END copy 3D volume voxels to local array // Now iterate through projections #pragma unroll for(unsigned long projNumber=0; projNumber<PROJ_PER_KERNEL; projNumber++) { // Get the current parameters from parameter arrays in constant memory. unsigned long indAlpha = currProjSetNumber*PROJ_PER_KERNEL+projNumber; // This is the ABSOLUTE projection number in the projection array // Our currImageVal will be updated by hovewer many projections we had left in the "remainder" - that's OK. if(indAlpha>=totalNoOfProjections) break; Point3D deltaX = projParamsArrayDev[6*projNumber]; // 6*projNumber because we have 6 Point3D values per projection Point3D deltaY = projParamsArrayDev[6*projNumber+1]; Point3D deltaZ = projParamsArrayDev[6*projNumber+2]; Point3D xyzOrigin = projParamsArrayDev[6*projNumber+3]; Point3D xyzOffset = projParamsArrayDev[6*projNumber+4]; Point3D S = projParamsArrayDev[6*projNumber+5]; float sinalpha = projSinCosArrayDev[5*projNumber]; // 2*projNumber because we have 2 float (sin or cos angle) values per projection float cosalpha = projSinCosArrayDev[5*projNumber+1]; float COR = projSinCosArrayDev[5*projNumber+2]; float DSD = projSinCosArrayDev[5*projNumber+3]; float DSO = projSinCosArrayDev[5*projNumber+4]; float auxCOR=COR/geo.dDetecU; // Now iterate through Z in our voxel column FOR A GIVEN PROJECTION #pragma unroll for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++) { unsigned long long indZ = startIndZ + colIdx; // If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't // be trying to copy the out of bounds values anyway (bounds checks will be done in the final loop where the values go to the main volume) if(indZ>=geo.nVoxelZ) break; // break the loop. // "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles. Point3D P; P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x); P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-auxCOR; P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z); // This is the vector defining the line from the source to the Voxel float vectX,vectY,vectZ; vectX=(P.x -S.x); vectY=(P.y -S.y); vectZ=(P.z -S.z); // Get the coordinates in the detector UV where the mid point of the voxel is projected. float t=__fdividef(DSO-DSD-S.x,vectX); float y,z; y=vectY*t+S.y; z=vectZ*t+S.z; float u,v; u=y+(float)geo.nDetecU*0.5f; v=z+(float)geo.nDetecV*0.5f; float weight; float realx,realy; realx=-(geo.sVoxelX-geo.dVoxelX)*0.5f +indX*geo.dVoxelX +xyzOffset.x; realy=-(geo.sVoxelY-geo.dVoxelY)*0.5f +indY*geo.dVoxelY +xyzOffset.y+COR; weight=__fdividef(DSO+realy*sinalpha-realx*cosalpha,DSO); weight=__frcp_rd(weight*weight); // Get Value in the computed (U,V) and multiply by the corresponding weight. // indAlpha is the ABSOLUTE number of projection in the projection array (NOT the current number of projection set!) 
#if IS_FOR_MATLAB_TIGRE voxelColumn[colIdx]+=tex3D<float>(tex, v, u ,indAlpha+0.5f)*weight; #else voxelColumn[colIdx]+=tex3D<float>(tex, u, v ,indAlpha+0.5f)*weight; #endif } // END iterating through column of voxels } // END iterating through multiple projections // And finally copy the updated local voxelColumn array back to our 3D volume (main memory) #pragma unroll for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++) { unsigned long long indZ = startIndZ + colIdx; // If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't // be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the values go to the main volume) if(indZ>=geo.nVoxelZ) break; // break the loop. unsigned long long idx =indZ*(unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY+indY*(unsigned long long)geo.nVoxelX + indX; image[idx] = voxelColumn[colIdx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one) // We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory. // According to references (Papenhausen), doing = is better than +=, since += requires main memory read followed by a write. // We did all the reads into the local array at the BEGINNING of this kernel. According to Papenhausen, this type of read-write split is // better for avoiding memory congestion. } // END copy updated voxels from local array to our 3D volume } // END kernelPixelBackprojectionFDK //______________________________________________________________________________ // // Function: voxel_backprojection // // Description: Main host function for FDK backprojection (invokes the kernel) //______________________________________________________________________________ int voxel_backprojection(float * projections, Geometry geo, float* result,float const * const alphas, int nalpha, const GpuIds& gpuids) { // printf("voxel_backprojection(geo.nDetector = %d, %d)\n", geo.nDetecU, geo.nDetecV); // printf("geo.nVoxel = %d, %d, %d\n", geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ); // Prepare for MultiGPU int deviceCount = gpuids.GetLength(); cudaCheckErrors("Device query fail"); if (deviceCount == 0) { mexErrMsgIdAndTxt("Atb:Voxel_backprojection:GPUselect","There are no available device(s) that support CUDA\n"); } // CODE assumes // 1.-All available devices are usable by this code // 2.-All available devices are equal, they are the same machine (warning thrown) // Check the available devices, and if they are the same if (!gpuids.AreEqualDevices()) { mexWarnMsgIdAndTxt("Atb:Voxel_backprojection:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed."); } int dev; // Split the CT problem unsigned int split_image; unsigned int split_projections; splitCTbackprojection(gpuids,geo,nalpha,&split_image,&split_projections); cudaCheckErrors("Error"); //Pagelock memory for synchronous copy. // Lets try to make the host memory pinned: // We laredy queried the GPU and assuemd they are the same, thus should have the same attributes. 
int isHostRegisterSupported = 0; #if CUDART_VERSION >= 9020 hipDeviceGetAttribute(&isHostRegisterSupported,hipDeviceAttributeHostRegisterSupported,gpuids[0]); #endif // empirical testing shows that when the image split is smaller than 1 (also implies the image is not very big), the time to // pin the memory is greater than the lost time in Synchronously launching the memcpys. This is only worth it when the image is too big. #ifndef NO_PINNED_MEMORY if (isHostRegisterSupported & (split_image>1 |deviceCount>1)){ hipHostRegister(result, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),hipHostRegisterPortable); } if (isHostRegisterSupported ){ hipHostRegister(projections, (size_t)geo.nDetecU*(size_t)geo.nDetecV*(size_t)nalpha*(size_t)sizeof(float),hipHostRegisterPortable); } #endif cudaCheckErrors("Error pinning memory"); // Create the arrays for the geometry. The main difference is that geo.offZ has been tuned for the // image slices. The rest of the Geometry is the same Geometry* geoArray=(Geometry*)malloc(split_image*deviceCount*sizeof(Geometry)); createGeoArray(split_image*deviceCount,geo,geoArray,nalpha); // Now lest allocate all the image memory on the GPU, so we can use it later. If we have made our numbers correctly // in the previous section this should leave enough space for the textures. size_t num_bytes_img = (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geoArray[0].nVoxelZ* sizeof(float); float** dimage=(float**)malloc(deviceCount*sizeof(float*)); for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipMalloc((void**)&dimage[dev], num_bytes_img); cudaCheckErrors("hipMalloc fail"); } //If it is the first time, lets make sure our image is zeroed. int nStreamDevice=2; int nStreams=deviceCount*nStreamDevice; hipStream_t* stream=(hipStream_t*)malloc(nStreams*sizeof(hipStream_t));; for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); for (int i = 0; i < nStreamDevice; ++i){ hipStreamCreate(&stream[i+dev*nStreamDevice]); } } // Kernel auxiliary variables Point3D* projParamsArrayHost; hipHostMalloc((void**)&projParamsArrayHost,6*PROJ_PER_KERNEL*sizeof(Point3D)); float* projSinCosArrayHost; hipHostMalloc((void**)&projSinCosArrayHost,5*PROJ_PER_KERNEL*sizeof(float)); // Texture object variables hipTextureObject_t *texProj; hipArray **d_cuArrTex; texProj =(hipTextureObject_t*)malloc(deviceCount*2*sizeof(hipTextureObject_t)); d_cuArrTex =(hipArray**)malloc(deviceCount*2*sizeof(hipArray*)); // Auxiliary Host page-locked memory for fast and asycnornous memcpy. // Start with the main loop. The Projection data needs to be allocated and dealocated in the main loop // as due to the nature of cudaArrays, we can not reuse them. This should not be a problem for the fast execution // of the code, as repeated allocation and deallocation only happens when the projection data is very very big, // and therefore allcoation time should be negligible, fluctuation of other computations should mask the time. unsigned long long proj_linear_idx_start; unsigned int proj_split_overlap_number; unsigned int current_proj_split_size,current_proj_overlap_split_size; size_t num_bytes_img_curr; size_t img_linear_idx_start; float** partial_projection; size_t* proj_split_size; for(unsigned int img_slice=0;img_slice<split_image;img_slice++){ // Initialize the memory if its the first time. 
for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipMemset(dimage[dev],0,num_bytes_img); cudaCheckErrors("memset fail"); } for( unsigned int proj=0;proj<split_projections;proj++){ // What is the size of the current chunk of proejctions we need in? current_proj_split_size=(nalpha+split_projections-1)/split_projections; // if its the last one its probably less current_proj_split_size=((proj+1)*current_proj_split_size<nalpha)? current_proj_split_size: nalpha-current_proj_split_size*proj; // We are going to split it in the same amount of kernels we need to execute. proj_split_overlap_number=(current_proj_split_size+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL; // Create pointer to pointers of projections and precompute their location and size. if(!proj && !img_slice){ partial_projection=(float**)malloc(proj_split_overlap_number*sizeof(float*)); proj_split_size=(size_t*)malloc(proj_split_overlap_number*sizeof(size_t*)); } for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){ // Crop the last one, as its likely its not completely divisible. // now lets split this for simultanoeus memcopy and compute. // We want to make sure that if we can, we run PROJ_PER_KERNEL projections, to maximize kernel acceleration // current_proj_overlap_split_size units = angles current_proj_overlap_split_size=max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL); current_proj_overlap_split_size=(proj_block_split<proj_split_overlap_number-1)?current_proj_overlap_split_size:current_proj_split_size-(proj_split_overlap_number-1)*current_proj_overlap_split_size; //Get the linear index where the current memory chunk starts. proj_linear_idx_start=(unsigned long long)((nalpha+split_projections-1)/split_projections)*(unsigned long long)proj*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV; proj_linear_idx_start+=proj_block_split*max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL)*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV; //Store result proj_split_size[proj_block_split]=current_proj_overlap_split_size; partial_projection[proj_block_split]=&projections[proj_linear_idx_start]; } for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){ // Now get the projections on memory CreateTexture(gpuids, partial_projection[proj_block_split],geo, &d_cuArrTex[(proj_block_split%2)*deviceCount], proj_split_size[proj_block_split], &texProj [(proj_block_split%2)*deviceCount], stream, nStreamDevice, (proj_block_split<2)&!proj&!img_slice);// Only allocate if its the first 2 calls for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipStreamSynchronize(stream[dev*nStreamDevice+1]); } // Pin the next chunk of projection data, unpin the current one. for (dev = 0; dev < deviceCount; dev++){ //Safety: // Depends on the amount of GPUs, the case where a image slice is zero hight can happen. // Just break the loop if we reached that point if(geoArray[img_slice*deviceCount+dev].nVoxelZ==0) break; hipSetDevice(gpuids[dev]); int divx,divy,divz; // RB: Use the optimal (in their tests) block size from paper by Zinsser and Keck (16 in x and 32 in y). // I tried different sizes and shapes of blocks (tiles), but it does not appear to significantly affect trhoughput, so // let's stick with the values from Zinsser and Keck. 
divx=16; divy=32; divz=VOXELS_PER_THREAD; // We now only have 32 x 16 threads per block (flat tile, see below), BUT each thread works on a Z column of VOXELS_PER_THREAD voxels, so we effectively need fewer blocks! dim3 grid((geo.nVoxelX+divx-1)/divx, (geo.nVoxelY+divy-1)/divy, (geoArray[img_slice*deviceCount+dev].nVoxelZ+divz-1)/divz); dim3 block(divx,divy,1); // Note that we have 1 in the Z size, not divz, since each thread works on a vertical set of VOXELS_PER_THREAD voxels (so we only need a "flat" tile of threads, with depth of 1) ////////////////////////////////////////////////////////////////////////////////////// // Main reconstruction loop: go through projections (rotation angles) and backproject ////////////////////////////////////////////////////////////////////////////////////// // Since we'll have multiple projections processed by a SINGLE kernel call, compute how many // kernel calls we'll need altogether. unsigned int noOfKernelCalls = (proj_split_size[proj_block_split]+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_KERNEL for (unsigned int i=0; i<noOfKernelCalls; i++){ // Now we need to generate and copy all data for PROJ_PER_KERNEL projections to constant memory so that our kernel can use it unsigned int j; for(j=0; j<PROJ_PER_KERNEL; j++){ unsigned int currProjNumber_slice=i*PROJ_PER_KERNEL+j; unsigned int currProjNumber_global=i*PROJ_PER_KERNEL+j // index within kernel +proj*(nalpha+split_projections-1)/split_projections // index of the global projection split +proj_block_split*max(current_proj_split_size/proj_split_overlap_number,PROJ_PER_KERNEL); // indexof overlap current split if(currProjNumber_slice>=proj_split_size[proj_block_split]) break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway. if(currProjNumber_global>=nalpha) break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway. Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, /*offDetec,*/source; float sinalpha,cosalpha; geoArray[img_slice*deviceCount+dev].alpha=-alphas[currProjNumber_global*3];//we got 3 angles now. 
geoArray[img_slice*deviceCount+dev].theta=-alphas[currProjNumber_global*3+1]; geoArray[img_slice*deviceCount+dev].psi =-alphas[currProjNumber_global*3+2]; // mexPrintf("%u %f \n",i,geoArray[img_slice*deviceCount+dev].alpha); // mexPrintf("%u \n",currProjNumber_global); sinalpha=sin(geoArray[img_slice*deviceCount+dev].alpha); cosalpha=cos(geoArray[img_slice*deviceCount+dev].alpha); projSinCosArrayHost[5*j]=sinalpha; // 2*j because we have 2 float (sin or cos angle) values per projection projSinCosArrayHost[5*j+1]=cosalpha; projSinCosArrayHost[5*j+2]=geo.COR[currProjNumber_global]; projSinCosArrayHost[5*j+3]=geo.DSD[currProjNumber_global]; projSinCosArrayHost[5*j+4]=geo.DSO[currProjNumber_global]; computeDeltasCube(geoArray[img_slice*deviceCount+dev],currProjNumber_global,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source); offOrig.x=geo.offOrigX[currProjNumber_global]; offOrig.y=geo.offOrigY[currProjNumber_global]; offOrig.z=geoArray[img_slice*deviceCount+dev].offOrigZ[currProjNumber_global]; projParamsArrayHost[6*j]=deltaX; // 6*j because we have 6 Point3D values per projection projParamsArrayHost[6*j+1]=deltaY; projParamsArrayHost[6*j+2]=deltaZ; projParamsArrayHost[6*j+3]=xyzOrigin; projParamsArrayHost[6*j+4]=offOrig; projParamsArrayHost[6*j+5]=source; } // END for (preparing params for kernel call) // Copy the prepared parameter arrays to constant memory to make it available for the kernel hipMemcpyToSymbolAsync(projSinCosArrayDev, projSinCosArrayHost, sizeof(float)*5*PROJ_PER_KERNEL,0,hipMemcpyHostToDevice,stream[dev*nStreamDevice]); hipMemcpyToSymbolAsync(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*6*PROJ_PER_KERNEL,0,hipMemcpyHostToDevice,stream[dev*nStreamDevice]); hipStreamSynchronize(stream[dev*nStreamDevice]); hipLaunchKernelGGL(( kernelPixelBackprojectionFDK), dim3(grid),dim3(block),0,stream[dev*nStreamDevice], geoArray[img_slice*deviceCount+dev],dimage[dev],i,proj_split_size[proj_block_split],texProj[(proj_block_split%2)*deviceCount+dev]); } // END for ////////////////////////////////////////////////////////////////////////////////////// // END RB code, Main reconstruction loop: go through projections (rotation angles) and backproject ////////////////////////////////////////////////////////////////////////////////////// }// END for deviceCount } // END sub-split of current projection chunk for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipDeviceSynchronize(); } } // END projection splits // Now we need to take the image out of the GPU for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); // We do not need to sycnronize because the array dealocators already do. 
num_bytes_img_curr=(size_t)geoArray[img_slice*deviceCount+dev].nVoxelX*(size_t)geoArray[img_slice*deviceCount+dev].nVoxelY*(size_t)geoArray[img_slice*deviceCount+dev].nVoxelZ*sizeof(float); img_linear_idx_start=(size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geoArray[0].nVoxelZ*(size_t)(img_slice*deviceCount+dev); hipMemcpyAsync(&result[img_linear_idx_start], dimage[dev], num_bytes_img_curr, hipMemcpyDeviceToHost,stream[dev*nStreamDevice+1]); } for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipDeviceSynchronize(); cudaCheckErrors("Main loop fail"); } } // end image splits ///////// Cleaning: bool two_buffers_used=((((nalpha+split_projections-1)/split_projections)+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL)>1; for(unsigned int i=0; i<2;i++){ // 2 buffers (if needed, maybe only 1) if (!two_buffers_used && i==1) break; for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipDestroyTextureObject(texProj[i*deviceCount+dev]); hipFreeArray(d_cuArrTex[i*deviceCount+dev]); } } cudaCheckErrors("cudadestroy textures result fail"); for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipFree(dimage[dev]); } hipHostFree(projSinCosArrayHost); hipHostFree(projParamsArrayHost); free(partial_projection); free(proj_split_size); freeGeoArray(split_image*deviceCount,geoArray); #ifndef NO_PINNED_MEMORY if (isHostRegisterSupported & (split_image>1 |deviceCount>1)){ hipHostUnregister(result); } if (isHostRegisterSupported){ hipHostUnregister(projections); } #endif for (int i = 0; i < nStreams; ++i) hipStreamDestroy(stream[i]); cudaCheckErrors("hipFree fail"); hipDeviceReset(); // For the Nvidia Visual Profiler return 0; } // END voxel_backprojection // void splitCTbackprojection(const GpuIds& gpuids, Geometry geo,int nalpha, unsigned int* split_image, unsigned int * split_projections){ // We don't know if the devices are being used. lets check that. and only use the amount of memory we need. size_t mem_GPU_global; checkFreeMemory(gpuids, &mem_GPU_global); const int deviceCount = gpuids.GetLength(); // Compute how much memory each of the relevant memory pieces need size_t mem_image= (unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float); size_t mem_proj= (unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV*sizeof(float); // Does everything fit in the GPU? if(mem_image/deviceCount+mem_proj*PROJ_PER_KERNEL*2<mem_GPU_global){ // We only need to split if we have extra GPUs *split_image=1; *split_projections=1; } // We know we need to split, but: // Does all the image fit in the GPU, with some slack for a stack of projections?? else { // As we can overlap memcpys from H2D of the projections, we should then minimize the amount of image splits. // Lets assume to start with that we only need 1 stack of PROJ_PER_KERNEL projections. The rest is for the image. size_t mem_free=mem_GPU_global-2*mem_proj*PROJ_PER_KERNEL; *split_image=(mem_image/deviceCount+mem_free-1)/mem_free; // Now knowing how many splits we have for images, we can recompute how many slices of projections actually // fit on the GPU. Must be more than 0 obviously. mem_free=mem_GPU_global-(mem_image/deviceCount)/(*split_image); // NOTE: There is some rounding error, but its in the order of bytes, and we have 5% of GPU free jsut in case. 
We are safe *split_projections=(mem_proj*PROJ_PER_KERNEL*2+mem_free-1)/mem_free; } } void CreateTexture(const GpuIds& gpuids, float* projectiondata,Geometry geo,hipArray** d_cuArrTex,unsigned int nangles, hipTextureObject_t *texImage,hipStream_t* stream,int nStreamDevice,bool allocate){ //size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ; #if IS_FOR_MATLAB_TIGRE const hipExtent extent =make_hipExtent(geo.nDetecV, geo.nDetecU, nangles); #else const hipExtent extent =make_hipExtent(geo.nDetecU, geo.nDetecV, nangles); #endif const unsigned int num_devices = gpuids.GetLength(); if (allocate){ for (unsigned int dev = 0; dev < num_devices; dev++){ hipSetDevice(gpuids[dev]); //hipArray Descriptor hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); //cuda Array hipMalloc3DArray(&d_cuArrTex[dev], &channelDesc, extent); } } for (unsigned int dev = 0; dev < num_devices; dev++){ hipSetDevice(gpuids[dev]); hipMemcpy3DParms copyParams = {0}; //Array creation copyParams.srcPtr = make_hipPitchedPtr((void *)projectiondata, extent.width*sizeof(float), extent.width, extent.height); copyParams.dstArray = d_cuArrTex[dev]; copyParams.extent = extent; copyParams.kind = hipMemcpyHostToDevice; hipMemcpy3DAsync(&copyParams,stream[dev*nStreamDevice+1]); } //Array creation End for (unsigned int dev = 0; dev < num_devices; dev++){ hipSetDevice(gpuids[dev]); hipResourceDesc texRes; memset(&texRes, 0, sizeof(hipResourceDesc)); texRes.resType = hipResourceTypeArray; texRes.res.array.array = d_cuArrTex[dev]; hipTextureDesc texDescr; memset(&texDescr, 0, sizeof(hipTextureDesc)); texDescr.normalizedCoords = false; texDescr.filterMode = hipFilterModeLinear; texDescr.addressMode[0] = hipAddressModeBorder; texDescr.addressMode[1] = hipAddressModeBorder; texDescr.addressMode[2] = hipAddressModeBorder; texDescr.readMode = hipReadModeElementType; hipCreateTextureObject(&texImage[dev], &texRes, &texDescr, NULL); } } //______________________________________________________________________________ // // Function: createGeoArray // // Description: This code generates the geometries needed to split the image properly in // cases where the entire image does not fit in the memory of the GPU //______________________________________________________________________________ void createGeoArray(unsigned int image_splits, Geometry geo,Geometry* geoArray, unsigned int nangles){ unsigned int splitsize=(geo.nVoxelZ+image_splits-1)/image_splits; for(unsigned int sp=0;sp<image_splits;sp++){ geoArray[sp]=geo; // All of them are splitsize, but the last one, possible geoArray[sp].nVoxelZ=((sp+1)*splitsize<geo.nVoxelZ)? splitsize: max(geo.nVoxelZ-splitsize*sp,0); geoArray[sp].sVoxelZ= geoArray[sp].nVoxelZ* geoArray[sp].dVoxelZ; // We need to redefine the offsets, as now each subimage is not aligned in the origin. geoArray[sp].offOrigZ=(float *)malloc(nangles*sizeof(float)); for (unsigned int i=0;i<nangles;i++){ geoArray[sp].offOrigZ[i]=geo.offOrigZ[i]-geo.sVoxelZ/2+sp*geoArray[0].sVoxelZ+geoArray[sp].sVoxelZ/2; } } } //______________________________________________________________________________ // // Function: freeGeoArray // // Description: Frees the memory from the geometry array for multiGPU. 
//______________________________________________________________________________ void freeGeoArray(unsigned int splits,Geometry* geoArray){ for(unsigned int sp=0;sp<splits;sp++){ free(geoArray[sp].offOrigZ); } free(geoArray); } //______________________________________________________________________________ // // Function: computeDeltasCube // // Description: Computes relative increments for each projection (volume rotation). // Increments get passed to the backprojection kernel. //______________________________________________________________________________ void computeDeltasCube(Geometry geo,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D* S) { Point3D P, Px,Py,Pz; // Get coords of Img(0,0,0) P.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i]; P.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i]; P.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i]; // Get coors from next voxel in each direction Px.x=P.x+geo.dVoxelX; Py.x=P.x; Pz.x=P.x; Px.y=P.y; Py.y=P.y+geo.dVoxelY; Pz.y=P.y; Px.z=P.z; Py.z=P.z; Pz.z=P.z+geo.dVoxelZ; // Rotate image around X axis (this is equivalent of rotating the source and detector) RZ RY RZ eulerZYZT(geo,&P); eulerZYZT(geo,&Px); eulerZYZT(geo,&Py); eulerZYZT(geo,&Pz); //detector offset P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i]; Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i]; Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i]; Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i]; //Detector Roll pitch Yaw // // // first, we need to offset everything so (0,0,0) is the center of the detector // Only X is required for that P.x=P.x+(geo.DSD[i]-geo.DSO[i]); Px.x=Px.x+(geo.DSD[i]-geo.DSO[i]); Py.x=Py.x+(geo.DSD[i]-geo.DSO[i]); Pz.x=Pz.x+(geo.DSD[i]-geo.DSO[i]); rollPitchYawT(geo,i,&P); rollPitchYawT(geo,i,&Px); rollPitchYawT(geo,i,&Py); rollPitchYawT(geo,i,&Pz); P.x=P.x-(geo.DSD[i]-geo.DSO[i]); Px.x=Px.x-(geo.DSD[i]-geo.DSO[i]); Py.x=Py.x-(geo.DSD[i]-geo.DSO[i]); Pz.x=Pz.x-(geo.DSD[i]-geo.DSO[i]); //Done for P, now source Point3D source; source.x=geo.DSD[i]; //already offseted for rotation source.y=-geo.offDetecU[i]; source.z=-geo.offDetecV[i]; rollPitchYawT(geo,i,&source); source.x=source.x-(geo.DSD[i]-geo.DSO[i]);// source.y=source.y-auxOff.y; source.z=source.z-auxOff.z; // mexPrintf("%f,%f,%f\n",source.x,source.y,source.z); // Scale coords so detector pixels are 1x1 P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU; Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU; Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU; Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU; source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU; // get deltas of the changes in voxels deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z; deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z; deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z; *xyzorigin=P; *S=source; } void eulerZYZT(Geometry geo, Point3D* point){ Point3D auxPoint; auxPoint.x=point->x; auxPoint.y=point->y; auxPoint.z=point->z; point->x = auxPoint.x*(cos(geo.psi)*cos(geo.theta)*cos(geo.alpha)-sin(geo.psi)*sin(geo.alpha)) +auxPoint.y*(-cos(geo.psi)*cos(geo.theta)*sin(geo.alpha)-sin(geo.psi)*cos(geo.alpha)) +auxPoint.z*cos(geo.psi)*sin(geo.theta); point->y = auxPoint.x*(sin(geo.psi)*cos(geo.theta)*cos(geo.alpha)+cos(geo.psi)*sin(geo.alpha)) +auxPoint.y*(-sin(geo.psi)*cos(geo.theta)*sin(geo.alpha)+cos(geo.psi)*cos(geo.alpha)) +auxPoint.z*sin(geo.psi)*sin(geo.theta); point->z =-auxPoint.x*sin(geo.theta)*cos(geo.alpha) +auxPoint.y*sin(geo.theta)*sin(geo.alpha) 
+auxPoint.z*cos(geo.theta); } void rollPitchYawT(Geometry geo,int i, Point3D* point){ Point3D auxPoint; auxPoint.x=point->x; auxPoint.y=point->y; auxPoint.z=point->z; point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x +sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.y -sin(geo.dPitch[i])*auxPoint.z; point->y=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.x +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y +cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.z; point->z=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.x +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.y +cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z; } void checkFreeMemory(const GpuIds& gpuids,size_t *mem_GPU_global){ size_t memfree; size_t memtotal; const int deviceCount = gpuids.GetLength(); for (int dev = 0; dev < deviceCount; dev++){ hipSetDevice(gpuids[dev]); hipMemGetInfo(&memfree,&memtotal); if(dev==0) *mem_GPU_global=memfree; if(memfree<memtotal/2){ mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n"); } cudaCheckErrors("Check mem error"); *mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global; } *mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95); //*mem_GPU_global= insert your known number here, in bytes. }
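voxel_backprojection amortizes launch overhead by staging parameters for PROJ_PER_KERNEL projections in pinned host arrays (projParamsArrayHost, projSinCosArrayHost) and copying them into __constant__ memory right before each launch, so a single kernel call backprojects a whole batch. A stripped-down sketch of that batching pattern, in plain CUDA spellings with illustrative names:

#include <cstdio>
#include <cuda_runtime.h>

const int ITEMS_PER_KERNEL = 4;                 // stands in for PROJ_PER_KERNEL
__constant__ float paramsDev[ITEMS_PER_KERNEL]; // per-batch parameters

__global__ void consume(float* out, int first, int total) {
    for (int k = 0; k < ITEMS_PER_KERNEL; ++k) {
        if (first + k >= total) break;          // bounds check, as in the real kernel
        out[first + k] = 2.0f * paramsDev[k];
    }
}

int main() {
    const int total = 10;                       // deliberately not a multiple of the batch
    float* paramsHost;                          // pinned, so the async copy really is async
    cudaMallocHost(&paramsHost, ITEMS_PER_KERNEL * sizeof(float));
    float *out_d, out_h[total];
    cudaMalloc(&out_d, total * sizeof(float));
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    for (int first = 0; first < total; first += ITEMS_PER_KERNEL) {
        for (int k = 0; k < ITEMS_PER_KERNEL && first + k < total; ++k)
            paramsHost[k] = static_cast<float>(first + k); // precompute this batch's params
        cudaMemcpyToSymbolAsync(paramsDev, paramsHost, ITEMS_PER_KERNEL * sizeof(float),
                                0, cudaMemcpyHostToDevice, stream);
        consume<<<1, 1, 0, stream>>>(out_d, first, total);
        cudaStreamSynchronize(stream);          // host buffer and constant array are reused
    }
    cudaMemcpy(out_h, out_d, sizeof(out_h), cudaMemcpyDeviceToHost);
    for (int i = 0; i < total; ++i) printf("%g ", out_h[i]);
    printf("\n");
    cudaFreeHost(paramsHost);
    cudaFree(out_d);
    cudaStreamDestroy(stream);
    return 0;
}

The per-iteration synchronization mirrors the real code's hipStreamSynchronize: the constant array and the pinned staging buffer are overwritten on the next iteration, so the copy and launch must have completed first.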
add99c19dd23f9d7386f1f35922e2b5609841bd5.cu
/*------------------------------------------------------------------------- * * CUDA function for backprojection using FDK weights for CBCT * * * CODE by Ander Biguri * Optimized and modified by RB * --------------------------------------------------------------------------- * --------------------------------------------------------------------------- * Copyright (c) 2015, University of Bath and CERN- European Organization for * Nuclear Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* --------------------------------------------------------------------------- * * Contact: [email protected] * Codes : https://github.com/CERN/TIGRE * --------------------------------------------------------------------------- */ #define PI_2 1.57079632679489661923 #include <algorithm> #include <cuda_runtime_api.h> #include <cuda.h> #include "voxel_backprojection.hpp" #include "TIGRE_common.hpp" #include <math.h> #include "GpuIds.hpp" // https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ mexPrintf("%s \n",msg);\ mexErrMsgIdAndTxt("CBCT:CUDA:Atb",cudaGetErrorString(__err));\ } \ } while (0) #define MAXTREADS 1024 /*GEOMETRY DEFINITION * * Detector plane, behind * |-----------------------------| * | | * | | * | | * | | * | +--------+ | * | / /| | * A Z | / / |*D | * | | +--------+ | | * | | | | | | * | | | *O | + | * *--->y | | | / | * / | | |/ | * V X | +--------+ | * |-----------------------------| * * *S * * * * * **/ void CreateTexture(const GpuIds& gpuids,float* projectiondata,Geometry geo,cudaArray** d_cuArrTex,unsigned int nangles, cudaTextureObject_t *texImage,cudaStream_t* stream, int nStreamDevice,bool allocate); //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // The optimal values of two constants obtained by RB on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were: // PROJ_PER_KERNEL = 32 or 16 (very similar times) // VOXELS_PER_THREAD = 8 // Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code. // (e.g. 16.2 s vs. ~62 s). const int PROJ_PER_KERNEL = 32; // Number of 2D projections to be analyzed by a single thread. This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck. const int VOXELS_PER_THREAD = 8; // Number of voxels to be computed by s single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck. // We have PROJ_PER_KERNEL projections and we need 6 parameters for each projection: // deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec // So we need to keep PROJ_PER_KERNEL*6 values in our deltas array FOR EACH CALL to our main kernel // (they will be updated in the main loop before each kernel call). 
__constant__ Point3D projParamsArrayDev[6*PROJ_PER_KERNEL];  // Dev means it is on device

// We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above)
// Point3D projParamsArrayHost[6*PROJ_PER_KERNEL];   // Host means it is host memory

// We also need to store sinAlpha, cosAlpha, COR, DSD and DSO for each projection (five floats per projection)
__constant__ float projSinCosArrayDev[5*PROJ_PER_KERNEL];

////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

//______________________________________________________________________________
//
//      Function:       kernelPixelBackprojectionFDK
//
//      Description:    Main FDK backprojection kernel
//______________________________________________________________________________
__global__ void kernelPixelBackprojectionFDK(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections, cudaTextureObject_t tex)
{
    // Old kernel call signature:
    // kernelPixelBackprojectionFDK<<<grid,block>>>(geo,dimage,i,deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec,sinalpha,cosalpha);
    // We just read in most of the params from the constant memory instead of getting them from the param list.
    // This is because we now have MANY params, since a single kernel processes more than one projection!
    /* __global__ void kernelPixelBackprojectionFDK(const Geometry geo,
     * float* image,
     * const int indAlpha,
     * const Point3D deltaX ,
     * const Point3D deltaY,
     * const Point3D deltaZ,
     * const Point3D xyzOrigin,
     * const Point3D xyzOffset,
     * const Point3D uv0Offset,
     * const float sinalpha,
     * const float cosalpha){
     */
    unsigned long long indY = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned long long indX = blockIdx.x * blockDim.x + threadIdx.x;
    // unsigned long startIndZ = blockIdx.z * blockDim.z + threadIdx.z;  // This is only the STARTING z index of the column of voxels that the thread will handle
    unsigned long long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z;  // This is only the STARTING z index of the column of voxels that the thread will handle
    //Make sure we don't go out of bounds
    if (indX>=geo.nVoxelX || indY>=geo.nVoxelY || startIndZ>=geo.nVoxelZ)
        return;
    // We'll keep a local auxiliary array of values of a column of voxels that this thread will update
    float voxelColumn[VOXELS_PER_THREAD];

    // First we need to copy the current 3D volume values from the column to our auxiliary array so that we can then
    // work on them (update them by computing values from multiple projections) locally - avoiding main memory reads/writes

    unsigned long colIdx;
#pragma unroll
    for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
    {
        unsigned long long indZ = startIndZ + colIdx;
        // If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
        // be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the updated values go back to the main volume)
        if(indZ>=geo.nVoxelZ)
            break;   // break the loop.
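        // [Editor's note] The linear index computed next lays the volume out x-fastest:
        // idx = (indZ*nVoxelY + indY)*nVoxelX + indX, so threads that are consecutive in x
        // read/write consecutive addresses (coalesced global memory access).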
        unsigned long long idx =indZ*(unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY+indY*(unsigned long long)geo.nVoxelX + indX;
        voxelColumn[colIdx] = image[idx];   // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one)
        // We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory.
    }  // END copy 3D volume voxels to local array

    // Now iterate through projections
#pragma unroll
    for(unsigned long projNumber=0; projNumber<PROJ_PER_KERNEL; projNumber++)
    {
        // Get the current parameters from parameter arrays in constant memory.
        unsigned long indAlpha = currProjSetNumber*PROJ_PER_KERNEL+projNumber;  // This is the ABSOLUTE projection number in the projection array
        // Our currImageVal will be updated by however many projections we had left in the "remainder" - that's OK.
        if(indAlpha>=totalNoOfProjections)
            break;

        Point3D deltaX = projParamsArrayDev[6*projNumber];  // 6*projNumber because we have 6 Point3D values per projection
        Point3D deltaY = projParamsArrayDev[6*projNumber+1];
        Point3D deltaZ = projParamsArrayDev[6*projNumber+2];
        Point3D xyzOrigin = projParamsArrayDev[6*projNumber+3];
        Point3D xyzOffset = projParamsArrayDev[6*projNumber+4];
        Point3D S = projParamsArrayDev[6*projNumber+5];

        float sinalpha = projSinCosArrayDev[5*projNumber];  // 5*projNumber because we have 5 float values per projection (sin, cos, COR, DSD, DSO)
        float cosalpha = projSinCosArrayDev[5*projNumber+1];
        float COR = projSinCosArrayDev[5*projNumber+2];
        float DSD = projSinCosArrayDev[5*projNumber+3];
        float DSO = projSinCosArrayDev[5*projNumber+4];

        float auxCOR=COR/geo.dDetecU;
        // Now iterate through Z in our voxel column FOR A GIVEN PROJECTION
#pragma unroll
        for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
        {
            unsigned long long indZ = startIndZ + colIdx;
            // If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
            // be trying to copy the out of bounds values anyway (bounds checks will be done in the final loop where the values go to the main volume)
            if(indZ>=geo.nVoxelZ)
                break;   // break the loop.

            // "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles.
            Point3D P;
            P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x);
            P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-auxCOR;
            P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z);

            // This is the vector defining the line from the source to the Voxel
            float vectX,vectY,vectZ;
            vectX=(P.x -S.x);
            vectY=(P.y -S.y);
            vectZ=(P.z -S.z);

            // Get the coordinates in the detector UV where the mid point of the voxel is projected.
            float t=__fdividef(DSO-DSD-S.x,vectX);
            float y,z;
            y=vectY*t+S.y;
            z=vectZ*t+S.z;
            float u,v;
            u=y+(float)geo.nDetecU*0.5f;
            v=z+(float)geo.nDetecV*0.5f;

            float weight;
            float realx,realy;
            realx=-(geo.sVoxelX-geo.dVoxelX)*0.5f  +indX*geo.dVoxelX   +xyzOffset.x;
            realy=-(geo.sVoxelY-geo.dVoxelY)*0.5f  +indY*geo.dVoxelY   +xyzOffset.y+COR;
            weight=__fdividef(DSO+realy*sinalpha-realx*cosalpha,DSO);
            weight=__frcp_rd(weight*weight);

            // Get Value in the computed (U,V) and multiply by the corresponding weight.
            // indAlpha is the ABSOLUTE number of projection in the projection array (NOT the current number of projection set!)
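            // [Editor's note] The weight computed just above is the standard FDK distance weighting:
            // with U = DSO + realy*sin(alpha) - realx*cos(alpha) (the voxel's distance from the source
            // along the central-ray direction), the sampled detector value is scaled by (DSO/U)^2;
            // __frcp_rd(weight*weight) evaluates 1/((U/DSO)^2) with round-down rounding.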
#if IS_FOR_MATLAB_TIGRE
            voxelColumn[colIdx]+=tex3D<float>(tex, v, u ,indAlpha+0.5f)*weight;
#else
            voxelColumn[colIdx]+=tex3D<float>(tex, u, v ,indAlpha+0.5f)*weight;
#endif
        }  // END iterating through column of voxels
    }  // END iterating through multiple projections

    // And finally copy the updated local voxelColumn array back to our 3D volume (main memory)
#pragma unroll
    for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++)
    {
        unsigned long long indZ = startIndZ + colIdx;
        // If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't
        // be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the values go to the main volume)
        if(indZ>=geo.nVoxelZ)
            break;   // break the loop.
        unsigned long long idx =indZ*(unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY+indY*(unsigned long long)geo.nVoxelX + indX;
        image[idx] = voxelColumn[colIdx];   // Write the updated voxel value (computed from MULTIPLE projections) back to the main volume.
        // According to references (Papenhausen), doing = is better than +=, since += requires a main memory read followed by a write.
        // We did all the reads into the local array at the BEGINNING of this kernel. According to Papenhausen, this type of read-write split is
        // better for avoiding memory congestion.
    }  // END copy updated voxels from local array to our 3D volume

}  // END kernelPixelBackprojectionFDK

//______________________________________________________________________________
//
//      Function:       voxel_backprojection
//
//      Description:    Main host function for FDK backprojection (invokes the kernel)
//______________________________________________________________________________
int voxel_backprojection(float * projections, Geometry geo, float* result,float const * const alphas, int nalpha, const GpuIds& gpuids)
{
    // printf("voxel_backprojection(geo.nDetector = %d, %d)\n", geo.nDetecU, geo.nDetecV);
    // printf("geo.nVoxel = %d, %d, %d\n", geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ);
    // Prepare for MultiGPU
    int deviceCount = gpuids.GetLength();
    cudaCheckErrors("Device query fail");
    if (deviceCount == 0) {
        mexErrMsgIdAndTxt("Atb:Voxel_backprojection:GPUselect","There are no available device(s) that support CUDA\n");
    }
    // CODE assumes
    // 1.-All available devices are usable by this code
    // 2.-All available devices are equal, they are the same machine (warning thrown)
    // Check the available devices, and if they are the same
    if (!gpuids.AreEqualDevices()) {
        mexWarnMsgIdAndTxt("Atb:Voxel_backprojection:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed.");
    }
    int dev;

    // Split the CT problem
    unsigned int split_image;
    unsigned int split_projections;
    splitCTbackprojection(gpuids,geo,nalpha,&split_image,&split_projections);
    cudaCheckErrors("Error");
    // Page-lock host memory so that copies are fast and can be truly asynchronous.
    // Let's try to make the host memory pinned:
    // We already queried the GPU and assumed they are the same, thus they should have the same attributes.
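    // [Editor's note] cudaMemcpyAsync only overlaps with other work when the host buffer is
    // page-locked; cudaHostRegister pins the already-allocated result/projection buffers in
    // place, avoiding a copy into a separate cudaMallocHost staging area.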
    int isHostRegisterSupported = 0;
#if CUDART_VERSION >= 9020
    cudaDeviceGetAttribute(&isHostRegisterSupported,cudaDevAttrHostRegisterSupported,gpuids[0]);
#endif
    // Empirical testing shows that when there is no image split (which also implies the image is not very big), the time to
    // pin the memory is greater than the time lost by launching the memcpys synchronously. Pinning is only worth it when the image is big.
#ifndef NO_PINNED_MEMORY
    if (isHostRegisterSupported && (split_image > 1 || deviceCount > 1)){
        cudaHostRegister(result, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),cudaHostRegisterPortable);
    }
    if (isHostRegisterSupported){
        cudaHostRegister(projections, (size_t)geo.nDetecU*(size_t)geo.nDetecV*(size_t)nalpha*(size_t)sizeof(float),cudaHostRegisterPortable);
    }
#endif
    cudaCheckErrors("Error pinning memory");

    // Create the arrays for the geometry. The main difference is that geo.offZ has been tuned for the
    // image slices. The rest of the Geometry is the same
    Geometry* geoArray=(Geometry*)malloc(split_image*deviceCount*sizeof(Geometry));
    createGeoArray(split_image*deviceCount,geo,geoArray,nalpha);

    // Now let's allocate all the image memory on the GPU, so we can use it later. If we have made our numbers correctly
    // in the previous section this should leave enough space for the textures.
    size_t num_bytes_img = (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geoArray[0].nVoxelZ* sizeof(float);
    float** dimage=(float**)malloc(deviceCount*sizeof(float*));
    for (dev = 0; dev < deviceCount; dev++){
        cudaSetDevice(gpuids[dev]);
        cudaMalloc((void**)&dimage[dev], num_bytes_img);
        cudaCheckErrors("cudaMalloc fail");
    }

    //If it is the first time, let's make sure our image is zeroed.
    int nStreamDevice=2;
    int nStreams=deviceCount*nStreamDevice;
    cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t));
    for (dev = 0; dev < deviceCount; dev++){
        cudaSetDevice(gpuids[dev]);
        for (int i = 0; i < nStreamDevice; ++i){
            cudaStreamCreate(&stream[i+dev*nStreamDevice]);
        }
    }

    // Kernel auxiliary variables
    Point3D* projParamsArrayHost;
    cudaMallocHost((void**)&projParamsArrayHost,6*PROJ_PER_KERNEL*sizeof(Point3D));
    float* projSinCosArrayHost;
    cudaMallocHost((void**)&projSinCosArrayHost,5*PROJ_PER_KERNEL*sizeof(float));

    // Texture object variables
    cudaTextureObject_t *texProj;
    cudaArray **d_cuArrTex;
    texProj =(cudaTextureObject_t*)malloc(deviceCount*2*sizeof(cudaTextureObject_t));
    d_cuArrTex =(cudaArray**)malloc(deviceCount*2*sizeof(cudaArray*));
    // Auxiliary host page-locked memory for fast and asynchronous memcpy.

    // Start with the main loop. The projection data needs to be allocated and deallocated in the main loop,
    // as due to the nature of cudaArrays we can not reuse them. This should not be a problem for the fast execution
    // of the code, as repeated allocation and deallocation only happens when the projection data is very very big,
    // and therefore allocation time should be negligible; fluctuation of other computations should mask the time.
    unsigned long long proj_linear_idx_start;
    unsigned int proj_split_overlap_number;
    unsigned int current_proj_split_size,current_proj_overlap_split_size;
    size_t num_bytes_img_curr;
    size_t img_linear_idx_start;
    float** partial_projection;
    size_t* proj_split_size;

    for(unsigned int img_slice=0;img_slice<split_image;img_slice++){
        // Initialize the memory if it's the first time.
        for (dev = 0; dev < deviceCount; dev++){
            cudaSetDevice(gpuids[dev]);
            cudaMemset(dimage[dev],0,num_bytes_img);
            cudaCheckErrors("memset fail");
        }

        for( unsigned int proj=0;proj<split_projections;proj++){
            // What is the size of the current chunk of projections we need?
            current_proj_split_size=(nalpha+split_projections-1)/split_projections;
            // If it's the last chunk, its size is probably smaller
            current_proj_split_size=((proj+1)*current_proj_split_size<nalpha)?  current_proj_split_size:  nalpha-current_proj_split_size*proj;

            // We are going to split it in the same amount of kernels we need to execute.
            proj_split_overlap_number=(current_proj_split_size+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL;

            // Create pointer to pointers of projections and precompute their location and size.
            if(!proj && !img_slice){
                partial_projection=(float**)malloc(proj_split_overlap_number*sizeof(float*));
                proj_split_size=(size_t*)malloc(proj_split_overlap_number*sizeof(size_t));
            }

            for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){
                // Crop the last one, as it's likely not completely divisible.
                // Now let's split this for simultaneous memcpy and compute.
                // We want to make sure that if we can, we run PROJ_PER_KERNEL projections, to maximize kernel acceleration
                // current_proj_overlap_split_size units = angles
                current_proj_overlap_split_size=max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL);
                current_proj_overlap_split_size=(proj_block_split<proj_split_overlap_number-1)?current_proj_overlap_split_size:current_proj_split_size-(proj_split_overlap_number-1)*current_proj_overlap_split_size;

                //Get the linear index where the current memory chunk starts.
                proj_linear_idx_start=(unsigned long long)((nalpha+split_projections-1)/split_projections)*(unsigned long long)proj*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV;
                proj_linear_idx_start+=proj_block_split*max((current_proj_split_size+proj_split_overlap_number-1)/proj_split_overlap_number,PROJ_PER_KERNEL)*(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV;
                //Store result
                proj_split_size[proj_block_split]=current_proj_overlap_split_size;
                partial_projection[proj_block_split]=&projections[proj_linear_idx_start];
            }

            for(unsigned int proj_block_split=0; proj_block_split<proj_split_overlap_number;proj_block_split++){

                // Now get the projections on memory
                CreateTexture(gpuids,
                        partial_projection[proj_block_split],geo,
                        &d_cuArrTex[(proj_block_split%2)*deviceCount],
                        proj_split_size[proj_block_split],
                        &texProj   [(proj_block_split%2)*deviceCount],
                        stream, nStreamDevice,
                        (proj_block_split<2) && !proj && !img_slice); // Only allocate if it's the first 2 calls

                for (dev = 0; dev < deviceCount; dev++){
                    cudaSetDevice(gpuids[dev]);
                    cudaStreamSynchronize(stream[dev*nStreamDevice+1]);
                }

                // Pin the next chunk of projection data, unpin the current one.
                for (dev = 0; dev < deviceCount; dev++){
                    //Safety:
                    // Depending on the number of GPUs, an image slice of zero height can happen.
                    // Just break the loop if we reached that point
                    if(geoArray[img_slice*deviceCount+dev].nVoxelZ==0)
                        break;

                    cudaSetDevice(gpuids[dev]);
                    int divx,divy,divz;

                    // RB: Use the optimal (in their tests) block size from the paper by Zinsser and Keck (16 in x and 32 in y).
                    // I tried different sizes and shapes of blocks (tiles), but it does not appear to significantly affect throughput, so
                    // let's stick with the values from Zinsser and Keck.
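                    // [Editor's note] The 16x32 flat block configured below, combined with
                    // VOXELS_PER_THREAD = 8, makes each block cover a 16x32x8 voxel brick, so the
                    // grid's z dimension shrinks 8x relative to a one-voxel-per-thread kernel.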
                    divx=16;
                    divy=32;
                    divz=VOXELS_PER_THREAD;      // We now only have 32 x 16 threads per block (flat tile, see below), BUT each thread works on a Z column of VOXELS_PER_THREAD voxels, so we effectively need fewer blocks!

                    dim3 grid((geo.nVoxelX+divx-1)/divx,
                            (geo.nVoxelY+divy-1)/divy,
                            (geoArray[img_slice*deviceCount+dev].nVoxelZ+divz-1)/divz);

                    dim3 block(divx,divy,1);     // Note that we have 1 in the Z size, not divz, since each thread works on a vertical set of VOXELS_PER_THREAD voxels (so we only need a "flat" tile of threads, with depth of 1)

                    //////////////////////////////////////////////////////////////////////////////////////
                    // Main reconstruction loop: go through projections (rotation angles) and backproject
                    //////////////////////////////////////////////////////////////////////////////////////

                    // Since we'll have multiple projections processed by a SINGLE kernel call, compute how many
                    // kernel calls we'll need altogether.
                    unsigned int noOfKernelCalls = (proj_split_size[proj_block_split]+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL;  // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_KERNEL

                    for (unsigned int i=0; i<noOfKernelCalls; i++){
                        // Now we need to generate and copy all data for PROJ_PER_KERNEL projections to constant memory so that our kernel can use it
                        unsigned int j;
                        for(j=0; j<PROJ_PER_KERNEL; j++){
                            unsigned int currProjNumber_slice=i*PROJ_PER_KERNEL+j;
                            unsigned int currProjNumber_global=i*PROJ_PER_KERNEL+j                                              // index within kernel
                                    +proj*(nalpha+split_projections-1)/split_projections                                       // index of the global projection split
                                    +proj_block_split*max(current_proj_split_size/proj_split_overlap_number,PROJ_PER_KERNEL);  // index of the current overlap split
                            if(currProjNumber_slice>=proj_split_size[proj_block_split])
                                break;  // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
                            if(currProjNumber_global>=nalpha)
                                break;  // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway.
                            Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, /*offDetec,*/source;
                            float sinalpha,cosalpha;
                            geoArray[img_slice*deviceCount+dev].alpha=-alphas[currProjNumber_global*3];   // We have 3 angles now.
                            geoArray[img_slice*deviceCount+dev].theta=-alphas[currProjNumber_global*3+1];
                            geoArray[img_slice*deviceCount+dev].psi  =-alphas[currProjNumber_global*3+2];
                            //                    mexPrintf("%u %f \n",i,geoArray[img_slice*deviceCount+dev].alpha);
                            //                    mexPrintf("%u \n",currProjNumber_global);
                            sinalpha=sin(geoArray[img_slice*deviceCount+dev].alpha);
                            cosalpha=cos(geoArray[img_slice*deviceCount+dev].alpha);

                            projSinCosArrayHost[5*j]=sinalpha;  // 5*j because we have 5 float values per projection (sin, cos, COR, DSD, DSO)
                            projSinCosArrayHost[5*j+1]=cosalpha;
                            projSinCosArrayHost[5*j+2]=geo.COR[currProjNumber_global];
                            projSinCosArrayHost[5*j+3]=geo.DSD[currProjNumber_global];
                            projSinCosArrayHost[5*j+4]=geo.DSO[currProjNumber_global];

                            computeDeltasCube(geoArray[img_slice*deviceCount+dev],currProjNumber_global,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source);

                            offOrig.x=geo.offOrigX[currProjNumber_global];
                            offOrig.y=geo.offOrigY[currProjNumber_global];
                            offOrig.z=geoArray[img_slice*deviceCount+dev].offOrigZ[currProjNumber_global];

                            projParamsArrayHost[6*j]=deltaX;    // 6*j because we have 6 Point3D values per projection
                            projParamsArrayHost[6*j+1]=deltaY;
                            projParamsArrayHost[6*j+2]=deltaZ;
                            projParamsArrayHost[6*j+3]=xyzOrigin;
                            projParamsArrayHost[6*j+4]=offOrig;
                            projParamsArrayHost[6*j+5]=source;
                        }   // END for (preparing params for kernel call)

                        // Copy the prepared parameter arrays to constant memory to make them available for the kernel
                        cudaMemcpyToSymbolAsync(projSinCosArrayDev, projSinCosArrayHost, sizeof(float)*5*PROJ_PER_KERNEL,0,cudaMemcpyHostToDevice,stream[dev*nStreamDevice]);
                        cudaMemcpyToSymbolAsync(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*6*PROJ_PER_KERNEL,0,cudaMemcpyHostToDevice,stream[dev*nStreamDevice]);
                        cudaStreamSynchronize(stream[dev*nStreamDevice]);

                        kernelPixelBackprojectionFDK<<<grid,block,0,stream[dev*nStreamDevice]>>>(geoArray[img_slice*deviceCount+dev],dimage[dev],i,proj_split_size[proj_block_split],texProj[(proj_block_split%2)*deviceCount+dev]);
                    }  // END for
                    //////////////////////////////////////////////////////////////////////////////////////
                    // END RB code, Main reconstruction loop: go through projections (rotation angles) and backproject
                    //////////////////////////////////////////////////////////////////////////////////////
                }// END for deviceCount
            } // END sub-split of current projection chunk

            for (dev = 0; dev < deviceCount; dev++){
                cudaSetDevice(gpuids[dev]);
                cudaDeviceSynchronize();
            }
        } // END projection splits

        // Now we need to take the image out of the GPU
        for (dev = 0; dev < deviceCount; dev++){
            cudaSetDevice(gpuids[dev]);
            // We do not need to synchronize because the array deallocators already do.
            num_bytes_img_curr=(size_t)geoArray[img_slice*deviceCount+dev].nVoxelX*(size_t)geoArray[img_slice*deviceCount+dev].nVoxelY*(size_t)geoArray[img_slice*deviceCount+dev].nVoxelZ*sizeof(float);
            img_linear_idx_start=(size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geoArray[0].nVoxelZ*(size_t)(img_slice*deviceCount+dev);
            cudaMemcpyAsync(&result[img_linear_idx_start], dimage[dev], num_bytes_img_curr, cudaMemcpyDeviceToHost,stream[dev*nStreamDevice+1]);
        }
        for (dev = 0; dev < deviceCount; dev++){
            cudaSetDevice(gpuids[dev]);
            cudaDeviceSynchronize();
            cudaCheckErrors("Main loop fail");
        }
    } // end image splits

    ///////// Cleaning:
    bool two_buffers_used=((((nalpha+split_projections-1)/split_projections)+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL)>1;
    for(unsigned int i=0; i<2;i++){ // 2 buffers (if needed, maybe only 1)
        if (!two_buffers_used && i==1)
            break;
        for (dev = 0; dev < deviceCount; dev++){
            cudaSetDevice(gpuids[dev]);
            cudaDestroyTextureObject(texProj[i*deviceCount+dev]);
            cudaFreeArray(d_cuArrTex[i*deviceCount+dev]);
        }
    }
    cudaCheckErrors("cudadestroy textures result fail");

    for (dev = 0; dev < deviceCount; dev++){
        cudaSetDevice(gpuids[dev]);
        cudaFree(dimage[dev]);
    }
    cudaFreeHost(projSinCosArrayHost);
    cudaFreeHost(projParamsArrayHost);
    free(partial_projection);
    free(proj_split_size);

    freeGeoArray(split_image*deviceCount,geoArray);
#ifndef NO_PINNED_MEMORY
    if (isHostRegisterSupported && (split_image > 1 || deviceCount > 1)){
        cudaHostUnregister(result);
    }
    if (isHostRegisterSupported){
        cudaHostUnregister(projections);
    }
#endif

    for (int i = 0; i < nStreams; ++i)
        cudaStreamDestroy(stream[i]);
    cudaCheckErrors("cudaFree fail");
    cudaDeviceReset(); // For the Nvidia Visual Profiler
    return 0;
}  // END voxel_backprojection

//
void splitCTbackprojection(const GpuIds& gpuids, Geometry geo,int nalpha, unsigned int* split_image, unsigned int * split_projections){
    // We don't know if the devices are being used. Let's check that, and only use the amount of memory we need.
    size_t mem_GPU_global;
    checkFreeMemory(gpuids, &mem_GPU_global);
    const int deviceCount = gpuids.GetLength();

    // Compute how much memory each of the relevant memory pieces needs
    size_t mem_image=       (unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float);
    size_t mem_proj=        (unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV*sizeof(float);

    // Does everything fit in the GPU?
    if(mem_image/deviceCount+mem_proj*PROJ_PER_KERNEL*2<mem_GPU_global){
        // No need to split
        *split_image=1;
        *split_projections=1;
    }
    // We know we need to split, but:
    // Does all the image fit in the GPU, with some slack for a stack of projections?
    else
    {
        // As we can overlap H2D memcpys of the projections, we should then minimize the amount of image splits.
        // Let's assume to start with that we only need 1 stack of PROJ_PER_KERNEL projections. The rest is for the image.
        size_t mem_free=mem_GPU_global-2*mem_proj*PROJ_PER_KERNEL;

        *split_image=(mem_image/deviceCount+mem_free-1)/mem_free;
        // Now knowing how many splits we have for images, we can recompute how many slices of projections actually
        // fit on the GPU. Must be more than 0 obviously.

        mem_free=mem_GPU_global-(mem_image/deviceCount)/(*split_image); // NOTE: There is some rounding error, but it's in the order of bytes, and we have 5% of the GPU free just in case.
        // We are safe.
        *split_projections=(mem_proj*PROJ_PER_KERNEL*2+mem_free-1)/mem_free;
    }
}

void CreateTexture(const GpuIds& gpuids, float* projectiondata,Geometry geo,cudaArray** d_cuArrTex,unsigned int nangles, cudaTextureObject_t *texImage,cudaStream_t* stream,int nStreamDevice,bool allocate){
    //size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ;
#if IS_FOR_MATLAB_TIGRE
    const cudaExtent extent =make_cudaExtent(geo.nDetecV, geo.nDetecU, nangles);
#else
    const cudaExtent extent =make_cudaExtent(geo.nDetecU, geo.nDetecV, nangles);
#endif
    const unsigned int num_devices = gpuids.GetLength();
    if (allocate){
        for (unsigned int dev = 0; dev < num_devices; dev++){
            cudaSetDevice(gpuids[dev]);
            //cudaArray Descriptor
            cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
            //cuda Array
            cudaMalloc3DArray(&d_cuArrTex[dev], &channelDesc, extent);
        }
    }
    for (unsigned int dev = 0; dev < num_devices; dev++){
        cudaSetDevice(gpuids[dev]);
        cudaMemcpy3DParms copyParams = {0};
        //Array creation
        copyParams.srcPtr   = make_cudaPitchedPtr((void *)projectiondata, extent.width*sizeof(float), extent.width, extent.height);
        copyParams.dstArray = d_cuArrTex[dev];
        copyParams.extent   = extent;
        copyParams.kind     = cudaMemcpyHostToDevice;
        cudaMemcpy3DAsync(&copyParams,stream[dev*nStreamDevice+1]);
    }
    //Array creation End
    for (unsigned int dev = 0; dev < num_devices; dev++){
        cudaSetDevice(gpuids[dev]);
        cudaResourceDesc    texRes;
        memset(&texRes, 0, sizeof(cudaResourceDesc));
        texRes.resType = cudaResourceTypeArray;
        texRes.res.array.array  = d_cuArrTex[dev];
        cudaTextureDesc     texDescr;
        memset(&texDescr, 0, sizeof(cudaTextureDesc));
        texDescr.normalizedCoords = false;
        texDescr.filterMode = cudaFilterModeLinear;
        texDescr.addressMode[0] = cudaAddressModeBorder;
        texDescr.addressMode[1] = cudaAddressModeBorder;
        texDescr.addressMode[2] = cudaAddressModeBorder;
        texDescr.readMode = cudaReadModeElementType;
        cudaCreateTextureObject(&texImage[dev], &texRes, &texDescr, NULL);
    }
}

//______________________________________________________________________________
//
//      Function:       createGeoArray
//
//      Description:    This code generates the geometries needed to split the image properly in
//                      cases where the entire image does not fit in the memory of the GPU
//______________________________________________________________________________
void createGeoArray(unsigned int image_splits, Geometry geo,Geometry* geoArray, unsigned int nangles){
    unsigned int splitsize=(geo.nVoxelZ+image_splits-1)/image_splits;

    for(unsigned int sp=0;sp<image_splits;sp++){
        geoArray[sp]=geo;
        // All of them are splitsize, except possibly the last one
        geoArray[sp].nVoxelZ=((sp+1)*splitsize<geo.nVoxelZ)?  splitsize:  max(geo.nVoxelZ-splitsize*sp,0);
        geoArray[sp].sVoxelZ= geoArray[sp].nVoxelZ* geoArray[sp].dVoxelZ;

        // We need to redefine the offsets, as now each subimage is not aligned in the origin.
        geoArray[sp].offOrigZ=(float *)malloc(nangles*sizeof(float));
        for (unsigned int i=0;i<nangles;i++){
            geoArray[sp].offOrigZ[i]=geo.offOrigZ[i]-geo.sVoxelZ/2+sp*geoArray[0].sVoxelZ+geoArray[sp].sVoxelZ/2;
        }
    }
}

//______________________________________________________________________________
//
//      Function:       freeGeoArray
//
//      Description:    Frees the memory from the geometry array for multiGPU.
//______________________________________________________________________________
void freeGeoArray(unsigned int splits,Geometry* geoArray){
    for(unsigned int sp=0;sp<splits;sp++){
        free(geoArray[sp].offOrigZ);
    }
    free(geoArray);
}

//______________________________________________________________________________
//
//      Function:       computeDeltasCube
//
//      Description:    Computes relative increments for each projection (volume rotation).
//                      Increments get passed to the backprojection kernel.
//______________________________________________________________________________
void computeDeltasCube(Geometry geo,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D* S)
{
    Point3D P, Px,Py,Pz;
    // Get coords of Img(0,0,0)
    P.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i];
    P.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i];
    P.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i];

    // Get coords of the next voxel in each direction
    Px.x=P.x+geo.dVoxelX;       Py.x=P.x;                Pz.x=P.x;
    Px.y=P.y;                   Py.y=P.y+geo.dVoxelY;    Pz.y=P.y;
    Px.z=P.z;                   Py.z=P.z;                Pz.z=P.z+geo.dVoxelZ;

    // Rotate image around the X axis (this is equivalent to rotating the source and detector) RZ RY RZ
    eulerZYZT(geo,&P);
    eulerZYZT(geo,&Px);
    eulerZYZT(geo,&Py);
    eulerZYZT(geo,&Pz);

    //detector offset
    P.z =P.z-geo.offDetecV[i];          P.y =P.y-geo.offDetecU[i];
    Px.z =Px.z-geo.offDetecV[i];        Px.y =Px.y-geo.offDetecU[i];
    Py.z =Py.z-geo.offDetecV[i];        Py.y =Py.y-geo.offDetecU[i];
    Pz.z =Pz.z-geo.offDetecV[i];        Pz.y =Pz.y-geo.offDetecU[i];

    //Detector Roll pitch Yaw
    //
    //
    // First, we need to offset everything so (0,0,0) is the center of the detector
    // Only X is required for that
    P.x=P.x+(geo.DSD[i]-geo.DSO[i]);
    Px.x=Px.x+(geo.DSD[i]-geo.DSO[i]);
    Py.x=Py.x+(geo.DSD[i]-geo.DSO[i]);
    Pz.x=Pz.x+(geo.DSD[i]-geo.DSO[i]);

    rollPitchYawT(geo,i,&P);
    rollPitchYawT(geo,i,&Px);
    rollPitchYawT(geo,i,&Py);
    rollPitchYawT(geo,i,&Pz);

    P.x=P.x-(geo.DSD[i]-geo.DSO[i]);
    Px.x=Px.x-(geo.DSD[i]-geo.DSO[i]);
    Py.x=Py.x-(geo.DSD[i]-geo.DSO[i]);
    Pz.x=Pz.x-(geo.DSD[i]-geo.DSO[i]);
    //Done for P, now source

    Point3D source;
    source.x=geo.DSD[i]; //already offset for rotation
    source.y=-geo.offDetecU[i];
    source.z=-geo.offDetecV[i];

    rollPitchYawT(geo,i,&source);
    source.x=source.x-(geo.DSD[i]-geo.DSO[i]);

    //      source.y=source.y-auxOff.y;    source.z=source.z-auxOff.z;

    //      mexPrintf("%f,%f,%f\n",source.x,source.y,source.z);
    // Scale coords so detector pixels are 1x1
    P.z =P.z /geo.dDetecV;                          P.y =P.y/geo.dDetecU;
    Px.z=Px.z/geo.dDetecV;                          Px.y=Px.y/geo.dDetecU;
    Py.z=Py.z/geo.dDetecV;                          Py.y=Py.y/geo.dDetecU;
    Pz.z=Pz.z/geo.dDetecV;                          Pz.y=Pz.y/geo.dDetecU;

    source.z=source.z/geo.dDetecV;                  source.y=source.y/geo.dDetecU;

    // Get deltas of the changes in voxels
    deltaX->x=Px.x-P.x;   deltaX->y=Px.y-P.y;    deltaX->z=Px.z-P.z;
    deltaY->x=Py.x-P.x;   deltaY->y=Py.y-P.y;    deltaY->z=Py.z-P.z;
    deltaZ->x=Pz.x-P.x;   deltaZ->y=Pz.y-P.y;    deltaZ->z=Pz.z-P.z;

    *xyzorigin=P;
    *S=source;
}

void eulerZYZT(Geometry geo, Point3D* point){
    Point3D auxPoint;
    auxPoint.x=point->x;
    auxPoint.y=point->y;
    auxPoint.z=point->z;

    point->x = auxPoint.x*(cos(geo.psi)*cos(geo.theta)*cos(geo.alpha)-sin(geo.psi)*sin(geo.alpha))
            +auxPoint.y*(-cos(geo.psi)*cos(geo.theta)*sin(geo.alpha)-sin(geo.psi)*cos(geo.alpha))
            +auxPoint.z*cos(geo.psi)*sin(geo.theta);
    point->y = auxPoint.x*(sin(geo.psi)*cos(geo.theta)*cos(geo.alpha)+cos(geo.psi)*sin(geo.alpha))
            +auxPoint.y*(-sin(geo.psi)*cos(geo.theta)*sin(geo.alpha)+cos(geo.psi)*cos(geo.alpha))
            +auxPoint.z*sin(geo.psi)*sin(geo.theta);
    point->z =-auxPoint.x*sin(geo.theta)*cos(geo.alpha)
            +auxPoint.y*sin(geo.theta)*sin(geo.alpha)
            +auxPoint.z*cos(geo.theta);
}

void rollPitchYawT(Geometry geo,int i, Point3D* point){
    Point3D auxPoint;
    auxPoint.x=point->x;
    auxPoint.y=point->y;
    auxPoint.z=point->z;

    point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x
            +sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.y
            -sin(geo.dPitch[i])*auxPoint.z;

    point->y=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.x
            +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y
            +cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.z;

    point->z=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.x
            +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.y
            +cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z;
}

void checkFreeMemory(const GpuIds& gpuids,size_t *mem_GPU_global){
    size_t memfree;
    size_t memtotal;
    const int deviceCount = gpuids.GetLength();

    for (int dev = 0; dev < deviceCount; dev++){
        cudaSetDevice(gpuids[dev]);
        cudaMemGetInfo(&memfree,&memtotal);
        if(dev==0) *mem_GPU_global=memfree;
        if(memfree<memtotal/2){
            mexErrMsgIdAndTxt("Atb:Voxel_backprojection:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n");
        }
        cudaCheckErrors("Check mem error");

        *mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global;
    }
    *mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95); // 5% safety margin
    //*mem_GPU_global= insert your known number here, in bytes.
}
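// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the original TIGRE source] A small,
// compilable sketch that reproduces the ceil-division arithmetic of
// splitCTbackprojection() above for one hypothetical configuration. All sizes
// below are made up for illustration; the function is unused by the library.
static void exampleSplitArithmetic(void){
    size_t mem_image = 512ull*512ull*512ull*sizeof(float); // whole 512^3 float volume, 512 MB
    size_t mem_proj  = 512ull*512ull*sizeof(float);        // one 512x512 projection, 1 MB
    size_t mem_gpu   = (size_t)1<<30;                      // hypothetical 1 GB usable budget
    int deviceCount  = 1;
    unsigned int split_image, split_projections;
    if (mem_image/deviceCount + 2*mem_proj*PROJ_PER_KERNEL < mem_gpu){
        split_image = 1; split_projections = 1;            // everything fits at once
    } else {
        // Reserve two stacks of PROJ_PER_KERNEL projections (double buffering), split the rest.
        size_t mem_free = mem_gpu - 2*mem_proj*PROJ_PER_KERNEL;
        split_image = (unsigned int)((mem_image/deviceCount + mem_free - 1)/mem_free);          // ceil
        mem_free = mem_gpu - (mem_image/deviceCount)/split_image;
        split_projections = (unsigned int)((2*mem_proj*PROJ_PER_KERNEL + mem_free - 1)/mem_free);
    }
    mexPrintf("split_image=%u split_projections=%u\n", split_image, split_projections);
    // With these numbers 512 MB + 64 MB of projection buffers fits in 1 GB, so both splits
    // stay 1; shrink mem_gpu below ~576 MB to see the ceil-divisions kick in.
}
// ---------------------------------------------------------------------------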
5716a38174a68373c6c9cd520b6d4b10fed318c5.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <hip/hip_runtime.h> #include <gtest/gtest.h> #include <random> #include <vector> #include "dali/core/mm/async_pool.h" #include "dali/core/dev_buffer.h" #include "dali/core/mm/mm_test_utils.h" #include "dali/core/cuda_stream.h" #include "dali/core/mm/cuda_vm_resource.h" namespace dali { namespace mm { struct GPUHog { ~GPUHog() { if (mem) { CUDA_DTOR_CALL(hipFree(mem)); mem = nullptr; } } void init() { if (!mem) CUDA_CALL(hipMalloc(&mem, size)); } void run(hipStream_t stream, int count = 1) { for (int i = 0; i < count; i++) { CUDA_CALL(hipMemsetAsync(mem, i+1, size, stream)); } } uint8_t *mem = nullptr; size_t size = 16<<20; }; TEST(MMAsyncPool, SingleStreamReuse) { GPUHog hog; hog.init(); HIPStreamMasqueradingAsCUDA stream = HIPStreamMasqueradingAsCUDA::Create(true); test::test_device_resource upstream; async_pool_resource<memory_kind::device> pool(&upstream); stream_view sv(stream); int size1 = 1<<20; void *ptr = pool.allocate_async(size1, sv); hog.run(stream, 2); pool.deallocate_async(ptr, size1, sv); void *p2 = pool.allocate_async(size1, sv); CUDA_CALL(hipStreamSynchronize(stream)); EXPECT_EQ(ptr, p2); } TEST(MMAsyncPool, TwoStream) { mm::test::test_device_resource upstream; HIPStreamMasqueradingAsCUDA s1 = HIPStreamMasqueradingAsCUDA::Create(true); HIPStreamMasqueradingAsCUDA s2 = HIPStreamMasqueradingAsCUDA::Create(true); stream_view sv1(s1); stream_view sv2(s2); GPUHog hog; hog.init(); const int min_success = 10; const int max_not_busy = 100; int stream_not_busy = 0; int success = 0; while (success < min_success) { async_pool_resource<memory_kind::device> pool(&upstream); void *p1 = pool.allocate_async(1000, sv1); hog.run(s1); pool.deallocate_async(p1, 1000, sv1); void *p2 = pool.allocate_async(1000, sv2); void *p3 = pool.allocate_async(1000, sv1); hipError_t e = hipStreamQuery(s1); if (e != hipErrorNotReady) { std::cerr << "Stream s1 finished before attempt to allocate on s2 was made - retrying\n"; CUDA_CALL(hipGetLastError()); if (++stream_not_busy > max_not_busy) { FAIL() << "Stream s1 finished - test unreliable."; } continue; } stream_not_busy = 0; ASSERT_NE(p1, p2); ASSERT_EQ(p1, p3); CUDA_CALL(hipStreamSynchronize(s1)); success++; CUDA_CALL(hipStreamSynchronize(s2)); } std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } namespace { __global__ void Check(const void *ptr, size_t size, uint8_t fill, int *failures) { size_t idx = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x; if (idx < size) { if (static_cast<const uint8_t*>(ptr)[idx] != fill) atomicAdd(failures, 1); } } struct block { void *ptr; size_t size; uint8_t fill; hipStream_t stream; }; template <typename Pool, typename Mutex> void AsyncPoolTest(Pool &pool, vector<block> &blocks, Mutex &mtx, 
HIPStreamMasqueradingAsCUDA &stream, int max_iters = 20000, bool use_hog = false) { stream_view sv(stream); std::mt19937_64 rng(12345); std::poisson_distribution<> size_dist(1024); const int max_size = 1 << 20; std::uniform_int_distribution<> sync_dist(10, 10); std::bernoulli_distribution action_dist; std::bernoulli_distribution hog_dist(0.05f); std::uniform_int_distribution<> fill_dist(1, 255); DeviceBuffer<int> failure_buf; int failures = 0; failure_buf.from_host(&failures, 1, sv.get()); GPUHog hog; if (use_hog) hog.init(); int hogs = 0; int max_hogs = sync_dist(rng); CUDAEvent event = CUDAEvent::Create(); for (int i = 0; i < max_iters; i++) { if (use_hog && hog_dist(rng)) { if (hogs++ > max_hogs) { CUDA_CALL(hipStreamSynchronize(stream)); max_hogs = sync_dist(rng); } hog.run(stream); } if (action_dist(rng) || blocks.empty()) { size_t size; do { size = size_dist(rng); } while (size > max_size); uint8_t fill = fill_dist(rng); void *ptr = stream ? pool.allocate_async(size, sv) : pool.allocate(size); CUDA_CALL(hipMemsetAsync(ptr, fill, size, stream)); { std::lock_guard<Mutex> guard(mtx); (void)guard; // for dummy mutexes blocks.push_back({ ptr, size, fill, stream }); } } else { block blk; { std::lock_guard<Mutex> guard(mtx); (void)guard; // for dummy mutexes if (blocks.empty()) continue; int i = std::uniform_int_distribution<>(0, blocks.size()-1)(rng); std::swap(blocks[i], blocks.back()); blk = blocks.back(); blocks.pop_back(); } if (blk.stream != stream) { if (stream) { CUDA_CALL(hipEventRecord(event, blk.stream)); CUDA_CALL(hipStreamWaitEvent(stream, event, 0)); } else { CUDA_CALL(hipStreamSynchronize(blk.stream)); } } hipLaunchKernelGGL(( Check), dim3(div_ceil(blk.size, 1024)), dim3(1024), 0, stream, blk.ptr, blk.size, blk.fill, failure_buf); if (stream) { pool.deallocate_async(blk.ptr, blk.size, sv); } else { CUDA_CALL(hipStreamSynchronize(stream)); pool.deallocate(blk.ptr, blk.size); } } } copyD2H<int>(&failures, failure_buf, 1, AccessOrder(stream)); CUDA_CALL(hipStreamSynchronize(stream)); ASSERT_EQ(failures, 0); } } // namespace TEST(MMAsyncPool, SingleStreamRandom) { HIPStreamMasqueradingAsCUDA stream = HIPStreamMasqueradingAsCUDA::Create(true); test::test_device_resource upstream; { async_pool_resource<memory_kind::device> pool(&upstream); vector<block> blocks; detail::dummy_lock mtx; AsyncPoolTest(pool, blocks, mtx, stream); } CUDA_CALL(hipStreamSynchronize(stream)); std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } TEST(MMAsyncPool, MultiThreadedSingleStreamRandom) { HIPStreamMasqueradingAsCUDA stream = HIPStreamMasqueradingAsCUDA::Create(true); mm::test::test_device_resource upstream; { vector<block> blocks; std::mutex mtx; async_pool_resource<memory_kind::device> pool(&upstream); vector<std::thread> threads; for (int t = 0; t < 10; t++) { threads.push_back(std::thread([&]() { AsyncPoolTest(pool, blocks, mtx, stream); })); } for (auto &t : threads) t.join(); } CUDA_CALL(hipStreamSynchronize(stream)); std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } TEST(MMAsyncPool, MultiThreadedMultiStreamRandom) { mm::test::test_device_resource upstream; { async_pool_resource<memory_kind::device> pool(&upstream); vector<std::thread> threads; for (int t = 0; t < 10; t++) { threads.push_back(std::thread([&]() { 
HIPStreamMasqueradingAsCUDA stream = HIPStreamMasqueradingAsCUDA::Create(true); vector<block> blocks; detail::dummy_lock mtx; AsyncPoolTest(pool, blocks, mtx, stream); CUDA_CALL(hipStreamSynchronize(stream)); })); } for (auto &t : threads) t.join(); } std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } TEST(MMAsyncPool, MultiStreamRandomWithGPUHogs) { mm::test::test_device_resource upstream; { async_pool_resource<memory_kind::device> pool(&upstream, false); vector<std::thread> threads; for (int t = 0; t < 10; t++) { threads.push_back(std::thread([&]() { // 0-th thread uses null stream, which triggers non-async API usage HIPStreamMasqueradingAsCUDA stream = t ? HIPStreamMasqueradingAsCUDA::Create(true) : HIPStreamMasqueradingAsCUDA(); vector<block> blocks; detail::dummy_lock mtx; AsyncPoolTest(pool, blocks, mtx, stream, 20000, true); CUDA_CALL(hipStreamSynchronize(stream)); })); } for (auto &t : threads) t.join(); } std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } TEST(MMAsyncPool, CrossStream) { mm::test::test_device_resource upstream; { async_pool_resource<memory_kind::device> pool(&upstream, false); vector<std::thread> threads; vector<HIPStreamMasqueradingAsCUDA> streams; vector<block> blocks; std::mutex mtx; const int N = 10; streams.resize(N); for (int t = 0; t < N; t++) { if (t != 0) // keep empty stream at index 0 to mix sync/async allocations streams[t] = HIPStreamMasqueradingAsCUDA::Create(true); threads.push_back(std::thread([&, t]() { AsyncPoolTest(pool, blocks, mtx, streams[t]); CUDA_CALL(hipStreamSynchronize(streams[t])); })); } for (auto &t : threads) t.join(); } std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } TEST(MMAsyncPool, CrossStreamWithHogs) { mm::test::test_device_resource upstream; { async_pool_resource<memory_kind::device> pool(&upstream); vector<std::thread> threads; vector<HIPStreamMasqueradingAsCUDA> streams; vector<block> blocks; std::mutex mtx; const int N = 10; streams.resize(N); for (int t = 0; t < N; t++) { if (t != 0) // keep empty stream at index 0 to mix sync/async allocations streams[t] = HIPStreamMasqueradingAsCUDA::Create(true); threads.push_back(std::thread([&, t]() { AsyncPoolTest(pool, blocks, mtx, streams[t], 10000, true); CUDA_CALL(hipStreamSynchronize(streams[t])); })); } for (auto &t : threads) t.join(); } std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } #if DALI_USE_CUDA_VM_MAP TEST(MM_VMAsyncPool, MultiThreadedSingleStreamRandom) { if (!cuvm::IsSupported()) GTEST_SKIP() << "Virtual memory management API is not supported on this machine."; HIPStreamMasqueradingAsCUDA stream = HIPStreamMasqueradingAsCUDA::Create(true); { vector<block> blocks; std::mutex mtx; async_pool_resource<memory_kind::device, cuda_vm_resource> pool; vector<std::thread> threads; for (int t = 0; t < 10; t++) { threads.push_back(std::thread([&]() { AsyncPoolTest(pool, blocks, mtx, stream); })); } for (auto &t : threads) t.join(); } } TEST(MM_VMAsyncPool, MultiThreadedMultiStreamRandom) { if (!cuvm::IsSupported()) GTEST_SKIP() << "Virtual memory 
management API is not supported on this machine."; async_pool_resource<memory_kind::device, cuda_vm_resource> pool; vector<std::thread> threads; for (int t = 0; t < 10; t++) { threads.push_back(std::thread([&]() { HIPStreamMasqueradingAsCUDA stream = HIPStreamMasqueradingAsCUDA::Create(true); vector<block> blocks; detail::dummy_lock mtx; AsyncPoolTest(pool, blocks, mtx, stream); CUDA_CALL(hipStreamSynchronize(stream)); })); } for (auto &t : threads) t.join(); } TEST(MM_VMAsyncPool, MultiStreamRandomWithGPUHogs) { if (!cuvm::IsSupported()) GTEST_SKIP() << "Virtual memory management API is not supported on this machine."; async_pool_resource<memory_kind::device, cuda_vm_resource> pool; vector<std::thread> threads; for (int t = 0; t < 10; t++) { threads.push_back(std::thread([&]() { // 0-th thread uses null stream, which triggers non-async API usage HIPStreamMasqueradingAsCUDA stream = t ? HIPStreamMasqueradingAsCUDA::Create(true) : HIPStreamMasqueradingAsCUDA(); vector<block> blocks; detail::dummy_lock mtx; AsyncPoolTest(pool, blocks, mtx, stream, 20000, true); CUDA_CALL(hipStreamSynchronize(stream)); })); } for (auto &t : threads) t.join(); } TEST(MM_VMAsyncPool, CrossStream) { if (!cuvm::IsSupported()) GTEST_SKIP() << "Virtual memory management API is not supported on this machine."; async_pool_resource<memory_kind::device, cuda_vm_resource> pool; vector<std::thread> threads; vector<HIPStreamMasqueradingAsCUDA> streams; vector<block> blocks; std::mutex mtx; const int N = 10; streams.resize(N); for (int t = 0; t < N; t++) { if (t != 0) // keep empty stream at index 0 to mix sync/async allocations streams[t] = HIPStreamMasqueradingAsCUDA::Create(true); threads.push_back(std::thread([&, t]() { AsyncPoolTest(pool, blocks, mtx, streams[t]); CUDA_CALL(hipStreamSynchronize(streams[t])); })); } for (auto &t : threads) t.join(); } TEST(MM_VMAsyncPool, CrossStreamWithHogs) { if (!cuvm::IsSupported()) GTEST_SKIP() << "Virtual memory management API is not supported on this machine."; async_pool_resource<memory_kind::device, cuda_vm_resource> pool; vector<std::thread> threads; vector<HIPStreamMasqueradingAsCUDA> streams; vector<block> blocks; std::mutex mtx; const int N = 10; streams.resize(N); for (int t = 0; t < N; t++) { if (t != 0) // keep empty stream at index 0 to mix sync/async allocations streams[t] = HIPStreamMasqueradingAsCUDA::Create(true); threads.push_back(std::thread([&, t]() { AsyncPoolTest(pool, blocks, mtx, streams[t], 10000, true); CUDA_CALL(hipStreamSynchronize(streams[t])); })); } for (auto &t : threads) t.join(); } #endif } // namespace mm } // namespace dali
5716a38174a68373c6c9cd520b6d4b10fed318c5.cu
// Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <cuda_runtime.h> #include <gtest/gtest.h> #include <random> #include <vector> #include "dali/core/mm/async_pool.h" #include "dali/core/dev_buffer.h" #include "dali/core/mm/mm_test_utils.h" #include "dali/core/cuda_stream.h" #include "dali/core/mm/cuda_vm_resource.h" namespace dali { namespace mm { struct GPUHog { ~GPUHog() { if (mem) { CUDA_DTOR_CALL(cudaFree(mem)); mem = nullptr; } } void init() { if (!mem) CUDA_CALL(cudaMalloc(&mem, size)); } void run(cudaStream_t stream, int count = 1) { for (int i = 0; i < count; i++) { CUDA_CALL(cudaMemsetAsync(mem, i+1, size, stream)); } } uint8_t *mem = nullptr; size_t size = 16<<20; }; TEST(MMAsyncPool, SingleStreamReuse) { GPUHog hog; hog.init(); CUDAStream stream = CUDAStream::Create(true); test::test_device_resource upstream; async_pool_resource<memory_kind::device> pool(&upstream); stream_view sv(stream); int size1 = 1<<20; void *ptr = pool.allocate_async(size1, sv); hog.run(stream, 2); pool.deallocate_async(ptr, size1, sv); void *p2 = pool.allocate_async(size1, sv); CUDA_CALL(cudaStreamSynchronize(stream)); EXPECT_EQ(ptr, p2); } TEST(MMAsyncPool, TwoStream) { mm::test::test_device_resource upstream; CUDAStream s1 = CUDAStream::Create(true); CUDAStream s2 = CUDAStream::Create(true); stream_view sv1(s1); stream_view sv2(s2); GPUHog hog; hog.init(); const int min_success = 10; const int max_not_busy = 100; int stream_not_busy = 0; int success = 0; while (success < min_success) { async_pool_resource<memory_kind::device> pool(&upstream); void *p1 = pool.allocate_async(1000, sv1); hog.run(s1); pool.deallocate_async(p1, 1000, sv1); void *p2 = pool.allocate_async(1000, sv2); void *p3 = pool.allocate_async(1000, sv1); cudaError_t e = cudaStreamQuery(s1); if (e != cudaErrorNotReady) { std::cerr << "Stream s1 finished before attempt to allocate on s2 was made - retrying\n"; CUDA_CALL(cudaGetLastError()); if (++stream_not_busy > max_not_busy) { FAIL() << "Stream s1 finished - test unreliable."; } continue; } stream_not_busy = 0; ASSERT_NE(p1, p2); ASSERT_EQ(p1, p3); CUDA_CALL(cudaStreamSynchronize(s1)); success++; CUDA_CALL(cudaStreamSynchronize(s2)); } std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } namespace { __global__ void Check(const void *ptr, size_t size, uint8_t fill, int *failures) { size_t idx = static_cast<size_t>(blockIdx.x) * blockDim.x + threadIdx.x; if (idx < size) { if (static_cast<const uint8_t*>(ptr)[idx] != fill) atomicAdd(failures, 1); } } struct block { void *ptr; size_t size; uint8_t fill; cudaStream_t stream; }; template <typename Pool, typename Mutex> void AsyncPoolTest(Pool &pool, vector<block> &blocks, Mutex &mtx, CUDAStream &stream, int max_iters = 20000, bool use_hog = false) { stream_view sv(stream); std::mt19937_64 rng(12345); std::poisson_distribution<> 
size_dist(1024); const int max_size = 1 << 20; std::uniform_int_distribution<> sync_dist(10, 10); std::bernoulli_distribution action_dist; std::bernoulli_distribution hog_dist(0.05f); std::uniform_int_distribution<> fill_dist(1, 255); DeviceBuffer<int> failure_buf; int failures = 0; failure_buf.from_host(&failures, 1, sv.get()); GPUHog hog; if (use_hog) hog.init(); int hogs = 0; int max_hogs = sync_dist(rng); CUDAEvent event = CUDAEvent::Create(); for (int i = 0; i < max_iters; i++) { if (use_hog && hog_dist(rng)) { if (hogs++ > max_hogs) { CUDA_CALL(cudaStreamSynchronize(stream)); max_hogs = sync_dist(rng); } hog.run(stream); } if (action_dist(rng) || blocks.empty()) { size_t size; do { size = size_dist(rng); } while (size > max_size); uint8_t fill = fill_dist(rng); void *ptr = stream ? pool.allocate_async(size, sv) : pool.allocate(size); CUDA_CALL(cudaMemsetAsync(ptr, fill, size, stream)); { std::lock_guard<Mutex> guard(mtx); (void)guard; // for dummy mutexes blocks.push_back({ ptr, size, fill, stream }); } } else { block blk; { std::lock_guard<Mutex> guard(mtx); (void)guard; // for dummy mutexes if (blocks.empty()) continue; int i = std::uniform_int_distribution<>(0, blocks.size()-1)(rng); std::swap(blocks[i], blocks.back()); blk = blocks.back(); blocks.pop_back(); } if (blk.stream != stream) { if (stream) { CUDA_CALL(cudaEventRecord(event, blk.stream)); CUDA_CALL(cudaStreamWaitEvent(stream, event, 0)); } else { CUDA_CALL(cudaStreamSynchronize(blk.stream)); } } Check<<<div_ceil(blk.size, 1024), 1024, 0, stream>>>( blk.ptr, blk.size, blk.fill, failure_buf); if (stream) { pool.deallocate_async(blk.ptr, blk.size, sv); } else { CUDA_CALL(cudaStreamSynchronize(stream)); pool.deallocate(blk.ptr, blk.size); } } } copyD2H<int>(&failures, failure_buf, 1, AccessOrder(stream)); CUDA_CALL(cudaStreamSynchronize(stream)); ASSERT_EQ(failures, 0); } } // namespace TEST(MMAsyncPool, SingleStreamRandom) { CUDAStream stream = CUDAStream::Create(true); test::test_device_resource upstream; { async_pool_resource<memory_kind::device> pool(&upstream); vector<block> blocks; detail::dummy_lock mtx; AsyncPoolTest(pool, blocks, mtx, stream); } CUDA_CALL(cudaStreamSynchronize(stream)); std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } TEST(MMAsyncPool, MultiThreadedSingleStreamRandom) { CUDAStream stream = CUDAStream::Create(true); mm::test::test_device_resource upstream; { vector<block> blocks; std::mutex mtx; async_pool_resource<memory_kind::device> pool(&upstream); vector<std::thread> threads; for (int t = 0; t < 10; t++) { threads.push_back(std::thread([&]() { AsyncPoolTest(pool, blocks, mtx, stream); })); } for (auto &t : threads) t.join(); } CUDA_CALL(cudaStreamSynchronize(stream)); std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } TEST(MMAsyncPool, MultiThreadedMultiStreamRandom) { mm::test::test_device_resource upstream; { async_pool_resource<memory_kind::device> pool(&upstream); vector<std::thread> threads; for (int t = 0; t < 10; t++) { threads.push_back(std::thread([&]() { CUDAStream stream = CUDAStream::Create(true); vector<block> blocks; detail::dummy_lock mtx; AsyncPoolTest(pool, blocks, mtx, stream); CUDA_CALL(cudaStreamSynchronize(stream)); })); } for (auto &t : threads) t.join(); } std::cerr << "Peak consumption: " << 
upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } TEST(MMAsyncPool, MultiStreamRandomWithGPUHogs) { mm::test::test_device_resource upstream; { async_pool_resource<memory_kind::device> pool(&upstream, false); vector<std::thread> threads; for (int t = 0; t < 10; t++) { threads.push_back(std::thread([&]() { // 0-th thread uses null stream, which triggers non-async API usage CUDAStream stream = t ? CUDAStream::Create(true) : CUDAStream(); vector<block> blocks; detail::dummy_lock mtx; AsyncPoolTest(pool, blocks, mtx, stream, 20000, true); CUDA_CALL(cudaStreamSynchronize(stream)); })); } for (auto &t : threads) t.join(); } std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } TEST(MMAsyncPool, CrossStream) { mm::test::test_device_resource upstream; { async_pool_resource<memory_kind::device> pool(&upstream, false); vector<std::thread> threads; vector<CUDAStream> streams; vector<block> blocks; std::mutex mtx; const int N = 10; streams.resize(N); for (int t = 0; t < N; t++) { if (t != 0) // keep empty stream at index 0 to mix sync/async allocations streams[t] = CUDAStream::Create(true); threads.push_back(std::thread([&, t]() { AsyncPoolTest(pool, blocks, mtx, streams[t]); CUDA_CALL(cudaStreamSynchronize(streams[t])); })); } for (auto &t : threads) t.join(); } std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } TEST(MMAsyncPool, CrossStreamWithHogs) { mm::test::test_device_resource upstream; { async_pool_resource<memory_kind::device> pool(&upstream); vector<std::thread> threads; vector<CUDAStream> streams; vector<block> blocks; std::mutex mtx; const int N = 10; streams.resize(N); for (int t = 0; t < N; t++) { if (t != 0) // keep empty stream at index 0 to mix sync/async allocations streams[t] = CUDAStream::Create(true); threads.push_back(std::thread([&, t]() { AsyncPoolTest(pool, blocks, mtx, streams[t], 10000, true); CUDA_CALL(cudaStreamSynchronize(streams[t])); })); } for (auto &t : threads) t.join(); } std::cerr << "Peak consumption: " << upstream.get_peak_size() << " bytes\n"; std::cerr << "Upstream allocations: " << upstream.get_num_allocs() << std::endl; upstream.check_leaks(); } #if DALI_USE_CUDA_VM_MAP TEST(MM_VMAsyncPool, MultiThreadedSingleStreamRandom) { if (!cuvm::IsSupported()) GTEST_SKIP() << "Virtual memory management API is not supported on this machine."; CUDAStream stream = CUDAStream::Create(true); { vector<block> blocks; std::mutex mtx; async_pool_resource<memory_kind::device, cuda_vm_resource> pool; vector<std::thread> threads; for (int t = 0; t < 10; t++) { threads.push_back(std::thread([&]() { AsyncPoolTest(pool, blocks, mtx, stream); })); } for (auto &t : threads) t.join(); } } TEST(MM_VMAsyncPool, MultiThreadedMultiStreamRandom) { if (!cuvm::IsSupported()) GTEST_SKIP() << "Virtual memory management API is not supported on this machine."; async_pool_resource<memory_kind::device, cuda_vm_resource> pool; vector<std::thread> threads; for (int t = 0; t < 10; t++) { threads.push_back(std::thread([&]() { CUDAStream stream = CUDAStream::Create(true); vector<block> blocks; detail::dummy_lock mtx; AsyncPoolTest(pool, blocks, mtx, stream); CUDA_CALL(cudaStreamSynchronize(stream)); })); } for (auto &t : threads) t.join(); } 
TEST(MM_VMAsyncPool, MultiStreamRandomWithGPUHogs) { if (!cuvm::IsSupported()) GTEST_SKIP() << "Virtual memory management API is not supported on this machine."; async_pool_resource<memory_kind::device, cuda_vm_resource> pool; vector<std::thread> threads; for (int t = 0; t < 10; t++) { threads.push_back(std::thread([&]() { // 0-th thread uses null stream, which triggers non-async API usage CUDAStream stream = t ? CUDAStream::Create(true) : CUDAStream(); vector<block> blocks; detail::dummy_lock mtx; AsyncPoolTest(pool, blocks, mtx, stream, 20000, true); CUDA_CALL(cudaStreamSynchronize(stream)); })); } for (auto &t : threads) t.join(); } TEST(MM_VMAsyncPool, CrossStream) { if (!cuvm::IsSupported()) GTEST_SKIP() << "Virtual memory management API is not supported on this machine."; async_pool_resource<memory_kind::device, cuda_vm_resource> pool; vector<std::thread> threads; vector<CUDAStream> streams; vector<block> blocks; std::mutex mtx; const int N = 10; streams.resize(N); for (int t = 0; t < N; t++) { if (t != 0) // keep empty stream at index 0 to mix sync/async allocations streams[t] = CUDAStream::Create(true); threads.push_back(std::thread([&, t]() { AsyncPoolTest(pool, blocks, mtx, streams[t]); CUDA_CALL(cudaStreamSynchronize(streams[t])); })); } for (auto &t : threads) t.join(); } TEST(MM_VMAsyncPool, CrossStreamWithHogs) { if (!cuvm::IsSupported()) GTEST_SKIP() << "Virtual memory management API is not supported on this machine."; async_pool_resource<memory_kind::device, cuda_vm_resource> pool; vector<std::thread> threads; vector<CUDAStream> streams; vector<block> blocks; std::mutex mtx; const int N = 10; streams.resize(N); for (int t = 0; t < N; t++) { if (t != 0) // keep empty stream at index 0 to mix sync/async allocations streams[t] = CUDAStream::Create(true); threads.push_back(std::thread([&, t]() { AsyncPoolTest(pool, blocks, mtx, streams[t], 10000, true); CUDA_CALL(cudaStreamSynchronize(streams[t])); })); } for (auto &t : threads) t.join(); } #endif } // namespace mm } // namespace dali
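// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the test file above): the event-based
// cross-stream hand-off that AsyncPoolTest performs before checking/freeing a
// block whose last work was enqueued on a different stream. Names below
// (producer, consumer) are local to this sketch; only the CUDA runtime calls
// are real API, and error checking is omitted for brevity.
#include <cuda_runtime.h>

static void cross_stream_handoff(cudaStream_t producer, cudaStream_t consumer) {
    cudaEvent_t ev;
    cudaEventCreateWithFlags(&ev, cudaEventDisableTiming);  // timing not needed
    // Mark the point on the producer stream after which its pending work
    // (e.g. the cudaMemsetAsync fill) is complete...
    cudaEventRecord(ev, producer);
    // ...and make the consumer stream wait for that point before proceeding.
    // This orders the two streams on the device without blocking the host,
    // which is why the test only falls back to cudaStreamSynchronize when the
    // destination is the null stream.
    cudaStreamWaitEvent(consumer, ev, 0);
    cudaEventDestroy(ev);  // safe: destruction is deferred until the event completes
}
// ----------------------------------------------------------------------------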
72e2b687d2d3cea64f12eb5c26518e098d4c5c5d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * rabin_karp_v3.cpp * * Created by Varun Pandey on 05-19-2015. * * Copyright (c) 2015 Varun Pandey * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the project's author nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include "../include/common_headers.h" #include "../include/constant.h" __device__ bool d_found; __device__ int remove_byte(int64_t& current_digest, BYTE from_digest, size_t msb_pos_window); __device__ int insert_byte(int64_t& current_digest, BYTE from_digest); /* Cleanup routine run before successful termination of this program * */ void OnExit (void) { std::cout << std::endl << "Terminating Now..." << std::endl; } /* * CUDA kernel; each thread independently scans 2*(size of pattern) bytes * to find the pattern. * Sets d_found to true on success. * */ __global__ void find_pattern (BYTE* data, int64_t pattern_fingerprint, size_t window_size, size_t msb_multiplier) { int msb_pos_window = 0; int64_t text_digest = 0; int start_pos = threadIdx.x; BYTE *window = new BYTE [2*window_size]; if (!window) return; if (start_pos % (window_size*2)) { /* * start by adding window_size bytes to the digest * and try matching the fingerprint for window_size+1 shifts * */ int i = 0; for (; i < window_size; i++) { insert_byte (text_digest, *(data+start_pos+i)); window[i] = *(data+start_pos+i); } for (++i; i < (2*window_size); i++) { if (text_digest == pattern_fingerprint) { d_found = true; break; } remove_byte (text_digest, window[msb_pos_window++], msb_multiplier); window[i] = *(data+start_pos+i); insert_byte (text_digest, *(data+start_pos+i)); } } } /* Assuming the byte is in the msb position, removes * the byte. * returns 0 on success * */ __device__ int remove_byte(int64_t& current_digest, BYTE from_digest, size_t msb_multiplier) { int ret_val = 0; current_digest = current_digest - (msb_multiplier * from_digest); //shift the byte /*After much head banging, I am adding this code. 
Underflowing hash, damn it!*/ while (current_digest < 0) current_digest += PRIME; return ret_val; } /* Insert single byte into the digest * Note: - The first byte will go to the msb position. So data[0] * is at the highest order of the polynomial. Subsequently, * this is the first byte to be kicked out of our window. * Window size = pattern size (for now). For large patterns * proportional to text, window size can be controlled. * returns 0 on success * * */ __device__ int insert_byte(int64_t& current_digest, BYTE to_digest) { int ret_val = 0; current_digest *= RADIX; //shift the byte current_digest += to_digest; current_digest %= PRIME; return ret_val; } int insert_byte_host(int64_t& current_digest, BYTE to_digest, size_t msb_pos_window) { int ret_val = 0; current_digest *= RADIX; //shift the byte current_digest += to_digest; current_digest %= PRIME; return ret_val; } /* Process $size bytes and add them into the digest * returns 0 on success * */ int insert_bytes(int64_t& current_digest, BYTE* data, size_t size) { int ret_val = 0; for (int i = (size_t)0; i < size; i++){ insert_byte_host (current_digest, data[i], msb_multiplier); } return ret_val; } int main (int argc, char ** argv) { typeof(d_found) found = false; size_t i = 0; int64_t pattern_hash = 0; BYTE* read_buf = NULL; BYTE* window = NULL; std::ifstream file_stream; std::streampos stream_pos; if (argc != 3) USAGE_EXIT(argv[0]); atexit (OnExit); try { window_size = strlen(argv[2]); //Pattern size = window size for (i = 1; i < window_size; i++) msb_multiplier = (msb_multiplier*RADIX)%PRIME; // Check if the file exists file_stream.open (argv[1], std::ios::in | std::ios::binary); if (file_stream.bad() || file_stream.fail()) THROW("Please check the file path and try again"); //Get File size size_t fsize = file_stream.tellg(); file_stream.seekg (0, std::ios::end); fsize = file_stream.tellg() - (std::streampos)fsize; if (fsize % window_size) fsize = fsize + (window_size - (fsize % window_size)); //create extra room for padding if needed //reset stream file_stream.clear(); // clear fail and eof bits file_stream.seekg(0, std::ios::beg); // back to the start! read_buf = new BYTE [fsize]; if (!read_buf) THROW("Bad allocation"); memset (read_buf, '@', fsize); // Pad the data with @ //Fill the buffer and compute the pattern hash file_stream.read(read_buf, fsize); insert_bytes (pattern_hash, argv[2], window_size); #ifdef DEBUG /*For Debugging*/ std::cout << std::endl << "Pattern hash: " << pattern_hash << std::endl; #endif BYTE* d_text; hipMalloc ((void**)&d_text, fsize); hipMemcpy(d_text, read_buf, fsize, hipMemcpyHostToDevice ); hipLaunchKernelGGL(( find_pattern), dim3(1), dim3(1024), 0, 0, d_text, pattern_hash, window_size, msb_multiplier); hipMemcpyFromSymbol (&found, d_found, sizeof(found), hipMemcpyDeviceToHost); hipFree(d_text); } catch (const std::exception& e) { REPORT(e); } /*Display Result*/ PATTERN_REPORT(found, file_stream.tellg()); /*Cleanup*/ if (read_buf) delete [] read_buf, read_buf = NULL; if (window) delete [] window, window = NULL; file_stream.close(); return 0; }
72e2b687d2d3cea64f12eb5c26518e098d4c5c5d.cu
/* * rabin_karp_v3.cpp * * Created by Varun Pandey on 05-19-2015. * * Copyright (c) 2015 Varun Pandey * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the project's author nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include "../include/common_headers.h" #include "../include/constant.h" __device__ bool d_found; __device__ int remove_byte(int64_t& current_digest, BYTE from_digest, size_t msb_pos_window); __device__ int insert_byte(int64_t& current_digest, BYTE from_digest); /* Cleanup routine run before successful termination of this program * */ void OnExit (void) { std::cout << std::endl << "Terminating Now..." << std::endl; } /* * CUDA kernel; each thread independently scans 2*(size of pattern) bytes * to find the pattern. * Sets d_found to true on success. * */ __global__ void find_pattern (BYTE* data, int64_t pattern_fingerprint, size_t window_size, size_t msb_multiplier) { int msb_pos_window = 0; int64_t text_digest = 0; int start_pos = threadIdx.x; BYTE *window = new BYTE [2*window_size]; if (!window) return; if (start_pos % (window_size*2)) { /* * start by adding window_size bytes to the digest * and try matching the fingerprint for window_size+1 shifts * */ int i = 0; for (; i < window_size; i++) { insert_byte (text_digest, *(data+start_pos+i)); window[i] = *(data+start_pos+i); } for (++i; i < (2*window_size); i++) { if (text_digest == pattern_fingerprint) { d_found = true; break; } remove_byte (text_digest, window[msb_pos_window++], msb_multiplier); window[i] = *(data+start_pos+i); insert_byte (text_digest, *(data+start_pos+i)); } } } /* Assuming the byte is in the msb position, removes * the byte. * returns 0 on success * */ __device__ int remove_byte(int64_t& current_digest, BYTE from_digest, size_t msb_multiplier) { int ret_val = 0; current_digest = current_digest - (msb_multiplier * from_digest); //shift the byte /*After much head banging, I am adding this code. Underflowing hash, damn it!*/ while (current_digest < 0) current_digest += PRIME; return ret_val; } /* Insert single byte into the digest * Note: - The first byte will go to the msb position. So data[0] * is at the highest order of the polynomial. 
Subsequently, * this is the first byte to be kicked out of our window. * Window size = pattern size (for now). For large patterns * proportional to text, window size can be controlled. * returns 0 on success * * */ __device__ int insert_byte(int64_t& current_digest, BYTE to_digest) { int ret_val = 0; current_digest *= RADIX; //shift the byte current_digest += to_digest; current_digest %= PRIME; return ret_val; } int insert_byte_host(int64_t& current_digest, BYTE to_digest, size_t msb_pos_window) { int ret_val = 0; current_digest *= RADIX; //shift the byte current_digest += to_digest; current_digest %= PRIME; return ret_val; } /* Process $size bytes and add them into the digest * returns 0 on success * */ int insert_bytes(int64_t& current_digest, BYTE* data, size_t size) { int ret_val = 0; for (int i = (size_t)0; i < size; i++){ insert_byte_host (current_digest, data[i], msb_multiplier); } return ret_val; } int main (int argc, char ** argv) { typeof(d_found) found = false; size_t i = 0; int64_t pattern_hash = 0; BYTE* read_buf = NULL; BYTE* window = NULL; std::ifstream file_stream; std::streampos stream_pos; if (argc != 3) USAGE_EXIT(argv[0]); atexit (OnExit); try { window_size = strlen(argv[2]); //Pattern size = window size for (i = 1; i < window_size; i++) msb_multiplier = (msb_multiplier*RADIX)%PRIME; // Check if the file exists file_stream.open (argv[1], std::ios::in | std::ios::binary); if (file_stream.bad() || file_stream.fail()) THROW("Please check the file path and try again"); //Get File size size_t fsize = file_stream.tellg(); file_stream.seekg (0, std::ios::end); fsize = file_stream.tellg() - (std::streampos)fsize; if (fsize % window_size) fsize = fsize + (window_size - (fsize % window_size)); //create extra room for padding if needed //reset stream file_stream.clear(); // clear fail and eof bits file_stream.seekg(0, std::ios::beg); // back to the start! read_buf = new BYTE [fsize]; if (!read_buf) THROW("Bad allocation"); memset (read_buf, '@', fsize); // Pad the data with @ //Fill the buffer and compute the pattern hash file_stream.read(read_buf, fsize); insert_bytes (pattern_hash, argv[2], window_size); #ifdef DEBUG /*For Debugging*/ std::cout << std::endl << "Pattern hash: " << pattern_hash << std::endl; #endif BYTE* d_text; cudaMalloc ((void**)&d_text, fsize); cudaMemcpy(d_text, read_buf, fsize, cudaMemcpyHostToDevice ); find_pattern<<<1, 1024>>>(d_text, pattern_hash, window_size, msb_multiplier); cudaMemcpyFromSymbol (&found, d_found, sizeof(found), cudaMemcpyDeviceToHost); cudaFree(d_text); } catch (const std::exception& e) { REPORT(e); } /*Display Result*/ PATTERN_REPORT(found, file_stream.tellg()); /*Cleanup*/ if (read_buf) delete [] read_buf, read_buf = NULL; if (window) delete [] window, window = NULL; file_stream.close(); return 0; }
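// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the file above): the Rabin-Karp rolling
// update that remove_byte/insert_byte implement, reproduced on the host with
// example constants (RADIX = 256, PRIME = 101 here; the real values come from
// constant.h and may differ). Plain C++, self-contained.
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t RADIX = 256, PRIME = 101;
    const unsigned char text[] = "abcd";
    const int window = 3;

    // msb_multiplier = RADIX^(window-1) mod PRIME: the weight of the byte
    // about to leave the window (same loop as in the file's main()).
    int64_t msb_multiplier = 1;
    for (int i = 1; i < window; i++) msb_multiplier = (msb_multiplier * RADIX) % PRIME;

    // Hash of "abc": insert_byte applied three times (shift, add, reduce).
    int64_t h = 0;
    for (int i = 0; i < window; i++) h = (h * RADIX + text[i]) % PRIME;

    // Slide to "bcd": remove 'a' (remove_byte), then shift in 'd' (insert_byte).
    // The while loop mirrors the underflow fix in remove_byte.
    h -= (msb_multiplier * text[0]) % PRIME;
    while (h < 0) h += PRIME;
    h = (h * RADIX + text[3]) % PRIME;

    // Verify against a hash of "bcd" computed from scratch; both print 31.
    int64_t ref = 0;
    for (int i = 1; i <= window; i++) ref = (ref * RADIX + text[i]) % PRIME;
    printf("rolled: %lld, direct: %lld\n", (long long)h, (long long)ref);
    return 0;
}
// ----------------------------------------------------------------------------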
781877e2fc0841bc36e546480123928aa8e84435.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" #include <opencv2\opencv.hpp> namespace caffe { template <typename Dtype> __global__ void SpatialPoolingForward(const int nthreads, const Dtype* const index_data, const int num, const int height, const int width, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { //CUDA_KERNEL_LOOP is a for loop that runs index up to nthreads - the offset needs to be checked. top_data[index * 2 + 0] = (int)(index_data[index] / width) / (Dtype)width; top_data[index * 2 + 1] = ((unsigned int)index_data[index] % width) / (Dtype)height; //why doesn't the modulo operation work...? } } template <typename Dtype> __global__ void SpatialPoolingBackward(const int nthreads, const Dtype* const top_diff, const int batchSize, const int nChannels, const int bottom_height, const int bottom_width, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { //CUDA_KERNEL_LOOP is a for loop that runs index up to nthreads - the offset needs to be checked. //index = const int mapidx = index / (bottom_height * bottom_width); //compute which map this is const int inMapidx = index % (bottom_height * bottom_width); //index within the map const int w = inMapidx % bottom_width; const int h = inMapidx / bottom_width; bottom_diff[index] = (w * top_diff[2 * mapidx + 0] / (Dtype)bottom_width) + (h * top_diff[2 * mapidx + 1] / (Dtype)bottom_height); } } template <typename Dtype> __global__ void kernel_features_maxidx(const int num, const int width, const int height, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, num) { Dtype maxval = -FLT_MAX; for (int i = 0; i < height * width; i++){ if (maxval < data[(index*width*height) + i]){ maxval = data[(index*width*height) + i]; out[index] = i; } } } } template <typename Dtype> __global__ void pooling_backward(const int num, const int width, const int height, const Dtype* poolIndex, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, num) { int channel = index / width / height; int innerIdx = index % (width*height); if (innerIdx == poolIndex[channel]){ out[index] /= data[index]; } else{ out[index] = 0; } } } template <typename Dtype> void SpatialPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int tWidth = bottom[0]->shape()[2]; int tHeight = bottom[0]->shape()[3]; ////////////////////////////////////////maxpooling///////////////////////////////////////////////////// //find max const int feature_count = bottom[0]->count() / (tWidth*tHeight); //number of maps, 32*60 Dtype* index_data = index_.mutable_gpu_data(); //Max index finding kernel_features_maxidx<Dtype> << <CAFFE_GET_BLOCKS(feature_count), CAFFE_CUDA_NUM_THREADS >> >(feature_count, tWidth, tHeight, bottom_data, index_.mutable_gpu_data()); //////////////////////////////////extract feature position/////////////////////////////////////////////// //<<first arg: number of blocks, second: threads per block>> SpatialPoolingForward<Dtype> << <CAFFE_GET_BLOCKS(feature_count), CAFFE_CUDA_NUM_THREADS >> >( feature_count, index_.gpu_data(), index_.num(), tHeight, tWidth, top_data); ///////////////////////////////////////////////////////////////////////////////////////////// ///draw softmax cv::Mat img; int batch_size = index_.shape()[0]; const int drawRow = 4; char buf[32]; Dtype posbox[64], sumbox[50]; int width = tWidth; int height = tHeight; int channel = feature_count / batch_size; img.create(width * (channel / drawRow), height * 2 * drawRow, 
CV_8UC1); // for (int b = 0; b < batch_size; b++){ // sprintf(buf, "pooling_spatial"); //hipMemcpy(posbox, &top[0]->gpu_data()[b * channel * 2], sizeof(Dtype) * channel * 2, hipMemcpyDeviceToHost); //hipMemcpy(sumbox, &index_.gpu_data()[b * channel], sizeof(Dtype) * channel, hipMemcpyDeviceToHost); // for (int i = 0; i < channel; i++){ // Dtype map[109 * 109]; // int s_row = i * 2 / (drawRow*2) * width; // int s_col = i * 2 % (drawRow*2) * width; // Dtype max = -1; // Dtype min = 9999; // Dtype sum = 0; // Dtype bmax = -1; // Dtype bmin = 9999; // for (int p = i * 2; p < (i + 1) * 2; p++){ // if (std::isnan(posbox[p])) // printf("pos error!\n"); // } // hipMemcpy(map, &bottom[0]->gpu_data()[b*channel*width*height + width* height * i], sizeof(Dtype) * width * height, hipMemcpyDeviceToHost); // for (int j = 0; j < width * height; j++){ // if (bmax < map[j]) bmax = map[j]; // if (bmin > map[j]) bmin = map[j]; // } // for (int j = 0; j < width * height; j++){ // img.at<uchar>(s_row + j / width, s_col + j%width) = (uchar)((map[j] - bmin) / (bmax - bmin) * 255.f); // img.at<uchar>(s_row + j / width, s_col + j%width + width) = (uchar)0; // } // //vertical lines // cv::line(img, cv::Point(s_col, s_row), cv::Point(s_col, s_row + height), cv::Scalar(255)); // cv::line(img, cv::Point(s_col + width, s_row), cv::Point(s_col + width, s_row + height), cv::Scalar(255)); // //horizontal lines // cv::line(img, cv::Point(s_col, s_row), cv::Point(s_col + 2 * width, s_row), cv::Scalar(255)); // cv::circle(img, cv::Point((s_row + (int)(posbox[2 * i + 1] * height)), s_col + width + (int)(posbox[2 * i] * width)), 2, cv::Scalar(255), -1); // //cv::imshow(buf, img); // //cv::waitKey(0); // } // cv::imshow(buf, img); //cv::imwrite("ppp.bmp", img); // cv::waitKey(0); // } // ///////////////////////////////////////////////////////////////////////////////////////////////// } template <typename Dtype> void SpatialPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { //taken from the pooling layer return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_height = bottom[0]->shape()[2]; const int bottom_width = bottom[0]->shape()[3]; const int count = bottom[0]->count(); const int batchSize = bottom[0]->shape()[0]; const int nChannels = bottom[0]->shape()[1]; SpatialPoolingBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, top_diff, batchSize, nChannels, bottom_height, bottom_width, bottom_diff); //pooling backward bottom_diff = bottom[0]->mutable_gpu_diff(); pooling_backward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, bottom_width, bottom_height, index_.gpu_data(), bottom[0]->gpu_data(), bottom_diff); } INSTANTIATE_LAYER_GPU_FUNCS(SpatialPoolingLayer); } // namespace caffe
781877e2fc0841bc36e546480123928aa8e84435.cu
#include <vector> #include "caffe/blob.hpp" #include "caffe/common.hpp" #include "caffe/filler.hpp" #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" #include <opencv2\opencv.hpp> namespace caffe { template <typename Dtype> __global__ void SpatialPoolingForward(const int nthreads, const Dtype* const index_data, const int num, const int height, const int width, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { //CUDA_KERNEL_LOOP is a for loop that runs index up to nthreads - the offset needs to be checked. top_data[index * 2 + 0] = (int)(index_data[index] / width) / (Dtype)width; top_data[index * 2 + 1] = ((unsigned int)index_data[index] % width) / (Dtype)height; //why doesn't the modulo operation work...? } } template <typename Dtype> __global__ void SpatialPoolingBackward(const int nthreads, const Dtype* const top_diff, const int batchSize, const int nChannels, const int bottom_height, const int bottom_width, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { //CUDA_KERNEL_LOOP is a for loop that runs index up to nthreads - the offset needs to be checked. //index = const int mapidx = index / (bottom_height * bottom_width); //compute which map this is const int inMapidx = index % (bottom_height * bottom_width); //index within the map const int w = inMapidx % bottom_width; const int h = inMapidx / bottom_width; bottom_diff[index] = (w * top_diff[2 * mapidx + 0] / (Dtype)bottom_width) + (h * top_diff[2 * mapidx + 1] / (Dtype)bottom_height); } } template <typename Dtype> __global__ void kernel_features_maxidx(const int num, const int width, const int height, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, num) { Dtype maxval = -FLT_MAX; for (int i = 0; i < height * width; i++){ if (maxval < data[(index*width*height) + i]){ maxval = data[(index*width*height) + i]; out[index] = i; } } } } template <typename Dtype> __global__ void pooling_backward(const int num, const int width, const int height, const Dtype* poolIndex, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, num) { int channel = index / width / height; int innerIdx = index % (width*height); if (innerIdx == poolIndex[channel]){ out[index] /= data[index]; } else{ out[index] = 0; } } } template <typename Dtype> void SpatialPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int tWidth = bottom[0]->shape()[2]; int tHeight = bottom[0]->shape()[3]; ////////////////////////////////////////maxpooling///////////////////////////////////////////////////// //find max const int feature_count = bottom[0]->count() / (tWidth*tHeight); //number of maps, 32*60 Dtype* index_data = index_.mutable_gpu_data(); //Max index finding kernel_features_maxidx<Dtype> << <CAFFE_GET_BLOCKS(feature_count), CAFFE_CUDA_NUM_THREADS >> >(feature_count, tWidth, tHeight, bottom_data, index_.mutable_gpu_data()); //////////////////////////////////extract feature position/////////////////////////////////////////////// //<<first arg: number of blocks, second: threads per block>> SpatialPoolingForward<Dtype> << <CAFFE_GET_BLOCKS(feature_count), CAFFE_CUDA_NUM_THREADS >> >( feature_count, index_.gpu_data(), index_.num(), tHeight, tWidth, top_data); ///////////////////////////////////////////////////////////////////////////////////////////// ///draw softmax cv::Mat img; int batch_size = index_.shape()[0]; const int drawRow = 4; char buf[32]; Dtype posbox[64], sumbox[50]; int width = tWidth; int height = tHeight; int channel = feature_count / batch_size; img.create(width * (channel / drawRow), 
height * 2 * drawRow, CV_8UC1); // for (int b = 0; b < batch_size; b++){ // sprintf(buf, "pooling_spatial"); //cudaMemcpy(posbox, &top[0]->gpu_data()[b * channel * 2], sizeof(Dtype) * channel * 2, cudaMemcpyDeviceToHost); //cudaMemcpy(sumbox, &index_.gpu_data()[b * channel], sizeof(Dtype) * channel, cudaMemcpyDeviceToHost); // for (int i = 0; i < channel; i++){ // Dtype map[109 * 109]; // int s_row = i * 2 / (drawRow*2) * width; // int s_col = i * 2 % (drawRow*2) * width; // Dtype max = -1; // Dtype min = 9999; // Dtype sum = 0; // Dtype bmax = -1; // Dtype bmin = 9999; // for (int p = i * 2; p < (i + 1) * 2; p++){ // if (std::isnan(posbox[p])) // printf("pos error!\n"); // } // cudaMemcpy(map, &bottom[0]->gpu_data()[b*channel*width*height + width* height * i], sizeof(Dtype) * width * height, cudaMemcpyDeviceToHost); // for (int j = 0; j < width * height; j++){ // if (bmax < map[j]) bmax = map[j]; // if (bmin > map[j]) bmin = map[j]; // } // for (int j = 0; j < width * height; j++){ // img.at<uchar>(s_row + j / width, s_col + j%width) = (uchar)((map[j] - bmin) / (bmax - bmin) * 255.f); // img.at<uchar>(s_row + j / width, s_col + j%width + width) = (uchar)0; // } // //vertical lines // cv::line(img, cv::Point(s_col, s_row), cv::Point(s_col, s_row + height), cv::Scalar(255)); // cv::line(img, cv::Point(s_col + width, s_row), cv::Point(s_col + width, s_row + height), cv::Scalar(255)); // //horizontal lines // cv::line(img, cv::Point(s_col, s_row), cv::Point(s_col + 2 * width, s_row), cv::Scalar(255)); // cv::circle(img, cv::Point((s_row + (int)(posbox[2 * i + 1] * height)), s_col + width + (int)(posbox[2 * i] * width)), 2, cv::Scalar(255), -1); // //cv::imshow(buf, img); // //cv::waitKey(0); // } // cv::imshow(buf, img); //cv::imwrite("ppp.bmp", img); // cv::waitKey(0); // } // ///////////////////////////////////////////////////////////////////////////////////////////////// } template <typename Dtype> void SpatialPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { //taken from the pooling layer return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int bottom_height = bottom[0]->shape()[2]; const int bottom_width = bottom[0]->shape()[3]; const int count = bottom[0]->count(); const int batchSize = bottom[0]->shape()[0]; const int nChannels = bottom[0]->shape()[1]; SpatialPoolingBackward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, top_diff, batchSize, nChannels, bottom_height, bottom_width, bottom_diff); //pooling backward bottom_diff = bottom[0]->mutable_gpu_diff(); pooling_backward<Dtype> << <CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS >> >( count, bottom_width, bottom_height, index_.gpu_data(), bottom[0]->gpu_data(), bottom_diff); } INSTANTIATE_LAYER_GPU_FUNCS(SpatialPoolingLayer); } // namespace caffe
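// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the file above): the grid-stride loop that
// the CUDA_KERNEL_LOOP macro used throughout these kernels expands to. Caffe
// defines it in caffe/util/device_alternate.hpp roughly as follows (renamed
// here to avoid redefinition):
#define CUDA_KERNEL_LOOP_SKETCH(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
       i < (n); \
       i += blockDim.x * gridDim.x)
// Each thread starts at its global index and advances by the total number of
// launched threads, so any (grid, block) shape covers all n elements; that is
// the per-thread offset the translated comments above say must be checked.
// ----------------------------------------------------------------------------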
96e91dfd7eb070f96044a418e237ba736d903d6c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "roi_align_impl.cuh" #include "util.cuh" #include "runtime/device/gpu/cuda_common.h" inline __device__ int roi_cast_int(float x) { return __float2int_rd(x); } inline __device__ int roi_cast_int(half x) { return __half2int_rd(x); } inline __device__ int roi_round_int(float x) { return __float2int_rn(x + 0.00007); } inline __device__ int roi_round_int(half x) { return __half2int_rn(x + static_cast<half>(0.00007)); } template <typename T> __device__ void bilinear_interpolate(const int height, const int width, T y, T x, int *x_low, int *y_low, int *x_high, int *y_high, T *w1, T *w2, T *w3, T *w4) { // return 0 if out of map boundary constexpr float eps = 0.00007; if (y < static_cast<T>(-1.0) || y > static_cast<T>(height) || x < static_cast<T>(-1.0) || x > static_cast<T>(width)) { *w1 = *w2 = *w3 = *w4 = 0; *x_low = *x_high = *y_low = *y_high = -1; return; } // low boundary is at least zero y = y <= static_cast<T>(.0) ? static_cast<T>(.0) : y; x = x <= static_cast<T>(.0) ? static_cast<T>(.0) : x; // top left point *y_low = (y <= static_cast<T>(eps) ? 0 : roi_cast_int(y)); *x_low = (x <= static_cast<T>(eps) ? 0 : roi_cast_int(x)); // bottom right point if (*y_low >= height - 1) { *y_high = *y_low = height - 1; y = static_cast<T>(*y_low); } else { *y_high = *y_low + 1; } if (*x_low >= width - 1) { *x_high = *x_low = width - 1; x = static_cast<T>(*x_low); } else { *x_high = *x_low + 1; } // distance to nearest points T lx, ly, hx, hy; ly = y - static_cast<T>(*y_low), lx = x - static_cast<T>(*x_low); hy = static_cast<T>(1.) - ly, hx = static_cast<T>(1.) - lx; // each weight is the product of the distances to the opposite corner, // so the closer the sample is to a corner, the larger that corner's weight. *w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx; return; } template <typename T> __device__ void bin_box(int thread_idx, const T *roi_boxes, int roi_cols, const T spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, int *offset, int *n, int *c, int *ph, int *pw, int *roi_bin_grid_h, int *roi_bin_grid_w, T *bin_size_h, T *bin_size_w, T *roi_start_h, T *roi_start_w) { // (n, c, ph, pw) is the base param of pooled map *pw = thread_idx % pooled_width; *ph = (thread_idx / pooled_width) % pooled_height; *c = (thread_idx / pooled_width / pooled_height) % channels; *n = thread_idx / pooled_width / pooled_height / channels; // Roi has // 1. 4 points, or // 2. 
indicator + 4 points (1 + 4) const T *roi_box = roi_boxes + (*n) * roi_cols; int roi_batch_ind = 0; if (roi_cols == 5) { roi_batch_ind = roi_round_int(roi_box[0]); roi_box++; } // Scale and shift ROI *roi_start_w = roi_box[0] * spatial_scale; *roi_start_h = roi_box[1] * spatial_scale; T roi_end_w = (roi_box[2] + static_cast<T>(roi_end_mode)) * spatial_scale; T roi_end_h = (roi_box[3] + static_cast<T>(roi_end_mode)) * spatial_scale; // New ROI height/width T roi_width = roi_end_w - (*roi_start_w); T roi_height = roi_end_h - (*roi_start_h); if (roi_end_mode == 0) { // backward compatibility // Force malformed ROIs to be 1x1 roi_width = roi_width > static_cast<T>(1.0) ? roi_width : static_cast<T>(1.0); roi_height = roi_height > static_cast<T>(1.0) ? roi_height : static_cast<T>(1.0); } // ratio of roi / pooled *bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); *bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); *offset = (roi_batch_ind * channels + (*c)) * height * width; // grid (int) by Sample ratio if defined, otherwise by pooled H/W *roi_bin_grid_h = (sample_num > 0) ? sample_num : roi_cast_int(roi_height / static_cast<T>(pooled_height)); *roi_bin_grid_w = (sample_num > 0) ? sample_num : roi_cast_int(roi_width / static_cast<T>(pooled_width)); return; } template <typename T> __global__ void ROIAlignKernel(size_t size, const T *input, const T *roi_boxes, int roi_cols, T *out_data, const T spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width) { for (int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; thread_idx < size; thread_idx += blockDim.x * gridDim.x) { int n = thread_idx / pooled_width / pooled_height / channels; const T *roi_box = roi_boxes + n * roi_cols; // Skip if roi box is a line if (roi_box[1] < static_cast<T>(0.001) && roi_box[3] < static_cast<T>(0.001) && roi_box[1] > static_cast<T>(-0.001) && roi_box[3] > static_cast<T>(-0.001)) { continue; } int offset = -1; int c, ph, pw, roi_bin_grid_h, roi_bin_grid_w; T bin_size_h, bin_size_w, roi_start_h, roi_start_w; bin_box(thread_idx, roi_boxes, roi_cols, spatial_scale, sample_num, roi_end_mode, channels, height, width, pooled_height, pooled_width, &offset, &n, &c, &ph, &pw, &roi_bin_grid_h, &roi_bin_grid_w, &bin_size_h, &bin_size_w, &roi_start_h, &roi_start_w); // (n, c, ph, pw) is the base param of pooled map const T count_points_in_grid_cell = roi_bin_grid_h * roi_bin_grid_w; T accumulate_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy++) { // Shift half point RIGHT for y / x, while previous scaled roi shift half point LEFT const T y = roi_start_h + static_cast<T>(ph) * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + static_cast<T>(pw) * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); // bilinear interpolate by shifted y / x // calculate bilinear interpolation int x_low = 0, y_low = 0, x_high = 0, y_high = 0; T w1, w2, w3, w4; bilinear_interpolate(height, width, y, x, &x_low, &y_low, &x_high, &y_high, &w1, &w2, &w3, &w4); if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0 && y_low < height && y_high < height && x_low < width && x_high < width) { T v1 = input[offset + y_low * width + x_low]; T v2 = input[offset + y_low * width + x_high]; T v3 = input[offset + y_high * width + x_low]; T v4 = input[offset + y_high * width + x_high]; 
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); accumulate_val += val; } } } accumulate_val /= count_points_in_grid_cell; out_data[thread_idx] = accumulate_val; } } template <typename T> void ROIAlign(const T *x, const T *roi_boxes, int roi_rows, int roi_cols, T *out_data, const T spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, hipStream_t cuda_stream) { size_t size = roi_rows * channels * pooled_height * pooled_width; hipLaunchKernelGGL(( ROIAlignKernel), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, x, roi_boxes, roi_cols, out_data, spatial_scale, sample_num, roi_end_mode, channels, height, width, pooled_height, pooled_width); return; } template void ROIAlign<float>(const float *x, const float *roi_boxes, int roi_rows, int roi_cols, float *out_data, const float spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, hipStream_t cuda_stream); template void ROIAlign<half>(const half *x, const half *roi_boxes, int roi_rows, int roi_cols, half *out_data, const half spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, hipStream_t cuda_stream); template <typename T> __global__ void ROIAlignGradInitKernel(size_t size_init, T *dx) { for (int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; thread_idx < size_init; thread_idx += blockDim.x * gridDim.x) { dx[thread_idx] = static_cast<T>(.0); } } template <typename T> __global__ void ROIAlignGradKernel(size_t size, const T *dy, const T *roi_boxes, int roi_cols, T *dx, const T spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width) { for (int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; thread_idx < size; thread_idx += blockDim.x * gridDim.x) { int n = thread_idx / pooled_width / pooled_height / channels; const T *roi_box = roi_boxes + n * roi_cols; if (roi_box[1] < static_cast<T>(0.001) && roi_box[3] < static_cast<T>(0.001) && roi_box[1] > static_cast<T>(-0.001) && roi_box[3] > static_cast<T>(-0.001)) { continue; } int offset = -1; int c, ph, pw, roi_bin_grid_h, roi_bin_grid_w; T bin_size_h, bin_size_w, roi_start_h, roi_start_w; bin_box(thread_idx, roi_boxes, roi_cols, spatial_scale, sample_num, roi_end_mode, channels, height, width, pooled_height, pooled_width, &offset, &n, &c, &ph, &pw, &roi_bin_grid_h, &roi_bin_grid_w, &bin_size_h, &bin_size_w, &roi_start_h, &roi_start_w); // (n, c, ph, pw) is the base param of pooled map const T count_points_in_grid_cell = roi_bin_grid_h * roi_bin_grid_w; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T *offset_top_diff = dy + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; for (int iy = 0; iy < roi_bin_grid_h; iy++) { // Shift half point RIGHT for y / x, while previous scaled roi shift half point LEFT const T y = roi_start_h + static_cast<T>(ph) * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + static_cast<T>(pw) * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); // bilinear interpolate by shifted y / x // calculate bilinear interpolation int x_low = 0, y_low = 0, 
x_high = 0, y_high = 0; T w1, w2, w3, w4; bilinear_interpolate(height, width, y, x, &x_low, &y_low, &x_high, &y_high, &w1, &w2, &w3, &w4); if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0 && y_low < height && y_high < height && x_low < width && x_high < width) { T g1 = top_diff_this_bin * w1 / count_points_in_grid_cell; T g2 = top_diff_this_bin * w2 / count_points_in_grid_cell; T g3 = top_diff_this_bin * w3 / count_points_in_grid_cell; T g4 = top_diff_this_bin * w4 / count_points_in_grid_cell; T *dx_1 = dx + offset + y_low * width + x_low; T *dx_2 = dx + offset + y_low * width + x_high; T *dx_3 = dx + offset + y_high * width + x_low; T *dx_4 = dx + offset + y_high * width + x_high; MsAtomicAdd(dx_1, g1); MsAtomicAdd(dx_2, g2); MsAtomicAdd(dx_3, g3); MsAtomicAdd(dx_4, g4); } } } } } template <typename T> void ROIAlignGrad(const T *dy, const T *roi_boxes, int batch_size, int roi_rows, int roi_cols, T *dx, const T spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, hipStream_t cuda_stream) { size_t size_init = batch_size * channels * height * width; hipLaunchKernelGGL(( ROIAlignGradInitKernel), dim3(GET_BLOCKS(size_init)), dim3(GET_THREADS), 0, cuda_stream, size_init, dx); size_t size = roi_rows * channels * pooled_height * pooled_width; hipLaunchKernelGGL(( ROIAlignGradKernel), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, dy, roi_boxes, roi_cols, dx, spatial_scale, sample_num, roi_end_mode, channels, height, width, pooled_height, pooled_width); return; } template void ROIAlignGrad<float>(const float *dy, const float *roi_boxes, int batch_size, int roi_rows, int roi_cols, float *dx, const float spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, hipStream_t cuda_stream); template void ROIAlignGrad<half>(const half *dy, const half *roi_boxes, int batch_size, int roi_rows, int roi_cols, half *dx, const half spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, hipStream_t cuda_stream);
96e91dfd7eb070f96044a418e237ba736d903d6c.cu
/** * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "roi_align_impl.cuh" #include "util.cuh" #include "runtime/device/gpu/cuda_common.h" inline __device__ int roi_cast_int(float x) { return __float2int_rd(x); } inline __device__ int roi_cast_int(half x) { return __half2int_rd(x); } inline __device__ int roi_round_int(float x) { return __float2int_rn(x + 0.00007); } inline __device__ int roi_round_int(half x) { return __half2int_rn(x + static_cast<half>(0.00007)); } template <typename T> __device__ void bilinear_interpolate(const int height, const int width, T y, T x, int *x_low, int *y_low, int *x_high, int *y_high, T *w1, T *w2, T *w3, T *w4) { // return 0 if out of map boundary constexpr float eps = 0.00007; if (y < static_cast<T>(-1.0) || y > static_cast<T>(height) || x < static_cast<T>(-1.0) || x > static_cast<T>(width)) { *w1 = *w2 = *w3 = *w4 = 0; *x_low = *x_high = *y_low = *y_high = -1; return; } // low boundary is at least zero y = y <= static_cast<T>(.0) ? static_cast<T>(.0) : y; x = x <= static_cast<T>(.0) ? static_cast<T>(.0) : x; // top left point *y_low = (y <= static_cast<T>(eps) ? 0 : roi_cast_int(y)); *x_low = (x <= static_cast<T>(eps) ? 0 : roi_cast_int(x)); // bottom right point if (*y_low >= height - 1) { *y_high = *y_low = height - 1; y = static_cast<T>(*y_low); } else { *y_high = *y_low + 1; } if (*x_low >= width - 1) { *x_high = *x_low = width - 1; x = static_cast<T>(*x_low); } else { *x_high = *x_low + 1; } // distance to nearest points T lx, ly, hx, hy; ly = y - static_cast<T>(*y_low), lx = x - static_cast<T>(*x_low); hy = static_cast<T>(1.) - ly, hx = static_cast<T>(1.) - lx; // each weight is the product of the distances to the opposite corner, // so the closer the sample is to a corner, the larger that corner's weight. *w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx; return; } template <typename T> __device__ void bin_box(int thread_idx, const T *roi_boxes, int roi_cols, const T spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, int *offset, int *n, int *c, int *ph, int *pw, int *roi_bin_grid_h, int *roi_bin_grid_w, T *bin_size_h, T *bin_size_w, T *roi_start_h, T *roi_start_w) { // (n, c, ph, pw) is the base param of pooled map *pw = thread_idx % pooled_width; *ph = (thread_idx / pooled_width) % pooled_height; *c = (thread_idx / pooled_width / pooled_height) % channels; *n = thread_idx / pooled_width / pooled_height / channels; // Roi has // 1. 4 points, or // 2. 
indicator + 4 points (1 + 4) const T *roi_box = roi_boxes + (*n) * roi_cols; int roi_batch_ind = 0; if (roi_cols == 5) { roi_batch_ind = roi_round_int(roi_box[0]); roi_box++; } // Scale and shift ROI *roi_start_w = roi_box[0] * spatial_scale; *roi_start_h = roi_box[1] * spatial_scale; T roi_end_w = (roi_box[2] + static_cast<T>(roi_end_mode)) * spatial_scale; T roi_end_h = (roi_box[3] + static_cast<T>(roi_end_mode)) * spatial_scale; // New ROI height/width T roi_width = roi_end_w - (*roi_start_w); T roi_height = roi_end_h - (*roi_start_h); if (roi_end_mode == 0) { // backward compatibility // Force malformed ROIs to be 1x1 roi_width = roi_width > static_cast<T>(1.0) ? roi_width : static_cast<T>(1.0); roi_height = roi_height > static_cast<T>(1.0) ? roi_height : static_cast<T>(1.0); } // ratio of roi / pooled *bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); *bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); *offset = (roi_batch_ind * channels + (*c)) * height * width; // grid (int) by Sample ratio if defined, otherwise by pooled H/W *roi_bin_grid_h = (sample_num > 0) ? sample_num : roi_cast_int(roi_height / static_cast<T>(pooled_height)); *roi_bin_grid_w = (sample_num > 0) ? sample_num : roi_cast_int(roi_width / static_cast<T>(pooled_width)); return; } template <typename T> __global__ void ROIAlignKernel(size_t size, const T *input, const T *roi_boxes, int roi_cols, T *out_data, const T spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width) { for (int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; thread_idx < size; thread_idx += blockDim.x * gridDim.x) { int n = thread_idx / pooled_width / pooled_height / channels; const T *roi_box = roi_boxes + n * roi_cols; // Skip if roi box is a line if (roi_box[1] < static_cast<T>(0.001) && roi_box[3] < static_cast<T>(0.001) && roi_box[1] > static_cast<T>(-0.001) && roi_box[3] > static_cast<T>(-0.001)) { continue; } int offset = -1; int c, ph, pw, roi_bin_grid_h, roi_bin_grid_w; T bin_size_h, bin_size_w, roi_start_h, roi_start_w; bin_box(thread_idx, roi_boxes, roi_cols, spatial_scale, sample_num, roi_end_mode, channels, height, width, pooled_height, pooled_width, &offset, &n, &c, &ph, &pw, &roi_bin_grid_h, &roi_bin_grid_w, &bin_size_h, &bin_size_w, &roi_start_h, &roi_start_w); // (n, c, ph, pw) is the base param of pooled map const T count_points_in_grid_cell = roi_bin_grid_h * roi_bin_grid_w; T accumulate_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy++) { // Shift half point RIGHT for y / x, while previous scaled roi shift half point LEFT const T y = roi_start_h + static_cast<T>(ph) * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + static_cast<T>(pw) * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); // bilinear interpolate by shifted y / x // calculate bilinear interpolation int x_low = 0, y_low = 0, x_high = 0, y_high = 0; T w1, w2, w3, w4; bilinear_interpolate(height, width, y, x, &x_low, &y_low, &x_high, &y_high, &w1, &w2, &w3, &w4); if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0 && y_low < height && y_high < height && x_low < width && x_high < width) { T v1 = input[offset + y_low * width + x_low]; T v2 = input[offset + y_low * width + x_high]; T v3 = input[offset + y_high * width + x_low]; T v4 = input[offset + y_high * width + x_high]; 
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); accumulate_val += val; } } } accumulate_val /= count_points_in_grid_cell; out_data[thread_idx] = accumulate_val; } } template <typename T> void ROIAlign(const T *x, const T *roi_boxes, int roi_rows, int roi_cols, T *out_data, const T spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, cudaStream_t cuda_stream) { size_t size = roi_rows * channels * pooled_height * pooled_width; ROIAlignKernel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, x, roi_boxes, roi_cols, out_data, spatial_scale, sample_num, roi_end_mode, channels, height, width, pooled_height, pooled_width); return; } template void ROIAlign<float>(const float *x, const float *roi_boxes, int roi_rows, int roi_cols, float *out_data, const float spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, cudaStream_t cuda_stream); template void ROIAlign<half>(const half *x, const half *roi_boxes, int roi_rows, int roi_cols, half *out_data, const half spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, cudaStream_t cuda_stream); template <typename T> __global__ void ROIAlignGradInitKernel(size_t size_init, T *dx) { for (int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; thread_idx < size_init; thread_idx += blockDim.x * gridDim.x) { dx[thread_idx] = static_cast<T>(.0); } } template <typename T> __global__ void ROIAlignGradKernel(size_t size, const T *dy, const T *roi_boxes, int roi_cols, T *dx, const T spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width) { for (int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; thread_idx < size; thread_idx += blockDim.x * gridDim.x) { int n = thread_idx / pooled_width / pooled_height / channels; const T *roi_box = roi_boxes + n * roi_cols; if (roi_box[1] < static_cast<T>(0.001) && roi_box[3] < static_cast<T>(0.001) && roi_box[1] > static_cast<T>(-0.001) && roi_box[3] > static_cast<T>(-0.001)) { continue; } int offset = -1; int c, ph, pw, roi_bin_grid_h, roi_bin_grid_w; T bin_size_h, bin_size_w, roi_start_h, roi_start_w; bin_box(thread_idx, roi_boxes, roi_cols, spatial_scale, sample_num, roi_end_mode, channels, height, width, pooled_height, pooled_width, &offset, &n, &c, &ph, &pw, &roi_bin_grid_h, &roi_bin_grid_w, &bin_size_h, &bin_size_w, &roi_start_h, &roi_start_w); // (n, c, ph, pw) is the base param of pooled map const T count_points_in_grid_cell = roi_bin_grid_h * roi_bin_grid_w; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T *offset_top_diff = dy + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; for (int iy = 0; iy < roi_bin_grid_h; iy++) { // Shift half point RIGHT for y / x, while previous scaled roi shift half point LEFT const T y = roi_start_h + static_cast<T>(ph) * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + static_cast<T>(pw) * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); // bilinear interpolate by shifted y / x // calculate bilinear interpolation int x_low = 0, y_low = 0, x_high = 0, y_high = 0; T w1, 
w2, w3, w4; bilinear_interpolate(height, width, y, x, &x_low, &y_low, &x_high, &y_high, &w1, &w2, &w3, &w4); if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0 && y_low < height && y_high < height && x_low < width && x_high < width) { T g1 = top_diff_this_bin * w1 / count_points_in_grid_cell; T g2 = top_diff_this_bin * w2 / count_points_in_grid_cell; T g3 = top_diff_this_bin * w3 / count_points_in_grid_cell; T g4 = top_diff_this_bin * w4 / count_points_in_grid_cell; T *dx_1 = dx + offset + y_low * width + x_low; T *dx_2 = dx + offset + y_low * width + x_high; T *dx_3 = dx + offset + y_high * width + x_low; T *dx_4 = dx + offset + y_high * width + x_high; MsAtomicAdd(dx_1, g1); MsAtomicAdd(dx_2, g2); MsAtomicAdd(dx_3, g3); MsAtomicAdd(dx_4, g4); } } } } } template <typename T> void ROIAlignGrad(const T *dy, const T *roi_boxes, int batch_size, int roi_rows, int roi_cols, T *dx, const T spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, cudaStream_t cuda_stream) { size_t size_init = batch_size * channels * height * width; ROIAlignGradInitKernel<<<GET_BLOCKS(size_init), GET_THREADS, 0, cuda_stream>>>(size_init, dx); size_t size = roi_rows * channels * pooled_height * pooled_width; ROIAlignGradKernel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>( size, dy, roi_boxes, roi_cols, dx, spatial_scale, sample_num, roi_end_mode, channels, height, width, pooled_height, pooled_width); return; } template void ROIAlignGrad<float>(const float *dy, const float *roi_boxes, int batch_size, int roi_rows, int roi_cols, float *dx, const float spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, cudaStream_t cuda_stream); template void ROIAlignGrad<half>(const half *dy, const half *roi_boxes, int batch_size, int roi_rows, int roi_cols, half *dx, const half spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, cudaStream_t cuda_stream);
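// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the file above): the weight computation in
// bilinear_interpolate, reproduced on the host for one sample point. Plain
// C++; all names are local to this sketch.
#include <cstdio>

int main() {
    float y = 2.3f, x = 5.7f;              // sample point inside the feature map
    int y_low = 2, x_low = 5;              // top-left integer corner
    float ly = y - y_low, lx = x - x_low;  // 0.3, 0.7
    float hy = 1.f - ly, hx = 1.f - lx;    // 0.7, 0.3
    // Each corner's weight is the area of the rectangle opposite to it,
    // so the four weights always sum to 1.
    float w1 = hy * hx;  // top-left     -> 0.21
    float w2 = hy * lx;  // top-right    -> 0.49
    float w3 = ly * hx;  // bottom-left  -> 0.09
    float w4 = ly * lx;  // bottom-right -> 0.21
    printf("w1..w4 = %.2f %.2f %.2f %.2f (sum %.2f)\n",
           w1, w2, w3, w4, w1 + w2 + w3 + w4);
    return 0;
}
// ----------------------------------------------------------------------------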
205164efc3beca5a3404a99f24b987ba83be0fad.hip
// !!! This is a file automatically generated by hipify!!! #include "simple_yolo.hpp" #include <NvInfer.h> #include <NvOnnxParser.h> #include <hip/hip_runtime.h> #include <algorithm> #include <fstream> #include <memory> #include <string> #include <future> #include <condition_variable> #include <mutex> #include <thread> #include <queue> #if defined(_WIN32) # include <Windows.h> # include <wingdi.h> # include <Shlwapi.h> # pragma comment(lib, "shlwapi.lib") # undef min # undef max #else # include <dirent.h> # include <sys/types.h> # include <sys/stat.h> # include <unistd.h> # include <stdarg.h> #endif namespace SimpleYolo{ using namespace nvinfer1; using namespace std; using namespace cv; #define CURRENT_DEVICE_ID -1 #define GPU_BLOCK_THREADS 512 #define KernelPositionBlock \ int position = (blockDim.x * blockIdx.x + threadIdx.x); \ if (position >= (edge)) return; #define checkCudaRuntime(call) check_runtime(call, #call, __LINE__, __FILE__) static bool check_runtime(hipError_t e, const char* call, int line, const char *file); #define checkCudaKernel(...) \ __VA_ARGS__; \ do{hipError_t cudaStatus = hipPeekAtLastError(); \ if (cudaStatus != hipSuccess){ \ INFOE("launch failed: %s", hipGetErrorString(cudaStatus)); \ }} while(0); #define Assert(op) \ do{ \ bool cond = !(!(op)); \ if(!cond){ \ INFOF("Assert failed, " #op); \ } \ }while(false) /* level */ #define CURRENT_LOG_LEVEL LogLevel::Info #define INFOD(...) __log_func(__FILE__, __LINE__, LogLevel::Debug, __VA_ARGS__) #define INFOV(...) __log_func(__FILE__, __LINE__, LogLevel::Verbose, __VA_ARGS__) #define INFO(...) __log_func(__FILE__, __LINE__, LogLevel::Info, __VA_ARGS__) #define INFOW(...) __log_func(__FILE__, __LINE__, LogLevel::Warning, __VA_ARGS__) #define INFOE(...) __log_func(__FILE__, __LINE__, LogLevel::Error, __VA_ARGS__) #define INFOF(...) 
__log_func(__FILE__, __LINE__, LogLevel::Fatal, __VA_ARGS__) enum class NormType : int{ None = 0, MeanStd = 1, AlphaBeta = 2 }; enum class ChannelType : int{ None = 0, SwapRB = 1 }; /* normalization config: supports mean/std and alpha/beta modes, with an optional RB channel swap */ struct Norm{ float mean[3]; float std[3]; float alpha, beta; NormType type = NormType::None; ChannelType channel_type = ChannelType::None; // out = (x * alpha - mean) / std static Norm mean_std(const float mean[3], const float std[3], float alpha = 1/255.0f, ChannelType channel_type=ChannelType::None); // out = x * alpha + beta static Norm alpha_beta(float alpha, float beta = 0, ChannelType channel_type=ChannelType::None); // no normalization static Norm None(); }; Norm Norm::mean_std(const float mean[3], const float std[3], float alpha, ChannelType channel_type){ Norm out; out.type = NormType::MeanStd; out.alpha = alpha; out.channel_type = channel_type; memcpy(out.mean, mean, sizeof(out.mean)); memcpy(out.std, std, sizeof(out.std)); return out; } Norm Norm::alpha_beta(float alpha, float beta, ChannelType channel_type){ Norm out; out.type = NormType::AlphaBeta; out.alpha = alpha; out.beta = beta; out.channel_type = channel_type; return out; } Norm Norm::None(){ return Norm(); } /* RAII device guard: sets the requested gpu id on construction and restores the previous gpu id on destruction */ class AutoDevice{ public: AutoDevice(int device_id = 0){ hipGetDevice(&old_); if(old_ != device_id && device_id != -1) checkCudaRuntime(hipSetDevice(device_id)); } virtual ~AutoDevice(){ if(old_ != -1) checkCudaRuntime(hipSetDevice(old_)); } private: int old_ = -1; }; enum class LogLevel : int{ Debug = 5, Verbose = 4, Info = 3, Warning = 2, Error = 1, Fatal = 0 }; static void __log_func(const char* file, int line, LogLevel level, const char* fmt, ...); inline int upbound(int n, int align = 32){return (n + align - 1) / align * align;} static bool check_runtime(hipError_t e, const char* call, int line, const char *file){ if (e != hipSuccess) { INFOE("CUDA Runtime error %s # %s, code = %s [ %d ] in file %s:%d", call, hipGetErrorString(e), hipGetErrorName(e), e, file, line); return false; } return true; } #define TRT_STR(v) #v #define TRT_VERSION_STRING(major, minor, patch, build) TRT_STR(major) "." TRT_STR(minor) "." TRT_STR(patch) "." 
    TRT_STR(build)

    const char* trt_version(){
        return TRT_VERSION_STRING(NV_TENSORRT_MAJOR, NV_TENSORRT_MINOR, NV_TENSORRT_PATCH, NV_TENSORRT_BUILD);
    }

    static bool check_device_id(int device_id){
        int device_count = -1;
        checkCudaRuntime(hipGetDeviceCount(&device_count));
        if(device_id < 0 || device_id >= device_count){
            INFOE("Invalid device id: %d, count = %d", device_id, device_count);
            return false;
        }
        return true;
    }

    static bool exists(const string& path){
    #ifdef _WIN32
        return ::PathFileExistsA(path.c_str());
    #else
        return access(path.c_str(), R_OK) == 0;
    #endif
    }

    static const char* level_string(LogLevel level){
        switch (level){
        case LogLevel::Debug: return "debug";
        case LogLevel::Verbose: return "verbo";
        case LogLevel::Info: return "info";
        case LogLevel::Warning: return "warn";
        case LogLevel::Error: return "error";
        case LogLevel::Fatal: return "fatal";
        default: return "unknown";
        }
    }

    template<typename _T>
    static string join_dims(const vector<_T>& dims){
        stringstream output;
        char buf[64];
        const char* fmts[] = {"%d", " x %d"};
        for(int i = 0; i < dims.size(); ++i){
            snprintf(buf, sizeof(buf), fmts[i != 0], dims[i]);
            output << buf;
        }
        return output.str();
    }

    static bool save_file(const string& file, const void* data, size_t length){

        FILE* f = fopen(file.c_str(), "wb");
        if (!f) return false;

        if (data && length > 0){
            if (fwrite(data, 1, length, f) != length){
                fclose(f);
                return false;
            }
        }
        fclose(f);
        return true;
    }

    static bool save_file(const string& file, const vector<uint8_t>& data){
        return save_file(file, data.data(), data.size());
    }

    static string file_name(const string& path, bool include_suffix){

        if (path.empty()) return "";

        int p = path.rfind('/');

    #ifdef _WIN32
        int e = path.rfind('\\');
        p = ::max(p, e);
    #endif
        p += 1;

        //include suffix
        if (include_suffix)
            return path.substr(p);

        int u = path.rfind('.');
        if (u == -1)
            return path.substr(p);

        if (u <= p) u = path.size();
        return path.substr(p, u - p);
    }

    vector<string> glob_image_files(const string& directory){

        /* glob all images under the directory: "*.jpg;*.png;*.bmp;*.jpeg;*.tiff" */
        vector<string> files, output;
        set<string> pattern_set{"jpg", "png", "bmp", "jpeg", "tiff"};

        if(directory.empty()){
            INFOE("Glob images from folder failed, folder is empty");
            return output;
        }

        try{
            cv::glob(directory + "/*", files, true);
        }catch(...){
            INFOE("Glob %s failed", directory.c_str());
            return output;
        }

        for(int i = 0; i < files.size(); ++i){
            auto& file = files[i];
            int p = file.rfind(".");
            if(p == -1) continue;

            auto suffix = file.substr(p+1);
            std::transform(suffix.begin(), suffix.end(), suffix.begin(), [](char c){
                // convert ASCII uppercase to lowercase
                if(c >= 'A' && c <= 'Z')
                    c = c - 'A' + 'a';
                return c;
            });
            if(pattern_set.find(suffix) != pattern_set.end())
                output.push_back(file);
        }
        return output;
    }

    static void __log_func(const char* file, int line, LogLevel level, const char* fmt, ...){

        if(level > CURRENT_LOG_LEVEL) return;

        va_list vl;
        va_start(vl, fmt);

        char buffer[2048];
        string filename = file_name(file, true);
        int n = snprintf(buffer, sizeof(buffer), "[%s][%s:%d]:", level_string(level), filename.c_str(), line);
        vsnprintf(buffer + n, sizeof(buffer) - n, fmt, vl);

        fprintf(stdout, "%s\n", buffer);
        if (level == LogLevel::Fatal) {
            fflush(stdout);
            abort();
        }
    }

    static dim3 grid_dims(int numJobs) {
        int numBlockThreads = numJobs < GPU_BLOCK_THREADS ? numJobs : GPU_BLOCK_THREADS;
        return dim3(((numJobs + numBlockThreads - 1) / (float)numBlockThreads));
    }

    static dim3 block_dims(int numJobs) {
        return numJobs < GPU_BLOCK_THREADS ?
numJobs : GPU_BLOCK_THREADS; } static int get_device(int device_id){ if(device_id != CURRENT_DEVICE_ID){ check_device_id(device_id); return device_id; } checkCudaRuntime(hipGetDevice(&device_id)); return device_id; } void set_device(int device_id) { if (device_id == -1) return; checkCudaRuntime(hipSetDevice(device_id)); } /////////////////////////////CUDA kernels//////////////////////////////////////////////// const int NUM_BOX_ELEMENT = 7; // left, top, right, bottom, confidence, class, keepflag static __device__ void affine_project(float* matrix, float x, float y, float* ox, float* oy){ *ox = matrix[0] * x + matrix[1] * y + matrix[2]; *oy = matrix[3] * x + matrix[4] * y + matrix[5]; } static __global__ void decode_kernel(float* predict, int num_bboxes, int num_classes, float confidence_threshold, float* invert_affine_matrix, float* parray, int max_objects){ int position = blockDim.x * blockIdx.x + threadIdx.x; if (position >= num_bboxes) return; float* pitem = predict + (5 + num_classes) * position; float objectness = pitem[4]; if(objectness < confidence_threshold) return; float* class_confidence = pitem + 5; float confidence = *class_confidence++; int label = 0; for(int i = 1; i < num_classes; ++i, ++class_confidence){ if(*class_confidence > confidence){ confidence = *class_confidence; label = i; } } confidence *= objectness; if(confidence < confidence_threshold) return; int index = atomicAdd(parray, 1); if(index >= max_objects) return; float cx = *pitem++; float cy = *pitem++; float width = *pitem++; float height = *pitem++; float left = cx - width * 0.5f; float top = cy - height * 0.5f; float right = cx + width * 0.5f; float bottom = cy + height * 0.5f; affine_project(invert_affine_matrix, left, top, &left, &top); affine_project(invert_affine_matrix, right, bottom, &right, &bottom); float* pout_item = parray + 1 + index * NUM_BOX_ELEMENT; *pout_item++ = left; *pout_item++ = top; *pout_item++ = right; *pout_item++ = bottom; *pout_item++ = confidence; *pout_item++ = label; *pout_item++ = 1; // 1 = keep, 0 = ignore } static __device__ float box_iou( float aleft, float atop, float aright, float abottom, float bleft, float btop, float bright, float bbottom ){ float cleft = max(aleft, bleft); float ctop = max(atop, btop); float cright = min(aright, bright); float cbottom = min(abottom, bbottom); float c_area = max(cright - cleft, 0.0f) * max(cbottom - ctop, 0.0f); if(c_area == 0.0f) return 0.0f; float a_area = max(0.0f, aright - aleft) * max(0.0f, abottom - atop); float b_area = max(0.0f, bright - bleft) * max(0.0f, bbottom - btop); return c_area / (a_area + b_area - c_area); } static __global__ void fast_nms_kernel(float* bboxes, int max_objects, float threshold){ int position = (blockDim.x * blockIdx.x + threadIdx.x); int count = min((int)*bboxes, max_objects); if (position >= count) return; // left, top, right, bottom, confidence, class, keepflag float* pcurrent = bboxes + 1 + position * NUM_BOX_ELEMENT; for(int i = 0; i < count; ++i){ float* pitem = bboxes + 1 + i * NUM_BOX_ELEMENT; if(i == position || pcurrent[5] != pitem[5]) continue; if(pitem[4] >= pcurrent[4]){ if(pitem[4] == pcurrent[4] && i < position) continue; float iou = box_iou( pcurrent[0], pcurrent[1], pcurrent[2], pcurrent[3], pitem[0], pitem[1], pitem[2], pitem[3] ); if(iou > threshold){ pcurrent[6] = 0; // 1=keep, 0=ignore return; } } } } static void decode_kernel_invoker(float* predict, int num_bboxes, int num_classes, float confidence_threshold, float nms_threshold, float* invert_affine_matrix, float* parray, int 
        max_objects, hipStream_t stream){

        auto grid = grid_dims(num_bboxes);
        auto block = block_dims(num_bboxes);

        // decode raw predictions into candidate boxes, then suppress overlaps on the device
        checkCudaKernel(hipLaunchKernelGGL(decode_kernel, grid, block, 0, stream,
            predict, num_bboxes, num_classes, confidence_threshold, invert_affine_matrix, parray, max_objects));

        grid = grid_dims(max_objects);
        block = block_dims(max_objects);
        checkCudaKernel(hipLaunchKernelGGL(fast_nms_kernel, grid, block, 0, stream,
            parray, max_objects, nms_threshold));
    }

    static __global__ void warp_affine_bilinear_and_normalize_plane_kernel(uint8_t* src, int src_line_size, int src_width, int src_height, float* dst, int dst_width, int dst_height, uint8_t const_value_st, float* warp_affine_matrix_2_3, Norm norm, int edge){

        int position = blockDim.x * blockIdx.x + threadIdx.x;
        if (position >= edge) return;

        float m_x1 = warp_affine_matrix_2_3[0];
        float m_y1 = warp_affine_matrix_2_3[1];
        float m_z1 = warp_affine_matrix_2_3[2];
        float m_x2 = warp_affine_matrix_2_3[3];
        float m_y2 = warp_affine_matrix_2_3[4];
        float m_z2 = warp_affine_matrix_2_3[5];

        int dx = position % dst_width;
        int dy = position / dst_width;
        float src_x = m_x1 * dx + m_y1 * dy + m_z1;
        float src_y = m_x2 * dx + m_y2 * dy + m_z2;
        float c0, c1, c2;

        if(src_x <= -1 || src_x >= src_width || src_y <= -1 || src_y >= src_height){
            // out of range
            c0 = const_value_st;
            c1 = const_value_st;
            c2 = const_value_st;
        }else{
            int y_low = floorf(src_y);
            int x_low = floorf(src_x);
            int y_high = y_low + 1;
            int x_high = x_low + 1;

            uint8_t const_value[] = {const_value_st, const_value_st, const_value_st};
            float ly = src_y - y_low;
            float lx = src_x - x_low;
            float hy = 1 - ly;
            float hx = 1 - lx;
            float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
            uint8_t* v1 = const_value;
            uint8_t* v2 = const_value;
            uint8_t* v3 = const_value;
            uint8_t* v4 = const_value;
            if(y_low >= 0){
                if (x_low >= 0)
                    v1 = src + y_low * src_line_size + x_low * 3;

                if (x_high < src_width)
                    v2 = src + y_low * src_line_size + x_high * 3;
            }

            if(y_high < src_height){
                if (x_low >= 0)
                    v3 = src + y_high * src_line_size + x_low * 3;

                if (x_high < src_width)
                    v4 = src + y_high * src_line_size + x_high * 3;
            }

            // same rounding behavior as OpenCV
            c0 = floorf(w1 * v1[0] + w2 * v2[0] + w3 * v3[0] + w4 * v4[0] + 0.5f);
            c1 = floorf(w1 * v1[1] + w2 * v2[1] + w3 * v3[1] + w4 * v4[1] + 0.5f);
            c2 = floorf(w1 * v1[2] + w2 * v2[2] + w3 * v3[2] + w4 * v4[2] + 0.5f);
        }

        if(norm.channel_type == ChannelType::SwapRB){
            float t = c2; c2 = c0; c0 = t;
        }

        if(norm.type == NormType::MeanStd){
            c0 = (c0 * norm.alpha - norm.mean[0]) / norm.std[0];
            c1 = (c1 * norm.alpha - norm.mean[1]) / norm.std[1];
            c2 = (c2 * norm.alpha - norm.mean[2]) / norm.std[2];
        }else if(norm.type == NormType::AlphaBeta){
            c0 = c0 * norm.alpha + norm.beta;
            c1 = c1 * norm.alpha + norm.beta;
            c2 = c2 * norm.alpha + norm.beta;
        }

        int area = dst_width * dst_height;
        float* pdst_c0 = dst + dy * dst_width + dx;
        float* pdst_c1 = pdst_c0 + area;
        float* pdst_c2 = pdst_c1 + area;
        *pdst_c0 = c0;
        *pdst_c1 = c1;
        *pdst_c2 = c2;
    }

    static void warp_affine_bilinear_and_normalize_plane(
        uint8_t* src, int src_line_size, int src_width, int src_height,
        float* dst, int dst_width, int dst_height,
        float* matrix_2_3, uint8_t const_value, const Norm& norm,
        hipStream_t stream) {

        int jobs = dst_width * dst_height;
        auto grid = grid_dims(jobs);
        auto block = block_dims(jobs);

        checkCudaKernel(hipLaunchKernelGGL(warp_affine_bilinear_and_normalize_plane_kernel, grid, block, 0, stream,
            src, src_line_size, src_width, src_height,
            dst, dst_width, dst_height, const_value, matrix_2_3, norm, jobs));
    }
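    //
    // Illustrative sketch (not part of the original file): how the two device stages above
    // are typically chained for one image. The helper name, all buffer names, the 640x640
    // resolution and the 25200 x (5+80) YoloV5 head layout are assumptions for the example;
    // only warp_affine_bilinear_and_normalize_plane() and decode_kernel_invoker() are the
    // real functions defined above. Guarded with #if 0 so it is never built.
    //
    #if 0
    static void example_single_image_pipeline(
        uint8_t* image_device,          // assumed: HWC BGR uint8 image already on the device
        int width, int height,
        float* network_input_device,    // assumed: 3 x 640 x 640 CHW float device buffer
        float* network_output_device,   // assumed: raw engine predictions, 25200 x (5+80)
        float* d2i_matrix_device,       // assumed: inverse 2x3 letterbox matrix, 6 floats
        float* output_array_device,     // assumed: 1 + 1024 * NUM_BOX_ELEMENT floats
        hipStream_t stream
    ){
        // letterbox resample + border padding + normalization fused into one kernel
        warp_affine_bilinear_and_normalize_plane(
            image_device, width * 3, width, height,
            network_input_device, 640, 640,
            d2i_matrix_device, 114, Norm::alpha_beta(1 / 255.0f, 0.0f, ChannelType::SwapRB), stream);

        // ... enqueue the TensorRT engine here to fill network_output_device ...

        // zero the box counter, then decode + fast NMS entirely on the device
        checkCudaRuntime(hipMemsetAsync(output_array_device, 0, sizeof(int), stream));
        decode_kernel_invoker(network_output_device, 25200, 80, 0.25f, 0.5f,
                              d2i_matrix_device, output_array_device, 1024, stream);
    }
    #endif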
    //////////////////////////////class MixMemory/////////////////////////////////////////////////
    /* Memory manager for paired gpu/cpu buffers: allocates and frees both sides on demand.
       The cpu side is pinned memory (hipHostMalloc), which makes copies to and from the gpu
       faster; since it is allocated by the cuda runtime, it is tied to the cuda context of
       its device. */
    class MixMemory {
    public:
        MixMemory(int device_id = CURRENT_DEVICE_ID);
        MixMemory(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size);
        virtual ~MixMemory();
        void* gpu(size_t size);
        void* cpu(size_t size);
        void release_gpu();
        void release_cpu();
        void release_all();

        inline bool owner_gpu() const{return owner_gpu_;}
        inline bool owner_cpu() const{return owner_cpu_;}
        inline size_t cpu_size() const{return cpu_size_;}
        inline size_t gpu_size() const{return gpu_size_;}
        inline int device_id() const{return device_id_;}
        inline void* gpu() const { return gpu_; }

        // Pinned Memory
        inline void* cpu() const { return cpu_; }
        void reference_data(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size);

    private:
        void* cpu_ = nullptr;
        size_t cpu_size_ = 0;
        bool owner_cpu_ = true;
        int device_id_ = 0;

        void* gpu_ = nullptr;
        size_t gpu_size_ = 0;
        bool owner_gpu_ = true;
    };

    MixMemory::MixMemory(int device_id){
        device_id_ = get_device(device_id);
    }

    MixMemory::MixMemory(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size){
        reference_data(cpu, cpu_size, gpu, gpu_size);
    }

    void MixMemory::reference_data(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size){
        release_all();

        if(cpu == nullptr || cpu_size == 0){
            cpu = nullptr;
            cpu_size = 0;
        }

        if(gpu == nullptr || gpu_size == 0){
            gpu = nullptr;
            gpu_size = 0;
        }

        this->cpu_ = cpu;
        this->cpu_size_ = cpu_size;
        this->gpu_ = gpu;
        this->gpu_size_ = gpu_size;

        this->owner_cpu_ = !(cpu && cpu_size > 0);
        this->owner_gpu_ = !(gpu && gpu_size > 0);
        checkCudaRuntime(hipGetDevice(&device_id_));
    }

    MixMemory::~MixMemory() {
        release_all();
    }

    void* MixMemory::gpu(size_t size) {

        if (gpu_size_ < size) {
            release_gpu();

            gpu_size_ = size;
            AutoDevice auto_device_exchange(device_id_);
            checkCudaRuntime(hipMalloc(&gpu_, size));
            checkCudaRuntime(hipMemset(gpu_, 0, size));
        }
        return gpu_;
    }

    void* MixMemory::cpu(size_t size) {

        if (cpu_size_ < size) {
            release_cpu();

            cpu_size_ = size;
            AutoDevice auto_device_exchange(device_id_);
            checkCudaRuntime(hipHostMalloc(&cpu_, size));
            Assert(cpu_ != nullptr);
            memset(cpu_, 0, size);
        }
        return cpu_;
    }

    void MixMemory::release_cpu() {
        if (cpu_) {
            if(owner_cpu_){
                AutoDevice auto_device_exchange(device_id_);
                checkCudaRuntime(hipHostFree(cpu_));
            }
            cpu_ = nullptr;
        }
        cpu_size_ = 0;
    }

    void MixMemory::release_gpu() {
        if (gpu_) {
            if(owner_gpu_){
                AutoDevice auto_device_exchange(device_id_);
                checkCudaRuntime(hipFree(gpu_));
            }
            gpu_ = nullptr;
        }
        gpu_size_ = 0;
    }

    void MixMemory::release_all() {
        release_cpu();
        release_gpu();
    }

    /////////////////////////////////class Tensor////////////////////////////////////////////////
    /* Tensor: memory management for NN input/output, with lazy allocation and automatic
       host/device synchronization. save_to_file writes a binary blob that can be loaded
       back from python (see the snippet above save_to_file). */
    enum class DataHead : int{
        Init   = 0,
        Device = 1,
        Host   = 2
    };

    class Tensor {
    public:
        Tensor(const Tensor& other) = delete;
        Tensor& operator = (const Tensor& other) = delete;

        explicit Tensor(std::shared_ptr<MixMemory> data = nullptr, int device_id = CURRENT_DEVICE_ID);
        explicit Tensor(int n, int c, int h, int w, std::shared_ptr<MixMemory> data = nullptr, int device_id = CURRENT_DEVICE_ID);
        explicit Tensor(int ndims, const int* dims, std::shared_ptr<MixMemory> data = nullptr, int device_id = CURRENT_DEVICE_ID);
        explicit Tensor(const std::vector<int>& dims, std::shared_ptr<MixMemory> data = nullptr, int device_id = CURRENT_DEVICE_ID);
        virtual ~Tensor();

        int numel() const;
        inline int ndims() const{return shape_.size();}
        inline int size(int index) const{return shape_[index];}
inline int shape(int index) const{return shape_[index];} inline int batch() const{return shape_[0];} inline int channel() const{return shape_[1];} inline int height() const{return shape_[2];} inline int width() const{return shape_[3];} inline const std::vector<int>& dims() const { return shape_; } inline int bytes() const { return bytes_; } inline int bytes(int start_axis) const { return count(start_axis) * element_size(); } inline int element_size() const { return sizeof(float); } inline DataHead head() const { return head_; } std::shared_ptr<Tensor> clone() const; Tensor& release(); Tensor& set_to(float value); bool empty() const; template<typename ... _Args> int offset(int index, _Args ... index_args) const{ const int index_array[] = {index, index_args...}; return offset_array(sizeof...(index_args) + 1, index_array); } int offset_array(const std::vector<int>& index) const; int offset_array(size_t size, const int* index_array) const; template<typename ... _Args> Tensor& resize(int dim_size, _Args ... dim_size_args){ const int dim_size_array[] = {dim_size, dim_size_args...}; return resize(sizeof...(dim_size_args) + 1, dim_size_array); } Tensor& resize(int ndims, const int* dims); Tensor& resize(const std::vector<int>& dims); Tensor& resize_single_dim(int idim, int size); int count(int start_axis = 0) const; int device() const{return device_id_;} Tensor& to_gpu(bool copy=true); Tensor& to_cpu(bool copy=true); inline void* cpu() const { ((Tensor*)this)->to_cpu(); return data_->cpu(); } inline void* gpu() const { ((Tensor*)this)->to_gpu(); return data_->gpu(); } template<typename DType> inline const DType* cpu() const { return (DType*)cpu(); } template<typename DType> inline DType* cpu() { return (DType*)cpu(); } template<typename DType, typename ... _Args> inline DType* cpu(int i, _Args&& ... args) { return cpu<DType>() + offset(i, args...); } template<typename DType> inline const DType* gpu() const { return (DType*)gpu(); } template<typename DType> inline DType* gpu() { return (DType*)gpu(); } template<typename DType, typename ... _Args> inline DType* gpu(int i, _Args&& ... args) { return gpu<DType>() + offset(i, args...); } template<typename DType, typename ... _Args> inline DType& at(int i, _Args&& ... args) { return *(cpu<DType>() + offset(i, args...)); } std::shared_ptr<MixMemory> get_data() const {return data_;} std::shared_ptr<MixMemory> get_workspace() const {return workspace_;} Tensor& set_workspace(std::shared_ptr<MixMemory> workspace) {workspace_ = workspace; return *this;} hipStream_t get_stream() const{return stream_;} Tensor& set_stream(hipStream_t stream){stream_ = stream; return *this;} Tensor& set_mat (int n, const cv::Mat& image); Tensor& set_norm_mat(int n, const cv::Mat& image, float mean[3], float std[3]); cv::Mat at_mat(int n = 0, int c = 0) { return cv::Mat(height(), width(), CV_32F, cpu<float>(n, c)); } Tensor& synchronize(); const char* shape_string() const{return shape_string_;} const char* descriptor() const; Tensor& copy_from_gpu(size_t offset, const void* src, size_t num_element, int device_id = CURRENT_DEVICE_ID); /** # pythonTensor import numpy as np def load_tensor(file): with open(file, "rb") as f: binary_data = f.read() magic_number, ndims, dtype = np.frombuffer(binary_data, np.uint32, count=3, offset=0) assert magic_number == 0xFCCFE2E2, f"{file} not a tensor file." 
dims = np.frombuffer(binary_data, np.uint32, count=ndims, offset=3 * 4) if dtype == 0: np_dtype = np.float32 elif dtype == 1: np_dtype = np.float16 else: assert False, f"Unsupport dtype = {dtype}, can not convert to numpy dtype" return np.frombuffer(binary_data, np_dtype, offset=(ndims + 3) * 4).reshape(*dims) **/ bool save_to_file(const std::string& file) const; private: Tensor& compute_shape_string(); Tensor& adajust_memory_by_update_dims_or_type(); void setup_data(std::shared_ptr<MixMemory> data); private: std::vector<int> shape_; size_t bytes_ = 0; DataHead head_ = DataHead::Init; hipStream_t stream_ = nullptr; int device_id_ = 0; char shape_string_[100]; char descriptor_string_[100]; std::shared_ptr<MixMemory> data_; std::shared_ptr<MixMemory> workspace_; }; Tensor::Tensor(int n, int c, int h, int w, shared_ptr<MixMemory> data, int device_id) { this->device_id_ = get_device(device_id); descriptor_string_[0] = 0; setup_data(data); resize(n, c, h, w); } Tensor::Tensor(const std::vector<int>& dims, shared_ptr<MixMemory> data, int device_id){ this->device_id_ = get_device(device_id); descriptor_string_[0] = 0; setup_data(data); resize(dims); } Tensor::Tensor(int ndims, const int* dims, shared_ptr<MixMemory> data, int device_id) { this->device_id_ = get_device(device_id); descriptor_string_[0] = 0; setup_data(data); resize(ndims, dims); } Tensor::Tensor(shared_ptr<MixMemory> data, int device_id){ shape_string_[0] = 0; descriptor_string_[0] = 0; this->device_id_ = get_device(device_id); setup_data(data); } Tensor::~Tensor() { release(); } const char* Tensor::descriptor() const{ char* descriptor_ptr = (char*)descriptor_string_; int device_id = device(); snprintf(descriptor_ptr, sizeof(descriptor_string_), "Tensor:%p, %s, CUDA:%d", data_.get(), shape_string_, device_id ); return descriptor_ptr; } Tensor& Tensor::compute_shape_string(){ // clean string shape_string_[0] = 0; char* buffer = shape_string_; size_t buffer_size = sizeof(shape_string_); for(int i = 0; i < shape_.size(); ++i){ int size = 0; if(i < shape_.size() - 1) size = snprintf(buffer, buffer_size, "%d x ", shape_[i]); else size = snprintf(buffer, buffer_size, "%d", shape_[i]); buffer += size; buffer_size -= size; } return *this; } void Tensor::setup_data(shared_ptr<MixMemory> data){ data_ = data; if(data_ == nullptr){ data_ = make_shared<MixMemory>(device_id_); }else{ device_id_ = data_->device_id(); } head_ = DataHead::Init; if(data_->cpu()){ head_ = DataHead::Host; } if(data_->gpu()){ head_ = DataHead::Device; } } Tensor& Tensor::copy_from_gpu(size_t offset, const void* src, size_t num_element, int device_id){ if(head_ == DataHead::Init) to_gpu(false); size_t offset_location = offset * element_size(); if(offset_location >= bytes_){ INFOE("Offset location[%lld] >= bytes_[%lld], out of range", offset_location, bytes_); return *this; } size_t copyed_bytes = num_element * element_size(); size_t remain_bytes = bytes_ - offset_location; if(copyed_bytes > remain_bytes){ INFOE("Copyed bytes[%lld] > remain bytes[%lld], out of range", copyed_bytes, remain_bytes); return *this; } if(head_ == DataHead::Device){ int current_device_id = get_device(device_id); int gpu_device_id = device(); if(current_device_id != gpu_device_id){ checkCudaRuntime(hipMemcpyPeerAsync(gpu<unsigned char>() + offset_location, gpu_device_id, src, current_device_id, copyed_bytes, stream_)); //checkCudaRuntime(hipMemcpyAsync(gpu<unsigned char>() + offset_location, src, copyed_bytes, hipMemcpyDeviceToDevice, stream_)); } else{ 
checkCudaRuntime(hipMemcpyAsync(gpu<unsigned char>() + offset_location, src, copyed_bytes, hipMemcpyDeviceToDevice, stream_)); } }else if(head_ == DataHead::Host){ AutoDevice auto_device_exchange(this->device()); checkCudaRuntime(hipMemcpyAsync(cpu<unsigned char>() + offset_location, src, copyed_bytes, hipMemcpyDeviceToHost, stream_)); }else{ INFOE("Unsupport head type %d", head_); } return *this; } Tensor& Tensor::release() { data_->release_all(); shape_.clear(); bytes_ = 0; head_ = DataHead::Init; return *this; } bool Tensor::empty() const{ return data_->cpu() == nullptr && data_->gpu() == nullptr; } int Tensor::count(int start_axis) const { if(start_axis >= 0 && start_axis < shape_.size()){ int size = 1; for (int i = start_axis; i < shape_.size(); ++i) size *= shape_[i]; return size; }else{ return 0; } } Tensor& Tensor::resize(const std::vector<int>& dims) { return resize(dims.size(), dims.data()); } int Tensor::numel() const{ int value = shape_.empty() ? 0 : 1; for(int i = 0; i < shape_.size(); ++i){ value *= shape_[i]; } return value; } Tensor& Tensor::resize_single_dim(int idim, int size){ Assert(idim >= 0 && idim < shape_.size()); auto new_shape = shape_; new_shape[idim] = size; return resize(new_shape); } Tensor& Tensor::resize(int ndims, const int* dims) { vector<int> setup_dims(ndims); for(int i = 0; i < ndims; ++i){ int dim = dims[i]; if(dim == -1){ Assert(ndims == shape_.size()); dim = shape_[i]; } setup_dims[i] = dim; } this->shape_ = setup_dims; this->adajust_memory_by_update_dims_or_type(); this->compute_shape_string(); return *this; } Tensor& Tensor::adajust_memory_by_update_dims_or_type(){ int needed_size = this->numel() * element_size(); if(needed_size > this->bytes_){ head_ = DataHead::Init; } this->bytes_ = needed_size; return *this; } Tensor& Tensor::synchronize(){ AutoDevice auto_device_exchange(this->device()); checkCudaRuntime(hipStreamSynchronize(stream_)); return *this; } Tensor& Tensor::to_gpu(bool copy) { if (head_ == DataHead::Device) return *this; head_ = DataHead::Device; data_->gpu(bytes_); if (copy && data_->cpu() != nullptr) { AutoDevice auto_device_exchange(this->device()); checkCudaRuntime(hipMemcpyAsync(data_->gpu(), data_->cpu(), bytes_, hipMemcpyHostToDevice, stream_)); } return *this; } Tensor& Tensor::to_cpu(bool copy) { if (head_ == DataHead::Host) return *this; head_ = DataHead::Host; data_->cpu(bytes_); if (copy && data_->gpu() != nullptr) { AutoDevice auto_device_exchange(this->device()); checkCudaRuntime(hipMemcpyAsync(data_->cpu(), data_->gpu(), bytes_, hipMemcpyDeviceToHost, stream_)); checkCudaRuntime(hipStreamSynchronize(stream_)); } return *this; } int Tensor::offset_array(size_t size, const int* index_array) const{ Assert(size <= shape_.size()); int value = 0; for(int i = 0; i < shape_.size(); ++i){ if(i < size) value += index_array[i]; if(i + 1 < shape_.size()) value *= shape_[i+1]; } return value; } int Tensor::offset_array(const std::vector<int>& index_array) const{ return offset_array(index_array.size(), index_array.data()); } bool Tensor::save_to_file(const std::string& file) const{ if(empty()) return false; FILE* f = fopen(file.c_str(), "wb"); if(f == nullptr) return false; int ndims = this->ndims(); int dtype_ = 0; unsigned int head[3] = {0xFCCFE2E2, ndims, static_cast<unsigned int>(dtype_)}; fwrite(head, 1, sizeof(head), f); fwrite(shape_.data(), 1, sizeof(shape_[0]) * shape_.size(), f); fwrite(cpu(), 1, bytes_, f); fclose(f); return true; } /////////////////////////////////class 
TRTInferImpl//////////////////////////////////////////////// class Logger : public ILogger { public: virtual void log(Severity severity, const char* msg) noexcept override { if (severity == Severity::kINTERNAL_ERROR) { INFOE("NVInfer INTERNAL_ERROR: %s", msg); abort(); }else if (severity == Severity::kERROR) { INFOE("NVInfer: %s", msg); } else if (severity == Severity::kWARNING) { INFOW("NVInfer: %s", msg); } else if (severity == Severity::kINFO) { INFOD("NVInfer: %s", msg); } else { INFOD("%s", msg); } } }; static Logger gLogger; template<typename _T> static void destroy_nvidia_pointer(_T* ptr) { if (ptr) ptr->destroy(); } class EngineContext { public: virtual ~EngineContext() { destroy(); } void set_stream(hipStream_t stream){ if(owner_stream_){ if (stream_) {hipStreamDestroy(stream_);} owner_stream_ = false; } stream_ = stream; } bool build_model(const void* pdata, size_t size) { destroy(); if(pdata == nullptr || size == 0) return false; owner_stream_ = true; checkCudaRuntime(hipStreamCreate(&stream_)); if(stream_ == nullptr) return false; runtime_ = shared_ptr<IRuntime>(createInferRuntime(gLogger), destroy_nvidia_pointer<IRuntime>); if (runtime_ == nullptr) return false; engine_ = shared_ptr<ICudaEngine>(runtime_->deserializeCudaEngine(pdata, size, nullptr), destroy_nvidia_pointer<ICudaEngine>); if (engine_ == nullptr) return false; //runtime_->setDLACore(0); context_ = shared_ptr<IExecutionContext>(engine_->createExecutionContext(), destroy_nvidia_pointer<IExecutionContext>); return context_ != nullptr; } private: void destroy() { context_.reset(); engine_.reset(); runtime_.reset(); if(owner_stream_){ if (stream_) {hipStreamDestroy(stream_);} } stream_ = nullptr; } public: hipStream_t stream_ = nullptr; bool owner_stream_ = false; shared_ptr<IExecutionContext> context_; shared_ptr<ICudaEngine> engine_; shared_ptr<IRuntime> runtime_ = nullptr; }; class TRTInferImpl{ public: virtual ~TRTInferImpl(); bool load(const std::string& file); bool load_from_memory(const void* pdata, size_t size); void destroy(); void forward(bool sync); int get_max_batch_size(); hipStream_t get_stream(); void set_stream(hipStream_t stream); void synchronize(); size_t get_device_memory_size(); std::shared_ptr<MixMemory> get_workspace(); std::shared_ptr<Tensor> input(int index = 0); std::string get_input_name(int index = 0); std::shared_ptr<Tensor> output(int index = 0); std::string get_output_name(int index = 0); std::shared_ptr<Tensor> tensor(const std::string& name); bool is_output_name(const std::string& name); bool is_input_name(const std::string& name); void set_input (int index, std::shared_ptr<Tensor> tensor); void set_output(int index, std::shared_ptr<Tensor> tensor); std::shared_ptr<std::vector<uint8_t>> serial_engine(); void print(); int num_output(); int num_input(); int device(); private: void build_engine_input_and_outputs_mapper(); private: std::vector<std::shared_ptr<Tensor>> inputs_; std::vector<std::shared_ptr<Tensor>> outputs_; std::vector<int> inputs_map_to_ordered_index_; std::vector<int> outputs_map_to_ordered_index_; std::vector<std::string> inputs_name_; std::vector<std::string> outputs_name_; std::vector<std::shared_ptr<Tensor>> orderdBlobs_; std::map<std::string, int> blobsNameMapper_; std::shared_ptr<EngineContext> context_; std::vector<void*> bindingsPtr_; std::shared_ptr<MixMemory> workspace_; int device_ = 0; }; //////////////////////////////////////////////////////////////////////////////////// TRTInferImpl::~TRTInferImpl(){ destroy(); } void TRTInferImpl::destroy() { int old_device 
= 0; checkCudaRuntime(hipGetDevice(&old_device)); checkCudaRuntime(hipSetDevice(device_)); this->context_.reset(); this->blobsNameMapper_.clear(); this->outputs_.clear(); this->inputs_.clear(); this->inputs_name_.clear(); this->outputs_name_.clear(); checkCudaRuntime(hipSetDevice(old_device)); } void TRTInferImpl::print(){ if(!context_){ INFOW("Infer print, nullptr."); return; } INFO("Infer %p detail", this); INFO("\tMax Batch Size: %d", this->get_max_batch_size()); INFO("\tInputs: %d", inputs_.size()); for(int i = 0; i < inputs_.size(); ++i){ auto& tensor = inputs_[i]; auto& name = inputs_name_[i]; INFO("\t\t%d.%s : shape {%s}", i, name.c_str(), tensor->shape_string()); } INFO("\tOutputs: %d", outputs_.size()); for(int i = 0; i < outputs_.size(); ++i){ auto& tensor = outputs_[i]; auto& name = outputs_name_[i]; INFO("\t\t%d.%s : shape {%s}", i, name.c_str(), tensor->shape_string()); } } std::shared_ptr<std::vector<uint8_t>> TRTInferImpl::serial_engine() { auto memory = this->context_->engine_->serialize(); auto output = make_shared<std::vector<uint8_t>>((uint8_t*)memory->data(), (uint8_t*)memory->data()+memory->size()); memory->destroy(); return output; } bool TRTInferImpl::load_from_memory(const void* pdata, size_t size) { if (pdata == nullptr || size == 0) return false; context_.reset(new EngineContext()); //build model if (!context_->build_model(pdata, size)) { context_.reset(); return false; } workspace_.reset(new MixMemory()); hipGetDevice(&device_); build_engine_input_and_outputs_mapper(); return true; } static std::vector<uint8_t> load_file(const string& file){ ifstream in(file, ios::in | ios::binary); if (!in.is_open()) return {}; in.seekg(0, ios::end); size_t length = in.tellg(); std::vector<uint8_t> data; if (length > 0){ in.seekg(0, ios::beg); data.resize(length); in.read((char*)&data[0], length); } in.close(); return data; } bool TRTInferImpl::load(const std::string& file) { auto data = load_file(file); if (data.empty()) return false; context_.reset(new EngineContext()); //build model if (!context_->build_model(data.data(), data.size())) { context_.reset(); return false; } workspace_.reset(new MixMemory()); hipGetDevice(&device_); build_engine_input_and_outputs_mapper(); return true; } size_t TRTInferImpl::get_device_memory_size() { EngineContext* context = (EngineContext*)this->context_.get(); return context->context_->getEngine().getDeviceMemorySize(); } void TRTInferImpl::build_engine_input_and_outputs_mapper() { EngineContext* context = (EngineContext*)this->context_.get(); int nbBindings = context->engine_->getNbBindings(); int max_batchsize = context->engine_->getMaxBatchSize(); inputs_.clear(); inputs_name_.clear(); outputs_.clear(); outputs_name_.clear(); orderdBlobs_.clear(); bindingsPtr_.clear(); blobsNameMapper_.clear(); for (int i = 0; i < nbBindings; ++i) { auto dims = context->engine_->getBindingDimensions(i); auto type = context->engine_->getBindingDataType(i); const char* bindingName = context->engine_->getBindingName(i); dims.d[0] = max_batchsize; auto newTensor = make_shared<Tensor>(dims.nbDims, dims.d); newTensor->set_stream(this->context_->stream_); newTensor->set_workspace(this->workspace_); if (context->engine_->bindingIsInput(i)) { //if is input inputs_.push_back(newTensor); inputs_name_.push_back(bindingName); inputs_map_to_ordered_index_.push_back(orderdBlobs_.size()); } else { //if is output outputs_.push_back(newTensor); outputs_name_.push_back(bindingName); outputs_map_to_ordered_index_.push_back(orderdBlobs_.size()); } blobsNameMapper_[bindingName] 
= i; orderdBlobs_.push_back(newTensor); } bindingsPtr_.resize(orderdBlobs_.size()); } void TRTInferImpl::set_stream(hipStream_t stream){ this->context_->set_stream(stream); for(auto& t : orderdBlobs_) t->set_stream(stream); } hipStream_t TRTInferImpl::get_stream() { return this->context_->stream_; } int TRTInferImpl::device() { return device_; } void TRTInferImpl::synchronize() { checkCudaRuntime(hipStreamSynchronize(context_->stream_)); } bool TRTInferImpl::is_output_name(const std::string& name){ return std::find(outputs_name_.begin(), outputs_name_.end(), name) != outputs_name_.end(); } bool TRTInferImpl::is_input_name(const std::string& name){ return std::find(inputs_name_.begin(), inputs_name_.end(), name) != inputs_name_.end(); } void TRTInferImpl::forward(bool sync) { EngineContext* context = (EngineContext*)context_.get(); int inputBatchSize = inputs_[0]->size(0); for(int i = 0; i < context->engine_->getNbBindings(); ++i){ auto dims = context->engine_->getBindingDimensions(i); auto type = context->engine_->getBindingDataType(i); dims.d[0] = inputBatchSize; if(context->engine_->bindingIsInput(i)){ context->context_->setBindingDimensions(i, dims); } } for (int i = 0; i < outputs_.size(); ++i) { outputs_[i]->resize_single_dim(0, inputBatchSize); outputs_[i]->to_gpu(false); } for (int i = 0; i < orderdBlobs_.size(); ++i) bindingsPtr_[i] = orderdBlobs_[i]->gpu(); void** bindingsptr = bindingsPtr_.data(); //bool execute_result = context->context_->enqueue(inputBatchSize, bindingsptr, context->stream_, nullptr); bool execute_result = context->context_->enqueueV2(bindingsptr, context->stream_, nullptr); if(!execute_result){ auto code = hipGetLastError(); INFOF("execute fail, code %d[%s], message %s", code, hipGetErrorName(code), hipGetErrorString(code)); } if (sync) { synchronize(); } } std::shared_ptr<MixMemory> TRTInferImpl::get_workspace() { return workspace_; } int TRTInferImpl::num_input() { return this->inputs_.size(); } int TRTInferImpl::num_output() { return this->outputs_.size(); } void TRTInferImpl::set_input (int index, std::shared_ptr<Tensor> tensor){ Assert(index >= 0 && index < inputs_.size()); this->inputs_[index] = tensor; int order_index = inputs_map_to_ordered_index_[index]; this->orderdBlobs_[order_index] = tensor; } void TRTInferImpl::set_output(int index, std::shared_ptr<Tensor> tensor){ Assert(index >= 0 && index < outputs_.size()); this->outputs_[index] = tensor; int order_index = outputs_map_to_ordered_index_[index]; this->orderdBlobs_[order_index] = tensor; } std::shared_ptr<Tensor> TRTInferImpl::input(int index) { Assert(index >= 0 && index < inputs_name_.size()); return this->inputs_[index]; } std::string TRTInferImpl::get_input_name(int index){ Assert(index >= 0 && index < inputs_name_.size()); return inputs_name_[index]; } std::shared_ptr<Tensor> TRTInferImpl::output(int index) { Assert(index >= 0 && index < outputs_.size()); return outputs_[index]; } std::string TRTInferImpl::get_output_name(int index){ Assert(index >= 0 && index < outputs_name_.size()); return outputs_name_[index]; } int TRTInferImpl::get_max_batch_size() { Assert(this->context_ != nullptr); return this->context_->engine_->getMaxBatchSize(); } std::shared_ptr<Tensor> TRTInferImpl::tensor(const std::string& name) { Assert(this->blobsNameMapper_.find(name) != this->blobsNameMapper_.end()); return orderdBlobs_[blobsNameMapper_[name]]; } std::shared_ptr<TRTInferImpl> load_infer(const string& file) { std::shared_ptr<TRTInferImpl> infer(new TRTInferImpl()); if (!infer->load(file)) infer.reset(); 
        return infer;
    }

    //////////////////////////////class MonopolyAllocator//////////////////////////////////////
    /* Monopoly allocator: manages exclusive ownership of tensors. The engine keeps
       max_batch_size * 2 tensors alive; query() hands one out exclusively (blocking while
       none is available) and release() returns it to the pool, so the preprocessing
       memory is reused across images. */
    template<class _ItemType>
    class MonopolyAllocator{
    public:
        class MonopolyData{
        public:
            std::shared_ptr<_ItemType>& data(){ return data_; }
            void release(){manager_->release_one(this);}

        private:
            MonopolyData(MonopolyAllocator* pmanager){manager_ = pmanager;}

        private:
            friend class MonopolyAllocator;
            MonopolyAllocator* manager_ = nullptr;
            std::shared_ptr<_ItemType> data_;
            bool available_ = true;
        };
        typedef std::shared_ptr<MonopolyData> MonopolyDataPointer;

        MonopolyAllocator(int size){
            capacity_ = size;
            num_available_ = size;
            datas_.resize(size);

            for(int i = 0; i < size; ++i)
                datas_[i] = std::shared_ptr<MonopolyData>(new MonopolyData(this));
        }

        virtual ~MonopolyAllocator(){
            run_ = false;
            cv_.notify_all();

            std::unique_lock<std::mutex> l(lock_);
            cv_exit_.wait(l, [&](){
                return num_wait_thread_ == 0;
            });
        }

        MonopolyDataPointer query(int timeout = 10000){

            std::unique_lock<std::mutex> l(lock_);
            if(!run_) return nullptr;

            if(num_available_ == 0){
                num_wait_thread_++;

                auto state = cv_.wait_for(l, std::chrono::milliseconds(timeout), [&](){
                    return num_available_ > 0 || !run_;
                });

                num_wait_thread_--;
                cv_exit_.notify_one();

                // timed out, still nothing available, or shutting down: give up
                if(!state || num_available_ == 0 || !run_)
                    return nullptr;
            }

            auto item = std::find_if(datas_.begin(), datas_.end(), [](MonopolyDataPointer& item){return item->available_;});
            if(item == datas_.end())
                return nullptr;

            (*item)->available_ = false;
            num_available_--;
            return *item;
        }

        int num_available(){
            return num_available_;
        }

        int capacity(){
            return capacity_;
        }

    private:
        void release_one(MonopolyData* prq){
            std::unique_lock<std::mutex> l(lock_);
            if(!prq->available_){
                prq->available_ = true;
                num_available_++;
                cv_.notify_one();
            }
        }

    private:
        std::mutex lock_;
        std::condition_variable cv_;
        std::condition_variable cv_exit_;
        std::vector<MonopolyDataPointer> datas_;
        int capacity_ = 0;
        volatile int num_available_ = 0;
        volatile int num_wait_thread_ = 0;
        volatile bool run_ = true;
    };

    /////////////////////////////////////////class ThreadSafedAsyncInfer/////////////////////////////////////////////
    /* Thread-safe asynchronous inferer: commit() enqueues a job and the caller retrieves
       the result through a future. */
    template<class Input, class Output, class StartParam=std::tuple<std::string, int>, class JobAdditional=int>
    class ThreadSafedAsyncInfer{
    public:
        struct Job{
            Input input;
            Output output;
            JobAdditional additional;
            MonopolyAllocator<Tensor>::MonopolyDataPointer mono_tensor;
            std::shared_ptr<std::promise<Output>> pro;
        };

        virtual ~ThreadSafedAsyncInfer(){
            stop();
        }

        void stop(){
            run_ = false;
            cond_.notify_all();

            ////////////////////////////////////////// cleanup jobs
            {
                std::unique_lock<std::mutex> l(jobs_lock_);
                while(!jobs_.empty()){
                    auto& item = jobs_.front();
                    if(item.pro)
                        item.pro->set_value(Output());
                    jobs_.pop();
                }
            };

            if(worker_){
                worker_->join();
                worker_.reset();
            }
        }

        bool startup(const StartParam& param){
            run_ = true;

            std::promise<bool> pro;
            start_param_ = param;
            worker_ = std::make_shared<std::thread>(&ThreadSafedAsyncInfer::worker, this, std::ref(pro));
            return pro.get_future().get();
        }

        virtual std::shared_future<Output> commit(const Input& input){

            Job job;
            job.pro = std::make_shared<std::promise<Output>>();
            if(!preprocess(job, input)){
                job.pro->set_value(Output());
                return job.pro->get_future();
            }

            ///////////////////////////////////////////////////////////
            {
                std::unique_lock<std::mutex> l(jobs_lock_);
                jobs_.push(job);
            };
            cond_.notify_one();
            return job.pro->get_future();
        }

        virtual
        std::vector<std::shared_future<Output>> commits(const std::vector<Input>& inputs){

            int batch_size = ::min((int)inputs.size(), this->tensor_allocator_->capacity());
            std::vector<Job> jobs(inputs.size());
            std::vector<std::shared_future<Output>> results(inputs.size());

            int nepoch = (inputs.size() + batch_size - 1) / batch_size;
            for(int epoch = 0; epoch < nepoch; ++epoch){
                int begin = epoch * batch_size;
                int end   = ::min((int)inputs.size(), begin + batch_size);

                for(int i = begin; i < end; ++i){
                    Job& job = jobs[i];
                    job.pro = std::make_shared<std::promise<Output>>();
                    if(!preprocess(job, inputs[i])){
                        job.pro->set_value(Output());
                    }
                    results[i] = job.pro->get_future();
                }

                ///////////////////////////////////////////////////////////
                {
                    std::unique_lock<std::mutex> l(jobs_lock_);
                    for(int i = begin; i < end; ++i){
                        jobs_.emplace(std::move(jobs[i]));
                    };
                }
                cond_.notify_one();
            }
            return results;
        }

    protected:
        virtual void worker(std::promise<bool>& result) = 0;
        virtual bool preprocess(Job& job, const Input& input) = 0;

        virtual bool get_jobs_and_wait(std::vector<Job>& fetch_jobs, int max_size){

            std::unique_lock<std::mutex> l(jobs_lock_);
            cond_.wait(l, [&](){
                return !run_ || !jobs_.empty();
            });

            if(!run_) return false;

            fetch_jobs.clear();
            for(int i = 0; i < max_size && !jobs_.empty(); ++i){
                fetch_jobs.emplace_back(std::move(jobs_.front()));
                jobs_.pop();
            }
            return true;
        }

        virtual bool get_job_and_wait(Job& fetch_job){

            std::unique_lock<std::mutex> l(jobs_lock_);
            cond_.wait(l, [&](){
                return !run_ || !jobs_.empty();
            });

            if(!run_) return false;

            fetch_job = std::move(jobs_.front());
            jobs_.pop();
            return true;
        }

    protected:
        StartParam start_param_;
        std::atomic<bool> run_;
        std::mutex jobs_lock_;
        std::queue<Job> jobs_;
        std::shared_ptr<std::thread> worker_;
        std::condition_variable cond_;
        std::shared_ptr<MonopolyAllocator<Tensor>> tensor_allocator_;
    };

    ///////////////////////////////////class YoloTRTInferImpl//////////////////////////////////////
    /* Concrete Yolo implementation: built on the classes above, preprocessing runs
       asynchronously across threads and overlaps with inference, and queued images are
       packed into a single batch for the engine. */
    const char* type_name(Type type){
        switch(type){
        case Type::V5: return "YoloV5";
        case Type::X: return "YoloX";
        default: return "Unknown";
        }
    }

    struct AffineMatrix{
        float i2d[6];       // image to dst(network), 2x3 matrix
        float d2i[6];       // dst to image, 2x3 matrix

        void compute(const cv::Size& from, const cv::Size& to){
            float scale_x = to.width / (float)from.width;
            float scale_y = to.height / (float)from.height;
            float scale = ::min(scale_x, scale_y);
            i2d[0] = scale;  i2d[1] = 0;  i2d[2] = -scale * from.width  * 0.5 + to.width  * 0.5 + scale * 0.5 - 0.5;
            i2d[3] = 0;  i2d[4] = scale;  i2d[5] = -scale * from.height * 0.5 + to.height * 0.5 + scale * 0.5 - 0.5;

            cv::Mat m2x3_i2d(2, 3, CV_32F, i2d);
            cv::Mat m2x3_d2i(2, 3, CV_32F, d2i);
            cv::invertAffineTransform(m2x3_i2d, m2x3_d2i);
        }

        cv::Mat i2d_mat(){
            return cv::Mat(2, 3, CV_32F, i2d);
        }
    };

    using ThreadSafedAsyncInferImpl = ThreadSafedAsyncInfer
    <
        cv::Mat,                // input
        BoxArray,               // output
        tuple<string, int>,     // start param
        AffineMatrix            // additional
    >;

    class YoloTRTInferImpl : public Infer, public ThreadSafedAsyncInferImpl{
    public:
        /** The destructor must call stop() here, in the derived class that owns the worker,
            rather than relying on the base class: the worker thread uses members of this
            class, so it has to be joined while this object is still fully alive. **/
        virtual ~YoloTRTInferImpl(){
            stop();
        }

        virtual bool startup(const string& file, Type type, int gpuid, float confidence_threshold, float nms_threshold){

            if(type == Type::V5){
                normalize_ = Norm::alpha_beta(1 / 255.0f, 0.0f, ChannelType::SwapRB);
            }else if(type == Type::X){
                //float mean[] = {0.485, 0.456, 0.406};
                //float std[]  = {0.229, 0.224, 0.225};
                //normalize_ = Norm::mean_std(mean, std, 1/255.0f, ChannelType::Invert);
                normalize_ = Norm::None();
            }else{
                INFOE("Unsupported type %d", type);
            }

            confidence_threshold_ = confidence_threshold;
            nms_threshold_ = nms_threshold;
            return ThreadSafedAsyncInferImpl::startup(make_tuple(file, gpuid));
        }

        virtual void worker(promise<bool>& result) override{

            string file = get<0>(start_param_);
            int gpuid   = get<1>(start_param_);

            set_device(gpuid);
            auto engine = load_infer(file);
            if(engine == nullptr){
                INFOE("Engine %s load failed", file.c_str());
                result.set_value(false);
                return;
            }

            engine->print();

            const int MAX_IMAGE_BBOX  = 1024;
            const int NUM_BOX_ELEMENT = 7;      // left, top, right, bottom, confidence, class, keepflag
            Tensor affin_matrix_device;
            Tensor output_array_device;
            int max_batch_size = engine->get_max_batch_size();
            auto input         = engine->tensor("images");
            auto output        = engine->tensor("output");
            int num_classes    = output->size(2) - 5;

            input_width_       = input->size(3);
            input_height_      = input->size(2);
            tensor_allocator_  = make_shared<MonopolyAllocator<Tensor>>(max_batch_size * 2);
            stream_            = engine->get_stream();
            gpu_               = gpuid;
            result.set_value(true);

            input->resize_single_dim(0, max_batch_size).to_gpu();
            affin_matrix_device.set_stream(stream_);

            // pad each matrix row to 8 floats (only 6 are used) so that
            // 8 * sizeof(float) % 32 == 0 and every matrix stays 32-byte aligned
            affin_matrix_device.resize(max_batch_size, 8).to_gpu();

            // per-image layout is [counter, bboxes...]: 1 float box counter
            // followed by MAX_IMAGE_BBOX * NUM_BOX_ELEMENT floats
            output_array_device.resize(max_batch_size, 1 + MAX_IMAGE_BBOX * NUM_BOX_ELEMENT).to_gpu();

            vector<Job> fetch_jobs;
            while(get_jobs_and_wait(fetch_jobs, max_batch_size)){

                int infer_batch_size = fetch_jobs.size();
                input->resize_single_dim(0, infer_batch_size);

                for(int ibatch = 0; ibatch < infer_batch_size; ++ibatch){
                    auto& job  = fetch_jobs[ibatch];
                    auto& mono = job.mono_tensor->data();
                    affin_matrix_device.copy_from_gpu(affin_matrix_device.offset(ibatch), mono->get_workspace()->gpu(), 6);
                    input->copy_from_gpu(input->offset(ibatch), mono->gpu(), mono->count());
                    job.mono_tensor->release();
                }

                engine->forward(false);
                output_array_device.to_gpu(false);
                for(int ibatch = 0; ibatch < infer_batch_size; ++ibatch){
                    auto& job                 = fetch_jobs[ibatch];
                    float* image_based_output = output->gpu<float>(ibatch);
                    float* output_array_ptr   = output_array_device.gpu<float>(ibatch);
                    auto affine_matrix        = affin_matrix_device.gpu<float>(ibatch);
                    checkCudaRuntime(hipMemsetAsync(output_array_ptr, 0, sizeof(int), stream_));
                    decode_kernel_invoker(image_based_output, output->size(1), num_classes, confidence_threshold_, nms_threshold_, affine_matrix, output_array_ptr, MAX_IMAGE_BBOX, stream_);
                }

                output_array_device.to_cpu();
                for(int ibatch = 0; ibatch < infer_batch_size; ++ibatch){
                    float* parray = output_array_device.cpu<float>(ibatch);
                    int count = min(MAX_IMAGE_BBOX, (int)*parray);
                    auto& job = fetch_jobs[ibatch];
                    auto& image_based_boxes = job.output;
                    for(int i = 0; i < count; ++i){
                        float* pbox  = parray + 1 + i * NUM_BOX_ELEMENT;
                        int label    = pbox[5];
                        int keepflag = pbox[6];
                        if(keepflag == 1){
                            image_based_boxes.emplace_back(pbox[0], pbox[1], pbox[2], pbox[3], pbox[4], label);
                        }
                    }
                    job.pro->set_value(image_based_boxes);
                }
                fetch_jobs.clear();
            }
            stream_ = nullptr;
            tensor_allocator_.reset();
            INFO("Engine destroyed.");
        }

        virtual bool preprocess(Job& job, const Mat& image) override{

            if(tensor_allocator_ == nullptr){
                INFOE("tensor_allocator_ is nullptr");
                return false;
            }

            job.mono_tensor = tensor_allocator_->query();
            if(job.mono_tensor == nullptr){
                INFOE("Tensor allocator query failed.");
                return false;
            }

            AutoDevice auto_device(gpu_);
            auto& tensor = job.mono_tensor->data();
            if(tensor == nullptr){
                // not init
                tensor = make_shared<Tensor>();
                tensor->set_workspace(make_shared<MixMemory>());
            }

            Size input_size(input_width_, input_height_);
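            // compute() below builds the 2x3 letterbox matrix i2d (uniform scale + centering)
            // and its inverse d2i; only d2i is uploaded to the device, because both the warp
            // kernel (dst -> src sampling) and the decode kernel (network -> image box
            // projection) consume the inverse mapping.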
job.additional.compute(image.size(), input_size); tensor->set_stream(stream_); tensor->resize(1, 3, input_height_, input_width_); size_t size_image = image.cols * image.rows * 3; size_t size_matrix = upbound(sizeof(job.additional.d2i), 32); auto workspace = tensor->get_workspace(); uint8_t* gpu_workspace = (uint8_t*)workspace->gpu(size_matrix + size_image); float* affine_matrix_device = (float*)gpu_workspace; uint8_t* image_device = size_matrix + gpu_workspace; uint8_t* cpu_workspace = (uint8_t*)workspace->cpu(size_matrix + size_image); float* affine_matrix_host = (float*)cpu_workspace; uint8_t* image_host = size_matrix + cpu_workspace; //checkCudaRuntime(hipMemcpyAsync(image_host, image.data, size_image, hipMemcpyHostToHost, stream_)); // speed up memcpy(image_host, image.data, size_image); memcpy(affine_matrix_host, job.additional.d2i, sizeof(job.additional.d2i)); checkCudaRuntime(hipMemcpyAsync(image_device, image_host, size_image, hipMemcpyHostToDevice, stream_)); checkCudaRuntime(hipMemcpyAsync(affine_matrix_device, affine_matrix_host, sizeof(job.additional.d2i), hipMemcpyHostToDevice, stream_)); warp_affine_bilinear_and_normalize_plane( image_device, image.cols * 3, image.cols, image.rows, tensor->gpu<float>(), input_width_, input_height_, affine_matrix_device, 114, normalize_, stream_ ); return true; } virtual vector<shared_future<BoxArray>> commits(const vector<Mat>& images) override{ return ThreadSafedAsyncInferImpl::commits(images); } virtual std::shared_future<BoxArray> commit(const Mat& image) override{ return ThreadSafedAsyncInferImpl::commit(image); } private: int input_width_ = 0; int input_height_ = 0; int gpu_ = 0; float confidence_threshold_ = 0; float nms_threshold_ = 0; hipStream_t stream_ = nullptr; Norm normalize_; }; void image_to_tensor(const cv::Mat& image, shared_ptr<Tensor>& tensor, Type type, int ibatch){ Norm normalize; if(type == Type::V5){ normalize = Norm::alpha_beta(1 / 255.0f, 0.0f, ChannelType::SwapRB); }else if(type == Type::X){ //float mean[] = {0.485, 0.456, 0.406}; //float std[] = {0.229, 0.224, 0.225}; //normalize_ = CUDAKernel::Norm::mean_std(mean, std, 1/255.0f, CUDAKernel::ChannelType::Invert); normalize = Norm::None(); }else{ INFOE("Unsupport type %d", type); } Size input_size(tensor->size(3), tensor->size(2)); AffineMatrix affine; affine.compute(image.size(), input_size); size_t size_image = image.cols * image.rows * 3; size_t size_matrix = upbound(sizeof(affine.d2i), 32); auto workspace = tensor->get_workspace(); uint8_t* gpu_workspace = (uint8_t*)workspace->gpu(size_matrix + size_image); float* affine_matrix_device = (float*)gpu_workspace; uint8_t* image_device = size_matrix + gpu_workspace; uint8_t* cpu_workspace = (uint8_t*)workspace->cpu(size_matrix + size_image); float* affine_matrix_host = (float*)cpu_workspace; uint8_t* image_host = size_matrix + cpu_workspace; auto stream = tensor->get_stream(); memcpy(image_host, image.data, size_image); memcpy(affine_matrix_host, affine.d2i, sizeof(affine.d2i)); checkCudaRuntime(hipMemcpyAsync(image_device, image_host, size_image, hipMemcpyHostToDevice, stream)); checkCudaRuntime(hipMemcpyAsync(affine_matrix_device, affine_matrix_host, sizeof(affine.d2i), hipMemcpyHostToDevice, stream)); warp_affine_bilinear_and_normalize_plane( image_device, image.cols * 3, image.cols, image.rows, tensor->gpu<float>(ibatch), input_size.width, input_size.height, affine_matrix_device, 114, normalize, stream ); } shared_ptr<Infer> create_infer(const string& engine_file, Type type, int gpuid, float confidence_threshold, 
float nms_threshold){ shared_ptr<YoloTRTInferImpl> instance(new YoloTRTInferImpl()); if(!instance->startup(engine_file, type, gpuid, confidence_threshold, nms_threshold)){ instance.reset(); } return instance; } //////////////////////////////////////Compile Model///////////////////////////////////////////////////////////// const char* mode_string(Mode type) { switch (type) { case Mode::FP32: return "FP32"; case Mode::FP16: return "FP16"; case Mode::INT8: return "INT8"; default: return "UnknowCompileMode"; } } typedef std::function<void(int current, int count, const std::vector<std::string>& files, std::shared_ptr<Tensor>& tensor)> Int8Process; class Int8EntropyCalibrator : public IInt8EntropyCalibrator2{ public: Int8EntropyCalibrator(const vector<string>& imagefiles, nvinfer1::Dims dims, const Int8Process& preprocess) { Assert(preprocess != nullptr); this->dims_ = dims; this->allimgs_ = imagefiles; this->preprocess_ = preprocess; this->fromCalibratorData_ = false; files_.resize(dims.d[0]); checkCudaRuntime(hipStreamCreate(&stream_)); } Int8EntropyCalibrator(const vector<uint8_t>& entropyCalibratorData, nvinfer1::Dims dims, const Int8Process& preprocess) { Assert(preprocess != nullptr); this->dims_ = dims; this->entropyCalibratorData_ = entropyCalibratorData; this->preprocess_ = preprocess; this->fromCalibratorData_ = true; files_.resize(dims.d[0]); checkCudaRuntime(hipStreamCreate(&stream_)); } virtual ~Int8EntropyCalibrator(){ checkCudaRuntime(hipStreamDestroy(stream_)); } int getBatchSize() const noexcept { return dims_.d[0]; } bool next() { int batch_size = dims_.d[0]; if (cursor_ + batch_size > allimgs_.size()) return false; int old_cursor = cursor_; for(int i = 0; i < batch_size; ++i) files_[i] = allimgs_[cursor_++]; if (!tensor_){ tensor_.reset(new Tensor(dims_.nbDims, dims_.d)); tensor_->set_stream(stream_); tensor_->set_workspace(make_shared<MixMemory>()); } preprocess_(old_cursor, allimgs_.size(), files_, tensor_); return true; } bool getBatch(void* bindings[], const char* names[], int nbBindings) noexcept { if (!next()) return false; bindings[0] = tensor_->gpu(); return true; } const vector<uint8_t>& getEntropyCalibratorData() { return entropyCalibratorData_; } const void* readCalibrationCache(size_t& length) noexcept { if (fromCalibratorData_) { length = this->entropyCalibratorData_.size(); return this->entropyCalibratorData_.data(); } length = 0; return nullptr; } virtual void writeCalibrationCache(const void* cache, size_t length) noexcept { entropyCalibratorData_.assign((uint8_t*)cache, (uint8_t*)cache + length); } private: Int8Process preprocess_; vector<string> allimgs_; size_t batchCudaSize_ = 0; int cursor_ = 0; nvinfer1::Dims dims_; vector<string> files_; shared_ptr<Tensor> tensor_; vector<uint8_t> entropyCalibratorData_; bool fromCalibratorData_ = false; hipStream_t stream_ = nullptr; }; bool compile( Mode mode, Type type, unsigned int max_batch_size, const string& source_onnx, const string& saveto, size_t max_workspace_size, const std::string& int8_images_folder, const std::string& int8_entropy_calibrator_cache_file) { bool hasEntropyCalibrator = false; vector<uint8_t> entropyCalibratorData; vector<string> entropyCalibratorFiles; auto int8process = [=](int current, int count, const vector<string>& files, shared_ptr<Tensor>& tensor){ for(int i = 0; i < files.size(); ++i){ auto& file = files[i]; INFO("Int8 load %d / %d, %s", current + i + 1, count, file.c_str()); auto image = cv::imread(file); if(image.empty()){ INFOE("Load image failed, %s", file.c_str()); continue; } 
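                // calibration reuses the same letterbox + normalization path as inference
                // (image_to_tensor), so the INT8 scale statistics match what the deployed engine sees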
image_to_tensor(image, tensor, type, i); } tensor->synchronize(); }; if (mode == Mode::INT8) { if (!int8_entropy_calibrator_cache_file.empty()) { if (exists(int8_entropy_calibrator_cache_file)) { entropyCalibratorData = load_file(int8_entropy_calibrator_cache_file); if (entropyCalibratorData.empty()) { INFOE("entropyCalibratorFile is set as: %s, but we read is empty.", int8_entropy_calibrator_cache_file.c_str()); return false; } hasEntropyCalibrator = true; } } if (hasEntropyCalibrator) { if (!int8_images_folder.empty()) { INFOW("int8_images_folder is ignore, when int8_entropy_calibrator_cache_file is set"); } } else { entropyCalibratorFiles = glob_image_files(int8_images_folder); if (entropyCalibratorFiles.empty()) { INFOE("Can not find any images(jpg/png/bmp/jpeg/tiff) from directory: %s", int8_images_folder.c_str()); return false; } if(entropyCalibratorFiles.size() < max_batch_size){ INFOW("Too few images provided, %d[provided] < %d[max batch size], image copy will be performed", entropyCalibratorFiles.size(), max_batch_size); for(int i = entropyCalibratorFiles.size(); i < max_batch_size; ++i) entropyCalibratorFiles.push_back(entropyCalibratorFiles[i % entropyCalibratorFiles.size()]); } } } else { if (hasEntropyCalibrator) { INFOW("int8_entropy_calibrator_cache_file is ignore, when Mode is '%s'", mode_string(mode)); } } INFO("Compile %s %s.", mode_string(mode), source_onnx.c_str()); shared_ptr<IBuilder> builder(createInferBuilder(gLogger), destroy_nvidia_pointer<IBuilder>); if (builder == nullptr) { INFOE("Can not create builder."); return false; } shared_ptr<IBuilderConfig> config(builder->createBuilderConfig(), destroy_nvidia_pointer<IBuilderConfig>); if (mode == Mode::FP16) { if (!builder->platformHasFastFp16()) { INFOW("Platform not have fast fp16 support"); } config->setFlag(BuilderFlag::kFP16); } else if (mode == Mode::INT8) { if (!builder->platformHasFastInt8()) { INFOW("Platform not have fast int8 support"); } config->setFlag(BuilderFlag::kINT8); } shared_ptr<INetworkDefinition> network; shared_ptr<nvonnxparser::IParser> onnxParser; const auto explicitBatch = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH); network = shared_ptr<INetworkDefinition>(builder->createNetworkV2(explicitBatch), destroy_nvidia_pointer<INetworkDefinition>); //from onnx is not markOutput onnxParser.reset(nvonnxparser::createParser(*network, gLogger), destroy_nvidia_pointer<nvonnxparser::IParser>); if (onnxParser == nullptr) { INFOE("Can not create parser."); return false; } if (!onnxParser->parseFromFile(source_onnx.c_str(), 1)) { INFOE("Can not parse OnnX file: %s", source_onnx.c_str()); return false; } auto inputTensor = network->getInput(0); auto inputDims = inputTensor->getDimensions(); shared_ptr<Int8EntropyCalibrator> int8Calibrator; if (mode == Mode::INT8) { auto calibratorDims = inputDims; calibratorDims.d[0] = max_batch_size; if (hasEntropyCalibrator) { INFO("Using exist entropy calibrator data[%d bytes]: %s", entropyCalibratorData.size(), int8_entropy_calibrator_cache_file.c_str()); int8Calibrator.reset(new Int8EntropyCalibrator( entropyCalibratorData, calibratorDims, int8process )); } else { INFO("Using image list[%d files]: %s", entropyCalibratorFiles.size(), int8_images_folder.c_str()); int8Calibrator.reset(new Int8EntropyCalibrator( entropyCalibratorFiles, calibratorDims, int8process )); } config->setInt8Calibrator(int8Calibrator.get()); } INFO("Input shape is %s", join_dims(vector<int>(inputDims.d, inputDims.d + inputDims.nbDims)).c_str()); INFO("Set 
max batch size = %d", max_batch_size); INFO("Set max workspace size = %.2f MB", max_workspace_size / 1024.0f / 1024.0f); int net_num_input = network->getNbInputs(); INFO("Network has %d inputs:", net_num_input); vector<string> input_names(net_num_input); for(int i = 0; i < net_num_input; ++i){ auto tensor = network->getInput(i); auto dims = tensor->getDimensions(); auto dims_str = join_dims(vector<int>(dims.d, dims.d+dims.nbDims)); INFO(" %d.[%s] shape is %s", i, tensor->getName(), dims_str.c_str()); input_names[i] = tensor->getName(); } int net_num_output = network->getNbOutputs(); INFO("Network has %d outputs:", net_num_output); for(int i = 0; i < net_num_output; ++i){ auto tensor = network->getOutput(i); auto dims = tensor->getDimensions(); auto dims_str = join_dims(vector<int>(dims.d, dims.d+dims.nbDims)); INFO(" %d.[%s] shape is %s", i, tensor->getName(), dims_str.c_str()); } int net_num_layers = network->getNbLayers(); INFO("Network has %d layers", net_num_layers); builder->setMaxBatchSize(max_batch_size); config->setMaxWorkspaceSize(max_workspace_size); auto profile = builder->createOptimizationProfile(); for(int i = 0; i < net_num_input; ++i){ auto input = network->getInput(i); auto input_dims = input->getDimensions(); input_dims.d[0] = 1; profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kMIN, input_dims); profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kOPT, input_dims); input_dims.d[0] = max_batch_size; profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kMAX, input_dims); } config->addOptimizationProfile(profile); INFO("Building engine..."); auto time_start = chrono::duration_cast<chrono::milliseconds>(chrono::system_clock::now().time_since_epoch()).count(); shared_ptr<ICudaEngine> engine(builder->buildEngineWithConfig(*network, *config), destroy_nvidia_pointer<ICudaEngine>); if (engine == nullptr) { INFOE("engine is nullptr"); return false; } if (mode == Mode::INT8) { if (!hasEntropyCalibrator) { if (!int8_entropy_calibrator_cache_file.empty()) { INFO("Save calibrator to: %s", int8_entropy_calibrator_cache_file.c_str()); save_file(int8_entropy_calibrator_cache_file, int8Calibrator->getEntropyCalibratorData()); } else { INFO("No set entropyCalibratorFile, and entropyCalibrator will not save."); } } } auto time_end = chrono::duration_cast<chrono::milliseconds>(chrono::system_clock::now().time_since_epoch()).count(); INFO("Build done %lld ms !", time_end - time_start); // serialize the engine, then close everything down shared_ptr<IHostMemory> seridata(engine->serialize(), destroy_nvidia_pointer<IHostMemory>); return save_file(saveto, seridata->data(), seridata->size()); } };
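// Illustrative end-to-end usage of the API above (a sketch, not part of the original file).
// The file names, thresholds and the Box field names (class_label, confidence, assumed from
// simple_yolo.hpp) are assumptions for the example; compile(), create_infer() and commit()
// are the real entry points defined in this file. Guarded with #if 0 so it is never built.
#if 0
int main(){
    // build an FP16 engine from the ONNX once, then reuse it
    if(!SimpleYolo::compile(SimpleYolo::Mode::FP16, SimpleYolo::Type::V5, 8,
                            "yolov5s.onnx", "yolov5s.trtmodel", 1u << 30, "", ""))
        return -1;

    auto infer = SimpleYolo::create_infer("yolov5s.trtmodel", SimpleYolo::Type::V5, 0, 0.25f, 0.5f);
    if(infer == nullptr) return -1;

    cv::Mat image = cv::imread("demo.jpg");
    auto boxes = infer->commit(image).get();   // shared_future -> blocking get()
    for(auto& box : boxes)
        printf("label=%d conf=%.3f\n", box.class_label, box.confidence);
    return 0;
}
#endif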
205164efc3beca5a3404a99f24b987ba83be0fad.cu
#include "simple_yolo.hpp" #include <NvInfer.h> #include <NvOnnxParser.h> #include <cuda_runtime.h> #include <algorithm> #include <fstream> #include <memory> #include <string> #include <future> #include <condition_variable> #include <mutex> #include <thread> #include <queue> #if defined(_WIN32) # include <Windows.h> # include <wingdi.h> # include <Shlwapi.h> # pragma comment(lib, "shlwapi.lib") # undef min # undef max #else # include <dirent.h> # include <sys/types.h> # include <sys/stat.h> # include <unistd.h> # include <stdarg.h> #endif namespace SimpleYolo{ using namespace nvinfer1; using namespace std; using namespace cv; #define CURRENT_DEVICE_ID -1 #define GPU_BLOCK_THREADS 512 #define KernelPositionBlock \ int position = (blockDim.x * blockIdx.x + threadIdx.x); \ if (position >= (edge)) return; #define checkCudaRuntime(call) check_runtime(call, #call, __LINE__, __FILE__) static bool check_runtime(cudaError_t e, const char* call, int line, const char *file); #define checkCudaKernel(...) \ __VA_ARGS__; \ do{cudaError_t cudaStatus = cudaPeekAtLastError(); \ if (cudaStatus != cudaSuccess){ \ INFOE("launch failed: %s", cudaGetErrorString(cudaStatus)); \ }} while(0); #define Assert(op) \ do{ \ bool cond = !(!(op)); \ if(!cond){ \ INFOF("Assert failed, " #op); \ } \ }while(false) /* 修改这个level来实现修改日志输出级别 */ #define CURRENT_LOG_LEVEL LogLevel::Info #define INFOD(...) __log_func(__FILE__, __LINE__, LogLevel::Debug, __VA_ARGS__) #define INFOV(...) __log_func(__FILE__, __LINE__, LogLevel::Verbose, __VA_ARGS__) #define INFO(...) __log_func(__FILE__, __LINE__, LogLevel::Info, __VA_ARGS__) #define INFOW(...) __log_func(__FILE__, __LINE__, LogLevel::Warning, __VA_ARGS__) #define INFOE(...) __log_func(__FILE__, __LINE__, LogLevel::Error, __VA_ARGS__) #define INFOF(...) 
    enum class NormType : int{
        None      = 0,
        MeanStd   = 1,
        AlphaBeta = 2
    };

    enum class ChannelType : int{
        None   = 0,
        SwapRB = 1
    };

    /* Normalization op: supports mean/std normalization, alpha-beta scaling, and R/B channel swapping */
    struct Norm{
        float mean[3];
        float std[3];
        float alpha, beta;
        NormType type = NormType::None;
        ChannelType channel_type = ChannelType::None;

        // out = (x * alpha - mean) / std
        static Norm mean_std(const float mean[3], const float std[3], float alpha = 1/255.0f, ChannelType channel_type=ChannelType::None);

        // out = x * alpha + beta
        static Norm alpha_beta(float alpha, float beta = 0, ChannelType channel_type=ChannelType::None);

        // None
        static Norm None();
    };

    Norm Norm::mean_std(const float mean[3], const float std[3], float alpha, ChannelType channel_type){
        Norm out;
        out.type         = NormType::MeanStd;
        out.alpha        = alpha;
        out.channel_type = channel_type;
        memcpy(out.mean, mean, sizeof(out.mean));
        memcpy(out.std,  std,  sizeof(out.std));
        return out;
    }

    Norm Norm::alpha_beta(float alpha, float beta, ChannelType channel_type){
        Norm out;
        out.type         = NormType::AlphaBeta;
        out.alpha        = alpha;
        out.beta         = beta;
        out.channel_type = channel_type;
        return out;
    }

    Norm Norm::None(){
        return Norm();
    }
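    // Illustrative usage sketch (an addition, not from the original file): building the
    // two kinds of Norm used later by the YoloV5/YoloX preprocessing. The mean/std values
    // are the ImageNet statistics mentioned in comments further below.
    //
    //     float mean[] = {0.485f, 0.456f, 0.406f};
    //     float std[]  = {0.229f, 0.224f, 0.225f};
    //     Norm n1 = Norm::mean_std(mean, std, 1/255.0f);                   // out = (x/255 - mean) / std
    //     Norm n2 = Norm::alpha_beta(1/255.0f, 0.0f, ChannelType::SwapRB); // out = x/255, BGR -> RGB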
    /* Sets the given gpu id at construction time and restores the original gpu id at destruction */
    class AutoDevice{
    public:
        AutoDevice(int device_id = 0){
            cudaGetDevice(&old_);
            if(old_ != device_id && device_id != -1)
                checkCudaRuntime(cudaSetDevice(device_id));
        }

        virtual ~AutoDevice(){
            if(old_ != -1)
                checkCudaRuntime(cudaSetDevice(old_));
        }

    private:
        int old_ = -1;
    };

    enum class LogLevel : int{
        Debug   = 5,
        Verbose = 4,
        Info    = 3,
        Warning = 2,
        Error   = 1,
        Fatal   = 0
    };

    static void __log_func(const char* file, int line, LogLevel level, const char* fmt, ...);
    inline int upbound(int n, int align = 32){return (n + align - 1) / align * align;}

    static bool check_runtime(cudaError_t e, const char* call, int line, const char *file){
        if (e != cudaSuccess) {
            INFOE("CUDA Runtime error %s # %s, code = %s [ %d ] in file %s:%d", call, cudaGetErrorString(e), cudaGetErrorName(e), e, file, line);
            return false;
        }
        return true;
    }

    #define TRT_STR(v)  #v
    #define TRT_VERSION_STRING(major, minor, patch, build)   TRT_STR(major) "." TRT_STR(minor) "." TRT_STR(patch) "." TRT_STR(build)
    const char* trt_version(){
        return TRT_VERSION_STRING(NV_TENSORRT_MAJOR, NV_TENSORRT_MINOR, NV_TENSORRT_PATCH, NV_TENSORRT_BUILD);
    }

    static bool check_device_id(int device_id){
        int device_count = -1;
        checkCudaRuntime(cudaGetDeviceCount(&device_count));
        if(device_id < 0 || device_id >= device_count){
            INFOE("Invalid device id: %d, count = %d", device_id, device_count);
            return false;
        }
        return true;
    }

    static bool exists(const string& path){
#ifdef _WIN32
        return ::PathFileExistsA(path.c_str());
#else
        return access(path.c_str(), R_OK) == 0;
#endif
    }

    static const char* level_string(LogLevel level){
        switch (level){
            case LogLevel::Debug:   return "debug";
            case LogLevel::Verbose: return "verbo";
            case LogLevel::Info:    return "info";
            case LogLevel::Warning: return "warn";
            case LogLevel::Error:   return "error";
            case LogLevel::Fatal:   return "fatal";
            default: return "unknown";
        }
    }

    template<typename _T>
    static string join_dims(const vector<_T>& dims){
        stringstream output;
        char buf[64];
        const char* fmts[] = {"%d", " x %d"};
        for(int i = 0; i < dims.size(); ++i){
            snprintf(buf, sizeof(buf), fmts[i != 0], dims[i]);
            output << buf;
        }
        return output.str();
    }

    static bool save_file(const string& file, const void* data, size_t length){

        FILE* f = fopen(file.c_str(), "wb");
        if (!f) return false;

        if (data and length > 0){
            if (fwrite(data, 1, length, f) not_eq length){
                fclose(f);
                return false;
            }
        }
        fclose(f);
        return true;
    }

    static bool save_file(const string& file, const vector<uint8_t>& data){
        return save_file(file, data.data(), data.size());
    }

    static string file_name(const string& path, bool include_suffix){

        if (path.empty()) return "";

        int p = path.rfind('/');

#ifdef _WIN32
        int e = path.rfind('\\');
        p = std::max(p, e);
#endif
        p += 1;

        //include suffix
        if (include_suffix)
            return path.substr(p);

        int u = path.rfind('.');
        if (u == -1)
            return path.substr(p);

        if (u <= p) u = path.size();
        return path.substr(p, u - p);
    }

    vector<string> glob_image_files(const string& directory){

        /* Collect all images under the directory: "*.jpg;*.png;*.bmp;*.jpeg;*.tiff" */
        vector<string> files, output;
        set<string> pattern_set{"jpg", "png", "bmp", "jpeg", "tiff"};

        if(directory.empty()){
            INFOE("Glob images from folder failed, folder is empty");
            return output;
        }

        try{
            cv::glob(directory + "/*", files, true);
        }catch(...){
            INFOE("Glob %s failed", directory.c_str());
            return output;
        }

        for(int i = 0; i < files.size(); ++i){
            auto& file = files[i];
            int p = file.rfind(".");
            if(p == -1) continue;

            auto suffix = file.substr(p+1);
            std::transform(suffix.begin(), suffix.end(), suffix.begin(), [](char c){
                // convert to lower case
                if(c >= 'A' && c <= 'Z')
                    c = c - 'A' + 'a';
                return c;
            });
            if(pattern_set.find(suffix) != pattern_set.end())
                output.push_back(file);
        }
        return output;
    }

    static void __log_func(const char* file, int line, LogLevel level, const char* fmt, ...){

        if(level > CURRENT_LOG_LEVEL) return;

        va_list vl;
        va_start(vl, fmt);

        char buffer[2048];
        string filename = file_name(file, true);
        int n = snprintf(buffer, sizeof(buffer), "[%s][%s:%d]:", level_string(level), filename.c_str(), line);
        vsnprintf(buffer + n, sizeof(buffer) - n, fmt, vl);
        va_end(vl);

        fprintf(stdout, "%s\n", buffer);
        if (level == LogLevel::Fatal) {
            fflush(stdout);
            abort();
        }
    }

    static dim3 grid_dims(int numJobs) {
        int numBlockThreads = numJobs < GPU_BLOCK_THREADS ? numJobs : GPU_BLOCK_THREADS;
        return dim3(((numJobs + numBlockThreads - 1) / (float)numBlockThreads));
    }
    static dim3 block_dims(int numJobs) {
        return numJobs < GPU_BLOCK_THREADS ? numJobs : GPU_BLOCK_THREADS;
    }

    static int get_device(int device_id){
        if(device_id != CURRENT_DEVICE_ID){
            check_device_id(device_id);
            return device_id;
        }

        checkCudaRuntime(cudaGetDevice(&device_id));
        return device_id;
    }

    void set_device(int device_id) {
        if (device_id == -1)
            return;

        checkCudaRuntime(cudaSetDevice(device_id));
    }

    /////////////////////////////CUDA kernels////////////////////////////////////////////////

    const int NUM_BOX_ELEMENT = 7;      // left, top, right, bottom, confidence, class, keepflag
    static __device__ void affine_project(float* matrix, float x, float y, float* ox, float* oy){
        *ox = matrix[0] * x + matrix[1] * y + matrix[2];
        *oy = matrix[3] * x + matrix[4] * y + matrix[5];
    }

    static __global__ void decode_kernel(float* predict, int num_bboxes, int num_classes, float confidence_threshold, float* invert_affine_matrix, float* parray, int max_objects){

        int position = blockDim.x * blockIdx.x + threadIdx.x;
        if (position >= num_bboxes) return;

        float* pitem     = predict + (5 + num_classes) * position;
        float objectness = pitem[4];
        if(objectness < confidence_threshold)
            return;

        float* class_confidence = pitem + 5;
        float confidence        = *class_confidence++;
        int label               = 0;
        for(int i = 1; i < num_classes; ++i, ++class_confidence){
            if(*class_confidence > confidence){
                confidence = *class_confidence;
                label      = i;
            }
        }

        confidence *= objectness;
        if(confidence < confidence_threshold)
            return;

        int index = atomicAdd(parray, 1);
        if(index >= max_objects)
            return;

        float cx     = *pitem++;
        float cy     = *pitem++;
        float width  = *pitem++;
        float height = *pitem++;
        float left   = cx - width * 0.5f;
        float top    = cy - height * 0.5f;
        float right  = cx + width * 0.5f;
        float bottom = cy + height * 0.5f;
        affine_project(invert_affine_matrix, left,  top,    &left,  &top);
        affine_project(invert_affine_matrix, right, bottom, &right, &bottom);

        float* pout_item = parray + 1 + index * NUM_BOX_ELEMENT;
        *pout_item++ = left;
        *pout_item++ = top;
        *pout_item++ = right;
        *pout_item++ = bottom;
        *pout_item++ = confidence;
        *pout_item++ = label;
        *pout_item++ = 1; // 1 = keep, 0 = ignore
    }

    static __device__ float box_iou(
        float aleft, float atop, float aright, float abottom,
        float bleft, float btop, float bright, float bbottom
    ){

        float cleft   = max(aleft, bleft);
        float ctop    = max(atop, btop);
        float cright  = min(aright, bright);
        float cbottom = min(abottom, bbottom);

        float c_area = max(cright - cleft, 0.0f) * max(cbottom - ctop, 0.0f);
        if(c_area == 0.0f)
            return 0.0f;

        float a_area = max(0.0f, aright - aleft) * max(0.0f, abottom - atop);
        float b_area = max(0.0f, bright - bleft) * max(0.0f, bbottom - btop);
        return c_area / (a_area + b_area - c_area);
    }

    static __global__ void fast_nms_kernel(float* bboxes, int max_objects, float threshold){

        int position = (blockDim.x * blockIdx.x + threadIdx.x);
        int count    = min((int)*bboxes, max_objects);
        if (position >= count)
            return;

        // left, top, right, bottom, confidence, class, keepflag
        float* pcurrent = bboxes + 1 + position * NUM_BOX_ELEMENT;
        for(int i = 0; i < count; ++i){
            float* pitem = bboxes + 1 + i * NUM_BOX_ELEMENT;
            if(i == position || pcurrent[5] != pitem[5]) continue;

            if(pitem[4] >= pcurrent[4]){
                if(pitem[4] == pcurrent[4] && i < position)
                    continue;

                float iou = box_iou(
                    pcurrent[0], pcurrent[1], pcurrent[2], pcurrent[3],
                    pitem[0],    pitem[1],    pitem[2],    pitem[3]
                );

                if(iou > threshold){
                    pcurrent[6] = 0;  // 1=keep, 0=ignore
                    return;
                }
            }
        }
    }
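    // Illustrative note (an addition, not from the original file): fast_nms_kernel keeps,
    // per class, only boxes that are not overlapped above `threshold` by a higher-scoring
    // box. The `pitem[4] == pcurrent[4] && i < position` test breaks exact score ties by
    // index, so exactly one of two equal-score duplicates survives.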
    static void decode_kernel_invoker(float* predict, int num_bboxes, int num_classes, float confidence_threshold, float nms_threshold, float* invert_affine_matrix, float* parray, int max_objects, cudaStream_t stream){

        auto grid  = grid_dims(num_bboxes);
        auto block = block_dims(num_bboxes);

        /* If your IDE draws a squiggly line under the kernel launch, don't worry: the code is fine, it just looks odd to the editor */
        checkCudaKernel(decode_kernel<<<grid, block, 0, stream>>>(predict, num_bboxes, num_classes, confidence_threshold, invert_affine_matrix, parray, max_objects));

        grid  = grid_dims(max_objects);
        block = block_dims(max_objects);
        checkCudaKernel(fast_nms_kernel<<<grid, block, 0, stream>>>(parray, max_objects, nms_threshold));
    }

    static __global__ void warp_affine_bilinear_and_normalize_plane_kernel(
        uint8_t* src, int src_line_size, int src_width, int src_height,
        float* dst, int dst_width, int dst_height,
        uint8_t const_value_st, float* warp_affine_matrix_2_3, Norm norm, int edge){

        int position = blockDim.x * blockIdx.x + threadIdx.x;
        if (position >= edge) return;

        float m_x1 = warp_affine_matrix_2_3[0];
        float m_y1 = warp_affine_matrix_2_3[1];
        float m_z1 = warp_affine_matrix_2_3[2];
        float m_x2 = warp_affine_matrix_2_3[3];
        float m_y2 = warp_affine_matrix_2_3[4];
        float m_z2 = warp_affine_matrix_2_3[5];

        int dx = position % dst_width;
        int dy = position / dst_width;
        float src_x = m_x1 * dx + m_y1 * dy + m_z1;
        float src_y = m_x2 * dx + m_y2 * dy + m_z2;
        float c0, c1, c2;

        if(src_x <= -1 || src_x >= src_width || src_y <= -1 || src_y >= src_height){
            // out of range
            c0 = const_value_st;
            c1 = const_value_st;
            c2 = const_value_st;
        }else{
            int y_low  = floorf(src_y);
            int x_low  = floorf(src_x);
            int y_high = y_low + 1;
            int x_high = x_low + 1;

            uint8_t const_value[] = {const_value_st, const_value_st, const_value_st};
            float ly = src_y - y_low;
            float lx = src_x - x_low;
            float hy = 1 - ly;
            float hx = 1 - lx;
            float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
            uint8_t* v1 = const_value;
            uint8_t* v2 = const_value;
            uint8_t* v3 = const_value;
            uint8_t* v4 = const_value;
            if(y_low >= 0){
                if (x_low >= 0)
                    v1 = src + y_low * src_line_size + x_low * 3;

                if (x_high < src_width)
                    v2 = src + y_low * src_line_size + x_high * 3;
            }

            if(y_high < src_height){
                if (x_low >= 0)
                    v3 = src + y_high * src_line_size + x_low * 3;

                if (x_high < src_width)
                    v4 = src + y_high * src_line_size + x_high * 3;
            }

            // same as OpenCV
            c0 = floorf(w1 * v1[0] + w2 * v2[0] + w3 * v3[0] + w4 * v4[0] + 0.5f);
            c1 = floorf(w1 * v1[1] + w2 * v2[1] + w3 * v3[1] + w4 * v4[1] + 0.5f);
            c2 = floorf(w1 * v1[2] + w2 * v2[2] + w3 * v3[2] + w4 * v4[2] + 0.5f);
        }

        if(norm.channel_type == ChannelType::SwapRB){
            float t = c2;
            c2 = c0;
            c0 = t;
        }

        if(norm.type == NormType::MeanStd){
            c0 = (c0 * norm.alpha - norm.mean[0]) / norm.std[0];
            c1 = (c1 * norm.alpha - norm.mean[1]) / norm.std[1];
            c2 = (c2 * norm.alpha - norm.mean[2]) / norm.std[2];
        }else if(norm.type == NormType::AlphaBeta){
            c0 = c0 * norm.alpha + norm.beta;
            c1 = c1 * norm.alpha + norm.beta;
            c2 = c2 * norm.alpha + norm.beta;
        }

        int area = dst_width * dst_height;
        float* pdst_c0 = dst + dy * dst_width + dx;
        float* pdst_c1 = pdst_c0 + area;
        float* pdst_c2 = pdst_c1 + area;
        *pdst_c0 = c0;
        *pdst_c1 = c1;
        *pdst_c2 = c2;
    }

    static void warp_affine_bilinear_and_normalize_plane(
        uint8_t* src, int src_line_size, int src_width, int src_height,
        float* dst, int dst_width, int dst_height,
        float* matrix_2_3, uint8_t const_value, const Norm& norm,
        cudaStream_t stream) {

        int jobs   = dst_width * dst_height;
        auto grid  = grid_dims(jobs);
        auto block = block_dims(jobs);

        checkCudaKernel(warp_affine_bilinear_and_normalize_plane_kernel << <grid, block, 0, stream >> > (
            src, src_line_size, src_width, src_height,
            dst, dst_width, dst_height, const_value, matrix_2_3, norm, jobs
        ));
    }
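    // Illustrative note (an addition, not from the original file): decode_kernel_invoker
    // writes its results into `parray` with the layout [counter, box0, box1, ...] where
    // each box is NUM_BOX_ELEMENT = 7 floats: left, top, right, bottom, confidence,
    // class, keepflag. A host-side reader, as done in worker() below, looks like:
    //
    //     int count = min((int)*parray, MAX_IMAGE_BBOX);
    //     for(int i = 0; i < count; ++i){
    //         float* pbox = parray + 1 + i * NUM_BOX_ELEMENT;
    //         if(pbox[6] == 1){ /* keep: pbox[0..4], label = (int)pbox[5] */ }
    //     }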
    //////////////////////////////class MixMemory/////////////////////////////////////////////////
    /* GPU/CPU memory manager.
       Automatically allocates and frees GPU and CPU memory.
       The CPU side uses pinned memory, which performs better when copying to and from
       the GPU; because it is allocated with cudaMallocHost, it is tied to the CUDA context. */
    class MixMemory {
    public:
        MixMemory(int device_id = CURRENT_DEVICE_ID);
        MixMemory(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size);
        virtual ~MixMemory();
        void* gpu(size_t size);
        void* cpu(size_t size);
        void release_gpu();
        void release_cpu();
        void release_all();

        inline bool owner_gpu() const{return owner_gpu_;}
        inline bool owner_cpu() const{return owner_cpu_;}
        inline size_t cpu_size() const{return cpu_size_;}
        inline size_t gpu_size() const{return gpu_size_;}
        inline int device_id() const{return device_id_;}
        inline void* gpu() const { return gpu_; }

        // Pinned Memory
        inline void* cpu() const { return cpu_; }
        void reference_data(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size);

    private:
        void* cpu_ = nullptr;
        size_t cpu_size_ = 0;
        bool owner_cpu_ = true;
        int device_id_ = 0;

        void* gpu_ = nullptr;
        size_t gpu_size_ = 0;
        bool owner_gpu_ = true;
    };

    MixMemory::MixMemory(int device_id){
        device_id_ = get_device(device_id);
    }

    MixMemory::MixMemory(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size){
        reference_data(cpu, cpu_size, gpu, gpu_size);
    }

    void MixMemory::reference_data(void* cpu, size_t cpu_size, void* gpu, size_t gpu_size){
        release_all();

        if(cpu == nullptr || cpu_size == 0){
            cpu = nullptr;
            cpu_size = 0;
        }

        if(gpu == nullptr || gpu_size == 0){
            gpu = nullptr;
            gpu_size = 0;
        }

        this->cpu_ = cpu;
        this->cpu_size_ = cpu_size;
        this->gpu_ = gpu;
        this->gpu_size_ = gpu_size;

        this->owner_cpu_ = !(cpu && cpu_size > 0);
        this->owner_gpu_ = !(gpu && gpu_size > 0);
        checkCudaRuntime(cudaGetDevice(&device_id_));
    }

    MixMemory::~MixMemory() {
        release_all();
    }

    void* MixMemory::gpu(size_t size) {

        if (gpu_size_ < size) {
            release_gpu();

            gpu_size_ = size;
            AutoDevice auto_device_exchange(device_id_);
            checkCudaRuntime(cudaMalloc(&gpu_, size));
            checkCudaRuntime(cudaMemset(gpu_, 0, size));
        }
        return gpu_;
    }

    void* MixMemory::cpu(size_t size) {

        if (cpu_size_ < size) {
            release_cpu();

            cpu_size_ = size;
            AutoDevice auto_device_exchange(device_id_);
            checkCudaRuntime(cudaMallocHost(&cpu_, size));
            Assert(cpu_ != nullptr);
            memset(cpu_, 0, size);
        }
        return cpu_;
    }

    void MixMemory::release_cpu() {
        if (cpu_) {
            if(owner_cpu_){
                AutoDevice auto_device_exchange(device_id_);
                checkCudaRuntime(cudaFreeHost(cpu_));
            }
            cpu_ = nullptr;
        }
        cpu_size_ = 0;
    }

    void MixMemory::release_gpu() {
        if (gpu_) {
            if(owner_gpu_){
                AutoDevice auto_device_exchange(device_id_);
                checkCudaRuntime(cudaFree(gpu_));
            }
            gpu_ = nullptr;
        }
        gpu_size_ = 0;
    }

    void MixMemory::release_all() {
        release_cpu();
        release_gpu();
    }
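    // Illustrative usage sketch (an addition, not from the original file): MixMemory
    // grows lazily and reuses its buffers, so repeated calls with sizes at or below the
    // high-water mark return the same pointers without reallocating.
    //
    //     MixMemory mem;                                           // binds to the current device
    //     float* host   = (float*)mem.cpu(128 * sizeof(float));    // pinned, zero-filled
    //     float* device = (float*)mem.gpu(128 * sizeof(float));    // cudaMalloc'd, zero-filled
    //     // ... cudaMemcpyAsync(device, host, ...) benefits from the pinned host side
    //     mem.release_all();                                       // optional; the destructor frees both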
    /////////////////////////////////class Tensor////////////////////////////////////////////////
    /* Tensor class: tensor management.
       Neural nets use tensors everywhere, so a managing class makes things convenient:
       automatic memory allocation, index computation, and so on.
       For debugging, call save_to_file to write the tensor out, then load and inspect it in Python. */
    enum class DataHead : int{
        Init   = 0,
        Device = 1,
        Host   = 2
    };

    class Tensor {
    public:
        Tensor(const Tensor& other) = delete;
        Tensor& operator = (const Tensor& other) = delete;

        explicit Tensor(std::shared_ptr<MixMemory> data = nullptr, int device_id = CURRENT_DEVICE_ID);
        explicit Tensor(int n, int c, int h, int w, std::shared_ptr<MixMemory> data = nullptr, int device_id = CURRENT_DEVICE_ID);
        explicit Tensor(int ndims, const int* dims, std::shared_ptr<MixMemory> data = nullptr, int device_id = CURRENT_DEVICE_ID);
        explicit Tensor(const std::vector<int>& dims, std::shared_ptr<MixMemory> data = nullptr, int device_id = CURRENT_DEVICE_ID);
        virtual ~Tensor();

        int numel() const;
        inline int ndims() const{return shape_.size();}
        inline int size(int index)  const{return shape_[index];}
        inline int shape(int index) const{return shape_[index];}

        inline int batch()   const{return shape_[0];}
        inline int channel() const{return shape_[1];}
        inline int height()  const{return shape_[2];}
        inline int width()   const{return shape_[3];}

        inline const std::vector<int>& dims() const { return shape_; }
        inline int bytes() const { return bytes_; }
        inline int bytes(int start_axis) const { return count(start_axis) * element_size(); }
        inline int element_size() const { return sizeof(float); }
        inline DataHead head() const { return head_; }

        std::shared_ptr<Tensor> clone() const;
        Tensor& release();
        Tensor& set_to(float value);
        bool empty() const;

        template<typename ... _Args>
        int offset(int index, _Args ... index_args) const{
            const int index_array[] = {index, index_args...};
            return offset_array(sizeof...(index_args) + 1, index_array);
        }

        int offset_array(const std::vector<int>& index) const;
        int offset_array(size_t size, const int* index_array) const;

        template<typename ... _Args>
        Tensor& resize(int dim_size, _Args ... dim_size_args){
            const int dim_size_array[] = {dim_size, dim_size_args...};
            return resize(sizeof...(dim_size_args) + 1, dim_size_array);
        }

        Tensor& resize(int ndims, const int* dims);
        Tensor& resize(const std::vector<int>& dims);
        Tensor& resize_single_dim(int idim, int size);
        int count(int start_axis = 0) const;
        int device() const{return device_id_;}

        Tensor& to_gpu(bool copy=true);
        Tensor& to_cpu(bool copy=true);

        inline void* cpu() const { ((Tensor*)this)->to_cpu(); return data_->cpu(); }
        inline void* gpu() const { ((Tensor*)this)->to_gpu(); return data_->gpu(); }

        template<typename DType> inline const DType* cpu() const { return (DType*)cpu(); }
        template<typename DType> inline DType* cpu()             { return (DType*)cpu(); }

        template<typename DType, typename ... _Args>
        inline DType* cpu(int i, _Args&& ... args) { return cpu<DType>() + offset(i, args...); }

        template<typename DType> inline const DType* gpu() const { return (DType*)gpu(); }
        template<typename DType> inline DType* gpu()             { return (DType*)gpu(); }

        template<typename DType, typename ... _Args>
        inline DType* gpu(int i, _Args&& ... args) { return gpu<DType>() + offset(i, args...); }

        template<typename DType, typename ... _Args>
        inline DType& at(int i, _Args&& ... args) { return *(cpu<DType>() + offset(i, args...)); }

        std::shared_ptr<MixMemory> get_data() const {return data_;}
        std::shared_ptr<MixMemory> get_workspace() const {return workspace_;}
        Tensor& set_workspace(std::shared_ptr<MixMemory> workspace) {workspace_ = workspace; return *this;}
        cudaStream_t get_stream() const{return stream_;}
        Tensor& set_stream(cudaStream_t stream){stream_ = stream; return *this;}
        Tensor& set_mat     (int n, const cv::Mat& image);
        Tensor& set_norm_mat(int n, const cv::Mat& image, float mean[3], float std[3]);
        cv::Mat at_mat(int n = 0, int c = 0) { return cv::Mat(height(), width(), CV_32F, cpu<float>(n, c)); }
        Tensor& synchronize();
        const char* shape_string() const{return shape_string_;}
        const char* descriptor() const;
        Tensor& copy_from_gpu(size_t offset, const void* src, size_t num_element, int device_id = CURRENT_DEVICE_ID);

        /**
        # The Python code below loads a Tensor file written by save_to_file
        import numpy as np

        def load_tensor(file):

            with open(file, "rb") as f:
                binary_data = f.read()

            magic_number, ndims, dtype = np.frombuffer(binary_data, np.uint32, count=3, offset=0)
            assert magic_number == 0xFCCFE2E2, f"{file} not a tensor file."

            dims = np.frombuffer(binary_data, np.uint32, count=ndims, offset=3 * 4)

            if dtype == 0:
                np_dtype = np.float32
            elif dtype == 1:
                np_dtype = np.float16
            else:
                assert False, f"Unsupported dtype = {dtype}, can not convert to numpy dtype"

            return np.frombuffer(binary_data, np_dtype, offset=(ndims + 3) * 4).reshape(*dims)
        **/
        bool save_to_file(const std::string& file) const;

    private:
        Tensor& compute_shape_string();
        Tensor& adajust_memory_by_update_dims_or_type();
        void setup_data(std::shared_ptr<MixMemory> data);

    private:
        std::vector<int> shape_;
        size_t bytes_    = 0;
        DataHead head_   = DataHead::Init;
        cudaStream_t stream_ = nullptr;
        int device_id_   = 0;
        char shape_string_[100];
        char descriptor_string_[100];
        std::shared_ptr<MixMemory> data_;
        std::shared_ptr<MixMemory> workspace_;
    };
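    // Illustrative usage sketch (an addition, not from the original file): a 1x3x2x2
    // tensor written on the CPU side and synchronized to the GPU on demand.
    //
    //     Tensor t(1, 3, 2, 2);              // NCHW, float32
    //     t.at<float>(0, 0, 0, 0) = 1.0f;    // host-side write via cpu()
    //     float* dev = t.gpu<float>();       // lazily copies host -> device
    //     t.save_to_file("t.tensor");        // loadable with the Python snippet above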
    Tensor::Tensor(int n, int c, int h, int w, shared_ptr<MixMemory> data, int device_id) {
        this->device_id_ = get_device(device_id);
        descriptor_string_[0] = 0;
        setup_data(data);
        resize(n, c, h, w);
    }

    Tensor::Tensor(const std::vector<int>& dims, shared_ptr<MixMemory> data, int device_id){
        this->device_id_ = get_device(device_id);
        descriptor_string_[0] = 0;
        setup_data(data);
        resize(dims);
    }

    Tensor::Tensor(int ndims, const int* dims, shared_ptr<MixMemory> data, int device_id) {
        this->device_id_ = get_device(device_id);
        descriptor_string_[0] = 0;
        setup_data(data);
        resize(ndims, dims);
    }

    Tensor::Tensor(shared_ptr<MixMemory> data, int device_id){
        shape_string_[0] = 0;
        descriptor_string_[0] = 0;
        this->device_id_ = get_device(device_id);
        setup_data(data);
    }

    Tensor::~Tensor() {
        release();
    }

    const char* Tensor::descriptor() const{

        char* descriptor_ptr = (char*)descriptor_string_;
        int device_id = device();
        snprintf(descriptor_ptr, sizeof(descriptor_string_),
            "Tensor:%p, %s, CUDA:%d",
            data_.get(),
            shape_string_,
            device_id
        );
        return descriptor_ptr;
    }

    Tensor& Tensor::compute_shape_string(){

        // clean string
        shape_string_[0] = 0;

        char* buffer = shape_string_;
        size_t buffer_size = sizeof(shape_string_);
        for(int i = 0; i < shape_.size(); ++i){

            int size = 0;
            if(i < shape_.size() - 1)
                size = snprintf(buffer, buffer_size, "%d x ", shape_[i]);
            else
                size = snprintf(buffer, buffer_size, "%d", shape_[i]);

            buffer += size;
            buffer_size -= size;
        }
        return *this;
    }

    void Tensor::setup_data(shared_ptr<MixMemory> data){

        data_ = data;
        if(data_ == nullptr){
            data_ = make_shared<MixMemory>(device_id_);
        }else{
            device_id_ = data_->device_id();
        }

        head_ = DataHead::Init;
        if(data_->cpu()){
            head_ = DataHead::Host;
        }

        if(data_->gpu()){
            head_ = DataHead::Device;
        }
    }

    Tensor& Tensor::copy_from_gpu(size_t offset, const void* src, size_t num_element, int device_id){

        if(head_ == DataHead::Init)
            to_gpu(false);

        size_t offset_location = offset * element_size();
        if(offset_location >= bytes_){
            INFOE("Offset location[%lld] >= bytes_[%lld], out of range", offset_location, bytes_);
            return *this;
        }

        size_t copyed_bytes = num_element * element_size();
        size_t remain_bytes = bytes_ - offset_location;
        if(copyed_bytes > remain_bytes){
            INFOE("Copied bytes[%lld] > remain bytes[%lld], out of range", copyed_bytes, remain_bytes);
            return *this;
        }

        if(head_ == DataHead::Device){
            int current_device_id = get_device(device_id);
            int gpu_device_id = device();
            if(current_device_id != gpu_device_id){
                checkCudaRuntime(cudaMemcpyPeerAsync(gpu<unsigned char>() + offset_location, gpu_device_id, src, current_device_id, copyed_bytes, stream_));
                //checkCudaRuntime(cudaMemcpyAsync(gpu<unsigned char>() + offset_location, src, copyed_bytes, cudaMemcpyDeviceToDevice, stream_));
            }
            else{
                checkCudaRuntime(cudaMemcpyAsync(gpu<unsigned char>() + offset_location, src, copyed_bytes, cudaMemcpyDeviceToDevice, stream_));
            }
        }else if(head_ == DataHead::Host){
            AutoDevice auto_device_exchange(this->device());
            checkCudaRuntime(cudaMemcpyAsync(cpu<unsigned char>() + offset_location, src, copyed_bytes, cudaMemcpyDeviceToHost, stream_));
        }else{
            INFOE("Unsupported head type %d", head_);
        }
        return *this;
    }
    Tensor& Tensor::release() {
        data_->release_all();
        shape_.clear();
        bytes_ = 0;
        head_ = DataHead::Init;
        return *this;
    }

    bool Tensor::empty() const{
        return data_->cpu() == nullptr && data_->gpu() == nullptr;
    }

    int Tensor::count(int start_axis) const {

        if(start_axis >= 0 && start_axis < shape_.size()){
            int size = 1;
            for (int i = start_axis; i < shape_.size(); ++i)
                size *= shape_[i];
            return size;
        }else{
            return 0;
        }
    }

    Tensor& Tensor::resize(const std::vector<int>& dims) {
        return resize(dims.size(), dims.data());
    }

    int Tensor::numel() const{
        int value = shape_.empty() ? 0 : 1;
        for(int i = 0; i < shape_.size(); ++i){
            value *= shape_[i];
        }
        return value;
    }

    Tensor& Tensor::resize_single_dim(int idim, int size){

        Assert(idim >= 0 && idim < shape_.size());

        auto new_shape = shape_;
        new_shape[idim] = size;
        return resize(new_shape);
    }

    Tensor& Tensor::resize(int ndims, const int* dims) {

        vector<int> setup_dims(ndims);
        for(int i = 0; i < ndims; ++i){
            int dim = dims[i];
            if(dim == -1){
                Assert(ndims == shape_.size());
                dim = shape_[i];
            }
            setup_dims[i] = dim;
        }
        this->shape_ = setup_dims;
        this->adajust_memory_by_update_dims_or_type();
        this->compute_shape_string();
        return *this;
    }

    Tensor& Tensor::adajust_memory_by_update_dims_or_type(){

        int needed_size = this->numel() * element_size();
        if(needed_size > this->bytes_){
            head_ = DataHead::Init;
        }
        this->bytes_ = needed_size;
        return *this;
    }

    Tensor& Tensor::synchronize(){
        AutoDevice auto_device_exchange(this->device());
        checkCudaRuntime(cudaStreamSynchronize(stream_));
        return *this;
    }

    Tensor& Tensor::to_gpu(bool copy) {

        if (head_ == DataHead::Device)
            return *this;

        head_ = DataHead::Device;
        data_->gpu(bytes_);

        if (copy && data_->cpu() != nullptr) {
            AutoDevice auto_device_exchange(this->device());
            checkCudaRuntime(cudaMemcpyAsync(data_->gpu(), data_->cpu(), bytes_, cudaMemcpyHostToDevice, stream_));
        }
        return *this;
    }

    Tensor& Tensor::to_cpu(bool copy) {

        if (head_ == DataHead::Host)
            return *this;

        head_ = DataHead::Host;
        data_->cpu(bytes_);

        if (copy && data_->gpu() != nullptr) {
            AutoDevice auto_device_exchange(this->device());
            checkCudaRuntime(cudaMemcpyAsync(data_->cpu(), data_->gpu(), bytes_, cudaMemcpyDeviceToHost, stream_));
            checkCudaRuntime(cudaStreamSynchronize(stream_));
        }
        return *this;
    }

    int Tensor::offset_array(size_t size, const int* index_array) const{

        Assert(size <= shape_.size());
        int value = 0;
        for(int i = 0; i < shape_.size(); ++i){

            if(i < size)
                value += index_array[i];

            if(i + 1 < shape_.size())
                value *= shape_[i+1];
        }
        return value;
    }

    int Tensor::offset_array(const std::vector<int>& index_array) const{
        return offset_array(index_array.size(), index_array.data());
    }

    bool Tensor::save_to_file(const std::string& file) const{

        if(empty()) return false;

        FILE* f = fopen(file.c_str(), "wb");
        if(f == nullptr) return false;

        int ndims = this->ndims();
        int dtype_ = 0;
        unsigned int head[3] = {0xFCCFE2E2, static_cast<unsigned int>(ndims), static_cast<unsigned int>(dtype_)};
        fwrite(head, 1, sizeof(head), f);
        fwrite(shape_.data(), 1, sizeof(shape_[0]) * shape_.size(), f);
        fwrite(cpu(), 1, bytes_, f);
        fclose(f);
        return true;
    }
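    // Illustrative note (an addition, not from the original file): save_to_file writes
    // [magic u32 = 0xFCCFE2E2][ndims u32][dtype u32][dims u32 * ndims][raw float data],
    // which is exactly what the Python loader in the class comment above parses.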
    /////////////////////////////////class TRTInferImpl////////////////////////////////////////////////
    class Logger : public ILogger {
    public:
        virtual void log(Severity severity, const char* msg) noexcept override {

            if (severity == Severity::kINTERNAL_ERROR) {
                INFOE("NVInfer INTERNAL_ERROR: %s", msg);
                abort();
            }else if (severity == Severity::kERROR) {
                INFOE("NVInfer: %s", msg);
            }
            else if (severity == Severity::kWARNING) {
                INFOW("NVInfer: %s", msg);
            }
            else if (severity == Severity::kINFO) {
                INFOD("NVInfer: %s", msg);
            }
            else {
                INFOD("%s", msg);
            }
        }
    };
    static Logger gLogger;

    template<typename _T>
    static void destroy_nvidia_pointer(_T* ptr) {
        if (ptr) ptr->destroy();
    }

    class EngineContext {
    public:
        virtual ~EngineContext() { destroy(); }

        void set_stream(cudaStream_t stream){

            if(owner_stream_){
                if (stream_) {cudaStreamDestroy(stream_);}
                owner_stream_ = false;
            }
            stream_ = stream;
        }

        bool build_model(const void* pdata, size_t size) {
            destroy();

            if(pdata == nullptr || size == 0)
                return false;

            owner_stream_ = true;
            checkCudaRuntime(cudaStreamCreate(&stream_));
            if(stream_ == nullptr)
                return false;

            runtime_ = shared_ptr<IRuntime>(createInferRuntime(gLogger), destroy_nvidia_pointer<IRuntime>);
            if (runtime_ == nullptr)
                return false;

            engine_ = shared_ptr<ICudaEngine>(runtime_->deserializeCudaEngine(pdata, size, nullptr), destroy_nvidia_pointer<ICudaEngine>);
            if (engine_ == nullptr)
                return false;

            //runtime_->setDLACore(0);
            context_ = shared_ptr<IExecutionContext>(engine_->createExecutionContext(), destroy_nvidia_pointer<IExecutionContext>);
            return context_ != nullptr;
        }

    private:
        void destroy() {
            context_.reset();
            engine_.reset();
            runtime_.reset();

            if(owner_stream_){
                if (stream_) {cudaStreamDestroy(stream_);}
            }
            stream_ = nullptr;
        }

    public:
        cudaStream_t stream_ = nullptr;
        bool owner_stream_ = false;
        shared_ptr<IExecutionContext> context_;
        shared_ptr<ICudaEngine> engine_;
        shared_ptr<IRuntime> runtime_ = nullptr;
    };

    class TRTInferImpl{
    public:
        virtual ~TRTInferImpl();
        bool load(const std::string& file);
        bool load_from_memory(const void* pdata, size_t size);
        void destroy();
        void forward(bool sync);
        int get_max_batch_size();
        cudaStream_t get_stream();
        void set_stream(cudaStream_t stream);
        void synchronize();
        size_t get_device_memory_size();
        std::shared_ptr<MixMemory> get_workspace();
        std::shared_ptr<Tensor> input(int index = 0);
        std::string get_input_name(int index = 0);
        std::shared_ptr<Tensor> output(int index = 0);
        std::string get_output_name(int index = 0);
        std::shared_ptr<Tensor> tensor(const std::string& name);
        bool is_output_name(const std::string& name);
        bool is_input_name(const std::string& name);
        void set_input (int index, std::shared_ptr<Tensor> tensor);
        void set_output(int index, std::shared_ptr<Tensor> tensor);
        std::shared_ptr<std::vector<uint8_t>> serial_engine();

        void print();

        int num_output();
        int num_input();
        int device();

    private:
        void build_engine_input_and_outputs_mapper();

    private:
        std::vector<std::shared_ptr<Tensor>> inputs_;
        std::vector<std::shared_ptr<Tensor>> outputs_;
        std::vector<int> inputs_map_to_ordered_index_;
        std::vector<int> outputs_map_to_ordered_index_;
        std::vector<std::string> inputs_name_;
        std::vector<std::string> outputs_name_;
        std::vector<std::shared_ptr<Tensor>> orderdBlobs_;
        std::map<std::string, int> blobsNameMapper_;
        std::shared_ptr<EngineContext> context_;
        std::vector<void*> bindingsPtr_;
        std::shared_ptr<MixMemory> workspace_;
        int device_ = 0;
    };

    ////////////////////////////////////////////////////////////////////////////////////
    TRTInferImpl::~TRTInferImpl(){
        destroy();
    }

    void TRTInferImpl::destroy() {

        int old_device = 0;
        checkCudaRuntime(cudaGetDevice(&old_device));
        checkCudaRuntime(cudaSetDevice(device_));
        this->context_.reset();
        this->blobsNameMapper_.clear();
        this->outputs_.clear();
        this->inputs_.clear();
        this->inputs_name_.clear();
        this->outputs_name_.clear();
        checkCudaRuntime(cudaSetDevice(old_device));
    }
    void TRTInferImpl::print(){
        if(!context_){
            INFOW("Infer print, nullptr.");
            return;
        }

        INFO("Infer %p detail", this);
        INFO("\tMax Batch Size: %d", this->get_max_batch_size());
        INFO("\tInputs: %d", inputs_.size());
        for(int i = 0; i < inputs_.size(); ++i){
            auto& tensor = inputs_[i];
            auto& name = inputs_name_[i];
            INFO("\t\t%d.%s : shape {%s}", i, name.c_str(), tensor->shape_string());
        }

        INFO("\tOutputs: %d", outputs_.size());
        for(int i = 0; i < outputs_.size(); ++i){
            auto& tensor = outputs_[i];
            auto& name = outputs_name_[i];
            INFO("\t\t%d.%s : shape {%s}", i, name.c_str(), tensor->shape_string());
        }
    }

    std::shared_ptr<std::vector<uint8_t>> TRTInferImpl::serial_engine() {
        auto memory = this->context_->engine_->serialize();
        auto output = make_shared<std::vector<uint8_t>>((uint8_t*)memory->data(), (uint8_t*)memory->data()+memory->size());
        memory->destroy();
        return output;
    }

    bool TRTInferImpl::load_from_memory(const void* pdata, size_t size) {

        if (pdata == nullptr || size == 0)
            return false;

        context_.reset(new EngineContext());

        //build model
        if (!context_->build_model(pdata, size)) {
            context_.reset();
            return false;
        }

        workspace_.reset(new MixMemory());
        cudaGetDevice(&device_);
        build_engine_input_and_outputs_mapper();
        return true;
    }

    static std::vector<uint8_t> load_file(const string& file){

        ifstream in(file, ios::in | ios::binary);
        if (!in.is_open())
            return {};

        in.seekg(0, ios::end);
        size_t length = in.tellg();

        std::vector<uint8_t> data;
        if (length > 0){
            in.seekg(0, ios::beg);
            data.resize(length);

            in.read((char*)&data[0], length);
        }
        in.close();
        return data;
    }

    bool TRTInferImpl::load(const std::string& file) {

        auto data = load_file(file);
        if (data.empty())
            return false;

        context_.reset(new EngineContext());

        //build model
        if (!context_->build_model(data.data(), data.size())) {
            context_.reset();
            return false;
        }

        workspace_.reset(new MixMemory());
        cudaGetDevice(&device_);
        build_engine_input_and_outputs_mapper();
        return true;
    }

    size_t TRTInferImpl::get_device_memory_size() {
        EngineContext* context = (EngineContext*)this->context_.get();
        return context->context_->getEngine().getDeviceMemorySize();
    }

    void TRTInferImpl::build_engine_input_and_outputs_mapper() {

        EngineContext* context = (EngineContext*)this->context_.get();
        int nbBindings = context->engine_->getNbBindings();
        int max_batchsize = context->engine_->getMaxBatchSize();

        inputs_.clear();
        inputs_name_.clear();
        outputs_.clear();
        outputs_name_.clear();
        orderdBlobs_.clear();
        bindingsPtr_.clear();
        blobsNameMapper_.clear();
        for (int i = 0; i < nbBindings; ++i) {

            auto dims = context->engine_->getBindingDimensions(i);
            auto type = context->engine_->getBindingDataType(i);
            const char* bindingName = context->engine_->getBindingName(i);
            dims.d[0] = max_batchsize;
            auto newTensor = make_shared<Tensor>(dims.nbDims, dims.d);
            newTensor->set_stream(this->context_->stream_);
            newTensor->set_workspace(this->workspace_);
            if (context->engine_->bindingIsInput(i)) {
                //if is input
                inputs_.push_back(newTensor);
                inputs_name_.push_back(bindingName);
                inputs_map_to_ordered_index_.push_back(orderdBlobs_.size());
            }
            else {
                //if is output
                outputs_.push_back(newTensor);
                outputs_name_.push_back(bindingName);
                outputs_map_to_ordered_index_.push_back(orderdBlobs_.size());
            }
            blobsNameMapper_[bindingName] = i;
            orderdBlobs_.push_back(newTensor);
        }
        bindingsPtr_.resize(orderdBlobs_.size());
    }
    void TRTInferImpl::set_stream(cudaStream_t stream){
        this->context_->set_stream(stream);

        for(auto& t : orderdBlobs_)
            t->set_stream(stream);
    }

    cudaStream_t TRTInferImpl::get_stream() {
        return this->context_->stream_;
    }

    int TRTInferImpl::device() {
        return device_;
    }

    void TRTInferImpl::synchronize() {
        checkCudaRuntime(cudaStreamSynchronize(context_->stream_));
    }

    bool TRTInferImpl::is_output_name(const std::string& name){
        return std::find(outputs_name_.begin(), outputs_name_.end(), name) != outputs_name_.end();
    }

    bool TRTInferImpl::is_input_name(const std::string& name){
        return std::find(inputs_name_.begin(), inputs_name_.end(), name) != inputs_name_.end();
    }

    void TRTInferImpl::forward(bool sync) {

        EngineContext* context = (EngineContext*)context_.get();
        int inputBatchSize = inputs_[0]->size(0);
        for(int i = 0; i < context->engine_->getNbBindings(); ++i){
            auto dims = context->engine_->getBindingDimensions(i);
            auto type = context->engine_->getBindingDataType(i);
            dims.d[0] = inputBatchSize;
            if(context->engine_->bindingIsInput(i)){
                context->context_->setBindingDimensions(i, dims);
            }
        }

        for (int i = 0; i < outputs_.size(); ++i) {
            outputs_[i]->resize_single_dim(0, inputBatchSize);
            outputs_[i]->to_gpu(false);
        }

        for (int i = 0; i < orderdBlobs_.size(); ++i)
            bindingsPtr_[i] = orderdBlobs_[i]->gpu();

        void** bindingsptr = bindingsPtr_.data();
        //bool execute_result = context->context_->enqueue(inputBatchSize, bindingsptr, context->stream_, nullptr);
        bool execute_result = context->context_->enqueueV2(bindingsptr, context->stream_, nullptr);
        if(!execute_result){
            auto code = cudaGetLastError();
            INFOF("execute fail, code %d[%s], message %s", code, cudaGetErrorName(code), cudaGetErrorString(code));
        }

        if (sync) {
            synchronize();
        }
    }

    std::shared_ptr<MixMemory> TRTInferImpl::get_workspace() {
        return workspace_;
    }

    int TRTInferImpl::num_input() {
        return this->inputs_.size();
    }

    int TRTInferImpl::num_output() {
        return this->outputs_.size();
    }

    void TRTInferImpl::set_input (int index, std::shared_ptr<Tensor> tensor){
        Assert(index >= 0 && index < inputs_.size());
        this->inputs_[index] = tensor;

        int order_index = inputs_map_to_ordered_index_[index];
        this->orderdBlobs_[order_index] = tensor;
    }

    void TRTInferImpl::set_output(int index, std::shared_ptr<Tensor> tensor){
        Assert(index >= 0 && index < outputs_.size());
        this->outputs_[index] = tensor;

        int order_index = outputs_map_to_ordered_index_[index];
        this->orderdBlobs_[order_index] = tensor;
    }

    std::shared_ptr<Tensor> TRTInferImpl::input(int index) {
        Assert(index >= 0 && index < inputs_name_.size());
        return this->inputs_[index];
    }

    std::string TRTInferImpl::get_input_name(int index){
        Assert(index >= 0 && index < inputs_name_.size());
        return inputs_name_[index];
    }

    std::shared_ptr<Tensor> TRTInferImpl::output(int index) {
        Assert(index >= 0 && index < outputs_.size());
        return outputs_[index];
    }

    std::string TRTInferImpl::get_output_name(int index){
        Assert(index >= 0 && index < outputs_name_.size());
        return outputs_name_[index];
    }

    int TRTInferImpl::get_max_batch_size() {
        Assert(this->context_ != nullptr);
        return this->context_->engine_->getMaxBatchSize();
    }

    std::shared_ptr<Tensor> TRTInferImpl::tensor(const std::string& name) {
        Assert(this->blobsNameMapper_.find(name) != this->blobsNameMapper_.end());
        return orderdBlobs_[blobsNameMapper_[name]];
    }

    std::shared_ptr<TRTInferImpl> load_infer(const string& file) {

        std::shared_ptr<TRTInferImpl> infer(new TRTInferImpl());
        if (!infer->load(file))
            infer.reset();
        return infer;
    }
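    // Illustrative usage sketch (an addition, not from the original file): loading a
    // serialized engine directly and running one forward pass. "yolov5s.engine" is a
    // placeholder path; input data would still have to be written into input(0).
    //
    //     auto engine = load_infer("yolov5s.engine");
    //     if(engine){
    //         engine->print();                              // dump inputs/outputs/batch size
    //         engine->input(0)->resize_single_dim(0, 1);    // batch of one
    //         engine->forward(true);                        // synchronous inference
    //     }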
    //////////////////////////////class MonopolyAllocator//////////////////////////////////////
    /* Exclusive allocator.
       Tensors are managed exclusively: there are max_batch * 2 tensors, and query() hands
       one out. Once inference is done with it, the tensor gives up its ownership and can
       serve the next image, so the memory is reused. */
    template<class _ItemType>
    class MonopolyAllocator{
    public:
        class MonopolyData{
        public:
            std::shared_ptr<_ItemType>& data(){ return data_; }
            void release(){manager_->release_one(this);}

        private:
            MonopolyData(MonopolyAllocator* pmanager){manager_ = pmanager;}

        private:
            friend class MonopolyAllocator;
            MonopolyAllocator* manager_ = nullptr;
            std::shared_ptr<_ItemType> data_;
            bool available_ = true;
        };
        typedef std::shared_ptr<MonopolyData> MonopolyDataPointer;

        MonopolyAllocator(int size){
            capacity_ = size;
            num_available_ = size;
            datas_.resize(size);

            for(int i = 0; i < size; ++i)
                datas_[i] = std::shared_ptr<MonopolyData>(new MonopolyData(this));
        }

        virtual ~MonopolyAllocator(){
            run_ = false;
            cv_.notify_all();

            std::unique_lock<std::mutex> l(lock_);
            cv_exit_.wait(l, [&](){
                return num_wait_thread_ == 0;
            });
        }

        MonopolyDataPointer query(int timeout = 10000){

            std::unique_lock<std::mutex> l(lock_);
            if(!run_) return nullptr;

            if(num_available_ == 0){
                num_wait_thread_++;

                auto state = cv_.wait_for(l, std::chrono::milliseconds(timeout), [&](){
                    return num_available_ > 0 || !run_;
                });

                num_wait_thread_--;
                cv_exit_.notify_one();

                // timeout, no available, exit program
                if(!state || num_available_ == 0 || !run_)
                    return nullptr;
            }

            auto item = std::find_if(datas_.begin(), datas_.end(), [](MonopolyDataPointer& item){return item->available_;});
            if(item == datas_.end())
                return nullptr;

            (*item)->available_ = false;
            num_available_--;
            return *item;
        }

        int num_available(){
            return num_available_;
        }

        int capacity(){
            return capacity_;
        }

    private:
        void release_one(MonopolyData* prq){
            std::unique_lock<std::mutex> l(lock_);
            if(!prq->available_){
                prq->available_ = true;
                num_available_++;
                cv_.notify_one();
            }
        }

    private:
        std::mutex lock_;
        std::condition_variable cv_;
        std::condition_variable cv_exit_;
        std::vector<MonopolyDataPointer> datas_;
        int capacity_ = 0;
        volatile int num_available_ = 0;
        volatile int num_wait_thread_ = 0;
        volatile bool run_ = true;
    };
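    // Illustrative usage sketch (an addition, not from the original file): the
    // query/release lifecycle. YoloTRTInferImpl::preprocess() below queries a slot, and
    // worker() releases it once the tensor's contents have been copied into the batch.
    //
    //     MonopolyAllocator<Tensor> allocator(max_batch_size * 2);
    //     auto mono = allocator.query();        // blocks up to 10s if none is available
    //     if(mono){
    //         // ... fill mono->data() ...
    //         mono->release();                  // hand the slot to the next image
    //     }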
    /////////////////////////////////////////class ThreadSafedAsyncInfer/////////////////////////////////////////////
    /* Asynchronous, thread-safe inference engine.
       It starts a worker thread, so callers may feed images in from any thread
       and fetch the asynchronous results through futures. */
    template<class Input, class Output, class StartParam=std::tuple<std::string, int>, class JobAdditional=int>
    class ThreadSafedAsyncInfer{
    public:
        struct Job{
            Input input;
            Output output;
            JobAdditional additional;
            MonopolyAllocator<Tensor>::MonopolyDataPointer mono_tensor;
            std::shared_ptr<std::promise<Output>> pro;
        };

        virtual ~ThreadSafedAsyncInfer(){
            stop();
        }

        void stop(){
            run_ = false;
            cond_.notify_all();

            ////////////////////////////////////////// cleanup jobs
            {
                std::unique_lock<std::mutex> l(jobs_lock_);
                while(!jobs_.empty()){
                    auto& item = jobs_.front();
                    if(item.pro)
                        item.pro->set_value(Output());
                    jobs_.pop();
                }
            };

            if(worker_){
                worker_->join();
                worker_.reset();
            }
        }

        bool startup(const StartParam& param){
            run_ = true;

            std::promise<bool> pro;
            start_param_ = param;
            worker_      = std::make_shared<std::thread>(&ThreadSafedAsyncInfer::worker, this, std::ref(pro));
            return pro.get_future().get();
        }

        virtual std::shared_future<Output> commit(const Input& input){

            Job job;
            job.pro = std::make_shared<std::promise<Output>>();
            if(!preprocess(job, input)){
                job.pro->set_value(Output());
                return job.pro->get_future();
            }

            ///////////////////////////////////////////////////////////
            {
                std::unique_lock<std::mutex> l(jobs_lock_);
                jobs_.push(job);
            };
            cond_.notify_one();
            return job.pro->get_future();
        }

        virtual std::vector<std::shared_future<Output>> commits(const std::vector<Input>& inputs){

            int batch_size = std::min((int)inputs.size(), this->tensor_allocator_->capacity());
            std::vector<Job> jobs(inputs.size());
            std::vector<std::shared_future<Output>> results(inputs.size());

            int nepoch = (inputs.size() + batch_size - 1) / batch_size;
            for(int epoch = 0; epoch < nepoch; ++epoch){
                int begin = epoch * batch_size;
                int end   = std::min((int)inputs.size(), begin + batch_size);

                for(int i = begin; i < end; ++i){
                    Job& job = jobs[i];
                    job.pro = std::make_shared<std::promise<Output>>();
                    if(!preprocess(job, inputs[i])){
                        job.pro->set_value(Output());
                    }
                    results[i] = job.pro->get_future();
                }

                ///////////////////////////////////////////////////////////
                {
                    std::unique_lock<std::mutex> l(jobs_lock_);
                    for(int i = begin; i < end; ++i){
                        jobs_.emplace(std::move(jobs[i]));
                    };
                }
                cond_.notify_one();
            }
            return results;
        }

    protected:
        virtual void worker(std::promise<bool>& result) = 0;
        virtual bool preprocess(Job& job, const Input& input) = 0;

        virtual bool get_jobs_and_wait(std::vector<Job>& fetch_jobs, int max_size){

            std::unique_lock<std::mutex> l(jobs_lock_);
            cond_.wait(l, [&](){
                return !run_ || !jobs_.empty();
            });

            if(!run_) return false;

            fetch_jobs.clear();
            for(int i = 0; i < max_size && !jobs_.empty(); ++i){
                fetch_jobs.emplace_back(std::move(jobs_.front()));
                jobs_.pop();
            }
            return true;
        }

        virtual bool get_job_and_wait(Job& fetch_job){

            std::unique_lock<std::mutex> l(jobs_lock_);
            cond_.wait(l, [&](){
                return !run_ || !jobs_.empty();
            });

            if(!run_) return false;

            fetch_job = std::move(jobs_.front());
            jobs_.pop();
            return true;
        }

    protected:
        StartParam start_param_;
        std::atomic<bool> run_;
        std::mutex jobs_lock_;
        std::queue<Job> jobs_;
        std::shared_ptr<std::thread> worker_;
        std::condition_variable cond_;
        std::shared_ptr<MonopolyAllocator<Tensor>> tensor_allocator_;
    };

    ///////////////////////////////////class YoloTRTInferImpl//////////////////////////////////////
    /* The concrete Yolo implementation.
       Thanks to the classes above, preprocessing overlaps with inference and calls cross
       threads asynchronously; images are finally stitched together into one batch for
       inference, maximizing GPU utilization for fast, robust, easy-to-use yolo inference. */
    const char* type_name(Type type){
        switch(type){
        case Type::V5: return "YoloV5";
        case Type::X: return "YoloX";
        default: return "Unknown";
        }
    }

    struct AffineMatrix{
        float i2d[6];       // image to dst(network), 2x3 matrix
        float d2i[6];       // dst to image, 2x3 matrix

        void compute(const cv::Size& from, const cv::Size& to){
            float scale_x = to.width / (float)from.width;
            float scale_y = to.height / (float)from.height;
            float scale = std::min(scale_x, scale_y);

            i2d[0] = scale;  i2d[1] = 0;      i2d[2] = -scale * from.width  * 0.5 + to.width  * 0.5 + scale * 0.5 - 0.5;
            i2d[3] = 0;      i2d[4] = scale;  i2d[5] = -scale * from.height * 0.5 + to.height * 0.5 + scale * 0.5 - 0.5;

            cv::Mat m2x3_i2d(2, 3, CV_32F, i2d);
            cv::Mat m2x3_d2i(2, 3, CV_32F, d2i);
            cv::invertAffineTransform(m2x3_i2d, m2x3_d2i);
        }

        cv::Mat i2d_mat(){
            return cv::Mat(2, 3, CV_32F, i2d);
        }
    };
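    // Illustrative worked example (an addition, not from the original file): for a
    // 1280x720 image mapped into a 640x640 network input, scale = min(640/1280, 640/720)
    // = 0.5, so the image becomes 640x360 centered vertically with padded bands; i2d =
    // [0.5, 0, -0.25, 0, 0.5, 139.75], and d2i maps detections back to image coordinates.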
    using ThreadSafedAsyncInferImpl = ThreadSafedAsyncInfer
    <
        cv::Mat,                    // input
        BoxArray,                   // output
        tuple<string, int>,         // start param
        AffineMatrix                // additional
    >;

    class YoloTRTInferImpl : public Infer, public ThreadSafedAsyncInferImpl{
    public:

        /** stop() must be executed in this implementation class, not in the base class **/
        virtual ~YoloTRTInferImpl(){
            stop();
        }

        virtual bool startup(const string& file, Type type, int gpuid, float confidence_threshold, float nms_threshold){

            if(type == Type::V5){
                normalize_ = Norm::alpha_beta(1 / 255.0f, 0.0f, ChannelType::SwapRB);
            }else if(type == Type::X){
                //float mean[] = {0.485, 0.456, 0.406};
                //float std[]  = {0.229, 0.224, 0.225};
                //normalize_ = Norm::mean_std(mean, std, 1/255.0f, ChannelType::Invert);
                normalize_ = Norm::None();
            }else{
                INFOE("Unsupported type %d", type);
            }

            confidence_threshold_ = confidence_threshold;
            nms_threshold_        = nms_threshold;
            return ThreadSafedAsyncInferImpl::startup(make_tuple(file, gpuid));
        }

        virtual void worker(promise<bool>& result) override{

            string file = get<0>(start_param_);
            int gpuid   = get<1>(start_param_);

            set_device(gpuid);
            auto engine = load_infer(file);
            if(engine == nullptr){
                INFOE("Engine %s load failed", file.c_str());
                result.set_value(false);
                return;
            }

            engine->print();

            const int MAX_IMAGE_BBOX  = 1024;
            const int NUM_BOX_ELEMENT = 7;      // left, top, right, bottom, confidence, class, keepflag
            Tensor affin_matrix_device;
            Tensor output_array_device;
            int max_batch_size = engine->get_max_batch_size();
            auto input         = engine->tensor("images");
            auto output        = engine->tensor("output");
            int num_classes    = output->size(2) - 5;

            input_width_       = input->size(3);
            input_height_      = input->size(2);
            tensor_allocator_  = make_shared<MonopolyAllocator<Tensor>>(max_batch_size * 2);
            stream_            = engine->get_stream();
            gpu_               = gpuid;
            result.set_value(true);

            input->resize_single_dim(0, max_batch_size).to_gpu();
            affin_matrix_device.set_stream(stream_);

            // 8 floats per matrix guarantee that 8 * sizeof(float) % 32 == 0 (32-byte alignment)
            affin_matrix_device.resize(max_batch_size, 8).to_gpu();

            // the 1 + MAX_IMAGE_BBOX layout is: counter + bboxes ...
            output_array_device.resize(max_batch_size, 1 + MAX_IMAGE_BBOX * NUM_BOX_ELEMENT).to_gpu();

            vector<Job> fetch_jobs;
            while(get_jobs_and_wait(fetch_jobs, max_batch_size)){

                int infer_batch_size = fetch_jobs.size();
                input->resize_single_dim(0, infer_batch_size);

                for(int ibatch = 0; ibatch < infer_batch_size; ++ibatch){
                    auto& job  = fetch_jobs[ibatch];
                    auto& mono = job.mono_tensor->data();
                    affin_matrix_device.copy_from_gpu(affin_matrix_device.offset(ibatch), mono->get_workspace()->gpu(), 6);
                    input->copy_from_gpu(input->offset(ibatch), mono->gpu(), mono->count());
                    job.mono_tensor->release();
                }

                engine->forward(false);
                output_array_device.to_gpu(false);
                for(int ibatch = 0; ibatch < infer_batch_size; ++ibatch){

                    auto& job                 = fetch_jobs[ibatch];
                    float* image_based_output = output->gpu<float>(ibatch);
                    float* output_array_ptr   = output_array_device.gpu<float>(ibatch);
                    auto affine_matrix        = affin_matrix_device.gpu<float>(ibatch);
                    checkCudaRuntime(cudaMemsetAsync(output_array_ptr, 0, sizeof(int), stream_));
                    decode_kernel_invoker(image_based_output, output->size(1), num_classes, confidence_threshold_, nms_threshold_, affine_matrix, output_array_ptr, MAX_IMAGE_BBOX, stream_);
                }

                output_array_device.to_cpu();
                for(int ibatch = 0; ibatch < infer_batch_size; ++ibatch){
                    float* parray = output_array_device.cpu<float>(ibatch);
                    int count     = min(MAX_IMAGE_BBOX, (int)*parray);
                    auto& job     = fetch_jobs[ibatch];
                    auto& image_based_boxes = job.output;
                    for(int i = 0; i < count; ++i){
                        float* pbox  = parray + 1 + i * NUM_BOX_ELEMENT;
                        int label    = pbox[5];
                        int keepflag = pbox[6];
                        if(keepflag == 1){
                            image_based_boxes.emplace_back(pbox[0], pbox[1], pbox[2], pbox[3], pbox[4], label);
                        }
                    }
                    job.pro->set_value(image_based_boxes);
                }
                fetch_jobs.clear();
            }
            stream_ = nullptr;
            tensor_allocator_.reset();
            INFO("Engine destroy.");
        }
        virtual bool preprocess(Job& job, const Mat& image) override{

            if(tensor_allocator_ == nullptr){
                INFOE("tensor_allocator_ is nullptr");
                return false;
            }

            job.mono_tensor = tensor_allocator_->query();
            if(job.mono_tensor == nullptr){
                INFOE("Tensor allocator query failed.");
                return false;
            }

            AutoDevice auto_device(gpu_);
            auto& tensor = job.mono_tensor->data();
            if(tensor == nullptr){
                // not init
                tensor = make_shared<Tensor>();
                tensor->set_workspace(make_shared<MixMemory>());
            }

            Size input_size(input_width_, input_height_);
            job.additional.compute(image.size(), input_size);

            tensor->set_stream(stream_);
            tensor->resize(1, 3, input_height_, input_width_);

            size_t size_image  = image.cols * image.rows * 3;
            size_t size_matrix = upbound(sizeof(job.additional.d2i), 32);
            auto workspace     = tensor->get_workspace();
            uint8_t* gpu_workspace        = (uint8_t*)workspace->gpu(size_matrix + size_image);
            float*   affine_matrix_device = (float*)gpu_workspace;
            uint8_t* image_device         = size_matrix + gpu_workspace;

            uint8_t* cpu_workspace        = (uint8_t*)workspace->cpu(size_matrix + size_image);
            float*   affine_matrix_host   = (float*)cpu_workspace;
            uint8_t* image_host           = size_matrix + cpu_workspace;

            //checkCudaRuntime(cudaMemcpyAsync(image_host, image.data, size_image, cudaMemcpyHostToHost, stream_));
            // speed up
            memcpy(image_host, image.data, size_image);
            memcpy(affine_matrix_host, job.additional.d2i, sizeof(job.additional.d2i));
            checkCudaRuntime(cudaMemcpyAsync(image_device, image_host, size_image, cudaMemcpyHostToDevice, stream_));
            checkCudaRuntime(cudaMemcpyAsync(affine_matrix_device, affine_matrix_host, sizeof(job.additional.d2i), cudaMemcpyHostToDevice, stream_));

            warp_affine_bilinear_and_normalize_plane(
                image_device,         image.cols * 3, image.cols, image.rows,
                tensor->gpu<float>(), input_width_,   input_height_,
                affine_matrix_device, 114,
                normalize_, stream_
            );
            return true;
        }

        virtual vector<shared_future<BoxArray>> commits(const vector<Mat>& images) override{
            return ThreadSafedAsyncInferImpl::commits(images);
        }

        virtual std::shared_future<BoxArray> commit(const Mat& image) override{
            return ThreadSafedAsyncInferImpl::commit(image);
        }

    private:
        int input_width_            = 0;
        int input_height_           = 0;
        int gpu_                    = 0;
        float confidence_threshold_ = 0;
        float nms_threshold_        = 0;
        cudaStream_t stream_        = nullptr;
        Norm normalize_;
    };
    void image_to_tensor(const cv::Mat& image, shared_ptr<Tensor>& tensor, Type type, int ibatch){

        Norm normalize;
        if(type == Type::V5){
            normalize = Norm::alpha_beta(1 / 255.0f, 0.0f, ChannelType::SwapRB);
        }else if(type == Type::X){
            //float mean[] = {0.485, 0.456, 0.406};
            //float std[]  = {0.229, 0.224, 0.225};
            //normalize_ = CUDAKernel::Norm::mean_std(mean, std, 1/255.0f, CUDAKernel::ChannelType::Invert);
            normalize = Norm::None();
        }else{
            INFOE("Unsupported type %d", type);
        }

        Size input_size(tensor->size(3), tensor->size(2));
        AffineMatrix affine;
        affine.compute(image.size(), input_size);

        size_t size_image  = image.cols * image.rows * 3;
        size_t size_matrix = upbound(sizeof(affine.d2i), 32);
        auto workspace     = tensor->get_workspace();
        uint8_t* gpu_workspace        = (uint8_t*)workspace->gpu(size_matrix + size_image);
        float*   affine_matrix_device = (float*)gpu_workspace;
        uint8_t* image_device         = size_matrix + gpu_workspace;

        uint8_t* cpu_workspace        = (uint8_t*)workspace->cpu(size_matrix + size_image);
        float*   affine_matrix_host   = (float*)cpu_workspace;
        uint8_t* image_host           = size_matrix + cpu_workspace;
        auto stream                   = tensor->get_stream();

        memcpy(image_host, image.data, size_image);
        memcpy(affine_matrix_host, affine.d2i, sizeof(affine.d2i));
        checkCudaRuntime(cudaMemcpyAsync(image_device, image_host, size_image, cudaMemcpyHostToDevice, stream));
        checkCudaRuntime(cudaMemcpyAsync(affine_matrix_device, affine_matrix_host, sizeof(affine.d2i), cudaMemcpyHostToDevice, stream));

        warp_affine_bilinear_and_normalize_plane(
            image_device,               image.cols * 3,   image.cols, image.rows,
            tensor->gpu<float>(ibatch), input_size.width, input_size.height,
            affine_matrix_device, 114,
            normalize, stream
        );
    }

    shared_ptr<Infer> create_infer(const string& engine_file, Type type, int gpuid, float confidence_threshold, float nms_threshold){
        shared_ptr<YoloTRTInferImpl> instance(new YoloTRTInferImpl());
        if(!instance->startup(engine_file, type, gpuid, confidence_threshold, nms_threshold)){
            instance.reset();
        }
        return instance;
    }

    //////////////////////////////////////Compile Model/////////////////////////////////////////////////////////////

    const char* mode_string(Mode type) {
        switch (type) {
        case Mode::FP32:
            return "FP32";
        case Mode::FP16:
            return "FP16";
        case Mode::INT8:
            return "INT8";
        default:
            return "UnknownCompileMode";
        }
    }

    typedef std::function<void(
        int current, int count, const std::vector<std::string>& files,
        std::shared_ptr<Tensor>& tensor
    )> Int8Process;

    class Int8EntropyCalibrator : public IInt8EntropyCalibrator2{
    public:
        Int8EntropyCalibrator(const vector<string>& imagefiles, nvinfer1::Dims dims, const Int8Process& preprocess) {

            Assert(preprocess != nullptr);
            this->dims_ = dims;
            this->allimgs_ = imagefiles;
            this->preprocess_ = preprocess;
            this->fromCalibratorData_ = false;
            files_.resize(dims.d[0]);
            checkCudaRuntime(cudaStreamCreate(&stream_));
        }

        Int8EntropyCalibrator(const vector<uint8_t>& entropyCalibratorData, nvinfer1::Dims dims, const Int8Process& preprocess) {

            Assert(preprocess != nullptr);
            this->dims_ = dims;
            this->entropyCalibratorData_ = entropyCalibratorData;
            this->preprocess_ = preprocess;
            this->fromCalibratorData_ = true;
            files_.resize(dims.d[0]);
            checkCudaRuntime(cudaStreamCreate(&stream_));
        }

        virtual ~Int8EntropyCalibrator(){
            checkCudaRuntime(cudaStreamDestroy(stream_));
        }

        int getBatchSize() const noexcept {
            return dims_.d[0];
        }

        bool next() {
            int batch_size = dims_.d[0];
            if (cursor_ + batch_size > allimgs_.size())
                return false;

            int old_cursor = cursor_;
            for(int i = 0; i < batch_size; ++i)
                files_[i] = allimgs_[cursor_++];

            if (!tensor_){
                tensor_.reset(new Tensor(dims_.nbDims, dims_.d));
                tensor_->set_stream(stream_);
                tensor_->set_workspace(make_shared<MixMemory>());
            }

            preprocess_(old_cursor, allimgs_.size(), files_, tensor_);
            return true;
        }

        bool getBatch(void* bindings[], const char* names[], int nbBindings) noexcept {
            if (!next()) return false;
            bindings[0] = tensor_->gpu();
            return true;
        }

        const vector<uint8_t>& getEntropyCalibratorData() {
            return entropyCalibratorData_;
        }

        const void* readCalibrationCache(size_t& length) noexcept {
            if (fromCalibratorData_) {
                length = this->entropyCalibratorData_.size();
                return this->entropyCalibratorData_.data();
            }

            length = 0;
            return nullptr;
        }

        virtual void writeCalibrationCache(const void* cache, size_t length) noexcept {
            entropyCalibratorData_.assign((uint8_t*)cache, (uint8_t*)cache + length);
        }

    private:
        Int8Process preprocess_;
        vector<string> allimgs_;
        size_t batchCudaSize_ = 0;
        int cursor_ = 0;
        nvinfer1::Dims dims_;
        vector<string> files_;
        shared_ptr<Tensor> tensor_;
        vector<uint8_t> entropyCalibratorData_;
        bool fromCalibratorData_ = false;
        cudaStream_t stream_ = nullptr;
    };
shared_ptr<Tensor>& tensor){ for(int i = 0; i < files.size(); ++i){ auto& file = files[i]; INFO("Int8 load %d / %d, %s", current + i + 1, count, file.c_str()); auto image = cv::imread(file); if(image.empty()){ INFOE("Load image failed, %s", file.c_str()); continue; } image_to_tensor(image, tensor, type, i); } tensor->synchronize(); }; if (mode == Mode::INT8) { if (!int8_entropy_calibrator_cache_file.empty()) { if (exists(int8_entropy_calibrator_cache_file)) { entropyCalibratorData = load_file(int8_entropy_calibrator_cache_file); if (entropyCalibratorData.empty()) { INFOE("entropyCalibratorFile is set as: %s, but we read is empty.", int8_entropy_calibrator_cache_file.c_str()); return false; } hasEntropyCalibrator = true; } } if (hasEntropyCalibrator) { if (!int8_images_folder.empty()) { INFOW("int8_images_folder is ignore, when int8_entropy_calibrator_cache_file is set"); } } else { entropyCalibratorFiles = glob_image_files(int8_images_folder); if (entropyCalibratorFiles.empty()) { INFOE("Can not find any images(jpg/png/bmp/jpeg/tiff) from directory: %s", int8_images_folder.c_str()); return false; } if(entropyCalibratorFiles.size() < max_batch_size){ INFOW("Too few images provided, %d[provided] < %d[max batch size], image copy will be performed", entropyCalibratorFiles.size(), max_batch_size); for(int i = entropyCalibratorFiles.size(); i < max_batch_size; ++i) entropyCalibratorFiles.push_back(entropyCalibratorFiles[i % entropyCalibratorFiles.size()]); } } } else { if (hasEntropyCalibrator) { INFOW("int8_entropy_calibrator_cache_file is ignore, when Mode is '%s'", mode_string(mode)); } } INFO("Compile %s %s.", mode_string(mode), source_onnx.c_str()); shared_ptr<IBuilder> builder(createInferBuilder(gLogger), destroy_nvidia_pointer<IBuilder>); if (builder == nullptr) { INFOE("Can not create builder."); return false; } shared_ptr<IBuilderConfig> config(builder->createBuilderConfig(), destroy_nvidia_pointer<IBuilderConfig>); if (mode == Mode::FP16) { if (!builder->platformHasFastFp16()) { INFOW("Platform not have fast fp16 support"); } config->setFlag(BuilderFlag::kFP16); } else if (mode == Mode::INT8) { if (!builder->platformHasFastInt8()) { INFOW("Platform not have fast int8 support"); } config->setFlag(BuilderFlag::kINT8); } shared_ptr<INetworkDefinition> network; shared_ptr<nvonnxparser::IParser> onnxParser; const auto explicitBatch = 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH); network = shared_ptr<INetworkDefinition>(builder->createNetworkV2(explicitBatch), destroy_nvidia_pointer<INetworkDefinition>); //from onnx is not markOutput onnxParser.reset(nvonnxparser::createParser(*network, gLogger), destroy_nvidia_pointer<nvonnxparser::IParser>); if (onnxParser == nullptr) { INFOE("Can not create parser."); return false; } if (!onnxParser->parseFromFile(source_onnx.c_str(), 1)) { INFOE("Can not parse OnnX file: %s", source_onnx.c_str()); return false; } auto inputTensor = network->getInput(0); auto inputDims = inputTensor->getDimensions(); shared_ptr<Int8EntropyCalibrator> int8Calibrator; if (mode == Mode::INT8) { auto calibratorDims = inputDims; calibratorDims.d[0] = max_batch_size; if (hasEntropyCalibrator) { INFO("Using exist entropy calibrator data[%d bytes]: %s", entropyCalibratorData.size(), int8_entropy_calibrator_cache_file.c_str()); int8Calibrator.reset(new Int8EntropyCalibrator( entropyCalibratorData, calibratorDims, int8process )); } else { INFO("Using image list[%d files]: %s", entropyCalibratorFiles.size(), int8_images_folder.c_str()); 
int8Calibrator.reset(new Int8EntropyCalibrator( entropyCalibratorFiles, calibratorDims, int8process )); } config->setInt8Calibrator(int8Calibrator.get()); } INFO("Input shape is %s", join_dims(vector<int>(inputDims.d, inputDims.d + inputDims.nbDims)).c_str()); INFO("Set max batch size = %d", max_batch_size); INFO("Set max workspace size = %.2f MB", max_workspace_size / 1024.0f / 1024.0f); int net_num_input = network->getNbInputs(); INFO("Network has %d inputs:", net_num_input); vector<string> input_names(net_num_input); for(int i = 0; i < net_num_input; ++i){ auto tensor = network->getInput(i); auto dims = tensor->getDimensions(); auto dims_str = join_dims(vector<int>(dims.d, dims.d+dims.nbDims)); INFO(" %d.[%s] shape is %s", i, tensor->getName(), dims_str.c_str()); input_names[i] = tensor->getName(); } int net_num_output = network->getNbOutputs(); INFO("Network has %d outputs:", net_num_output); for(int i = 0; i < net_num_output; ++i){ auto tensor = network->getOutput(i); auto dims = tensor->getDimensions(); auto dims_str = join_dims(vector<int>(dims.d, dims.d+dims.nbDims)); INFO(" %d.[%s] shape is %s", i, tensor->getName(), dims_str.c_str()); } int net_num_layers = network->getNbLayers(); INFO("Network has %d layers", net_num_layers); builder->setMaxBatchSize(max_batch_size); config->setMaxWorkspaceSize(max_workspace_size); auto profile = builder->createOptimizationProfile(); for(int i = 0; i < net_num_input; ++i){ auto input = network->getInput(i); auto input_dims = input->getDimensions(); input_dims.d[0] = 1; profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kMIN, input_dims); profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kOPT, input_dims); input_dims.d[0] = max_batch_size; profile->setDimensions(input->getName(), nvinfer1::OptProfileSelector::kMAX, input_dims); } config->addOptimizationProfile(profile); INFO("Building engine..."); auto time_start = chrono::duration_cast<chrono::milliseconds>(chrono::system_clock::now().time_since_epoch()).count(); shared_ptr<ICudaEngine> engine(builder->buildEngineWithConfig(*network, *config), destroy_nvidia_pointer<ICudaEngine>); if (engine == nullptr) { INFOE("engine is nullptr"); return false; } if (mode == Mode::INT8) { if (!hasEntropyCalibrator) { if (!int8_entropy_calibrator_cache_file.empty()) { INFO("Save calibrator to: %s", int8_entropy_calibrator_cache_file.c_str()); save_file(int8_entropy_calibrator_cache_file, int8Calibrator->getEntropyCalibratorData()); } else { INFO("No set entropyCalibratorFile, and entropyCalibrator will not save."); } } } auto time_end = chrono::duration_cast<chrono::milliseconds>(chrono::system_clock::now().time_since_epoch()).count(); INFO("Build done %lld ms !", time_end - time_start); // serialize the engine, then close everything down shared_ptr<IHostMemory> seridata(engine->serialize(), destroy_nvidia_pointer<IHostMemory>); return save_file(saveto, seridata->data(), seridata->size()); } };
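// A minimal usage sketch for the compile()/create_infer() entry points above,
// assuming it sits in the same namespace and that the Infer interface exposes
// the commit() shown earlier; the engine, ONNX and image paths are
// hypothetical placeholders, and BoxArray is assumed vector-like.
static int yolo_demo(){
    // one-time engine build: FP16, max batch 8, 1 GB workspace, no INT8 data
    if(!compile(Mode::FP16, Type::V5, 8, "yolov5s.onnx", "yolov5s.trtmodel",
                1ull << 30, "", ""))
        return -1;

    // load the serialized engine on GPU 0 with typical thresholds
    auto infer = create_infer("yolov5s.trtmodel", Type::V5, 0, 0.25f, 0.45f);
    if(infer == nullptr)
        return -1;

    // commit() is asynchronous; get() blocks until detection finishes
    cv::Mat image = cv::imread("demo.jpg");
    auto boxes = infer->commit(image).get();
    return (int)boxes.size();
}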
901cee6a0edfdef00fcec98c0045dfc73eb2e90f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * @Description: GPU implementation of bp_par_2d_sv.h * @Author: Tianling Lyu * @Date: 2019-12-03 11:42:04 * @LastEditors: Tianling Lyu * @LastEditTime: 2019-12-08 14:57:04 */ #include "include/bp_par_2d_sv.h" #define _USE_MATH_DEFINES #include <cmath> #include "cuda/cuda_common.h" #ifndef M_PI #define M_PI 3.14159265358979323846264338327950288 #define M_PI_4 M_PI/4 #endif #define MAX(x, y) (((x)>(y)) ? (x) : (y)) namespace ct_recon { #ifdef USE_ROCM __global__ void ParallelSingleViewBp2DPixDrivenPrepKernel(double* xcos, double* ysin, const ParallelSingleViewBp2DParam param, const int n_elements) { for (int thread_id : CudaGridRangeX<int>(n_elements)) { int ia = thread_id % param.na; int ipos = thread_id / param.na; double angle = param.orbit_start + ia * param.orbit; // calculate x*cos(angle) if (ipos < param.nx) { double centx = static_cast<double>(param.nx-1) / 2 + param.offset_x; double posx = (ipos-centx) * param.dx; xcos[ia + ipos*param.na] = posx * cos(angle); } // calculate y*sin(angle) if (ipos < param.ny) { double centy = static_cast<double>(param.ny-1) / 2 + param.offset_y; double posy = (centy-ipos) * param.dy; ysin[ia + ipos*param.na] = posy * sin(angle); } } return; } bool ParallelSingleViewBp2DPixDrivenPrep::calculate_on_gpu(double* xcos, double* ysin, int* buffer, hipStream_t stream) const { int n_elements = param_.na * MAX(param_.nx, param_.ny); CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); hipLaunchKernelGGL(( ParallelSingleViewBp2DPixDrivenPrepKernel) , dim3(config.block_count), dim3(config.thread_per_block), 0, stream, xcos, ysin, param_, n_elements); hipError_t err = hipDeviceSynchronize(); return err==hipSuccess; } template <typename T> __global__ void ParallelSingleViewBp2DPixDrivenKernel(const T* proj, T* img, const double* xcos, const double* ysin, ParallelSingleViewBp2DParam param, const int n_elements) { for (int thread_id : CudaGridRangeX<int>(n_elements)) { int ix = thread_id % param.nx; int iy = thread_id / param.nx; double cents = (static_cast<double>(param.ns-1)) / 2 + param.offset_s; double s, u; int is1, is2; double sum = 0; const double *xcos_ptr = xcos + ix * param.na; const double *ysin_ptr = ysin + iy * param.na; const T* proj_ptr = proj; T* img_ptr = img + thread_id * param.na; // SingleViewBp for (unsigned int ia = 0; ia < param.na; ++ia) { s = (xcos_ptr[ia] + ysin_ptr[ia]) / param.ds + cents; if (s >= 0 && s <= param.ns-1) { // linear interpolation is1 = floor(s); is2 = ceil(s); u = s - is1; *img_ptr = (1-u) * proj_ptr[is1] + u * proj_ptr[is2]; } else { *img_ptr = 0; } ++img_ptr; proj_ptr += param.ns; } // write to image img[thread_id] = sum * param.orbit; } return; } template <> bool ParallelSingleViewBp2DPixDriven<float>::calculate_on_gpu(const float* proj, float* img, const double* xcos, const double* ysin, const int* buffer, hipStream_t stream) const { int n_elements = this->param_.nx*this->param_.ny; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); hipLaunchKernelGGL(( ParallelSingleViewBp2DPixDrivenKernel<float>) , dim3(config.block_count), dim3(config.thread_per_block), 0, stream, proj, img, xcos, ysin, this->param_, n_elements); hipError_t err = hipDeviceSynchronize(); return err==hipSuccess; } template <> bool ParallelSingleViewBp2DPixDriven<double>::calculate_on_gpu(const double* proj, double* img, const double* xcos, const double* ysin, const int* buffer, hipStream_t stream) const { int n_elements = 
this->param_.nx*this->param_.ny; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); hipLaunchKernelGGL(( ParallelSingleViewBp2DPixDrivenKernel<double>) , dim3(config.block_count), dim3(config.thread_per_block), 0, stream, proj, img, xcos, ysin, this->param_, n_elements); hipError_t err = hipDeviceSynchronize(); return err==hipSuccess; } #endif } // namespace ct_recon
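// A host-side reference sketch of the geometry both kernels above encode,
// assuming the ct_recon::ParallelSingleViewBp2DParam fields they read
// (nx/ny/ns/na, dx/dy/ds, offset_x/offset_y/offset_s, orbit_start/orbit).
// For pixel (ix, iy) and view ia, the parallel-beam detector coordinate is
// s = x*cos(a) + y*sin(a), scaled to a fractional channel index before the
// linear interpolation step:
static double detector_channel(const ct_recon::ParallelSingleViewBp2DParam& p,
                               int ix, int iy, int ia)
{
    double angle = p.orbit_start + ia * p.orbit;
    double centx = (p.nx - 1) / 2.0 + p.offset_x;
    double centy = (p.ny - 1) / 2.0 + p.offset_y;
    double cents = (p.ns - 1) / 2.0 + p.offset_s;
    double x = (ix - centx) * p.dx;   // image x, centered on the grid
    double y = (centy - iy) * p.dy;   // image y, row 0 at the top
    return (x * std::cos(angle) + y * std::sin(angle)) / p.ds + cents;
}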
901cee6a0edfdef00fcec98c0045dfc73eb2e90f.cu
/* * @Description: GPU implementation of bp_par_2d_sv.h * @Author: Tianling Lyu * @Date: 2019-12-03 11:42:04 * @LastEditors: Tianling Lyu * @LastEditTime: 2019-12-08 14:57:04 */ #include "include/bp_par_2d_sv.h" #define _USE_MATH_DEFINES #include <cmath> #include "cuda/cuda_common.h" #ifndef M_PI #define M_PI 3.14159265358979323846264338327950288 #define M_PI_4 M_PI/4 #endif #define MAX(x, y) (((x)>(y)) ? (x) : (y)) namespace ct_recon { #ifdef USE_CUDA __global__ void ParallelSingleViewBp2DPixDrivenPrepKernel(double* xcos, double* ysin, const ParallelSingleViewBp2DParam param, const int n_elements) { for (int thread_id : CudaGridRangeX<int>(n_elements)) { int ia = thread_id % param.na; int ipos = thread_id / param.na; double angle = param.orbit_start + ia * param.orbit; // calculate x*cos(angle) if (ipos < param.nx) { double centx = static_cast<double>(param.nx-1) / 2 + param.offset_x; double posx = (ipos-centx) * param.dx; xcos[ia + ipos*param.na] = posx * cos(angle); } // calculate y*sin(angle) if (ipos < param.ny) { double centy = static_cast<double>(param.ny-1) / 2 + param.offset_y; double posy = (centy-ipos) * param.dy; ysin[ia + ipos*param.na] = posy * sin(angle); } } return; } bool ParallelSingleViewBp2DPixDrivenPrep::calculate_on_gpu(double* xcos, double* ysin, int* buffer, cudaStream_t stream) const { int n_elements = param_.na * MAX(param_.nx, param_.ny); CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); ParallelSingleViewBp2DPixDrivenPrepKernel <<<config.block_count, config.thread_per_block, 0, stream>>> (xcos, ysin, param_, n_elements); cudaError_t err = cudaDeviceSynchronize(); return err==cudaSuccess; } template <typename T> __global__ void ParallelSingleViewBp2DPixDrivenKernel(const T* proj, T* img, const double* xcos, const double* ysin, ParallelSingleViewBp2DParam param, const int n_elements) { for (int thread_id : CudaGridRangeX<int>(n_elements)) { int ix = thread_id % param.nx; int iy = thread_id / param.nx; double cents = (static_cast<double>(param.ns-1)) / 2 + param.offset_s; double s, u; int is1, is2; double sum = 0; const double *xcos_ptr = xcos + ix * param.na; const double *ysin_ptr = ysin + iy * param.na; const T* proj_ptr = proj; T* img_ptr = img + thread_id * param.na; // SingleViewBp for (unsigned int ia = 0; ia < param.na; ++ia) { s = (xcos_ptr[ia] + ysin_ptr[ia]) / param.ds + cents; if (s >= 0 && s <= param.ns-1) { // linear interpolation is1 = floor(s); is2 = ceil(s); u = s - is1; *img_ptr = (1-u) * proj_ptr[is1] + u * proj_ptr[is2]; } else { *img_ptr = 0; } ++img_ptr; proj_ptr += param.ns; } // write to image img[thread_id] = sum * param.orbit; } return; } template <> bool ParallelSingleViewBp2DPixDriven<float>::calculate_on_gpu(const float* proj, float* img, const double* xcos, const double* ysin, const int* buffer, cudaStream_t stream) const { int n_elements = this->param_.nx*this->param_.ny; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); ParallelSingleViewBp2DPixDrivenKernel<float> <<<config.block_count, config.thread_per_block, 0, stream>>> (proj, img, xcos, ysin, this->param_, n_elements); cudaError_t err = cudaDeviceSynchronize(); return err==cudaSuccess; } template <> bool ParallelSingleViewBp2DPixDriven<double>::calculate_on_gpu(const double* proj, double* img, const double* xcos, const double* ysin, const int* buffer, cudaStream_t stream) const { int n_elements = this->param_.nx*this->param_.ny; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); ParallelSingleViewBp2DPixDrivenKernel<double> <<<config.block_count, 
config.thread_per_block, 0, stream>>> (proj, img, xcos, ysin, this->param_, n_elements); cudaError_t err = cudaDeviceSynchronize(); return err==cudaSuccess; } #endif } // namespace ct_recon
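// Both kernels above iterate with CudaGridRangeX from cuda/cuda_common.h,
// which is not reproduced here; a conventional grid-stride pattern of the
// kind it presumably wraps looks like this, so one launch covers any
// n_elements regardless of the block count chosen by GetCudaLaunchConfig():
template <typename T>
__global__ void grid_stride_scale(T* data, int n, T factor)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += gridDim.x * blockDim.x) {
        data[i] *= factor;   // each thread strides over the whole range
    }
}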
48f791df8b89f686f6ca4da64bfa01163683c126.hip
// !!! This is a file automatically generated by hipify!!! #include "mtbs_cu.h" #include <pthread.h> #include "ecm_list.h" #define EPOCH_MAX 64 typedef struct { unsigned short skrid; unsigned short offset:12; unsigned char barid:4; } mAO_t; static pthread_t host_scheduler; static BOOL host_scheduler_done; static hipStream_t strm_host; static unsigned char *mtb_epochs_host; static unsigned char *mtb_epochs_host_alloc; typedef struct { unsigned start, len; struct list_head list; } uprange_t; typedef struct { unsigned n_skruns; skrid_t skrid_start; skrun_t *skruns; mAO_t *mAOTs_host; struct list_head upranges; } htod_copyinfo_t; #define COPYIDX_OTHER() ((copyidx + 1) % 2) static unsigned copyidx; static htod_copyinfo_t copyinfos[2]; typedef struct { skrid_t skrid; unsigned char *epochs; } sched_ctx_t; static pthread_spinlock_t lock; static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; static pthread_cond_t cond = PTHREAD_COND_INITIALIZER; #define NEXT_EPOCH(epoch) (((epoch) + 1) % EPOCH_MAX) #define mTB_INDEX_HOST(idx_sm, idx_MTB, idx) ((idx_sm) * n_max_mtbs_per_sm + (idx_MTB) * n_mtbs_per_MTB + idx) #define EPOCH_HOST(idx_sm, idx_MTB, idx) mtb_epochs_host[mTB_INDEX_HOST(idx_sm, idx_MTB, idx) - 1] #define EPOCH_HOST_ALLOC(idx_sm, idx_MTB, idx) mtb_epochs_host_alloc[mTB_INDEX_HOST(idx_sm, idx_MTB, idx) - 1] #define mTB_ALLOCOFF_TABLE_EPOCH_HOST(cinfo, epoch) ((cinfo)->mAOTs_host + n_max_mtbs * (epoch)) #define mAO_EPOCH_HOST(cinfo, epoch, idx_sm, idx_MTB, idx) (mTB_ALLOCOFF_TABLE_EPOCH_HOST(cinfo, epoch) + mTB_INDEX_HOST(idx_sm, idx_MTB, idx) - 1) static skrun_t *g_skruns; static BOOL *g_mtbs_done; static BOOL *skrun_dones; static unsigned skrid_done_min; static unsigned cur_skrid_host; static BOOL checker_done; static pthread_t checker; static mAO_t *g_mAOTs; extern unsigned char *g_mtb_epochs; #include "sched_host.cuh" typedef struct { BOOL locked; unsigned idxs_last[8]; /* 8 should be larger than n_MTBs_per_sm */ unsigned char barid; } sched_sm_t; static sched_sm_t *sched_sms; static unsigned idx_ssm_last; static void lock_sm(unsigned *pidx_sm, unsigned *pidx_MTB) { unsigned idx_ssm_cur = 0, idx_ssm_start; unsigned n_MTBs = n_sm_count * n_MTBs_per_sm; pthread_spin_lock(&lock); idx_ssm_start = idx_ssm_last; while (TRUE) { if (!sched_sms[idx_ssm_last].locked) { sched_sms[idx_ssm_last].locked = TRUE; idx_ssm_cur = idx_ssm_last; idx_ssm_last = (idx_ssm_last + 1) % n_MTBs; break; } idx_ssm_last = (idx_ssm_last + 1) % n_MTBs; if (idx_ssm_last == idx_ssm_start) { pthread_spin_unlock(&lock); usleep(1); pthread_spin_lock(&lock); } } pthread_spin_unlock(&lock); *pidx_sm = idx_ssm_cur % n_sm_count; *pidx_MTB = idx_ssm_cur / n_sm_count; } static void unlock_sm(unsigned idx_sm, unsigned idx_MTB) { pthread_spin_lock(&lock); sched_sms[idx_MTB * n_sm_count + idx_sm].locked = FALSE; pthread_spin_unlock(&lock); } static BOOL find_mtbs_on_sm(unsigned idx_sm, unsigned idx_MTB, unsigned n_mtbs, unsigned char *epochs) { sched_sm_t *ssm = &sched_sms[idx_sm]; unsigned n_mtbs_cur = 0; unsigned idx, idx_start; idx = idx_start = ssm->idxs_last[idx_MTB]; do { int epoch = EPOCH_HOST(idx_sm, idx_MTB, idx + 1); int epoch_alloc = EPOCH_HOST_ALLOC(idx_sm, idx_MTB, idx + 1); /* Next epoch entry should be set to zero to proect overrun */ if (NEXT_EPOCH(NEXT_EPOCH(epoch_alloc)) == epoch) { idx = (idx + 1) % n_mtbs_per_MTB; continue; } epochs[idx] = epoch_alloc; n_mtbs_cur++; idx = (idx + 1) % n_mtbs_per_MTB; if (n_mtbs_cur == n_mtbs) { ssm->idxs_last[idx_MTB] = idx; return TRUE; } } while (idx != idx_start); return 
FALSE; } static uprange_t * create_uprange(unsigned up_idx) { uprange_t *ur; ur = (uprange_t *)malloc(sizeof(uprange_t)); ur->start = up_idx; ur->len = 1; return ur; } static BOOL get_sibling_upranges(htod_copyinfo_t *cinfo, unsigned up_idx, uprange_t **pprev, uprange_t **pnext) { uprange_t *prev = NULL; struct list_head *lp; list_for_each (lp, &cinfo->upranges) { uprange_t *ur = list_entry(lp, uprange_t, list); if (prev && prev->start <= up_idx && up_idx < prev->start + prev->len) return FALSE; if (up_idx == ur->start) return FALSE; if (up_idx < ur->start) { *pprev = prev; *pnext = ur; return TRUE; } prev = ur; } *pnext = NULL; *pprev = prev; if (prev && prev->start <= up_idx && up_idx < prev->start + prev->len) return FALSE; return TRUE; } static void apply_mAOT_uprange(unsigned char epoch, unsigned idx_sm, unsigned idx_MTB, unsigned idx, skrid_t skrid, unsigned short offset, unsigned char barid) { htod_copyinfo_t *cinfo; unsigned up_idx; uprange_t *ur_new; uprange_t *prev, *next; mAO_t *mAO; pthread_spin_lock(&lock); up_idx = n_max_mtbs * epoch + mTB_INDEX_HOST(idx_sm, idx_MTB, idx) - 1; cinfo = copyinfos + COPYIDX_OTHER(); mAO = mAO_EPOCH_HOST(cinfo, epoch, idx_sm, idx_MTB, idx); mAO->skrid = skrid; mAO->offset = offset; mAO->barid = barid; if (!get_sibling_upranges(cinfo, up_idx, &prev, &next)) { pthread_spin_unlock(&lock); return; } if (prev) { if (prev->start + prev->len == up_idx) prev->len++; else prev = NULL; } if (prev) { if (next) { if (prev->start + prev->len == next->start) { prev->len += next->len; list_del(&next->list); free(next); } } pthread_spin_unlock(&lock); return; } if (next) { if (up_idx == next->start - 1) { next->start = up_idx; next->len++; pthread_spin_unlock(&lock); return; } } ur_new = create_uprange(up_idx); if (next) list_add_tail(&ur_new->list, &next->list); else list_add_tail(&ur_new->list, &cinfo->upranges); pthread_spin_unlock(&lock); } static void set_mtbs_skrid(sched_ctx_t *pctx, unsigned idx_sm, unsigned idx_MTB, unsigned n_mtbs, unsigned char barid, unsigned char *epochs) { unsigned n_mtbs_cur = 0; unsigned short offbase = 0; unsigned i; for (i = 1; i <= n_mtbs_per_MTB; i++) { unsigned char epoch = epochs[i - 1]; if (epoch == EPOCH_MAX) continue; apply_mAOT_uprange(epoch, idx_sm, idx_MTB, i, pctx->skrid, offbase, barid); apply_mAOT_uprange(NEXT_EPOCH(epoch), idx_sm, idx_MTB, i, 0, (unsigned short)-1, barid); EPOCH_HOST_ALLOC(idx_sm, idx_MTB, i) = NEXT_EPOCH(epoch); n_mtbs_cur++; if (n_mtbs_cur == n_mtbs) return; offbase++; } } static BOOL assign_tb_by_rr(sched_ctx_t *pctx, unsigned n_mtbs) { unsigned i; for (i = 0; i < n_sm_count; i++) { unsigned idx_sm, idx_MTB; lock_sm(&idx_sm, &idx_MTB); memset(pctx->epochs, EPOCH_MAX, n_mtbs_per_MTB); if (find_mtbs_on_sm(idx_sm, idx_MTB, n_mtbs, pctx->epochs)) { unsigned char barid = 16; if (n_mtbs > 1) { barid = sched_sms[idx_sm].barid; sched_sms[idx_sm].barid = (barid + 1) % 16; } set_mtbs_skrid(pctx, idx_sm, idx_MTB, n_mtbs, barid, pctx->epochs); unlock_sm(idx_sm, idx_MTB); return TRUE; } unlock_sm(idx_sm, idx_MTB); } return FALSE; } static void reload_epochs(void) { cuMemcpyDtoHAsync(mtb_epochs_host, (hipDeviceptr_t)g_mtb_epochs, n_max_mtbs, strm_host); hipStreamSynchronize(strm_host); } static void assign_tb_host(sched_ctx_t *pctx, unsigned n_mtbs_per_tb) { do { if (assign_tb_by_rr(pctx, n_mtbs_per_tb)) return; reload_epochs(); } while (TRUE); } static void init_sched_ctx(sched_ctx_t *pctx, skrid_t skrid) { pctx->skrid = skrid; pctx->epochs = (unsigned char *)malloc(n_mtbs_per_MTB); } static void 
fini_sched_ctx(sched_ctx_t *pctx) { free(pctx->epochs); } static void schedule_mtbs(skrid_t skrid, unsigned n_mtbs_per_tb) { sched_ctx_t ctx; init_sched_ctx(&ctx, skrid); assign_tb_host(&ctx, n_mtbs_per_tb); fini_sched_ctx(&ctx); } static sk_t submit_skrun_host(vstream_t vstream, skrun_t *skr) { skrid_t skrid; htod_copyinfo_t *cinfo; pthread_spin_lock(&lock); while (skrid_done_min == (cur_skrid_host + 1) % n_queued_kernels) { /* full */ pthread_spin_unlock(&lock); usleep(1000); pthread_spin_lock(&lock); } skrid = cur_skrid_host + 1; cinfo = copyinfos + COPYIDX_OTHER(); if (cinfo->n_skruns == 0) cinfo->skrid_start = skrid; memcpy(cinfo->skruns + cinfo->n_skruns, skr, sizeof(skrun_t)); cinfo->n_skruns++; cur_skrid_host = (cur_skrid_host + 1) % n_queued_kernels; pthread_spin_unlock(&lock); ////TODO skrun_dones[skrid - 1] = FALSE; schedule_mtbs(skrid, skr->n_mtbs_per_tb); return (sk_t)(long long)skrid; } static void wait_skrun_host(sk_t sk, vstream_t vstream, int *pres) { skrun_t *skr; skrid_t skrid = (skrid_t)(long long)sk; pthread_mutex_lock(&mutex); while (!checker_done && !skrun_dones[skrid - 1]) pthread_cond_wait(&cond, &mutex); pthread_mutex_unlock(&mutex); skr = g_skruns + (skrid - 1); cuMemcpyDtoHAsync(pres, (hipDeviceptr_t)&skr->res, sizeof(int), strm_host); hipStreamSynchronize(strm_host); } static void run_copycat(hipStream_t strm) { htod_copyinfo_t *cinfo; BOOL sync_required = FALSE; struct list_head *lp, *next; cinfo = copyinfos + copyidx; if (cinfo->n_skruns != 0) { hipError_t res = cuMemcpyHtoDAsync((hipDeviceptr_t)(g_skruns + cinfo->skrid_start - 1), cinfo->skruns, sizeof(skrun_t) * cinfo->n_skruns, strm); sync_required = TRUE; cinfo->n_skruns = 0; } list_for_each_n (lp, &cinfo->upranges, next) { uprange_t *ur = list_entry(lp, uprange_t, list); cuMemcpyHtoDAsync((hipDeviceptr_t)(g_mAOTs + ur->start), cinfo->mAOTs_host + ur->start, ur->len * sizeof(mAO_t), strm); sync_required = TRUE; list_del(&ur->list); free(ur); } if (sync_required) hipStreamSynchronize(strm); pthread_spin_lock(&lock); copyidx = COPYIDX_OTHER(); pthread_spin_unlock(&lock); } static void * htod_copycat_func(void *arg) { hipCtxSetCurrent(context); while (!host_scheduler_done) { run_copycat(strm_host); usleep(10); } return NULL; } static void notify_done_skruns(unsigned n_checks) { unsigned min_new = skrid_done_min; BOOL notify = FALSE; unsigned i, idx; idx = skrid_done_min; for (i = 0; i < n_checks; i++) { if (!skrun_dones[idx]) { if (g_mtbs_done[idx]) { notify = TRUE; skrun_dones[idx] = TRUE; g_mtbs_done[idx] = FALSE; } } if (skrun_dones[idx]) { if (min_new == idx) { min_new = (min_new + 1) % n_queued_kernels; notify = TRUE; } } idx = (idx + 1) % n_queued_kernels; } skrid_done_min = min_new; if (notify) pthread_cond_broadcast(&cond); } static void * skruns_checkfunc(void *arg) { while (!checker_done) { unsigned n_checks = (cur_skrid_host + n_queued_kernels - skrid_done_min) % n_queued_kernels; pthread_mutex_lock(&mutex); if (n_checks > 0) { notify_done_skruns(n_checks); } pthread_mutex_unlock(&mutex); usleep(100); } return NULL; } static void init_skrun_host(void) { void *params[4]; int i; hipStreamCreate__(&strm_host, hipStreamNonBlocking); g_skruns = (skrun_t *)mtbs_cudaMalloc(sizeof(skrun_t) * n_queued_kernels); hipMemAllocHost((void **)&g_mtbs_done, sizeof(BOOL) * n_queued_kernels); for (i = 0; i < n_queued_kernels; i++) g_mtbs_done[i] = FALSE; skrun_dones = (BOOL *)calloc(n_queued_kernels, sizeof(BOOL)); pthread_create(&checker, NULL, skruns_checkfunc, NULL); g_mAOTs = (mAO_t *)mtbs_cudaMalloc(EPOCH_MAX * 
n_max_mtbs * sizeof(mAO_t)); g_mtb_epochs = (unsigned char *)mtbs_cudaMalloc(n_max_mtbs); params[0] = &g_mAOTs; params[1] = &g_mtb_epochs; params[2] = &g_skruns; params[3] = &g_mtbs_done; if (!invoke_kernel_func("func_init_skrun_host", params)) { exit(12); } mtb_epochs_host = (unsigned char *)malloc(n_max_mtbs); mtb_epochs_host_alloc = (unsigned char *)malloc(n_max_mtbs); for (i = 0; i < n_max_mtbs; i++) { mtb_epochs_host[i] = 0; mtb_epochs_host_alloc[i] = 0; } for (i = 0; i < 2; i++) { copyinfos[i].n_skruns = 0; copyinfos[i].skruns = (skrun_t *)malloc(sizeof(skrun_t) * n_queued_kernels); copyinfos[i].mAOTs_host = (mAO_t *)malloc(EPOCH_MAX * n_max_mtbs * sizeof(mAO_t)); INIT_LIST_HEAD(&copyinfos[i].upranges); memset(copyinfos[i].mAOTs_host, 0, sizeof(mAO_t) * n_max_mtbs * EPOCH_MAX); } pthread_spin_init(&lock, 0); sched_sms = (sched_sm_t *)calloc(n_sm_count * n_MTBs_per_sm, sizeof(sched_sm_t)); pthread_create(&host_scheduler, NULL, htod_copycat_func, NULL); } static void fini_skrun_host(void) { void *retval; host_scheduler_done = TRUE; pthread_join(host_scheduler, &retval); checker_done = TRUE; pthread_join(checker, &retval); mtbs_cudaFree(g_skruns); mtbs_cudaFree(g_mAOTs); mtbs_cudaFree(g_mtb_epochs); } sched_t sched_sd_host = { "host", TBS_TYPE_SD_HOST, "func_macro_TB_host", init_skrun_host, fini_skrun_host, submit_skrun_host, wait_skrun_host, };
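// A stripped-down sketch of the double-buffered host-to-device handoff used
// above: submit_skrun_host() and apply_mAOT_uprange() stage work into
// copyinfos[COPYIDX_OTHER()] under the spinlock while run_copycat() drains
// copyinfos[copyidx], then flips the index. The names below are hypothetical;
// only the (idx + 1) % 2 convention is taken from the file.
struct slot_t { int n_staged; /* staged copy descriptors ... */ };
static slot_t g_slots[2];
static unsigned g_front;                     // slot owned by the copy thread

static void stage_one(void){                 // producer, lock held by caller
    g_slots[(g_front + 1) % 2].n_staged++;   // write only to the back slot
}
static void drain_and_flip(void){            // copy thread
    slot_t* front = &g_slots[g_front];
    /* ... issue the async copies for everything in *front ... */
    front->n_staged = 0;
    g_front = (g_front + 1) % 2;             // flipped under the lock above
}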
48f791df8b89f686f6ca4da64bfa01163683c126.cu
#include "mtbs_cu.h" #include <pthread.h> #include "ecm_list.h" #define EPOCH_MAX 64 typedef struct { unsigned short skrid; unsigned short offset:12; unsigned char barid:4; } mAO_t; static pthread_t host_scheduler; static BOOL host_scheduler_done; static CUstream strm_host; static unsigned char *mtb_epochs_host; static unsigned char *mtb_epochs_host_alloc; typedef struct { unsigned start, len; struct list_head list; } uprange_t; typedef struct { unsigned n_skruns; skrid_t skrid_start; skrun_t *skruns; mAO_t *mAOTs_host; struct list_head upranges; } htod_copyinfo_t; #define COPYIDX_OTHER() ((copyidx + 1) % 2) static unsigned copyidx; static htod_copyinfo_t copyinfos[2]; typedef struct { skrid_t skrid; unsigned char *epochs; } sched_ctx_t; static pthread_spinlock_t lock; static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; static pthread_cond_t cond = PTHREAD_COND_INITIALIZER; #define NEXT_EPOCH(epoch) (((epoch) + 1) % EPOCH_MAX) #define mTB_INDEX_HOST(idx_sm, idx_MTB, idx) ((idx_sm) * n_max_mtbs_per_sm + (idx_MTB) * n_mtbs_per_MTB + idx) #define EPOCH_HOST(idx_sm, idx_MTB, idx) mtb_epochs_host[mTB_INDEX_HOST(idx_sm, idx_MTB, idx) - 1] #define EPOCH_HOST_ALLOC(idx_sm, idx_MTB, idx) mtb_epochs_host_alloc[mTB_INDEX_HOST(idx_sm, idx_MTB, idx) - 1] #define mTB_ALLOCOFF_TABLE_EPOCH_HOST(cinfo, epoch) ((cinfo)->mAOTs_host + n_max_mtbs * (epoch)) #define mAO_EPOCH_HOST(cinfo, epoch, idx_sm, idx_MTB, idx) (mTB_ALLOCOFF_TABLE_EPOCH_HOST(cinfo, epoch) + mTB_INDEX_HOST(idx_sm, idx_MTB, idx) - 1) static skrun_t *g_skruns; static BOOL *g_mtbs_done; static BOOL *skrun_dones; static unsigned skrid_done_min; static unsigned cur_skrid_host; static BOOL checker_done; static pthread_t checker; static mAO_t *g_mAOTs; extern unsigned char *g_mtb_epochs; #include "sched_host.cuh" typedef struct { BOOL locked; unsigned idxs_last[8]; /* 8 should be larger than n_MTBs_per_sm */ unsigned char barid; } sched_sm_t; static sched_sm_t *sched_sms; static unsigned idx_ssm_last; static void lock_sm(unsigned *pidx_sm, unsigned *pidx_MTB) { unsigned idx_ssm_cur = 0, idx_ssm_start; unsigned n_MTBs = n_sm_count * n_MTBs_per_sm; pthread_spin_lock(&lock); idx_ssm_start = idx_ssm_last; while (TRUE) { if (!sched_sms[idx_ssm_last].locked) { sched_sms[idx_ssm_last].locked = TRUE; idx_ssm_cur = idx_ssm_last; idx_ssm_last = (idx_ssm_last + 1) % n_MTBs; break; } idx_ssm_last = (idx_ssm_last + 1) % n_MTBs; if (idx_ssm_last == idx_ssm_start) { pthread_spin_unlock(&lock); usleep(1); pthread_spin_lock(&lock); } } pthread_spin_unlock(&lock); *pidx_sm = idx_ssm_cur % n_sm_count; *pidx_MTB = idx_ssm_cur / n_sm_count; } static void unlock_sm(unsigned idx_sm, unsigned idx_MTB) { pthread_spin_lock(&lock); sched_sms[idx_MTB * n_sm_count + idx_sm].locked = FALSE; pthread_spin_unlock(&lock); } static BOOL find_mtbs_on_sm(unsigned idx_sm, unsigned idx_MTB, unsigned n_mtbs, unsigned char *epochs) { sched_sm_t *ssm = &sched_sms[idx_sm]; unsigned n_mtbs_cur = 0; unsigned idx, idx_start; idx = idx_start = ssm->idxs_last[idx_MTB]; do { int epoch = EPOCH_HOST(idx_sm, idx_MTB, idx + 1); int epoch_alloc = EPOCH_HOST_ALLOC(idx_sm, idx_MTB, idx + 1); /* Next epoch entry should be set to zero to proect overrun */ if (NEXT_EPOCH(NEXT_EPOCH(epoch_alloc)) == epoch) { idx = (idx + 1) % n_mtbs_per_MTB; continue; } epochs[idx] = epoch_alloc; n_mtbs_cur++; idx = (idx + 1) % n_mtbs_per_MTB; if (n_mtbs_cur == n_mtbs) { ssm->idxs_last[idx_MTB] = idx; return TRUE; } } while (idx != idx_start); return FALSE; } static uprange_t * create_uprange(unsigned up_idx) { 
uprange_t *ur; ur = (uprange_t *)malloc(sizeof(uprange_t)); ur->start = up_idx; ur->len = 1; return ur; } static BOOL get_sibling_upranges(htod_copyinfo_t *cinfo, unsigned up_idx, uprange_t **pprev, uprange_t **pnext) { uprange_t *prev = NULL; struct list_head *lp; list_for_each (lp, &cinfo->upranges) { uprange_t *ur = list_entry(lp, uprange_t, list); if (prev && prev->start <= up_idx && up_idx < prev->start + prev->len) return FALSE; if (up_idx == ur->start) return FALSE; if (up_idx < ur->start) { *pprev = prev; *pnext = ur; return TRUE; } prev = ur; } *pnext = NULL; *pprev = prev; if (prev && prev->start <= up_idx && up_idx < prev->start + prev->len) return FALSE; return TRUE; } static void apply_mAOT_uprange(unsigned char epoch, unsigned idx_sm, unsigned idx_MTB, unsigned idx, skrid_t skrid, unsigned short offset, unsigned char barid) { htod_copyinfo_t *cinfo; unsigned up_idx; uprange_t *ur_new; uprange_t *prev, *next; mAO_t *mAO; pthread_spin_lock(&lock); up_idx = n_max_mtbs * epoch + mTB_INDEX_HOST(idx_sm, idx_MTB, idx) - 1; cinfo = copyinfos + COPYIDX_OTHER(); mAO = mAO_EPOCH_HOST(cinfo, epoch, idx_sm, idx_MTB, idx); mAO->skrid = skrid; mAO->offset = offset; mAO->barid = barid; if (!get_sibling_upranges(cinfo, up_idx, &prev, &next)) { pthread_spin_unlock(&lock); return; } if (prev) { if (prev->start + prev->len == up_idx) prev->len++; else prev = NULL; } if (prev) { if (next) { if (prev->start + prev->len == next->start) { prev->len += next->len; list_del(&next->list); free(next); } } pthread_spin_unlock(&lock); return; } if (next) { if (up_idx == next->start - 1) { next->start = up_idx; next->len++; pthread_spin_unlock(&lock); return; } } ur_new = create_uprange(up_idx); if (next) list_add_tail(&ur_new->list, &next->list); else list_add_tail(&ur_new->list, &cinfo->upranges); pthread_spin_unlock(&lock); } static void set_mtbs_skrid(sched_ctx_t *pctx, unsigned idx_sm, unsigned idx_MTB, unsigned n_mtbs, unsigned char barid, unsigned char *epochs) { unsigned n_mtbs_cur = 0; unsigned short offbase = 0; unsigned i; for (i = 1; i <= n_mtbs_per_MTB; i++) { unsigned char epoch = epochs[i - 1]; if (epoch == EPOCH_MAX) continue; apply_mAOT_uprange(epoch, idx_sm, idx_MTB, i, pctx->skrid, offbase, barid); apply_mAOT_uprange(NEXT_EPOCH(epoch), idx_sm, idx_MTB, i, 0, (unsigned short)-1, barid); EPOCH_HOST_ALLOC(idx_sm, idx_MTB, i) = NEXT_EPOCH(epoch); n_mtbs_cur++; if (n_mtbs_cur == n_mtbs) return; offbase++; } } static BOOL assign_tb_by_rr(sched_ctx_t *pctx, unsigned n_mtbs) { unsigned i; for (i = 0; i < n_sm_count; i++) { unsigned idx_sm, idx_MTB; lock_sm(&idx_sm, &idx_MTB); memset(pctx->epochs, EPOCH_MAX, n_mtbs_per_MTB); if (find_mtbs_on_sm(idx_sm, idx_MTB, n_mtbs, pctx->epochs)) { unsigned char barid = 16; if (n_mtbs > 1) { barid = sched_sms[idx_sm].barid; sched_sms[idx_sm].barid = (barid + 1) % 16; } set_mtbs_skrid(pctx, idx_sm, idx_MTB, n_mtbs, barid, pctx->epochs); unlock_sm(idx_sm, idx_MTB); return TRUE; } unlock_sm(idx_sm, idx_MTB); } return FALSE; } static void reload_epochs(void) { cuMemcpyDtoHAsync(mtb_epochs_host, (CUdeviceptr)g_mtb_epochs, n_max_mtbs, strm_host); cuStreamSynchronize(strm_host); } static void assign_tb_host(sched_ctx_t *pctx, unsigned n_mtbs_per_tb) { do { if (assign_tb_by_rr(pctx, n_mtbs_per_tb)) return; reload_epochs(); } while (TRUE); } static void init_sched_ctx(sched_ctx_t *pctx, skrid_t skrid) { pctx->skrid = skrid; pctx->epochs = (unsigned char *)malloc(n_mtbs_per_MTB); } static void fini_sched_ctx(sched_ctx_t *pctx) { free(pctx->epochs); } static void 
schedule_mtbs(skrid_t skrid, unsigned n_mtbs_per_tb) { sched_ctx_t ctx; init_sched_ctx(&ctx, skrid); assign_tb_host(&ctx, n_mtbs_per_tb); fini_sched_ctx(&ctx); } static sk_t submit_skrun_host(vstream_t vstream, skrun_t *skr) { skrid_t skrid; htod_copyinfo_t *cinfo; pthread_spin_lock(&lock); while (skrid_done_min == (cur_skrid_host + 1) % n_queued_kernels) { /* full */ pthread_spin_unlock(&lock); usleep(1000); pthread_spin_lock(&lock); } skrid = cur_skrid_host + 1; cinfo = copyinfos + COPYIDX_OTHER(); if (cinfo->n_skruns == 0) cinfo->skrid_start = skrid; memcpy(cinfo->skruns + cinfo->n_skruns, skr, sizeof(skrun_t)); cinfo->n_skruns++; cur_skrid_host = (cur_skrid_host + 1) % n_queued_kernels; pthread_spin_unlock(&lock); ////TODO skrun_dones[skrid - 1] = FALSE; schedule_mtbs(skrid, skr->n_mtbs_per_tb); return (sk_t)(long long)skrid; } static void wait_skrun_host(sk_t sk, vstream_t vstream, int *pres) { skrun_t *skr; skrid_t skrid = (skrid_t)(long long)sk; pthread_mutex_lock(&mutex); while (!checker_done && !skrun_dones[skrid - 1]) pthread_cond_wait(&cond, &mutex); pthread_mutex_unlock(&mutex); skr = g_skruns + (skrid - 1); cuMemcpyDtoHAsync(pres, (CUdeviceptr)&skr->res, sizeof(int), strm_host); cuStreamSynchronize(strm_host); } static void run_copycat(CUstream strm) { htod_copyinfo_t *cinfo; BOOL sync_required = FALSE; struct list_head *lp, *next; cinfo = copyinfos + copyidx; if (cinfo->n_skruns != 0) { CUresult res = cuMemcpyHtoDAsync((CUdeviceptr)(g_skruns + cinfo->skrid_start - 1), cinfo->skruns, sizeof(skrun_t) * cinfo->n_skruns, strm); sync_required = TRUE; cinfo->n_skruns = 0; } list_for_each_n (lp, &cinfo->upranges, next) { uprange_t *ur = list_entry(lp, uprange_t, list); cuMemcpyHtoDAsync((CUdeviceptr)(g_mAOTs + ur->start), cinfo->mAOTs_host + ur->start, ur->len * sizeof(mAO_t), strm); sync_required = TRUE; list_del(&ur->list); free(ur); } if (sync_required) cuStreamSynchronize(strm); pthread_spin_lock(&lock); copyidx = COPYIDX_OTHER(); pthread_spin_unlock(&lock); } static void * htod_copycat_func(void *arg) { cuCtxSetCurrent(context); while (!host_scheduler_done) { run_copycat(strm_host); usleep(10); } return NULL; } static void notify_done_skruns(unsigned n_checks) { unsigned min_new = skrid_done_min; BOOL notify = FALSE; unsigned i, idx; idx = skrid_done_min; for (i = 0; i < n_checks; i++) { if (!skrun_dones[idx]) { if (g_mtbs_done[idx]) { notify = TRUE; skrun_dones[idx] = TRUE; g_mtbs_done[idx] = FALSE; } } if (skrun_dones[idx]) { if (min_new == idx) { min_new = (min_new + 1) % n_queued_kernels; notify = TRUE; } } idx = (idx + 1) % n_queued_kernels; } skrid_done_min = min_new; if (notify) pthread_cond_broadcast(&cond); } static void * skruns_checkfunc(void *arg) { while (!checker_done) { unsigned n_checks = (cur_skrid_host + n_queued_kernels - skrid_done_min) % n_queued_kernels; pthread_mutex_lock(&mutex); if (n_checks > 0) { notify_done_skruns(n_checks); } pthread_mutex_unlock(&mutex); usleep(100); } return NULL; } static void init_skrun_host(void) { void *params[4]; int i; cuStreamCreate(&strm_host, CU_STREAM_NON_BLOCKING); g_skruns = (skrun_t *)mtbs_cudaMalloc(sizeof(skrun_t) * n_queued_kernels); cuMemAllocHost((void **)&g_mtbs_done, sizeof(BOOL) * n_queued_kernels); for (i = 0; i < n_queued_kernels; i++) g_mtbs_done[i] = FALSE; skrun_dones = (BOOL *)calloc(n_queued_kernels, sizeof(BOOL)); pthread_create(&checker, NULL, skruns_checkfunc, NULL); g_mAOTs = (mAO_t *)mtbs_cudaMalloc(EPOCH_MAX * n_max_mtbs * sizeof(mAO_t)); g_mtb_epochs = (unsigned char 
*)mtbs_cudaMalloc(n_max_mtbs); params[0] = &g_mAOTs; params[1] = &g_mtb_epochs; params[2] = &g_skruns; params[3] = &g_mtbs_done; if (!invoke_kernel_func("func_init_skrun_host", params)) { exit(12); } mtb_epochs_host = (unsigned char *)malloc(n_max_mtbs); mtb_epochs_host_alloc = (unsigned char *)malloc(n_max_mtbs); for (i = 0; i < n_max_mtbs; i++) { mtb_epochs_host[i] = 0; mtb_epochs_host_alloc[i] = 0; } for (i = 0; i < 2; i++) { copyinfos[i].n_skruns = 0; copyinfos[i].skruns = (skrun_t *)malloc(sizeof(skrun_t) * n_queued_kernels); copyinfos[i].mAOTs_host = (mAO_t *)malloc(EPOCH_MAX * n_max_mtbs * sizeof(mAO_t)); INIT_LIST_HEAD(&copyinfos[i].upranges); memset(copyinfos[i].mAOTs_host, 0, sizeof(mAO_t) * n_max_mtbs * EPOCH_MAX); } pthread_spin_init(&lock, 0); sched_sms = (sched_sm_t *)calloc(n_sm_count * n_MTBs_per_sm, sizeof(sched_sm_t)); pthread_create(&host_scheduler, NULL, htod_copycat_func, NULL); } static void fini_skrun_host(void) { void *retval; host_scheduler_done = TRUE; pthread_join(host_scheduler, &retval); checker_done = TRUE; pthread_join(checker, &retval); mtbs_cudaFree(g_skruns); mtbs_cudaFree(g_mAOTs); mtbs_cudaFree(g_mtb_epochs); } sched_t sched_sd_host = { "host", TBS_TYPE_SD_HOST, "func_macro_TB_host", init_skrun_host, fini_skrun_host, submit_skrun_host, wait_skrun_host, };
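// The epoch bookkeeping above behaves like a ring counter modulo EPOCH_MAX:
// mtb_epochs_host_alloc runs ahead as the host hands out mTB slots, and the
// device-side epoch catches up as kernels finish. The skip condition
// NEXT_EPOCH(NEXT_EPOCH(epoch_alloc)) == epoch in find_mtbs_on_sm() appears
// to keep a safety gap before the allocator wraps onto unfinished work, the
// usual "full" test for a ring buffer; an equivalent predicate:
static inline int mtb_ring_full(int epoch_alloc, int epoch_done)
{
    // same as NEXT_EPOCH(NEXT_EPOCH(epoch_alloc)) == epoch_done
    return ((epoch_alloc + 2) % EPOCH_MAX) == epoch_done;
}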
446fdf576663e51aced3180f373587a716f539b2.hip
// !!! This is a file automatically generated by hipify!!!
#include <Material.h>

#define RANDVEC3 vec3(hiprand_uniform(local_rand_state),hiprand_uniform(local_rand_state),hiprand_uniform(local_rand_state))

__device__ vec3 random_in_unit_sphere(hiprandState_t *local_rand_state) {
    vec3 p;
    do {
        p = 2.0f*RANDVEC3 - vec3(1,1,1);
    } while (p.squared_length() >= 1.0f);
    return p;
}

__device__ float schlick(float cosine, float ref_idx) {
    float r0 = (1.0f-ref_idx) / (1.0f+ref_idx);
    r0 = r0*r0;
    return r0 + (1.0f-r0)*pow((1.0f - cosine),5.0f);
}

__device__ vec3 reflect(const vec3& v, const vec3& n){
    return v - 2.0f*dot(v,n)*n;
}

__device__ bool refract(const vec3& v, const vec3& n, float ni_over_nt, vec3& refracted) {
    vec3 uv = unit_vector(v);
    float dt = dot(uv, n);
    float discriminant = 1.0f - ni_over_nt*ni_over_nt*(1-dt*dt);
    if (discriminant > 0) {
        refracted = ni_over_nt*(uv - n*dt) - n*sqrt(discriminant);
        return true;
    }
    else
        return false;
}

__device__ lambertian::lambertian(const vec3& a) : albedo(a) {}

__device__ bool lambertian::scatter(const ray& r_in, const hit_record& rec, vec3& attenuation, ray& scattered, hiprandState_t *local_rand_state) const{
    vec3 target = rec.p + rec.normal + random_in_unit_sphere(local_rand_state);
    scattered = ray(rec.p, target-rec.p);
    attenuation = albedo;
    return true;
}

__device__ metal::metal(const vec3& a, float f) : albedo(a), fuzz(f) {}

__device__ bool metal::scatter(const ray& r_in, const hit_record& rec, vec3& attenuation, ray& scattered, hiprandState_t *local_rand_state) const{
    vec3 reflected = reflect(unit_vector(r_in.direction()), rec.normal);
    scattered = ray(rec.p, reflected + fuzz*random_in_unit_sphere(local_rand_state));
    attenuation = albedo;
    return (dot(scattered.direction(), rec.normal) > 0.0f); // We don't want any rays coming from inside the sphere.
}

__device__ dielectric::dielectric(float ri) : ref_idx(ri) {}

__device__ bool dielectric::scatter(const ray& r_in, const hit_record& rec, vec3& attenuation, ray& scattered, hiprandState_t * local_rand_state) const {
    vec3 outward_normal;
    vec3 reflected = reflect(r_in.direction(), rec.normal);
    float ni_over_nt;
    attenuation = vec3(1.0f, 1.0f, 1.0f);
    vec3 refracted;
    float reflect_prob;
    float cosine;
    if (dot(r_in.direction(), rec.normal) > 0.0f) {
        outward_normal = -rec.normal;
        ni_over_nt = ref_idx;
        cosine = dot(r_in.direction(), rec.normal) / r_in.direction().length();
        cosine = sqrt(1.0f - ref_idx*ref_idx*(1.0f-cosine*cosine));
    }
    else {
        outward_normal = rec.normal;
        ni_over_nt = 1.0f / ref_idx;
        cosine = -dot(r_in.direction(), rec.normal) / r_in.direction().length();
    }
    if (refract(r_in.direction(), outward_normal, ni_over_nt, refracted))
        reflect_prob = schlick(cosine, ref_idx);
    else
        reflect_prob = 1.0f;
    if (hiprand_uniform(local_rand_state) < reflect_prob)
        scattered = ray(rec.p, reflected);
    else
        scattered = ray(rec.p, refracted);
    return true;
}
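// random_in_unit_sphere() above uses rejection sampling: points are drawn
// uniformly in the cube [-1,1]^3 and kept only if they fall inside the unit
// sphere. The acceptance rate is the volume ratio (4/3)*pi / 8 ~= 0.524, so
// the loop runs about 1.9 iterations on average. A loop-free alternative
// sketch with the same distribution, assuming the vec3 helpers used above
// (uniform direction from cos(theta) and phi, radius with r^2 density):
__device__ vec3 random_in_unit_sphere_direct(hiprandState_t *local_rand_state)
{
    float u   = 2.0f * hiprand_uniform(local_rand_state) - 1.0f; // cos(theta)
    float phi = 2.0f * 3.14159265f * hiprand_uniform(local_rand_state);
    float r   = cbrtf(hiprand_uniform(local_rand_state));  // radius ~ r^2
    float s   = sqrtf(1.0f - u * u);
    return r * vec3(s * cosf(phi), s * sinf(phi), u);
}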
446fdf576663e51aced3180f373587a716f539b2.cu
#include <Material.h>

#define RANDVEC3 vec3(curand_uniform(local_rand_state),curand_uniform(local_rand_state),curand_uniform(local_rand_state))

__device__ vec3 random_in_unit_sphere(curandState *local_rand_state) {
    vec3 p;
    do {
        p = 2.0f*RANDVEC3 - vec3(1,1,1);
    } while (p.squared_length() >= 1.0f);
    return p;
}

__device__ float schlick(float cosine, float ref_idx) {
    float r0 = (1.0f-ref_idx) / (1.0f+ref_idx);
    r0 = r0*r0;
    return r0 + (1.0f-r0)*pow((1.0f - cosine),5.0f);
}

__device__ vec3 reflect(const vec3& v, const vec3& n){
    return v - 2.0f*dot(v,n)*n;
}

__device__ bool refract(const vec3& v, const vec3& n, float ni_over_nt, vec3& refracted) {
    vec3 uv = unit_vector(v);
    float dt = dot(uv, n);
    float discriminant = 1.0f - ni_over_nt*ni_over_nt*(1-dt*dt);
    if (discriminant > 0) {
        refracted = ni_over_nt*(uv - n*dt) - n*sqrt(discriminant);
        return true;
    }
    else
        return false;
}

__device__ lambertian::lambertian(const vec3& a) : albedo(a) {}

__device__ bool lambertian::scatter(const ray& r_in, const hit_record& rec, vec3& attenuation, ray& scattered, curandState *local_rand_state) const{
    vec3 target = rec.p + rec.normal + random_in_unit_sphere(local_rand_state);
    scattered = ray(rec.p, target-rec.p);
    attenuation = albedo;
    return true;
}

__device__ metal::metal(const vec3& a, float f) : albedo(a), fuzz(f) {}

__device__ bool metal::scatter(const ray& r_in, const hit_record& rec, vec3& attenuation, ray& scattered, curandState *local_rand_state) const{
    vec3 reflected = reflect(unit_vector(r_in.direction()), rec.normal);
    scattered = ray(rec.p, reflected + fuzz*random_in_unit_sphere(local_rand_state));
    attenuation = albedo;
    return (dot(scattered.direction(), rec.normal) > 0.0f); // We don't want any rays coming from inside the sphere.
}

__device__ dielectric::dielectric(float ri) : ref_idx(ri) {}

__device__ bool dielectric::scatter(const ray& r_in, const hit_record& rec, vec3& attenuation, ray& scattered, curandState * local_rand_state) const {
    vec3 outward_normal;
    vec3 reflected = reflect(r_in.direction(), rec.normal);
    float ni_over_nt;
    attenuation = vec3(1.0f, 1.0f, 1.0f);
    vec3 refracted;
    float reflect_prob;
    float cosine;
    if (dot(r_in.direction(), rec.normal) > 0.0f) {
        outward_normal = -rec.normal;
        ni_over_nt = ref_idx;
        cosine = dot(r_in.direction(), rec.normal) / r_in.direction().length();
        cosine = sqrt(1.0f - ref_idx*ref_idx*(1.0f-cosine*cosine));
    }
    else {
        outward_normal = rec.normal;
        ni_over_nt = 1.0f / ref_idx;
        cosine = -dot(r_in.direction(), rec.normal) / r_in.direction().length();
    }
    if (refract(r_in.direction(), outward_normal, ni_over_nt, refracted))
        reflect_prob = schlick(cosine, ref_idx);
    else
        reflect_prob = 1.0f;
    if (curand_uniform(local_rand_state) < reflect_prob)
        scattered = ray(rec.p, reflected);
    else
        scattered = ray(rec.p, refracted);
    return true;
}
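// A worked host-side check of the Schlick approximation above for glass
// (ref_idx = 1.5): r0 = ((1-1.5)/(1+1.5))^2 = 0.04, so at normal incidence
// (cosine = 1) only ~4% of rays reflect, while at grazing incidence
// (cosine -> 0) the reflectance approaches 1. The function name is
// hypothetical; the arithmetic mirrors schlick() exactly:
#include <cassert>
#include <cmath>
static void schlick_sanity(){
    float ref_idx = 1.5f;
    float r0 = (1.0f - ref_idx) / (1.0f + ref_idx);
    r0 = r0 * r0;                                                   // 0.04
    float at_normal  = r0 + (1.0f - r0) * powf(1.0f - 1.0f, 5.0f);  // == r0
    float at_grazing = r0 + (1.0f - r0) * powf(1.0f - 0.0f, 5.0f);  // == 1
    assert(at_normal < 0.05f && at_grazing > 0.99f);
}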
2380c4b624edf5b295cf657ee9ce93360b6512dc.hip
// !!! This is a file automatically generated by hipify!!! #define LIMIT -999 #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <needle.h> #include <hip/hip_runtime.h> #include <sys/time.h> // includes, kernels #include <needle_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); int blosum62[24][24] = { { 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4}, {-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4}, {-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4}, {-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4}, { 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4}, {-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4}, {-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4}, {-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4}, {-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4}, {-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4}, {-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4}, {-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4}, {-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4}, {-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4}, { 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4}, { 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4}, {-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4}, {-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4}, { 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4}, {-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4}, {-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4}, {-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1} }; double gettime() { struct timeval t; gettimeofday(&t,NULL); return t.tv_sec+t.tv_usec*1e-6; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); return EXIT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { int max_rows, max_cols, penalty; int *input_itemsets, *output_itemsets, *referrence; int *matrix_cuda, *matrix_cuda_out, *referrence_cuda; int size; // the lengths of the two sequences should be able to divided by 16. 
// And at current stage max_rows needs to equal max_cols if (argc == 4) { max_rows = atoi(argv[1]); max_cols = atoi(argv[2]); penalty = atoi(argv[3]); } else{ printf("Wrong Usage"); exit(1); } max_rows = max_rows + 1; max_cols = max_cols + 1; referrence = (int *)malloc( max_rows * max_cols * sizeof(int) ); input_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) ); output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) ); if (!input_itemsets) fprintf(stderr, "error: can not allocate memory"); srand ( 7 ); for (int i = 0 ; i < max_cols; i++){ for (int j = 0 ; j < max_rows; j++){ input_itemsets[i*max_cols+j] = 0; } } for( int i=1; i< max_rows ; i++){ //please define your own sequence. input_itemsets[i*max_cols] = rand() % 10 + 1; } for( int j=1; j< max_cols ; j++){ //please define your own sequence. input_itemsets[j] = rand() % 10 + 1; } for (int i = 1 ; i < max_cols; i++){ for (int j = 1 ; j < max_rows; j++){ referrence[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]]; } } for( int i = 1; i< max_rows ; i++) input_itemsets[i*max_cols] = -i * penalty; for( int j = 1; j< max_cols ; j++) input_itemsets[j] = -j * penalty; size = max_cols * max_rows; hipMalloc((void**)& referrence_cuda, sizeof(int)*size); hipMalloc((void**)& matrix_cuda, sizeof(int)*size); hipMalloc((void**)& matrix_cuda_out, sizeof(int)*size); hipMemcpy(referrence_cuda, referrence, sizeof(int) * size, hipMemcpyHostToDevice); hipMemcpy(matrix_cuda, input_itemsets, sizeof(int) * size, hipMemcpyHostToDevice); dim3 dimGrid; dim3 dimBlock(BLOCK_SIZE, 1); int block_width = ( max_cols - 1 )/BLOCK_SIZE; //process top-left matrix for( int i = 1 ; i <= block_width ; i++){ dimGrid.x = i; dimGrid.y = 1; hipLaunchKernelGGL(( needle_cuda_shared_1), dim3(dimGrid), dim3(dimBlock), 0, 0, referrence_cuda, matrix_cuda, matrix_cuda_out ,max_cols, penalty, i, block_width); } //process bottom-right matrix for( int i = block_width - 1 ; i >= 1 ; i--){ dimGrid.x = i; dimGrid.y = 1; hipLaunchKernelGGL(( needle_cuda_shared_2), dim3(dimGrid), dim3(dimBlock), 0, 0, referrence_cuda, matrix_cuda, matrix_cuda_out ,max_cols, penalty, i, block_width); } hipMemcpy(output_itemsets, matrix_cuda, sizeof(int) * size, hipMemcpyDeviceToHost); #ifdef TRACE printf("print traceback value GPU:\n"); for (int i = max_rows - 2, j = max_rows - 2; i>=0, j>=0;){ int nw, n, w, traceback; if ( i == max_rows - 2 && j == max_rows - 2 ) printf("%d ", output_itemsets[ i * max_cols + j]); //print the first element if ( i == 0 && j == 0 ) break; if ( i > 0 && j > 0 ){ nw = output_itemsets[(i - 1) * max_cols + j - 1]; w = output_itemsets[ i * max_cols + j - 1 ]; n = output_itemsets[(i - 1) * max_cols + j]; } else if ( i == 0 ){ nw = n = LIMIT; w = output_itemsets[ i * max_cols + j - 1 ]; } else if ( j == 0 ){ nw = w = LIMIT; n = output_itemsets[(i - 1) * max_cols + j]; } else{ } traceback = maximum(nw, w, n); printf("%d ", traceback); if(traceback == nw ) {i--; j--; continue;} else if(traceback == w ) {j--; continue;} else if(traceback == n ) {i--; continue;} else ; } printf("\n"); #endif hipFree(referrence_cuda); hipFree(matrix_cuda); hipFree(matrix_cuda_out); printf("\nTEST PASSED\n"); }
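// A CPU reference sketch of the score fill that the tiled kernels in
// needle_kernel.cu (not shown here) compute in anti-diagonal waves: this is
// the standard Needleman-Wunsch recurrence, consistent with the border
// initialization and the maximum(nw, w, n) traceback above. The function
// name is hypothetical.
static void nw_fill_reference(int* score, const int* referrence,
                              int max_rows, int max_cols, int penalty)
{
    // score[] arrives with row 0 and column 0 pre-filled with -i*penalty /
    // -j*penalty, exactly as runTest() initializes input_itemsets above.
    for (int i = 1; i < max_rows; ++i){
        for (int j = 1; j < max_cols; ++j){
            int nw = score[(i-1)*max_cols + (j-1)] + referrence[i*max_cols + j];
            int w  = score[i*max_cols + (j-1)] - penalty;
            int n  = score[(i-1)*max_cols + j] - penalty;
            int best = nw;
            if (w > best) best = w;
            if (n > best) best = n;
            score[i*max_cols + j] = best;
        }
    }
}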
2380c4b624edf5b295cf657ee9ce93360b6512dc.cu
#define LIMIT -999 #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <needle.h> #include <cuda.h> #include <sys/time.h> // includes, kernels #include <needle_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest( int argc, char** argv); int blosum62[24][24] = { { 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4}, {-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4}, {-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4}, {-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4}, { 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4}, {-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4}, {-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4}, {-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4}, {-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4}, {-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4}, {-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4}, {-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4}, {-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4}, {-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4}, { 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4}, { 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4}, {-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4}, {-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4}, { 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4}, {-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4}, {-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4}, { 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4}, {-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1} }; double gettime() { struct timeval t; gettimeofday(&t,NULL); return t.tv_sec+t.tv_usec*1e-6; } //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { runTest( argc, argv); return EXIT_SUCCESS; } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest( int argc, char** argv) { int max_rows, max_cols, penalty; int *input_itemsets, *output_itemsets, *referrence; int *matrix_cuda, *matrix_cuda_out, *referrence_cuda; int size; // the lengths of the two sequences should be able to divided by 16. 
// And at current stage max_rows needs to equal max_cols if (argc == 4) { max_rows = atoi(argv[1]); max_cols = atoi(argv[2]); penalty = atoi(argv[3]); } else{ printf("Wrong Usage"); exit(1); } max_rows = max_rows + 1; max_cols = max_cols + 1; referrence = (int *)malloc( max_rows * max_cols * sizeof(int) ); input_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) ); output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) ); if (!input_itemsets) fprintf(stderr, "error: can not allocate memory"); srand ( 7 ); for (int i = 0 ; i < max_cols; i++){ for (int j = 0 ; j < max_rows; j++){ input_itemsets[i*max_cols+j] = 0; } } for( int i=1; i< max_rows ; i++){ //please define your own sequence. input_itemsets[i*max_cols] = rand() % 10 + 1; } for( int j=1; j< max_cols ; j++){ //please define your own sequence. input_itemsets[j] = rand() % 10 + 1; } for (int i = 1 ; i < max_cols; i++){ for (int j = 1 ; j < max_rows; j++){ referrence[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]]; } } for( int i = 1; i< max_rows ; i++) input_itemsets[i*max_cols] = -i * penalty; for( int j = 1; j< max_cols ; j++) input_itemsets[j] = -j * penalty; size = max_cols * max_rows; cudaMalloc((void**)& referrence_cuda, sizeof(int)*size); cudaMalloc((void**)& matrix_cuda, sizeof(int)*size); cudaMalloc((void**)& matrix_cuda_out, sizeof(int)*size); cudaMemcpy(referrence_cuda, referrence, sizeof(int) * size, cudaMemcpyHostToDevice); cudaMemcpy(matrix_cuda, input_itemsets, sizeof(int) * size, cudaMemcpyHostToDevice); dim3 dimGrid; dim3 dimBlock(BLOCK_SIZE, 1); int block_width = ( max_cols - 1 )/BLOCK_SIZE; //process top-left matrix for( int i = 1 ; i <= block_width ; i++){ dimGrid.x = i; dimGrid.y = 1; needle_cuda_shared_1<<<dimGrid, dimBlock>>>(referrence_cuda, matrix_cuda, matrix_cuda_out ,max_cols, penalty, i, block_width); } //process bottom-right matrix for( int i = block_width - 1 ; i >= 1 ; i--){ dimGrid.x = i; dimGrid.y = 1; needle_cuda_shared_2<<<dimGrid, dimBlock>>>(referrence_cuda, matrix_cuda, matrix_cuda_out ,max_cols, penalty, i, block_width); } cudaMemcpy(output_itemsets, matrix_cuda, sizeof(int) * size, cudaMemcpyDeviceToHost); #ifdef TRACE printf("print traceback value GPU:\n"); for (int i = max_rows - 2, j = max_rows - 2; i>=0, j>=0;){ int nw, n, w, traceback; if ( i == max_rows - 2 && j == max_rows - 2 ) printf("%d ", output_itemsets[ i * max_cols + j]); //print the first element if ( i == 0 && j == 0 ) break; if ( i > 0 && j > 0 ){ nw = output_itemsets[(i - 1) * max_cols + j - 1]; w = output_itemsets[ i * max_cols + j - 1 ]; n = output_itemsets[(i - 1) * max_cols + j]; } else if ( i == 0 ){ nw = n = LIMIT; w = output_itemsets[ i * max_cols + j - 1 ]; } else if ( j == 0 ){ nw = w = LIMIT; n = output_itemsets[(i - 1) * max_cols + j]; } else{ } traceback = maximum(nw, w, n); printf("%d ", traceback); if(traceback == nw ) {i--; j--; continue;} else if(traceback == w ) {j--; continue;} else if(traceback == n ) {i--; continue;} else ; } printf("\n"); #endif cudaFree(referrence_cuda); cudaFree(matrix_cuda); cudaFree(matrix_cuda_out); printf("\nTEST PASSED\n"); }
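// The two launch loops above sweep anti-diagonals of the tiled score matrix:
// diagonal i of the top-left triangle holds i independent tiles, the
// bottom-right triangle mirrors it, so 2*block_width - 1 launches cover the
// whole matrix. A sketch of that arithmetic for a 2048x2048 alignment
// (max_cols = 2049), assuming the BLOCK_SIZE of 16 implied by the
// divisibility comment in runTest():
static void launch_count_example(void)
{
    int max_cols    = 2048 + 1;                 // padded by one border column
    int block       = 16;                       // assumed BLOCK_SIZE
    int block_width = (max_cols - 1) / block;   // 128 tile columns
    int launches    = 2 * block_width - 1;      // 255 kernel launches total
    (void)launches;
}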
c77e797c17360f179d541ac7b65215af1c9a9cf9.hip
// !!! This is a file automatically generated by hipify!!! #include <gtest/gtest.h> #include <utility> #include <algorithm> #include <libsgm.h> #include "winner_takes_all.hpp" #include "generator.hpp" #include "test_utility.hpp" #include "debug.hpp" namespace { static constexpr size_t NUM_PATHS = 8; thrust::host_vector<sgm::output_type> winner_takes_all_left( const thrust::host_vector<sgm::cost_type>& src, size_t width, size_t height, size_t pitch, size_t disparity, float uniqueness, bool subpixel) { thrust::host_vector<sgm::output_type> result(pitch * height); for(size_t i = 0; i < height; ++i){ for(size_t j = 0; j < width; ++j){ std::vector<std::pair<int, int>> v; for(size_t k = 0; k < disparity; ++k){ int cost_sum = 0; for(size_t p = 0; p < NUM_PATHS; ++p){ cost_sum += static_cast<int>(src[ p * disparity * width * height + i * disparity * width + j * disparity + k]); } v.emplace_back(cost_sum, static_cast<int>(k)); } const auto ite = std::min_element(v.begin(), v.end()); assert(ite != v.end()); const auto best = *ite; const int best_cost = best.first; sgm::output_type best_disp = best.second; sgm::output_type dst = best_disp; if (subpixel) { dst <<= sgm::StereoSGM::SUBPIXEL_SHIFT; if (0 < best_disp && best_disp < static_cast<int>(disparity) - 1) { const int left = v[best_disp - 1].first; const int right = v[best_disp + 1].first; const int numer = left - right; const int denom = left - 2 * best_cost + right; dst += ((numer << sgm::StereoSGM::SUBPIXEL_SHIFT) + denom) / (2 * denom); } } for (const auto& p : v) { const int cost = p.first; const int disp = p.second; if (cost * uniqueness < best_cost && abs(disp - best_disp) > 1) { dst = 0; break; } } result[i * pitch + j] = dst; } } return result; } thrust::host_vector<sgm::output_type> winner_takes_all_right( const thrust::host_vector<sgm::cost_type>& src, size_t width, size_t height, size_t pitch, size_t disparity, float uniqueness) { thrust::host_vector<sgm::output_type> result(pitch * height); for(size_t i = 0; i < height; ++i){ for(size_t j = 0; j < width; ++j){ std::vector<std::pair<int, int>> v; for(size_t k = 0; j + k < width && k < disparity; ++k){ int cost_sum = 0; for(size_t p = 0; p < NUM_PATHS; ++p){ cost_sum += static_cast<int>(src[ p * disparity * width * height + i * disparity * width + (j + k) * disparity + k]); } v.emplace_back(cost_sum, static_cast<int>(k)); } const auto ite = std::min_element(v.begin(), v.end()); assert(ite != v.end()); const auto best = *ite; result[i * pitch + j] = best.second; } } return result; } } static void test_random_left(bool subpixel, size_t padding = 0) { static constexpr size_t width = 313, height = 237, disparity = 128; static constexpr float uniqueness = 0.95f; const size_t pitch = width + padding; const auto input = generate_random_sequence<sgm::cost_type>( width * height * disparity * NUM_PATHS); const auto expect = winner_takes_all_left( input, width, height, pitch, disparity, uniqueness, subpixel); sgm::WinnerTakesAll<disparity> wta; const auto d_input = to_device_vector(input); wta.enqueue(d_input.data().get(), width, height, static_cast<int>(pitch), uniqueness, subpixel, sgm::PathType::SCAN_8PATH, 0); hipStreamSynchronize(0); const thrust::device_vector<sgm::output_type> d_actual( wta.get_left_output(), wta.get_left_output() + (pitch * height)); const auto actual = to_host_vector(d_actual); EXPECT_EQ(actual, expect); debug_compare(actual.data(), expect.data(), pitch, height, 1); } static void test_corner1_left(bool subpixel, size_t padding = 0) { static constexpr size_t width = 1, 
height = 1, disparity = 64; static constexpr float uniqueness = 0.95f; const size_t pitch = width + padding; static constexpr size_t n = width * height * disparity * NUM_PATHS; static constexpr size_t step = width * height * disparity; thrust::host_vector<sgm::cost_type> input(n); for (auto& v : input) { v = 1; } for (size_t i = 0; i < NUM_PATHS; ++i) { input[i * step] = 64; } const auto expect = winner_takes_all_left( input, width, height, pitch, disparity, uniqueness, subpixel); sgm::WinnerTakesAll<disparity> wta; const auto d_input = to_device_vector(input); wta.enqueue(d_input.data().get(), width, height, static_cast<int>(pitch), uniqueness, subpixel, sgm::PathType::SCAN_8PATH, 0); hipStreamSynchronize(0); const thrust::device_vector<sgm::output_type> d_actual( wta.get_left_output(), wta.get_left_output() + (pitch * height)); const auto actual = to_host_vector(d_actual); EXPECT_EQ(actual, expect); debug_compare(actual.data(), expect.data(), pitch, height, 1); } static void test_corner2_left(bool subpixel, size_t padding = 0) { static constexpr size_t width = 1, height = 1, disparity = 64; static constexpr float uniqueness = 0.95f; const size_t pitch = width + padding; static constexpr size_t n = width * height * disparity * NUM_PATHS; static constexpr size_t step = width * height * disparity; thrust::host_vector<sgm::cost_type> input(n); for (auto& v : input) { v = 64; } for (size_t i = 0; i < NUM_PATHS; ++i) { input[i * step + 16] = 1; } for (size_t i = 0; i < NUM_PATHS; ++i) { input[i * step + 32] = 1; } const auto expect = winner_takes_all_left( input, width, height, pitch, disparity, uniqueness, subpixel); sgm::WinnerTakesAll<disparity> wta; const auto d_input = to_device_vector(input); wta.enqueue(d_input.data().get(), width, height, static_cast<int>(pitch), uniqueness, subpixel, sgm::PathType::SCAN_8PATH, 0); hipStreamSynchronize(0); const thrust::device_vector<sgm::output_type> d_actual( wta.get_left_output(), wta.get_left_output() + (pitch * height)); const auto actual = to_host_vector(d_actual); EXPECT_EQ(actual, expect); debug_compare(actual.data(), expect.data(), pitch, height, 1); } TEST(WinnerTakesAllTest, RandomLeftNormal){ test_random_left(false); } TEST(WinnerTakesAllTest, RandomLeftSubpixel){ test_random_left(true); } TEST(WinnerTakesAllTest, RandomLeftNormalWithPitch){ test_random_left(false, 27); } TEST(WinnerTakesAllTest, RandomLeftSubpixelWithPitch){ test_random_left(true, 27); } TEST(WinnerTakesAllTest, Corner1LeftNormal){ test_corner1_left(false); } TEST(WinnerTakesAllTest, Corner1LeftSubpixel){ test_corner1_left(true); } TEST(WinnerTakesAllTest, Corner2LeftNormal){ test_corner2_left(false); } TEST(WinnerTakesAllTest, Corner2LeftSubpixel){ test_corner2_left(true); } static void test_random_right(size_t padding = 0) { static constexpr size_t width = 313, height = 237, disparity = 64; static constexpr float uniqueness = 0.95f; const size_t pitch = width + padding; const auto input = generate_random_sequence<sgm::cost_type>( width * height * disparity * NUM_PATHS); const auto expect = winner_takes_all_right( input, width, height, pitch, disparity, uniqueness); sgm::WinnerTakesAll<disparity> wta; const auto d_input = to_device_vector(input); wta.enqueue(d_input.data().get(), width, height, static_cast<int>(pitch), uniqueness, false, sgm::PathType::SCAN_8PATH, 0); hipStreamSynchronize(0); const thrust::device_vector<sgm::output_type> d_actual( wta.get_right_output(), wta.get_right_output() + (pitch * height)); const auto actual = to_host_vector(d_actual); 
EXPECT_EQ(actual, expect); debug_compare(actual.data(), expect.data(), pitch, height, 1); } TEST(WinnerTakesAllTest, RandomRight){ test_random_right(); } TEST(WinnerTakesAllTest, RandomRightWithPitch){ test_random_right(27); }
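// Not part of the test source: the subpixel branch in winner_takes_all_left
// fits a parabola through the summed costs at disparities d-1, d and d+1; its
// minimum lies at d + (left - right) / (2 * (left - 2*best + right)), encoded
// in fixed point with SUBPIXEL_SHIFT fractional bits. The same arithmetic,
// isolated (illustrative name; assumes a strict minimum, so denom != 0):
inline int refine_subpixel_fixed(int left, int best, int right, int d, int shift)
{
    const int numer = left - right;
    const int denom = left - 2 * best + right; // curvature of the cost parabola
    // "+ denom" rounds to nearest before the integer division, as in the test
    return (d << shift) + ((numer << shift) + denom) / (2 * denom);
}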
c77e797c17360f179d541ac7b65215af1c9a9cf9.cu
#include <gtest/gtest.h> #include <utility> #include <algorithm> #include <libsgm.h> #include "winner_takes_all.hpp" #include "generator.hpp" #include "test_utility.hpp" #include "debug.hpp" namespace { static constexpr size_t NUM_PATHS = 8; thrust::host_vector<sgm::output_type> winner_takes_all_left( const thrust::host_vector<sgm::cost_type>& src, size_t width, size_t height, size_t pitch, size_t disparity, float uniqueness, bool subpixel) { thrust::host_vector<sgm::output_type> result(pitch * height); for(size_t i = 0; i < height; ++i){ for(size_t j = 0; j < width; ++j){ std::vector<std::pair<int, int>> v; for(size_t k = 0; k < disparity; ++k){ int cost_sum = 0; for(size_t p = 0; p < NUM_PATHS; ++p){ cost_sum += static_cast<int>(src[ p * disparity * width * height + i * disparity * width + j * disparity + k]); } v.emplace_back(cost_sum, static_cast<int>(k)); } const auto ite = std::min_element(v.begin(), v.end()); assert(ite != v.end()); const auto best = *ite; const int best_cost = best.first; sgm::output_type best_disp = best.second; sgm::output_type dst = best_disp; if (subpixel) { dst <<= sgm::StereoSGM::SUBPIXEL_SHIFT; if (0 < best_disp && best_disp < static_cast<int>(disparity) - 1) { const int left = v[best_disp - 1].first; const int right = v[best_disp + 1].first; const int numer = left - right; const int denom = left - 2 * best_cost + right; dst += ((numer << sgm::StereoSGM::SUBPIXEL_SHIFT) + denom) / (2 * denom); } } for (const auto& p : v) { const int cost = p.first; const int disp = p.second; if (cost * uniqueness < best_cost && abs(disp - best_disp) > 1) { dst = 0; break; } } result[i * pitch + j] = dst; } } return result; } thrust::host_vector<sgm::output_type> winner_takes_all_right( const thrust::host_vector<sgm::cost_type>& src, size_t width, size_t height, size_t pitch, size_t disparity, float uniqueness) { thrust::host_vector<sgm::output_type> result(pitch * height); for(size_t i = 0; i < height; ++i){ for(size_t j = 0; j < width; ++j){ std::vector<std::pair<int, int>> v; for(size_t k = 0; j + k < width && k < disparity; ++k){ int cost_sum = 0; for(size_t p = 0; p < NUM_PATHS; ++p){ cost_sum += static_cast<int>(src[ p * disparity * width * height + i * disparity * width + (j + k) * disparity + k]); } v.emplace_back(cost_sum, static_cast<int>(k)); } const auto ite = std::min_element(v.begin(), v.end()); assert(ite != v.end()); const auto best = *ite; result[i * pitch + j] = best.second; } } return result; } } static void test_random_left(bool subpixel, size_t padding = 0) { static constexpr size_t width = 313, height = 237, disparity = 128; static constexpr float uniqueness = 0.95f; const size_t pitch = width + padding; const auto input = generate_random_sequence<sgm::cost_type>( width * height * disparity * NUM_PATHS); const auto expect = winner_takes_all_left( input, width, height, pitch, disparity, uniqueness, subpixel); sgm::WinnerTakesAll<disparity> wta; const auto d_input = to_device_vector(input); wta.enqueue(d_input.data().get(), width, height, static_cast<int>(pitch), uniqueness, subpixel, sgm::PathType::SCAN_8PATH, 0); cudaStreamSynchronize(0); const thrust::device_vector<sgm::output_type> d_actual( wta.get_left_output(), wta.get_left_output() + (pitch * height)); const auto actual = to_host_vector(d_actual); EXPECT_EQ(actual, expect); debug_compare(actual.data(), expect.data(), pitch, height, 1); } static void test_corner1_left(bool subpixel, size_t padding = 0) { static constexpr size_t width = 1, height = 1, disparity = 64; static constexpr float uniqueness 
= 0.95f; const size_t pitch = width + padding; static constexpr size_t n = width * height * disparity * NUM_PATHS; static constexpr size_t step = width * height * disparity; thrust::host_vector<sgm::cost_type> input(n); for (auto& v : input) { v = 1; } for (size_t i = 0; i < NUM_PATHS; ++i) { input[i * step] = 64; } const auto expect = winner_takes_all_left( input, width, height, pitch, disparity, uniqueness, subpixel); sgm::WinnerTakesAll<disparity> wta; const auto d_input = to_device_vector(input); wta.enqueue(d_input.data().get(), width, height, static_cast<int>(pitch), uniqueness, subpixel, sgm::PathType::SCAN_8PATH, 0); cudaStreamSynchronize(0); const thrust::device_vector<sgm::output_type> d_actual( wta.get_left_output(), wta.get_left_output() + (pitch * height)); const auto actual = to_host_vector(d_actual); EXPECT_EQ(actual, expect); debug_compare(actual.data(), expect.data(), pitch, height, 1); } static void test_corner2_left(bool subpixel, size_t padding = 0) { static constexpr size_t width = 1, height = 1, disparity = 64; static constexpr float uniqueness = 0.95f; const size_t pitch = width + padding; static constexpr size_t n = width * height * disparity * NUM_PATHS; static constexpr size_t step = width * height * disparity; thrust::host_vector<sgm::cost_type> input(n); for (auto& v : input) { v = 64; } for (size_t i = 0; i < NUM_PATHS; ++i) { input[i * step + 16] = 1; } for (size_t i = 0; i < NUM_PATHS; ++i) { input[i * step + 32] = 1; } const auto expect = winner_takes_all_left( input, width, height, pitch, disparity, uniqueness, subpixel); sgm::WinnerTakesAll<disparity> wta; const auto d_input = to_device_vector(input); wta.enqueue(d_input.data().get(), width, height, static_cast<int>(pitch), uniqueness, subpixel, sgm::PathType::SCAN_8PATH, 0); cudaStreamSynchronize(0); const thrust::device_vector<sgm::output_type> d_actual( wta.get_left_output(), wta.get_left_output() + (pitch * height)); const auto actual = to_host_vector(d_actual); EXPECT_EQ(actual, expect); debug_compare(actual.data(), expect.data(), pitch, height, 1); } TEST(WinnerTakesAllTest, RandomLeftNormal){ test_random_left(false); } TEST(WinnerTakesAllTest, RandomLeftSubpixel){ test_random_left(true); } TEST(WinnerTakesAllTest, RandomLeftNormalWithPitch){ test_random_left(false, 27); } TEST(WinnerTakesAllTest, RandomLeftSubpixelWithPitch){ test_random_left(true, 27); } TEST(WinnerTakesAllTest, Corner1LeftNormal){ test_corner1_left(false); } TEST(WinnerTakesAllTest, Corner1LeftSubpixel){ test_corner1_left(true); } TEST(WinnerTakesAllTest, Corner2LeftNormal){ test_corner2_left(false); } TEST(WinnerTakesAllTest, Corner2LeftSubpixel){ test_corner2_left(true); } static void test_random_right(size_t padding = 0) { static constexpr size_t width = 313, height = 237, disparity = 64; static constexpr float uniqueness = 0.95f; const size_t pitch = width + padding; const auto input = generate_random_sequence<sgm::cost_type>( width * height * disparity * NUM_PATHS); const auto expect = winner_takes_all_right( input, width, height, pitch, disparity, uniqueness); sgm::WinnerTakesAll<disparity> wta; const auto d_input = to_device_vector(input); wta.enqueue(d_input.data().get(), width, height, static_cast<int>(pitch), uniqueness, false, sgm::PathType::SCAN_8PATH, 0); cudaStreamSynchronize(0); const thrust::device_vector<sgm::output_type> d_actual( wta.get_right_output(), wta.get_right_output() + (pitch * height)); const auto actual = to_host_vector(d_actual); EXPECT_EQ(actual, expect); debug_compare(actual.data(), expect.data(), 
pitch, height, 1); } TEST(WinnerTakesAllTest, RandomRight){ test_random_right(); } TEST(WinnerTakesAllTest, RandomRightWithPitch){ test_random_right(27); }
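// Not part of the libSGM sources: apart from the hipify banner, this .cu file
// and its .hip twin above differ only in the runtime call used to drain the
// default stream (cudaStreamSynchronize vs hipStreamSynchronize). A sketch of
// a shim that would let a single test body serve both backends:
#if defined(__HIP_PLATFORM_AMD__) || defined(__HIP_PLATFORM_NVIDIA__)
  #include <hip/hip_runtime.h>
  #define gpuStreamSynchronize hipStreamSynchronize
#else
  #include <cuda_runtime.h>
  #define gpuStreamSynchronize cudaStreamSynchronize
#endif
// usage: gpuStreamSynchronize(0); // default stream, as in the tests above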
b89cf272da3a360717c34bc06809260c2a139a36.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_advec_mom_kernel_x1; int xdim0_advec_mom_kernel_x1_h = -1; __constant__ int ydim0_advec_mom_kernel_x1; int ydim0_advec_mom_kernel_x1_h = -1; __constant__ int xdim1_advec_mom_kernel_x1; int xdim1_advec_mom_kernel_x1_h = -1; __constant__ int ydim1_advec_mom_kernel_x1; int ydim1_advec_mom_kernel_x1_h = -1; __constant__ int xdim2_advec_mom_kernel_x1; int xdim2_advec_mom_kernel_x1_h = -1; __constant__ int ydim2_advec_mom_kernel_x1; int ydim2_advec_mom_kernel_x1_h = -1; __constant__ int xdim3_advec_mom_kernel_x1; int xdim3_advec_mom_kernel_x1_h = -1; __constant__ int ydim3_advec_mom_kernel_x1; int ydim3_advec_mom_kernel_x1_h = -1; __constant__ int xdim4_advec_mom_kernel_x1; int xdim4_advec_mom_kernel_x1_h = -1; __constant__ int ydim4_advec_mom_kernel_x1; int ydim4_advec_mom_kernel_x1_h = -1; __constant__ int xdim5_advec_mom_kernel_x1; int xdim5_advec_mom_kernel_x1_h = -1; __constant__ int ydim5_advec_mom_kernel_x1; int ydim5_advec_mom_kernel_x1_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #define OPS_ACC0(x, y, z) \ (x + xdim0_advec_mom_kernel_x1 * (y) + \ xdim0_advec_mom_kernel_x1 * ydim0_advec_mom_kernel_x1 * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_advec_mom_kernel_x1 * (y) + \ xdim1_advec_mom_kernel_x1 * ydim1_advec_mom_kernel_x1 * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_advec_mom_kernel_x1 * (y) + \ xdim2_advec_mom_kernel_x1 * ydim2_advec_mom_kernel_x1 * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_advec_mom_kernel_x1 * (y) + \ xdim3_advec_mom_kernel_x1 * ydim3_advec_mom_kernel_x1 * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_advec_mom_kernel_x1 * (y) + \ xdim4_advec_mom_kernel_x1 * ydim4_advec_mom_kernel_x1 * (z)) #define OPS_ACC5(x, y, z) \ (x + xdim5_advec_mom_kernel_x1 * (y) + \ xdim5_advec_mom_kernel_x1 * ydim5_advec_mom_kernel_x1 * (z)) // user function __device__ inline void advec_mom_kernel_x1_gpu(double *pre_vol, double *post_vol, const double *volume, const double *vol_flux_x, const double *vol_flux_y, const double *vol_flux_z) { post_vol[OPS_ACC1(0, 0, 0)] = volume[OPS_ACC2(0, 0, 0)] + vol_flux_y[OPS_ACC4(0, 1, 0)] - vol_flux_y[OPS_ACC4(0, 0, 0)] + vol_flux_z[OPS_ACC5(0, 0, 1)] - vol_flux_z[OPS_ACC5(0, 0, 0)]; pre_vol[OPS_ACC0(0, 0, 0)] = post_vol[OPS_ACC1(0, 0, 0)] + vol_flux_x[OPS_ACC3(1, 0, 0)] - vol_flux_x[OPS_ACC3(0, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 __global__ void ops_advec_mom_kernel_x1(double *__restrict arg0, double *__restrict arg1, const double *__restrict arg2, const double *__restrict arg3, const double *__restrict arg4, const double *__restrict arg5, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel_x1 + idx_z * 1 * 1 * xdim0_advec_mom_kernel_x1 * ydim0_advec_mom_kernel_x1; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel_x1 + idx_z * 1 * 1 * xdim1_advec_mom_kernel_x1 * ydim1_advec_mom_kernel_x1; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_mom_kernel_x1 + idx_z * 1 * 1 * xdim2_advec_mom_kernel_x1 * ydim2_advec_mom_kernel_x1; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_mom_kernel_x1 + idx_z * 1 * 1 * xdim3_advec_mom_kernel_x1 * ydim3_advec_mom_kernel_x1; arg4 += 
idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_mom_kernel_x1 + idx_z * 1 * 1 * xdim4_advec_mom_kernel_x1 * ydim4_advec_mom_kernel_x1; arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_advec_mom_kernel_x1 + idx_z * 1 * 1 * xdim5_advec_mom_kernel_x1 * ydim5_advec_mom_kernel_x1; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_mom_kernel_x1_gpu(arg0, arg1, arg2, arg3, arg4, arg5); } } // host stub function void ops_par_loop_advec_mom_kernel_x1(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) { // Timing double t1, t2, c1, c2; ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 6, range, 19)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(19, "advec_mom_kernel_x1"); OPS_kernels[19].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; if (xdim0 != xdim0_advec_mom_kernel_x1_h || ydim0 != ydim0_advec_mom_kernel_x1_h || xdim1 != xdim1_advec_mom_kernel_x1_h || ydim1 != ydim1_advec_mom_kernel_x1_h || xdim2 != xdim2_advec_mom_kernel_x1_h || ydim2 != ydim2_advec_mom_kernel_x1_h || xdim3 != xdim3_advec_mom_kernel_x1_h || ydim3 != ydim3_advec_mom_kernel_x1_h || xdim4 != xdim4_advec_mom_kernel_x1_h || ydim4 != ydim4_advec_mom_kernel_x1_h || xdim5 != xdim5_advec_mom_kernel_x1_h || ydim5 != ydim5_advec_mom_kernel_x1_h) { hipMemcpyToSymbol(xdim0_advec_mom_kernel_x1, &xdim0, sizeof(int)); xdim0_advec_mom_kernel_x1_h = xdim0; hipMemcpyToSymbol(ydim0_advec_mom_kernel_x1, &ydim0, sizeof(int)); ydim0_advec_mom_kernel_x1_h = ydim0; hipMemcpyToSymbol(xdim1_advec_mom_kernel_x1, &xdim1, sizeof(int)); xdim1_advec_mom_kernel_x1_h = xdim1; hipMemcpyToSymbol(ydim1_advec_mom_kernel_x1, &ydim1, sizeof(int)); ydim1_advec_mom_kernel_x1_h = ydim1; hipMemcpyToSymbol(xdim2_advec_mom_kernel_x1, &xdim2, sizeof(int)); xdim2_advec_mom_kernel_x1_h = xdim2; hipMemcpyToSymbol(ydim2_advec_mom_kernel_x1, &ydim2, sizeof(int)); ydim2_advec_mom_kernel_x1_h = ydim2; hipMemcpyToSymbol(xdim3_advec_mom_kernel_x1, &xdim3, sizeof(int)); xdim3_advec_mom_kernel_x1_h = xdim3; hipMemcpyToSymbol(ydim3_advec_mom_kernel_x1, &ydim3, sizeof(int)); 
ydim3_advec_mom_kernel_x1_h = ydim3; hipMemcpyToSymbol(xdim4_advec_mom_kernel_x1, &xdim4, sizeof(int)); xdim4_advec_mom_kernel_x1_h = xdim4; hipMemcpyToSymbol(ydim4_advec_mom_kernel_x1, &ydim4, sizeof(int)); ydim4_advec_mom_kernel_x1_h = ydim4; hipMemcpyToSymbol(xdim5_advec_mom_kernel_x1, &xdim5, sizeof(int)); xdim5_advec_mom_kernel_x1_h = xdim5; hipMemcpyToSymbol(ydim5_advec_mom_kernel_x1, &ydim5, sizeof(int)); ydim5_advec_mom_kernel_x1_h = ydim5; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; char *p_a[6]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4 
+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5 + dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; ops_H_D_exchanges_device(args, 6); ops_halo_exchanges(args, 6, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[19].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_advec_mom_kernel_x1), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[19].time += t1 - t2; } ops_set_dirtybit_device(args, 6); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[19].mpi_time += t2 - t1; OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg5); } }
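// Not part of the generated OPS code: the host stub above caches each dat's
// x/y extents in __constant__ memory and mirrors the last uploaded value in a
// *_h host shadow, so hipMemcpyToSymbol only fires when an extent actually
// changes between calls. The idiom, stripped to one variable (illustrative
// names):
__constant__ int xdim_example;   // read by the kernel through the OPS_ACC macros
static int xdim_example_h = -1;  // host shadow of the last value uploaded

static void set_xdim_if_changed(int xdim)
{
    if (xdim != xdim_example_h) {  // skip the upload when nothing changed
        hipMemcpyToSymbol(xdim_example, &xdim, sizeof(int));
        xdim_example_h = xdim;
    }
}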
b89cf272da3a360717c34bc06809260c2a139a36.cu
// // auto-generated by ops.py // __constant__ int xdim0_advec_mom_kernel_x1; int xdim0_advec_mom_kernel_x1_h = -1; __constant__ int ydim0_advec_mom_kernel_x1; int ydim0_advec_mom_kernel_x1_h = -1; __constant__ int xdim1_advec_mom_kernel_x1; int xdim1_advec_mom_kernel_x1_h = -1; __constant__ int ydim1_advec_mom_kernel_x1; int ydim1_advec_mom_kernel_x1_h = -1; __constant__ int xdim2_advec_mom_kernel_x1; int xdim2_advec_mom_kernel_x1_h = -1; __constant__ int ydim2_advec_mom_kernel_x1; int ydim2_advec_mom_kernel_x1_h = -1; __constant__ int xdim3_advec_mom_kernel_x1; int xdim3_advec_mom_kernel_x1_h = -1; __constant__ int ydim3_advec_mom_kernel_x1; int ydim3_advec_mom_kernel_x1_h = -1; __constant__ int xdim4_advec_mom_kernel_x1; int xdim4_advec_mom_kernel_x1_h = -1; __constant__ int ydim4_advec_mom_kernel_x1; int ydim4_advec_mom_kernel_x1_h = -1; __constant__ int xdim5_advec_mom_kernel_x1; int xdim5_advec_mom_kernel_x1_h = -1; __constant__ int ydim5_advec_mom_kernel_x1; int ydim5_advec_mom_kernel_x1_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #define OPS_ACC0(x, y, z) \ (x + xdim0_advec_mom_kernel_x1 * (y) + \ xdim0_advec_mom_kernel_x1 * ydim0_advec_mom_kernel_x1 * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_advec_mom_kernel_x1 * (y) + \ xdim1_advec_mom_kernel_x1 * ydim1_advec_mom_kernel_x1 * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_advec_mom_kernel_x1 * (y) + \ xdim2_advec_mom_kernel_x1 * ydim2_advec_mom_kernel_x1 * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_advec_mom_kernel_x1 * (y) + \ xdim3_advec_mom_kernel_x1 * ydim3_advec_mom_kernel_x1 * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_advec_mom_kernel_x1 * (y) + \ xdim4_advec_mom_kernel_x1 * ydim4_advec_mom_kernel_x1 * (z)) #define OPS_ACC5(x, y, z) \ (x + xdim5_advec_mom_kernel_x1 * (y) + \ xdim5_advec_mom_kernel_x1 * ydim5_advec_mom_kernel_x1 * (z)) // user function __device__ inline void advec_mom_kernel_x1_gpu(double *pre_vol, double *post_vol, const double *volume, const double *vol_flux_x, const double *vol_flux_y, const double *vol_flux_z) { post_vol[OPS_ACC1(0, 0, 0)] = volume[OPS_ACC2(0, 0, 0)] + vol_flux_y[OPS_ACC4(0, 1, 0)] - vol_flux_y[OPS_ACC4(0, 0, 0)] + vol_flux_z[OPS_ACC5(0, 0, 1)] - vol_flux_z[OPS_ACC5(0, 0, 0)]; pre_vol[OPS_ACC0(0, 0, 0)] = post_vol[OPS_ACC1(0, 0, 0)] + vol_flux_x[OPS_ACC3(1, 0, 0)] - vol_flux_x[OPS_ACC3(0, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 __global__ void ops_advec_mom_kernel_x1(double *__restrict arg0, double *__restrict arg1, const double *__restrict arg2, const double *__restrict arg3, const double *__restrict arg4, const double *__restrict arg5, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel_x1 + idx_z * 1 * 1 * xdim0_advec_mom_kernel_x1 * ydim0_advec_mom_kernel_x1; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel_x1 + idx_z * 1 * 1 * xdim1_advec_mom_kernel_x1 * ydim1_advec_mom_kernel_x1; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_mom_kernel_x1 + idx_z * 1 * 1 * xdim2_advec_mom_kernel_x1 * ydim2_advec_mom_kernel_x1; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_mom_kernel_x1 + idx_z * 1 * 1 * xdim3_advec_mom_kernel_x1 * ydim3_advec_mom_kernel_x1; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_mom_kernel_x1 + idx_z * 1 * 1 * 
xdim4_advec_mom_kernel_x1 * ydim4_advec_mom_kernel_x1; arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_advec_mom_kernel_x1 + idx_z * 1 * 1 * xdim5_advec_mom_kernel_x1 * ydim5_advec_mom_kernel_x1; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_mom_kernel_x1_gpu(arg0, arg1, arg2, arg3, arg4, arg5); } } // host stub function void ops_par_loop_advec_mom_kernel_x1(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5) { // Timing double t1, t2, c1, c2; ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 6, range, 19)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(19, "advec_mom_kernel_x1"); OPS_kernels[19].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; if (xdim0 != xdim0_advec_mom_kernel_x1_h || ydim0 != ydim0_advec_mom_kernel_x1_h || xdim1 != xdim1_advec_mom_kernel_x1_h || ydim1 != ydim1_advec_mom_kernel_x1_h || xdim2 != xdim2_advec_mom_kernel_x1_h || ydim2 != ydim2_advec_mom_kernel_x1_h || xdim3 != xdim3_advec_mom_kernel_x1_h || ydim3 != ydim3_advec_mom_kernel_x1_h || xdim4 != xdim4_advec_mom_kernel_x1_h || ydim4 != ydim4_advec_mom_kernel_x1_h || xdim5 != xdim5_advec_mom_kernel_x1_h || ydim5 != ydim5_advec_mom_kernel_x1_h) { cudaMemcpyToSymbol(xdim0_advec_mom_kernel_x1, &xdim0, sizeof(int)); xdim0_advec_mom_kernel_x1_h = xdim0; cudaMemcpyToSymbol(ydim0_advec_mom_kernel_x1, &ydim0, sizeof(int)); ydim0_advec_mom_kernel_x1_h = ydim0; cudaMemcpyToSymbol(xdim1_advec_mom_kernel_x1, &xdim1, sizeof(int)); xdim1_advec_mom_kernel_x1_h = xdim1; cudaMemcpyToSymbol(ydim1_advec_mom_kernel_x1, &ydim1, sizeof(int)); ydim1_advec_mom_kernel_x1_h = ydim1; cudaMemcpyToSymbol(xdim2_advec_mom_kernel_x1, &xdim2, sizeof(int)); xdim2_advec_mom_kernel_x1_h = xdim2; cudaMemcpyToSymbol(ydim2_advec_mom_kernel_x1, &ydim2, sizeof(int)); ydim2_advec_mom_kernel_x1_h = ydim2; cudaMemcpyToSymbol(xdim3_advec_mom_kernel_x1, &xdim3, sizeof(int)); xdim3_advec_mom_kernel_x1_h = xdim3; cudaMemcpyToSymbol(ydim3_advec_mom_kernel_x1, &ydim3, sizeof(int)); ydim3_advec_mom_kernel_x1_h = ydim3; 
cudaMemcpyToSymbol(xdim4_advec_mom_kernel_x1, &xdim4, sizeof(int)); xdim4_advec_mom_kernel_x1_h = xdim4; cudaMemcpyToSymbol(ydim4_advec_mom_kernel_x1, &ydim4, sizeof(int)); ydim4_advec_mom_kernel_x1_h = ydim4; cudaMemcpyToSymbol(xdim5_advec_mom_kernel_x1, &xdim5, sizeof(int)); xdim5_advec_mom_kernel_x1_h = xdim5; cudaMemcpyToSymbol(ydim5_advec_mom_kernel_x1, &ydim5, sizeof(int)); ydim5_advec_mom_kernel_x1_h = ydim5; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; char *p_a[6]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4 + dat4 * args[4].dat->size[0] * 
(start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5 + dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; ops_H_D_exchanges_device(args, 6); ops_halo_exchanges(args, 6, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[19].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_advec_mom_kernel_x1<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[19].time += t1 - t2; } ops_set_dirtybit_device(args, 6); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[19].mpi_time += t2 - t1; OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg5); } }
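// Not part of the generated OPS code: the grid sizing above uses the usual
// ceiling division, (x_size - 1) / OPS_block_size_x + 1, so a partial tile
// still gets a block, and the idx_x/idx_y/idx_z bounds check inside the
// kernel masks the overhang. The same expression as a helper (illustrative
// name; with int truncation it yields 1 even for n == 0, which the in-kernel
// guard tolerates):
static inline int ops_div_ceil(int n, int block)
{
    return (n - 1) / block + 1; // identical to the grid-sizing expression above
}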
2b5288fcdc8ad5ce4361a3cba83630b933b4f49e.hip
// !!! This is a file automatically generated by hipify!!! #include <basicOps.cuh> #include <clusterNet.h> #include <assert.h> #include <stdio.h> #include <util.cuh> #include <batchAllocator.h> #include <basicOps.cuh> using std::cout; using std::endl; int run_batchAllocator_test(ClusterNet gpus) { Matrix *m1; Matrix *m2; Matrix *m_host; Matrix *m_host2; Matrix *m_host_dist; Matrix *m_host2_dist; //batch allocator test m1 = to_host(arange(10000,784)); m2 = to_host(arange(10000,1)); BatchAllocator b = BatchAllocator(); b.init(m1,m2,0.20,128,256); assert(test_matrix(b.CURRENT_BATCH,128,784)); assert(test_matrix(b.CURRENT_BATCH_Y,128,1)); assert(test_matrix(b.CURRENT_BATCH_CV,256,784)); assert(test_matrix(b.CURRENT_BATCH_CV_Y,256,1)); BatchAllocator b_dist = BatchAllocator(); b_dist.init(m1,m2,0.2,128,256,gpus,Distributed_weights); assert(test_matrix(b_dist.CURRENT_BATCH,128,784)); assert(test_matrix(b_dist.CURRENT_BATCH_Y,128,1)); assert(test_matrix(b_dist.CURRENT_BATCH_CV,256,784)); assert(test_matrix(b_dist.CURRENT_BATCH_CV_Y,256,1)); int value = 0; int value_y = 0; for(int epoch = 0; epoch < 2; epoch++) { value = 0; value_y = 0; for(int batchno = 0; batchno < b.TOTAL_BATCHES; batchno++) { assert(b.CURRENT_BATCH->rows == 128 || b.CURRENT_BATCH->rows == 8000%128); assert(b_dist.CURRENT_BATCH->rows == 128 || b_dist.CURRENT_BATCH->rows == 8000%128); m_host = to_host(b.CURRENT_BATCH); m_host2 = to_host(b.CURRENT_BATCH_Y); b.broadcast_batch_to_processes(); m_host_dist = to_host(b_dist.CURRENT_BATCH); m_host2_dist = to_host(b_dist.CURRENT_BATCH_Y); b_dist.broadcast_batch_to_processes(); for(int i = 0; i < b.CURRENT_BATCH->rows*784; i++) { assert(test_eq(m_host->data[i],(float)value,i,i,"Batch test")); assert(test_eq(m_host_dist->data[i],(float)value,i,i,"Batch test")); value++; } b.allocate_next_batch_async(); b_dist.allocate_next_batch_async(); for(int i = 0; i < b.CURRENT_BATCH->rows; i++) { assert(test_eq(m_host2->data[i],(float)value_y,i,i,"Batch test")); assert(test_eq(m_host2_dist->data[i],(float)value_y,i,i,"Batch test")); value_y++; } b.replace_current_batch_with_next(); b_dist.replace_current_batch_with_next(); } assert(test_eq(value,6272000,"Batch test train 128")); assert(test_eq(value_y,8000,"Batch test train 128")); for(int batchno = 0; batchno < b.TOTAL_BATCHES_CV; batchno++) { assert(b.CURRENT_BATCH_CV->rows == 256 || b.CURRENT_BATCH_CV->rows == 2000%256); assert(b_dist.CURRENT_BATCH_CV->rows == 256 || b_dist.CURRENT_BATCH_CV->rows == 2000%256); m_host = to_host(b.CURRENT_BATCH_CV); m_host2 = to_host(b.CURRENT_BATCH_CV_Y); b.broadcast_batch_cv_to_processes(); m_host_dist = to_host(b_dist.CURRENT_BATCH_CV); m_host2_dist = to_host(b_dist.CURRENT_BATCH_CV_Y); b_dist.broadcast_batch_cv_to_processes(); for(int i = 0; i < b.CURRENT_BATCH_CV->rows*784; i++) { assert(test_eq(m_host->data[i],(float)value,"Batch test")); assert(test_eq(m_host_dist->data[i],(float)value,"Batch test")); value++; } b.allocate_next_cv_batch_async(); b_dist.allocate_next_cv_batch_async(); for(int i = 0; i < b.CURRENT_BATCH_CV->rows; i++) { assert(test_eq(m_host2->data[i],(float)value_y,"Batch test")); assert(test_eq(m_host2_dist->data[i],(float)value_y,"Batch test")); value_y++; } b.replace_current_cv_batch_with_next(); b_dist.replace_current_cv_batch_with_next(); } } assert(test_eq(value,7840000,"Batch test")); assert(test_eq(value_y,10000,"Batch test")); m1 = to_host(arange(70000,784)); m2 = to_host(arange(70000,10)); b = BatchAllocator(); b.init(m1,m2,0.20,128,512); 
assert(test_matrix(b.CURRENT_BATCH,128,784)); assert(test_matrix(b.CURRENT_BATCH_Y,128,10)); assert(test_matrix(b.CURRENT_BATCH_CV,512,784)); assert(test_matrix(b.CURRENT_BATCH_CV_Y,512,10)); b_dist = BatchAllocator(); b_dist.init(m1,m2,0.2,128,512,gpus,Distributed_weights); assert(test_matrix(b_dist.CURRENT_BATCH,128,784)); assert(test_matrix(b_dist.CURRENT_BATCH_Y,128,10)); assert(test_matrix(b_dist.CURRENT_BATCH_CV,512,784)); assert(test_matrix(b_dist.CURRENT_BATCH_CV_Y,512,10)); for(int epoch = 0; epoch < 2; epoch++) { value = 0; value_y = 0; for(int batchno = 0; batchno < b.TOTAL_BATCHES; batchno++) { assert(b.CURRENT_BATCH->rows == 128 || b.CURRENT_BATCH->rows == 56000%128); assert(b_dist.CURRENT_BATCH->rows == 128 || b_dist.CURRENT_BATCH->rows == 56000%128); m_host = to_host(b.CURRENT_BATCH); m_host2 = to_host(b.CURRENT_BATCH_Y); b.broadcast_batch_to_processes(); m_host_dist = to_host(b_dist.CURRENT_BATCH); m_host2_dist = to_host(b_dist.CURRENT_BATCH_Y); b_dist.broadcast_batch_to_processes(); for(int i = 0; i < b.CURRENT_BATCH->rows*784; i++) { assert(test_eq(m_host->data[i],(float)value,"Batch test")); assert(test_eq(m_host_dist->data[i],(float)value,"Batch test")); value++; } b.allocate_next_batch_async(); b_dist.allocate_next_batch_async(); for(int i = 0; i < b.CURRENT_BATCH->rows*10; i++) { assert(test_eq(m_host2->data[i],(float)value_y,"Batch test")); assert(test_eq(m_host2_dist->data[i],(float)value_y,"Batch test")); value_y++; } b.replace_current_batch_with_next(); b_dist.replace_current_batch_with_next(); } assert(test_eq(value,43904000,"Batch test")); assert(test_eq(value_y,560000,"Batch test")); for(int batchno = 0; batchno < b.TOTAL_BATCHES_CV; batchno++) { assert(b.CURRENT_BATCH_CV->rows == 512 || b.CURRENT_BATCH_CV->rows == 14000%512); assert(b_dist.CURRENT_BATCH_CV->rows == 512 || b_dist.CURRENT_BATCH_CV->rows == 14000%512); m_host = to_host(b.CURRENT_BATCH_CV); m_host2 = to_host(b.CURRENT_BATCH_CV_Y); b.broadcast_batch_cv_to_processes(); m_host_dist = to_host(b_dist.CURRENT_BATCH_CV); m_host2_dist = to_host(b_dist.CURRENT_BATCH_CV_Y); b_dist.broadcast_batch_cv_to_processes(); for(int i = 0; i < b.CURRENT_BATCH_CV->rows*784; i++) { assert(test_eq(m_host->data[i],(float)value,"Batch test")); assert(test_eq(m_host_dist->data[i],(float)value,"Batch test")); value++; } b.allocate_next_cv_batch_async(); b_dist.allocate_next_cv_batch_async(); for(int i = 0; i < b.CURRENT_BATCH_CV->rows*10; i++) { assert(test_eq(m_host2->data[i],(float)value_y,"Batch test")); assert(test_eq(m_host2_dist->data[i],(float)value_y,"Batch test")); value_y++; } b.replace_current_cv_batch_with_next(); b_dist.replace_current_cv_batch_with_next(); } assert(test_eq(value,54880000,"Batch test")); assert(test_eq(value_y,700000,"Batch test")); } char buff[1024] = {0}; ssize_t len = ::readlink("/proc/self/exe", buff, sizeof(buff)-1); std::string path = std::string(buff); replace(path,"/build/testSuite.out","/tests/"); /* Matrix *X; Matrix *y; if(gpus.MYGPUID == 0) { X = read_sparse_hdf5((path + "crowdflower_X_test.hdf5").c_str()); y = read_sparse_hdf5((path + "crowdflower_y_test.hdf5").c_str()); } MPI_Barrier(MPI_COMM_WORLD); if(gpus.MYGPUID == 1) { X = read_sparse_hdf5((path + "crowdflower_X_test.hdf5").c_str()); y = read_sparse_hdf5((path + "crowdflower_y_test.hdf5").c_str()); } MPI_Barrier(MPI_COMM_WORLD); if(gpus.MYGPUID == 2) { X = read_sparse_hdf5((path + "crowdflower_X_test.hdf5").c_str()); y = read_sparse_hdf5((path + "crowdflower_y_test.hdf5").c_str()); } b = BatchAllocator(); 
b.init(X,y,0.20,128,256,gpus, Distributed_weights_sparse); assert(test_eq(b.CURRENT_BATCH->rows,128,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH->cols,9000,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_Y->rows,128,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_Y->cols,24,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_CV->rows,256,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_CV->cols,9000,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_CV_Y->rows,256,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_CV_Y->cols,24,"sparse distributed batch allocator test")); int index = 0; int index_y = 0; int index_rows = 0; int row_ptr_offset = 0; int row_ptr_offset_y = 0; for(int epoch = 0; epoch < 3; epoch++) { index_rows = 0; index = 0; index_y = 0; row_ptr_offset = 0; row_ptr_offset_y = 0; for(int i = 0; i < b.TOTAL_BATCHES; i++) { Matrix *s1 = to_host(b.CURRENT_BATCH); Matrix *s2 = to_host(b.CURRENT_BATCH_Y); Matrix *B = ones(b.CURRENT_BATCH->cols,20); Matrix *out = zeros(b.CURRENT_BATCH->rows, B->cols); b.broadcast_batch_to_processes(); for(int j = 0; j < b.CURRENT_BATCH->size; j++) { assert(test_eq(X->data[index],s1->data[j],"sparse batch allocator data test")); assert(test_eq(X->idx_cols[index],s1->idx_cols[j],"sparse batch allocator data test")); index++; } for(int j = 0; j < b.CURRENT_BATCH_Y->size; j++) { assert(test_eq(y->data[index_y],s2->data[j],"sparse batch allocator data test")); assert(test_eq(y->idx_cols[index_y],s2->idx_cols[j],"sparse batch allocator data test")); index_y++; } assert(test_eq(X->ptr_rows[index_rows + b.CURRENT_BATCH->rows] - X->ptr_rows[index_rows],b.CURRENT_BATCH->size,"test sparse batch size")); assert(test_eq((int)(X->ptr_rows[index_rows + b.CURRENT_BATCH->rows] - X->ptr_rows[index_rows])*sizeof(float),(int)b.CURRENT_BATCH->bytes,"test sparse batch bytes")); assert(test_eq((int)b.CURRENT_BATCH->idx_bytes,(int)b.CURRENT_BATCH->bytes,"test sparse batch bytes")); assert(test_eq((int)(X->ptr_rows[index_rows + b.CURRENT_BATCH->rows] - X->ptr_rows[index_rows])*sizeof(int),(int)b.CURRENT_BATCH->idx_bytes,"test sparse batch bytes")); assert(test_eq((int)(b.CURRENT_BATCH->rows +1)*sizeof(int),(int)b.CURRENT_BATCH->ptr_bytes,"test sparse batch bytes")); assert(test_eq(y->ptr_rows[index_rows + b.CURRENT_BATCH_Y->rows] - y->ptr_rows[index_rows],b.CURRENT_BATCH_Y->size,"test sparse batch size")); assert(test_eq((int)(y->ptr_rows[index_rows + b.CURRENT_BATCH_Y->rows] - y->ptr_rows[index_rows])*sizeof(float),(int)b.CURRENT_BATCH_Y->bytes,"test sparse batch bytes")); assert(test_eq((int)b.CURRENT_BATCH_Y->idx_bytes,(int)b.CURRENT_BATCH_Y->bytes,"test sparse batch bytes")); assert(test_eq((int)(y->ptr_rows[index_rows + b.CURRENT_BATCH_Y->rows] - y->ptr_rows[index_rows])*sizeof(int),(int)b.CURRENT_BATCH_Y->idx_bytes,"test sparse batch bytes")); assert(test_eq((int)(b.CURRENT_BATCH_Y->rows +1)*sizeof(int),(int)b.CURRENT_BATCH_Y->ptr_bytes,"test sparse batch bytes")); for(int j = 0; j < b.CURRENT_BATCH_Y->rows+1; j++) { assert(test_eq(X->ptr_rows[index_rows],s1->ptr_rows[j] + row_ptr_offset,"sparse batch allocator data test")); assert(test_eq(y->ptr_rows[index_rows],s2->ptr_rows[j]+ row_ptr_offset_y,"sparse batch allocator data test")); index_rows++; } index_rows--; row_ptr_offset += b.CURRENT_BATCH->size; row_ptr_offset_y += b.CURRENT_BATCH_Y->size; gpus.dot_sparse(b.CURRENT_BATCH, B, 
out); ASSERT(sum(out) > -15000 && sum(out) < 15000, "sparse batching sparse dot output test"); if((i +1) == b.TOTAL_BATCHES) assert(test_eq(b.CURRENT_BATCH->rows,((int)ceil((X->rows*0.8))) % b.BATCH_SIZE,"after all sparse batches test: partial batch size")); b.allocate_next_batch_async(); b.replace_current_batch_with_next(); hipFree(s1->data); hipFree(s1->idx_cols); hipFree(s1->ptr_rows); free(s1); hipFree(B->data); hipFree(out->data); free(out); free(B); } assert(test_eq(index_rows+1,((int)ceil((X->rows*0.8))) +1,"after all sparse batches test: rows idx.")); assert(test_eq(index_y,y->ptr_rows[((int)ceil((y->rows*0.8))) ],"after all sparse batches test: data idx y")); assert(test_eq(index,X->ptr_rows[((int)ceil((y->rows*0.8)))],"after all sparse batches test: data idx X")); for(int i = 0; i < b.TOTAL_BATCHES_CV; i++) { Matrix *s1 = to_host(b.CURRENT_BATCH_CV); Matrix *s2 = to_host(b.CURRENT_BATCH_CV_Y); Matrix *B = ones(b.CURRENT_BATCH_CV->cols,20); Matrix *out = zeros(b.CURRENT_BATCH_CV->rows, B->cols); b.broadcast_batch_cv_to_processes(); for(int j = 0; j < b.CURRENT_BATCH_CV->size; j++) { assert(test_eq(X->data[index],s1->data[j],"sparse batch allocator data test")); assert(test_eq(X->idx_cols[index],s1->idx_cols[j],"sparse batch allocator data test")); index++; } for(int j = 0; j < b.CURRENT_BATCH_CV_Y->size; j++) { assert(test_eq(y->data[index_y],s2->data[j],"sparse batch allocator data test")); assert(test_eq(y->idx_cols[index_y],s2->idx_cols[j],"sparse batch allocator data test")); index_y++; } assert(test_eq(X->ptr_rows[index_rows + b.CURRENT_BATCH_CV->rows] - X->ptr_rows[index_rows],b.CURRENT_BATCH_CV->size,"test sparse batch size")); assert(test_eq((int)(X->ptr_rows[index_rows + b.CURRENT_BATCH_CV->rows] - X->ptr_rows[index_rows])*sizeof(float),(int)b.CURRENT_BATCH_CV->bytes,"test sparse batch bytes")); assert(test_eq((int)b.CURRENT_BATCH_CV->idx_bytes,(int)b.CURRENT_BATCH_CV->bytes,"test sparse batch bytes")); assert(test_eq((int)(X->ptr_rows[index_rows + b.CURRENT_BATCH_CV->rows] - X->ptr_rows[index_rows])*sizeof(int),(int)b.CURRENT_BATCH_CV->idx_bytes,"test sparse batch bytes")); assert(test_eq((int)(b.CURRENT_BATCH_CV->rows +1)*sizeof(int),(int)b.CURRENT_BATCH_CV->ptr_bytes,"test sparse batch bytes")); assert(test_eq(y->ptr_rows[index_rows + b.CURRENT_BATCH_CV_Y->rows] - y->ptr_rows[index_rows],b.CURRENT_BATCH_CV_Y->size,"test sparse batch size")); assert(test_eq((int)(y->ptr_rows[index_rows + b.CURRENT_BATCH_CV_Y->rows] - y->ptr_rows[index_rows])*sizeof(float),(int)b.CURRENT_BATCH_CV_Y->bytes,"test sparse batch bytes")); assert(test_eq((int)b.CURRENT_BATCH_CV_Y->idx_bytes,(int)b.CURRENT_BATCH_CV_Y->bytes,"test sparse batch bytes")); assert(test_eq((int)(y->ptr_rows[index_rows + b.CURRENT_BATCH_CV_Y->rows] - y->ptr_rows[index_rows])*sizeof(int),(int)b.CURRENT_BATCH_CV_Y->idx_bytes,"test sparse batch bytes")); assert(test_eq((int)(b.CURRENT_BATCH_CV_Y->rows +1)*sizeof(int),(int)b.CURRENT_BATCH_CV_Y->ptr_bytes,"test sparse batch bytes")); for(int j = 0; j < b.CURRENT_BATCH_CV_Y->rows+1; j++) { assert(test_eq(X->ptr_rows[index_rows],s1->ptr_rows[j] + row_ptr_offset,"sparse batch allocator data test")); assert(test_eq(y->ptr_rows[index_rows],s2->ptr_rows[j]+ row_ptr_offset_y,"sparse batch allocator data test")); index_rows++; } index_rows--; row_ptr_offset += b.CURRENT_BATCH_CV->size; row_ptr_offset_y += b.CURRENT_BATCH_CV_Y->size; gpus.dot_sparse(b.CURRENT_BATCH_CV, B, out); ASSERT(sum(out) > -25000 && sum(out) < 25000, "sparse batching sparse dot output test"); if((i +1) == 
b.TOTAL_BATCHES_CV) assert(test_eq(b.CURRENT_BATCH_CV->rows,(X->rows - (int)ceil((X->rows*0.8))) % b.BATCH_SIZE_CV,"after all sparse batches test: partial batch size")); b.allocate_next_cv_batch_async(); b.replace_current_cv_batch_with_next(); hipFree(s1->data); hipFree(s1->idx_cols); hipFree(s1->ptr_rows); free(s1); hipFree(B->data); hipFree(out->data); free(out); free(B); } assert(test_eq(index_rows+1,X->rows +1,"after all sparse batches test: rows idx.")); assert(test_eq(index_y,y->ptr_rows[y->rows ],"after all sparse batches test: data idx y")); assert(test_eq(index,X->ptr_rows[y->rows],"after all sparse batches test: data idx X")); } if(gpus.MYGPUID != 0) { X = empty_sparse(1,1,1); y = empty_sparse(1,1,1); } b = BatchAllocator(); b.init(X,y,0.20,33,77,gpus, Distributed_weights_sparse); assert(test_eq(b.CURRENT_BATCH->rows,33,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH->cols,9000,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_Y->rows,33,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_Y->cols,24,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_CV->rows,77,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_CV->cols,9000,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_CV_Y->rows,77,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_CV_Y->cols,24,"sparse distributed batch allocator test")); for(int epoch = 0; epoch < 3; epoch++) { index_rows = 0; index = 0; index_y = 0; row_ptr_offset = 0; row_ptr_offset_y = 0; for(int i = 0; i < b.TOTAL_BATCHES; i++) { Matrix *B = ones(b.CURRENT_BATCH->rows,20); Matrix *out = zeros(b.CURRENT_BATCH->cols, B->cols); b.broadcast_batch_to_processes(); if(gpus.MYGPUID == 0) { Matrix *s1 = to_host(b.CURRENT_BATCH); Matrix *s2 = to_host(b.CURRENT_BATCH_Y); Matrix *m3 = gpus.sparse_to_dense(b.CURRENT_BATCH); Matrix *s3 = to_host((gpus.dense_to_sparse(m3))); //cout << sum(m3) << " vs " << sum(s3) << endl; for(int i = 0; i < s3->size; i++) { cout << s1->idx_cols[i] << " vs " << s3->idx_cols[i] << endl; //assert(test_eq(s1->data[i],s3->data[i],"dense to sparse and back equality.")); //assert(test_eq(s1->idx_cols[i],s3->idx_cols[i],"dense to sparse and back equality.")); } cout << "size: " << s1->size << endl; for(int i = 0; i < s3->rows+1; i++) cout << s1->ptr_rows[i] << " vs " << s3->ptr_rows[i] << endl; //assert(test_eq(s1->ptr_rows[i],s3->ptr_rows[i],"dense to sparse and back equality.")); for(int j = 0; j < b.CURRENT_BATCH->size; j++) { assert(test_eq(X->data[index],s1->data[j],"sparse batch allocator data test")); assert(test_eq(X->idx_cols[index],s1->idx_cols[j],"sparse batch allocator data test")); index++; } for(int j = 0; j < b.CURRENT_BATCH_Y->size; j++) { assert(test_eq(y->data[index_y],s2->data[j],"sparse batch allocator data test")); assert(test_eq(y->idx_cols[index_y],s2->idx_cols[j],"sparse batch allocator data test")); index_y++; } assert(test_eq(X->ptr_rows[index_rows + b.CURRENT_BATCH->rows] - X->ptr_rows[index_rows],b.CURRENT_BATCH->size,"test sparse batch size")); assert(test_eq((int)(X->ptr_rows[index_rows + b.CURRENT_BATCH->rows] - X->ptr_rows[index_rows])*sizeof(float),(int)b.CURRENT_BATCH->bytes,"test sparse batch bytes")); assert(test_eq((int)(X->ptr_rows[index_rows + b.CURRENT_BATCH->rows] - X->ptr_rows[index_rows])*sizeof(int),(int)b.CURRENT_BATCH->idx_bytes,"test sparse batch bytes")); assert(test_eq((int)(b.CURRENT_BATCH->rows 
+1)*sizeof(int),(int)b.CURRENT_BATCH->ptr_bytes,"test sparse batch bytes")); for(int j = 0; j < b.CURRENT_BATCH->rows+1; j++) { assert(test_eq(X->ptr_rows[index_rows],s1->ptr_rows[j] + row_ptr_offset,"sparse batch allocator data test")); assert(test_eq(y->ptr_rows[index_rows],s2->ptr_rows[j]+ row_ptr_offset_y,"sparse batch allocator data test")); index_rows++; } index_rows--; row_ptr_offset += b.CURRENT_BATCH->size; row_ptr_offset_y += b.CURRENT_BATCH_Y->size; hipFree(s1->data); hipFree(s1->idx_cols); hipFree(s1->ptr_rows); free(s1); hipFree(s2->data); hipFree(s2->idx_cols); hipFree(s2->ptr_rows); free(s2); } cout << "pre Tdot" << endl; gpus.Tdot_sparse(b.CURRENT_BATCH, B, out); cout << "post Tdot" << endl; cout << sum(out) << endl; //ASSERT(sum(out) > -3000 && sum(out) < 3000, "sparse batching sparse dot output test"); b.allocate_next_batch_async(); b.replace_current_batch_with_next(); hipFree(B->data); hipFree(out->data); free(out); free(B); } for(int i = 0; i < b.TOTAL_BATCHES_CV; i++) { Matrix *B = ones(b.CURRENT_BATCH_CV->cols,20); Matrix *out = zeros(b.CURRENT_BATCH_CV->rows, B->cols); b.broadcast_batch_cv_to_processes(); if(gpus.MYGPUID == 0) { Matrix *s1 = to_host(b.CURRENT_BATCH_CV); Matrix *s2 = to_host(b.CURRENT_BATCH_CV_Y); for(int j = 0; j < b.CURRENT_BATCH_CV->size; j++) { assert(test_eq(X->data[index],s1->data[j],"sparse batch allocator data test")); assert(test_eq(X->idx_cols[index],s1->idx_cols[j],"sparse batch allocator data test")); index++; } for(int j = 0; j < b.CURRENT_BATCH_CV_Y->size; j++) { assert(test_eq(y->data[index_y],s2->data[j],"sparse batch allocator data test")); assert(test_eq(y->idx_cols[index_y],s2->idx_cols[j],"sparse batch allocator data test")); index_y++; } assert(test_eq(X->ptr_rows[index_rows + b.CURRENT_BATCH_CV->rows] - X->ptr_rows[index_rows],b.CURRENT_BATCH_CV->size,"test sparse batch size")); assert(test_eq((int)(X->ptr_rows[index_rows + b.CURRENT_BATCH_CV->rows] - X->ptr_rows[index_rows])*sizeof(float),(int)b.CURRENT_BATCH_CV->bytes,"test sparse batch bytes")); assert(test_eq((int)b.CURRENT_BATCH_CV->idx_bytes,(int)b.CURRENT_BATCH_CV->bytes,"test sparse batch bytes")); assert(test_eq((int)(X->ptr_rows[index_rows + b.CURRENT_BATCH_CV->rows] - X->ptr_rows[index_rows])*sizeof(int),(int)b.CURRENT_BATCH_CV->idx_bytes,"test sparse batch bytes")); assert(test_eq((int)(b.CURRENT_BATCH_CV->rows +1)*sizeof(int),(int)b.CURRENT_BATCH_CV->ptr_bytes,"test sparse batch bytes")); assert(test_eq(y->ptr_rows[index_rows + b.CURRENT_BATCH_CV_Y->rows] - y->ptr_rows[index_rows],b.CURRENT_BATCH_CV_Y->size,"test sparse batch size")); assert(test_eq((int)(y->ptr_rows[index_rows + b.CURRENT_BATCH_CV_Y->rows] - y->ptr_rows[index_rows])*sizeof(float),(int)b.CURRENT_BATCH_CV_Y->bytes,"test sparse batch bytes")); assert(test_eq((int)b.CURRENT_BATCH_CV_Y->idx_bytes,(int)b.CURRENT_BATCH_CV_Y->bytes,"test sparse batch bytes")); assert(test_eq((int)(y->ptr_rows[index_rows + b.CURRENT_BATCH_CV_Y->rows] - y->ptr_rows[index_rows])*sizeof(int),(int)b.CURRENT_BATCH_CV_Y->idx_bytes,"test sparse batch bytes")); assert(test_eq((int)(b.CURRENT_BATCH_CV_Y->rows +1)*sizeof(int),(int)b.CURRENT_BATCH_CV_Y->ptr_bytes,"test sparse batch bytes")); for(int j = 0; j < b.CURRENT_BATCH_CV_Y->rows+1; j++) { assert(test_eq(X->ptr_rows[index_rows],s1->ptr_rows[j] + row_ptr_offset,"sparse batch allocator data test")); assert(test_eq(y->ptr_rows[index_rows],s2->ptr_rows[j]+ row_ptr_offset_y,"sparse batch allocator data test")); index_rows++; } index_rows--; row_ptr_offset += b.CURRENT_BATCH_CV->size; 
row_ptr_offset_y += b.CURRENT_BATCH_CV_Y->size; if((i +1) == b.TOTAL_BATCHES_CV) assert(test_eq(b.CURRENT_BATCH_CV->rows,(X->rows - (int)ceil((X->rows*0.8))) % b.BATCH_SIZE_CV,"after all sparse batches test: partial batch size")); hipFree(s1->data); hipFree(s1->idx_cols); hipFree(s1->ptr_rows); free(s1); } gpus.dot_sparse(b.CURRENT_BATCH_CV, B, out); ASSERT(sum(out) > -8000 && sum(out) < 8000, "sparse batching sparse dot output test"); b.allocate_next_cv_batch_async(); b.replace_current_cv_batch_with_next(); hipFree(B->data); hipFree(out->data); free(out); free(B); } } */ return 0; }
2b5288fcdc8ad5ce4361a3cba83630b933b4f49e.cu
#include <basicOps.cuh> #include <clusterNet.h> #include <assert.h> #include <stdio.h> #include <util.cuh> #include <batchAllocator.h> #include <basicOps.cuh> using std::cout; using std::endl; int run_batchAllocator_test(ClusterNet gpus) { Matrix *m1; Matrix *m2; Matrix *m_host; Matrix *m_host2; Matrix *m_host_dist; Matrix *m_host2_dist; //batch allocator test m1 = to_host(arange(10000,784)); m2 = to_host(arange(10000,1)); BatchAllocator b = BatchAllocator(); b.init(m1,m2,0.20,128,256); assert(test_matrix(b.CURRENT_BATCH,128,784)); assert(test_matrix(b.CURRENT_BATCH_Y,128,1)); assert(test_matrix(b.CURRENT_BATCH_CV,256,784)); assert(test_matrix(b.CURRENT_BATCH_CV_Y,256,1)); BatchAllocator b_dist = BatchAllocator(); b_dist.init(m1,m2,0.2,128,256,gpus,Distributed_weights); assert(test_matrix(b_dist.CURRENT_BATCH,128,784)); assert(test_matrix(b_dist.CURRENT_BATCH_Y,128,1)); assert(test_matrix(b_dist.CURRENT_BATCH_CV,256,784)); assert(test_matrix(b_dist.CURRENT_BATCH_CV_Y,256,1)); int value = 0; int value_y = 0; for(int epoch = 0; epoch < 2; epoch++) { value = 0; value_y = 0; for(int batchno = 0; batchno < b.TOTAL_BATCHES; batchno++) { assert(b.CURRENT_BATCH->rows == 128 || b.CURRENT_BATCH->rows == 8000%128); assert(b_dist.CURRENT_BATCH->rows == 128 || b_dist.CURRENT_BATCH->rows == 8000%128); m_host = to_host(b.CURRENT_BATCH); m_host2 = to_host(b.CURRENT_BATCH_Y); b.broadcast_batch_to_processes(); m_host_dist = to_host(b_dist.CURRENT_BATCH); m_host2_dist = to_host(b_dist.CURRENT_BATCH_Y); b_dist.broadcast_batch_to_processes(); for(int i = 0; i < b.CURRENT_BATCH->rows*784; i++) { assert(test_eq(m_host->data[i],(float)value,i,i,"Batch test")); assert(test_eq(m_host_dist->data[i],(float)value,i,i,"Batch test")); value++; } b.allocate_next_batch_async(); b_dist.allocate_next_batch_async(); for(int i = 0; i < b.CURRENT_BATCH->rows; i++) { assert(test_eq(m_host2->data[i],(float)value_y,i,i,"Batch test")); assert(test_eq(m_host2_dist->data[i],(float)value_y,i,i,"Batch test")); value_y++; } b.replace_current_batch_with_next(); b_dist.replace_current_batch_with_next(); } assert(test_eq(value,6272000,"Batch test train 128")); assert(test_eq(value_y,8000,"Batch test train 128")); for(int batchno = 0; batchno < b.TOTAL_BATCHES_CV; batchno++) { assert(b.CURRENT_BATCH_CV->rows == 256 || b.CURRENT_BATCH_CV->rows == 2000%256); assert(b_dist.CURRENT_BATCH_CV->rows == 256 || b_dist.CURRENT_BATCH_CV->rows == 2000%256); m_host = to_host(b.CURRENT_BATCH_CV); m_host2 = to_host(b.CURRENT_BATCH_CV_Y); b.broadcast_batch_cv_to_processes(); m_host_dist = to_host(b_dist.CURRENT_BATCH_CV); m_host2_dist = to_host(b_dist.CURRENT_BATCH_CV_Y); b_dist.broadcast_batch_cv_to_processes(); for(int i = 0; i < b.CURRENT_BATCH_CV->rows*784; i++) { assert(test_eq(m_host->data[i],(float)value,"Batch test")); assert(test_eq(m_host_dist->data[i],(float)value,"Batch test")); value++; } b.allocate_next_cv_batch_async(); b_dist.allocate_next_cv_batch_async(); for(int i = 0; i < b.CURRENT_BATCH_CV->rows; i++) { assert(test_eq(m_host2->data[i],(float)value_y,"Batch test")); assert(test_eq(m_host2_dist->data[i],(float)value_y,"Batch test")); value_y++; } b.replace_current_cv_batch_with_next(); b_dist.replace_current_cv_batch_with_next(); } } assert(test_eq(value,7840000,"Batch test")); assert(test_eq(value_y,10000,"Batch test")); m1 = to_host(arange(70000,784)); m2 = to_host(arange(70000,10)); b = BatchAllocator(); b.init(m1,m2,0.20,128,512); assert(test_matrix(b.CURRENT_BATCH,128,784)); assert(test_matrix(b.CURRENT_BATCH_Y,128,10)); 
assert(test_matrix(b.CURRENT_BATCH_CV,512,784)); assert(test_matrix(b.CURRENT_BATCH_CV_Y,512,10)); b_dist = BatchAllocator(); b_dist.init(m1,m2,0.2,128,512,gpus,Distributed_weights); assert(test_matrix(b_dist.CURRENT_BATCH,128,784)); assert(test_matrix(b_dist.CURRENT_BATCH_Y,128,10)); assert(test_matrix(b_dist.CURRENT_BATCH_CV,512,784)); assert(test_matrix(b_dist.CURRENT_BATCH_CV_Y,512,10)); for(int epoch = 0; epoch < 2; epoch++) { value = 0; value_y = 0; for(int batchno = 0; batchno < b.TOTAL_BATCHES; batchno++) { assert(b.CURRENT_BATCH->rows == 128 || b.CURRENT_BATCH->rows == 56000%128); assert(b_dist.CURRENT_BATCH->rows == 128 || b_dist.CURRENT_BATCH->rows == 56000%128); m_host = to_host(b.CURRENT_BATCH); m_host2 = to_host(b.CURRENT_BATCH_Y); b.broadcast_batch_to_processes(); m_host_dist = to_host(b_dist.CURRENT_BATCH); m_host2_dist = to_host(b_dist.CURRENT_BATCH_Y); b_dist.broadcast_batch_to_processes(); for(int i = 0; i < b.CURRENT_BATCH->rows*784; i++) { assert(test_eq(m_host->data[i],(float)value,"Batch test")); assert(test_eq(m_host_dist->data[i],(float)value,"Batch test")); value++; } b.allocate_next_batch_async(); b_dist.allocate_next_batch_async(); for(int i = 0; i < b.CURRENT_BATCH->rows*10; i++) { assert(test_eq(m_host2->data[i],(float)value_y,"Batch test")); assert(test_eq(m_host2_dist->data[i],(float)value_y,"Batch test")); value_y++; } b.replace_current_batch_with_next(); b_dist.replace_current_batch_with_next(); } assert(test_eq(value,43904000,"Batch test")); assert(test_eq(value_y,560000,"Batch test")); for(int batchno = 0; batchno < b.TOTAL_BATCHES_CV; batchno++) { assert(b.CURRENT_BATCH_CV->rows == 512 || b.CURRENT_BATCH_CV->rows == 14000%512); assert(b_dist.CURRENT_BATCH_CV->rows == 512 || b_dist.CURRENT_BATCH_CV->rows == 14000%512); m_host = to_host(b.CURRENT_BATCH_CV); m_host2 = to_host(b.CURRENT_BATCH_CV_Y); b.broadcast_batch_cv_to_processes(); m_host_dist = to_host(b_dist.CURRENT_BATCH_CV); m_host2_dist = to_host(b_dist.CURRENT_BATCH_CV_Y); b_dist.broadcast_batch_cv_to_processes(); for(int i = 0; i < b.CURRENT_BATCH_CV->rows*784; i++) { assert(test_eq(m_host->data[i],(float)value,"Batch test")); assert(test_eq(m_host_dist->data[i],(float)value,"Batch test")); value++; } b.allocate_next_cv_batch_async(); b_dist.allocate_next_cv_batch_async(); for(int i = 0; i < b.CURRENT_BATCH_CV->rows*10; i++) { assert(test_eq(m_host2->data[i],(float)value_y,"Batch test")); assert(test_eq(m_host2_dist->data[i],(float)value_y,"Batch test")); value_y++; } b.replace_current_cv_batch_with_next(); b_dist.replace_current_cv_batch_with_next(); } assert(test_eq(value,54880000,"Batch test")); assert(test_eq(value_y,700000,"Batch test")); } char buff[1024] = {0}; ssize_t len = ::readlink("/proc/self/exe", buff, sizeof(buff)-1); std::string path = std::string(buff); replace(path,"/build/testSuite.out","/tests/"); /* Matrix *X; Matrix *y; if(gpus.MYGPUID == 0) { X = read_sparse_hdf5((path + "crowdflower_X_test.hdf5").c_str()); y = read_sparse_hdf5((path + "crowdflower_y_test.hdf5").c_str()); } MPI_Barrier(MPI_COMM_WORLD); if(gpus.MYGPUID == 1) { X = read_sparse_hdf5((path + "crowdflower_X_test.hdf5").c_str()); y = read_sparse_hdf5((path + "crowdflower_y_test.hdf5").c_str()); } MPI_Barrier(MPI_COMM_WORLD); if(gpus.MYGPUID == 2) { X = read_sparse_hdf5((path + "crowdflower_X_test.hdf5").c_str()); y = read_sparse_hdf5((path + "crowdflower_y_test.hdf5").c_str()); } b = BatchAllocator(); b.init(X,y,0.20,128,256,gpus, Distributed_weights_sparse); assert(test_eq(b.CURRENT_BATCH->rows,128,"sparse 
distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH->cols,9000,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_Y->rows,128,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_Y->cols,24,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_CV->rows,256,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_CV->cols,9000,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_CV_Y->rows,256,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_CV_Y->cols,24,"sparse distributed batch allocator test")); int index = 0; int index_y = 0; int index_rows = 0; int row_ptr_offset = 0; int row_ptr_offset_y = 0; for(int epoch = 0; epoch < 3; epoch++) { index_rows = 0; index = 0; index_y = 0; row_ptr_offset = 0; row_ptr_offset_y = 0; for(int i = 0; i < b.TOTAL_BATCHES; i++) { Matrix *s1 = to_host(b.CURRENT_BATCH); Matrix *s2 = to_host(b.CURRENT_BATCH_Y); Matrix *B = ones(b.CURRENT_BATCH->cols,20); Matrix *out = zeros(b.CURRENT_BATCH->rows, B->cols); b.broadcast_batch_to_processes(); for(int j = 0; j < b.CURRENT_BATCH->size; j++) { assert(test_eq(X->data[index],s1->data[j],"sparse batch allocator data test")); assert(test_eq(X->idx_cols[index],s1->idx_cols[j],"sparse batch allocator data test")); index++; } for(int j = 0; j < b.CURRENT_BATCH_Y->size; j++) { assert(test_eq(y->data[index_y],s2->data[j],"sparse batch allocator data test")); assert(test_eq(y->idx_cols[index_y],s2->idx_cols[j],"sparse batch allocator data test")); index_y++; } assert(test_eq(X->ptr_rows[index_rows + b.CURRENT_BATCH->rows] - X->ptr_rows[index_rows],b.CURRENT_BATCH->size,"test sparse batch size")); assert(test_eq((int)(X->ptr_rows[index_rows + b.CURRENT_BATCH->rows] - X->ptr_rows[index_rows])*sizeof(float),(int)b.CURRENT_BATCH->bytes,"test sparse batch bytes")); assert(test_eq((int)b.CURRENT_BATCH->idx_bytes,(int)b.CURRENT_BATCH->bytes,"test sparse batch bytes")); assert(test_eq((int)(X->ptr_rows[index_rows + b.CURRENT_BATCH->rows] - X->ptr_rows[index_rows])*sizeof(int),(int)b.CURRENT_BATCH->idx_bytes,"test sparse batch bytes")); assert(test_eq((int)(b.CURRENT_BATCH->rows +1)*sizeof(int),(int)b.CURRENT_BATCH->ptr_bytes,"test sparse batch bytes")); assert(test_eq(y->ptr_rows[index_rows + b.CURRENT_BATCH_Y->rows] - y->ptr_rows[index_rows],b.CURRENT_BATCH_Y->size,"test sparse batch size")); assert(test_eq((int)(y->ptr_rows[index_rows + b.CURRENT_BATCH_Y->rows] - y->ptr_rows[index_rows])*sizeof(float),(int)b.CURRENT_BATCH_Y->bytes,"test sparse batch bytes")); assert(test_eq((int)b.CURRENT_BATCH_Y->idx_bytes,(int)b.CURRENT_BATCH_Y->bytes,"test sparse batch bytes")); assert(test_eq((int)(y->ptr_rows[index_rows + b.CURRENT_BATCH_Y->rows] - y->ptr_rows[index_rows])*sizeof(int),(int)b.CURRENT_BATCH_Y->idx_bytes,"test sparse batch bytes")); assert(test_eq((int)(b.CURRENT_BATCH_Y->rows +1)*sizeof(int),(int)b.CURRENT_BATCH_Y->ptr_bytes,"test sparse batch bytes")); for(int j = 0; j < b.CURRENT_BATCH_Y->rows+1; j++) { assert(test_eq(X->ptr_rows[index_rows],s1->ptr_rows[j] + row_ptr_offset,"sparse batch allocator data test")); assert(test_eq(y->ptr_rows[index_rows],s2->ptr_rows[j]+ row_ptr_offset_y,"sparse batch allocator data test")); index_rows++; } index_rows--; row_ptr_offset += b.CURRENT_BATCH->size; row_ptr_offset_y += b.CURRENT_BATCH_Y->size; gpus.dot_sparse(b.CURRENT_BATCH, B, out); ASSERT(sum(out) > -15000 && sum(out) < 15000, "sparse batching sparse dot output test"); if((i +1) == 
b.TOTAL_BATCHES) assert(test_eq(b.CURRENT_BATCH->rows,((int)ceil((X->rows*0.8))) % b.BATCH_SIZE,"after all sparse batches test: partial batch size")); b.allocate_next_batch_async(); b.replace_current_batch_with_next(); cudaFree(s1->data); cudaFree(s1->idx_cols); cudaFree(s1->ptr_rows); free(s1); cudaFree(B->data); cudaFree(out->data); free(out); free(B); } assert(test_eq(index_rows+1,((int)ceil((X->rows*0.8))) +1,"after all sparse batches test: rows idx.")); assert(test_eq(index_y,y->ptr_rows[((int)ceil((y->rows*0.8))) ],"after all sparse batches test: data idx y")); assert(test_eq(index,X->ptr_rows[((int)ceil((y->rows*0.8)))],"after all sparse batches test: data idx X")); for(int i = 0; i < b.TOTAL_BATCHES_CV; i++) { Matrix *s1 = to_host(b.CURRENT_BATCH_CV); Matrix *s2 = to_host(b.CURRENT_BATCH_CV_Y); Matrix *B = ones(b.CURRENT_BATCH_CV->cols,20); Matrix *out = zeros(b.CURRENT_BATCH_CV->rows, B->cols); b.broadcast_batch_cv_to_processes(); for(int j = 0; j < b.CURRENT_BATCH_CV->size; j++) { assert(test_eq(X->data[index],s1->data[j],"sparse batch allocator data test")); assert(test_eq(X->idx_cols[index],s1->idx_cols[j],"sparse batch allocator data test")); index++; } for(int j = 0; j < b.CURRENT_BATCH_CV_Y->size; j++) { assert(test_eq(y->data[index_y],s2->data[j],"sparse batch allocator data test")); assert(test_eq(y->idx_cols[index_y],s2->idx_cols[j],"sparse batch allocator data test")); index_y++; } assert(test_eq(X->ptr_rows[index_rows + b.CURRENT_BATCH_CV->rows] - X->ptr_rows[index_rows],b.CURRENT_BATCH_CV->size,"test sparse batch size")); assert(test_eq((int)(X->ptr_rows[index_rows + b.CURRENT_BATCH_CV->rows] - X->ptr_rows[index_rows])*sizeof(float),(int)b.CURRENT_BATCH_CV->bytes,"test sparse batch bytes")); assert(test_eq((int)b.CURRENT_BATCH_CV->idx_bytes,(int)b.CURRENT_BATCH_CV->bytes,"test sparse batch bytes")); assert(test_eq((int)(X->ptr_rows[index_rows + b.CURRENT_BATCH_CV->rows] - X->ptr_rows[index_rows])*sizeof(int),(int)b.CURRENT_BATCH_CV->idx_bytes,"test sparse batch bytes")); assert(test_eq((int)(b.CURRENT_BATCH_CV->rows +1)*sizeof(int),(int)b.CURRENT_BATCH_CV->ptr_bytes,"test sparse batch bytes")); assert(test_eq(y->ptr_rows[index_rows + b.CURRENT_BATCH_CV_Y->rows] - y->ptr_rows[index_rows],b.CURRENT_BATCH_CV_Y->size,"test sparse batch size")); assert(test_eq((int)(y->ptr_rows[index_rows + b.CURRENT_BATCH_CV_Y->rows] - y->ptr_rows[index_rows])*sizeof(float),(int)b.CURRENT_BATCH_CV_Y->bytes,"test sparse batch bytes")); assert(test_eq((int)b.CURRENT_BATCH_CV_Y->idx_bytes,(int)b.CURRENT_BATCH_CV_Y->bytes,"test sparse batch bytes")); assert(test_eq((int)(y->ptr_rows[index_rows + b.CURRENT_BATCH_CV_Y->rows] - y->ptr_rows[index_rows])*sizeof(int),(int)b.CURRENT_BATCH_CV_Y->idx_bytes,"test sparse batch bytes")); assert(test_eq((int)(b.CURRENT_BATCH_CV_Y->rows +1)*sizeof(int),(int)b.CURRENT_BATCH_CV_Y->ptr_bytes,"test sparse batch bytes")); for(int j = 0; j < b.CURRENT_BATCH_CV_Y->rows+1; j++) { assert(test_eq(X->ptr_rows[index_rows],s1->ptr_rows[j] + row_ptr_offset,"sparse batch allocator data test")); assert(test_eq(y->ptr_rows[index_rows],s2->ptr_rows[j]+ row_ptr_offset_y,"sparse batch allocator data test")); index_rows++; } index_rows--; row_ptr_offset += b.CURRENT_BATCH_CV->size; row_ptr_offset_y += b.CURRENT_BATCH_CV_Y->size; gpus.dot_sparse(b.CURRENT_BATCH_CV, B, out); ASSERT(sum(out) > -25000 && sum(out) < 25000, "sparse batching sparse dot output test"); if((i +1) == b.TOTAL_BATCHES_CV) assert(test_eq(b.CURRENT_BATCH_CV->rows,(X->rows - (int)ceil((X->rows*0.8))) % 
b.BATCH_SIZE_CV,"after all sparse batches test: partial batch size")); b.allocate_next_cv_batch_async(); b.replace_current_cv_batch_with_next(); cudaFree(s1->data); cudaFree(s1->idx_cols); cudaFree(s1->ptr_rows); free(s1); cudaFree(B->data); cudaFree(out->data); free(out); free(B); } assert(test_eq(index_rows+1,X->rows +1,"after all sparse batches test: rows idx.")); assert(test_eq(index_y,y->ptr_rows[y->rows ],"after all sparse batches test: data idx y")); assert(test_eq(index,X->ptr_rows[y->rows],"after all sparse batches test: data idx X")); } if(gpus.MYGPUID != 0) { X = empty_sparse(1,1,1); y = empty_sparse(1,1,1); } b = BatchAllocator(); b.init(X,y,0.20,33,77,gpus, Distributed_weights_sparse); assert(test_eq(b.CURRENT_BATCH->rows,33,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH->cols,9000,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_Y->rows,33,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_Y->cols,24,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_CV->rows,77,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_CV->cols,9000,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_CV_Y->rows,77,"sparse distributed batch allocator test")); assert(test_eq(b.CURRENT_BATCH_CV_Y->cols,24,"sparse distributed batch allocator test")); for(int epoch = 0; epoch < 3; epoch++) { index_rows = 0; index = 0; index_y = 0; row_ptr_offset = 0; row_ptr_offset_y = 0; for(int i = 0; i < b.TOTAL_BATCHES; i++) { Matrix *B = ones(b.CURRENT_BATCH->rows,20); Matrix *out = zeros(b.CURRENT_BATCH->cols, B->cols); b.broadcast_batch_to_processes(); if(gpus.MYGPUID == 0) { Matrix *s1 = to_host(b.CURRENT_BATCH); Matrix *s2 = to_host(b.CURRENT_BATCH_Y); Matrix *m3 = gpus.sparse_to_dense(b.CURRENT_BATCH); Matrix *s3 = to_host((gpus.dense_to_sparse(m3))); //cout << sum(m3) << " vs " << sum(s3) << endl; for(int i = 0; i < s3->size; i++) { cout << s1->idx_cols[i] << " vs " << s3->idx_cols[i] << endl; //assert(test_eq(s1->data[i],s3->data[i],"dense to sparse and back equality.")); //assert(test_eq(s1->idx_cols[i],s3->idx_cols[i],"dense to sparse and back equality.")); } cout << "size: " << s1->size << endl; for(int i = 0; i < s3->rows+1; i++) cout << s1->ptr_rows[i] << " vs " << s3->ptr_rows[i] << endl; //assert(test_eq(s1->ptr_rows[i],s3->ptr_rows[i],"dense to sparse and back equality.")); for(int j = 0; j < b.CURRENT_BATCH->size; j++) { assert(test_eq(X->data[index],s1->data[j],"sparse batch allocator data test")); assert(test_eq(X->idx_cols[index],s1->idx_cols[j],"sparse batch allocator data test")); index++; } for(int j = 0; j < b.CURRENT_BATCH_Y->size; j++) { assert(test_eq(y->data[index_y],s2->data[j],"sparse batch allocator data test")); assert(test_eq(y->idx_cols[index_y],s2->idx_cols[j],"sparse batch allocator data test")); index_y++; } assert(test_eq(X->ptr_rows[index_rows + b.CURRENT_BATCH->rows] - X->ptr_rows[index_rows],b.CURRENT_BATCH->size,"test sparse batch size")); assert(test_eq((int)(X->ptr_rows[index_rows + b.CURRENT_BATCH->rows] - X->ptr_rows[index_rows])*sizeof(float),(int)b.CURRENT_BATCH->bytes,"test sparse batch bytes")); assert(test_eq((int)(X->ptr_rows[index_rows + b.CURRENT_BATCH->rows] - X->ptr_rows[index_rows])*sizeof(int),(int)b.CURRENT_BATCH->idx_bytes,"test sparse batch bytes")); assert(test_eq((int)(b.CURRENT_BATCH->rows +1)*sizeof(int),(int)b.CURRENT_BATCH->ptr_bytes,"test sparse batch bytes")); for(int j = 0; j < 
b.CURRENT_BATCH->rows+1; j++) { assert(test_eq(X->ptr_rows[index_rows],s1->ptr_rows[j] + row_ptr_offset,"sparse batch allocator data test")); assert(test_eq(y->ptr_rows[index_rows],s2->ptr_rows[j]+ row_ptr_offset_y,"sparse batch allocator data test")); index_rows++; } index_rows--; row_ptr_offset += b.CURRENT_BATCH->size; row_ptr_offset_y += b.CURRENT_BATCH_Y->size; cudaFree(s1->data); cudaFree(s1->idx_cols); cudaFree(s1->ptr_rows); free(s1); cudaFree(s2->data); cudaFree(s2->idx_cols); cudaFree(s2->ptr_rows); free(s2); } cout << "pre Tdot" << endl; gpus.Tdot_sparse(b.CURRENT_BATCH, B, out); cout << "post Tdot" << endl; cout << sum(out) << endl; //ASSERT(sum(out) > -3000 && sum(out) < 3000, "sparse batching sparse dot output test"); b.allocate_next_batch_async(); b.replace_current_batch_with_next(); cudaFree(B->data); cudaFree(out->data); free(out); free(B); } for(int i = 0; i < b.TOTAL_BATCHES_CV; i++) { Matrix *B = ones(b.CURRENT_BATCH_CV->cols,20); Matrix *out = zeros(b.CURRENT_BATCH_CV->rows, B->cols); b.broadcast_batch_cv_to_processes(); if(gpus.MYGPUID == 0) { Matrix *s1 = to_host(b.CURRENT_BATCH_CV); Matrix *s2 = to_host(b.CURRENT_BATCH_CV_Y); for(int j = 0; j < b.CURRENT_BATCH_CV->size; j++) { assert(test_eq(X->data[index],s1->data[j],"sparse batch allocator data test")); assert(test_eq(X->idx_cols[index],s1->idx_cols[j],"sparse batch allocator data test")); index++; } for(int j = 0; j < b.CURRENT_BATCH_CV_Y->size; j++) { assert(test_eq(y->data[index_y],s2->data[j],"sparse batch allocator data test")); assert(test_eq(y->idx_cols[index_y],s2->idx_cols[j],"sparse batch allocator data test")); index_y++; } assert(test_eq(X->ptr_rows[index_rows + b.CURRENT_BATCH_CV->rows] - X->ptr_rows[index_rows],b.CURRENT_BATCH_CV->size,"test sparse batch size")); assert(test_eq((int)(X->ptr_rows[index_rows + b.CURRENT_BATCH_CV->rows] - X->ptr_rows[index_rows])*sizeof(float),(int)b.CURRENT_BATCH_CV->bytes,"test sparse batch bytes")); assert(test_eq((int)b.CURRENT_BATCH_CV->idx_bytes,(int)b.CURRENT_BATCH_CV->bytes,"test sparse batch bytes")); assert(test_eq((int)(X->ptr_rows[index_rows + b.CURRENT_BATCH_CV->rows] - X->ptr_rows[index_rows])*sizeof(int),(int)b.CURRENT_BATCH_CV->idx_bytes,"test sparse batch bytes")); assert(test_eq((int)(b.CURRENT_BATCH_CV->rows +1)*sizeof(int),(int)b.CURRENT_BATCH_CV->ptr_bytes,"test sparse batch bytes")); assert(test_eq(y->ptr_rows[index_rows + b.CURRENT_BATCH_CV_Y->rows] - y->ptr_rows[index_rows],b.CURRENT_BATCH_CV_Y->size,"test sparse batch size")); assert(test_eq((int)(y->ptr_rows[index_rows + b.CURRENT_BATCH_CV_Y->rows] - y->ptr_rows[index_rows])*sizeof(float),(int)b.CURRENT_BATCH_CV_Y->bytes,"test sparse batch bytes")); assert(test_eq((int)b.CURRENT_BATCH_CV_Y->idx_bytes,(int)b.CURRENT_BATCH_CV_Y->bytes,"test sparse batch bytes")); assert(test_eq((int)(y->ptr_rows[index_rows + b.CURRENT_BATCH_CV_Y->rows] - y->ptr_rows[index_rows])*sizeof(int),(int)b.CURRENT_BATCH_CV_Y->idx_bytes,"test sparse batch bytes")); assert(test_eq((int)(b.CURRENT_BATCH_CV_Y->rows +1)*sizeof(int),(int)b.CURRENT_BATCH_CV_Y->ptr_bytes,"test sparse batch bytes")); for(int j = 0; j < b.CURRENT_BATCH_CV_Y->rows+1; j++) { assert(test_eq(X->ptr_rows[index_rows],s1->ptr_rows[j] + row_ptr_offset,"sparse batch allocator data test")); assert(test_eq(y->ptr_rows[index_rows],s2->ptr_rows[j]+ row_ptr_offset_y,"sparse batch allocator data test")); index_rows++; } index_rows--; row_ptr_offset += b.CURRENT_BATCH_CV->size; row_ptr_offset_y += b.CURRENT_BATCH_CV_Y->size; if((i +1) == b.TOTAL_BATCHES_CV) 
assert(test_eq(b.CURRENT_BATCH_CV->rows,(X->rows - (int)ceil((X->rows*0.8))) % b.BATCH_SIZE_CV,"after all sparse batches test: partial batch size")); cudaFree(s1->data); cudaFree(s1->idx_cols); cudaFree(s1->ptr_rows); free(s1); } gpus.dot_sparse(b.CURRENT_BATCH_CV, B, out); ASSERT(sum(out) > -8000 && sum(out) < 8000, "sparse batching sparse dot output test"); b.allocate_next_cv_batch_async(); b.replace_current_cv_batch_with_next(); cudaFree(B->data); cudaFree(out->data); free(out); free(B); } } */ return 0; }
1577bb18f35cf9cfb72dc682e7b1daee540f6ce7.hip
// !!! This is a file automatically generated by hipify!!! // This file defines a CUDA benchmark which issues multiple kernels to a single // stream before waiting for all kernels to complete. The configuration for the // kernels is taken exclusively from the additional_info field in the // InitializationParameters struct. The actual kernels will simply be instances // of the same kernel as in the timer_spin benchmark. This benchmark ignores // all fields in its initialization parameters apart from cuda_device and // additional_info. // // The format of the necessary additional_info field is as follows. Each object // in the "actions" list must have a type that is one of "kernel", "malloc", // "free", "memset", "memcpy", or "synchronize". Memory operations such as // malloc, free, memset, and memcpy operate on buffers separate from each other. // For example, a malloc doesn't need to precede a memset, because memset // buffers will be allocated during initialization. The only limitation is that // only a small number of unbalanced malloc and free operations are allowed. // Any unfreed mallocs from these actions will be freed during benchmark // cleanup. Synchronization actions are available solely to experiment with // scheduling, and are not necessary for the task. A stream-synchronization // request will be issued at the end of all actions regardless of whether an // explicit, additional synchronization action was carried out. // For more details about parameters for each action, see the annotated JSON // structure below: /* "additional_info": { "use_null_stream": <Boolean, defaults to false, set to true to use the null stream rather than the default stream>, "actions": [ { "delay": <A floating-point number of seconds to sleep before starting this action. Defaults to 0.0, which will insert no sleep at all.>, "type": <A string, from the list given above.>, "label": <A string, a label for this action.>, "parameters": <A JSON object with action-specific parameters.> }, { "type": "kernel", "label": "Kernel 1", "parameters": { "type": <A string: "timer_spin" or "counter_spin". Defaults to "timer_spin">, "duration": <If "type" is "timer_spin", this will be the number of nanoseconds to run the kernel. If type is "counter_spin", this will be the number of loop iterations to run.>, "shared_memory_size": <The number of shared 32-bit integers to use. Defaults to 0. Must be 0, 4096, 8192, or 10240.>, "block_count": <The number of thread blocks to use. Defaults to the value given in the benchmark parameters.>, "thread_count": <The number of threads per block to use. Defaults to the value given in the benchmark parameters.> } }, { "type": "malloc", "label": "Malloc 1", "parameters": { "host": <Boolean. Defaults to false. If true, will allocate host memory.>, "size": <Number of bytes to allocate> } }, { "type": "free", "label": "Free 1", "parameters": { "host": <Boolean. Defaults to false. If true, will free host memory. The entire "parameters" block can be omitted here for the default.> } }, { "type": "memset", "label": "Memset 1", "parameters": { "async": <Boolean. Defaults to true. If false, will issue a null-stream memset regardless of use_null_stream's value.>, "size": <Number of bytes to set to 0> } }, { "type": "memcpy", "label": "Memcpy 1", "parameters": { "async": <Boolean. Defaults to true.
If false, issues a null-stream memcpy regardless of use_null_stream's value.>, "size": <Number of bytes to copy>, "direction": <Either "deviceToDevice", "deviceToHost", or "hostToDevice"> } }, { "type": "synchronize", "label": "Sync 1", "parameters": { "device": <Boolean. Defaults to false (parameters can be omitted here entirely, too). If true, runs a hipDeviceSynchronize rather than hipStreamSynchronize.> } } ] } */ // Actions are issued to the stream in the same order that they're specified // in the "actions" list. #include <hip/hip_runtime.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include "benchmark_gpu_utilities.h" #include "library_interface.h" #include "third_party/cJSON.h" // This specifies the maximum number of un-freed malloc actions that can occur // before further allocations return an error instead. Any list with this many // or fewer (balanced) malloc and free actions can run indefinitely. #define MAX_MEMORY_ALLOCATION_COUNT (10) // This specifies the number of pre-allocated buffers that are allocated during // initialization, so that a number of free actions can be used without a // preceding malloc. This can be at most MAX_MEMORY_ALLOCATION_COUNT. #define INITIAL_ALLOCATION_COUNT (4) // This specifies the size, in bytes, of the pre-allocated buffers. #define INITIAL_ALLOCATION_SIZE (1024) // This macro is used to create functions that statically use predefined // amounts of shared memory. This is used by the GENERATE_SPIN_KERNEL macro. #define GENERATE_SHARED_MEMORY_FUNCTION(amount) \ static __device__ uint32_t UseSharedMemory_##amount(void) { \ __shared__ uint32_t shared_array[(amount)]; \ uint32_t elements_per_thread, i; \ elements_per_thread = (amount) / blockDim.x; \ for (i = 0; i < elements_per_thread; i++) { \ shared_array[threadIdx.x * elements_per_thread + i] = threadIdx.x; \ } \ return shared_array[threadIdx.x * elements_per_thread]; \ } // Generates kernels that use the given amount of shared memory. Kernels have // names like SharedMemGPUSpin_<amount>, and take the following parameters: // (int counter, uint64_t duration, uint64_t *block_times, // uint32_t *block_smids, uint64_t *junk). If the "counter" parameter is // nonzero, then a constant amount of computation will be carried out rather // than waiting for a constant amount of time. The "junk" parameter is used to // prevent optimizations, and must be NULL. Otherwise, this kernel operates // similarly to the simpler GPUSpin kernel in stream_action.cu. This WILL NOT // work for 0 bytes of shared memory--that's what the plain GPUSpin in // stream_action.cu is for.
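// As an illustration of the configuration format described at the top of this
// file, the following is a hypothetical additional_info value (all labels,
// durations, and sizes are arbitrary example values, not taken from any real
// benchmark configuration):
/*
"additional_info": {
  "use_null_stream": false,
  "actions": [
    {
      "type": "kernel",
      "label": "Spin 1 ms",
      "parameters": {
        "type": "timer_spin",
        "duration": 1000000,
        "shared_memory_size": 4096
      }
    },
    {
      "delay": 0.5,
      "type": "memcpy",
      "label": "Copy 1 MB to host",
      "parameters": {
        "async": true,
        "size": 1048576,
        "direction": "deviceToHost"
      }
    },
    {
      "type": "synchronize",
      "label": "Drain stream"
    }
  ]
}
*/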
#define GENERATE_SPIN_KERNEL(amount) \ /* Produce a function that uses shared memory */ \ GENERATE_SHARED_MEMORY_FUNCTION(amount) \ static __global__ void SharedMemGPUSpin_##amount(int use_counter, \ uint64_t duration, uint64_t *block_times, uint32_t *block_smids, \ uint64_t *junk) { \ uint32_t shared_mem_res; \ uint64_t i, accumulator = 0; \ uint64_t start_time = GlobalTimer64(); \ if (threadIdx.x == 0) { \ block_times[blockIdx.x * 2] = start_time; \ block_smids[blockIdx.x] = GetSMID(); \ } \ __syncthreads(); \ /* shared_mem_res is our thread index */ \ shared_mem_res = UseSharedMemory_##amount(); \ if (use_counter) { \ for (i = 0; i < duration; i++) { \ accumulator += i; \ } \ } else { \ while ((GlobalTimer64() - start_time) < duration) { \ continue; \ } \ } \ if (junk) *junk = accumulator; \ if (shared_mem_res == 0) { \ block_times[blockIdx.x * 2 + 1] = GlobalTimer64(); \ } \ } // This holds parameters for the kernel action. typedef struct { // The grid dimensions for this kernel. int block_count; int thread_count; // The amount of shared memory used by this kernel. int shared_memory_count; // If this is nonzero, the counter_spin kernel will be used, which performs // a constant amount of busywork computations. If this is zero, the // timer_spin kernel will be used instead, which waits until a certain number // of nanoseconds have elapsed. int use_counter_spin; // The number of either spin iterations or nanoseconds this kernel runs for // (depending on whether it is a timer spin or counter spin kernel). uint64_t duration; // Hold the times needed for a CUDA kernel. uint64_t *device_block_times; uint64_t *host_block_times; uint32_t *device_smids; uint32_t *host_smids; } KernelParameters; // This holds parameters for the hipMalloc action. typedef struct { // This is the number of bytes to allocate. uint64_t size; // If nonzero, call hipHostMalloc rather than hipMalloc. int allocate_host_memory; } MallocParameters; // This holds parameters for the hipFree action. typedef struct { // If nonzero, call hipHostFree rather than hipFree. int free_host_memory; } FreeParameters; // This holds parameters for the hipMemset action, which sets bytes to a // random 8-bit value. typedef struct { // If nonzero, then hipMemset will be called (associated with no stream), // rather than hipMemsetAsync, which will use the task's specified stream. int synchronous; // This contains the number of bytes to set. uint64_t size; } MemsetParameters; // This holds parameters for the hipMemcpy action, which copies data between // host and device, or two device buffers. typedef struct { // One of the hipMemcpyKind values. However, values 0 (host - host) and 4 // (unspecified) are not supported. hipMemcpyKind direction; // If nonzero, then hipMemcpy will be used. If 0, then hipMemcpyAsync is // used, associated with the task's stream. int synchronous; // The number of bytes to copy. uint64_t size; } MemcpyParameters; // This holds parameters for the synchronize action. typedef struct { // If this is nonzero, then hipDeviceSynchronize will be called. Otherwise, // hipStreamSynchronize is called, associated with the task's stream. int sync_device; } SyncParameters; // This is used as a tag to identify the parameters and behavior to carry out // for each action supported by the benchmark. typedef enum { ACTION_UNINITIALIZED = 0, ACTION_KERNEL, ACTION_MALLOC, ACTION_FREE, ACTION_MEMSET, ACTION_MEMCPY, ACTION_SYNC, } ActionType; // This defines the behavior and parameters for all potential actions.
typedef struct { // The number of seconds to sleep after the current action's completion, // before launching this one. double delay; // The label (typically a kernel name) to give this action. char *label; ActionType type; union { KernelParameters kernel; MallocParameters malloc; FreeParameters free; MemsetParameters memset; MemcpyParameters memcpy; SyncParameters sync; } parameters; } ActionConfig; // Holds local information for each instantiation of this benchmark. typedef struct { // The CUDA stream with which all operations will be associated. hipStream_t stream; // The CUDA stream with which copy_out operations will be associated. May // differ from the regular stream, because this will never be the NULL // stream. hipStream_t copy_out_stream; // This will be set to 1 if the stream was created and must be closed during // cleanup (it can remain 0 if the NULL stream is used). int stream_created; // The number of actions to perform per execution. int action_count; // The list of actions to perform. ActionConfig *actions; // The number of actions which are kernel launches. int kernel_count; // Information to provide to the host process about block start and end times // for each kernel action. KernelTimes *kernel_times; // A buffer of host memory for copies and memsets. May be NULL if not needed. // Is guaranteed to be the size of the largest copy or memset needed by any // action. uint8_t *host_copy_buffer; // A buffer of device memory for copies and memsets. May be NULL if not // needed. This is guaranteed to be the size of the largest copy or memset // needed by any action. uint8_t *device_copy_buffer; // This will be a secondary device buffer, but will only be allocated if a // device-to-device memory copy is used. uint8_t *device_secondary_buffer; // This is a stack of pointers to device memory allocated by hipMalloc // actions. uint8_t **device_memory_allocations; // Holds the number of pointers in the device_memory_allocations list. This // increases with each hipMalloc action and decreases with each hipFree. int device_memory_allocation_count; // This is a stack of pointers to host memory allocated by hipHostMalloc. // It works in the same way as device_memory_allocations. uint8_t **host_memory_allocations; // This is analogous to device_memory_allocation_count, but for host memory // allocations. int host_memory_allocation_count; } TaskState; // Use the macros defined in stream_action.h to generate a set of kernels using // various amounts of static shared memory. GENERATE_SPIN_KERNEL(4096); GENERATE_SPIN_KERNEL(8192); GENERATE_SPIN_KERNEL(10240); // A basic kernel that wastes GPU cycles without using shared memory. The // duration parameter specifies the number of nanoseconds to wait if // use_counter is 0. If use_counter is nonzero, duration specifies a number of // loop iterations to spin instead. The junk parameter must be NULL and is used // to prevent optimization. static __global__ void GPUSpin(int use_counter, uint64_t duration, uint64_t *block_times, uint32_t *block_smids, uint64_t *junk) { uint64_t i, accumulator = 0; uint64_t start_time = GlobalTimer64(); // Have one thread record the block's start time and SM ID. if (threadIdx.x == 0) { block_times[blockIdx.x * 2] = start_time; block_smids[blockIdx.x] = GetSMID(); } __syncthreads(); if (use_counter) { // Write to the accumulator (which must be potentially returned) to prevent // this loop from being optimized out.
for (i = 0; i < duration; i++) { accumulator += i; } } else { // Wait until the specified number of nanoseconds has elapsed. while ((GlobalTimer64() - start_time) < duration) { continue; } } // Make it look like the junk value can be used to prevent the loop updating // the accumulator from being removed by the optimizer. if (junk) *junk = accumulator; // Have one thread write the block end time (simple, but may be slightly // inaccurate if other warps finish later). if (threadIdx.x == 0) { block_times[blockIdx.x * 2 + 1] = GlobalTimer64(); } } // Frees any data and clears out an ActionConfig struct. For use during // cleanup. static void CleanupAction(ActionConfig *action) { uint64_t *tmp64; uint32_t *tmp32; if (action->label) free(action->label); if (action->type == ACTION_KERNEL) { // For now, only kernel actions require extra cleanup. tmp64 = action->parameters.kernel.device_block_times; if (tmp64) CheckCUDAError(hipFree(tmp64)); tmp64 = action->parameters.kernel.host_block_times; if (tmp64) CheckCUDAError(hipHostFree(tmp64)); tmp32 = action->parameters.kernel.device_smids; if (tmp32) CheckCUDAError(hipFree(tmp32)); tmp32 = action->parameters.kernel.host_smids; if (tmp32) CheckCUDAError(hipHostFree(tmp32)); } memset(action, 0, sizeof(*action)); } // Implements the cleanup function required by the interface, but is also used // internally to clean up during a faulty Initialize(). That's why all of the // pointers are checked to be non-NULL. This is also why it's very important to // ensure that any fields and pointers are zero before any initialization. static void Cleanup(void *data) { TaskState *state = (TaskState *) data; int i; ActionConfig *action = NULL; for (i = 0; i < state->action_count; i++) { action = state->actions + i; CleanupAction(action); } if (state->actions) free(state->actions); if (state->kernel_times) free(state->kernel_times); // The CheckCUDAError macros here are just to print a message on error, since // we can't really do any additional error handling during cleanup. if (state->stream_created) { // Remember that state->stream may be the NULL stream or may be another // reference to this same stream. In either case, we don't need to destroy // it. CheckCUDAError(hipStreamDestroy(state->copy_out_stream)); } if (state->host_copy_buffer) { CheckCUDAError(hipHostFree(state->host_copy_buffer)); } if (state->device_copy_buffer) { CheckCUDAError(hipFree(state->device_copy_buffer)); } if (state->device_secondary_buffer) { CheckCUDAError(hipFree(state->device_secondary_buffer)); } for (i = 0; i < state->device_memory_allocation_count; i++) { CheckCUDAError(hipFree(state->device_memory_allocations[i])); } if (state->device_memory_allocations) free(state->device_memory_allocations); for (i = 0; i < state->host_memory_allocation_count; i++) { CheckCUDAError(hipHostFree(state->host_memory_allocations[i])); } if (state->host_memory_allocations) free(state->host_memory_allocations); memset(state, 0, sizeof(*state)); free(state); } // Returns nonzero if all of the keys in the JSON object are in the list of // valid keys. static int VerifyJSONKeys(cJSON *object, const char* const valid_keys[], int valid_count) { int i, found; // We'll be passed a top-level object here.
object = object->child; while (object != NULL) { found = 0; if (!object->string) { printf("Found JSON object without a name in stream_action settings.\n"); return 0; } for (i = 0; i < valid_count; i++) { if (strcmp(object->string, valid_keys[i]) == 0) { found = 1; break; } } if (!found) { printf("Unexpected setting in stream_action.so settings: %s\n", object->string); return 0; } object = object->next; } return 1; } // Takes a cJSON object and returns 1 if it's true, 0 if it's false, and -1 if // it's invalid or not a boolean. Returns -1 if object is NULL. static int GetCJSONBoolean(cJSON *object) { if (!object) return -1; if (object->type == cJSON_True) return 1; if (object->type == cJSON_False) return 0; return -1; } // Since this is such a long string of code, it gets moved into a separate // function. Parses the parameters for a kernel action. Requires the cJSON // *parameters* object for a kernel action, and fills in the KernelParameters. // Returns 0 on error. static int ParseKernelParameters(cJSON *json_parameters, KernelParameters *kernel_config, int default_block_count, int default_thread_count) { cJSON *entry = NULL; // Due to the complexity of this config, this can forestall confusing errors // by pointing out misspelled keys. static const char* const valid_keys[] = { "type", "thread_count", "block_count", "shared_memory_size", "comment", "duration", }; if (!VerifyJSONKeys(json_parameters, valid_keys, sizeof(valid_keys) / sizeof(char*))) { return 0; } // Determine whether the kernel should be a timer spin (constant time) or // counter spin (constant effort). The default is constant time, if the // setting isn't provided. entry = cJSON_GetObjectItem(json_parameters, "type"); if (entry) { if (entry->type != cJSON_String) { printf("Invalid kernel type for kernel action.\n"); return 0; } if (strcmp(entry->valuestring, "timer_spin") == 0) { kernel_config->use_counter_spin = 0; } else if (strcmp(entry->valuestring, "counter_spin") == 0) { kernel_config->use_counter_spin = 1; } else { printf("Unsupported kernel type for kernel action: %s\n", entry->valuestring); return 0; } } else { kernel_config->use_counter_spin = 0; } // Get the one required numerical parameter: duration. entry = cJSON_GetObjectItem(json_parameters, "duration"); if (!entry || (entry->type != cJSON_Number)) { printf("Missing/invalid duration for kernel action.\n"); return 0; } kernel_config->duration = (uint64_t) entry->valuedouble; // Get the block and thread counts, which default to the benchmark setting // if they aren't provided. kernel_config->block_count = default_block_count; entry = cJSON_GetObjectItem(json_parameters, "block_count"); if (entry) { if (entry->type != cJSON_Number) { printf("Invalid block count for kernel action.\n"); return 0; } kernel_config->block_count = entry->valueint; } kernel_config->thread_count = default_thread_count; entry = cJSON_GetObjectItem(json_parameters, "thread_count"); if (entry) { if (entry->type != cJSON_Number) { printf("Invalid thread count for kernel action.\n"); return 0; } kernel_config->thread_count = entry->valueint; } // Unlike the other numbers, the shared_memory_count is optional and needs // validation. 
entry = cJSON_GetObjectItem(json_parameters, "shared_memory_size"); if (entry) { if (entry->type != cJSON_Number) { printf("Invalid shared memory size for kernel action.\n"); return 0; } kernel_config->shared_memory_count = entry->valueint; } else { kernel_config->shared_memory_count = 0; } switch (kernel_config->shared_memory_count) { case 0: case 4096: case 8192: case 10240: break; default: printf("Unsupported shared memory size for kernel action: %d\n", kernel_config->shared_memory_count); return 0; } return 1; } // Parses parameters for the malloc action. Returns 0 on error. static int ParseMallocParameters(cJSON *json_parameters, MallocParameters *malloc_config) { cJSON *entry = NULL; int host = 0; static const char* const valid_keys[] = { "size", "host", "comment", }; if (!VerifyJSONKeys(json_parameters, valid_keys, sizeof(valid_keys) / sizeof(char*))) { return 0; } entry = cJSON_GetObjectItem(json_parameters, "host"); if (entry) { host = GetCJSONBoolean(entry); } if (host < 0) { printf("Invalid host setting for malloc action.\n"); return 0; } malloc_config->allocate_host_memory = host; entry = cJSON_GetObjectItem(json_parameters, "size"); if (!entry || (entry->type != cJSON_Number)) { printf("Missing/invalid size setting for malloc action.\n"); return 0; } malloc_config->size = (uint64_t) entry->valuedouble; return 1; } // Parses the given (optional) parameters for the hipFree action. Returns 0 // on error. Since the parameters are optional, json_parameters can be NULL. static int ParseFreeParameters(cJSON *json_parameters, FreeParameters *free_config) { cJSON *entry = NULL; int host = 0; static const char* const valid_keys[] = { "host", "comment", }; // The config here is optional. free_config->free_host_memory = 0; if (!json_parameters) return 1; if (!VerifyJSONKeys(json_parameters, valid_keys, sizeof(valid_keys) / sizeof(char*))) { return 0; } entry = cJSON_GetObjectItem(json_parameters, "host"); if (entry) { host = GetCJSONBoolean(entry); } if (host < 0) { printf("Invalid host setting for malloc action.\n"); return 0; } free_config->free_host_memory = host; return 1; } // Parses JSON parameters for the memset action. Returns 0 on error. static int ParseMemsetParameters(cJSON *json_parameters, MemsetParameters *memset_config) { cJSON *entry = NULL; int async = 0; static const char* const valid_keys[] = { "async", "size", "comment", }; if (!VerifyJSONKeys(json_parameters, valid_keys, sizeof(valid_keys) / sizeof(char*))) { return 0; } entry = cJSON_GetObjectItem(json_parameters, "size"); if (!entry || (entry->type != cJSON_Number)) { printf("Missing/invalid size for memset action.\n"); return 0; } memset_config->size = (uint64_t) entry->valuedouble; entry = cJSON_GetObjectItem(json_parameters, "async"); if (entry) { async = GetCJSONBoolean(entry); } if (async < 0) { printf("Invalid async setting for memset action.\n"); return 0; } memset_config->synchronous = !async; return 1; } // Parses JSON parameters for the memcpy action. Returns 0 on error. 
static int ParseMemcpyParameters(cJSON *json_parameters, MemcpyParameters *memcpy_config) { cJSON *entry = NULL; int async = 0; static const char* const valid_keys[] = { "async", "size", "direction", "comment", }; if (!VerifyJSONKeys(json_parameters, valid_keys, sizeof(valid_keys) / sizeof(char*))) { return 0; } entry = cJSON_GetObjectItem(json_parameters, "size"); if (!entry || (entry->type != cJSON_Number)) { printf("Missing/invalid size for memcpy action.\n"); return 0; } memcpy_config->size = (uint64_t) entry->valuedouble; entry = cJSON_GetObjectItem(json_parameters, "async"); if (entry) { async = GetCJSONBoolean(entry); } if (async < 0) { printf("Invalid async setting for memcpy action.\n"); return 0; } memcpy_config->synchronous = !async; entry = cJSON_GetObjectItem(json_parameters, "direction"); if (!entry || (entry->type != cJSON_String)) { printf("Missing/invalid direction for memcpy action.\n"); return 0; } if (strcmp(entry->valuestring, "deviceToHost") == 0) { memcpy_config->direction = hipMemcpyDeviceToHost; } else if (strcmp(entry->valuestring, "hostToDevice") == 0) { memcpy_config->direction = hipMemcpyHostToDevice; } else if (strcmp(entry->valuestring, "deviceToDevice") == 0) { memcpy_config->direction = hipMemcpyDeviceToDevice; } else { printf("Unsupported direction for memcpy action: %s\n", entry->valuestring); return 0; } return 1; } // Parses the JSON parameters for the "synchronize" action. Returns 0 on error. // The json_parameters can be NULL, in which case the sync parameters will take // their default values. static int ParseSyncParameters(cJSON *json_parameters, SyncParameters *sync_config) { cJSON *entry = NULL; static const char* const valid_keys[] = { "device", "comment", }; sync_config->sync_device = 0; if (!json_parameters) return 1; if (!VerifyJSONKeys(json_parameters, valid_keys, sizeof(valid_keys) / sizeof(char*))) { return 0; } entry = cJSON_GetObjectItem(json_parameters, "device"); if (entry) { sync_config->sync_device = GetCJSONBoolean(entry); } if (sync_config->sync_device < 0) { sync_config->sync_device = 0; printf("Invalid device setting for sync action.\n"); return 0; } return 1; } // Parses a JSON action object in order to fill in the given ActionConfig. // Returns 0 on error and 1 on success. May partially initialize action on // error, so the caller may need to clean it up. However, the action type is // guaranteed to be valid if any other fields are set. static int ParseSingleAction(cJSON *object, ActionConfig *action, InitializationParameters *params) { cJSON *entry = NULL; ActionType type = ACTION_UNINITIALIZED; static const char* const valid_keys[] = { "type", "label", "delay", "parameters", "comment", }; // Validate keys to find confusing spelling mistakes that may make a setting // take its default value unintentionally. if (!VerifyJSONKeys(object, valid_keys, sizeof(valid_keys) / sizeof(char*))) { return 0; } // Start with the hardest property: the action's type. 
entry = cJSON_GetObjectItem(object, "type"); if (!entry || (entry->type != cJSON_String)) { printf("Missing/invalid action type for stream_action.so.\n"); return 0; } if (strcmp(entry->valuestring, "kernel") == 0) { type = ACTION_KERNEL; } else if (strcmp(entry->valuestring, "malloc") == 0) { type = ACTION_MALLOC; } else if (strcmp(entry->valuestring, "free") == 0) { type = ACTION_FREE; } else if (strcmp(entry->valuestring, "memset") == 0) { type = ACTION_MEMSET; } else if (strcmp(entry->valuestring, "memcpy") == 0) { type = ACTION_MEMCPY; } else if (strcmp(entry->valuestring, "synchronize") == 0) { type = ACTION_SYNC; } else { printf("Unsupported action type for stream_action.so: %s\n", entry->valuestring); return 0; } action->type = type; entry = cJSON_GetObjectItem(object, "label"); if (!entry || (entry->type != cJSON_String)) { printf("Missing/invalid action label for stream_action.so.\n"); return 0; } action->label = strdup(entry->valuestring); if (!action->label) return 0; entry = cJSON_GetObjectItem(object, "delay"); if (entry) { if (entry->type != cJSON_Number) { printf("Invalid delay for stream_action.so.\n"); return 0; } action->delay = entry->valuedouble; } // Last, parse the action-specific parameters. Remember that additional // parameters are optional for some actions, so only ensure that the // parameters are an object if they're non-NULL. entry = cJSON_GetObjectItem(object, "parameters"); if (entry && (entry->type != cJSON_Object)) { printf("Invalid action parameters for stream_action.so.\n"); return 0; } // Get kernel config parsing over with first, since it's the most complex. if (type == ACTION_KERNEL) { if (!entry) { printf("Missing kernel parameters for stream_action.so.\n"); return 0; } if (!ParseKernelParameters(entry, &(action->parameters.kernel), params->block_count, params->thread_count)) { return 0; } } if (type == ACTION_MALLOC) { if (!entry) { printf("Missing malloc parameters for stream_action.so.\n"); return 0; } if (!ParseMallocParameters(entry, &(action->parameters.malloc))) return 0; } if (type == ACTION_FREE) { // It's okay for "entry" to be NULL here. if (!ParseFreeParameters(entry, &(action->parameters.free))) return 0; } if (type == ACTION_MEMSET) { if (!entry) { printf("Missing memset parameters for stream_action.so.\n"); return 0; } if (!ParseMemsetParameters(entry, &(action->parameters.memset))) return 0; } if (type == ACTION_MEMCPY) { if (!entry) { printf("Missing memcpy parameters for stream_action.so.\n"); return 0; } if (!ParseMemcpyParameters(entry, &(action->parameters.memcpy))) return 0; } if (type == ACTION_SYNC) { // It's okay for "entry" to be NULL here, too. if (!ParseSyncParameters(entry, &(action->parameters.sync))) return 0; } return 1; } // Takes a TaskState struct to be initialized and a JSON configuration string. // Parses the JSON configuration and fills the appropriate fields in the state // struct. The stream_priority value is needed, because this function will // create the CUDA stream if the use_null_stream setting is not true. Returns 0 // on error. 
static int ParseParameters(TaskState *state, InitializationParameters *params) { cJSON *json_root = NULL; cJSON *list_head = NULL; cJSON *entry = NULL; ActionConfig *actions = NULL; ActionConfig *action = NULL; int i = 0, action_count = 0, use_null_stream = 0; static const char* const valid_keys[] = { "actions", "use_null_stream", "comment", }; json_root = cJSON_Parse(params->additional_info); if (!json_root || (json_root->type != cJSON_Object)) { printf("Missing/invalid additional_info for stream_action.so.\n"); goto ErrorCleanup; } if (!VerifyJSONKeys(json_root, valid_keys, sizeof(valid_keys) / sizeof(char*))) { goto ErrorCleanup; } // First, check for the "use_null_stream" setting. entry = cJSON_GetObjectItem(json_root, "use_null_stream"); if (entry) use_null_stream = GetCJSONBoolean(entry); if (use_null_stream < 0) { printf("Invalid use_null_stream setting in stream_action.so.\n"); goto ErrorCleanup; } // Always use a user-defined stream for copy_out operations. if (!CheckCUDAError(CreateCUDAStreamWithPriority(params->stream_priority, &(state->copy_out_stream)))) { goto ErrorCleanup; } state->stream_created = 1; // If the NULL stream wasn't specified, then use the user-defined stream // for all other operations, too. if (use_null_stream) { state->stream = 0; } else { state->stream = state->copy_out_stream; } // Get the actions list, ensuring it's an array with at least one element. list_head = cJSON_GetObjectItem(json_root, "actions"); if (!list_head || (list_head->type != cJSON_Array) || !list_head->child) { printf("Missing/invalid list of actions for stream_action.so.\n"); goto ErrorCleanup; } // Count the number of actions in the list. entry = list_head->child; action_count = 1; while (entry->next) { action_count++; entry = entry->next; } // Allocate and initialize the internal list of ActionConfig structs. actions = (ActionConfig *) calloc(action_count, sizeof(*actions)); if (!actions) goto ErrorCleanup; entry = list_head->child; for (i = 0; i < action_count; i++) { action = actions + i; if (!ParseSingleAction(entry, action, params)) goto ErrorCleanup; entry = entry->next; } // Clean up and return success. state->actions = actions; state->action_count = action_count; cJSON_Delete(json_root); return 1; ErrorCleanup: if (json_root) cJSON_Delete(json_root); if (actions) { for (i = 0; i < action_count; i++) { CleanupAction(actions + i); } free(actions); } return 0; } // Allocates buffers needed by a single kernel action. Returns 0 on error. static int AllocateKernelActionMemory(KernelParameters *parameters) { size_t block_times_size = 2 * parameters->block_count * sizeof(uint64_t); size_t smids_size = parameters->block_count * sizeof(uint32_t); if (!CheckCUDAError(hipMalloc(&parameters->device_block_times, block_times_size))) { return 0; } if (!CheckCUDAError(hipMalloc(&parameters->device_smids, smids_size))) { return 0; } if (!CheckCUDAError(hipHostMalloc(&parameters->host_block_times, block_times_size))) { return 0; } if (!CheckCUDAError(hipHostMalloc(&parameters->host_smids, smids_size))) { return 0; } return 1; } // Preallocates a set of buffers so that a limited number of free actions don't // necessarily need to follow malloc actions. Returns 0 on error. 
static int PreallocateFreeActionBuffers(TaskState *state) { int i; uint8_t **dest = NULL; for (i = 0; i < INITIAL_ALLOCATION_COUNT; i++) { dest = state->device_memory_allocations + i; if (!CheckCUDAError(hipMalloc(dest, INITIAL_ALLOCATION_SIZE))) { return 0; } // Increment these values one step at a time, so they can be cleaned up // properly if one of the later allocations fails. state->device_memory_allocation_count++; dest = state->host_memory_allocations + i; if (!CheckCUDAError(hipHostMalloc(dest, INITIAL_ALLOCATION_SIZE))) { return 0; } state->host_memory_allocation_count++; } return 1; } // Takes a TaskState after fully parsing InitializationParameters (i.e. the // actions list is populated). Allocates necessary buffers for kernel actions, // memory sets and copies, holding pointers for malloc actions, and buffers of // data to report to the calling process during copy_out. Returns 0 on error. static int AllocateMemory(TaskState *state) { int i; uint64_t current_size; uint64_t max_size = 0; int secondary_buffer_needed = 0; int malloc_action_exists = 0; int kernel_count = 0; ActionConfig *action = NULL; // Collect aggregate information about all actions, and allocate the kernel // action's buffers while we're at it. for (i = 0; i < state->action_count; i++) { action = state->actions + i; switch (action->type) { case ACTION_KERNEL: kernel_count++; if (!AllocateKernelActionMemory(&(action->parameters.kernel))) { return 0; } break; case ACTION_MALLOC: malloc_action_exists = 1; break; case ACTION_FREE: malloc_action_exists = 1; break; case ACTION_MEMSET: current_size = action->parameters.memset.size; if (current_size > max_size) max_size = current_size; break; case ACTION_MEMCPY: current_size = action->parameters.memcpy.size; if (current_size > max_size) max_size = current_size; if (action->parameters.memcpy.direction == hipMemcpyDeviceToDevice) { secondary_buffer_needed = 1; } break; default: break; } } // Start by allocating device memory. if (!CheckCUDAError(hipMalloc(&state->device_copy_buffer, max_size))) { return 0; } // Only allocate a second device buffer if a device-to-device memcpy action // is present. if (secondary_buffer_needed) { if (!CheckCUDAError(hipMalloc(&state->device_secondary_buffer, max_size))) { return 0; } } // Now allocate host memory. if (!CheckCUDAError(hipHostMalloc(&state->host_copy_buffer, max_size))) { return 0; } if (malloc_action_exists) { state->device_memory_allocations = (uint8_t**) calloc( MAX_MEMORY_ALLOCATION_COUNT, sizeof(uint8_t*)); if (!state->device_memory_allocations) { printf("Failed allocating list of device memory allocation pointers.\n"); return 0; } state->host_memory_allocations = (uint8_t**) calloc( MAX_MEMORY_ALLOCATION_COUNT, sizeof(uint8_t*)); if (!state->host_memory_allocations) { printf("Failed allocating list of host memory allocation pointers.\n"); return 0; } if (!PreallocateFreeActionBuffers(state)) return 0; } // Any pointers contained in the individual KernelTimes entries are simply // copied from KernelParameters structs after execution--they don't need to // be allocated here. state->kernel_times = (KernelTimes*) calloc(kernel_count, sizeof(KernelTimes)); if (!state->kernel_times) { printf("Failed allocating list of kernel times.\n"); return 0; } state->kernel_count = kernel_count; return 1; } // Initializes the task's kernel_times array. Must be called after memory // allocation. This is done once because most of the fields in the kernel_times // array never change, apart from cuda_launch_times. Returns 0 on error.
static int InitializeKernelTimes(TaskState *state) { int i; int kernel_index = 0; KernelTimes *current_times = NULL; ActionConfig *action = NULL; KernelParameters *params = NULL; for (i = 0; i < state->action_count; i++) { action = state->actions + i; if (action->type != ACTION_KERNEL) continue; params = &(action->parameters.kernel); current_times = state->kernel_times + kernel_index; current_times->kernel_name = action->label; current_times->block_count = params->block_count; current_times->thread_count = params->thread_count; current_times->shared_memory = params->shared_memory_count * 4; current_times->block_times = params->host_block_times; current_times->block_smids = params->host_smids; kernel_index++; } return 1; } static void* Initialize(InitializationParameters *params) { TaskState *state = NULL; state = (TaskState *) malloc(sizeof(*state)); if (!state) { printf("Error allocating memory for stream_action task state.\n"); return NULL; } memset(state, 0, sizeof(*state)); if (!CheckCUDAError(hipSetDevice(params->cuda_device))) { Cleanup(state); return NULL; } // Parse the configuration string, initialize the action configs, and create // the CUDA stream (if a non-NULL stream is used). if (!ParseParameters(state, params)) { Cleanup(state); return NULL; } if (!AllocateMemory(state)) { Cleanup(state); return NULL; } if (!InitializeKernelTimes(state)) { Cleanup(state); return NULL; } return state; } // Nothing needs to be copied to the GPU at this stage in the benchmark. static int CopyIn(void *data) { return 1; } // Copies device data to host buffers for a single kernel action. Requires a // stream on which the copy should run. Returns 0 on error. static int CopyKernelActionMemoryOut(KernelParameters *kernel, hipStream_t stream) { size_t block_times_size = 2 * kernel->block_count * sizeof(uint64_t); size_t block_smids_size = kernel->block_count * sizeof(uint32_t); if (!CheckCUDAError(hipMemcpyAsync(kernel->host_block_times, kernel->device_block_times, block_times_size, hipMemcpyDeviceToHost, stream))) { return 0; } if (!CheckCUDAError(hipMemcpyAsync(kernel->host_smids, kernel->device_smids, block_smids_size, hipMemcpyDeviceToHost, stream))) { return 0; } return 1; } // Provides the caller with information about the kernel actions. static int CopyOut(void *data, TimingInformation *times) { TaskState *state = (TaskState *) data; int i; for (i = 0; i < state->action_count; i++) { if (state->actions[i].type != ACTION_KERNEL) continue; if (!CopyKernelActionMemoryOut(&(state->actions[i].parameters.kernel), state->copy_out_stream)) { return 0; } } if (!CheckCUDAError(hipStreamSynchronize(state->copy_out_stream))) return 0; // The kernel_times structs were already filled in with the correct pointers // during initialization, and the cuda_launch_times were filled in during the // execute phase. So now, all that needs to be done is provide the correct // pointer. times->kernel_count = state->kernel_count; times->kernel_info = state->kernel_times; times->resulting_data_size = 0; times->resulting_data = NULL; return 1; } // Executes a kernel action. Requires the index of the kernel action (its order // relative only to other kernel actions) in order to fill in the CUDA launch // times in the correct entry in the kernel_times array. Returns 0 on error. 
static int ExecuteKernelAction(TaskState *state, KernelParameters *params, int kernel_index) { KernelTimes *kernel_time = state->kernel_times + kernel_index; kernel_time->cuda_launch_times[0] = CurrentSeconds(); switch (params->shared_memory_count) { case 0: hipLaunchKernelGGL(( GPUSpin), dim3(params->block_count), dim3(params->thread_count), 0, state->stream, params->use_counter_spin, params->duration, params->device_block_times, params->device_smids, NULL); break; case 4096: hipLaunchKernelGGL(( SharedMemGPUSpin_4096), dim3(params->block_count), dim3(params->thread_count), 0, state->stream, params->use_counter_spin, params->duration, params->device_block_times, params->device_smids, NULL); break; case 8192: hipLaunchKernelGGL(( SharedMemGPUSpin_8192), dim3(params->block_count), dim3(params->thread_count), 0, state->stream, params->use_counter_spin, params->duration, params->device_block_times, params->device_smids, NULL); break; case 10240: hipLaunchKernelGGL(( SharedMemGPUSpin_10240), dim3(params->block_count), dim3(params->thread_count), 0, state->stream, params->use_counter_spin, params->duration, params->device_block_times, params->device_smids, NULL); break; default: printf("Unsupported kernel shared memory count: %d\n", params->shared_memory_count); return 0; } // Record the time after the kernel launch returns, but we don't know when // synchronization will complete in this benchmark, so set that entry to 0. kernel_time->cuda_launch_times[1] = CurrentSeconds(); kernel_time->cuda_launch_times[2] = 0; return 1; } // Executes a malloc action. Returns 0 on error. static int ExecuteMallocAction(TaskState *state, MallocParameters *params) { int next_index = 0; uint8_t **destination = NULL; if (params->allocate_host_memory) { next_index = state->host_memory_allocation_count; } else { next_index = state->device_memory_allocation_count; } if (next_index >= MAX_MEMORY_ALLOCATION_COUNT) { printf("Can't execute malloc action: too many unfreed %s allocations.\n", params->allocate_host_memory ? "host" : "device"); return 0; } if (params->allocate_host_memory) { destination = state->host_memory_allocations + next_index; if (!CheckCUDAError(hipHostMalloc(destination, params->size))) return 0; state->host_memory_allocation_count++; return 1; } destination = state->device_memory_allocations + next_index; if (!CheckCUDAError(hipMalloc(destination, params->size))) return 0; state->device_memory_allocation_count++; return 1; } // Executes a free action. Returns 0 on error. static int ExecuteFreeAction(TaskState *state, FreeParameters *params) { if (params->free_host_memory) { if (state->host_memory_allocation_count == 0) { printf("Can't execute free action: No host memory allocations.\n"); return 0; } state->host_memory_allocation_count--; if (!CheckCUDAError(hipHostFree(state->host_memory_allocations[ state->host_memory_allocation_count]))) { return 0; } return 1; } if (state->device_memory_allocation_count == 0) { printf("Can't execute free action: No device memory allocations.\n"); return 0; } state->device_memory_allocation_count--; if (!CheckCUDAError(hipFree(state->device_memory_allocations[ state->device_memory_allocation_count]))) { return 0; } return 1; } // Executes a memset action. Fills a device buffer with a random value. Returns // 0 on error. 
static int ExecuteMemsetAction(TaskState *state, MemsetParameters *params) { if (params->synchronous) { if (!CheckCUDAError(hipMemset(state->device_copy_buffer, rand(), params->size))) { return 0; } return 1; } if (!CheckCUDAError(hipMemsetAsync(state->device_copy_buffer, rand(), params->size, state->stream))) { return 0; } return 1; } // Executes a memcpy action. Returns 0 on error. static int ExecuteMemcpyAction(TaskState *state, MemcpyParameters *params) { uint8_t *src = NULL; uint8_t *dest = NULL; switch (params->direction) { case hipMemcpyDeviceToDevice: src = state->device_copy_buffer; dest = state->device_secondary_buffer; break; case hipMemcpyDeviceToHost: src = state->device_copy_buffer; dest = state->host_copy_buffer; break; case hipMemcpyHostToDevice: src = state->host_copy_buffer; dest = state->device_copy_buffer; break; default: printf("Unsupported direction for memcpy action: %d\n", (int) params->direction); return 0; } if (params->synchronous) { if (!CheckCUDAError(hipMemcpy(dest, src, params->size, params->direction))) { return 0; } return 1; } if (!CheckCUDAError(hipMemcpyAsync(dest, src, params->size, params->direction, state->stream))) { return 0; } return 1; } // Executes a synchronization action. Returns 0 on error. static int ExecuteSyncAction(TaskState *state, SyncParameters *params) { if (params->sync_device) { if (!CheckCUDAError(hipDeviceSynchronize())) return 0; return 1; } if (!CheckCUDAError(hipStreamSynchronize(state->stream))) return 0; return 1; } // Sleeps for at least the given number of seconds, with a microsecond // granularity. static void SleepSeconds(double seconds) { uint64_t to_sleep = (uint64_t) (seconds * 1e6); usleep(to_sleep); } // Executes each action in the order it appears in the list. static int Execute(void *data) { TaskState *state = (TaskState *) data; ActionConfig *action = NULL; int kernel_index = 0; int i; for (i = 0; i < state->action_count; i++) { action = state->actions + i; if (action->delay > 0.0) { SleepSeconds(state->actions[i].delay); } switch (action->type) { case ACTION_KERNEL: if (!ExecuteKernelAction(state, &(action->parameters.kernel), kernel_index)) { return 0; } kernel_index++; break; case ACTION_MALLOC: if (!ExecuteMallocAction(state, &(action->parameters.malloc))) { return 0; } break; case ACTION_FREE: if (!ExecuteFreeAction(state, &(action->parameters.free))) { return 0; } break; case ACTION_MEMSET: if (!ExecuteMemsetAction(state, &(action->parameters.memset))) { return 0; } break; case ACTION_MEMCPY: if (!ExecuteMemcpyAction(state, &(action->parameters.memcpy))) { return 0; } break; case ACTION_SYNC: if (!ExecuteSyncAction(state, &(action->parameters.sync))) { return 0; } break; default: printf("Attempted to execute invalid action: %d\n", action->type); return 0; } } if (!CheckCUDAError(hipStreamSynchronize(state->stream))) return 0; return 1; } static const char* GetName(void) { return "Sequential action execution"; } int RegisterFunctions(BenchmarkLibraryFunctions *functions) { functions->initialize = Initialize; functions->copy_in = CopyIn; functions->execute = Execute; functions->copy_out = CopyOut; functions->cleanup = Cleanup; functions->get_name = GetName; return 1; }
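// Usage sketch (illustrative only, not part of the benchmark framework): a
// host harness that loads this library would call RegisterFunctions() once
// and then drive the returned callbacks in order. The "params" and "times"
// values are assumed to be filled in by that harness.
//
//   BenchmarkLibraryFunctions functions;
//   InitializationParameters params;  // additional_info holds the JSON config
//   TimingInformation times;
//   RegisterFunctions(&functions);
//   void *state = functions.initialize(&params);
//   functions.copy_in(state);
//   functions.execute(state);            // issues the action list in order
//   functions.copy_out(state, &times);   // block times, SM IDs, launch times
//   functions.cleanup(state);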
1577bb18f35cf9cfb72dc682e7b1daee540f6ce7.cu
// This file defines a CUDA benchmark which issues multiple kernels to a single // stream before waiting for all kernels to complete. The configuration for the // kernels is taken exclusively from the additional_info field in the // InitializationParameters struct. The actual kernels will simply be instances // of the same kernel as in the timer_spin benchmark. This benchmark ignores // all fields in its initialization parameters apart from cuda_device and // additional_info. // // The format of the necessary additional_info field is as follows. Each object // in the "actions" list must have a type that is one of "kernel", "malloc", // "free", "memset", "memcpy", or "synchronize". Memory operations such as // malloc, free, memset, and memcpy operate on buffers separate from each other. // For example, a malloc doesn't need to precede a memset, because memset // buffers will be allocated during initialization. The only limitation is that // only a small number of unbalanced malloc and free operations are allowed. // Any unfreed mallocs from these actions will be freed during benchmark // cleanup. Synchronization actions are available solely to experiment with // scheduling, and are not necessary for the task. A stream-synchronization // request will be issued at the end of all actions regardless of whether an // explicit, additional synchronization action was carried out. // For more details about parameters for each action, see the annotated JSON // structure below: /* "additional_info": { "use_null_stream": <Boolean, defaults to false, set to true to use the null stream rather than the default stream>, "actions": [ { "delay": <A floating-point number of seconds to sleep before starting this action. Defaults to 0.0, which will insert no sleep at all.>, "type": <A string, from the list given above.>, "label": <A string, a label for this action.>, "parameters": <A JSON object with action-specific parameters.> }, { "type": "kernel", "label": "Kernel 1", "parameters": { "type": <A string: "timer_spin" or "counter_spin". Defaults to "timer_spin">, "duration": <If "type" is "timer_spin", this will be the number of nanoseconds to run the kernel. If type is "counter_spin", this will be the number of loop iterations to run.>, "shared_memory_size": <The number of shared 32-bit integers to use. Defaults to 0. Must be 0, 4096, 8192, or 10240.>, "block_count": <The number of thread blocks to use. Defaults to the value given in the benchmark parameters.>, "thread_count": <The number of threads per block to use. Defaults to the value given in the benchmark parameters.> } }, { "type": "malloc", "label": "Malloc 1", "parameters": { "host": <Boolean. Defaults to false. If true, will allocate host memory.>, "size": <Number of bytes to allocate> } }, { "type": "free", "label": "Free 1", "parameters": { "host": <Boolean. Defaults to false. If true, will free host memory. The entire "parameters" block can be omitted here for the default.> } }, { "type": "memset", "label": "Memset 1", "parameters": { "async": <Boolean. Defaults to true. If false, will issue a null-stream memset regardless of use_null_stream's value.>, "size": <Number of bytes to set> } }, { "type": "memcpy", "label": "Memcpy 1", "parameters": { "async": <Boolean. Defaults to true.
If false, issues a null-stream memcpy regardless of use_null_stream's value.>, "size": <Number of bytes to copy>, "direction": <Either "deviceToDevice", "deviceToHost", or "hostToDevice"> } }, { "type": "synchronize", "label": "Sync 1", "parameters": { "device": <Boolean. Defaults to false (parameters can be omitted here entirely, too). If true, runs a cudaDeviceSynchronize rather than cudaStreamSynchronize.> } } ] } */ // Actions are issued to the stream in the same order that they're specified // in the "actions" list. #include <cuda_runtime.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include "benchmark_gpu_utilities.h" #include "library_interface.h" #include "third_party/cJSON.h" // This specifies the maximum number of un-freed malloc actions that can occur // before further allocations return an error instead. Any list with this many // or fewer (balanced) malloc and free actions can run indefinitely. #define MAX_MEMORY_ALLOCATION_COUNT (10) // This specifies the number of pre-allocated buffers that are allocated during // initialization, so that a number of free actions can be used without a // preceding malloc. This can be at most MAX_MEMORY_ALLOCATION_COUNT. #define INITIAL_ALLOCATION_COUNT (4) // This specifies the size, in bytes, of the pre-allocated buffers. #define INITIAL_ALLOCATION_SIZE (1024) // This macro is used to create functions that statically use predefined // amounts of shared memory. This is used by the GENERATE_SPIN_KERNEL macro. #define GENERATE_SHARED_MEMORY_FUNCTION(amount) \ static __device__ uint32_t UseSharedMemory_##amount(void) { \ __shared__ uint32_t shared_array[(amount)]; \ uint32_t elements_per_thread, i; \ elements_per_thread = (amount) / blockDim.x; \ for (i = 0; i < elements_per_thread; i++) { \ shared_array[threadIdx.x * elements_per_thread + i] = threadIdx.x; \ } \ return shared_array[threadIdx.x * elements_per_thread]; \ } // Generates kernels that use the given amount of shared memory. Kernels have // names like SharedMemGPUSpin_<amount>, and take the following parameters: // (int use_counter, uint64_t duration, uint64_t *block_times, // uint32_t *block_smids, uint64_t *junk). If the "use_counter" parameter is // nonzero, then a constant amount of computation will be carried out rather // than waiting for a constant amount of time. The "junk" parameter is used to // prevent optimizations, and must be NULL. Otherwise, this kernel operates // similarly to the simpler GPUSpin kernel in stream_action.cu. This WILL NOT // work for 0 bytes of shared memory--that's what the plain GPUSpin in // stream_action.cu is for.
#define GENERATE_SPIN_KERNEL(amount) \ /* Produce a function that uses shared memory */ \ GENERATE_SHARED_MEMORY_FUNCTION(amount) \ static __global__ void SharedMemGPUSpin_##amount(int use_counter, \ uint64_t duration, uint64_t *block_times, uint32_t *block_smids, \ uint64_t *junk) { \ uint32_t shared_mem_res; \ uint64_t i, accumulator = 0; \ uint64_t start_time = GlobalTimer64(); \ if (threadIdx.x == 0) { \ block_times[blockIdx.x * 2] = start_time; \ block_smids[blockIdx.x] = GetSMID(); \ } \ __syncthreads(); \ /* shared_mem_res is our thread index */ \ shared_mem_res = UseSharedMemory_##amount(); \ if (use_counter) { \ for (i = 0; i < duration; i++) { \ accumulator += i; \ } \ } else { \ while ((GlobalTimer64() - start_time) < duration) { \ continue; \ } \ } \ if (junk) *junk = accumulator; \ if (shared_mem_res == 0) { \ block_times[blockIdx.x * 2 + 1] = GlobalTimer64(); \ } \ } // This holds parameters for the kernel action. typedef struct { // The grid dimensions for this kernel. int block_count; int thread_count; // The amount of shared memory used by this kernel. int shared_memory_count; // If this is nonzero, the counter_spin kernel will be used, which performs // a constant amount of busywork computations. If this is zero, the // timer_spin kernel will be used instead, which waits until a certain number // of nanoseconds have elapsed. int use_counter_spin; // The number of either spin iterations or nanoseconds this kernel runs for // (depending on whether it is a timer spin or counter spin kernel). uint64_t duration; // Hold the times needed for a CUDA kernel. uint64_t *device_block_times; uint64_t *host_block_times; uint32_t *device_smids; uint32_t *host_smids; } KernelParameters; // This holds parameters for the cudaMalloc action. typedef struct { // This is the number of bytes to allocate. uint64_t size; // If nonzero, call cudaMallocHost rather than cudaMalloc. int allocate_host_memory; } MallocParameters; // This holds parameters for the cudaFree action. typedef struct { // If nonzero, call cudaFreeHost rather than cudaFree. int free_host_memory; } FreeParameters; // This holds parameters for the cudaMemset action, which sets bytes to a // random 8-bit value. typedef struct { // If nonzero, then cudaMemset will be called (associated with no stream), // rather than cudaMemsetAsync, which will use the task's specified stream. int synchronous; // This contains the number of bytes to set. uint64_t size; } MemsetParameters; // This holds parameters for the cudaMemcpy action, which copies data between // host and device, or two device buffers. typedef struct { // One of the cudaMemcpyKind values. However, values 0 (host - host) and 4 // (unspecified) are not supported. cudaMemcpyKind direction; // If nonzero, then cudaMemcpy will be used. If 0, then cudaMemcpyAsync is // used, associated with the task's stream. int synchronous; // The number of bytes to copy. uint64_t size; } MemcpyParameters; // This holds parameters for the synchronize action. typedef struct { // If this is nonzero, then cudaDeviceSynchronize will be called. Otherwise, // cudaStreamSynchronize is called, associated with the task's stream. int sync_device; } SyncParameters; // This is used as a tag to identify the parameters and behavior to carry out // for each action supported by the benchmark. typedef enum { ACTION_UNINITIALIZED = 0, ACTION_KERNEL, ACTION_MALLOC, ACTION_FREE, ACTION_MEMSET, ACTION_MEMCPY, ACTION_SYNC, } ActionType; // This defines the behavior and parameters for all potential actions.
typedef struct { // The number of seconds to sleep after the previous action has been issued, // before launching this one. double delay; // The label (typically a kernel name) to give this action. char *label; ActionType type; union { KernelParameters kernel; MallocParameters malloc; FreeParameters free; MemsetParameters memset; MemcpyParameters memcpy; SyncParameters sync; } parameters; } ActionConfig; // Holds local information for each instantiation of this benchmark. typedef struct { // The CUDA stream with which all operations will be associated. cudaStream_t stream; // The CUDA stream with which copy_out operations will be associated. May // differ from the regular stream, because this will never be the NULL // stream. cudaStream_t copy_out_stream; // This will be set to 1 if the stream was created and must be closed during // cleanup (it can remain 0 if the NULL stream is used). int stream_created; // The number of actions to perform per execution. int action_count; // The list of actions to perform. ActionConfig *actions; // The number of actions which are kernel launches. int kernel_count; // Information to provide to the host process about block start and end times // for each kernel action. KernelTimes *kernel_times; // A buffer of host memory for copies and memsets. May be NULL if not needed. // Is guaranteed to be the size of the largest copy or memset needed by any // action. uint8_t *host_copy_buffer; // A buffer of device memory for copies and memsets. May be NULL if not // needed. This is guaranteed to be the size of the largest copy or memset // needed by any action. uint8_t *device_copy_buffer; // This will be a secondary device buffer, but will only be allocated if a // device-to-device memory copy is used. uint8_t *device_secondary_buffer; // This is a stack of pointers to device memory allocated by cudaMalloc // actions. uint8_t **device_memory_allocations; // Holds the number of pointers in the device_memory_allocations list. This // increases with each cudaMalloc action and decreases with each cudaFree. int device_memory_allocation_count; // This is a stack of pointers to host memory allocated by cudaMallocHost. // It works in the same way as device_memory_allocations. uint8_t **host_memory_allocations; // This is analogous to device_memory_allocation_count, but for host memory // allocations. int host_memory_allocation_count; } TaskState; // Use the macros defined above to generate a set of kernels using various // amounts of static shared memory. GENERATE_SPIN_KERNEL(4096); GENERATE_SPIN_KERNEL(8192); GENERATE_SPIN_KERNEL(10240); // A basic kernel that wastes GPU cycles without using shared memory. The // duration parameter specifies the number of nanoseconds to wait if // use_counter is 0. If use_counter is nonzero, duration specifies a number of // loop iterations to spin instead. The junk parameter must be NULL and is used // to prevent optimization. static __global__ void GPUSpin(int use_counter, uint64_t duration, uint64_t *block_times, uint32_t *block_smids, uint64_t *junk) { uint64_t i, accumulator = 0; uint64_t start_time = GlobalTimer64(); // Have one thread record the block's start time and SM ID. if (threadIdx.x == 0) { block_times[blockIdx.x * 2] = start_time; block_smids[blockIdx.x] = GetSMID(); } __syncthreads(); if (use_counter) { // Write to the accumulator (which must be potentially returned) to prevent // this loop from being optimized out.
for (i = 0; i < duration; i++) { accumulator += i; } } else { // Wait until the specified number of nanoseconds has elapsed. while ((GlobalTimer64() - start_time) < duration) { continue; } } // Make it look like the junk value can be used to prevent the loop updating // the accumulator from being removed by the optimizer. if (junk) *junk = accumulator; // Have one thread write the block end time (simple, but may be slightly // inaccurate if other warps finish later). if (threadIdx.x == 0) { block_times[blockIdx.x * 2 + 1] = GlobalTimer64(); } } // Frees any data and clears out an ActionConfig struct. For use during // cleanup. static void CleanupAction(ActionConfig *action) { uint64_t *tmp64; uint32_t *tmp32; if (action->label) free(action->label); if (action->type == ACTION_KERNEL) { // For now, only kernel actions require extra cleanup. tmp64 = action->parameters.kernel.device_block_times; if (tmp64) CheckCUDAError(cudaFree(tmp64)); tmp64 = action->parameters.kernel.host_block_times; if (tmp64) CheckCUDAError(cudaFreeHost(tmp64)); tmp32 = action->parameters.kernel.device_smids; if (tmp32) CheckCUDAError(cudaFree(tmp32)); tmp32 = action->parameters.kernel.host_smids; if (tmp32) CheckCUDAError(cudaFreeHost(tmp32)); } memset(action, 0, sizeof(*action)); } // Implements the cleanup function required by the interface, but is also used // internally to clean up during a faulty Initialize(). That's why all of the // pointers are checked to be non-NULL. This is also why it's very important to // ensure that any fields and pointers are zero before any initialization. static void Cleanup(void *data) { TaskState *state = (TaskState *) data; int i; ActionConfig *action = NULL; for (i = 0; i < state->action_count; i++) { action = state->actions + i; CleanupAction(action); } if (state->actions) free(state->actions); if (state->kernel_times) free(state->kernel_times); // The CheckCUDAError macros here are just to print a message on error, since // we can't really do any additional error handling during cleanup. if (state->stream_created) { // Remember that state->stream may be the NULL stream or may be another // reference to this same stream. In either case, we don't need to destroy // it. CheckCUDAError(cudaStreamDestroy(state->copy_out_stream)); } if (state->host_copy_buffer) { CheckCUDAError(cudaFreeHost(state->host_copy_buffer)); } if (state->device_copy_buffer) { CheckCUDAError(cudaFree(state->device_copy_buffer)); } if (state->device_secondary_buffer) { CheckCUDAError(cudaFree(state->device_secondary_buffer)); } for (i = 0; i < state->device_memory_allocation_count; i++) { CheckCUDAError(cudaFree(state->device_memory_allocations[i])); } if (state->device_memory_allocations) free(state->device_memory_allocations); for (i = 0; i < state->host_memory_allocation_count; i++) { CheckCUDAError(cudaFreeHost(state->host_memory_allocations[i])); } if (state->host_memory_allocations) free(state->host_memory_allocations); memset(state, 0, sizeof(*state)); free(state); } // Returns nonzero if all of the keys in the JSON object are in the list of // valid keys. static int VerifyJSONKeys(cJSON *object, const char* const valid_keys[], int valid_count) { int i, found; // We'll be passed a top-level object here.
object = object->child; while (object != NULL) { found = 0; if (!object->string) { printf("Found JSON object without a name in stream_action settings.\n"); return 0; } for (i = 0; i < valid_count; i++) { if (strcmp(object->string, valid_keys[i]) == 0) { found = 1; break; } } if (!found) { printf("Unexpected setting in stream_action.so settings: %s\n", object->string); return 0; } object = object->next; } return 1; } // Takes a cJSON object and returns 1 if it's true, 0 if it's false, and -1 if // it's invalid or not a boolean. Returns -1 if object is NULL. static int GetCJSONBoolean(cJSON *object) { if (!object) return -1; if (object->type == cJSON_True) return 1; if (object->type == cJSON_False) return 0; return -1; } // Since this is such a long string of code, it gets moved into a separate // function. Parses the parameters for a kernel action. Requires the cJSON // *parameters* object for a kernel action, and fills in the KernelParameters. // Returns 0 on error. static int ParseKernelParameters(cJSON *json_parameters, KernelParameters *kernel_config, int default_block_count, int default_thread_count) { cJSON *entry = NULL; // Due to the complexity of this config, this can forestall confusing errors // by pointing out misspelled keys. static const char* const valid_keys[] = { "type", "thread_count", "block_count", "shared_memory_size", "comment", "duration", }; if (!VerifyJSONKeys(json_parameters, valid_keys, sizeof(valid_keys) / sizeof(char*))) { return 0; } // Determine whether the kernel should be a timer spin (constant time) or // counter spin (constant effort). The default is constant time, if the // setting isn't provided. entry = cJSON_GetObjectItem(json_parameters, "type"); if (entry) { if (entry->type != cJSON_String) { printf("Invalid kernel type for kernel action.\n"); return 0; } if (strcmp(entry->valuestring, "timer_spin") == 0) { kernel_config->use_counter_spin = 0; } else if (strcmp(entry->valuestring, "counter_spin") == 0) { kernel_config->use_counter_spin = 1; } else { printf("Unsupported kernel type for kernel action: %s\n", entry->valuestring); return 0; } } else { kernel_config->use_counter_spin = 0; } // Get the one required numerical parameter: duration. entry = cJSON_GetObjectItem(json_parameters, "duration"); if (!entry || (entry->type != cJSON_Number)) { printf("Missing/invalid duration for kernel action.\n"); return 0; } kernel_config->duration = (uint64_t) entry->valuedouble; // Get the block and thread counts, which default to the benchmark setting // if they aren't provided. kernel_config->block_count = default_block_count; entry = cJSON_GetObjectItem(json_parameters, "block_count"); if (entry) { if (entry->type != cJSON_Number) { printf("Invalid block count for kernel action.\n"); return 0; } kernel_config->block_count = entry->valueint; } kernel_config->thread_count = default_thread_count; entry = cJSON_GetObjectItem(json_parameters, "thread_count"); if (entry) { if (entry->type != cJSON_Number) { printf("Invalid thread count for kernel action.\n"); return 0; } kernel_config->thread_count = entry->valueint; } // Unlike the other numbers, the shared_memory_count is optional and needs // validation. 
entry = cJSON_GetObjectItem(json_parameters, "shared_memory_size"); if (entry) { if (entry->type != cJSON_Number) { printf("Invalid shared memory size for kernel action.\n"); return 0; } kernel_config->shared_memory_count = entry->valueint; } else { kernel_config->shared_memory_count = 0; } switch (kernel_config->shared_memory_count) { case 0: case 4096: case 8192: case 10240: break; default: printf("Unsupported shared memory size for kernel action: %d\n", kernel_config->shared_memory_count); return 0; } return 1; } // Parses parameters for the malloc action. Returns 0 on error. static int ParseMallocParameters(cJSON *json_parameters, MallocParameters *malloc_config) { cJSON *entry = NULL; int host = 0; static const char* const valid_keys[] = { "size", "host", "comment", }; if (!VerifyJSONKeys(json_parameters, valid_keys, sizeof(valid_keys) / sizeof(char*))) { return 0; } entry = cJSON_GetObjectItem(json_parameters, "host"); if (entry) { host = GetCJSONBoolean(entry); } if (host < 0) { printf("Invalid host setting for malloc action.\n"); return 0; } malloc_config->allocate_host_memory = host; entry = cJSON_GetObjectItem(json_parameters, "size"); if (!entry || (entry->type != cJSON_Number)) { printf("Missing/invalid size setting for malloc action.\n"); return 0; } malloc_config->size = (uint64_t) entry->valuedouble; return 1; } // Parses the given (optional) parameters for the cudaFree action. Returns 0 // on error. Since the parameters are optional, json_parameters can be NULL. static int ParseFreeParameters(cJSON *json_parameters, FreeParameters *free_config) { cJSON *entry = NULL; int host = 0; static const char* const valid_keys[] = { "host", "comment", }; // The config here is optional. free_config->free_host_memory = 0; if (!json_parameters) return 1; if (!VerifyJSONKeys(json_parameters, valid_keys, sizeof(valid_keys) / sizeof(char*))) { return 0; } entry = cJSON_GetObjectItem(json_parameters, "host"); if (entry) { host = GetCJSONBoolean(entry); } if (host < 0) { printf("Invalid host setting for free action.\n"); return 0; } free_config->free_host_memory = host; return 1; } // Parses JSON parameters for the memset action. Returns 0 on error. static int ParseMemsetParameters(cJSON *json_parameters, MemsetParameters *memset_config) { cJSON *entry = NULL; // The documented default for the "async" setting is true. int async = 1; static const char* const valid_keys[] = { "async", "size", "comment", }; if (!VerifyJSONKeys(json_parameters, valid_keys, sizeof(valid_keys) / sizeof(char*))) { return 0; } entry = cJSON_GetObjectItem(json_parameters, "size"); if (!entry || (entry->type != cJSON_Number)) { printf("Missing/invalid size for memset action.\n"); return 0; } memset_config->size = (uint64_t) entry->valuedouble; entry = cJSON_GetObjectItem(json_parameters, "async"); if (entry) { async = GetCJSONBoolean(entry); } if (async < 0) { printf("Invalid async setting for memset action.\n"); return 0; } memset_config->synchronous = !async; return 1; } // Parses JSON parameters for the memcpy action. Returns 0 on error.
static int ParseMemcpyParameters(cJSON *json_parameters, MemcpyParameters *memcpy_config) { cJSON *entry = NULL; // The documented default for the "async" setting is true. int async = 1; static const char* const valid_keys[] = { "async", "size", "direction", "comment", }; if (!VerifyJSONKeys(json_parameters, valid_keys, sizeof(valid_keys) / sizeof(char*))) { return 0; } entry = cJSON_GetObjectItem(json_parameters, "size"); if (!entry || (entry->type != cJSON_Number)) { printf("Missing/invalid size for memcpy action.\n"); return 0; } memcpy_config->size = (uint64_t) entry->valuedouble; entry = cJSON_GetObjectItem(json_parameters, "async"); if (entry) { async = GetCJSONBoolean(entry); } if (async < 0) { printf("Invalid async setting for memcpy action.\n"); return 0; } memcpy_config->synchronous = !async; entry = cJSON_GetObjectItem(json_parameters, "direction"); if (!entry || (entry->type != cJSON_String)) { printf("Missing/invalid direction for memcpy action.\n"); return 0; } if (strcmp(entry->valuestring, "deviceToHost") == 0) { memcpy_config->direction = cudaMemcpyDeviceToHost; } else if (strcmp(entry->valuestring, "hostToDevice") == 0) { memcpy_config->direction = cudaMemcpyHostToDevice; } else if (strcmp(entry->valuestring, "deviceToDevice") == 0) { memcpy_config->direction = cudaMemcpyDeviceToDevice; } else { printf("Unsupported direction for memcpy action: %s\n", entry->valuestring); return 0; } return 1; } // Parses the JSON parameters for the "synchronize" action. Returns 0 on error. // The json_parameters can be NULL, in which case the sync parameters will take // their default values. static int ParseSyncParameters(cJSON *json_parameters, SyncParameters *sync_config) { cJSON *entry = NULL; static const char* const valid_keys[] = { "device", "comment", }; sync_config->sync_device = 0; if (!json_parameters) return 1; if (!VerifyJSONKeys(json_parameters, valid_keys, sizeof(valid_keys) / sizeof(char*))) { return 0; } entry = cJSON_GetObjectItem(json_parameters, "device"); if (entry) { sync_config->sync_device = GetCJSONBoolean(entry); } if (sync_config->sync_device < 0) { sync_config->sync_device = 0; printf("Invalid device setting for sync action.\n"); return 0; } return 1; } // Parses a JSON action object in order to fill in the given ActionConfig. // Returns 0 on error and 1 on success. May partially initialize action on // error, so the caller may need to clean it up. However, the action type is // guaranteed to be valid if any other fields are set. static int ParseSingleAction(cJSON *object, ActionConfig *action, InitializationParameters *params) { cJSON *entry = NULL; ActionType type = ACTION_UNINITIALIZED; static const char* const valid_keys[] = { "type", "label", "delay", "parameters", "comment", }; // Validate keys to find confusing spelling mistakes that may make a setting // take its default value unintentionally. if (!VerifyJSONKeys(object, valid_keys, sizeof(valid_keys) / sizeof(char*))) { return 0; } // Start with the hardest property: the action's type.
entry = cJSON_GetObjectItem(object, "type"); if (!entry || (entry->type != cJSON_String)) { printf("Missing/invalid action type for stream_action.so.\n"); return 0; } if (strcmp(entry->valuestring, "kernel") == 0) { type = ACTION_KERNEL; } else if (strcmp(entry->valuestring, "malloc") == 0) { type = ACTION_MALLOC; } else if (strcmp(entry->valuestring, "free") == 0) { type = ACTION_FREE; } else if (strcmp(entry->valuestring, "memset") == 0) { type = ACTION_MEMSET; } else if (strcmp(entry->valuestring, "memcpy") == 0) { type = ACTION_MEMCPY; } else if (strcmp(entry->valuestring, "synchronize") == 0) { type = ACTION_SYNC; } else { printf("Unsupported action type for stream_action.so: %s\n", entry->valuestring); return 0; } action->type = type; entry = cJSON_GetObjectItem(object, "label"); if (!entry || (entry->type != cJSON_String)) { printf("Missing/invalid action label for stream_action.so.\n"); return 0; } action->label = strdup(entry->valuestring); if (!action->label) return 0; entry = cJSON_GetObjectItem(object, "delay"); if (entry) { if (entry->type != cJSON_Number) { printf("Invalid delay for stream_action.so.\n"); return 0; } action->delay = entry->valuedouble; } // Last, parse the action-specific parameters. Remember that additional // parameters are optional for some actions, so only ensure that the // parameters are an object if they're non-NULL. entry = cJSON_GetObjectItem(object, "parameters"); if (entry && (entry->type != cJSON_Object)) { printf("Invalid action parameters for stream_action.so.\n"); return 0; } // Get kernel config parsing over with first, since it's the most complex. if (type == ACTION_KERNEL) { if (!entry) { printf("Missing kernel parameters for stream_action.so.\n"); return 0; } if (!ParseKernelParameters(entry, &(action->parameters.kernel), params->block_count, params->thread_count)) { return 0; } } if (type == ACTION_MALLOC) { if (!entry) { printf("Missing malloc parameters for stream_action.so.\n"); return 0; } if (!ParseMallocParameters(entry, &(action->parameters.malloc))) return 0; } if (type == ACTION_FREE) { // It's okay for "entry" to be NULL here. if (!ParseFreeParameters(entry, &(action->parameters.free))) return 0; } if (type == ACTION_MEMSET) { if (!entry) { printf("Missing memset parameters for stream_action.so.\n"); return 0; } if (!ParseMemsetParameters(entry, &(action->parameters.memset))) return 0; } if (type == ACTION_MEMCPY) { if (!entry) { printf("Missing memcpy parameters for stream_action.so.\n"); return 0; } if (!ParseMemcpyParameters(entry, &(action->parameters.memcpy))) return 0; } if (type == ACTION_SYNC) { // It's okay for "entry" to be NULL here, too. if (!ParseSyncParameters(entry, &(action->parameters.sync))) return 0; } return 1; } // Takes a TaskState struct to be initialized and a JSON configuration string. // Parses the JSON configuration and fills the appropriate fields in the state // struct. The stream_priority value is needed, because this function will // create the CUDA stream if the use_null_stream setting is not true. Returns 0 // on error. 
static int ParseParameters(TaskState *state, InitializationParameters *params) { cJSON *json_root = NULL; cJSON *list_head = NULL; cJSON *entry = NULL; ActionConfig *actions = NULL; ActionConfig *action = NULL; int i = 0, action_count = 0, use_null_stream = 0; static const char* const valid_keys[] = { "actions", "use_null_stream", "comment", }; json_root = cJSON_Parse(params->additional_info); if (!json_root || (json_root->type != cJSON_Object)) { printf("Missing/invalid additional_info for stream_action.so.\n"); goto ErrorCleanup; } if (!VerifyJSONKeys(json_root, valid_keys, sizeof(valid_keys) / sizeof(char*))) { goto ErrorCleanup; } // First, check for the "use_null_stream" setting. entry = cJSON_GetObjectItem(json_root, "use_null_stream"); if (entry) use_null_stream = GetCJSONBoolean(entry); if (use_null_stream < 0) { printf("Invalid use_null_stream setting in stream_action.so.\n"); goto ErrorCleanup; } // Always use a user-defined stream for copy_out operations. if (!CheckCUDAError(CreateCUDAStreamWithPriority(params->stream_priority, &(state->copy_out_stream)))) { goto ErrorCleanup; } state->stream_created = 1; // If the NULL stream wasn't specified, then use the user-defined stream // for all other operations, too. if (use_null_stream) { state->stream = 0; } else { state->stream = state->copy_out_stream; } // Get the actions list, ensuring it's an array with at least one element. list_head = cJSON_GetObjectItem(json_root, "actions"); if (!list_head || (list_head->type != cJSON_Array) || !list_head->child) { printf("Missing/invalid list of actions for stream_action.so.\n"); goto ErrorCleanup; } // Count the number of actions in the list. entry = list_head->child; action_count = 1; while (entry->next) { action_count++; entry = entry->next; } // Allocate and initialize the internal list of ActionConfig structs. actions = (ActionConfig *) calloc(action_count, sizeof(*actions)); if (!actions) goto ErrorCleanup; entry = list_head->child; for (i = 0; i < action_count; i++) { action = actions + i; if (!ParseSingleAction(entry, action, params)) goto ErrorCleanup; entry = entry->next; } // Clean up and return success. state->actions = actions; state->action_count = action_count; cJSON_Delete(json_root); return 1; ErrorCleanup: if (json_root) cJSON_Delete(json_root); if (actions) { for (i = 0; i < action_count; i++) { CleanupAction(actions + i); } free(actions); } return 0; } // Allocates buffers needed by a single kernel action. Returns 0 on error. static int AllocateKernelActionMemory(KernelParameters *parameters) { size_t block_times_size = 2 * parameters->block_count * sizeof(uint64_t); size_t smids_size = parameters->block_count * sizeof(uint32_t); if (!CheckCUDAError(cudaMalloc(&parameters->device_block_times, block_times_size))) { return 0; } if (!CheckCUDAError(cudaMalloc(&parameters->device_smids, smids_size))) { return 0; } if (!CheckCUDAError(cudaMallocHost(&parameters->host_block_times, block_times_size))) { return 0; } if (!CheckCUDAError(cudaMallocHost(&parameters->host_smids, smids_size))) { return 0; } return 1; } // Preallocates a set of buffers so that a limited number of free actions don't // necessarily need to follow malloc actions. Returns 0 on error. 
static int PreallocateFreeActionBuffers(TaskState *state) { int i; uint8_t **dest = NULL; for (i = 0; i < INITIAL_ALLOCATION_COUNT; i++) { dest = state->device_memory_allocations + i; if (!CheckCUDAError(cudaMalloc(dest, INITIAL_ALLOCATION_SIZE))) { return 0; } // Increment these values one step at a time, so they can be cleaned up // properly if one of the later allocations fails. state->device_memory_allocation_count++; dest = state->host_memory_allocations + i; if (!CheckCUDAError(cudaMallocHost(dest, INITIAL_ALLOCATION_SIZE))) { return 0; } state->host_memory_allocation_count++; } return 1; } // Takes a TaskState after fully parsing InitializationParameters (i.e. the // actions list is populated). Allocates necessary buffers for kernel actions, // memory sets and copies, holding pointers for malloc actions, and buffers of // data to report to the calling process during copy_out. Returns 0 on error. static int AllocateMemory(TaskState *state) { int i; uint64_t current_size; uint64_t max_size = 0; int secondary_buffer_needed = 0; int malloc_action_exists = 0; int kernel_count = 0; ActionConfig *action = NULL; // Collect aggregate information about all actions, and allocate the kernel // action's buffers while we're at it. for (i = 0; i < state->action_count; i++) { action = state->actions + i; switch (action->type) { case ACTION_KERNEL: kernel_count++; if (!AllocateKernelActionMemory(&(action->parameters.kernel))) { return 0; } break; case ACTION_MALLOC: malloc_action_exists = 1; break; case ACTION_FREE: malloc_action_exists = 1; break; case ACTION_MEMSET: current_size = action->parameters.memset.size; if (current_size > max_size) max_size = current_size; break; case ACTION_MEMCPY: current_size = action->parameters.memcpy.size; if (current_size > max_size) max_size = current_size; if (action->parameters.memcpy.direction == cudaMemcpyDeviceToDevice) { secondary_buffer_needed = 1; } break; default: break; } } // Start by allocating device memory. if (!CheckCUDAError(cudaMalloc(&state->device_copy_buffer, max_size))) { return 0; } // Only allocate a second device buffer if a device-to-device memcpy action // is present. if (secondary_buffer_needed) { if (!CheckCUDAError(cudaMalloc(&state->device_secondary_buffer, max_size))) { return 0; } } // Now allocate host memory. if (!CheckCUDAError(cudaMallocHost(&state->host_copy_buffer, max_size))) { return 0; } if (malloc_action_exists) { state->device_memory_allocations = (uint8_t**) calloc( MAX_MEMORY_ALLOCATION_COUNT, sizeof(uint8_t*)); if (!state->device_memory_allocations) { printf("Failed allocating list of device memory allocation pointers.\n"); return 0; } state->host_memory_allocations = (uint8_t**) calloc( MAX_MEMORY_ALLOCATION_COUNT, sizeof(uint8_t*)); if (!state->host_memory_allocations) { printf("Failed allocating list of host memory allocation pointers.\n"); return 0; } if (!PreallocateFreeActionBuffers(state)) return 0; } // Any pointers contained in the individual KernelTimes entries are simply // copied from KernelParameters structs after execution--they don't need to // be allocated here. state->kernel_times = (KernelTimes*) calloc(kernel_count, sizeof(KernelTimes)); if (!state->kernel_times) { printf("Failed allocating list of kernel times.\n"); return 0; } state->kernel_count = kernel_count; return 1; } // Initializes the task's kernel_times array. Must be called after memory // allocation. This is done once because most of the fields in the kernel_times // array never change, apart from cuda_launch_times. Returns 0 on error.
static int InitializeKernelTimes(TaskState *state) { int i; int kernel_index = 0; KernelTimes *current_times = NULL; ActionConfig *action = NULL; KernelParameters *params = NULL; for (i = 0; i < state->action_count; i++) { action = state->actions + i; if (action->type != ACTION_KERNEL) continue; params = &(action->parameters.kernel); current_times = state->kernel_times + kernel_index; current_times->kernel_name = action->label; current_times->block_count = params->block_count; current_times->thread_count = params->thread_count; current_times->shared_memory = params->shared_memory_count * 4; current_times->block_times = params->host_block_times; current_times->block_smids = params->host_smids; kernel_index++; } return 1; } static void* Initialize(InitializationParameters *params) { TaskState *state = NULL; state = (TaskState *) malloc(sizeof(*state)); if (!state) { printf("Error allocating memory for stream_action task state.\n"); return NULL; } memset(state, 0, sizeof(*state)); if (!CheckCUDAError(cudaSetDevice(params->cuda_device))) { Cleanup(state); return NULL; } // Parse the configuration string, initialize the action configs, and create // the CUDA stream (if a non-NULL stream is used). if (!ParseParameters(state, params)) { Cleanup(state); return NULL; } if (!AllocateMemory(state)) { Cleanup(state); return NULL; } if (!InitializeKernelTimes(state)) { Cleanup(state); return NULL; } return state; } // Nothing needs to be copied to the GPU at this stage in the benchmark. static int CopyIn(void *data) { return 1; } // Copies device data to host buffers for a single kernel action. Requires a // stream on which the copy should run. Returns 0 on error. static int CopyKernelActionMemoryOut(KernelParameters *kernel, cudaStream_t stream) { size_t block_times_size = 2 * kernel->block_count * sizeof(uint64_t); size_t block_smids_size = kernel->block_count * sizeof(uint32_t); if (!CheckCUDAError(cudaMemcpyAsync(kernel->host_block_times, kernel->device_block_times, block_times_size, cudaMemcpyDeviceToHost, stream))) { return 0; } if (!CheckCUDAError(cudaMemcpyAsync(kernel->host_smids, kernel->device_smids, block_smids_size, cudaMemcpyDeviceToHost, stream))) { return 0; } return 1; } // Provides the caller with information about the kernel actions. static int CopyOut(void *data, TimingInformation *times) { TaskState *state = (TaskState *) data; int i; for (i = 0; i < state->action_count; i++) { if (state->actions[i].type != ACTION_KERNEL) continue; if (!CopyKernelActionMemoryOut(&(state->actions[i].parameters.kernel), state->copy_out_stream)) { return 0; } } if (!CheckCUDAError(cudaStreamSynchronize(state->copy_out_stream))) return 0; // The kernel_times structs were already filled in with the correct pointers // during initialization, and the cuda_launch_times were filled in during the // execute phase. So now, all that needs to be done is provide the correct // pointer. times->kernel_count = state->kernel_count; times->kernel_info = state->kernel_times; times->resulting_data_size = 0; times->resulting_data = NULL; return 1; } // Executes a kernel action. Requires the index of the kernel action (its order // relative only to other kernel actions) in order to fill in the CUDA launch // times in the correct entry in the kernel_times array. Returns 0 on error. 
static int ExecuteKernelAction(TaskState *state, KernelParameters *params, int kernel_index) { KernelTimes *kernel_time = state->kernel_times + kernel_index; kernel_time->cuda_launch_times[0] = CurrentSeconds(); switch (params->shared_memory_count) { case 0: GPUSpin<<<params->block_count, params->thread_count, 0, state->stream>>>( params->use_counter_spin, params->duration, params->device_block_times, params->device_smids, NULL); break; case 4096: SharedMemGPUSpin_4096<<<params->block_count, params->thread_count, 0, state->stream>>>(params->use_counter_spin, params->duration, params->device_block_times, params->device_smids, NULL); break; case 8192: SharedMemGPUSpin_8192<<<params->block_count, params->thread_count, 0, state->stream>>>(params->use_counter_spin, params->duration, params->device_block_times, params->device_smids, NULL); break; case 10240: SharedMemGPUSpin_10240<<<params->block_count, params->thread_count, 0, state->stream>>>(params->use_counter_spin, params->duration, params->device_block_times, params->device_smids, NULL); break; default: printf("Unsupported kernel shared memory count: %d\n", params->shared_memory_count); return 0; } // Record the time after the kernel launch returns, but we don't know when // synchronization will complete in this benchmark, so set that entry to 0. kernel_time->cuda_launch_times[1] = CurrentSeconds(); kernel_time->cuda_launch_times[2] = 0; return 1; } // Executes a malloc action. Returns 0 on error. static int ExecuteMallocAction(TaskState *state, MallocParameters *params) { int next_index = 0; uint8_t **destination = NULL; if (params->allocate_host_memory) { next_index = state->host_memory_allocation_count; } else { next_index = state->device_memory_allocation_count; } if (next_index >= MAX_MEMORY_ALLOCATION_COUNT) { printf("Can't execute malloc action: too many unfreed %s allocations.\n", params->allocate_host_memory ? "host" : "device"); return 0; } if (params->allocate_host_memory) { destination = state->host_memory_allocations + next_index; if (!CheckCUDAError(cudaMallocHost(destination, params->size))) return 0; state->host_memory_allocation_count++; return 1; } destination = state->device_memory_allocations + next_index; if (!CheckCUDAError(cudaMalloc(destination, params->size))) return 0; state->device_memory_allocation_count++; return 1; } // Executes a free action. Returns 0 on error. static int ExecuteFreeAction(TaskState *state, FreeParameters *params) { if (params->free_host_memory) { if (state->host_memory_allocation_count == 0) { printf("Can't execute free action: No host memory allocations.\n"); return 0; } state->host_memory_allocation_count--; if (!CheckCUDAError(cudaFreeHost(state->host_memory_allocations[ state->host_memory_allocation_count]))) { return 0; } return 1; } if (state->device_memory_allocation_count == 0) { printf("Can't execute free action: No device memory allocations.\n"); return 0; } state->device_memory_allocation_count--; if (!CheckCUDAError(cudaFree(state->device_memory_allocations[ state->device_memory_allocation_count]))) { return 0; } return 1; } // Executes a memset action. Fills a device buffer with a random value. Returns // 0 on error. 
static int ExecuteMemsetAction(TaskState *state, MemsetParameters *params) { if (params->synchronous) { if (!CheckCUDAError(cudaMemset(state->device_copy_buffer, rand(), params->size))) { return 0; } return 1; } if (!CheckCUDAError(cudaMemsetAsync(state->device_copy_buffer, rand(), params->size, state->stream))) { return 0; } return 1; } // Executes a memcpy action. Returns 0 on error. static int ExecuteMemcpyAction(TaskState *state, MemcpyParameters *params) { uint8_t *src = NULL; uint8_t *dest = NULL; switch (params->direction) { case cudaMemcpyDeviceToDevice: src = state->device_copy_buffer; dest = state->device_secondary_buffer; break; case cudaMemcpyDeviceToHost: src = state->device_copy_buffer; dest = state->host_copy_buffer; break; case cudaMemcpyHostToDevice: src = state->host_copy_buffer; dest = state->device_copy_buffer; break; default: printf("Unsupported direction for memcpy action: %d\n", (int) params->direction); return 0; } if (params->synchronous) { if (!CheckCUDAError(cudaMemcpy(dest, src, params->size, params->direction))) { return 0; } return 1; } if (!CheckCUDAError(cudaMemcpyAsync(dest, src, params->size, params->direction, state->stream))) { return 0; } return 1; } // Executes a synchronization action. Returns 0 on error. static int ExecuteSyncAction(TaskState *state, SyncParameters *params) { if (params->sync_device) { if (!CheckCUDAError(cudaDeviceSynchronize())) return 0; return 1; } if (!CheckCUDAError(cudaStreamSynchronize(state->stream))) return 0; return 1; } // Sleeps for at least the given number of seconds, with a microsecond // granularity. static void SleepSeconds(double seconds) { uint64_t to_sleep = (uint64_t) (seconds * 1e6); usleep(to_sleep); } // Executes each action in the order it appears in the list. static int Execute(void *data) { TaskState *state = (TaskState *) data; ActionConfig *action = NULL; int kernel_index = 0; int i; for (i = 0; i < state->action_count; i++) { action = state->actions + i; if (action->delay > 0.0) { SleepSeconds(state->actions[i].delay); } switch (action->type) { case ACTION_KERNEL: if (!ExecuteKernelAction(state, &(action->parameters.kernel), kernel_index)) { return 0; } kernel_index++; break; case ACTION_MALLOC: if (!ExecuteMallocAction(state, &(action->parameters.malloc))) { return 0; } break; case ACTION_FREE: if (!ExecuteFreeAction(state, &(action->parameters.free))) { return 0; } break; case ACTION_MEMSET: if (!ExecuteMemsetAction(state, &(action->parameters.memset))) { return 0; } break; case ACTION_MEMCPY: if (!ExecuteMemcpyAction(state, &(action->parameters.memcpy))) { return 0; } break; case ACTION_SYNC: if (!ExecuteSyncAction(state, &(action->parameters.sync))) { return 0; } break; default: printf("Attempted to execute invalid action: %d\n", action->type); return 0; } } if (!CheckCUDAError(cudaStreamSynchronize(state->stream))) return 0; return 1; } static const char* GetName(void) { return "Sequential action execution"; } int RegisterFunctions(BenchmarkLibraryFunctions *functions) { functions->initialize = Initialize; functions->copy_in = CopyIn; functions->execute = Execute; functions->copy_out = CopyOut; functions->cleanup = Cleanup; functions->get_name = GetName; return 1; }
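// Example additional_info configuration (illustrative values only), following
// the format documented at the top of this file: a kernel using 4096 shared
// 32-bit integers, an asynchronous device-to-host copy, and an explicit
// stream synchronization.
//
//   "additional_info": {
//     "use_null_stream": false,
//     "actions": [
//       {"type": "kernel", "label": "Spin 1", "parameters": {
//         "type": "timer_spin", "duration": 1000000000,
//         "shared_memory_size": 4096}},
//       {"type": "memcpy", "label": "Copy 1", "parameters": {
//         "async": true, "size": 4096, "direction": "deviceToHost"}},
//       {"type": "synchronize", "label": "Sync 1"}
//     ]
//   }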
bf2335c7c79b32287f3cef2ec6326bc55f8f625b.hip
// !!! This is a file automatically generated by hipify!!! #include <cmath> #include <random> #include <tuple> #include <vector> #include <iostream> #include <ctc.h> #include "test.h" bool small_test() { const int alphabet_size = 5; const int T = 2; std::vector<float> activations = {0.1f, 0.6f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.6f, 0.1f, 0.1f}; // Calculate the score analytically float expected_score; { std::vector<float> probs(activations.size()); softmax(activations.data(), alphabet_size, T, probs.data()); // Score calculation is specific to the given activations above expected_score = probs[1] * probs[7]; } hipStream_t stream; throw_on_error(hipStreamCreate(&stream), "hipStreamCreate"); float *activations_gpu; throw_on_error(hipMalloc(&activations_gpu, activations.size() * sizeof(float)), "hipMalloc"); throw_on_error(hipMemcpyAsync(activations_gpu, activations.data(), activations.size() * sizeof(float), hipMemcpyHostToDevice, stream), "hipMemcpyAsync"); std::vector<int> labels = {1, 2}; std::vector<int> label_lengths = {2}; std::vector<int> lengths; lengths.push_back(T); float score; ctcOptions options{}; options.loc = CTC_GPU; options.stream = stream; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), options, &gpu_alloc_bytes), "Error: get_workspace_size in small_test"); char *ctc_gpu_workspace; throw_on_error(hipMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "hipMalloc"); throw_on_error(compute_ctc_loss(activations_gpu, nullptr, labels.data(), label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), &score, ctc_gpu_workspace, options), "Error: compute_ctc_loss in small_test"); score = ::exp(-score); const float eps = 1e-6; const float lb = expected_score - eps; const float ub = expected_score + eps; throw_on_error(hipFree(activations_gpu), "hipFree"); throw_on_error(hipFree(ctc_gpu_workspace), "hipFree"); throw_on_error(hipStreamDestroy(stream), "hipStreamDestroy"); return (score > lb && score < ub); } int offset(int t, int n, int a) { constexpr int minibatch = 2; constexpr int alphabet_size = 6; return (t * minibatch + n) * alphabet_size + a; } bool options_test() { const int alphabet_size = 6; const int T = 5; const int minibatch = 2; std::vector<float> activations = {0.633766f, 0.221185f, 0.0917319f, 0.0129757f, 0.0142857f, 0.0260553f, 0.30176f, 0.28562f, 0.0831517f, 0.0862751f, 0.0816851f, 0.161508f, 0.111121f, 0.588392f, 0.278779f, 0.0055756f, 0.00569609f, 0.010436f, 0.24082f, 0.397533f, 0.0557226f, 0.0546814f, 0.0557528f, 0.19549f, 0.0357786f, 0.633813f, 0.321418f, 0.00249248f, 0.00272882f, 0.0037688f, 0.230246f, 0.450868f, 0.0389607f, 0.038309f, 0.0391602f, 0.202456f, 0.0663296f, 0.643849f, 0.280111f, 0.00283995f, 0.0035545f, 0.00331533f, 0.280884f, 0.429522f, 0.0326593f, 0.0339046f, 0.0326856f, 0.190345f, 0.458235f, 0.396634f, 0.123377f, 0.00648837f, 0.00903441f, 0.00623107f, 0.423286f, 0.315517f, 0.0338439f, 0.0393744f, 0.0339315f, 0.154046f}; std::vector<float> expected_grads = // from tensorflow {-0.366234f, 0.221185f, 0.0917319f, 0.0129757f, 0.0142857f, 0.0260553f, -0.69824f, 0.28562f, 0.0831517f, 0.0862751f, 0.0816851f, 0.161508f, 0.111121f, -0.411608f, 0.278779f, 0.0055756f, 0.00569609f, 0.010436f, 0.24082f, -0.602467f, 0.0557226f, 0.0546814f, 0.0557528f, 0.19549f, 0.0357786f, 0.633813f, -0.678582f, 0.00249248f, 0.00272882f, 0.0037688f, 0.230246f, 0.450868f, 0.0389607f, 0.038309f, 0.0391602f, -0.797544f, 0.0663296f, -0.356151f, 0.280111f, 0.00283995f, 0.0035545f, 0.00331533f, 0.280884f, 
-0.570478f, 0.0326593f, 0.0339046f, 0.0326856f, 0.190345f, -0.541765f, 0.396634f, 0.123377f, 0.00648837f, 0.00903441f, 0.00623107f, -0.576714f, 0.315517f, 0.0338439f, 0.0393744f, 0.0339315f, 0.154046f}; // Calculate the expected scores analytically auto& a = activations; double expected_score[2]; expected_score[0] = -::log(a[offset(0, 0, 0)] * a[offset(1, 0, 1)] * a[offset(2, 0, 2)] * a[offset(3, 0, 1)] * a[offset(4, 0, 0)]); expected_score[1] = 5.42262f; // from tensorflow // now take the log to account for the softmax for (auto& a : activations) { a = ::log(a); } hipStream_t stream; throw_on_error(hipStreamCreate(&stream), "hipStreamCreate"); float *activations_gpu; throw_on_error(hipMalloc(&activations_gpu, activations.size() * sizeof(float)), "hipMalloc"); throw_on_error(hipMemcpyAsync(activations_gpu, activations.data(), activations.size() * sizeof(float), hipMemcpyHostToDevice, stream), "hipMemcpyAsync"); std::vector<int> labels = {0, 1, 2, 1, 0, 0, 1, 1, 0}; std::vector<int> label_lengths = {5, 4}; std::vector<int> lengths = {5, 5}; float score[2]; float *grads_gpu; throw_on_error(hipMalloc(&grads_gpu, (alphabet_size * T * minibatch) * sizeof(float)), "hipMalloc"); ctcOptions options{}; options.loc = CTC_GPU; options.stream = stream; options.blank_label = 5; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), options, &gpu_alloc_bytes), "Error: get_workspace_size in options_test"); char *ctc_gpu_workspace; throw_on_error(hipMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "hipMalloc"); throw_on_error(compute_ctc_loss(activations_gpu, grads_gpu, labels.data(), label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), &score[0], ctc_gpu_workspace, options), "Error: compute_ctc_loss in options_test"); std::vector<float> grads(alphabet_size * T * minibatch); throw_on_error(hipMemcpyAsync(grads.data(), grads_gpu, grads.size() * sizeof(float), hipMemcpyDeviceToHost, stream), "hipMemcpyAsync"); throw_on_error(hipStreamSynchronize(stream), "hipStreamSynchronize"); throw_on_error(hipFree(activations_gpu), "hipFree"); throw_on_error(hipFree(ctc_gpu_workspace), "hipFree"); throw_on_error(hipStreamDestroy(stream), "hipStreamDestroy"); const double eps = 1e-4; bool result = true; for (int i = 0; i < grads.size(); i++) { const double lb = expected_grads[i] - eps; const double ub = expected_grads[i] + eps; if (!(grads[i] > lb && grads[i] < ub)) { std::cerr << "grad mismatch in options_test" << " expected grad: " << expected_grads[i] << " calculated score: " << grads[i] << " !(" << lb << " < " << grads[i] << " < " << ub << ")" << std::endl; result = false; } } for (int i = 0; i < 2; i++) { const double lb = expected_score[i] - eps; const double ub = expected_score[i] + eps; if (!(score[i] > lb && score[i] < ub)) { std::cerr << "score mismatch in options_test" << " expected score: " << expected_score[i] << " calculated score: " << score[i] << std::endl; result = false; } } return result; } bool inf_test() { const int alphabet_size = 15; const int T = 50; const int L = 10; const int minibatch = 1; std::vector<int> labels = genLabels(alphabet_size, L); labels[0] = 2; std::vector<int> label_lengths = {L}; std::vector<float> acts = genActs(alphabet_size * T * minibatch); for (int i = 0; i < T; ++i) acts[alphabet_size * i + 2] = -1e30; hipStream_t stream; throw_on_error(hipStreamCreate(&stream), "hipStreamCreate"); float *acts_gpu; throw_on_error(hipMalloc(&acts_gpu, acts.size() * sizeof(float)), "hipMalloc"); 
throw_on_error(hipMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), hipMemcpyHostToDevice, stream), "hipMemcpyAsync"); std::vector<int> lengths; lengths.push_back(T); float *grads_gpu; throw_on_error(hipMalloc(&grads_gpu, (alphabet_size * T) * sizeof(float)), "hipMalloc"); float cost; ctcOptions options{}; options.loc = CTC_GPU; options.stream = stream; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), options, &gpu_alloc_bytes), "Error: get_workspace_size in inf_test"); char *ctc_gpu_workspace; throw_on_error(hipMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "hipMalloc"); throw_on_error(compute_ctc_loss(acts_gpu, grads_gpu, labels.data(), label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), &cost, ctc_gpu_workspace, options), "Error: compute_ctc_loss in inf_test"); bool status = std::isinf(cost); std::vector<float> grads(alphabet_size * T); throw_on_error(hipMemcpyAsync(grads.data(), grads_gpu, grads.size() * sizeof(float), hipMemcpyDeviceToHost, stream), "hipMemcpyAsync"); throw_on_error(hipStreamSynchronize(stream), "hipStreamSynchronize"); for (int i = 0; i < alphabet_size * T; ++i) status &= !std::isnan(grads[i]); throw_on_error(hipFree(acts_gpu), "hipFree"); throw_on_error(hipFree(grads_gpu), "hipFree"); throw_on_error(hipFree(ctc_gpu_workspace), "hipFree"); throw_on_error(hipStreamDestroy(stream), "hipStreamDestroy"); return status; } float grad_check(int T, int alphabet_size, std::vector<float>& acts, const std::vector<std::vector<int>>& labels, const std::vector<int>& lengths) { float epsilon = 1e-2; const int minibatch = labels.size(); hipStream_t stream; throw_on_error(hipStreamCreate(&stream), "hipStreamCreate"); float *acts_gpu; throw_on_error(hipMalloc(&acts_gpu, acts.size() * sizeof(float)), "hipMalloc"); throw_on_error(hipMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), hipMemcpyHostToDevice, stream), "hipMemcpyAsync"); std::vector<int> flat_labels; std::vector<int> label_lengths; for (const auto& l : labels) { flat_labels.insert(flat_labels.end(), l.begin(), l.end()); label_lengths.push_back(l.size()); } std::vector<float> costs(minibatch); float *grads_gpu; throw_on_error(hipMalloc(&grads_gpu, acts.size() * sizeof(float)), "hipMalloc"); ctcOptions options{}; options.loc = CTC_GPU; options.stream = stream; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), options, &gpu_alloc_bytes), "Error: get_workspace_size in grad_check"); char *ctc_gpu_workspace; throw_on_error(hipMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "hipMalloc"); throw_on_error(compute_ctc_loss(acts_gpu, grads_gpu, flat_labels.data(), label_lengths.data(), lengths.data(), alphabet_size, minibatch, costs.data(), ctc_gpu_workspace, options), "Error: compute_ctc_loss (0) in grad_check"); std::vector<float> grads(acts.size()); throw_on_error(hipMemcpyAsync(grads.data(), grads_gpu, grads.size() * sizeof(float), hipMemcpyDeviceToHost, stream), "hipMemcpyAsync"); throw_on_error(hipStreamSynchronize(stream), "hipStreamSynchronize"); std::vector<float> num_grad(grads.size()); //perform 2nd order central differencing for (int i = 0; i < T * alphabet_size * minibatch; ++i) { acts[i] += epsilon; throw_on_error(hipMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), hipMemcpyHostToDevice, stream), "hipMemcpyAsync"); std::vector<float> costsP1(minibatch); std::vector<float> costsP2(minibatch); 
throw_on_error(compute_ctc_loss(acts_gpu, NULL, flat_labels.data(), label_lengths.data(), lengths.data(), alphabet_size, minibatch, costsP1.data(), ctc_gpu_workspace, options), "Error: compute_ctc_loss (1) in grad_check"); acts[i] -= 2 * epsilon; throw_on_error(hipMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), hipMemcpyHostToDevice, stream), "hipMemcpyAsync"); throw_on_error(compute_ctc_loss(acts_gpu, NULL, flat_labels.data(), label_lengths.data(), lengths.data(), alphabet_size, minibatch, costsP2.data(), ctc_gpu_workspace, options), "Error: compute_ctc_loss (2) in grad_check"); float costP1 = std::accumulate(costsP1.begin(), costsP1.end(), 0.); float costP2 = std::accumulate(costsP2.begin(), costsP2.end(), 0.); acts[i] += epsilon; num_grad[i] = (costP1 - costP2) / (2 * epsilon); } float diff = rel_diff(grads, num_grad); throw_on_error(hipFree(acts_gpu), "hipFree"); throw_on_error(hipFree(grads_gpu), "hipFree"); throw_on_error(hipFree(ctc_gpu_workspace), "hipFree"); throw_on_error(hipStreamDestroy(stream), "hipStreamDestroy"); return diff; } bool run_tests() { std::vector<std::tuple<int, int, int, int, float>> problem_sizes = { std::make_tuple(28, 50, 15, 1, 1e-5) }; bool status = true; for (auto problem : problem_sizes) { int alphabet_size, T, L, minibatch; float tol; std::tie(alphabet_size, T, L, minibatch, tol) = problem; std::vector<float> acts = genActs(alphabet_size * T * minibatch); std::vector<std::vector<int>> labels; std::vector<int> sizes; for (int mb = 0; mb < minibatch; ++mb) { int actual_length = L; labels.push_back(genLabels(alphabet_size, actual_length)); sizes.push_back(T); } float diff = grad_check(T, alphabet_size, acts, labels, sizes); status &= (diff < tol); } return status; } int main(void) { if (get_warpctc_version() != 2) { std::cerr << "Invalid WarpCTC version." << std::endl; return 1; } std::cout << "Running GPU tests" << std::endl; throw_on_error(hipSetDevice(0), "hipSetDevice"); bool status = true; status &= small_test(); status &= options_test(); status &= inf_test(); status &= run_tests(); if (status) { std::cout << "Tests pass" << std::endl; return 0; } else { std::cout << "Some or all tests fail" << std::endl; return 1; } }
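// Aside (illustration, not part of the test): small_test's expected score is
// probs[1] * probs[7] because the activations are laid out frame-major as
// [T][alphabet_size], so frame t / symbol a sits at index t * alphabet_size + a.
constexpr int act_index(int t, int a, int alphabet_size) {
    return t * alphabet_size + a;
}
static_assert(act_index(0, 1, 5) == 1, "frame 0, label 1 -> probs[1]");
static_assert(act_index(1, 2, 5) == 7, "frame 1, label 2 -> probs[7]");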
bf2335c7c79b32287f3cef2ec6326bc55f8f625b.cu
#include <cmath> #include <random> #include <tuple> #include <vector> #include <iostream> #include <ctc.h> #include "test.h" bool small_test() { const int alphabet_size = 5; const int T = 2; std::vector<float> activations = {0.1f, 0.6f, 0.1f, 0.1f, 0.1f, 0.1f, 0.1f, 0.6f, 0.1f, 0.1f}; // Calculate the score analytically float expected_score; { std::vector<float> probs(activations.size()); softmax(activations.data(), alphabet_size, T, probs.data()); // Score calculation is specific to the given activations above expected_score = probs[1] * probs[7]; } cudaStream_t stream; throw_on_error(cudaStreamCreate(&stream), "cudaStreamCreate"); float *activations_gpu; throw_on_error(cudaMalloc(&activations_gpu, activations.size() * sizeof(float)), "cudaMalloc"); throw_on_error(cudaMemcpyAsync(activations_gpu, activations.data(), activations.size() * sizeof(float), cudaMemcpyHostToDevice, stream), "cudaMemcpyAsync"); std::vector<int> labels = {1, 2}; std::vector<int> label_lengths = {2}; std::vector<int> lengths; lengths.push_back(T); float score; ctcOptions options{}; options.loc = CTC_GPU; options.stream = stream; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), options, &gpu_alloc_bytes), "Error: get_workspace_size in small_test"); char *ctc_gpu_workspace; throw_on_error(cudaMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "cudaMalloc"); throw_on_error(compute_ctc_loss(activations_gpu, nullptr, labels.data(), label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), &score, ctc_gpu_workspace, options), "Error: compute_ctc_loss in small_test"); score = std::exp(-score); const float eps = 1e-6; const float lb = expected_score - eps; const float ub = expected_score + eps; throw_on_error(cudaFree(activations_gpu), "cudaFree"); throw_on_error(cudaFree(ctc_gpu_workspace), "cudaFree"); throw_on_error(cudaStreamDestroy(stream), "cudaStreamDestroy"); return (score > lb && score < ub); } int offset(int t, int n, int a) { constexpr int minibatch = 2; constexpr int alphabet_size = 6; return (t * minibatch + n) * alphabet_size + a; } bool options_test() { const int alphabet_size = 6; const int T = 5; const int minibatch = 2; std::vector<float> activations = {0.633766f, 0.221185f, 0.0917319f, 0.0129757f, 0.0142857f, 0.0260553f, 0.30176f, 0.28562f, 0.0831517f, 0.0862751f, 0.0816851f, 0.161508f, 0.111121f, 0.588392f, 0.278779f, 0.0055756f, 0.00569609f, 0.010436f, 0.24082f, 0.397533f, 0.0557226f, 0.0546814f, 0.0557528f, 0.19549f, 0.0357786f, 0.633813f, 0.321418f, 0.00249248f, 0.00272882f, 0.0037688f, 0.230246f, 0.450868f, 0.0389607f, 0.038309f, 0.0391602f, 0.202456f, 0.0663296f, 0.643849f, 0.280111f, 0.00283995f, 0.0035545f, 0.00331533f, 0.280884f, 0.429522f, 0.0326593f, 0.0339046f, 0.0326856f, 0.190345f, 0.458235f, 0.396634f, 0.123377f, 0.00648837f, 0.00903441f, 0.00623107f, 0.423286f, 0.315517f, 0.0338439f, 0.0393744f, 0.0339315f, 0.154046f}; std::vector<float> expected_grads = // from tensorflow {-0.366234f, 0.221185f, 0.0917319f, 0.0129757f, 0.0142857f, 0.0260553f, -0.69824f, 0.28562f, 0.0831517f, 0.0862751f, 0.0816851f, 0.161508f, 0.111121f, -0.411608f, 0.278779f, 0.0055756f, 0.00569609f, 0.010436f, 0.24082f, -0.602467f, 0.0557226f, 0.0546814f, 0.0557528f, 0.19549f, 0.0357786f, 0.633813f, -0.678582f, 0.00249248f, 0.00272882f, 0.0037688f, 0.230246f, 0.450868f, 0.0389607f, 0.038309f, 0.0391602f, -0.797544f, 0.0663296f, -0.356151f, 0.280111f, 0.00283995f, 0.0035545f, 0.00331533f, 0.280884f, -0.570478f, 0.0326593f, 0.0339046f, 
0.0326856f, 0.190345f, -0.541765f, 0.396634f, 0.123377f, 0.00648837f, 0.00903441f, 0.00623107f, -0.576714f, 0.315517f, 0.0338439f, 0.0393744f, 0.0339315f, 0.154046f}; // Calculate the expected scores analytically auto& a = activations; double expected_score[2]; expected_score[0] = -std::log(a[offset(0, 0, 0)] * a[offset(1, 0, 1)] * a[offset(2, 0, 2)] * a[offset(3, 0, 1)] * a[offset(4, 0, 0)]); expected_score[1] = 5.42262f; // from tensorflow // now take the log to account for the softmax for (auto& a : activations) { a = std::log(a); } cudaStream_t stream; throw_on_error(cudaStreamCreate(&stream), "cudaStreamCreate"); float *activations_gpu; throw_on_error(cudaMalloc(&activations_gpu, activations.size() * sizeof(float)), "cudaMalloc"); throw_on_error(cudaMemcpyAsync(activations_gpu, activations.data(), activations.size() * sizeof(float), cudaMemcpyHostToDevice, stream), "cudaMemcpyAsync"); std::vector<int> labels = {0, 1, 2, 1, 0, 0, 1, 1, 0}; std::vector<int> label_lengths = {5, 4}; std::vector<int> lengths = {5, 5}; float score[2]; float *grads_gpu; throw_on_error(cudaMalloc(&grads_gpu, (alphabet_size * T * minibatch) * sizeof(float)), "cudaMalloc"); ctcOptions options{}; options.loc = CTC_GPU; options.stream = stream; options.blank_label = 5; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), options, &gpu_alloc_bytes), "Error: get_workspace_size in options_test"); char *ctc_gpu_workspace; throw_on_error(cudaMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "cudaMalloc"); throw_on_error(compute_ctc_loss(activations_gpu, grads_gpu, labels.data(), label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), &score[0], ctc_gpu_workspace, options), "Error: compute_ctc_loss in options_test"); std::vector<float> grads(alphabet_size * T * minibatch); throw_on_error(cudaMemcpyAsync(grads.data(), grads_gpu, grads.size() * sizeof(float), cudaMemcpyDeviceToHost, stream), "cudaMemcpyAsync"); throw_on_error(cudaStreamSynchronize(stream), "cudaStreamSynchronize"); throw_on_error(cudaFree(activations_gpu), "cudaFree"); throw_on_error(cudaFree(ctc_gpu_workspace), "cudaFree"); throw_on_error(cudaStreamDestroy(stream), "cudaStreamDestroy"); const double eps = 1e-4; bool result = true; for (int i = 0; i < grads.size(); i++) { const double lb = expected_grads[i] - eps; const double ub = expected_grads[i] + eps; if (!(grads[i] > lb && grads[i] < ub)) { std::cerr << "grad mismatch in options_test" << " expected grad: " << expected_grads[i] << " calculated score: " << grads[i] << " !(" << lb << " < " << grads[i] << " < " << ub << ")" << std::endl; result = false; } } for (int i = 0; i < 2; i++) { const double lb = expected_score[i] - eps; const double ub = expected_score[i] + eps; if (!(score[i] > lb && score[i] < ub)) { std::cerr << "score mismatch in options_test" << " expected score: " << expected_score[i] << " calculated score: " << score[i] << std::endl; result = false; } } return result; } bool inf_test() { const int alphabet_size = 15; const int T = 50; const int L = 10; const int minibatch = 1; std::vector<int> labels = genLabels(alphabet_size, L); labels[0] = 2; std::vector<int> label_lengths = {L}; std::vector<float> acts = genActs(alphabet_size * T * minibatch); for (int i = 0; i < T; ++i) acts[alphabet_size * i + 2] = -1e30; cudaStream_t stream; throw_on_error(cudaStreamCreate(&stream), "cudaStreamCreate"); float *acts_gpu; throw_on_error(cudaMalloc(&acts_gpu, acts.size() * sizeof(float)), "cudaMalloc"); 
throw_on_error(cudaMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), cudaMemcpyHostToDevice, stream), "cudaMemcpyAsync"); std::vector<int> lengths; lengths.push_back(T); float *grads_gpu; throw_on_error(cudaMalloc(&grads_gpu, (alphabet_size * T) * sizeof(float)), "cudaMalloc"); float cost; ctcOptions options{}; options.loc = CTC_GPU; options.stream = stream; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), options, &gpu_alloc_bytes), "Error: get_workspace_size in inf_test"); char *ctc_gpu_workspace; throw_on_error(cudaMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "cudaMalloc"); throw_on_error(compute_ctc_loss(acts_gpu, grads_gpu, labels.data(), label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), &cost, ctc_gpu_workspace, options), "Error: compute_ctc_loss in inf_test"); bool status = std::isinf(cost); std::vector<float> grads(alphabet_size * T); throw_on_error(cudaMemcpyAsync(grads.data(), grads_gpu, grads.size() * sizeof(float), cudaMemcpyDeviceToHost, stream), "cudaMemcpyAsync"); throw_on_error(cudaStreamSynchronize(stream), "cudaStreamSynchronize"); for (int i = 0; i < alphabet_size * T; ++i) status &= !std::isnan(grads[i]); throw_on_error(cudaFree(acts_gpu), "cudaFree"); throw_on_error(cudaFree(grads_gpu), "cudaFree"); throw_on_error(cudaFree(ctc_gpu_workspace), "cudaFree"); throw_on_error(cudaStreamDestroy(stream), "cudaStreamDestroy"); return status; } float grad_check(int T, int alphabet_size, std::vector<float>& acts, const std::vector<std::vector<int>>& labels, const std::vector<int>& lengths) { float epsilon = 1e-2; const int minibatch = labels.size(); cudaStream_t stream; throw_on_error(cudaStreamCreate(&stream), "cudaStreamCreate"); float *acts_gpu; throw_on_error(cudaMalloc(&acts_gpu, acts.size() * sizeof(float)), "cudaMalloc"); throw_on_error(cudaMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), cudaMemcpyHostToDevice, stream), "cudaMemcpyAsync"); std::vector<int> flat_labels; std::vector<int> label_lengths; for (const auto& l : labels) { flat_labels.insert(flat_labels.end(), l.begin(), l.end()); label_lengths.push_back(l.size()); } std::vector<float> costs(minibatch); float *grads_gpu; throw_on_error(cudaMalloc(&grads_gpu, acts.size() * sizeof(float)), "cudaMalloc"); ctcOptions options{}; options.loc = CTC_GPU; options.stream = stream; size_t gpu_alloc_bytes; throw_on_error(get_workspace_size(label_lengths.data(), lengths.data(), alphabet_size, lengths.size(), options, &gpu_alloc_bytes), "Error: get_workspace_size in grad_check"); char *ctc_gpu_workspace; throw_on_error(cudaMalloc(&ctc_gpu_workspace, gpu_alloc_bytes), "cudaMalloc"); throw_on_error(compute_ctc_loss(acts_gpu, grads_gpu, flat_labels.data(), label_lengths.data(), lengths.data(), alphabet_size, minibatch, costs.data(), ctc_gpu_workspace, options), "Error: compute_ctc_loss (0) in grad_check"); std::vector<float> grads(acts.size()); throw_on_error(cudaMemcpyAsync(grads.data(), grads_gpu, grads.size() * sizeof(float), cudaMemcpyDeviceToHost, stream), "cudaMemcpyAsync"); throw_on_error(cudaStreamSynchronize(stream), "cudaStreamSynchronize"); std::vector<float> num_grad(grads.size()); //perform 2nd order central differencing for (int i = 0; i < T * alphabet_size * minibatch; ++i) { acts[i] += epsilon; throw_on_error(cudaMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), cudaMemcpyHostToDevice, stream), "cudaMemcpyAsync"); std::vector<float> costsP1(minibatch); std::vector<float> 
costsP2(minibatch); throw_on_error(compute_ctc_loss(acts_gpu, NULL, flat_labels.data(), label_lengths.data(), lengths.data(), alphabet_size, minibatch, costsP1.data(), ctc_gpu_workspace, options), "Error: compute_ctc_loss (1) in grad_check"); acts[i] -= 2 * epsilon; throw_on_error(cudaMemcpyAsync(acts_gpu, acts.data(), acts.size() * sizeof(float), cudaMemcpyHostToDevice, stream), "cudaMemcpyAsync"); throw_on_error(compute_ctc_loss(acts_gpu, NULL, flat_labels.data(), label_lengths.data(), lengths.data(), alphabet_size, minibatch, costsP2.data(), ctc_gpu_workspace, options), "Error: compute_ctc_loss (2) in grad_check"); float costP1 = std::accumulate(costsP1.begin(), costsP1.end(), 0.); float costP2 = std::accumulate(costsP2.begin(), costsP2.end(), 0.); acts[i] += epsilon; num_grad[i] = (costP1 - costP2) / (2 * epsilon); } float diff = rel_diff(grads, num_grad); throw_on_error(cudaFree(acts_gpu), "cudaFree"); throw_on_error(cudaFree(grads_gpu), "cudaFree"); throw_on_error(cudaFree(ctc_gpu_workspace), "cudaFree"); throw_on_error(cudaStreamDestroy(stream), "cudaStreamDestroy"); return diff; } bool run_tests() { std::vector<std::tuple<int, int, int, int, float>> problem_sizes = { std::make_tuple(28, 50, 15, 1, 1e-5) }; bool status = true; for (auto problem : problem_sizes) { int alphabet_size, T, L, minibatch; float tol; std::tie(alphabet_size, T, L, minibatch, tol) = problem; std::vector<float> acts = genActs(alphabet_size * T * minibatch); std::vector<std::vector<int>> labels; std::vector<int> sizes; for (int mb = 0; mb < minibatch; ++mb) { int actual_length = L; labels.push_back(genLabels(alphabet_size, actual_length)); sizes.push_back(T); } float diff = grad_check(T, alphabet_size, acts, labels, sizes); status &= (diff < tol); } return status; } int main(void) { if (get_warpctc_version() != 2) { std::cerr << "Invalid WarpCTC version." << std::endl; return 1; } std::cout << "Running GPU tests" << std::endl; throw_on_error(cudaSetDevice(0), "cudaSetDevice"); bool status = true; status &= small_test(); status &= options_test(); status &= inf_test(); status &= run_tests(); if (status) { std::cout << "Tests pass" << std::endl; return 0; } else { std::cout << "Some or all tests fail" << std::endl; return 1; } }
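// Standalone illustration of the second-order central difference that
// grad_check applies to each activation: perturb one input by +/- epsilon,
// re-evaluate the total cost, and divide the difference by 2 * epsilon. For
// f(x) = x^2 at x = 3 the numerical estimate should land close to the
// analytic gradient 2x = 6, with O(epsilon^2) error. (Sketch only; this is
// not part of the warp-ctc test suite.)
#include <cstdio>

static double f(double x) { return x * x; }

int main() {
    const double x = 3.0, eps = 1e-2;
    double num_grad = (f(x + eps) - f(x - eps)) / (2.0 * eps);
    std::printf("numeric %.6f vs analytic %.6f\n", num_grad, 2.0 * x);
    return 0;
}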
dacc8d1f83d1521ef8be72fcd1fbaec3b1863e23.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifndef _SCAN_BEST_KERNEL_CU_ #define _SCAN_BEST_KERNEL_CU_ // Define this to more rigorously avoid bank conflicts, // even at the lower (root) levels of the tree // Note that due to the higher addressing overhead, performance // is lower with ZERO_BANK_CONFLICTS enabled. It is provided // as an example. //#define ZERO_BANK_CONFLICTS // 16 banks on G80 #define NUM_BANKS 16 #define LOG_NUM_BANKS 4 #ifdef ZERO_BANK_CONFLICTS #define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2*LOG_NUM_BANKS)) #else #define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS) #endif /////////////////////////////////////////////////////////////////////////////// // Work-efficient compute implementation of scan, one thread per 2 elements // Work-efficient: O(log(n)) steps, and O(n) adds. // Also shared storage efficient: Uses n + n/NUM_BANKS shared memory -- no ping-ponging // Also avoids most bank conflicts using single-element offsets every NUM_BANKS elements. // // In addition, If ZERO_BANK_CONFLICTS is defined, uses // n + n/NUM_BANKS + n/(NUM_BANKS*NUM_BANKS) // shared memory. If ZERO_BANK_CONFLICTS is defined, avoids ALL bank conflicts using // single-element offsets every NUM_BANKS elements, plus additional single-element offsets // after every NUM_BANKS^2 elements. // // Uses a balanced tree type algorithm. See Blelloch, 1990 "Prefix Sums // and Their Applications", or Prins and Chatterjee PRAM course notes: // http://www.cs.unc.edu/~prins/Classes/203/Handouts/pram.pdf // // This work-efficient version is based on the algorithm presented in Guy Blelloch's // excellent paper "Prefix sums and their applications". // http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/scandal/public/papers/CMU-CS-90-190.html // // Pro: Work Efficient, very few bank conflicts (or zero if ZERO_BANK_CONFLICTS is defined) // Con: More instructions to compute bank-conflict-free shared memory addressing, // and slightly more shared memory storage used. 
// template <bool isNP2> __device__ void loadSharedChunkFromMem(float *s_data, const float *g_idata, int n, int baseIndex, int& ai, int& bi, int& mem_ai, int& mem_bi, int& bankOffsetA, int& bankOffsetB) { int thid = threadIdx.x; mem_ai = baseIndex + threadIdx.x; mem_bi = mem_ai + blockDim.x; ai = thid; bi = thid + blockDim.x; // compute spacing to avoid bank conflicts bankOffsetA = CONFLICT_FREE_OFFSET(ai); bankOffsetB = CONFLICT_FREE_OFFSET(bi); // Cache the computational window in shared memory // pad values beyond n with zeros s_data[ai + bankOffsetA] = g_idata[mem_ai]; if (isNP2) // compile-time decision { s_data[bi + bankOffsetB] = (bi < n) ? g_idata[mem_bi] : 0; } else { s_data[bi + bankOffsetB] = g_idata[mem_bi]; } } template <bool isNP2> __device__ void storeSharedChunkToMem(float* g_odata, const float* s_data, int n, int ai, int bi, int mem_ai, int mem_bi, int bankOffsetA, int bankOffsetB) { __syncthreads(); // write results to global memory g_odata[mem_ai] = s_data[ai + bankOffsetA]; if (isNP2) // compile-time decision { if (bi < n) g_odata[mem_bi] = s_data[bi + bankOffsetB]; } else { g_odata[mem_bi] = s_data[bi + bankOffsetB]; } } template <bool storeSum> __device__ void clearLastElement(float* s_data, float *g_blockSums, int blockIndex) { if (threadIdx.x == 0) { int index = (blockDim.x << 1) - 1; index += CONFLICT_FREE_OFFSET(index); if (storeSum) // compile-time decision { // write this block's total sum to the corresponding index in the blockSums array g_blockSums[blockIndex] = s_data[index]; } // zero the last element in the scan so it will propagate back to the front s_data[index] = 0; } } __device__ unsigned int buildSum(float *s_data) { unsigned int thid = threadIdx.x; unsigned int stride = 1; // build the sum in place up the tree for (int d = blockDim.x; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { int i = __mul24(__mul24(2, stride), thid); int ai = i + stride - 1; int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); s_data[bi] += s_data[ai]; } stride *= 2; } return stride; } __device__ void scanRootToLeaves(float *s_data, unsigned int stride) { unsigned int thid = threadIdx.x; // traverse down the tree building the scan in place for (int d = 1; d <= blockDim.x; d *= 2) { stride >>= 1; __syncthreads(); if (thid < d) { int i = __mul24(__mul24(2, stride), thid); int ai = i + stride - 1; int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); float t = s_data[ai]; s_data[ai] = s_data[bi]; s_data[bi] += t; } } } template <bool storeSum> __device__ void prescanBlock(float *data, int blockIndex, float *blockSums) { int stride = buildSum(data); // build the sum in place up the tree clearLastElement<storeSum>(data, blockSums, (blockIndex == 0) ? blockIdx.x : blockIndex); scanRootToLeaves(data, stride); // traverse down tree to build the scan } template <bool storeSum, bool isNP2> __global__ void prescan(float *g_odata, const float *g_idata, float *g_blockSums, int n, int blockIndex, int baseIndex) { int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB; extern __shared__ float s_data[]; // load data into shared memory loadSharedChunkFromMem<isNP2>(s_data, g_idata, n, (baseIndex == 0) ? 
__mul24(blockIdx.x, (blockDim.x << 1)):baseIndex, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB); // scan the data in each block prescanBlock<storeSum>(s_data, blockIndex, g_blockSums); // write results to device memory storeSharedChunkToMem<isNP2>(g_odata, s_data, n, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB); } __global__ void uniformAdd(float *g_data, float *uniforms, int n, int blockOffset, int baseIndex) { __shared__ float uni; if (threadIdx.x == 0) uni = uniforms[blockIdx.x + blockOffset]; unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x; __syncthreads(); // note two adds per thread g_data[address] += uni; if(threadIdx.x + blockDim.x < n) g_data[address + blockDim.x] += uni; } #endif // #ifndef _SCAN_BEST_KERNEL_CU_
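// Quick compile-time check (illustration only, assuming ZERO_BANK_CONFLICTS
// stays undefined as shipped) of the padding CONFLICT_FREE_OFFSET introduces
// with NUM_BANKS = 16: one extra shared-memory slot per 16 data elements, so
// threads whose indices differ by 16 no longer map to the same bank.
static_assert(CONFLICT_FREE_OFFSET(15) == 0, "first 16 indices need no padding");
static_assert(CONFLICT_FREE_OFFSET(16) == 1, "one pad element after 16 indices");
static_assert(CONFLICT_FREE_OFFSET(32) == 2, "two pad elements after 32 indices");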
dacc8d1f83d1521ef8be72fcd1fbaec3b1863e23.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifndef _SCAN_BEST_KERNEL_CU_ #define _SCAN_BEST_KERNEL_CU_ // Define this to more rigorously avoid bank conflicts, // even at the lower (root) levels of the tree // Note that due to the higher addressing overhead, performance // is lower with ZERO_BANK_CONFLICTS enabled. It is provided // as an example. //#define ZERO_BANK_CONFLICTS // 16 banks on G80 #define NUM_BANKS 16 #define LOG_NUM_BANKS 4 #ifdef ZERO_BANK_CONFLICTS #define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS + (index) >> (2*LOG_NUM_BANKS)) #else #define CONFLICT_FREE_OFFSET(index) ((index) >> LOG_NUM_BANKS) #endif /////////////////////////////////////////////////////////////////////////////// // Work-efficient compute implementation of scan, one thread per 2 elements // Work-efficient: O(log(n)) steps, and O(n) adds. // Also shared storage efficient: Uses n + n/NUM_BANKS shared memory -- no ping-ponging // Also avoids most bank conflicts using single-element offsets every NUM_BANKS elements. // // In addition, If ZERO_BANK_CONFLICTS is defined, uses // n + n/NUM_BANKS + n/(NUM_BANKS*NUM_BANKS) // shared memory. If ZERO_BANK_CONFLICTS is defined, avoids ALL bank conflicts using // single-element offsets every NUM_BANKS elements, plus additional single-element offsets // after every NUM_BANKS^2 elements. // // Uses a balanced tree type algorithm. See Blelloch, 1990 "Prefix Sums // and Their Applications", or Prins and Chatterjee PRAM course notes: // http://www.cs.unc.edu/~prins/Classes/203/Handouts/pram.pdf // // This work-efficient version is based on the algorithm presented in Guy Blelloch's // excellent paper "Prefix sums and their applications". // http://www-2.cs.cmu.edu/afs/cs.cmu.edu/project/scandal/public/papers/CMU-CS-90-190.html // // Pro: Work Efficient, very few bank conflicts (or zero if ZERO_BANK_CONFLICTS is defined) // Con: More instructions to compute bank-conflict-free shared memory addressing, // and slightly more shared memory storage used. 
// template <bool isNP2> __device__ void loadSharedChunkFromMem(float *s_data, const float *g_idata, int n, int baseIndex, int& ai, int& bi, int& mem_ai, int& mem_bi, int& bankOffsetA, int& bankOffsetB) { int thid = threadIdx.x; mem_ai = baseIndex + threadIdx.x; mem_bi = mem_ai + blockDim.x; ai = thid; bi = thid + blockDim.x; // compute spacing to avoid bank conflicts bankOffsetA = CONFLICT_FREE_OFFSET(ai); bankOffsetB = CONFLICT_FREE_OFFSET(bi); // Cache the computational window in shared memory // pad values beyond n with zeros s_data[ai + bankOffsetA] = g_idata[mem_ai]; if (isNP2) // compile-time decision { s_data[bi + bankOffsetB] = (bi < n) ? g_idata[mem_bi] : 0; } else { s_data[bi + bankOffsetB] = g_idata[mem_bi]; } } template <bool isNP2> __device__ void storeSharedChunkToMem(float* g_odata, const float* s_data, int n, int ai, int bi, int mem_ai, int mem_bi, int bankOffsetA, int bankOffsetB) { __syncthreads(); // write results to global memory g_odata[mem_ai] = s_data[ai + bankOffsetA]; if (isNP2) // compile-time decision { if (bi < n) g_odata[mem_bi] = s_data[bi + bankOffsetB]; } else { g_odata[mem_bi] = s_data[bi + bankOffsetB]; } } template <bool storeSum> __device__ void clearLastElement(float* s_data, float *g_blockSums, int blockIndex) { if (threadIdx.x == 0) { int index = (blockDim.x << 1) - 1; index += CONFLICT_FREE_OFFSET(index); if (storeSum) // compile-time decision { // write this block's total sum to the corresponding index in the blockSums array g_blockSums[blockIndex] = s_data[index]; } // zero the last element in the scan so it will propagate back to the front s_data[index] = 0; } } __device__ unsigned int buildSum(float *s_data) { unsigned int thid = threadIdx.x; unsigned int stride = 1; // build the sum in place up the tree for (int d = blockDim.x; d > 0; d >>= 1) { __syncthreads(); if (thid < d) { int i = __mul24(__mul24(2, stride), thid); int ai = i + stride - 1; int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); s_data[bi] += s_data[ai]; } stride *= 2; } return stride; } __device__ void scanRootToLeaves(float *s_data, unsigned int stride) { unsigned int thid = threadIdx.x; // traverse down the tree building the scan in place for (int d = 1; d <= blockDim.x; d *= 2) { stride >>= 1; __syncthreads(); if (thid < d) { int i = __mul24(__mul24(2, stride), thid); int ai = i + stride - 1; int bi = ai + stride; ai += CONFLICT_FREE_OFFSET(ai); bi += CONFLICT_FREE_OFFSET(bi); float t = s_data[ai]; s_data[ai] = s_data[bi]; s_data[bi] += t; } } } template <bool storeSum> __device__ void prescanBlock(float *data, int blockIndex, float *blockSums) { int stride = buildSum(data); // build the sum in place up the tree clearLastElement<storeSum>(data, blockSums, (blockIndex == 0) ? blockIdx.x : blockIndex); scanRootToLeaves(data, stride); // traverse down tree to build the scan } template <bool storeSum, bool isNP2> __global__ void prescan(float *g_odata, const float *g_idata, float *g_blockSums, int n, int blockIndex, int baseIndex) { int ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB; extern __shared__ float s_data[]; // load data into shared memory loadSharedChunkFromMem<isNP2>(s_data, g_idata, n, (baseIndex == 0) ? 
__mul24(blockIdx.x, (blockDim.x << 1)):baseIndex, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB); // scan the data in each block prescanBlock<storeSum>(s_data, blockIndex, g_blockSums); // write results to device memory storeSharedChunkToMem<isNP2>(g_odata, s_data, n, ai, bi, mem_ai, mem_bi, bankOffsetA, bankOffsetB); } __global__ void uniformAdd(float *g_data, float *uniforms, int n, int blockOffset, int baseIndex) { __shared__ float uni; if (threadIdx.x == 0) uni = uniforms[blockIdx.x + blockOffset]; unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x; __syncthreads(); // note two adds per thread g_data[address] += uni; if(threadIdx.x + blockDim.x < n) g_data[address + blockDim.x] += uni; } #endif // #ifndef _SCAN_BEST_KERNEL_CU_
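// Hedged host-side sketch (an assumption, not part of this file) of launching
// prescan for a single block over n = 2 * threads elements. storeSum = false
// skips the block-sums array, isNP2 = false assumes n exactly fills the block,
// and the shared-memory size adds one padding element per NUM_BANKS data
// elements, matching CONFLICT_FREE_OFFSET above (ZERO_BANK_CONFLICTS undefined).
void prescanSingleBlockSketch(float *d_out, const float *d_in, int threads) {
    int n = 2 * threads;  // each thread loads and stores two elements
    size_t sharedBytes = (n + n / NUM_BANKS) * sizeof(float);
    prescan<false, false><<<1, threads, sharedBytes>>>(d_out, d_in, NULL, n, 0, 0);
}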
5726f9247bd80933840d70aff072fc417d2b708c.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "CalculateTransSample.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // Allocate XSIZE*YSIZE floats; the generated harness allocated only
            // XSIZE*YSIZE bytes, under-sizing both buffers by sizeof(float).
            float *input = NULL;
            hipMalloc(&input, XSIZE * YSIZE * sizeof(float));
            float *output = NULL;
            hipMalloc(&output, XSIZE * YSIZE * sizeof(float));
            const int wtss = 1;
            const int htss = 1;
            const int wts = 1;
            const int hts = 1;
            const int ratio = 1;
            // Round the problem size up to a multiple of the block dimensions.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);  // no-op free to force context creation before timing
            hipLaunchKernelGGL(CalculateTransSample, gridBlock, threadBlock, 0, 0, input, output, wtss, htss, wts, hts, ratio);
            hipDeviceSynchronize();
            // Warm-up launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(CalculateTransSample, gridBlock, threadBlock, 0, 0, input, output, wtss, htss, wts, hts, ratio);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(CalculateTransSample, gridBlock, threadBlock, 0, 0, input, output, wtss, htss, wts, hts, ratio);
            }
            hipDeviceSynchronize();  // make the timed launches finish before reading the clock
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            hipFree(input);   // release per-configuration buffers (the harness leaked them)
            hipFree(output);
        }
    }
}
5726f9247bd80933840d70aff072fc417d2b708c.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "CalculateTransSample.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            // Allocate XSIZE*YSIZE floats; the generated harness allocated only
            // XSIZE*YSIZE bytes, under-sizing both buffers by sizeof(float).
            float *input = NULL;
            cudaMalloc(&input, XSIZE * YSIZE * sizeof(float));
            float *output = NULL;
            cudaMalloc(&output, XSIZE * YSIZE * sizeof(float));
            const int wtss = 1;
            const int htss = 1;
            const int wts = 1;
            const int hts = 1;
            const int ratio = 1;
            // Round the problem size up to a multiple of the block dimensions.
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);  // no-op free to force context creation before timing
            CalculateTransSample<<<gridBlock, threadBlock>>>(input, output, wtss, htss, wts, hts, ratio);
            cudaDeviceSynchronize();
            // Warm-up launches.
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                CalculateTransSample<<<gridBlock, threadBlock>>>(input, output, wtss, htss, wts, hts, ratio);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                CalculateTransSample<<<gridBlock, threadBlock>>>(input, output, wtss, htss, wts, hts, ratio);
            }
            cudaDeviceSynchronize();  // make the timed launches finish before reading the clock
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            cudaFree(input);   // release per-configuration buffers (the harness leaked them)
            cudaFree(output);
        }
    }
}
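// The two while loops above round XSIZE/YSIZE up to a multiple of the block
// dimensions before dividing; the same grid can be computed directly with
// integer ceiling division (equivalent, just branch-free):
dim3 gridFromCeilDiv(int xsize, int ysize, int blockx, int blocky) {
    return dim3((xsize + blockx - 1) / blockx, (ysize + blocky - 1) / blocky);
}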
85f40283b116bc27242e89a38b2ea63c47bb84a4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Implementing Breadth first search on CUDA using algorithm given in HiPC'07
 * paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
 *
 * Copyright (c) 2008
 * International Institute of Information Technology - Hyderabad.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation for educational purpose is hereby granted without fee,
 * provided that the above copyright notice and this permission notice
 * appear in all copies of this software and that you do not sell the software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND, EXPRESS,
 * IMPLIED OR OTHERWISE.
 *
 * Created by Pawan Harish.
 *
 * Modified by Shinpei Kato.
 */
#define NUM_ITR 1000000

__global__ void Kernel2(int *g_graph_mask, int *g_updating_graph_mask,
                        int *g_graph_visited, int *g_over, int no_of_nodes)
{
    // The NUM_ITR loop repeats the sweep to stretch kernel execution time, a
    // benchmarking modification; only the first pass changes any state.
    for (int itr = 0; itr < NUM_ITR; ++itr) {
        int tid = blockIdx.x * MAX_THREADS_PER_BLOCK + threadIdx.x;
        if (tid < no_of_nodes) {
            if (g_updating_graph_mask[tid]) {
                g_graph_mask[tid] = true;
                g_graph_visited[tid] = true;
                *g_over = true;
                g_updating_graph_mask[tid] = false;
            }
        }
    }
}
85f40283b116bc27242e89a38b2ea63c47bb84a4.cu
/*
 * Implementing Breadth first search on CUDA using algorithm given in HiPC'07
 * paper "Accelerating Large Graph Algorithms on the GPU using CUDA"
 *
 * Copyright (c) 2008
 * International Institute of Information Technology - Hyderabad.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation for educational purpose is hereby granted without fee,
 * provided that the above copyright notice and this permission notice
 * appear in all copies of this software and that you do not sell the software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND, EXPRESS,
 * IMPLIED OR OTHERWISE.
 *
 * Created by Pawan Harish.
 *
 * Modified by Shinpei Kato.
 */
#define NUM_ITR 1000000

__global__ void Kernel2(int *g_graph_mask, int *g_updating_graph_mask,
                        int *g_graph_visited, int *g_over, int no_of_nodes)
{
    // The NUM_ITR loop repeats the sweep to stretch kernel execution time, a
    // benchmarking modification; only the first pass changes any state.
    for (int itr = 0; itr < NUM_ITR; ++itr) {
        int tid = blockIdx.x * MAX_THREADS_PER_BLOCK + threadIdx.x;
        if (tid < no_of_nodes) {
            if (g_updating_graph_mask[tid]) {
                g_graph_mask[tid] = true;
                g_graph_visited[tid] = true;
                *g_over = true;
                g_updating_graph_mask[tid] = false;
            }
        }
    }
}
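// Hedged sketch (an assumption; the matching frontier-expansion kernel and the
// real driver live elsewhere in this benchmark) of the usual host loop around
// Kernel2: clear the flag, run a sweep, and stop once no thread set *g_over.
void bfsDriverSketch(int *d_mask, int *d_updating, int *d_visited,
                     int *d_over, int no_of_nodes, int num_blocks) {
    int stop;
    do {
        stop = 0;
        cudaMemcpy(d_over, &stop, sizeof(int), cudaMemcpyHostToDevice);
        // ... the frontier-expansion kernel (not in this file) would run here ...
        Kernel2<<<num_blocks, MAX_THREADS_PER_BLOCK>>>(d_mask, d_updating,
                                                       d_visited, d_over,
                                                       no_of_nodes);
        cudaMemcpy(&stop, d_over, sizeof(int), cudaMemcpyDeviceToHost);
    } while (stop);
}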
da76e80d5b9383cc4ec663be9ae36cdda568b046.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <vector> #include <iostream> #include "yololayer.h" #include "cuda_utils.h" #define API_EXPORTS namespace Tn { template<typename T> void write(char*& buffer, const T& val) { *reinterpret_cast<T*>(buffer) = val; buffer += sizeof(T); } template<typename T> void read(const char*& buffer, T& val) { val = *reinterpret_cast<const T*>(buffer); buffer += sizeof(T); } } using namespace Yolo; namespace nvinfer1 { YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<Yolo::YoloKernel>& vYoloKernel) { mClassCount = classCount; mYoloV5NetWidth = netWidth; mYoloV5NetHeight = netHeight; mMaxOutObject = maxOut; mYoloKernel = vYoloKernel; mKernelCount = vYoloKernel.size(); CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*))); size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2; for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(hipMalloc(&mAnchor[ii], AnchorLen)); const auto& yolo = mYoloKernel[ii]; CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice)); } } YoloLayerPlugin::~YoloLayerPlugin() { for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(hipFree(mAnchor[ii])); } CUDA_CHECK(hipHostFree(mAnchor)); } // create the plugin at runtime from a byte stream YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) { using namespace Tn; const char *d = reinterpret_cast<const char *>(data), *a = d; read(d, mClassCount); read(d, mThreadCount); read(d, mKernelCount); read(d, mYoloV5NetWidth); read(d, mYoloV5NetHeight); read(d, mMaxOutObject); mYoloKernel.resize(mKernelCount); auto kernelSize = mKernelCount * sizeof(YoloKernel); memcpy(mYoloKernel.data(), d, kernelSize); d += kernelSize; CUDA_CHECK(hipHostMalloc(&mAnchor, mKernelCount * sizeof(void*))); size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2; for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(hipMalloc(&mAnchor[ii], AnchorLen)); const auto& yolo = mYoloKernel[ii]; CUDA_CHECK(hipMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, hipMemcpyHostToDevice)); } assert(d == a + length); } void YoloLayerPlugin::serialize(void* buffer) const TRT_NOEXCEPT { using namespace Tn; char* d = static_cast<char*>(buffer), *a = d; write(d, mClassCount); write(d, mThreadCount); write(d, mKernelCount); write(d, mYoloV5NetWidth); write(d, mYoloV5NetHeight); write(d, mMaxOutObject); auto kernelSize = mKernelCount * sizeof(YoloKernel); memcpy(d, mYoloKernel.data(), kernelSize); d += kernelSize; assert(d == a + getSerializationSize()); } size_t YoloLayerPlugin::getSerializationSize() const TRT_NOEXCEPT { return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size() + sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight) + sizeof(mMaxOutObject); } int YoloLayerPlugin::initialize() TRT_NOEXCEPT { return 0; } Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT { //output the result to channel int totalsize = mMaxOutObject * sizeof(Detection) / sizeof(float); return Dims3(totalsize + 1, 1, 1); } // Set plugin namespace void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT { mPluginNamespace = pluginNamespace; } const char* YoloLayerPlugin::getPluginNamespace() const TRT_NOEXCEPT { return mPluginNamespace; } // Return the DataType of the plugin output at the requested index DataType YoloLayerPlugin::getOutputDataType(int index, 
const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT { return DataType::kFLOAT; } // Return true if output tensor is broadcast across a batch. bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT { return false; } // Return true if plugin can use input that is broadcast across batch without replication. bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT { return false; } void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT { } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT { } // Detach the plugin object from its execution context. void YoloLayerPlugin::detachFromContext() TRT_NOEXCEPT {} const char* YoloLayerPlugin::getPluginType() const TRT_NOEXCEPT { return "YoloLayer_TRT"; } const char* YoloLayerPlugin::getPluginVersion() const TRT_NOEXCEPT { return "1"; } void YoloLayerPlugin::destroy() TRT_NOEXCEPT { delete this; } // Clone the plugin IPluginV2IOExt* YoloLayerPlugin::clone() const TRT_NOEXCEPT { YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, mYoloKernel); p->setPluginNamespace(mPluginNamespace); return p; } __device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); }; __global__ void CalDetection(const float *input, float *output, int noElements, const int netwidth, const int netheight, int maxoutobject, int yoloWidth, int yoloHeight, const float anchors[CHECK_COUNT * 2], int classes, int outputElem) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= noElements) return; int total_grid = yoloWidth * yoloHeight; int bnIdx = idx / total_grid; idx = idx - total_grid * bnIdx; int info_len_i = 5 + classes; const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT); for (int k = 0; k < CHECK_COUNT; ++k) { float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]); if (box_prob < IGNORE_THRESH) continue; int class_id = 0; float max_cls_prob = 0.0; for (int i = 5; i < info_len_i; ++i) { float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]); if (p > max_cls_prob) { max_cls_prob = p; class_id = i - 5; } } float *res_count = output + bnIdx * outputElem; int count = (int)atomicAdd(res_count, 1); if (count >= maxoutobject) return; char *data = (char*)res_count + sizeof(float) + count * sizeof(Detection); Detection *det = (Detection*)(data); int row = idx / yoloWidth; int col = idx % yoloWidth; //Location // pytorch: // y = x[i].sigmoid() // y[..., 0:2] = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy // y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh // X: (sigmoid(tx) + cx)/FeaturemapW * netwidth det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth; det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight; // W: (Pw * e^tw) / FeaturemapW * netwidth // v5: https://github.com/ultralytics/yolov5/issues/471 det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]); det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2 * k]; det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]); det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2 * k + 1]; det->conf = box_prob * max_cls_prob; det->class_id = class_id; } } void YoloLayerPlugin::forwardGpu(const float* const* inputs, float *output, hipStream_t stream, int batchSize) { int outputElem = 1 + mMaxOutObject * sizeof(Detection) / sizeof(float); for (int idx = 0; idx < batchSize; ++idx) { CUDA_CHECK(hipMemset(output + idx * outputElem, 0, sizeof(float))); } int numElem = 0; for (unsigned int i = 0; i < mYoloKernel.size(); ++i) { const auto& yolo = mYoloKernel[i]; numElem = yolo.width * yolo.height * batchSize; if (numElem < mThreadCount) mThreadCount = numElem; //printf("Net: %d %d \n", mYoloV5NetWidth, mYoloV5NetHeight); CalDetection << < (numElem + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream >> > (inputs[i], output, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float*)mAnchor[i], mClassCount, outputElem); } } int YoloLayerPlugin::enqueue(int batchSize, const void* const* inputs, void* TRT_CONST_ENQUEUE* outputs, void* workspace, hipStream_t stream) TRT_NOEXCEPT { forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize); return 0; } PluginFieldCollection YoloPluginCreator::mFC{}; std::vector<PluginField> YoloPluginCreator::mPluginAttributes; YoloPluginCreator::YoloPluginCreator() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* YoloPluginCreator::getPluginName() const TRT_NOEXCEPT { return "YoloLayer_TRT"; } const char* YoloPluginCreator::getPluginVersion() const TRT_NOEXCEPT { return "1"; } const PluginFieldCollection* YoloPluginCreator::getFieldNames() TRT_NOEXCEPT { return &mFC; } IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT { assert(fc->nbFields == 2); assert(strcmp(fc->fields[0].name, "netinfo") == 0); assert(strcmp(fc->fields[1].name, "kernels") == 0); int *p_netinfo = (int*)(fc->fields[0].data); int class_count = p_netinfo[0]; int input_w = p_netinfo[1]; int input_h = p_netinfo[2]; int max_output_object_count = p_netinfo[3]; std::vector<Yolo::YoloKernel> kernels(fc->fields[1].length); memcpy(&kernels[0], fc->fields[1].data, kernels.size() * sizeof(Yolo::YoloKernel)); YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, kernels); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT { // This object will be deleted when the network is destroyed, which will // call YoloLayerPlugin::destroy() YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength); 
obj->setPluginNamespace(mNamespace.c_str()); return obj; } }
da76e80d5b9383cc4ec663be9ae36cdda568b046.cu
#include <assert.h> #include <vector> #include <iostream> #include "yololayer.h" #include "cuda_utils.h" #define API_EXPORTS namespace Tn { template<typename T> void write(char*& buffer, const T& val) { *reinterpret_cast<T*>(buffer) = val; buffer += sizeof(T); } template<typename T> void read(const char*& buffer, T& val) { val = *reinterpret_cast<const T*>(buffer); buffer += sizeof(T); } } using namespace Yolo; namespace nvinfer1 { YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, const std::vector<Yolo::YoloKernel>& vYoloKernel) { mClassCount = classCount; mYoloV5NetWidth = netWidth; mYoloV5NetHeight = netHeight; mMaxOutObject = maxOut; mYoloKernel = vYoloKernel; mKernelCount = vYoloKernel.size(); CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*))); size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2; for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen)); const auto& yolo = mYoloKernel[ii]; CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice)); } } YoloLayerPlugin::~YoloLayerPlugin() { for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(cudaFree(mAnchor[ii])); } CUDA_CHECK(cudaFreeHost(mAnchor)); } // create the plugin at runtime from a byte stream YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) { using namespace Tn; const char *d = reinterpret_cast<const char *>(data), *a = d; read(d, mClassCount); read(d, mThreadCount); read(d, mKernelCount); read(d, mYoloV5NetWidth); read(d, mYoloV5NetHeight); read(d, mMaxOutObject); mYoloKernel.resize(mKernelCount); auto kernelSize = mKernelCount * sizeof(YoloKernel); memcpy(mYoloKernel.data(), d, kernelSize); d += kernelSize; CUDA_CHECK(cudaMallocHost(&mAnchor, mKernelCount * sizeof(void*))); size_t AnchorLen = sizeof(float)* CHECK_COUNT * 2; for (int ii = 0; ii < mKernelCount; ii++) { CUDA_CHECK(cudaMalloc(&mAnchor[ii], AnchorLen)); const auto& yolo = mYoloKernel[ii]; CUDA_CHECK(cudaMemcpy(mAnchor[ii], yolo.anchors, AnchorLen, cudaMemcpyHostToDevice)); } assert(d == a + length); } void YoloLayerPlugin::serialize(void* buffer) const TRT_NOEXCEPT { using namespace Tn; char* d = static_cast<char*>(buffer), *a = d; write(d, mClassCount); write(d, mThreadCount); write(d, mKernelCount); write(d, mYoloV5NetWidth); write(d, mYoloV5NetHeight); write(d, mMaxOutObject); auto kernelSize = mKernelCount * sizeof(YoloKernel); memcpy(d, mYoloKernel.data(), kernelSize); d += kernelSize; assert(d == a + getSerializationSize()); } size_t YoloLayerPlugin::getSerializationSize() const TRT_NOEXCEPT { return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size() + sizeof(mYoloV5NetWidth) + sizeof(mYoloV5NetHeight) + sizeof(mMaxOutObject); } int YoloLayerPlugin::initialize() TRT_NOEXCEPT { return 0; } Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims) TRT_NOEXCEPT { //output the result to channel int totalsize = mMaxOutObject * sizeof(Detection) / sizeof(float); return Dims3(totalsize + 1, 1, 1); } // Set plugin namespace void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT { mPluginNamespace = pluginNamespace; } const char* YoloLayerPlugin::getPluginNamespace() const TRT_NOEXCEPT { return mPluginNamespace; } // Return the DataType of the plugin output at the requested index DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT { return 
DataType::kFLOAT; } // Return true if output tensor is broadcast across a batch. bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT { return false; } // Return true if plugin can use input that is broadcast across batch without replication. bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT { return false; } void YoloLayerPlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) TRT_NOEXCEPT { } // Attach the plugin object to an execution context and grant the plugin the access to some context resource. void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT { } // Detach the plugin object from its execution context. void YoloLayerPlugin::detachFromContext() TRT_NOEXCEPT {} const char* YoloLayerPlugin::getPluginType() const TRT_NOEXCEPT { return "YoloLayer_TRT"; } const char* YoloLayerPlugin::getPluginVersion() const TRT_NOEXCEPT { return "1"; } void YoloLayerPlugin::destroy() TRT_NOEXCEPT { delete this; } // Clone the plugin IPluginV2IOExt* YoloLayerPlugin::clone() const TRT_NOEXCEPT { YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, mYoloKernel); p->setPluginNamespace(mPluginNamespace); return p; } __device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); }; __global__ void CalDetection(const float *input, float *output, int noElements, const int netwidth, const int netheight, int maxoutobject, int yoloWidth, int yoloHeight, const float anchors[CHECK_COUNT * 2], int classes, int outputElem) { int idx = threadIdx.x + blockDim.x * blockIdx.x; if (idx >= noElements) return; int total_grid = yoloWidth * yoloHeight; int bnIdx = idx / total_grid; idx = idx - total_grid * bnIdx; int info_len_i = 5 + classes; const float* curInput = input + bnIdx * (info_len_i * total_grid * CHECK_COUNT); for (int k = 0; k < CHECK_COUNT; ++k) { float box_prob = Logist(curInput[idx + k * info_len_i * total_grid + 4 * total_grid]); if (box_prob < IGNORE_THRESH) continue; int class_id = 0; float max_cls_prob = 0.0; for (int i = 5; i < info_len_i; ++i) { float p = Logist(curInput[idx + k * info_len_i * total_grid + i * total_grid]); if (p > max_cls_prob) { max_cls_prob = p; class_id = i - 5; } } float *res_count = output + bnIdx * outputElem; int count = (int)atomicAdd(res_count, 1); if (count >= maxoutobject) return; char *data = (char*)res_count + sizeof(float) + count * sizeof(Detection); Detection *det = (Detection*)(data); int row = idx / yoloWidth; int col = idx % yoloWidth; //Location // pytorch: // y = x[i].sigmoid() // y[..., 0:2] = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i].to(x[i].device)) * self.stride[i] # xy // y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh // X: (sigmoid(tx) + cx)/FeaturemapW * netwidth det->bbox[0] = (col - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 0 * total_grid])) * netwidth / yoloWidth; det->bbox[1] = (row - 0.5f + 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 1 * total_grid])) * netheight / yoloHeight; // W: (Pw * e^tw) / FeaturemapW * netwidth // v5: https://github.com/ultralytics/yolov5/issues/471 det->bbox[2] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 2 * total_grid]); det->bbox[2] = det->bbox[2] * det->bbox[2] * anchors[2 * k]; det->bbox[3] = 2.0f * Logist(curInput[idx + k * info_len_i * total_grid + 3 * total_grid]); det->bbox[3] = det->bbox[3] * det->bbox[3] * anchors[2 * k + 1]; det->conf = box_prob * max_cls_prob; det->class_id = class_id; } } void YoloLayerPlugin::forwardGpu(const float* const* inputs, float *output, cudaStream_t stream, int batchSize) { int outputElem = 1 + mMaxOutObject * sizeof(Detection) / sizeof(float); for (int idx = 0; idx < batchSize; ++idx) { CUDA_CHECK(cudaMemset(output + idx * outputElem, 0, sizeof(float))); } int numElem = 0; for (unsigned int i = 0; i < mYoloKernel.size(); ++i) { const auto& yolo = mYoloKernel[i]; numElem = yolo.width * yolo.height * batchSize; if (numElem < mThreadCount) mThreadCount = numElem; //printf("Net: %d %d \n", mYoloV5NetWidth, mYoloV5NetHeight); CalDetection << < (numElem + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream >> > (inputs[i], output, numElem, mYoloV5NetWidth, mYoloV5NetHeight, mMaxOutObject, yolo.width, yolo.height, (float*)mAnchor[i], mClassCount, outputElem); } } int YoloLayerPlugin::enqueue(int batchSize, const void* const* inputs, void* TRT_CONST_ENQUEUE* outputs, void* workspace, cudaStream_t stream) TRT_NOEXCEPT { forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, batchSize); return 0; } PluginFieldCollection YoloPluginCreator::mFC{}; std::vector<PluginField> YoloPluginCreator::mPluginAttributes; YoloPluginCreator::YoloPluginCreator() { mPluginAttributes.clear(); mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* YoloPluginCreator::getPluginName() const TRT_NOEXCEPT { return "YoloLayer_TRT"; } const char* YoloPluginCreator::getPluginVersion() const TRT_NOEXCEPT { return "1"; } const PluginFieldCollection* YoloPluginCreator::getFieldNames() TRT_NOEXCEPT { return &mFC; } IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT { assert(fc->nbFields == 2); assert(strcmp(fc->fields[0].name, "netinfo") == 0); assert(strcmp(fc->fields[1].name, "kernels") == 0); int *p_netinfo = (int*)(fc->fields[0].data); int class_count = p_netinfo[0]; int input_w = p_netinfo[1]; int input_h = p_netinfo[2]; int max_output_object_count = p_netinfo[3]; std::vector<Yolo::YoloKernel> kernels(fc->fields[1].length); memcpy(&kernels[0], fc->fields[1].data, kernels.size() * sizeof(Yolo::YoloKernel)); YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, kernels); obj->setPluginNamespace(mNamespace.c_str()); return obj; } IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT { // This object will be deleted when the network is destroyed, which will // call YoloLayerPlugin::destroy() YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, 
serialLength); obj->setPluginNamespace(mNamespace.c_str()); return obj; } }
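The decode math in CalDetection is compact but easy to misread. Below is a minimal host-side sketch of the same yolov5 formulas with sample values; the grid cell, anchors, and raw outputs are assumed for illustration only, not taken from a real model.

#include <cmath>
#include <cstdio>

static float logist(float x) { return 1.0f / (1.0f + std::exp(-x)); }

int main() {
    // assumed sample values, for illustration only
    const float netw = 640.0f, neth = 640.0f;   // network input size
    const float yolow = 80.0f, yoloh = 80.0f;   // feature-map size
    const float anchor_w = 10.0f, anchor_h = 13.0f;
    const int col = 40, row = 20;               // grid cell of this prediction
    const float tx = 0.3f, ty = -0.1f, tw = 0.2f, th = 0.5f;  // raw outputs

    // xy: (2*sigmoid(t) - 0.5 + grid) * stride, with stride = net size / feature-map size
    float cx = (col - 0.5f + 2.0f * logist(tx)) * netw / yolow;
    float cy = (row - 0.5f + 2.0f * logist(ty)) * neth / yoloh;
    // wh: (2*sigmoid(t))^2 * anchor
    float w = 2.0f * logist(tw); w = w * w * anchor_w;
    float h = 2.0f * logist(th); h = h * h * anchor_h;
    printf("bbox center=(%.2f, %.2f) size=(%.2f, %.2f)\n", cx, cy, w, h);
    return 0;
}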
5bf4bff74b336e4b9e0b2a64b849157d87a75e84.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <cuml/ensemble/randomforest.hpp> #include <queue> #include <raft/cuda_utils.cuh> #include <random> namespace ML { using namespace MLCommon; template <typename T> // template useless for now. struct RfInputs { int n_rows; int n_cols; int n_trees; float max_features; float max_samples; int max_depth; int max_leaves; bool bootstrap; bool bootstrap_features; int n_bins; int split_algo; int min_samples_leaf; int min_samples_split; float min_impurity_decrease; int n_streams; CRITERION split_criterion; }; template <typename T> class RfClassifierDepthTest : public ::testing::TestWithParam<int> { protected: void basicTest() { const int max_depth = ::testing::TestWithParam<int>::GetParam(); params = RfInputs<T>{5000, 10, 1, 1.0f, 1.0f, max_depth, -1, false, false, 8, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::ENTROPY}; DecisionTree::DecisionTreeParams tree_params; set_tree_params(tree_params, params.max_depth, params.max_leaves, params.max_features, params.n_bins, params.split_algo, params.min_samples_leaf, params.min_samples_split, params.min_impurity_decrease, params.bootstrap_features, params.split_criterion, false); RF_params rf_params; set_all_rf_params(rf_params, params.n_trees, params.bootstrap, params.max_samples, 0, params.n_streams, tree_params); int data_len = params.n_rows * params.n_cols; raft::allocate(data, data_len); raft::allocate(labels, params.n_rows); hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); // Populate data (assume Col major) std::mt19937 gen(0); std::vector<T> data_h(data_len); std::normal_distribution<> d{0, 1}; for (int col = 0; col < params.n_cols; ++col) { for (int row = 0; row < params.n_rows; ++row) { data_h[row + col * params.n_rows] = d(gen); } } raft::update_device(data, data_h.data(), data_len, stream); // Populate labels labels_h.resize(params.n_rows); for (int row = 0; row < params.n_rows; ++row) { labels_h[row] = (data_h[row + 2 * params.n_rows] * data_h[row + 3 * params.n_rows] > 0.5); } preprocess_labels(params.n_rows, labels_h, labels_map); raft::update_device(labels, labels_h.data(), params.n_rows, stream); forest = new typename ML::RandomForestMetaData<T, int>; null_trees_ptr(forest); raft::handle_t handle(rf_params.n_streams); handle.set_stream(stream); fit(handle, forest, data, params.n_rows, params.n_cols, labels, labels_map.size(), rf_params); CUDA_CHECK(hipStreamSynchronize(stream)); } void SetUp() override { basicTest(); } void TearDown() override { labels_h.clear(); labels_map.clear(); CUDA_CHECK(hipFree(labels)); CUDA_CHECK(hipFree(data)); delete forest; } protected: RfInputs<T> params; T* data; int* labels; std::vector<int> labels_h; std::map<int, int> labels_map; // unique map of labels to int vals starting from 0 RandomForestMetaData<T, int>* forest; }; template <typename T> class RfRegressorDepthTest : public 
::testing::TestWithParam<int> { protected: void basicTest() { const int max_depth = ::testing::TestWithParam<int>::GetParam(); params = RfInputs<T>{5000, 10, 1, 1.0f, 1.0f, max_depth, -1, false, false, 8, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::MSE}; DecisionTree::DecisionTreeParams tree_params; set_tree_params(tree_params, params.max_depth, params.max_leaves, params.max_features, params.n_bins, params.split_algo, params.min_samples_leaf, params.min_samples_split, params.min_impurity_decrease, params.bootstrap_features, params.split_criterion, false); RF_params rf_params; set_all_rf_params(rf_params, params.n_trees, params.bootstrap, params.max_samples, 0, params.n_streams, tree_params); int data_len = params.n_rows * params.n_cols; raft::allocate(data, data_len); raft::allocate(labels, params.n_rows); hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); // Populate data (assume Col major) std::mt19937 gen(0); std::vector<T> data_h(data_len); std::normal_distribution<> d{0, 1}; for (int col = 0; col < params.n_cols; ++col) { for (int row = 0; row < params.n_rows; ++row) { data_h[row + col * params.n_rows] = d(gen); } } raft::update_device(data, data_h.data(), data_len, stream); // Populate labels labels_h.resize(params.n_rows); for (int row = 0; row < params.n_rows; ++row) { labels_h[row] = (data_h[row + 2 * params.n_rows] * data_h[row + 3 * params.n_rows]); } raft::update_device(labels, labels_h.data(), params.n_rows, stream); forest = new typename ML::RandomForestMetaData<T, T>; null_trees_ptr(forest); raft::handle_t handle(rf_params.n_streams); handle.set_stream(stream); fit(handle, forest, data, params.n_rows, params.n_cols, labels, rf_params); CUDA_CHECK(hipStreamSynchronize(stream)); } void SetUp() override { basicTest(); } void TearDown() override { labels_h.clear(); CUDA_CHECK(hipFree(labels)); CUDA_CHECK(hipFree(data)); delete forest; } protected: RfInputs<T> params; T* data; T* labels; std::vector<T> labels_h; RandomForestMetaData<T, T>* forest; }; template <typename L, typename T> int MaxDepthOfDecisionTree(const DecisionTree::TreeMetaDataNode<T, L>* tree) { const auto& node_array = tree->sparsetree; std::queue<std::pair<int, int>> q; // (node ID, depth) // Traverse the tree breadth-first int initial_depth = 0; q.emplace(0, initial_depth); int max_depth = initial_depth; while (!q.empty()) { int node_id, depth; std::tie(node_id, depth) = q.front(); q.pop(); max_depth = ::max(depth, max_depth); const SparseTreeNode<T, L>& node = node_array.at(node_id); if (node.colid != -1) { q.emplace(node.left_child_id, depth + 1); q.emplace(node.left_child_id + 1, depth + 1); } } return max_depth; } typedef RfClassifierDepthTest<float> RfClassifierDepthTestF; TEST_P(RfClassifierDepthTestF, Fit) { CUML_LOG_INFO("Param max_depth = %d", params.max_depth); for (int i = 0; i < forest->rf_params.n_trees; i++) { int actual_max_depth = MaxDepthOfDecisionTree(&(forest->trees[i])); ASSERT_EQ(actual_max_depth, params.max_depth); ASSERT_EQ(actual_max_depth, forest->trees[i].depth_counter); } } typedef RfClassifierDepthTest<double> RfClassifierDepthTestD; TEST_P(RfClassifierDepthTestD, Fit) { CUML_LOG_INFO("Param max_depth = %d", params.max_depth); for (int i = 0; i < forest->rf_params.n_trees; i++) { int actual_max_depth = MaxDepthOfDecisionTree(&(forest->trees[i])); ASSERT_EQ(actual_max_depth, params.max_depth); ASSERT_EQ(actual_max_depth, forest->trees[i].depth_counter); } } INSTANTIATE_TEST_CASE_P(RfClassifierDepthTests, RfClassifierDepthTestF, ::testing::Range(0, 19)); 
INSTANTIATE_TEST_CASE_P(RfClassifierDepthTests, RfClassifierDepthTestD, ::testing::Range(0, 19)); typedef RfRegressorDepthTest<float> RfRegressorDepthTestF; TEST_P(RfRegressorDepthTestF, Fit) { CUML_LOG_INFO("Param max_depth = %d", params.max_depth); for (int i = 0; i < forest->rf_params.n_trees; i++) { int actual_max_depth = MaxDepthOfDecisionTree(&(forest->trees[i])); ASSERT_EQ(actual_max_depth, params.max_depth); ASSERT_EQ(actual_max_depth, forest->trees[i].depth_counter); } } typedef RfRegressorDepthTest<double> RfRegressorDepthTestD; TEST_P(RfRegressorDepthTestD, Fit) { CUML_LOG_INFO("Param max_depth = %d", params.max_depth); for (int i = 0; i < forest->rf_params.n_trees; i++) { int actual_max_depth = MaxDepthOfDecisionTree(&(forest->trees[i])); ASSERT_EQ(actual_max_depth, params.max_depth); ASSERT_EQ(actual_max_depth, forest->trees[i].depth_counter); } } INSTANTIATE_TEST_CASE_P(RfRegressorDepthTests, RfRegressorDepthTestF, ::testing::Range(0, 19)); INSTANTIATE_TEST_CASE_P(RfRegressorDepthTests, RfRegressorDepthTestD, ::testing::Range(0, 19)); } // end namespace ML
5bf4bff74b336e4b9e0b2a64b849157d87a75e84.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <cuml/ensemble/randomforest.hpp> #include <queue> #include <raft/cuda_utils.cuh> #include <random> namespace ML { using namespace MLCommon; template <typename T> // template useless for now. struct RfInputs { int n_rows; int n_cols; int n_trees; float max_features; float max_samples; int max_depth; int max_leaves; bool bootstrap; bool bootstrap_features; int n_bins; int split_algo; int min_samples_leaf; int min_samples_split; float min_impurity_decrease; int n_streams; CRITERION split_criterion; }; template <typename T> class RfClassifierDepthTest : public ::testing::TestWithParam<int> { protected: void basicTest() { const int max_depth = ::testing::TestWithParam<int>::GetParam(); params = RfInputs<T>{5000, 10, 1, 1.0f, 1.0f, max_depth, -1, false, false, 8, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::ENTROPY}; DecisionTree::DecisionTreeParams tree_params; set_tree_params(tree_params, params.max_depth, params.max_leaves, params.max_features, params.n_bins, params.split_algo, params.min_samples_leaf, params.min_samples_split, params.min_impurity_decrease, params.bootstrap_features, params.split_criterion, false); RF_params rf_params; set_all_rf_params(rf_params, params.n_trees, params.bootstrap, params.max_samples, 0, params.n_streams, tree_params); int data_len = params.n_rows * params.n_cols; raft::allocate(data, data_len); raft::allocate(labels, params.n_rows); cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); // Populate data (assume Col major) std::mt19937 gen(0); std::vector<T> data_h(data_len); std::normal_distribution<> d{0, 1}; for (int col = 0; col < params.n_cols; ++col) { for (int row = 0; row < params.n_rows; ++row) { data_h[row + col * params.n_rows] = d(gen); } } raft::update_device(data, data_h.data(), data_len, stream); // Populate labels labels_h.resize(params.n_rows); for (int row = 0; row < params.n_rows; ++row) { labels_h[row] = (data_h[row + 2 * params.n_rows] * data_h[row + 3 * params.n_rows] > 0.5); } preprocess_labels(params.n_rows, labels_h, labels_map); raft::update_device(labels, labels_h.data(), params.n_rows, stream); forest = new typename ML::RandomForestMetaData<T, int>; null_trees_ptr(forest); raft::handle_t handle(rf_params.n_streams); handle.set_stream(stream); fit(handle, forest, data, params.n_rows, params.n_cols, labels, labels_map.size(), rf_params); CUDA_CHECK(cudaStreamSynchronize(stream)); } void SetUp() override { basicTest(); } void TearDown() override { labels_h.clear(); labels_map.clear(); CUDA_CHECK(cudaFree(labels)); CUDA_CHECK(cudaFree(data)); delete forest; } protected: RfInputs<T> params; T* data; int* labels; std::vector<int> labels_h; std::map<int, int> labels_map; // unique map of labels to int vals starting from 0 RandomForestMetaData<T, int>* forest; }; template <typename T> class RfRegressorDepthTest : public ::testing::TestWithParam<int> { protected: void basicTest() 
{ const int max_depth = ::testing::TestWithParam<int>::GetParam(); params = RfInputs<T>{5000, 10, 1, 1.0f, 1.0f, max_depth, -1, false, false, 8, SPLIT_ALGO::GLOBAL_QUANTILE, 2, 2, 0.0, 2, CRITERION::MSE}; DecisionTree::DecisionTreeParams tree_params; set_tree_params(tree_params, params.max_depth, params.max_leaves, params.max_features, params.n_bins, params.split_algo, params.min_samples_leaf, params.min_samples_split, params.min_impurity_decrease, params.bootstrap_features, params.split_criterion, false); RF_params rf_params; set_all_rf_params(rf_params, params.n_trees, params.bootstrap, params.max_samples, 0, params.n_streams, tree_params); int data_len = params.n_rows * params.n_cols; raft::allocate(data, data_len); raft::allocate(labels, params.n_rows); cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); // Populate data (assume Col major) std::mt19937 gen(0); std::vector<T> data_h(data_len); std::normal_distribution<> d{0, 1}; for (int col = 0; col < params.n_cols; ++col) { for (int row = 0; row < params.n_rows; ++row) { data_h[row + col * params.n_rows] = d(gen); } } raft::update_device(data, data_h.data(), data_len, stream); // Populate labels labels_h.resize(params.n_rows); for (int row = 0; row < params.n_rows; ++row) { labels_h[row] = (data_h[row + 2 * params.n_rows] * data_h[row + 3 * params.n_rows]); } raft::update_device(labels, labels_h.data(), params.n_rows, stream); forest = new typename ML::RandomForestMetaData<T, T>; null_trees_ptr(forest); raft::handle_t handle(rf_params.n_streams); handle.set_stream(stream); fit(handle, forest, data, params.n_rows, params.n_cols, labels, rf_params); CUDA_CHECK(cudaStreamSynchronize(stream)); } void SetUp() override { basicTest(); } void TearDown() override { labels_h.clear(); CUDA_CHECK(cudaFree(labels)); CUDA_CHECK(cudaFree(data)); delete forest; } protected: RfInputs<T> params; T* data; T* labels; std::vector<T> labels_h; RandomForestMetaData<T, T>* forest; }; template <typename L, typename T> int MaxDepthOfDecisionTree(const DecisionTree::TreeMetaDataNode<T, L>* tree) { const auto& node_array = tree->sparsetree; std::queue<std::pair<int, int>> q; // (node ID, depth) // Traverse the tree breadth-first int initial_depth = 0; q.emplace(0, initial_depth); int max_depth = initial_depth; while (!q.empty()) { int node_id, depth; std::tie(node_id, depth) = q.front(); q.pop(); max_depth = std::max(depth, max_depth); const SparseTreeNode<T, L>& node = node_array.at(node_id); if (node.colid != -1) { q.emplace(node.left_child_id, depth + 1); q.emplace(node.left_child_id + 1, depth + 1); } } return max_depth; } typedef RfClassifierDepthTest<float> RfClassifierDepthTestF; TEST_P(RfClassifierDepthTestF, Fit) { CUML_LOG_INFO("Param max_depth = %d", params.max_depth); for (int i = 0; i < forest->rf_params.n_trees; i++) { int actual_max_depth = MaxDepthOfDecisionTree(&(forest->trees[i])); ASSERT_EQ(actual_max_depth, params.max_depth); ASSERT_EQ(actual_max_depth, forest->trees[i].depth_counter); } } typedef RfClassifierDepthTest<double> RfClassifierDepthTestD; TEST_P(RfClassifierDepthTestD, Fit) { CUML_LOG_INFO("Param max_depth = %d", params.max_depth); for (int i = 0; i < forest->rf_params.n_trees; i++) { int actual_max_depth = MaxDepthOfDecisionTree(&(forest->trees[i])); ASSERT_EQ(actual_max_depth, params.max_depth); ASSERT_EQ(actual_max_depth, forest->trees[i].depth_counter); } } INSTANTIATE_TEST_CASE_P(RfClassifierDepthTests, RfClassifierDepthTestF, ::testing::Range(0, 19)); INSTANTIATE_TEST_CASE_P(RfClassifierDepthTests, 
RfClassifierDepthTestD, ::testing::Range(0, 19)); typedef RfRegressorDepthTest<float> RfRegressorDepthTestF; TEST_P(RfRegressorDepthTestF, Fit) { CUML_LOG_INFO("Param max_depth = %d", params.max_depth); for (int i = 0; i < forest->rf_params.n_trees; i++) { int actual_max_depth = MaxDepthOfDecisionTree(&(forest->trees[i])); ASSERT_EQ(actual_max_depth, params.max_depth); ASSERT_EQ(actual_max_depth, forest->trees[i].depth_counter); } } typedef RfRegressorDepthTest<double> RfRegressorDepthTestD; TEST_P(RfRegressorDepthTestD, Fit) { CUML_LOG_INFO("Param max_depth = %d", params.max_depth); for (int i = 0; i < forest->rf_params.n_trees; i++) { int actual_max_depth = MaxDepthOfDecisionTree(&(forest->trees[i])); ASSERT_EQ(actual_max_depth, params.max_depth); ASSERT_EQ(actual_max_depth, forest->trees[i].depth_counter); } } INSTANTIATE_TEST_CASE_P(RfRegressorDepthTests, RfRegressorDepthTestF, ::testing::Range(0, 19)); INSTANTIATE_TEST_CASE_P(RfRegressorDepthTests, RfRegressorDepthTestD, ::testing::Range(0, 19)); } // end namespace ML
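MaxDepthOfDecisionTree is the load-bearing helper in these tests. A standalone sketch of the same breadth-first traversal over a toy node array may make its convention clearer; the colid == -1 leaf marker and adjacent-children layout are taken from the code above, while the toy tree itself is assumed.

#include <algorithm>
#include <cstdio>
#include <queue>
#include <tuple>
#include <utility>
#include <vector>

struct ToyNode { int colid; int left_child_id; };  // colid == -1 marks a leaf

int max_depth(const std::vector<ToyNode>& nodes) {
    std::queue<std::pair<int, int>> q;  // (node id, depth), root first
    q.emplace(0, 0);
    int best = 0;
    while (!q.empty()) {
        int id, depth;
        std::tie(id, depth) = q.front();
        q.pop();
        best = std::max(best, depth);
        const ToyNode& n = nodes.at(id);
        // interior node: children sit at left_child_id and left_child_id + 1
        if (n.colid != -1) {
            q.emplace(n.left_child_id, depth + 1);
            q.emplace(n.left_child_id + 1, depth + 1);
        }
    }
    return best;
}

int main() {
    // root -> (leaf, interior) -> (leaf, leaf): expected max depth 2
    std::vector<ToyNode> nodes = {{0, 1}, {-1, 0}, {1, 3}, {-1, 0}, {-1, 0}};
    printf("max depth = %d\n", max_depth(nodes));
    return 0;
}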
443945d74e6d45c4d5ab99c7a6e314d1b28480d4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: [email protected] */ #include "cuda_executor.hpp" extern "C" { #include "slice_param.h" #include "graph/tensor.h" #include "operator/op.h" #include "utility/log.h" } __global__ void slice(float *y, float *x, int elem_num, int res) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idx_new = idx + res; if (idx < elem_num) { y[idx] = x[idx_new]; } } void slice_gpu_kernel(struct graph* ir_graph, struct node* ir_node, dict_uint2voidx gpu_addr_map) { struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); /* init grid and block */ int bs = 1024; int s = ceil((output_tensor->elem_num + bs - 1.) / bs); dim3 grid = dim3(s); struct slice_param* param = (struct slice_param*)ir_node->op.param_mem; int res = 1; for (uint8_t i = input_tensor->dim_num-1; i > param->axis; i--) { res *= input_tensor->dims[i]; } res *= param->begin; hipLaunchKernelGGL(( slice), dim3(grid), dim3(bs), 0, 0, (float*)gpu_addr_map[output_tensor->index], (float*)gpu_addr_map[input_tensor->index], output_tensor->elem_num, res); } void CUDAEngine::AddSliceNode(struct graph* ir_graph, struct node* ir_node) { TLOG_INFO("Tengine GPU: Support OP(%d) OP_SLICE.\n", ir_node->index); slice_gpu_kernel(ir_graph, ir_node, this->gpu_addr_map); this->ops.push_back(std::bind(&slice_gpu_kernel, ir_graph, ir_node, this->gpu_addr_map)); }
443945d74e6d45c4d5ab99c7a6e314d1b28480d4.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: [email protected] */ #include "cuda_executor.hpp" extern "C" { #include "slice_param.h" #include "graph/tensor.h" #include "operator/op.h" #include "utility/log.h" } __global__ void slice(float *y, float *x, int elem_num, int res) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idx_new = idx + res; if (idx < elem_num) { y[idx] = x[idx_new]; } } void slice_gpu_kernel(struct graph* ir_graph, struct node* ir_node, dict_uint2voidx gpu_addr_map) { struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); /* init grid and block */ int bs = 1024; int s = ceil((output_tensor->elem_num + bs - 1.) / bs); dim3 grid = dim3(s); struct slice_param* param = (struct slice_param*)ir_node->op.param_mem; int res = 1; for (uint8_t i = input_tensor->dim_num-1; i > param->axis; i--) { res *= input_tensor->dims[i]; } res *= param->begin; slice<<<grid, bs>>>((float*)gpu_addr_map[output_tensor->index], (float*)gpu_addr_map[input_tensor->index], output_tensor->elem_num, res); } void CUDAEngine::AddSliceNode(struct graph* ir_graph, struct node* ir_node) { TLOG_INFO("Tengine GPU: Support OP(%d) OP_SLICE.\n", ir_node->index); slice_gpu_kernel(ir_graph, ir_node, this->gpu_addr_map); this->ops.push_back(std::bind(&slice_gpu_kernel, ir_graph, ir_node, this->gpu_addr_map)); }
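The res offset is the only non-obvious part of slice_gpu_kernel: it is begin times the product of the dimensions after the slice axis, i.e. the flat element offset where the copied region starts. A small host-side sketch with an assumed NCHW shape; it also assumes, as the kernel does, that the sliced region is one contiguous run of elements.

#include <cstdio>

int main() {
    const int dims[4] = {1, 8, 16, 16};  // sample NCHW shape (assumed)
    const int axis = 1;                  // slice along channels
    const int begin = 3;                 // keep channels [3, ...)

    // res = begin * product of dims after `axis` -> flat element offset of the slice
    int res = 1;
    for (int i = 4 - 1; i > axis; i--) res *= dims[i];
    res *= begin;
    printf("copy starts at element %d (= 3 * 16 * 16)\n", res);  // 768
    return 0;
}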
bc5f6f42a8250ced3e4054a4e1bb4d427acd8fb5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "workshop.h" __global__ void add(int a, int b, int *c) { *c = a + b; } int main(int argc, char **argv) { int c, *dev_c; HANDLE_ERROR( hipMalloc(&dev_c, sizeof(int)) ); hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, 2, 7, dev_c); HANDLE_ERROR( hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost) ); printf("2 + 7 = %d\n", c); }
bc5f6f42a8250ced3e4054a4e1bb4d427acd8fb5.cu
#include <stdio.h> #include "workshop.h" __global__ void add(int a, int b, int *c) { *c = a + b; } int main(int argc, char **argv) { int c, *dev_c; HANDLE_ERROR( cudaMalloc(&dev_c, sizeof(int)) ); add<<<1, 1>>>(2, 7, dev_c); HANDLE_ERROR( cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost) ); printf("2 + 7 = %d\n", c); }
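HANDLE_ERROR comes from workshop.h, which is not shown here. A self-contained variant of the same program with an inline checker (a sketch, not the workshop's actual macro) also catches kernel-launch errors, which the original silently drops.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK(call)                                                  \
    do {                                                             \
        cudaError_t err = (call);                                    \
        if (err != cudaSuccess) {                                    \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,       \
                    cudaGetErrorString(err));                        \
            exit(EXIT_FAILURE);                                      \
        }                                                            \
    } while (0)

__global__ void add(int a, int b, int* c) { *c = a + b; }

int main() {
    int c, *dev_c;
    CHECK(cudaMalloc(&dev_c, sizeof(int)));
    add<<<1, 1>>>(2, 7, dev_c);
    CHECK(cudaGetLastError());  // catch launch errors too
    CHECK(cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost));
    CHECK(cudaFree(dev_c));
    printf("2 + 7 = %d\n", c);
    return 0;
}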
ce32b7f928bcbe6fafd731df7cef848a86e5256c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 @precisions normal d -> s @author Peng Du */ #include "common_magma.h" #define qmod(a, b) ((a)-(__mul24((b), (a)/(b)))) #define b_copy() \ do { \ dim3 dimBlock( (M>=MAX_THREAD_PER_BLOCK) ? MAX_THREAD_PER_BLOCK : (WARP_SIZE*((M/WARP_SIZE)+(M%WARP_SIZE!=0))), 1 ); \ dim3 dimGrid( (M - 1)/dimBlock.x + 1, N ); \ hipLaunchKernelGGL(( b_copy_kernel), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , M, N, b, ldb, d_x, M); \ } while(0) // no magma_device_sync -- async function #define MAX_THREAD_PER_BLOCK 512 #define WARP_SIZE 32 #define BLOCK_SIZE 16 // inner blocking size, <=32 #define NB 128 // outer blocking size, >BLOCK_SIZE __global__ void b_copy_kernel(int M, int N, double *b, int ldb, double *d_x, int ldx); extern "C" void diag_dtrtri(magma_int_t M, char uplo, char diag, const double *A, double *d_dinvA, magma_int_t lda); /* * magmablas_dtrsm */ extern "C" void magmablas_dtrsm_work( char side, char uplo, char tran, char diag, magma_int_t M, magma_int_t N, double alpha, const double* A, magma_int_t lda, double* b, magma_int_t ldb, int flag, double *d_dinvA, double *d_x ) { /* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 Purpose ======= dtrsm solves one of the matrix equations on gpu op( A )*x = alpha*b, or x*op( A ) = alpha*b, where alpha is a scalar, x and b are m by n matrices, A is a unit, or non-unit, upper or lower triangular matrix and op( A ) is one of op( A ) = A or op( A ) = A^T. The matrix X is overwritten on B. When M or N is not a multiple of blocking size, which is 32 for now, hipblasDtrsm will be called instead. There soon will not be this limitation both for arbitrary problem size and blocking size. This is an asynchronous version of magmablas_dtrsm with "workspace" as an argument. Arguments ========== side CHARACTER*1. On entry, side specifies whether op( A ) appears on the left or right of X as follows: side = 'L' or 'l' op( A )*X = alpha*B. side = 'R' or 'r' X*op( A ) = alpha*B. Unchanged on exit. uplo CHARACTER*1. On entry, uplo specifies whether the matrix A is an upper or lower triangular matrix as follows: uplo = 'U' or 'u' A is an upper triangular matrix. uplo = 'L' or 'l' A is a lower triangular matrix. Unchanged on exit. tran CHARACTER*1. On entry, tran specifies the form of op( A ) to be used in the matrix multiplication as follows: tran = 'N' or 'n' op( A ) = A. tran = 'T' or 't' op( A ) = A^T. tran = 'C' or 'c' op( A ) = A^T. Unchanged on exit. diag CHARACTER*1. On entry, diag specifies whether or not A is unit triangular as follows: diag = 'U' or 'u' A is assumed to be unit triangular. diag = 'N' or 'n' A is not assumed to be unit triangular. Unchanged on exit. m INTEGER. On entry, m specifies the number of rows of B. m must be at least zero. Unchanged on exit. n INTEGER. On entry, n specifies the number of columns of B. n must be at least zero. Unchanged on exit. alpha REAL. On entry, alpha specifies the scalar alpha. When alpha is zero then A is not referenced and B need not be set before entry. Unchanged on exit. A REAL array of DIMENSION ( lda, k ), where k is m when side = 'L' or 'l' and is n when side = 'R' or 'r'. 
Before entry with uplo = 'U' or 'u', the leading k by k upper triangular part of the array A must contain the upper triangular matrix and the strictly lower triangular part of A is not referenced. Before entry with uplo = 'L' or 'l', the leading k by k lower triangular part of the array A must contain the lower triangular matrix and the strictly upper triangular part of A is not referenced. Note that when diag = 'U' or 'u', the diagonal elements of A are not referenced either, but are assumed to be unity. Unchanged on exit. lda INTEGER. On entry, lda specifies the first dimension of A as declared in the calling (sub) program. When side = 'L' or 'l' then lda must be at least max( 1, m ), when side = 'R' or 'r' then lda must be at least max( 1, n ). Unchanged on exit. b REAL array of DIMENSION ( ldb, n ). Before entry, the leading m by n part of the array B must contain the right-hand side matrix B, and on exit is overwritten by the solution matrix X. ldb INTEGER. On entry, ldb specifies the first dimension of B as declared in the calling (sub) program. ldb must be at least max( 1, m ). Unchanged on exit. flag BOOLEAN. If flag is true, invert diagonal blocks. If flag is false, assume diagonal blocks are already inverted. (?) d_dinvA workspace of size NB*((M+NB-1)/NB))*NB, on device. d_x workspace of size N*M, on device. Level 3 Blas routine. ===================================================================== */ int i; /* quick return on wrong size */ if (M <= 0 || N <= 0) return; if (side == 'l' || side == 'L') { // side=L /* invert the diagonals */ if (flag == 1) { diag_dtrtri (M, uplo, diag, A, d_dinvA, lda); } if (tran == 'N' || tran == 'n') { /* the non-transpose case */ if (uplo == 'L' || uplo == 'l') { /* the lower case */ /* handle the first block seperately with alpha */ int MM = min (NB, M); hipblasDgemm ('N', 'N', MM, N, MM, alpha, d_dinvA, NB, b, ldb, 0, d_x, M); if (NB >= M) { b_copy(); return; } hipblasDgemm ('N', 'N', M-NB, N, NB, -1.0, A+NB, lda, d_x, M, alpha, b+NB, ldb); /* the rest blocks */ for (i=NB; i < M; i += NB) { MM = min (M-i, NB); hipblasDgemm ('N', 'N', MM, N, MM, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M); if (i+NB >= M) break; hipblasDgemm ('N', 'N', M-i-NB, N, NB, -1.0, A+i*lda+i+NB, lda, d_x+i, M, 1.0, b+i+NB, ldb); } } else { /* the upper case */ /* handle the first block seperately with alpha */ int MM = (M%NB==0) ? NB : (M%NB); i = M-MM; hipblasDgemm ('N', 'N', MM, N, MM, alpha, d_dinvA+i*NB, NB, b+i, ldb, 0.0, d_x+i, M); if (i-NB < 0) { b_copy(); return; } hipblasDgemm ('N', 'N', i, N, MM, -1.0, A+i*lda, lda, d_x+i, M, alpha, b, ldb); /* the rest blocks */ for (i=M-MM-NB; i >= 0; i -= NB) { hipblasDgemm ('N', 'N', NB, N, NB, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0.0, d_x+i, M); if (i-NB < 0) break; hipblasDgemm ('N', 'N', i, N, NB, -1.0, A+i*lda, lda, d_x+i, M, 1.0, b, ldb); } } } else { /* the transpose case */ if (uplo == 'L' || uplo == 'l') { /* the lower case */ /* handle the first block seperately with alpha */ int MM = (M%NB==0) ? 
NB : (M%NB); i = M-MM; hipblasDgemm ('T', 'N', MM, N, MM, alpha, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M); if (i-NB < 0) { b_copy(); return; } hipblasDgemm ('T', 'N', i, N, MM, -1.0, A+i, lda, d_x+i, M, alpha, b, ldb); /* the rest blocks */ for (i=M-MM-NB; i >= 0; i -= NB) { hipblasDgemm ('T', 'N', NB, N, NB, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M); if (i-NB < 0) break; hipblasDgemm ('T', 'N', i, N, NB, -1.0, A+i, lda, d_x+i, M, 1.0, b, ldb); } } else { /* the upper case */ /* handle the first block seperately with alpha */ int MM = min (NB, M); hipblasDgemm ('T', 'N', MM, N, MM, alpha, d_dinvA, NB, b, ldb, 0, d_x, M); if (NB >= M) { b_copy(); return; } hipblasDgemm ('T', 'N', M-NB, N, NB, -1.0, A+(NB)*lda, lda, d_x, M, alpha, b+NB, ldb); /* the rest blocks */ for (i=NB; i < M; i += NB) { MM = min (M-i, NB); hipblasDgemm ('T', 'N', MM, N, MM, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M); if (i+NB >= M) break; hipblasDgemm ('T', 'N', M-i-NB, N, NB, -1.0, A+(i+NB)*lda+i, lda, d_x+i, M, 1.0, b+i+NB, ldb); } } } } else { // side=R /* invert the diagonals */ if (flag == 1) { diag_dtrtri (N, uplo, diag, A, d_dinvA, lda); } if (tran == 'N' || tran == 'n') { /* the non-transpose case */ if (uplo == 'L' || uplo == 'l') { /* the lower case */ /* handle the first block seperately with alpha */ int NN = (N%NB==0) ? NB : (N%NB); i = N-NN; hipblasDgemm ('N', 'N', M, NN, NN, alpha, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M); if (i-NB < 0) { b_copy(); return; } hipblasDgemm ('N', 'N', M, i, NN, -1.0, d_x+i*M, M, A+i, lda, alpha, b, ldb); /* the rest blocks */ for (i=N-NN-NB; i >= 0; i -= NB) { hipblasDgemm ('N', 'N', M, NB, NB, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M); if (i-NB < 0) break; hipblasDgemm ('N', 'N', M, i, NB, -1.0, d_x+i*M, M, A+i, lda, 1.0, b, ldb); } } else { /* the upper case */ /* handle the first block seperately with alpha */ int NN = min(NB, N); hipblasDgemm ('N', 'N', M, NN, NN, alpha, b, ldb, d_dinvA, NB, 0, d_x, M); if (NB >= N) { b_copy(); return; } hipblasDgemm ('N', 'N', M, N-NB, NB, -1.0, d_x, M, A+NB*lda, lda, alpha, b+NB*ldb, ldb); /* the rest blocks */ for (i=NB; i < N; i += NB) { NN = min(NB, N-i); hipblasDgemm ('N', 'N', M, NN, NN, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0, d_x+i*M, M); if (i+NB >= N) break; hipblasDgemm ('N', 'N', M, N-i-NB, NB, -1.0, d_x+i*M, M, A+(i+NB)*lda+i, lda, 1.0, b+(i+NB)*ldb, ldb); } } } else { /* the transpose case */ if (uplo == 'L' || uplo == 'l') { /* the lower case */ /* handle the first block seperately with alpha */ int NN = min(NB, N); hipblasDgemm ('N', 'T', M, NN, NN, alpha, b, ldb, d_dinvA, NB, 0, d_x, M); if (NB >= N) { b_copy(); return; } hipblasDgemm ('N', 'T', M, N-NB, NB, -1.0, d_x, M, A+NB, lda, alpha, b+NB*ldb, ldb); /* the rest blocks */ for (i=NB; i < N; i += NB) { NN = min(NB, N-i); hipblasDgemm ('N', 'T', M, NN, NN, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0, d_x+i*M, M); if (i+NB >= N) break; hipblasDgemm ('N', 'T', M, N-i-NB, NB, -1.0, d_x+i*M, M, A+i*lda+NB+i, lda, 1.0, b+(i+NB)*ldb, ldb); } } else { /* the upper case */ /* handle the first block seperately with alpha */ int NN = (N%NB==0) ? 
NB : (N%NB); i = N-NN; hipblasDgemm ('N', 'T', M, NN, NN, alpha, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M); if (i-NB < 0) { b_copy(); return; } hipblasDgemm ('N', 'T', M, i, NN, -1.0, d_x+i*M, M, A+i*lda, lda, alpha, b, ldb); /* the rest blocks */ for (i=N-NN-NB; i >= 0; i -= NB) { hipblasDgemm ('N', 'T', M, NB, NB, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M); if (i-NB < 0) break; hipblasDgemm ('N', 'T', M, i, NB, -1.0, d_x+i*M, M, A+i*lda, lda, 1.0, b, ldb); } } } } b_copy(); }
ce32b7f928bcbe6fafd731df7cef848a86e5256c.cu
/* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 @precisions normal d -> s @author Peng Du */ #include "common_magma.h" #define qmod(a, b) ((a)-(__mul24((b), (a)/(b)))) #define b_copy() \ do { \ dim3 dimBlock( (M>=MAX_THREAD_PER_BLOCK) ? MAX_THREAD_PER_BLOCK : (WARP_SIZE*((M/WARP_SIZE)+(M%WARP_SIZE!=0))), 1 ); \ dim3 dimGrid( (M - 1)/dimBlock.x + 1, N ); \ b_copy_kernel<<< dimGrid, dimBlock, 0, magma_stream >>>(M, N, b, ldb, d_x, M); \ } while(0) // no magma_device_sync -- async function #define MAX_THREAD_PER_BLOCK 512 #define WARP_SIZE 32 #define BLOCK_SIZE 16 // inner blocking size, <=32 #define NB 128 // outer blocking size, >BLOCK_SIZE __global__ void b_copy_kernel(int M, int N, double *b, int ldb, double *d_x, int ldx); extern "C" void diag_dtrtri(magma_int_t M, char uplo, char diag, const double *A, double *d_dinvA, magma_int_t lda); /* * magmablas_dtrsm */ extern "C" void magmablas_dtrsm_work( char side, char uplo, char tran, char diag, magma_int_t M, magma_int_t N, double alpha, const double* A, magma_int_t lda, double* b, magma_int_t ldb, int flag, double *d_dinvA, double *d_x ) { /* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 Purpose ======= dtrsm solves one of the matrix equations on gpu op( A )*x = alpha*b, or x*op( A ) = alpha*b, where alpha is a scalar, x and b are m by n matrices, A is a unit, or non-unit, upper or lower triangular matrix and op( A ) is one of op( A ) = A or op( A ) = A^T. The matrix X is overwritten on B. When M or N is not a multiple of blocking size, which is 32 for now, cublasDtrsm will be called instead. There soon will not be this limitation both for arbitrary problem size and blocking size. This is an asynchronous version of magmablas_dtrsm with "workspace" as an argument. Arguments ========== side CHARACTER*1. On entry, side specifies whether op( A ) appears on the left or right of X as follows: side = 'L' or 'l' op( A )*X = alpha*B. side = 'R' or 'r' X*op( A ) = alpha*B. Unchanged on exit. uplo CHARACTER*1. On entry, uplo specifies whether the matrix A is an upper or lower triangular matrix as follows: uplo = 'U' or 'u' A is an upper triangular matrix. uplo = 'L' or 'l' A is a lower triangular matrix. Unchanged on exit. tran CHARACTER*1. On entry, tran specifies the form of op( A ) to be used in the matrix multiplication as follows: tran = 'N' or 'n' op( A ) = A. tran = 'T' or 't' op( A ) = A^T. tran = 'C' or 'c' op( A ) = A^T. Unchanged on exit. diag CHARACTER*1. On entry, diag specifies whether or not A is unit triangular as follows: diag = 'U' or 'u' A is assumed to be unit triangular. diag = 'N' or 'n' A is not assumed to be unit triangular. Unchanged on exit. m INTEGER. On entry, m specifies the number of rows of B. m must be at least zero. Unchanged on exit. n INTEGER. On entry, n specifies the number of columns of B. n must be at least zero. Unchanged on exit. alpha REAL. On entry, alpha specifies the scalar alpha. When alpha is zero then A is not referenced and B need not be set before entry. Unchanged on exit. A REAL array of DIMENSION ( lda, k ), where k is m when side = 'L' or 'l' and is n when side = 'R' or 'r'. Before entry with uplo = 'U' or 'u', the leading k by k upper triangular part of the array A must contain the upper triangular matrix and the strictly lower triangular part of A is not referenced. 
Before entry with uplo = 'L' or 'l', the leading k by k lower triangular part of the array A must contain the lower triangular matrix and the strictly upper triangular part of A is not referenced. Note that when diag = 'U' or 'u', the diagonal elements of A are not referenced either, but are assumed to be unity. Unchanged on exit. lda INTEGER. On entry, lda specifies the first dimension of A as declared in the calling (sub) program. When side = 'L' or 'l' then lda must be at least max( 1, m ), when side = 'R' or 'r' then lda must be at least max( 1, n ). Unchanged on exit. b REAL array of DIMENSION ( ldb, n ). Before entry, the leading m by n part of the array B must contain the right-hand side matrix B, and on exit is overwritten by the solution matrix X. ldb INTEGER. On entry, ldb specifies the first dimension of B as declared in the calling (sub) program. ldb must be at least max( 1, m ). Unchanged on exit. flag BOOLEAN. If flag is true, invert diagonal blocks. If flag is false, assume diagonal blocks are already inverted. (?) d_dinvA workspace of size NB*((M+NB-1)/NB))*NB, on device. d_x workspace of size N*M, on device. Level 3 Blas routine. ===================================================================== */ int i; /* quick return on wrong size */ if (M <= 0 || N <= 0) return; if (side == 'l' || side == 'L') { // side=L /* invert the diagonals */ if (flag == 1) { diag_dtrtri (M, uplo, diag, A, d_dinvA, lda); } if (tran == 'N' || tran == 'n') { /* the non-transpose case */ if (uplo == 'L' || uplo == 'l') { /* the lower case */ /* handle the first block seperately with alpha */ int MM = min (NB, M); cublasDgemm ('N', 'N', MM, N, MM, alpha, d_dinvA, NB, b, ldb, 0, d_x, M); if (NB >= M) { b_copy(); return; } cublasDgemm ('N', 'N', M-NB, N, NB, -1.0, A+NB, lda, d_x, M, alpha, b+NB, ldb); /* the rest blocks */ for (i=NB; i < M; i += NB) { MM = min (M-i, NB); cublasDgemm ('N', 'N', MM, N, MM, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M); if (i+NB >= M) break; cublasDgemm ('N', 'N', M-i-NB, N, NB, -1.0, A+i*lda+i+NB, lda, d_x+i, M, 1.0, b+i+NB, ldb); } } else { /* the upper case */ /* handle the first block seperately with alpha */ int MM = (M%NB==0) ? NB : (M%NB); i = M-MM; cublasDgemm ('N', 'N', MM, N, MM, alpha, d_dinvA+i*NB, NB, b+i, ldb, 0.0, d_x+i, M); if (i-NB < 0) { b_copy(); return; } cublasDgemm ('N', 'N', i, N, MM, -1.0, A+i*lda, lda, d_x+i, M, alpha, b, ldb); /* the rest blocks */ for (i=M-MM-NB; i >= 0; i -= NB) { cublasDgemm ('N', 'N', NB, N, NB, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0.0, d_x+i, M); if (i-NB < 0) break; cublasDgemm ('N', 'N', i, N, NB, -1.0, A+i*lda, lda, d_x+i, M, 1.0, b, ldb); } } } else { /* the transpose case */ if (uplo == 'L' || uplo == 'l') { /* the lower case */ /* handle the first block seperately with alpha */ int MM = (M%NB==0) ? 
NB : (M%NB); i = M-MM; cublasDgemm ('T', 'N', MM, N, MM, alpha, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M); if (i-NB < 0) { b_copy(); return; } cublasDgemm ('T', 'N', i, N, MM, -1.0, A+i, lda, d_x+i, M, alpha, b, ldb); /* the rest blocks */ for (i=M-MM-NB; i >= 0; i -= NB) { cublasDgemm ('T', 'N', NB, N, NB, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M); if (i-NB < 0) break; cublasDgemm ('T', 'N', i, N, NB, -1.0, A+i, lda, d_x+i, M, 1.0, b, ldb); } } else { /* the upper case */ /* handle the first block seperately with alpha */ int MM = min (NB, M); cublasDgemm ('T', 'N', MM, N, MM, alpha, d_dinvA, NB, b, ldb, 0, d_x, M); if (NB >= M) { b_copy(); return; } cublasDgemm ('T', 'N', M-NB, N, NB, -1.0, A+(NB)*lda, lda, d_x, M, alpha, b+NB, ldb); /* the rest blocks */ for (i=NB; i < M; i += NB) { MM = min (M-i, NB); cublasDgemm ('T', 'N', MM, N, MM, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M); if (i+NB >= M) break; cublasDgemm ('T', 'N', M-i-NB, N, NB, -1.0, A+(i+NB)*lda+i, lda, d_x+i, M, 1.0, b+i+NB, ldb); } } } } else { // side=R /* invert the diagonals */ if (flag == 1) { diag_dtrtri (N, uplo, diag, A, d_dinvA, lda); } if (tran == 'N' || tran == 'n') { /* the non-transpose case */ if (uplo == 'L' || uplo == 'l') { /* the lower case */ /* handle the first block seperately with alpha */ int NN = (N%NB==0) ? NB : (N%NB); i = N-NN; cublasDgemm ('N', 'N', M, NN, NN, alpha, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M); if (i-NB < 0) { b_copy(); return; } cublasDgemm ('N', 'N', M, i, NN, -1.0, d_x+i*M, M, A+i, lda, alpha, b, ldb); /* the rest blocks */ for (i=N-NN-NB; i >= 0; i -= NB) { cublasDgemm ('N', 'N', M, NB, NB, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M); if (i-NB < 0) break; cublasDgemm ('N', 'N', M, i, NB, -1.0, d_x+i*M, M, A+i, lda, 1.0, b, ldb); } } else { /* the upper case */ /* handle the first block seperately with alpha */ int NN = min(NB, N); cublasDgemm ('N', 'N', M, NN, NN, alpha, b, ldb, d_dinvA, NB, 0, d_x, M); if (NB >= N) { b_copy(); return; } cublasDgemm ('N', 'N', M, N-NB, NB, -1.0, d_x, M, A+NB*lda, lda, alpha, b+NB*ldb, ldb); /* the rest blocks */ for (i=NB; i < N; i += NB) { NN = min(NB, N-i); cublasDgemm ('N', 'N', M, NN, NN, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0, d_x+i*M, M); if (i+NB >= N) break; cublasDgemm ('N', 'N', M, N-i-NB, NB, -1.0, d_x+i*M, M, A+(i+NB)*lda+i, lda, 1.0, b+(i+NB)*ldb, ldb); } } } else { /* the transpose case */ if (uplo == 'L' || uplo == 'l') { /* the lower case */ /* handle the first block seperately with alpha */ int NN = min(NB, N); cublasDgemm ('N', 'T', M, NN, NN, alpha, b, ldb, d_dinvA, NB, 0, d_x, M); if (NB >= N) { b_copy(); return; } cublasDgemm ('N', 'T', M, N-NB, NB, -1.0, d_x, M, A+NB, lda, alpha, b+NB*ldb, ldb); /* the rest blocks */ for (i=NB; i < N; i += NB) { NN = min(NB, N-i); cublasDgemm ('N', 'T', M, NN, NN, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0, d_x+i*M, M); if (i+NB >= N) break; cublasDgemm ('N', 'T', M, N-i-NB, NB, -1.0, d_x+i*M, M, A+i*lda+NB+i, lda, 1.0, b+(i+NB)*ldb, ldb); } } else { /* the upper case */ /* handle the first block seperately with alpha */ int NN = (N%NB==0) ? 
NB : (N%NB); i = N-NN; cublasDgemm ('N', 'T', M, NN, NN, alpha, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M); if (i-NB < 0) { b_copy(); return; } cublasDgemm ('N', 'T', M, i, NN, -1.0, d_x+i*M, M, A+i*lda, lda, alpha, b, ldb); /* the rest blocks */ for (i=N-NN-NB; i >= 0; i -= NB) { cublasDgemm ('N', 'T', M, NB, NB, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M); if (i-NB < 0) break; cublasDgemm ('N', 'T', M, i, NB, -1.0, d_x+i*M, M, A+i*lda, lda, 1.0, b, ldb); } } } } b_copy(); }
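All the branches above instantiate one recurrence: invert the NB x NB diagonal blocks, then alternate a block solve with a GEMM update of the remaining right-hand side. A CPU sketch of the side='L', uplo='L', tran='N' case with N=4, NB=2 and naive loops; sizes, data, and the flat inverse storage are assumed for illustration, and MAGMA packs d_dinvA differently.

#include <cstdio>

const int N = 4, NB = 2;  // matrix order and block size (assumed)

// C(m x n) = alpha * A(m x k) * B(k x n) + beta * C, column-major, stride N
void gemm(int m, int n, int k, double alpha, const double* A, const double* B,
          double beta, double* C) {
    for (int j = 0; j < n; j++)
        for (int i = 0; i < m; i++) {
            double s = 0;
            for (int p = 0; p < k; p++) s += A[i + p * N] * B[p + j * N];
            C[i + j * N] = alpha * s + beta * C[i + j * N];
        }
}

// invert a 2x2 block (column-major, stride N) into inv
void inv2(const double* A, double* inv) {
    double det = A[0] * A[1 + N] - A[N] * A[1];
    inv[0] = A[1 + N] / det; inv[1] = -A[1] / det;
    inv[N] = -A[N] / det;    inv[1 + N] = A[0] / det;
}

int main() {
    // lower-triangular A (column-major) and right-hand side b; solution is all ones
    double A[N * N] = {2,1,3,1, 0,1,1,2, 0,0,4,1, 0,0,0,2};
    double b[N]     = {2, 2, 8, 6};
    double x[N]     = {0};
    double dinv[N * N] = {0};

    inv2(A, dinv);                              // inv(A00)
    inv2(A + NB + NB * N, dinv + NB + NB * N);  // inv(A11)

    // x0 = inv(A00) * b0;  b1 -= A10 * x0;  x1 = inv(A11) * b1
    gemm(NB, 1, NB,  1.0, dinv,              b,      0.0, x);
    gemm(NB, 1, NB, -1.0, A + NB,            x,      1.0, b + NB);
    gemm(NB, 1, NB,  1.0, dinv + NB + NB * N, b + NB, 0.0, x + NB);

    for (int i = 0; i < N; i++) printf("x[%d] = %g\n", i, x[i]);  // 1 1 1 1
    return 0;
}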
d252ba7749fd384df013b07ba4fef83551653f37.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // #include "kernel.h" // #include <math.h> // #include <stdio.h> // const int INF = 999999; ///////////////////////////////////////////////////////////////////////////// // Minimal Time ///////////////////////////////////////////////////////////////////////////// // __global__ void MinTime(float* ignTime, float* rothData, float* times, // float* L_n, int size, int rowSize, int colSize){ // __global__ void MT(){ // // Get thread id + stride // /* neighbor's address*/ /* N NE E SE S SW W NW NNW NNE NEE SEE SSE SSW SWW NWW*/ // int nCol[16] = { 0, 1, 1, 1, 0, -1, -1, -1, -1, 1, 2, 2, 1, -1, -2, -2}; // int nRow[16] = { 1, 1, 0, -1, -1, -1, 0, 1, 2, 2, 1, -1, -2, -2, -1, 1}; // int cell = blockIdx.x * blockDim.x + threadIdx.x; // int ncell, nrow, ncol, row, col; // float ignCell, ignCellN, timeNext, timeNow, ROS; // // timeNext = INF; // // get timenext and timenow from global memory // timeNow = times[1]; // timeNow = timeNext // timeNext = INF; // while(cell < size){ // row = cell / rowSize; // col = cell - rowSize*row; // // Load ignition cell to register // ignCell = ignTime[cell]; // // Do atomic update of TimeNext Var (atomicMin) // if(timeNext > ignTime[cell] && ignTime[cell] > timeNow){ // atomicExch(&times[1], ignCell); // timeNext = ignCell; // } // else if(ignCell == timeNow){ // // Find burning cells // for(int n = 0; n < 16; n++){ // // Propagate from burning cells // nrow = row + nRow[n]; // ncol = col + nCol[n]; // if ( nrow<0 || nrow>= rowSize || ncol<0 || ncol>= colSize ) // continue; // ncell = ncol + nrow*colSize; // ignCellN = ignTime[ncell]; // // printf("%f ", ignCellN); // // If neighbor is unburned // if(ignCellN > timeNow){ // // compute ignition time // ROS = rothData[3*cell + 0] * (1.0 - rothData[3*cell + 1]) / // (1.0 - rothData[3*cell + 1] * cos(rothData[3*cell + 2] * 3.14159/180)); // // ROS = 10.0 * (1.0 - 0.321) / (1.0 - 0.321 * cos(30.f)); // // ROS = 0.5; // float ignTimeNew = timeNow + L_n[n] / ROS; // // printf("%f, %f, %f, %f \n", timeNow, L_n[n], ROS, ignTimeNew); // if(ignTimeNew < ignCellN){ // // printf("%f ", ignTime[ncell]); // // ignTime[ncell] = ignTimeNew; // This could cause a race cond. // // atomicMin(&ignTime[ncell], ignTimeNew); // if(ignTimeNew < ignTime[ncell]) // ignTime[ncell] = ignTimeNew; // // float tmp = atomicExch(&ignTime[ncell], ignTimeNew); // // if(tmp < ignTimeNew) // // atomicExch(&ignTime[ncell], ignTimeNew); // // printf("%f \n",ignTime[ncell]); // } // if(ignTimeNew < timeNext){ // // timeNext = ignTimeNew; // // printf("%f, %f \n", times[1], ignTimeNew); // atomicExch(&times[1], ignTimeNew); // // printf("%f, %f \n \n", times[1], ignTimeNew); // } // } // } // } // // Do striding // cell += blockDim.x * gridDim.x; // // printf("%d \n", cell); // } // // printf("%f\n", timeNext); // if(timeNext == INF){ // // printf("BLAH"); // end = 1; // printf("Kernel: %d\n", end); // // } // } // #endif // #if IMT // #endif // #if BURNDIST // #endif
d252ba7749fd384df013b07ba4fef83551653f37.cu
// #include "kernel.h" // #include <math.h> // #include <stdio.h> // const int INF = 999999; ///////////////////////////////////////////////////////////////////////////// // Minimal Time ///////////////////////////////////////////////////////////////////////////// // __global__ void MinTime(float* ignTime, float* rothData, float* times, // float* L_n, int size, int rowSize, int colSize){ // __global__ void MT(){ // // Get thread id + stride // /* neighbor's address*/ /* N NE E SE S SW W NW NNW NNE NEE SEE SSE SSW SWW NWW*/ // int nCol[16] = { 0, 1, 1, 1, 0, -1, -1, -1, -1, 1, 2, 2, 1, -1, -2, -2}; // int nRow[16] = { 1, 1, 0, -1, -1, -1, 0, 1, 2, 2, 1, -1, -2, -2, -1, 1}; // int cell = blockIdx.x * blockDim.x + threadIdx.x; // int ncell, nrow, ncol, row, col; // float ignCell, ignCellN, timeNext, timeNow, ROS; // // timeNext = INF; // // get timenext and timenow from global memory // timeNow = times[1]; // timeNow = timeNext // timeNext = INF; // while(cell < size){ // row = cell / rowSize; // col = cell - rowSize*row; // // Load ignition cell to register // ignCell = ignTime[cell]; // // Do atomic update of TimeNext Var (atomicMin) // if(timeNext > ignTime[cell] && ignTime[cell] > timeNow){ // atomicExch(&times[1], ignCell); // timeNext = ignCell; // } // else if(ignCell == timeNow){ // // Find burning cells // for(int n = 0; n < 16; n++){ // // Propagate from burning cells // nrow = row + nRow[n]; // ncol = col + nCol[n]; // if ( nrow<0 || nrow>= rowSize || ncol<0 || ncol>= colSize ) // continue; // ncell = ncol + nrow*colSize; // ignCellN = ignTime[ncell]; // // printf("%f ", ignCellN); // // If neighbor is unburned // if(ignCellN > timeNow){ // // compute ignition time // ROS = rothData[3*cell + 0] * (1.0 - rothData[3*cell + 1]) / // (1.0 - rothData[3*cell + 1] * cos(rothData[3*cell + 2] * 3.14159/180)); // // ROS = 10.0 * (1.0 - 0.321) / (1.0 - 0.321 * cos(30.f)); // // ROS = 0.5; // float ignTimeNew = timeNow + L_n[n] / ROS; // // printf("%f, %f, %f, %f \n", timeNow, L_n[n], ROS, ignTimeNew); // if(ignTimeNew < ignCellN){ // // printf("%f ", ignTime[ncell]); // // ignTime[ncell] = ignTimeNew; // This could cause a race cond. // // atomicMin(&ignTime[ncell], ignTimeNew); // if(ignTimeNew < ignTime[ncell]) // ignTime[ncell] = ignTimeNew; // // float tmp = atomicExch(&ignTime[ncell], ignTimeNew); // // if(tmp < ignTimeNew) // // atomicExch(&ignTime[ncell], ignTimeNew); // // printf("%f \n",ignTime[ncell]); // } // if(ignTimeNew < timeNext){ // // timeNext = ignTimeNew; // // printf("%f, %f \n", times[1], ignTimeNew); // atomicExch(&times[1], ignTimeNew); // // printf("%f, %f \n \n", times[1], ignTimeNew); // } // } // } // } // // Do striding // cell += blockDim.x * gridDim.x; // // printf("%d \n", cell); // } // // printf("%f\n", timeNext); // if(timeNext == INF){ // // printf("BLAH"); // end = 1; // printf("Kernel: %d\n", end); // // } // } // #endif // #if IMT // #endif // #if BURNDIST // #endif
1910a526089d28d59e4387cdb78a464762d9456a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> using namespace std; const int N = 16; const int CORES = 16; /* this is the GPU kernel function */ __global__ void hello(char* s) { /* blockIdx is a struct containing our block id if this is a one-dimensional kernel, then x is the block id y and z are also available for 2 or 3 dimensional kernels */ /* capitalize the string by subtracting 32 from each lowercase letter */ if ((s[blockIdx.x] >= 'a') && (s[blockIdx.x] <= 'z')) { s[blockIdx.x] -= 32; } } /* the main function begins running on the CPU */ int main( ) { /* this is the string data - it is 'hello world', in lower-case */ char cpu_string[N] = "hello world!"; /* allocate space on the GPU for the string */ char* gpu_string; hipMalloc((void**) &gpu_string, N * sizeof(char)); /* send the character array to the GPU */ hipMemcpy(gpu_string, cpu_string, N * sizeof(char), hipMemcpyHostToDevice); /* invoke the GPU to run the kernel in parallel we specify CORES cores which each run once */ hipLaunchKernelGGL(( hello), dim3(CORES), dim3(1), 0, 0, gpu_string); /* copy the string back from the GPU to the CPU */ hipMemcpy(cpu_string, gpu_string, N * sizeof(char), hipMemcpyDeviceToHost); /* free the memory we allocated on the GPU */ hipFree(gpu_string); /* print the string we got back from the GPU */ cout << cpu_string << endl; return 0; }
1910a526089d28d59e4387cdb78a464762d9456a.cu
#include <iostream> using namespace std; const int N = 16; const int CORES = 16; /* this is the GPU kernel function */ __global__ void hello(char* s) { /* blockIdx is a struct containing our block id if this is a one-dimensional kernel, then x is the block id y and z are also available for 2 or 3 dimensional kernels */ /* capitalize the string by subtracting 32 from each lowercase letter */ if ((s[blockIdx.x] >= 'a') && (s[blockIdx.x] <= 'z')) { s[blockIdx.x] -= 32; } } /* the main function begins running on the CPU */ int main( ) { /* this is the string data - it is 'hello world', in lower-case */ char cpu_string[N] = "hello world!"; /* allocate space on the GPU for the string */ char* gpu_string; cudaMalloc((void**) &gpu_string, N * sizeof(char)); /* send the character array to the GPU */ cudaMemcpy(gpu_string, cpu_string, N * sizeof(char), cudaMemcpyHostToDevice); /* invoke the GPU to run the kernel in parallel we specify CORES cores which each run once */ hello<<<CORES, 1>>>(gpu_string); /* copy the string back from the GPU to the CPU */ cudaMemcpy(cpu_string, gpu_string, N * sizeof(char), cudaMemcpyDeviceToHost); /* free the memory we allocated on the GPU */ cudaFree(gpu_string); /* print the string we got back from the GPU */ cout << cpu_string << endl; return 0; }
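The hello kernel is launched as CORES blocks of one thread each. A minimal alternative sketch under the same buffer: one block of N threads, indexed by threadIdx.x with a bounds guard (hello_threads is a hypothetical name, not part of the file above).
// One block of N threads instead of N blocks of one thread; each thread
// guards its own index. Sketch only.
__global__ void hello_threads(char* s, int n) {
    int i = threadIdx.x;
    if (i < n && s[i] >= 'a' && s[i] <= 'z') {
        s[i] -= 32;   // capitalize, as in the original kernel
    }
}
// launched as: hello_threads<<<1, N>>>(gpu_string, N);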
9edf2fe2655c5edd9add1ed0e464f88468fc38cd.hip
// !!! This is a file automatically generated by hipify!!! #include "device_launch_parameters.h" #include <iostream> int main() { int deviceCount; hipGetDeviceCount(&deviceCount); for(int i=0;i<deviceCount;i++) { hipDeviceProp_t devProp; hipGetDeviceProperties(&devProp, i); std::cout << "Using GPU device " << i << ": " << devProp.name << std::endl; std::cout << "Total global memory: " << devProp.totalGlobalMem / 1024 / 1024 << "MB" << std::endl; std::cout << "Number of SMs: " << devProp.multiProcessorCount << std::endl; std::cout << "Shared memory per thread block: " << devProp.sharedMemPerBlock / 1024.0 << " KB" << std::endl; std::cout << "Max threads per thread block: " << devProp.maxThreadsPerBlock << std::endl; std::cout << "Number of 32-bit registers available per thread block: " << devProp.regsPerBlock << std::endl; std::cout << "Max threads per multiprocessor: " << devProp.maxThreadsPerMultiProcessor << std::endl; std::cout << "Max warps per multiprocessor: " << devProp.maxThreadsPerMultiProcessor / 32 << std::endl; std::cout << "Number of multiprocessors on the device: " << devProp.multiProcessorCount << std::endl; std::cout << "======================================================" << std::endl; } return 0; }
9edf2fe2655c5edd9add1ed0e464f88468fc38cd.cu
#include "device_launch_parameters.h" #include <iostream> int main() { int deviceCount; cudaGetDeviceCount(&deviceCount); for(int i=0;i<deviceCount;i++) { cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp, i); std::cout << "Using GPU device " << i << ": " << devProp.name << std::endl; std::cout << "Total global memory: " << devProp.totalGlobalMem / 1024 / 1024 << "MB" << std::endl; std::cout << "Number of SMs: " << devProp.multiProcessorCount << std::endl; std::cout << "Shared memory per thread block: " << devProp.sharedMemPerBlock / 1024.0 << " KB" << std::endl; std::cout << "Max threads per thread block: " << devProp.maxThreadsPerBlock << std::endl; std::cout << "Number of 32-bit registers available per thread block: " << devProp.regsPerBlock << std::endl; std::cout << "Max threads per multiprocessor: " << devProp.maxThreadsPerMultiProcessor << std::endl; std::cout << "Max warps per multiprocessor: " << devProp.maxThreadsPerMultiProcessor / 32 << std::endl; std::cout << "Number of multiprocessors on the device: " << devProp.multiProcessorCount << std::endl; std::cout << "======================================================" << std::endl; } return 0; }
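Both listings derive warps per multiprocessor by dividing by a hard-coded 32. The properties struct already reports the warp width, so a sketch using the warpSize field (present in both cudaDeviceProp and hipDeviceProp_t) avoids the magic number:
// Derive warps-per-SM from the queried warp size rather than a literal 32.
std::cout << "Max warps per multiprocessor: "
          << devProp.maxThreadsPerMultiProcessor / devProp.warpSize
          << std::endl;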
84e3406d155f4b40e6800e659419642dff9df184.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include "pa5.h" #include "ppm.h" __device__ int monus(int x, int y); __device__ int maxus(int x, int y,int max); dim3 block_dim, grid_dim; int blur_radius; int img_w; int img_h; char* input_ppm; char* output_ppm; struct Image* img_in; struct Image* img_out; /* GPU function for blurring an image of width w and height h Blur radius given by r in: Pixel values to be used for calculations out: Blurred pixel values */ __global__ void blur_img(int w, int h,int r, struct Pixel *in, struct Pixel *out) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; if (blockId < h*w) { int minX = monus(in[blockId].x,r); int maxX = maxus(in[blockId].x,r,w-1); // clamp to the last valid column int minY = monus(in[blockId].y,r); int maxY = maxus(in[blockId].y,r,h-1); // clamp to the last valid row int red = 0; int green = 0; int blue = 0; int num_pixels = 0; int x,y; int index = 0; unsigned char pr = 0, pg = 0, pb = 0; for ( y = minY; y <= maxY; y++ ){ for (x = minX; x <= maxX; x++){ index = y*w+x; // read all three channels of the same neighbor pixel pr = in[index].r; pg = in[index].g; pb = in[index].b; red += (int)pr; green += (int)pg; blue += (int)pb; num_pixels++; } } red = floor( (float) red / num_pixels ); green = floor( (float) green / num_pixels); blue = floor( (float) blue / num_pixels); struct Pixel jp; jp.x = in[blockId].x; jp.y = in[blockId].y; jp.r = red; jp.b = blue; jp.g = green; out[blockId] = jp; } } int main(int argc, char** argv) { if (argc < 4) { printf("Not enough arguments\n"); return 0; } blur_radius = atoi(argv[1]); input_ppm = argv[2]; output_ppm = argv[3]; if (blur_radius < 0) { printf("blur radius too small\n"); return 0; } init(); run(); return 0; } int init() { //Create the two necessary Image objects img_in = ImageRead(input_ppm); img_w = ImageWidth(img_in); img_h = ImageHeight(img_in); img_out = ImageCreate(img_w, img_h); //Setup the grid and block sizes based on image width and height block_dim = dim3(3); grid_dim = dim3(img_h, img_w); return 0; } /* Setup two copies of pixels array to work on the host and device.
Call GPU function to blur all the images Write the result */ int run() { int num_pixels = img_w*img_h; struct Pixel *pixels_host_in = (Pixel *) malloc(sizeof(Pixel)*num_pixels); struct Pixel *pixels_host_out = (Pixel *) malloc(sizeof(Pixel)*num_pixels); struct Pixel *pixel_device_in ; struct Pixel *pixel_device_out ; hipMalloc((void **) &pixel_device_in, (sizeof(Pixel)*num_pixels)); hipMalloc((void **) &pixel_device_out, (sizeof(Pixel)*num_pixels)); // populate pixel_device_in int x,y; int index = 0; for (y = 0; y < img_h; y++) { for (x = 0; x < img_w; x++) { index = y*img_w + x; pixels_host_in[index].x = x; pixels_host_in[index].y = y; pixels_host_in[index].r = ImageGetPixel(img_in, x, y, 0); pixels_host_in[index].g = ImageGetPixel(img_in, x, y, 1); pixels_host_in[index].b = ImageGetPixel(img_in, x, y, 2); } } hipMemcpy(pixel_device_in, (Pixel*)pixels_host_in, sizeof(Pixel)*num_pixels, hipMemcpyHostToDevice); hipLaunchKernelGGL(( blur_img), dim3(grid_dim), dim3(block_dim), 0, 0, img_w, img_h, blur_radius, pixel_device_in, pixel_device_out); hipDeviceSynchronize(); hipGetLastError(); hipMemcpy((Pixel*)pixels_host_out, pixel_device_out, sizeof(Pixel)*num_pixels, hipMemcpyDeviceToHost); //write result back to ppm img for (y = 0; y < img_h; y++) { for (x = 0; x < img_w; x++) { index = y*img_w+x; update_image(pixels_host_out[index]); } } ImageWrite(img_out, output_ppm); hipFree((void*) pixel_device_in); hipFree((void*) pixel_device_out); free(pixels_host_in); free(pixels_host_out); free(img_in); free(img_out); printf("\nImage %s (width: %d, height: %d) \t blur Radius: %d \n", input_ppm, img_w, img_h, blur_radius); return 1; } __device__ int monus (int x, int y) { // truncated subtraction: max(x - y, 0) if (x - y < 0){ return 0; } return x - y; } __device__ int maxus (int x, int y, int max) { if (x + y > max){ return max; } return x + y; } //given a pixel, update it on the img_out void update_image(struct Pixel pixel) { ImageSetPixel(img_out, pixel.x, pixel.y, 0, pixel.r); ImageSetPixel(img_out, pixel.x, pixel.y, 1, pixel.g); ImageSetPixel(img_out, pixel.x, pixel.y, 2, pixel.b); }
84e3406d155f4b40e6800e659419642dff9df184.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include "pa5.h" #include "ppm.h" __device__ int monus(int x, int y); __device__ int maxus(int x, int y,int max); dim3 block_dim, grid_dim; int blur_radius; int img_w; int img_h; char* input_ppm; char* output_ppm; struct Image* img_in; struct Image* img_out; /* GPU function for blurring an image of width w and height h Blur radius given by r in: Pixel values to be used for calculations out: Blurred pixel values */ __global__ void blur_img(int w, int h,int r, struct Pixel *in, struct Pixel *out) { int blockId = blockIdx.y * gridDim.x + blockIdx.x; if (blockId < h*w) { int minX = monus(in[blockId].x,r); int maxX = maxus(in[blockId].x,r,w-1); // clamp to the last valid column int minY = monus(in[blockId].y,r); int maxY = maxus(in[blockId].y,r,h-1); // clamp to the last valid row int red = 0; int green = 0; int blue = 0; int num_pixels = 0; int x,y; int index = 0; unsigned char pr = 0, pg = 0, pb = 0; for ( y = minY; y <= maxY; y++ ){ for (x = minX; x <= maxX; x++){ index = y*w+x; // read all three channels of the same neighbor pixel pr = in[index].r; pg = in[index].g; pb = in[index].b; red += (int)pr; green += (int)pg; blue += (int)pb; num_pixels++; } } red = floor( (float) red / num_pixels ); green = floor( (float) green / num_pixels); blue = floor( (float) blue / num_pixels); struct Pixel jp; jp.x = in[blockId].x; jp.y = in[blockId].y; jp.r = red; jp.b = blue; jp.g = green; out[blockId] = jp; } } int main(int argc, char** argv) { if (argc < 4) { printf("Not enough arguments\n"); return 0; } blur_radius = atoi(argv[1]); input_ppm = argv[2]; output_ppm = argv[3]; if (blur_radius < 0) { printf("blur radius too small\n"); return 0; } init(); run(); return 0; } int init() { //Create the two necessary Image objects img_in = ImageRead(input_ppm); img_w = ImageWidth(img_in); img_h = ImageHeight(img_in); img_out = ImageCreate(img_w, img_h); //Setup the grid and block sizes based on image width and height block_dim = dim3(3); grid_dim = dim3(img_h, img_w); return 0; } /* Setup two copies of pixels array to work on the host and device.
Call GPU function to blur all the images Write the result */ int run() { int num_pixels = img_w*img_h; struct Pixel *pixels_host_in = (Pixel *) malloc(sizeof(Pixel)*num_pixels); struct Pixel *pixels_host_out = (Pixel *) malloc(sizeof(Pixel)*num_pixels); struct Pixel *pixel_device_in ; struct Pixel *pixel_device_out ; cudaMalloc((void **) &pixel_device_in, (sizeof(Pixel)*num_pixels)); cudaMalloc((void **) &pixel_device_out, (sizeof(Pixel)*num_pixels)); // populate pixel_device_in int x,y; int index = 0; for (y = 0; y < img_h; y++) { for (x = 0; x < img_w; x++) { index = y*img_w + x; pixels_host_in[index].x = x; pixels_host_in[index].y = y; pixels_host_in[index].r = ImageGetPixel(img_in, x, y, 0); pixels_host_in[index].g = ImageGetPixel(img_in, x, y, 1); pixels_host_in[index].b = ImageGetPixel(img_in, x, y, 2); } } cudaMemcpy(pixel_device_in, (Pixel*)pixels_host_in, sizeof(Pixel)*num_pixels, cudaMemcpyHostToDevice); blur_img<<<grid_dim, block_dim>>>(img_w, img_h, blur_radius, pixel_device_in, pixel_device_out); cudaDeviceSynchronize(); cudaGetLastError(); cudaMemcpy((Pixel*)pixels_host_out, pixel_device_out, sizeof(Pixel)*num_pixels, cudaMemcpyDeviceToHost); //write result back to ppm img for (y = 0; y < img_h; y++) { for (x = 0; x < img_w; x++) { index = y*img_w+x; update_image(pixels_host_out[index]); } } ImageWrite(img_out, output_ppm); cudaFree((void*) pixel_device_in); cudaFree((void*) pixel_device_out); free(pixels_host_in); free(pixels_host_out); free(img_in); free(img_out); printf("\nImage %s (width: %d, height: %d) \t blur Radius: %d \n", input_ppm, img_w, img_h, blur_radius); return 1; } __device__ int monus (int x, int y) { // truncated subtraction: max(x - y, 0) if (x - y < 0){ return 0; } return x - y; } __device__ int maxus (int x, int y, int max) { if (x + y > max){ return max; } return x + y; } //given a pixel, update it on the img_out void update_image(struct Pixel pixel) { ImageSetPixel(img_out, pixel.x, pixel.y, 0, pixel.r); ImageSetPixel(img_out, pixel.x, pixel.y, 1, pixel.g); ImageSetPixel(img_out, pixel.x, pixel.y, 2, pixel.b); }
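With the channel indexing corrected (the original offset the read index by threadId%3, mixing the red of one pixel with the green and blue of its neighbors), every thread of a 3-thread block now computes the identical output pixel, so two of the three do redundant work. A hedged sketch of a flat one-thread-per-pixel variant, reusing the file's own Pixel, monus, and maxus; blur_img_flat and the d_in/d_out buffer names are hypothetical:
// One thread per output pixel via a flat launch. Sketch only.
__global__ void blur_img_flat(int w, int h, int r, struct Pixel *in, struct Pixel *out) {
    int pixel = blockIdx.x * blockDim.x + threadIdx.x;
    if (pixel >= w * h) return;
    int minX = monus(in[pixel].x, r);
    int maxX = maxus(in[pixel].x, r, w - 1);
    int minY = monus(in[pixel].y, r);
    int maxY = maxus(in[pixel].y, r, h - 1);
    int red = 0, green = 0, blue = 0, num_pixels = 0;
    for (int y = minY; y <= maxY; y++) {
        for (int x = minX; x <= maxX; x++) {
            int idx = y * w + x;                 // same neighbor for all three channels
            red += in[idx].r; green += in[idx].g; blue += in[idx].b;
            num_pixels++;
        }
    }
    struct Pixel jp = in[pixel];                 // keep the pixel's coordinates
    jp.r = red / num_pixels; jp.g = green / num_pixels; jp.b = blue / num_pixels;
    out[pixel] = jp;
}
// launched as: blur_img_flat<<<(w*h + 255)/256, 256>>>(w, h, blur_radius, d_in, d_out);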
0aa97968a156098d15420cc798c34073e44a7a07.hip
// !!! This is a file automatically generated by hipify!!! #include "MCQ.h" #include <iostream> #include <algorithm> #include <stack> MCQ::MCQ(int n, std::vector<std::vector<int> > A, std::vector<int> degree, int style): MC(n, A, degree){ // number of nodes in the graph this->n = n; // equals 1 if an edge exists this->A = A; // the number of vertices adjacent to vertex i this->degree = degree; /* for(int& i: degree) std::cout << i << " "; std::cout << std::endl; */ // largest clique found so far nodes = maxSize = 0; timeLimit = -1; // flag to customise the algorithm with respect to ordering of the vertices this->style = style; // largest clique found solution.resize(n); } MCQ::~MCQ(){ } // find the largest clique or terminate after the time limit void MCQ::search(){ gettimeofday(&tod1, NULL); nodes = 0; // contains vertices i+1 and used when sorting vertices by their color colorClass.resize(n); // current clique found std::vector<int> C; C.reserve(n); // vertices that may be added to growing clique (candidate set) std::vector<int> P(n); // init to have all vertices as possible candidates for(int i=0; i<n; i++) colorClass[i].resize(n); // order vertices orderVertices(P); /* for(int& p: P) std::cout << p << " "; std::cout << std::endl; */ expand(C, P); } void MCQ::expand(std::vector<int> C, std::vector<int> P){ int w; // see if the time limit has been reached gettimeofday(&tod2, NULL); if(timeLimit > 0 && todiff(&tod2, &tod1)/1000 >= timeLimit) return; // count the size of the backtrack search tree explored nodes++; int m = P.size(); int color[m]; numberSort(C, P, P, color); /* std::cout << "Color: "; for(int i=0; i<m; i++){ std::cout << P[i] << " "; } std::cout << std::endl; */ // iterate over the candidate set for(int i=P.size()-1; i>= 0; i--){ //std::cout << "Loop:" << i << " Max: " << maxSize << std::endl; //timeval t1, t2; //gettimeofday(&t1, NULL); /* // print C and P std::cout << "C: "; for(auto& w: C){ std::cout << w << " "; } std::cout << "\t"; std::cout << "P: "; for(auto& w: P){ std::cout << w << " "; } std::cout << std::endl; */ // return if clique cannot grow large enough to be maximum clique if(C.size() + color[i] <= maxSize) return; // select a vertex from P and add it to the current clique int v = P[i]; C.push_back(v); //std::cout << "V: " << v << std::endl; // newP is the set of vertices in P that are adjacent to vertex v // all vertices in newP are adjacent to all vertices in C and all pairs of vertices in C are adjacent std::vector<int> newP; newP.reserve(i); for(int j=0; j<=i; j++){ w = P[j]; if(A[w][v] == 1){ newP.push_back(w); } } // if newP is empty it is maximal, so stop searching and save it if it is maximum if(newP.empty() && C.size() > maxSize){ //std::cout << "Saving" << std::endl; saveSolution(C); } // else recursively continue search else if(!newP.empty()){ //std::cout << "Expanding" << std::endl; expand(C, newP); } // remove v from P and C when returning C.pop_back(); P.pop_back(); //gettimeofday(&t2, NULL); //std::cout << todiff(&t2, &t1)/1000 << std::endl; } } void MCQ::orderVertices(std::vector<int>& verts){ // create the vertices to sort std::vector<Vertex> V(n); for(int i=0; i<n; i++){ V[i].index = i; V[i].degree = degree[i]; } // calculate the sum of the neighboring degrees for(int i=0; i<n; i++){ for(int j=0; j<n; j++){ if(A[i][j] == 1) V[i].setNebDeg(V[i].getNebDeg() + degree[j]); } } /* std::cout << "NebDeg: "; for(Vertex& v: V) std::cout << v.getNebDeg() << " "; std::cout << std::endl; */ // order based on style passed in switch(style){ case 1: // order
by non-increasing degree, tie-breaking on index std::sort(V.begin(), V.end(), Vertex::VertexCmp); break; case 2: // order by minimum width order minWidthOrder(V); break; case 3: // known as MCR // order by non-increasing degree, tie-breaking on sum of the neighborhood // degree nebDeg and then on index std::sort(V.begin(), V.end(), Vertex::MCRComparator); break; } for(int i=0; i<V.size(); i++){ verts[i] = V[i].index; } } bool MCQ::conflicts(int v, std::vector<int> cClass){ // return true if the passed-in vertex is adjacent to any of the vertices in color class for(int i=0; i<cClass.size(); i++){ int w = cClass[i]; if(A[v][w] == 1) return true; } // return false otherwise return false; } void MCQ::numberSort(std::vector<int> C, std::vector<int> colOrd, std::vector<int>& P, int color[]){ // records the number of colors used int colors = 0; int m = colOrd.size(); // clear out the color classes that might be used for(int i=0; i<m; i++){ colorClass[i].clear(); } // vertices are selected from colOrd and placed into first color class in which there are no conflicts for(int i=0; i<m; i++){ int v = colOrd[i]; int k = 0; // vertices in colorClass are not pairwise adjacent and have the same color (independent set) while (conflicts(v, colorClass[k])) k++; colorClass[k].push_back(v); colors = std::max(colors, k+1); } /* std::cout << "ColorClasses: " << std::endl; for(int i=0; i<colors; i++){ std::cout << "C" << i << " "; for(int j=0; j<colorClass[i].size(); j++){ std::cout << colorClass[i][j] << " "; } std::cout << std::endl; } */ // pigeonhole sort P.clear(); int i = 0; for(int k=0; k<colors; k++){ for(int j=0; j<colorClass[k].size(); j++){ int v = colorClass[k][j]; P.push_back(v); color[i++] = k+1; } } /* std::cout << "Pigeonhole: "; for(int& v: P) std::cout << v << " "; std::cout << std::endl; std::cout << "Colors: "; for(int i=0; i<m; i++) std::cout << color[i] << " "; std::cout << std::endl; */ } void MCQ::minWidthOrder(std::vector<Vertex>& V){ std::vector<Vertex> L; std::stack<Vertex> S; for(int i=0; i<V.size(); i++){ L.push_back(V[i]); } while(!L.empty()){ // select vertex with smallest degree and store in v int pos = 0; Vertex v = L[0]; for(int i=0; i<L.size(); i++){ if(L[i].degree < v.degree){ v = L[i]; pos = i; } } // push v onto stack and remove from L S.push(v); L.erase(L.begin()+pos); // reduce degree of all vertices in L that are adjacent to v for(Vertex& u : L){ if(A[u.index][v.index] == 1) u.degree--; } } // pop off stack and place onto V giving minimum width ordering int k = 0; while(!S.empty()){ V[k++] = S.top(); S.pop(); } }
0aa97968a156098d15420cc798c34073e44a7a07.cu
#include "MCQ.h" #include <iostream> #include <algorithm> #include <stack> MCQ::MCQ(int n, std::vector<std::vector<int> > A, std::vector<int> degree, int style): MC(n, A, degree){ // number of nodes in the graph this->n = n; // equals 1 if an edge exists this->A = A; // the number of vertices adjacent to vertex i this->degree = degree; /* for(int& i: degree) std::cout << i << " "; std::cout << std::endl; */ // largest clique found so far nodes = maxSize = 0; timeLimit = -1; // flag to customise the algorithm with respect to ordering of the vertices this->style = style; // largest clique found solution.resize(n); } MCQ::~MCQ(){ } // find the largest clique or terminate after the time limit void MCQ::search(){ gettimeofday(&tod1, NULL); nodes = 0; // contains vertices i+1 and used when sorting vertices by their color colorClass.resize(n); // current clique found std::vector<int> C; C.reserve(n); // vertices that may be added to growing clique (candidate set) std::vector<int> P(n); // init to have all vertices as possible candidates for(int i=0; i<n; i++) colorClass[i].resize(n); // order vertices orderVertices(P); /* for(int& p: P) std::cout << p << " "; std::cout << std::endl; */ expand(C, P); } void MCQ::expand(std::vector<int> C, std::vector<int> P){ int w; // see if the time limit has been reached gettimeofday(&tod2, NULL); if(timeLimit > 0 && todiff(&tod2, &tod1)/1000 >= timeLimit) return; // count the size of the backtrack search tree explored nodes++; int m = P.size(); int color[m]; numberSort(C, P, P, color); /* std::cout << "Color: "; for(int i=0; i<m; i++){ std::cout << P[i] << " "; } std::cout << std::endl; */ // iterate over the candidate set for(int i=P.size()-1; i>= 0; i--){ //std::cout << "Loop:" << i << " Max: " << maxSize << std::endl; //timeval t1, t2; //gettimeofday(&t1, NULL); /* // print C and P std::cout << "C: "; for(auto& w: C){ std::cout << w << " "; } std::cout << "\t"; std::cout << "P: "; for(auto& w: P){ std::cout << w << " "; } std::cout << std::endl; */ // return if clique cannot grow large enough to be maximum clique if(C.size() + color[i] <= maxSize) return; // select a vertex from P and add it to the current clique int v = P[i]; C.push_back(v); //std::cout << "V: " << v << std::endl; // newP is the set of vertices in P that are adjacent to vertex v // all vertices in newP are adjacent to all vertices in C and all pairs of vertices in C are adjacent std::vector<int> newP; newP.reserve(i); for(int j=0; j<=i; j++){ w = P[j]; if(A[w][v] == 1){ newP.push_back(w); } } // if newP is empty it is maximal, so stop searching and save it if it is maximum if(newP.empty() && C.size() > maxSize){ //std::cout << "Saving" << std::endl; saveSolution(C); } // else recursively continue search else if(!newP.empty()){ //std::cout << "Expanding" << std::endl; expand(C, newP); } // remove v from P and C when returning C.pop_back(); P.pop_back(); //gettimeofday(&t2, NULL); //std::cout << todiff(&t2, &t1)/1000 << std::endl; } } void MCQ::orderVertices(std::vector<int>& verts){ // create the vertices to sort std::vector<Vertex> V(n); for(int i=0; i<n; i++){ V[i].index = i; V[i].degree = degree[i]; } // calculate the sum of the neighboring degrees for(int i=0; i<n; i++){ for(int j=0; j<n; j++){ if(A[i][j] == 1) V[i].setNebDeg(V[i].getNebDeg() + degree[j]); } } /* std::cout << "NebDeg: "; for(Vertex& v: V) std::cout << v.getNebDeg() << " "; std::cout << std::endl; */ // order based on style passed in switch(style){ case 1: // order by non-increasing degree, tie-breaking on index
std::sort(V.begin(), V.end(), Vertex::VertexCmp); break; case 2: // order by minimum width order minWidthOrder(V); break; case 3: // known as MCR // order by non-increasing degree, tie-breaking on sum of the neighborhood // degree nebDeg and then on index std::sort(V.begin(), V.end(), Vertex::MCRComparator); break; } for(int i=0; i<V.size(); i++){ verts[i] = V[i].index; } } bool MCQ::conflicts(int v, std::vector<int> cClass){ // return true if the passed-in vertex is adjacent to any of the vertices in color class for(int i=0; i<cClass.size(); i++){ int w = cClass[i]; if(A[v][w] == 1) return true; } // return false otherwise return false; } void MCQ::numberSort(std::vector<int> C, std::vector<int> colOrd, std::vector<int>& P, int color[]){ // records the number of colors used int colors = 0; int m = colOrd.size(); // clear out the color classes that might be used for(int i=0; i<m; i++){ colorClass[i].clear(); } // vertices are selected from colOrd and placed into first color class in which there are no conflicts for(int i=0; i<m; i++){ int v = colOrd[i]; int k = 0; // vertices in colorClass are not pairwise adjacent and have the same color (independent set) while (conflicts(v, colorClass[k])) k++; colorClass[k].push_back(v); colors = std::max(colors, k+1); } /* std::cout << "ColorClasses: " << std::endl; for(int i=0; i<colors; i++){ std::cout << "C" << i << " "; for(int j=0; j<colorClass[i].size(); j++){ std::cout << colorClass[i][j] << " "; } std::cout << std::endl; } */ // pigeonhole sort P.clear(); int i = 0; for(int k=0; k<colors; k++){ for(int j=0; j<colorClass[k].size(); j++){ int v = colorClass[k][j]; P.push_back(v); color[i++] = k+1; } } /* std::cout << "Pigeonhole: "; for(int& v: P) std::cout << v << " "; std::cout << std::endl; std::cout << "Colors: "; for(int i=0; i<m; i++) std::cout << color[i] << " "; std::cout << std::endl; */ } void MCQ::minWidthOrder(std::vector<Vertex>& V){ std::vector<Vertex> L; std::stack<Vertex> S; for(int i=0; i<V.size(); i++){ L.push_back(V[i]); } while(!L.empty()){ // select vertex with smallest degree and store in v int pos = 0; Vertex v = L[0]; for(int i=0; i<L.size(); i++){ if(L[i].degree < v.degree){ v = L[i]; pos = i; } } // push v onto stack and remove from L S.push(v); L.erase(L.begin()+pos); // reduce degree of all vertices in L that are adjacent to v for(Vertex& u : L){ if(A[u.index][v.index] == 1) u.degree--; } } // pop off stack and place onto V giving minimum width ordering int k = 0; while(!S.empty()){ V[k++] = S.top(); S.pop(); } }
71a730a232d2000f290a3ac597ae5c8d93b6f88a.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <stdio.h> #include <sys/time.h> #include <hip/hip_runtime.h> using namespace std; #define CUDA_CHECK_RETURN(value) {\ hipError_t _m_cudaStat = value;\ if (_m_cudaStat != hipSuccess) {\ fprintf(stderr, "Error %s at line %d in file %s\n", hipGetErrorString(_m_cudaStat), __LINE__, __FILE__);\ exit(1);\ }} __global__ void dgemm(float *A, float *B, float *C, int threads_per_block, int n) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; float sum = 0.0f; int ia = n * (threads_per_block * by + ty); // row number in A int ib = threads_per_block * bx + tx; // column number in B int ic = ia + ib; // element number in C for (int k = 0; k < n; k++) sum += A[ia + k] * B[ib + k * n]; C[ic] = sum; } void InitMatrix(float *A, float *B, float *C, int size) { for (int i = 0; i < size; i++) for (int j = 0; j < size; j++) { int k = size * i + j; A[k] = rand(); B[k] = rand(); C[k] = 0.0; } } void printMatrix(float *C, int size) { for (int i = 0; i < size * size; i++) cout << C[i] << "\t"; cout << endl; } double wtime() { struct timeval t; gettimeofday(&t, NULL); return (double)t.tv_sec + (double)t.tv_usec * 1E-6; } int main(int argc, char* argv[]) { if (argc != 4) { cout << "launch parameters: [matrix size] [threads_x] [threads_y]" << endl; return 1; } int size = atoi(argv[1]); int threads_per_block_x = atoi(argv[2]); int threads_per_block_y = atoi(argv[3]); srand(time(NULL)); float *A = new float[size * size]; float *B = new float[size * size]; float *C = new float[size * size]; float *dev_A, *dev_B, *dev_C; hipMalloc((void**)&dev_A, size * size * sizeof(float)); hipMalloc((void**)&dev_B, size * size * sizeof(float)); hipMalloc((void**)&dev_C, size * size * sizeof(float)); InitMatrix(A, B, C, size); dim3 threads(threads_per_block_x, threads_per_block_y); dim3 blocks(size / threads.x, size / threads.y); hipMemcpy(dev_A, A, size * size * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_B, B, size * size * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_C, C, size * size * sizeof(float), hipMemcpyHostToDevice); float elapsedTime; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( dgemm) , dim3(blocks), dim3(threads) , 0, 0, dev_A, dev_B, dev_C, threads_per_block_x, size); hipEventRecord(stop, 0); hipEventSynchronize(stop); CUDA_CHECK_RETURN(hipDeviceSynchronize()); CUDA_CHECK_RETURN(hipGetLastError()); hipEventElapsedTime(&elapsedTime, start, stop); hipMemcpy(C, dev_C, size * size * sizeof(float), hipMemcpyDeviceToHost); cout << "time: " << elapsedTime << " ms" << endl; printMatrix(C, size); delete [] A; delete [] B; delete [] C; hipEventDestroy(start); hipEventDestroy(stop); hipFree(dev_A); hipFree(dev_B); hipFree(dev_C); return 0; }
71a730a232d2000f290a3ac597ae5c8d93b6f88a.cu
#include <iostream> #include <stdio.h> #include <sys/time.h> #include <cuda.h> using namespace std; #define CUDA_CHECK_RETURN(value) {\ cudaError_t _m_cudaStat = value;\ if (_m_cudaStat != cudaSuccess) {\ fprintf(stderr, "Error %s at line %d in file %s\n", cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\ exit(1);\ }} __global__ void dgemm(float *A, float *B, float *C, int threads_per_block, int n) { int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; float sum = 0.0f; int ia = n * (threads_per_block * by + ty); // row number in A int ib = threads_per_block * bx + tx; // column number in B int ic = ia + ib; // element number in C for (int k = 0; k < n; k++) sum += A[ia + k] * B[ib + k * n]; C[ic] = sum; } void InitMatrix(float *A, float *B, float *C, int size) { for (int i = 0; i < size; i++) for (int j = 0; j < size; j++) { int k = size * i + j; A[k] = rand(); B[k] = rand(); C[k] = 0.0; } } void printMatrix(float *C, int size) { for (int i = 0; i < size * size; i++) cout << C[i] << "\t"; cout << endl; } double wtime() { struct timeval t; gettimeofday(&t, NULL); return (double)t.tv_sec + (double)t.tv_usec * 1E-6; } int main(int argc, char* argv[]) { if (argc != 4) { cout << "launch parameters: [matrix size] [threads_x] [threads_y]" << endl; return 1; } int size = atoi(argv[1]); int threads_per_block_x = atoi(argv[2]); int threads_per_block_y = atoi(argv[3]); srand(time(NULL)); float *A = new float[size * size]; float *B = new float[size * size]; float *C = new float[size * size]; float *dev_A, *dev_B, *dev_C; cudaMalloc((void**)&dev_A, size * size * sizeof(float)); cudaMalloc((void**)&dev_B, size * size * sizeof(float)); cudaMalloc((void**)&dev_C, size * size * sizeof(float)); InitMatrix(A, B, C, size); dim3 threads(threads_per_block_x, threads_per_block_y); dim3 blocks(size / threads.x, size / threads.y); cudaMemcpy(dev_A, A, size * size * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_B, B, size * size * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_C, C, size * size * sizeof(float), cudaMemcpyHostToDevice); float elapsedTime; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); dgemm <<< blocks, threads >>> (dev_A, dev_B, dev_C, threads_per_block_x, size); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); CUDA_CHECK_RETURN(cudaDeviceSynchronize()); CUDA_CHECK_RETURN(cudaGetLastError()); cudaEventElapsedTime(&elapsedTime, start, stop); cudaMemcpy(C, dev_C, size * size * sizeof(float), cudaMemcpyDeviceToHost); cout << "time: " << elapsedTime << " ms" << endl; printMatrix(C, size); delete [] A; delete [] B; delete [] C; cudaEventDestroy(start); cudaEventDestroy(stop); cudaFree(dev_A); cudaFree(dev_B); cudaFree(dev_C); return 0; }
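dgemm's host code sizes the grid with truncating integer division (size / threads.x), silently dropping trailing rows and columns whenever size is not a multiple of the block shape, and the kernel uses the x block extent for both dimensions, so non-square blocks mis-index. A hedged sketch under the original's square-block assumption:
// Host side: round the grid up instead of truncating.
dim3 blocks((size + threads.x - 1) / threads.x, (size + threads.y - 1) / threads.y);
// Device side, at the top of dgemm: guard threads that fall past the matrix.
int row = threads_per_block * by + ty;
int col = threads_per_block * bx + tx;
if (row >= n || col >= n) return;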
803912254a71824441878b5afc65973a91d00e29.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //-------------------------------------------------------- // Multitask Network Cascade // Written by Haozhi Qi // Copyright (c) 2016, Haozhi Qi // Licensed under The MIT License [see LICENSE for details] // -------------------------------------------------------- #include "gpu_mv.hpp" #include <vector> #include <iostream> const int CAFFE_CUDA_NUM_THREADS = 512; const float BINARIZE_THRESH = 0.4; inline int CAFFE_GET_BLOCKS(const int N) { return (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS; } #define CUDA_POST_KERNEL_CHECK CUDA_CHECK(hipPeekAtLastError()) #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ if (error != hipSuccess) { \ std::cout << hipGetErrorString(error) << std::endl; \ } \ } while (0) #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) __global__ void mask_render(const int nthreads, const float* input_box, const float* input_mask, const int box_dim, const int mask_size, const int image_height, const int image_width, float* target_buffer) { CUDA_KERNEL_LOOP(index, nthreads) { // target buffer's size if (n * h * w) int w = index % image_width; int h = (index / image_width) % image_height; int n = index / image_width / image_height; // get the n-th boxes const float* offset_box = input_box + n * box_dim; const float* offset_mask = input_mask + n * mask_size * mask_size; const float box_x1 = offset_box[0]; const float box_y1 = offset_box[1]; const float box_x2 = offset_box[2]; const float box_y2 = offset_box[3]; // check whether pixel is out of box bound if (w < box_x1 || w > box_x2 || h < box_y1 || h > box_y2) { target_buffer[index] = 0.0; continue; } const float box_width = box_x2 - box_x1 + 1.0; const float box_height = box_y2 - box_y1 + 1.0; const float ratio_w = (float) mask_size / box_width; const float ratio_h = (float) mask_size / box_height; const float inverse_x = ((float)w - box_x1) * ratio_w; const float inverse_y = ((float)h - box_y1) * ratio_h; // do bilinear interpolation int start_x = floor(inverse_x); int start_y = floor(inverse_y); if (start_x == mask_size - 1 && start_y == mask_size - 1) { target_buffer[index] = offset_mask[mask_size * mask_size - 1]; } else if (start_x == mask_size - 1 && start_y != mask_size - 1) { target_buffer[index] = offset_mask[start_y * mask_size + start_x]; } else if (start_x != mask_size - 1 && start_y == mask_size - 1) { target_buffer[index] = offset_mask[start_y * mask_size + start_x]; } else { int top_left_ind = start_y * mask_size + start_x; int top_right_ind = top_left_ind + 1; int bot_left_ind = top_left_ind + mask_size; int bot_right_ind = bot_left_ind + 1; float top_left_weight = (1 - (inverse_x - start_x)) * (1 - (inverse_y - start_y)); float top_right_weight = (inverse_x - start_x) * (1 - (inverse_y - start_y)); float bot_left_weight = (1 - (inverse_x - start_x)) * (inverse_y - start_y); float bot_right_weight = (inverse_x - start_x) * (inverse_y - start_y); float val = top_left_weight * offset_mask[top_left_ind] + top_right_weight * offset_mask[top_right_ind] + bot_left_weight * offset_mask[bot_left_ind] + bot_right_weight * offset_mask[bot_right_ind]; target_buffer[index] = val; } } } __global__ void mask_aggregate(const int nthreads, const float* render_mask, float* aggregate_mask, const int* candidate_inds, const int* candidate_starts, const float* candidate_weights, 
const int image_height, const int image_width) { // render_mask: num_boxes * image_height * image_width // aggregate_mask: output_num * image_height * image_width CUDA_KERNEL_LOOP(index, nthreads) { int w = index % image_width; int h = (index / image_width) % image_height; int n = index / image_width / image_height; // get candidate_inds, candidate_start int candidate_start = (n == 0) ? 0 : candidate_starts[n-1]; int candidate_end = candidate_starts[n]; // output value will be summation of (mask * mask_weight) float val = 0.0; for (int i = candidate_start; i < candidate_end; ++i) { int input_mask_ind = candidate_inds[i]; int offset_render_mask = (input_mask_ind * image_height + h) * image_width + w; val += (render_mask[offset_render_mask] * candidate_weights[i]); } aggregate_mask[index] = val; } } __global__ void reduce_mask_col(const int nthreads, const float* masks, int image_height, int image_width, bool* output_buffer) { // nthreads will be output_num * image_width CUDA_KERNEL_LOOP(index, nthreads) { int w = index % image_width; int n = index / image_width; output_buffer[index] = false; for (int i = 0; i < image_height; ++i) { if (masks[(n * image_height + i) * image_width + w] > BINARIZE_THRESH) { output_buffer[index] = true; break; } } } } __global__ void reduce_mask_row(const int nthreads, const float* masks, int image_height, int image_width, bool* output_buffer) { // nthreads will be output_num * image_width CUDA_KERNEL_LOOP(index, nthreads) { int h = index % image_height; int n = index / image_height; output_buffer[index] = false; for (int i = 0; i < image_width; ++i) { if (masks[(n * image_height + h) * image_width + i] > BINARIZE_THRESH) { output_buffer[index] = true; break; } } } } __global__ void reduce_bounding_x(const int nthreads, const bool* reduced_col, int* output_buffer, const int image_width) { // nthreads will be output_num * 2 CUDA_KERNEL_LOOP(index, nthreads) { int x = index % 2; int n = index / 2; output_buffer[index] = image_width / 2; if (x == 0) { for (int i = 0; i < image_width; ++i) { if (reduced_col[n * image_width + i]) { output_buffer[index] = i; break; } } } else { for (int i = image_width - 1; i >= 0; --i) { if (reduced_col[n * image_width + i]) { output_buffer[index] = i; break; } } } } } __global__ void reduce_bounding_y(const int nthreads, const bool* reduced_row, int* output_buffer, const int image_height) { // nthreads will be output_num * 2 CUDA_KERNEL_LOOP(index, nthreads) { int x = index % 2; int n = index / 2; output_buffer[index] = image_height / 2; if (x == 0) { for (int i = 0; i < image_height; ++i) { if (reduced_row[n * image_height + i]) { output_buffer[index] = i; break; } } } else { for (int i = image_height - 1; i >= 0; --i) { if (reduced_row[n * image_height + i]) { output_buffer[index] = i; break; } } } } } __global__ void mask_resize(const int nthreads, const float* original_mask, const int* bounding_x, const int* bounding_y, float* resized_mask, const int mask_size, const int image_height, const int image_width) { // output size should be result_num * mask_size * mask_size // original_mask should be result_num * image_height * image_width // bounding_x should be result_num * 2 // bounding_y should be result_num * 2 CUDA_KERNEL_LOOP(index, nthreads) { int w = index % mask_size; int h = (index / mask_size) % mask_size; int n = index / mask_size / mask_size; int bbox_x1 = bounding_x[n * 2]; int bbox_x2 = bounding_x[n * 2 + 1]; int bbox_y1 = bounding_y[n * 2]; int bbox_y2 = bounding_y[n * 2 + 1]; float bbox_width = bbox_x2 - bbox_x1 + 
1.0; float bbox_height = bbox_y2 - bbox_y1 + 1.0; float ratio_w = bbox_width / static_cast<float>(mask_size); float ratio_h = bbox_height / static_cast<float>(mask_size); float inverse_x = bbox_x1 + static_cast<float>(w) * ratio_w; float inverse_y = bbox_y1 + static_cast<float>(h) * ratio_h; int start_x = floor(inverse_x); int start_y = floor(inverse_y); const float* offset_mask = original_mask + n * image_height * image_width; if (start_x == image_width - 1 && start_y == image_height - 1) { resized_mask[index] = offset_mask[image_width * image_height - 1]; } else if (start_x == image_width - 1 && start_y != image_height - 1) { resized_mask[index] = offset_mask[start_y * image_width + start_x]; } else if (start_x != image_width - 1 && start_y == image_height - 1) { resized_mask[index] = offset_mask[start_y * image_width+ start_x]; } else { int top_left_ind = (n * image_height + start_y) * image_width + start_x; int top_right_ind = top_left_ind + 1; int bot_left_ind = top_left_ind + image_width; int bot_right_ind = bot_left_ind + 1; float top_left_weight = (1 - (inverse_x - start_x)) * (1 - (inverse_y - start_y)); float top_right_weight = (inverse_x - start_x) * (1 - (inverse_y - start_y)); float bot_left_weight = (1 - (inverse_x - start_x)) * (inverse_y - start_y); float bot_right_weight = (inverse_x - start_x) * (inverse_y - start_y); float val = top_left_weight * original_mask[top_left_ind] + top_right_weight * original_mask[top_right_ind] + bot_left_weight * original_mask[bot_left_ind] + bot_right_weight * original_mask[bot_right_ind]; resized_mask[index] = val; } } } void _mv(const float* all_boxes, const float* all_masks, const int all_boxes_num, const int* candidate_inds, const int* candidate_start, const float* candidate_weights, const int candidate_num, const int image_height, const int image_width, const int box_dim, const int mask_size, const int result_num, float* finalize_output_mask, int* finalize_output_box, const int device_id) { // allocate device memory float* dev_boxes = NULL; float* dev_masks = NULL; int* dev_candidate_inds = NULL; float* dev_candidate_weights = NULL; int* dev_candidate_start = NULL; CUDA_CHECK(hipMalloc(&dev_boxes, all_boxes_num * box_dim * sizeof(float))); CUDA_CHECK(hipMemcpy(dev_boxes, all_boxes, all_boxes_num * box_dim * sizeof(float), hipMemcpyHostToDevice)); CUDA_CHECK(hipMalloc(&dev_masks, all_boxes_num * mask_size * mask_size * sizeof(float))); CUDA_CHECK(hipMemcpy(dev_masks, all_masks, all_boxes_num * mask_size * mask_size * sizeof(float), hipMemcpyHostToDevice)); CUDA_CHECK(hipMalloc(&dev_candidate_inds, candidate_num * sizeof(int))); CUDA_CHECK(hipMemcpy(dev_candidate_inds, candidate_inds, candidate_num * sizeof(int), hipMemcpyHostToDevice)); CUDA_CHECK(hipMalloc(&dev_candidate_weights, candidate_num * sizeof(int))); CUDA_CHECK(hipMemcpy(dev_candidate_weights, candidate_weights, candidate_num * sizeof(int), hipMemcpyHostToDevice)); CUDA_CHECK(hipMalloc(&dev_candidate_start, result_num * sizeof(int))); CUDA_CHECK(hipMemcpy(dev_candidate_start, candidate_start, result_num * sizeof(int), hipMemcpyHostToDevice)); // 1. 
Masks are of size mask_size x mask_size, to do aggregation // first resize them to image scale (image_height x image_width) // result n x image_height x image_width buffer const int render_mask_num = all_boxes_num * image_height * image_width; float* dev_render_mask = NULL; CUDA_CHECK(hipMalloc(&dev_render_mask, render_mask_num * sizeof(float))); hipLaunchKernelGGL(( mask_render), dim3(CAFFE_GET_BLOCKS(render_mask_num)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, render_mask_num, dev_boxes, dev_masks, box_dim, mask_size, image_height, image_width, dev_render_mask); CUDA_POST_KERNEL_CHECK; // 2. After we get above buffer, we need to merge certain masks // to get new masks according to candidate_weights and candidate_inds // new_mask = \sum (old_mask * old_mask_weight) const int output_mask_num = result_num * image_height * image_width; float* dev_output_mask = NULL; CUDA_CHECK(hipMalloc(&dev_output_mask, output_mask_num * sizeof(float))); hipLaunchKernelGGL(( mask_aggregate), dim3(CAFFE_GET_BLOCKS(output_mask_num)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, output_mask_num, dev_render_mask, dev_output_mask, dev_candidate_inds, dev_candidate_start, dev_candidate_weights, image_height, image_width); CUDA_POST_KERNEL_CHECK; // 3. After we get new masks buffer (result_num * image_height * image_width) // we then find the mask boundary, this is achieved by two reduction operation // then the tight mask boundary can be obtained int reduced_col_num = result_num * image_width; bool* reduced_col_buffer = NULL; CUDA_CHECK(hipMalloc(&reduced_col_buffer, reduced_col_num * sizeof(bool))); hipLaunchKernelGGL(( reduce_mask_col), dim3(CAFFE_GET_BLOCKS(reduced_col_num)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, reduced_col_num, dev_output_mask, image_height, image_width, reduced_col_buffer); int reduced_bound_x_num = result_num * 2; int* reduced_bound_x = NULL; CUDA_CHECK(hipMalloc(&reduced_bound_x, reduced_bound_x_num * sizeof(int))); hipLaunchKernelGGL(( reduce_bounding_x), dim3(CAFFE_GET_BLOCKS(reduced_bound_x_num)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, reduced_bound_x_num, reduced_col_buffer, reduced_bound_x, image_width); // find vertical boundary int reduced_row_num = result_num * image_height; bool* reduced_row_buffer = NULL; CUDA_CHECK(hipMalloc(&reduced_row_buffer, reduced_row_num * sizeof(bool))); hipLaunchKernelGGL(( reduce_mask_row), dim3(CAFFE_GET_BLOCKS(reduced_row_num)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, reduced_row_num, dev_output_mask, image_height, image_width, reduced_row_buffer); int reduced_bound_y_num = result_num * 2; int* reduced_bound_y = NULL; CUDA_CHECK(hipMalloc(&reduced_bound_y, reduced_bound_y_num * sizeof(int))); hipLaunchKernelGGL(( reduce_bounding_y), dim3(CAFFE_GET_BLOCKS(reduced_bound_y_num)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, reduced_bound_y_num, reduced_row_buffer, reduced_bound_y, image_height); // 4. 
Once we get tight mask boundary, we could use it to resize masks back // to mask_size x mask_size float resized_mask_num = result_num * mask_size * mask_size; float* resized_mask = NULL; CUDA_CHECK(hipMalloc(&resized_mask, resized_mask_num * sizeof(float))); hipLaunchKernelGGL(( mask_resize), dim3(CAFFE_GET_BLOCKS(resized_mask_num)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, resized_mask_num, dev_output_mask, reduced_bound_x, reduced_bound_y, resized_mask, mask_size, image_height, image_width); // copy back boxes to cpu int* cpu_bound_x = (int*) malloc(reduced_bound_x_num * sizeof(int)); hipMemcpy(cpu_bound_x, reduced_bound_x, reduced_bound_x_num * sizeof(int), hipMemcpyDeviceToHost); int* cpu_bound_y = (int*) malloc(reduced_bound_y_num * sizeof(int)); hipMemcpy(cpu_bound_y, reduced_bound_y, reduced_bound_y_num * sizeof(int), hipMemcpyDeviceToHost); int cnt = 0; for (int i = 0; i < result_num; i ++) { finalize_output_box[i*4] = cpu_bound_x[cnt]; finalize_output_box[i*4+1] = cpu_bound_y[cnt]; finalize_output_box[i*4+2] = cpu_bound_x[cnt+1]; finalize_output_box[i*4+3] = cpu_bound_y[cnt+1]; cnt += 2; } // copy back masks to cpu CUDA_CHECK(hipMemcpy(finalize_output_mask, resized_mask, resized_mask_num * sizeof(float), hipMemcpyDeviceToHost)); // free gpu memories CUDA_CHECK(hipFree(dev_boxes)); CUDA_CHECK(hipFree(dev_masks)); CUDA_CHECK(hipFree(dev_candidate_inds)); CUDA_CHECK(hipFree(dev_candidate_start)); CUDA_CHECK(hipFree(dev_candidate_weights)); CUDA_CHECK(hipFree(dev_render_mask)); CUDA_CHECK(hipFree(resized_mask)); CUDA_CHECK(hipFree(dev_output_mask)); CUDA_CHECK(hipFree(reduced_col_buffer)); CUDA_CHECK(hipFree(reduced_bound_x)); CUDA_CHECK(hipFree(reduced_row_buffer)); CUDA_CHECK(hipFree(reduced_bound_y)); }
803912254a71824441878b5afc65973a91d00e29.cu
//-------------------------------------------------------- // Multitask Network Cascade // Written by Haozhi Qi // Copyright (c) 2016, Haozhi Qi // Licensed under The MIT License [see LICENSE for details] // -------------------------------------------------------- #include "gpu_mv.hpp" #include <vector> #include <iostream> const int CAFFE_CUDA_NUM_THREADS = 512; const float BINARIZE_THRESH = 0.4; inline int CAFFE_GET_BLOCKS(const int N) { return (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS; } #define CUDA_POST_KERNEL_CHECK CUDA_CHECK(cudaPeekAtLastError()) #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ if (error != cudaSuccess) { \ std::cout << cudaGetErrorString(error) << std::endl; \ } \ } while (0) #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) __global__ void mask_render(const int nthreads, const float* input_box, const float* input_mask, const int box_dim, const int mask_size, const int image_height, const int image_width, float* target_buffer) { CUDA_KERNEL_LOOP(index, nthreads) { // target buffer's size if (n * h * w) int w = index % image_width; int h = (index / image_width) % image_height; int n = index / image_width / image_height; // get the n-th boxes const float* offset_box = input_box + n * box_dim; const float* offset_mask = input_mask + n * mask_size * mask_size; const float box_x1 = offset_box[0]; const float box_y1 = offset_box[1]; const float box_x2 = offset_box[2]; const float box_y2 = offset_box[3]; // check whether pixel is out of box bound if (w < box_x1 || w > box_x2 || h < box_y1 || h > box_y2) { target_buffer[index] = 0.0; continue; } const float box_width = box_x2 - box_x1 + 1.0; const float box_height = box_y2 - box_y1 + 1.0; const float ratio_w = (float) mask_size / box_width; const float ratio_h = (float) mask_size / box_height; const float inverse_x = ((float)w - box_x1) * ratio_w; const float inverse_y = ((float)h - box_y1) * ratio_h; // do bilinear interpolation int start_x = floor(inverse_x); int start_y = floor(inverse_y); if (start_x == mask_size - 1 && start_y == mask_size - 1) { target_buffer[index] = offset_mask[mask_size * mask_size - 1]; } else if (start_x == mask_size - 1 && start_y != mask_size - 1) { target_buffer[index] = offset_mask[start_y * mask_size + start_x]; } else if (start_x != mask_size - 1 && start_y == mask_size - 1) { target_buffer[index] = offset_mask[start_y * mask_size + start_x]; } else { int top_left_ind = start_y * mask_size + start_x; int top_right_ind = top_left_ind + 1; int bot_left_ind = top_left_ind + mask_size; int bot_right_ind = bot_left_ind + 1; float top_left_weight = (1 - (inverse_x - start_x)) * (1 - (inverse_y - start_y)); float top_right_weight = (inverse_x - start_x) * (1 - (inverse_y - start_y)); float bot_left_weight = (1 - (inverse_x - start_x)) * (inverse_y - start_y); float bot_right_weight = (inverse_x - start_x) * (inverse_y - start_y); float val = top_left_weight * offset_mask[top_left_ind] + top_right_weight * offset_mask[top_right_ind] + bot_left_weight * offset_mask[bot_left_ind] + bot_right_weight * offset_mask[bot_right_ind]; target_buffer[index] = val; } } } __global__ void mask_aggregate(const int nthreads, const float* render_mask, float* aggregate_mask, const int* candidate_inds, const int* candidate_starts, const float* candidate_weights, const int image_height, const int image_width) { // render_mask: num_boxes * 
image_height * image_width // aggregate_mask: output_num * image_height * image_width CUDA_KERNEL_LOOP(index, nthreads) { int w = index % image_width; int h = (index / image_width) % image_height; int n = index / image_width / image_height; // get candidate_inds, candidate_start int candidate_start = (n == 0) ? 0 : candidate_starts[n-1]; int candidate_end = candidate_starts[n]; // output value will be summation of (mask * mask_weight) float val = 0.0; for (int i = candidate_start; i < candidate_end; ++i) { int input_mask_ind = candidate_inds[i]; int offset_render_mask = (input_mask_ind * image_height + h) * image_width + w; val += (render_mask[offset_render_mask] * candidate_weights[i]); } aggregate_mask[index] = val; } } __global__ void reduce_mask_col(const int nthreads, const float* masks, int image_height, int image_width, bool* output_buffer) { // nthreads will be output_num * image_width CUDA_KERNEL_LOOP(index, nthreads) { int w = index % image_width; int n = index / image_width; output_buffer[index] = false; for (int i = 0; i < image_height; ++i) { if (masks[(n * image_height + i) * image_width + w] > BINARIZE_THRESH) { output_buffer[index] = true; break; } } } } __global__ void reduce_mask_row(const int nthreads, const float* masks, int image_height, int image_width, bool* output_buffer) { // nthreads will be output_num * image_width CUDA_KERNEL_LOOP(index, nthreads) { int h = index % image_height; int n = index / image_height; output_buffer[index] = false; for (int i = 0; i < image_width; ++i) { if (masks[(n * image_height + h) * image_width + i] > BINARIZE_THRESH) { output_buffer[index] = true; break; } } } } __global__ void reduce_bounding_x(const int nthreads, const bool* reduced_col, int* output_buffer, const int image_width) { // nthreads will be output_num * 2 CUDA_KERNEL_LOOP(index, nthreads) { int x = index % 2; int n = index / 2; output_buffer[index] = image_width / 2; if (x == 0) { for (int i = 0; i < image_width; ++i) { if (reduced_col[n * image_width + i]) { output_buffer[index] = i; break; } } } else { for (int i = image_width - 1; i >= 0; --i) { if (reduced_col[n * image_width + i]) { output_buffer[index] = i; break; } } } } } __global__ void reduce_bounding_y(const int nthreads, const bool* reduced_row, int* output_buffer, const int image_height) { // nthreads will be output_num * 2 CUDA_KERNEL_LOOP(index, nthreads) { int x = index % 2; int n = index / 2; output_buffer[index] = image_height / 2; if (x == 0) { for (int i = 0; i < image_height; ++i) { if (reduced_row[n * image_height + i]) { output_buffer[index] = i; break; } } } else { for (int i = image_height - 1; i >= 0; --i) { if (reduced_row[n * image_height + i]) { output_buffer[index] = i; break; } } } } } __global__ void mask_resize(const int nthreads, const float* original_mask, const int* bounding_x, const int* bounding_y, float* resized_mask, const int mask_size, const int image_height, const int image_width) { // output size should be result_num * mask_size * mask_size // original_mask should be result_num * image_height * image_width // bounding_x should be result_num * 2 // bounding_y should be result_num * 2 CUDA_KERNEL_LOOP(index, nthreads) { int w = index % mask_size; int h = (index / mask_size) % mask_size; int n = index / mask_size / mask_size; int bbox_x1 = bounding_x[n * 2]; int bbox_x2 = bounding_x[n * 2 + 1]; int bbox_y1 = bounding_y[n * 2]; int bbox_y2 = bounding_y[n * 2 + 1]; float bbox_width = bbox_x2 - bbox_x1 + 1.0; float bbox_height = bbox_y2 - bbox_y1 + 1.0; float ratio_w = bbox_width / 
static_cast<float>(mask_size); float ratio_h = bbox_height / static_cast<float>(mask_size); float inverse_x = bbox_x1 + static_cast<float>(w) * ratio_w; float inverse_y = bbox_y1 + static_cast<float>(h) * ratio_h; int start_x = floor(inverse_x); int start_y = floor(inverse_y); const float* offset_mask = original_mask + n * image_height * image_width; if (start_x == image_width - 1 && start_y == image_height - 1) { resized_mask[index] = offset_mask[image_width * image_height - 1]; } else if (start_x == image_width - 1 && start_y != image_height - 1) { resized_mask[index] = offset_mask[start_y * image_width + start_x]; } else if (start_x != image_width - 1 && start_y == image_height - 1) { resized_mask[index] = offset_mask[start_y * image_width+ start_x]; } else { int top_left_ind = (n * image_height + start_y) * image_width + start_x; int top_right_ind = top_left_ind + 1; int bot_left_ind = top_left_ind + image_width; int bot_right_ind = bot_left_ind + 1; float top_left_weight = (1 - (inverse_x - start_x)) * (1 - (inverse_y - start_y)); float top_right_weight = (inverse_x - start_x) * (1 - (inverse_y - start_y)); float bot_left_weight = (1 - (inverse_x - start_x)) * (inverse_y - start_y); float bot_right_weight = (inverse_x - start_x) * (inverse_y - start_y); float val = top_left_weight * original_mask[top_left_ind] + top_right_weight * original_mask[top_right_ind] + bot_left_weight * original_mask[bot_left_ind] + bot_right_weight * original_mask[bot_right_ind]; resized_mask[index] = val; } } } void _mv(const float* all_boxes, const float* all_masks, const int all_boxes_num, const int* candidate_inds, const int* candidate_start, const float* candidate_weights, const int candidate_num, const int image_height, const int image_width, const int box_dim, const int mask_size, const int result_num, float* finalize_output_mask, int* finalize_output_box, const int device_id) { // allocate device memory float* dev_boxes = NULL; float* dev_masks = NULL; int* dev_candidate_inds = NULL; float* dev_candidate_weights = NULL; int* dev_candidate_start = NULL; CUDA_CHECK(cudaMalloc(&dev_boxes, all_boxes_num * box_dim * sizeof(float))); CUDA_CHECK(cudaMemcpy(dev_boxes, all_boxes, all_boxes_num * box_dim * sizeof(float), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMalloc(&dev_masks, all_boxes_num * mask_size * mask_size * sizeof(float))); CUDA_CHECK(cudaMemcpy(dev_masks, all_masks, all_boxes_num * mask_size * mask_size * sizeof(float), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMalloc(&dev_candidate_inds, candidate_num * sizeof(int))); CUDA_CHECK(cudaMemcpy(dev_candidate_inds, candidate_inds, candidate_num * sizeof(int), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMalloc(&dev_candidate_weights, candidate_num * sizeof(int))); CUDA_CHECK(cudaMemcpy(dev_candidate_weights, candidate_weights, candidate_num * sizeof(int), cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMalloc(&dev_candidate_start, result_num * sizeof(int))); CUDA_CHECK(cudaMemcpy(dev_candidate_start, candidate_start, result_num * sizeof(int), cudaMemcpyHostToDevice)); // 1. 
Masks are of size mask_size x mask_size, to do aggregation // first resize them to image scale (image_height x image_width) // result n x image_height x image_width buffer const int render_mask_num = all_boxes_num * image_height * image_width; float* dev_render_mask = NULL; CUDA_CHECK(cudaMalloc(&dev_render_mask, render_mask_num * sizeof(float))); mask_render<<<CAFFE_GET_BLOCKS(render_mask_num), CAFFE_CUDA_NUM_THREADS>>> (render_mask_num, dev_boxes, dev_masks, box_dim, mask_size, image_height, image_width, dev_render_mask); CUDA_POST_KERNEL_CHECK; // 2. After we get above buffer, we need to merge certain masks // to get new masks according to candidate_weights and candidate_inds // new_mask = \sum (old_mask * old_mask_weight) const int output_mask_num = result_num * image_height * image_width; float* dev_output_mask = NULL; CUDA_CHECK(cudaMalloc(&dev_output_mask, output_mask_num * sizeof(float))); mask_aggregate<<<CAFFE_GET_BLOCKS(output_mask_num), CAFFE_CUDA_NUM_THREADS>>> (output_mask_num, dev_render_mask, dev_output_mask, dev_candidate_inds, dev_candidate_start, dev_candidate_weights, image_height, image_width); CUDA_POST_KERNEL_CHECK; // 3. After we get new masks buffer (result_num * image_height * image_width) // we then find the mask boundary, this is achieved by two reduction operation // then the tight mask boundary can be obtained int reduced_col_num = result_num * image_width; bool* reduced_col_buffer = NULL; CUDA_CHECK(cudaMalloc(&reduced_col_buffer, reduced_col_num * sizeof(bool))); reduce_mask_col<<<CAFFE_GET_BLOCKS(reduced_col_num), CAFFE_CUDA_NUM_THREADS>>> (reduced_col_num, dev_output_mask, image_height, image_width, reduced_col_buffer); int reduced_bound_x_num = result_num * 2; int* reduced_bound_x = NULL; CUDA_CHECK(cudaMalloc(&reduced_bound_x, reduced_bound_x_num * sizeof(int))); reduce_bounding_x<<<CAFFE_GET_BLOCKS(reduced_bound_x_num), CAFFE_CUDA_NUM_THREADS>>> (reduced_bound_x_num, reduced_col_buffer, reduced_bound_x, image_width); // find vertical boundary int reduced_row_num = result_num * image_height; bool* reduced_row_buffer = NULL; CUDA_CHECK(cudaMalloc(&reduced_row_buffer, reduced_row_num * sizeof(bool))); reduce_mask_row<<<CAFFE_GET_BLOCKS(reduced_row_num), CAFFE_CUDA_NUM_THREADS>>> (reduced_row_num, dev_output_mask, image_height, image_width, reduced_row_buffer); int reduced_bound_y_num = result_num * 2; int* reduced_bound_y = NULL; CUDA_CHECK(cudaMalloc(&reduced_bound_y, reduced_bound_y_num * sizeof(int))); reduce_bounding_y<<<CAFFE_GET_BLOCKS(reduced_bound_y_num), CAFFE_CUDA_NUM_THREADS>>> (reduced_bound_y_num, reduced_row_buffer, reduced_bound_y, image_height); // 4. 
Once we get the tight mask boundary, we can use it to resize masks back // to mask_size x mask_size int resized_mask_num = result_num * mask_size * mask_size; float* resized_mask = NULL; CUDA_CHECK(cudaMalloc(&resized_mask, resized_mask_num * sizeof(float))); mask_resize<<<CAFFE_GET_BLOCKS(resized_mask_num), CAFFE_CUDA_NUM_THREADS>>> (resized_mask_num, dev_output_mask, reduced_bound_x, reduced_bound_y, resized_mask, mask_size, image_height, image_width); // copy back boxes to cpu int* cpu_bound_x = (int*) malloc(reduced_bound_x_num * sizeof(int)); CUDA_CHECK(cudaMemcpy(cpu_bound_x, reduced_bound_x, reduced_bound_x_num * sizeof(int), cudaMemcpyDeviceToHost)); int* cpu_bound_y = (int*) malloc(reduced_bound_y_num * sizeof(int)); CUDA_CHECK(cudaMemcpy(cpu_bound_y, reduced_bound_y, reduced_bound_y_num * sizeof(int), cudaMemcpyDeviceToHost)); int cnt = 0; for (int i = 0; i < result_num; i++) { finalize_output_box[i*4] = cpu_bound_x[cnt]; finalize_output_box[i*4+1] = cpu_bound_y[cnt]; finalize_output_box[i*4+2] = cpu_bound_x[cnt+1]; finalize_output_box[i*4+3] = cpu_bound_y[cnt+1]; cnt += 2; } free(cpu_bound_x); free(cpu_bound_y); // copy back masks to cpu CUDA_CHECK(cudaMemcpy(finalize_output_mask, resized_mask, resized_mask_num * sizeof(float), cudaMemcpyDeviceToHost)); // free gpu memories CUDA_CHECK(cudaFree(dev_boxes)); CUDA_CHECK(cudaFree(dev_masks)); CUDA_CHECK(cudaFree(dev_candidate_inds)); CUDA_CHECK(cudaFree(dev_candidate_start)); CUDA_CHECK(cudaFree(dev_candidate_weights)); CUDA_CHECK(cudaFree(dev_render_mask)); CUDA_CHECK(cudaFree(resized_mask)); CUDA_CHECK(cudaFree(dev_output_mask)); CUDA_CHECK(cudaFree(reduced_col_buffer)); CUDA_CHECK(cudaFree(reduced_bound_x)); CUDA_CHECK(cudaFree(reduced_row_buffer)); CUDA_CHECK(cudaFree(reduced_bound_y)); }
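The bilinear weighting in mask_resize above is the standard four-neighbor scheme: each output pixel maps back to a fractional source coordinate (inverse_x, inverse_y) and blends the four surrounding pixels with weights that sum to 1. A minimal CPU reference of that sampling, useful for cross-checking the kernel; this is a sketch, and the helper name and clamping style are illustrative, not part of the original source:

// CPU reference for the bilinear sampling used by mask_resize (sketch).
#include <cmath>

float bilinear_sample(const float* img, int height, int width, float x, float y) {
    int x0 = (int)std::floor(x);
    int y0 = (int)std::floor(y);
    // On the last row/column fall back to the nearest pixel,
    // mirroring the kernel's boundary branches.
    if (x0 >= width - 1 || y0 >= height - 1) {
        if (x0 > width - 1)  x0 = width - 1;
        if (y0 > height - 1) y0 = height - 1;
        return img[y0 * width + x0];
    }
    float fx = x - x0, fy = y - y0;   // fractional offsets in [0, 1)
    return (1 - fx) * (1 - fy) * img[y0 * width + x0]             // top-left
         + fx       * (1 - fy) * img[y0 * width + x0 + 1]         // top-right
         + (1 - fx) * fy       * img[(y0 + 1) * width + x0]       // bottom-left
         + fx       * fy       * img[(y0 + 1) * width + x0 + 1];  // bottom-right
}

Because the four weights sum to 1, resizing a constant mask returns the same constant, which is a quick sanity check for the kernel.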
6844efdecfeedac819122fb48aa084d908c53a6a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* required: https://developer.nvidia.com/cuda-downloads compile: nvcc -shared -o bitonic.dll bitonic.cu */ #include <stdlib.h> #include <stdio.h> __device__ inline void swap(float &a, float &b) { float temp = a; a = b; b = temp; } __global__ void bitonic_sort_core(float *device_array, int length) { // Gets the id of the thread, which is also its index in the array: int i = threadIdx.x + blockDim.x * blockIdx.x; // The number of iterations = log_2(length) for (int iteration = 2; iteration <= length; iteration <<= 1) { for (int shift = iteration >> 1; shift > 0; shift >>= 1) { // The little hack. The shift always equals 2^n, so there are two situations: // 1) i = xxxx0xxx // shift = 00001000 // i^shift = xxxx1xxx = i + shift, just like adding shift // // 2) i = xxxx1xxx // shift = 00001000 // i^shift = xxxx0xxx = i - shift, i.e. the partner j = i - shift already handled this pair int i_shifted = i ^ shift; // i_shifted > i corresponds to the first case: if (i_shifted > i) { // Sets the direction of the bitonic subsequence: // 1) for the first iteration the direction changes every 2nd element // 2) for the second iteration the direction changes every 4th element // ... if ((i & iteration) != 0) { if (device_array[i] < device_array[i_shifted]) { swap(device_array[i], device_array[i_shifted]); } } else { if (device_array[i] > device_array[i_shifted]) { swap(device_array[i], device_array[i_shifted]); } } } __syncthreads(); } } } extern "C" __declspec ( dllexport ) void bitonic_sort(float *memory_array, int length) { float *device_array; size_t size = length * sizeof(float); hipMalloc((void**) &device_array, size); hipMemcpy(device_array, memory_array, size, hipMemcpyHostToDevice); // Calculates the number of threads: int count_threads = min(length, 1024); int count_blocks = length / count_threads; // Inits the dimensions of threads and blocks dim3 blocks(count_blocks,1); dim3 threads(count_threads,1); // NOTE: length must be a power of two; with more than one block, // __syncthreads() cannot synchronize the whole array. // Launch the sort kernel hipLaunchKernelGGL(bitonic_sort_core, dim3(blocks), dim3(threads), 0, 0, device_array, length); hipMemcpy(memory_array, device_array, size, hipMemcpyDeviceToHost); hipFree(device_array); }
6844efdecfeedac819122fb48aa084d908c53a6a.cu
/* required: https://developer.nvidia.com/cuda-downloads compile: nvcc -shared -o bitonic.dll bitonic.cu */ #include <stdlib.h> #include <stdio.h> __device__ inline void swap(float &a, float &b) { float temp = a; a = b; b = temp; } __global__ void bitonic_sort_core(float *device_array, int length) { // Gets the id of the thread, which is also its index in the array: int i = threadIdx.x + blockDim.x * blockIdx.x; // The number of iterations = log_2(length) for (int iteration = 2; iteration <= length; iteration <<= 1) { for (int shift = iteration >> 1; shift > 0; shift >>= 1) { // The little hack. The shift always equals 2^n, so there are two situations: // 1) i = xxxx0xxx // shift = 00001000 // i^shift = xxxx1xxx = i + shift, just like adding shift // // 2) i = xxxx1xxx // shift = 00001000 // i^shift = xxxx0xxx = i - shift, i.e. the partner j = i - shift already handled this pair int i_shifted = i ^ shift; // i_shifted > i corresponds to the first case: if (i_shifted > i) { // Sets the direction of the bitonic subsequence: // 1) for the first iteration the direction changes every 2nd element // 2) for the second iteration the direction changes every 4th element // ... if ((i & iteration) != 0) { if (device_array[i] < device_array[i_shifted]) { swap(device_array[i], device_array[i_shifted]); } } else { if (device_array[i] > device_array[i_shifted]) { swap(device_array[i], device_array[i_shifted]); } } } __syncthreads(); } } } extern "C" __declspec ( dllexport ) void bitonic_sort(float *memory_array, int length) { float *device_array; size_t size = length * sizeof(float); cudaMalloc((void**) &device_array, size); cudaMemcpy(device_array, memory_array, size, cudaMemcpyHostToDevice); // Calculates the number of threads: int count_threads = min(length, 1024); int count_blocks = length / count_threads; // Inits the dimensions of threads and blocks dim3 blocks(count_blocks,1); dim3 threads(count_threads,1); // NOTE: length must be a power of two; with more than one block, // __syncthreads() cannot synchronize the whole array. // Launch the sort kernel bitonic_sort_core<<<blocks, threads>>>(device_array, length); cudaMemcpy(memory_array, device_array, size, cudaMemcpyDeviceToHost); cudaFree(device_array); }
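The i ^ shift pairing in bitonic_sort_core is easiest to see on a worked case: with shift = 1, element 2 (binary 010) pairs with 3 (011); with shift = 2, element 1 (001) pairs with 3 (011); the guard i_shifted > i lets only the lower index of each pair do the compare-and-swap. Below is a hypothetical host-side check of the exported entry point, assuming the array length is a power of two no larger than 1024 so a single block runs the sort; this harness is not part of the original file, and linking against the exported symbol may need dllimport on Windows:

// Hypothetical test harness for bitonic_sort (sketch, not original source).
#include <stdio.h>

extern "C" void bitonic_sort(float *memory_array, int length);

int main() {
    float data[8] = {3.f, 7.f, 1.f, 0.f, 5.f, 2.f, 6.f, 4.f};
    bitonic_sort(data, 8);                                // length 8 = 2^3, one block
    for (int i = 0; i < 8; i++) printf("%g ", data[i]);   // expect: 0 1 2 3 4 5 6 7
    printf("\n");
    return 0;
}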
e658257b9da9efe855382c4f8a7ea4d0e06c8eb6.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <algorithm> #include <util.cuh> #include <worker.cuh> #include "common/logging.h" using namespace std; /* * ==================== * WorkResult * ==================== */ WorkResult::WorkResult(WorkResult::RESULTS resultType, Cost& results) : _resultType(resultType), _results(&results) { } WorkResult::WorkResult(WorkResult::RESULTS resultType) : _resultType(resultType), _results(NULL) { } WorkResult::~WorkResult() { delete _results; // delete NULL is ok } Cost& WorkResult::getResults() const { return *_results; } WorkResult::RESULTS WorkResult::getResultType() const { return _resultType; } /* * ==================== * Worker * ==================== */ Worker::Worker(ConvNet& convNet) : _convNet(&convNet) { } /* * ==================== * DataWorker * ==================== */ DataWorker::DataWorker(ConvNet& convNet, CPUData& data) : Worker(convNet), _data(&data) { _dp = &convNet.getDataProvider(); } DataWorker::~DataWorker() { _dp->clearData(); } /* * ==================== * TrainingWorker * ==================== */ TrainingWorker::TrainingWorker(ConvNet& convNet, CPUData& data, bool test) : DataWorker(convNet, data), _test(test) { } // Need to setData here (as opposed to the constructor) because the constructor executes in // the original CPU thread, which is not the one with GPU access. void TrainingWorker::run() { _dp->setData(*_data); Cost& batchCost = *new Cost(0); for (int i = 0; i < _dp->getNumMinibatches(); i++) { double fPropStart = Now(); _convNet->fprop(i, _test ? PASS_TEST : PASS_TRAIN); double costStart = Now(); _convNet->getCost(batchCost); double bPropStart = Now(); if (!_test) { _convNet->bprop(PASS_TRAIN); _convNet->updateWeights(); } double done = Now(); // PERIODIC(5, // Log_Info("Processing... 
batch %d/%d: %.9f fprop, %.9f cost, %.9f bprop, %.9f total", // i, _dp->getNumMinibatches(), // costStart - fPropStart, // bPropStart - costStart, // done - bPropStart, // done - fPropStart)); } hipDeviceSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); } /* * ==================== * SyncWorker * ==================== */ SyncWorker::SyncWorker(ConvNet& convNet) : Worker(convNet) { } void SyncWorker::run() { _convNet->copyToCPU(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::SYNC_DONE)); } CopyToGPUWorker::CopyToGPUWorker(ConvNet& convNet) : Worker(convNet) {} void CopyToGPUWorker::run() { _convNet->copyToGPU(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::SYNC_DONE)); } /* * ==================== * GradCheckWorker * ==================== */ GradCheckWorker::GradCheckWorker(ConvNet& convNet, CPUData& data) : DataWorker(convNet, data) { } void GradCheckWorker::run() { _dp->setData(*_data); _convNet->checkGradients(); exit(0); } /* * ==================== * MultiviewTestWorker * ==================== */ MultiviewTestWorker::MultiviewTestWorker(ConvNet& convNet, CPUData& data, int numViews, int logregIdx) : DataWorker(convNet, data), _numViews(numViews), _logregIdx(logregIdx) { assert(_data->getNumCases() % _numViews == 0); } void MultiviewTestWorker::run() { _dp->setData(*_data); Layer& logregLayer = _convNet->getLayer(_logregIdx); int numCasesReal = _dp->getNumCases() / _numViews; int numMiniReal = DIVUP(numCasesReal, _dp->getMinibatchSize()); Cost& batchCost = *new Cost(0); for (int i = 0; i < numMiniReal; i++) { NVMatrix softmaxActs; for (int v = 0; v < _numViews; v++) { GPUData& mini = _dp->getDataSlice(v * numCasesReal + i * _dp->getMinibatchSize(), min((v + 1) * numCasesReal, v * numCasesReal + (i + 1) * _dp->getMinibatchSize())); _convNet->fprop(mini, PASS_TEST); if (v == 0) { logregLayer.getPrev()[1]->getActs().copy(softmaxActs); } else { softmaxActs.add(logregLayer.getPrev()[1]->getActs()); } } softmaxActs.scale(1.0 / _numViews); NVMatrixV logregInput; logregInput.push_back(&logregLayer.getPrev()[0]->getActs()); logregInput.push_back(&softmaxActs); logregLayer.fprop(logregInput, PASS_TEST); _convNet->getCost(batchCost); } hipDeviceSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); } /* * ==================== * FeatureWorker * ==================== */ FeatureWorker::FeatureWorker(ConvNet& convNet, CPUData& data, Matrix& ftrs, int layerIdx) : DataWorker(convNet, data), _ftrs(&ftrs), _layerIdx(layerIdx) { assert(ftrs.getNumRows() == data.getNumCases()); assert(!ftrs.isTrans()); } FeatureWorker::~FeatureWorker() { delete _ftrs; } void FeatureWorker::run() { _dp->setData(*_data); Layer& ftrLayer = _convNet->getLayer(_layerIdx); Cost& batchCost = *new Cost(0); for (int i = 0; i < _dp->getNumMinibatches(); i++) { _convNet->fprop(i, PASS_TEST); _convNet->getCost(batchCost); Matrix& miniFtrs = _ftrs->sliceRows(i * _dp->getMinibatchSize(), min(_dp->getNumCases(), (i + 1) * _dp->getMinibatchSize())); NVMatrix& acts = ftrLayer.getActs(); NVMatrix acts_T; if (acts.isTrans()) { NVMatrix& soft_T = acts.getTranspose(); soft_T.transpose(acts_T); delete &soft_T; } else { acts.transpose(acts_T); } acts_T.copyToHost(miniFtrs); delete &miniFtrs; } hipDeviceSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); }
e658257b9da9efe855382c4f8a7ea4d0e06c8eb6.cu
/* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <algorithm> #include <util.cuh> #include <worker.cuh> #include "common/logging.h" using namespace std; /* * ==================== * WorkResult * ==================== */ WorkResult::WorkResult(WorkResult::RESULTS resultType, Cost& results) : _resultType(resultType), _results(&results) { } WorkResult::WorkResult(WorkResult::RESULTS resultType) : _resultType(resultType), _results(NULL) { } WorkResult::~WorkResult() { delete _results; // delete NULL is ok } Cost& WorkResult::getResults() const { return *_results; } WorkResult::RESULTS WorkResult::getResultType() const { return _resultType; } /* * ==================== * Worker * ==================== */ Worker::Worker(ConvNet& convNet) : _convNet(&convNet) { } /* * ==================== * DataWorker * ==================== */ DataWorker::DataWorker(ConvNet& convNet, CPUData& data) : Worker(convNet), _data(&data) { _dp = &convNet.getDataProvider(); } DataWorker::~DataWorker() { _dp->clearData(); } /* * ==================== * TrainingWorker * ==================== */ TrainingWorker::TrainingWorker(ConvNet& convNet, CPUData& data, bool test) : DataWorker(convNet, data), _test(test) { } // Need to setData here (as opposed to the constructor) because the constructor executes in // the original CPU thread, which is not the one with GPU access. void TrainingWorker::run() { _dp->setData(*_data); Cost& batchCost = *new Cost(0); for (int i = 0; i < _dp->getNumMinibatches(); i++) { double fPropStart = Now(); _convNet->fprop(i, _test ? PASS_TEST : PASS_TRAIN); double costStart = Now(); _convNet->getCost(batchCost); double bPropStart = Now(); if (!_test) { _convNet->bprop(PASS_TRAIN); _convNet->updateWeights(); } double done = Now(); // PERIODIC(5, // Log_Info("Processing... 
batch %d/%d: %.9f fprop, %.9f cost, %.9f bprop, %.9f total", // i, _dp->getNumMinibatches(), // costStart - fPropStart, // bPropStart - costStart, // done - bPropStart, // done - fPropStart)); } cudaThreadSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); } /* * ==================== * SyncWorker * ==================== */ SyncWorker::SyncWorker(ConvNet& convNet) : Worker(convNet) { } void SyncWorker::run() { _convNet->copyToCPU(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::SYNC_DONE)); } CopyToGPUWorker::CopyToGPUWorker(ConvNet& convNet) : Worker(convNet) {} void CopyToGPUWorker::run() { _convNet->copyToGPU(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::SYNC_DONE)); } /* * ==================== * GradCheckWorker * ==================== */ GradCheckWorker::GradCheckWorker(ConvNet& convNet, CPUData& data) : DataWorker(convNet, data) { } void GradCheckWorker::run() { _dp->setData(*_data); _convNet->checkGradients(); exit(0); } /* * ==================== * MultiviewTestWorker * ==================== */ MultiviewTestWorker::MultiviewTestWorker(ConvNet& convNet, CPUData& data, int numViews, int logregIdx) : DataWorker(convNet, data), _numViews(numViews), _logregIdx(logregIdx) { assert(_data->getNumCases() % _numViews == 0); } void MultiviewTestWorker::run() { _dp->setData(*_data); Layer& logregLayer = _convNet->getLayer(_logregIdx); int numCasesReal = _dp->getNumCases() / _numViews; int numMiniReal = DIVUP(numCasesReal, _dp->getMinibatchSize()); Cost& batchCost = *new Cost(0); for (int i = 0; i < numMiniReal; i++) { NVMatrix softmaxActs; for (int v = 0; v < _numViews; v++) { GPUData& mini = _dp->getDataSlice(v * numCasesReal + i * _dp->getMinibatchSize(), min((v + 1) * numCasesReal, v * numCasesReal + (i + 1) * _dp->getMinibatchSize())); _convNet->fprop(mini, PASS_TEST); if (v == 0) { logregLayer.getPrev()[1]->getActs().copy(softmaxActs); } else { softmaxActs.add(logregLayer.getPrev()[1]->getActs()); } } softmaxActs.scale(1.0 / _numViews); NVMatrixV logregInput; logregInput.push_back(&logregLayer.getPrev()[0]->getActs()); logregInput.push_back(&softmaxActs); logregLayer.fprop(logregInput, PASS_TEST); _convNet->getCost(batchCost); } cudaThreadSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); } /* * ==================== * FeatureWorker * ==================== */ FeatureWorker::FeatureWorker(ConvNet& convNet, CPUData& data, Matrix& ftrs, int layerIdx) : DataWorker(convNet, data), _ftrs(&ftrs), _layerIdx(layerIdx) { assert(ftrs.getNumRows() == data.getNumCases()); assert(!ftrs.isTrans()); } FeatureWorker::~FeatureWorker() { delete _ftrs; } void FeatureWorker::run() { _dp->setData(*_data); Layer& ftrLayer = _convNet->getLayer(_layerIdx); Cost& batchCost = *new Cost(0); for (int i = 0; i < _dp->getNumMinibatches(); i++) { _convNet->fprop(i, PASS_TEST); _convNet->getCost(batchCost); Matrix& miniFtrs = _ftrs->sliceRows(i * _dp->getMinibatchSize(), min(_dp->getNumCases(), (i + 1) * _dp->getMinibatchSize())); NVMatrix& acts = ftrLayer.getActs(); NVMatrix acts_T; if (acts.isTrans()) { NVMatrix& soft_T = acts.getTranspose(); soft_T.transpose(acts_T); delete &soft_T; } else { acts.transpose(acts_T); } acts_T.copyToHost(miniFtrs); delete &miniFtrs; } cudaThreadSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); }
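MultiviewTestWorker's core idea is to run fprop once per view, accumulate the softmax outputs, scale by 1/numViews, and only then evaluate the cost layer. A minimal CPU sketch of that averaging step with illustrative names (the real code operates on NVMatrix objects on the GPU via add() and scale()):

// Sketch of the per-element view averaging done via NVMatrix add()/scale().
#include <vector>

void average_views(const std::vector<const float*>& view_probs,
                   float* avg, int n) {
    const int num_views = (int)view_probs.size();
    for (int i = 0; i < n; i++) {
        float s = 0.f;
        for (int v = 0; v < num_views; v++) s += view_probs[v][i];
        avg[i] = s / num_views;   // matches softmaxActs.scale(1.0 / _numViews)
    }
}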
191635168bb279075212b3f9adb9f96a725bd4dd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2015 Patrick Putnam // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "square_host.h" #include <stdint.h> #include "popcount_kernel.h" #include <iostream> __global__ void square_mem( SquareHost::int_type * a, unsigned int N ) { unsigned int bid = blockIdx.y * gridDim.x + blockIdx.x; unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; unsigned int idx = bid * (blockDim.x * blockDim.y) + tid; if( idx < N ) { a[idx] *= a[idx]; } } SquareHost::SquareHost( ) : m_a(NULL) , m_dest(NULL) , m_size(0) , m_capacity(0) , m_maxBlocks(0) , m_maxThreadsPerBlock(0) , m_status(true) , m_gen() { init(); } void SquareHost::init( ) { hiprandStatus_t err = hiprandCreateGenerator( &m_gen, HIPRAND_RNG_PSEUDO_MTGP32 ); if( err != HIPRAND_STATUS_SUCCESS ) { std::cerr << "ERROR: Failed to create generator: " << err << std::endl; m_status = false; return; } } void SquareHost::operator()( unsigned int s, seed_type seed ) { resize( s ); if( !good() ) return; if( hiprandSetPseudoRandomGeneratorSeed( m_gen, seed ) != HIPRAND_STATUS_SUCCESS ) { m_status = false; return; } hiprandGenerate( m_gen, m_dest, s ); unsigned int bcount = (s / 1024); if( s % 1024 ) { ++bcount; } dim3 grid(1, bcount, 1), block(256, 4, 1); hipLaunchKernelGGL(computeHW, dim3(grid), dim3(block), 0, 0, m_dest, s ); hipMemcpy( m_a, m_dest, s * sizeof(int_type), hipMemcpyDeviceToHost); // blocks host until copy is complete } void SquareHost::resize( unsigned int s ) { if( !good() ) return; if ( s > m_capacity ) { if( m_a ) { free( m_a ); } if( m_dest ) { hipFree( m_dest ); } size_t byte_size = s * sizeof(int_type ); m_a = (int_type *) malloc( byte_size ); hipError_t err = hipMalloc( (void **) &m_dest, byte_size ); if( err != hipSuccess ) { std::cerr << "Unable to allocate device memory: " << hipGetErrorString(err) << std::endl; m_status = false; } m_capacity = s; } m_size = s; } bool SquareHost::good() const { return m_status; } SquareHost::~SquareHost() { if( m_status ) { hiprandDestroyGenerator( m_gen ); } if( m_a ) free(m_a); if( m_dest ) hipFree(m_dest); } std::ostream & operator<<( std::ostream & out, const SquareHost & rhs ) { if( !rhs.good() ) { out << "BAD STATE"; } else { if( rhs.m_size ) { unsigned int i = 0; out << rhs.m_a[i++]; while( i < rhs.m_size ) { out << "," << rhs.m_a[i++]; } } } return out; }
191635168bb279075212b3f9adb9f96a725bd4dd.cu
// Copyright 2015 Patrick Putnam // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "square_host.h" #include <stdint.h> #include "popcount_kernel.h" #include <iostream> __global__ void square_mem( SquareHost::int_type * a, unsigned int N ) { unsigned int bid = blockIdx.y * gridDim.x + blockIdx.x; unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x; unsigned int idx = bid * (blockDim.x * blockDim.y) + tid; if( idx < N ) { a[idx] *= a[idx]; } } SquareHost::SquareHost( ) : m_a(NULL) , m_dest(NULL) , m_size(0) , m_capacity(0) , m_maxBlocks(0) , m_maxThreadsPerBlock(0) , m_status(true) , m_gen() { init(); } void SquareHost::init( ) { curandStatus_t err = curandCreateGenerator( &m_gen, CURAND_RNG_PSEUDO_MTGP32 ); if( err != CURAND_STATUS_SUCCESS ) { std::cerr << "ERROR: Failed to create generator: " << err << std::endl; m_status = false; return; } } void SquareHost::operator()( unsigned int s, seed_type seed ) { resize( s ); if( !good() ) return; if( curandSetPseudoRandomGeneratorSeed( m_gen, seed ) != CURAND_STATUS_SUCCESS ) { m_status = false; return; } curandGenerate( m_gen, m_dest, s ); unsigned int bcount = (s / 1024); if( s % 1024 ) { ++bcount; } dim3 grid(1, bcount, 1), block(256, 4, 1); computeHW<<<grid, block>>>( m_dest, s ); cudaMemcpy( m_a, m_dest, s * sizeof(int_type), cudaMemcpyDeviceToHost); // blocks host until copy is complete } void SquareHost::resize( unsigned int s ) { if( !good() ) return; if ( s > m_capacity ) { if( m_a ) { free( m_a ); } if( m_dest ) { cudaFree( m_dest ); } size_t byte_size = s * sizeof(int_type ); m_a = (int_type *) malloc( byte_size ); cudaError_t err = cudaMalloc( (void **) &m_dest, byte_size ); if( err != cudaSuccess ) { std::cerr << "Unable to allocate device memory: " << cudaGetErrorString(err) << std::endl; m_status = false; } m_capacity = s; } m_size = s; } bool SquareHost::good() const { return m_status; } SquareHost::~SquareHost() { if( m_status ) { curandDestroyGenerator( m_gen ); } if( m_a ) free(m_a); if( m_dest ) cudaFree(m_dest); } std::ostream & operator<<( std::ostream & out, const SquareHost & rhs ) { if( !rhs.good() ) { out << "BAD STATE"; } else { if( rhs.m_size ) { unsigned int i = 0; out << rhs.m_a[i++]; while( i < rhs.m_size ) { out << "," << rhs.m_a[i++]; } } } return out; }
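SquareHost wraps the host-side cuRAND sequence: create a generator, seed it, generate into device memory, launch a kernel over the result, then destroy the generator with the buffers. A standalone sketch of that same sequence; error handling is trimmed for brevity, the buffer size and seed are arbitrary, and the program links with -lcurand:

// Minimal cuRAND host-API sketch mirroring the pattern in SquareHost.
#include <cstdio>
#include <cuda_runtime.h>
#include <curand.h>

int main() {
    const unsigned int n = 16;
    unsigned int *d_vals = NULL;
    cudaMalloc((void**)&d_vals, n * sizeof(unsigned int));

    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MTGP32);  // same engine as above
    curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
    curandGenerate(gen, d_vals, n);            // fills d_vals with 32-bit randoms

    unsigned int h_vals[16];
    cudaMemcpy(h_vals, d_vals, n * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    for (unsigned int i = 0; i < n; i++) printf("%u\n", h_vals[i]);

    curandDestroyGenerator(gen);
    cudaFree(d_vals);
    return 0;
}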
54eda3f71f6968ddcddff5248244888a78402ce3.hip
// !!! This is a file automatically generated by hipify!!! /******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ // Parts of this code sourced from SnopyDogy // https://gist.github.com/SnopyDogy/a9a22497a893ec86aa3e #if defined(WITH_GRAPHICS) #include <interopManager.hpp> #include <err_cuda.hpp> #include <util.hpp> #include <cstdio> namespace cuda { void InteropManager::destroyResources() { int n = getActiveDeviceId(); for(iter_t iter = interop_maps[n].begin(); iter != interop_maps[n].end(); iter++) { CUDA_CHECK(hipGraphicsUnregisterResource(iter->second)); } } InteropManager::~InteropManager() { try { for(int i = 0; i < getDeviceCount(); i++) { setDevice(i); destroyResources(); } } catch (AfError &ex) { std::string perr = getEnvVar("AF_PRINT_ERRORS"); if(!perr.empty()) { if(perr != "0") fprintf(stderr, "%s\n", ex.what()); } } } InteropManager& InteropManager::getInstance() { static InteropManager my_instance; return my_instance; } cudaGraphicsResource* InteropManager::getBufferResource(const fg::Image* key) { int device = getActiveDeviceId(); void* key_value = (void*)key; if(interop_maps[device].find(key_value) == interop_maps[device].end()) { cudaGraphicsResource *cudaPBOResource; // Register PBO with CUDA CUDA_CHECK(hipGraphicsGLRegisterBuffer(&cudaPBOResource, key->pbo(), hipGraphicsMapFlagsWriteDiscard)); interop_maps[device][key_value] = cudaPBOResource; } return interop_maps[device][key_value]; } cudaGraphicsResource* InteropManager::getBufferResource(const fg::Plot* key) { int device = getActiveDeviceId(); void* key_value = (void*)key; iter_t iter = interop_maps[device].find(key_value); if(iter == interop_maps[device].end()) { cudaGraphicsResource *cudaVBOResource; // Register VBO with CUDA CUDA_CHECK(hipGraphicsGLRegisterBuffer(&cudaVBOResource, key->vbo(), hipGraphicsMapFlagsWriteDiscard)); interop_maps[device][key_value] = cudaVBOResource; } return interop_maps[device][key_value]; } cudaGraphicsResource* InteropManager::getBufferResource(const fg::Plot3* key) { int device = getActiveDeviceId(); void* key_value = (void*)key; iter_t iter = interop_maps[device].find(key_value); if(iter == interop_maps[device].end()) { cudaGraphicsResource *cudaVBOResource; // Register VBO with CUDA CUDA_CHECK(hipGraphicsGLRegisterBuffer(&cudaVBOResource, key->vbo(), hipGraphicsMapFlagsWriteDiscard)); interop_maps[device][key_value] = cudaVBOResource; } return interop_maps[device][key_value]; } cudaGraphicsResource* InteropManager::getBufferResource(const fg::Histogram* key) { int device = getActiveDeviceId(); void* key_value = (void*)key; iter_t iter = interop_maps[device].find(key_value); if(iter == interop_maps[device].end()) { cudaGraphicsResource *cudaVBOResource; // Register VBO with CUDA CUDA_CHECK(hipGraphicsGLRegisterBuffer(&cudaVBOResource, key->vbo(), hipGraphicsMapFlagsWriteDiscard)); interop_maps[device][key_value] = cudaVBOResource; } return interop_maps[device][key_value]; } cudaGraphicsResource* InteropManager::getBufferResource(const fg::Surface* key) { int device = getActiveDeviceId(); void* key_value = (void*)key; iter_t iter = interop_maps[device].find(key_value); if(iter == interop_maps[device].end()) { cudaGraphicsResource *cudaVBOResource; // Register VBO with CUDA CUDA_CHECK(hipGraphicsGLRegisterBuffer(&cudaVBOResource, key->vbo(), hipGraphicsMapFlagsWriteDiscard)); interop_maps[device][key_value] = cudaVBOResource; } return interop_maps[device][key_value]; } } #endif
54eda3f71f6968ddcddff5248244888a78402ce3.cu
/******************************************************* * Copyright (c) 2014, ArrayFire * All rights reserved. * * This file is distributed under 3-clause BSD license. * The complete license agreement can be obtained at: * http://arrayfire.com/licenses/BSD-3-Clause ********************************************************/ // Parts of this code sourced from SnopyDogy // https://gist.github.com/SnopyDogy/a9a22497a893ec86aa3e #if defined(WITH_GRAPHICS) #include <interopManager.hpp> #include <err_cuda.hpp> #include <util.hpp> #include <cstdio> namespace cuda { void InteropManager::destroyResources() { int n = getActiveDeviceId(); for(iter_t iter = interop_maps[n].begin(); iter != interop_maps[n].end(); iter++) { CUDA_CHECK(cudaGraphicsUnregisterResource(iter->second)); } } InteropManager::~InteropManager() { try { for(int i = 0; i < getDeviceCount(); i++) { setDevice(i); destroyResources(); } } catch (AfError &ex) { std::string perr = getEnvVar("AF_PRINT_ERRORS"); if(!perr.empty()) { if(perr != "0") fprintf(stderr, "%s\n", ex.what()); } } } InteropManager& InteropManager::getInstance() { static InteropManager my_instance; return my_instance; } cudaGraphicsResource* InteropManager::getBufferResource(const fg::Image* key) { int device = getActiveDeviceId(); void* key_value = (void*)key; if(interop_maps[device].find(key_value) == interop_maps[device].end()) { cudaGraphicsResource *cudaPBOResource; // Register PBO with CUDA CUDA_CHECK(cudaGraphicsGLRegisterBuffer(&cudaPBOResource, key->pbo(), cudaGraphicsMapFlagsWriteDiscard)); interop_maps[device][key_value] = cudaPBOResource; } return interop_maps[device][key_value]; } cudaGraphicsResource* InteropManager::getBufferResource(const fg::Plot* key) { int device = getActiveDeviceId(); void* key_value = (void*)key; iter_t iter = interop_maps[device].find(key_value); if(iter == interop_maps[device].end()) { cudaGraphicsResource *cudaVBOResource; // Register VBO with CUDA CUDA_CHECK(cudaGraphicsGLRegisterBuffer(&cudaVBOResource, key->vbo(), cudaGraphicsMapFlagsWriteDiscard)); interop_maps[device][key_value] = cudaVBOResource; } return interop_maps[device][key_value]; } cudaGraphicsResource* InteropManager::getBufferResource(const fg::Plot3* key) { int device = getActiveDeviceId(); void* key_value = (void*)key; iter_t iter = interop_maps[device].find(key_value); if(iter == interop_maps[device].end()) { cudaGraphicsResource *cudaVBOResource; // Register VBO with CUDA CUDA_CHECK(cudaGraphicsGLRegisterBuffer(&cudaVBOResource, key->vbo(), cudaGraphicsMapFlagsWriteDiscard)); interop_maps[device][key_value] = cudaVBOResource; } return interop_maps[device][key_value]; } cudaGraphicsResource* InteropManager::getBufferResource(const fg::Histogram* key) { int device = getActiveDeviceId(); void* key_value = (void*)key; iter_t iter = interop_maps[device].find(key_value); if(iter == interop_maps[device].end()) { cudaGraphicsResource *cudaVBOResource; // Register VBO with CUDA CUDA_CHECK(cudaGraphicsGLRegisterBuffer(&cudaVBOResource, key->vbo(), cudaGraphicsMapFlagsWriteDiscard)); interop_maps[device][key_value] = cudaVBOResource; } return interop_maps[device][key_value]; } cudaGraphicsResource* InteropManager::getBufferResource(const fg::Surface* key) { int device = getActiveDeviceId(); void* key_value = (void*)key; iter_t iter = interop_maps[device].find(key_value); if(iter == interop_maps[device].end()) { cudaGraphicsResource *cudaVBOResource; // Register VBO with CUDA CUDA_CHECK(cudaGraphicsGLRegisterBuffer(&cudaVBOResource, key->vbo(), cudaGraphicsMapFlagsWriteDiscard)); interop_maps[device][key_value] = cudaVBOResource; } return interop_maps[device][key_value]; } } #endif
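getBufferResource only registers the GL buffer once per device; a consumer still has to map the resource, fetch a device pointer, write into it, and unmap each frame. Below is a sketch of that cycle using the standard CUDA-GL interop calls; the function name and arguments are illustrative and not ArrayFire API, and 'res' would come from the manager above:

// Sketch: map -> get device pointer -> write -> unmap, per frame.
#include <cuda_runtime.h>

void write_through_interop(cudaGraphicsResource* res, const float* d_src,
                           size_t bytes, cudaStream_t stream) {
    void*  d_dst = NULL;
    size_t mapped_bytes = 0;
    cudaGraphicsMapResources(1, &res, stream);              // GL must not touch it now
    cudaGraphicsResourceGetMappedPointer(&d_dst, &mapped_bytes, res);
    cudaMemcpyAsync(d_dst, d_src, bytes, cudaMemcpyDeviceToDevice, stream);
    cudaGraphicsUnmapResources(1, &res, stream);            // hand it back to GL
}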
b2f038da485be2bbaef7b6e0457ff3c3614a8866.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <hip/hip_runtime_api.h> //#include <cutil.h> #include <hip/hip_runtime.h> #include <string> #define GPUJOULE_DIR "" #define SHARED_MEM_ELEMENTS 1024 #define GLOBAL_MEM_ELEMENTS 19660800 int num_blocks; int num_threads_per_block; int num_iterations; int divergence; float* h_A; float* h_B; float* h_C; float* h_res; float* d_A; float* d_B; float* d_C; float* d_res; __global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) { int block_id; int warp_id; int i; int index; int tid = blockDim.x * blockIdx.x + threadIdx.x; void **ptr_array = (void **)my_ptr_array; unsigned long long *array = (unsigned long long *)my_array; if (tid == 0) { int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; int elements_per_warp = elements_per_block / num_warps_per_block; //int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; for (block_id = 0; block_id < num_blocks_k; block_id++) { for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) { for (i = 0; i < elements_per_warp; i++) { index = (block_id * elements_per_block) + (warp_id * elements_per_warp); //index = (warp_id * elements_per_warp); ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))]; } } } /* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS]; } */ for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { //array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS]; array[i] = (unsigned long long)ptr_array[i]; } } __syncthreads(); } __global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) { // unsigned long long int start_time, end_time; unsigned long long int sum_time = 0; int i, k; int tid = blockDim.x * blockIdx.x + threadIdx.x; int block_id = blockIdx.x; int warp_id = threadIdx.x / 32; int warp_thread_id = threadIdx.x % 32; int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; int elements_per_warp = elements_per_block / num_warps_per_block; // int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id; // int index1 = (warp_id * elements_per_warp) + warp_thread_id; void **ptr_array = (void **)my_ptr_array; unsigned long long int *array = (unsigned long long int *)my_array; void **tmp_ptr; //tmp_ptr = (void *)sdata; //tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[index1])); tmp_ptr = (void **)(&(array[index1])); double f1, f2, f3; f1 = 1.1; f2 = 2.5; if (warp_thread_id < divergence) { /* __asm volatile ( ".reg .f32 %r14;\n\t" "mov.f32 %r14, 2.2;\n\t" ); */ for (int l = 0; l < iterations; l++) { f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 
= f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = 
(void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid); // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl; } void parametric_measure_shared(int N, int iterations, int stride) { hipProfilerStop(); int i; unsigned long long int * h_a; unsigned long long int * d_a; unsigned long long ** h_ptr_a; unsigned long long ** d_ptr_a; unsigned long long * duration; unsigned long long * latency; hipError_t error_id; /* allocate array on CPU */ h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N); h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N); latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks); /* initialize array elements on CPU */ for (i = 0; i < N; i++) { 
h_ptr_a[i] = (unsigned long long *)&h_a[i]; } for (i = 0; i < N; i++) { h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N]; } /* allocate arrays on GPU */ hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N ); hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N ); hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks); hipDeviceSynchronize (); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 1 is %s\n", hipGetErrorString(error_id)); } /* copy array elements from CPU to GPU */ hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice); hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice); hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice); hipDeviceSynchronize (); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 2 is %s\n", hipGetErrorString(error_id)); } hipLaunchKernelGGL(( init_memory) , dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block); hipDeviceSynchronize(); /* launch kernel*/ //dim3 Db = dim3(13); //dim3 Dg = dim3(768,1,1); //printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride); // int sharedMemSize = sizeof(unsigned long long int) * N ; hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/dram/fadd_dram_20_80_64p_asm_power.txt &"; std::system(cmd.c_str()); std::system("sleep 5"); hipEventRecord(start, 0); hipProfilerStart(); hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1); //shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration); //shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence); hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block); hipDeviceSynchronize(); ///hipDeviceSynchronize (); hipProfilerStop(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); std::system("killall power_monitor"); error_id = hipGetLastError(); if (error_id != hipSuccess) { printf("Error 3 is %s\n", hipGetErrorString(error_id)); } /* copy results from GPU to CPU */ hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost); hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost); hipDeviceSynchronize (); /* print results*/ unsigned long long max_dur = latency[0]; unsigned long long min_dur = latency[0]; unsigned long long avg_lat = latency[0]; for (int i = 1; i < num_threads_per_block * num_blocks; i++) { avg_lat += latency[i]; if (latency[i] > max_dur) { max_dur = latency[i]; } else if (latency[i] < min_dur) { min_dur = latency[i]; } } // printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time); printf("%f\n", time); /* free memory on GPU */ hipFree(d_a); hipFree(d_ptr_a); hipFree(duration); hipDeviceSynchronize (); /*free memory on CPU */ 
free(h_a); free(h_ptr_a); free(latency); } int main(int argc, char **argv) { int N; if (argc != 6) { usage(); exit(1); } num_blocks = atoi(argv[1]); num_threads_per_block = atoi(argv[2]); num_iterations = atoi(argv[3]); divergence = atoi(argv[4]); int stride = atoi(argv[5]); N = GLOBAL_MEM_ELEMENTS; parametric_measure_shared(N, 10, stride); return 0; }
b2f038da485be2bbaef7b6e0457ff3c3614a8866.cu
#include <stdio.h> #include <iostream> #include <cuda_profiler_api.h> //#include <cutil.h> #include <cuda_runtime.h> #include <string> #define GPUJOULE_DIR "" #define SHARED_MEM_ELEMENTS 1024 #define GLOBAL_MEM_ELEMENTS 19660800 int num_blocks; int num_threads_per_block; int num_iterations; int divergence; float* h_A; float* h_B; float* h_C; float* h_res; float* d_A; float* d_B; float* d_C; float* d_res; __global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) { int block_id; int warp_id; int i; int index; int tid = blockDim.x * blockIdx.x + threadIdx.x; void **ptr_array = (void **)my_ptr_array; unsigned long long *array = (unsigned long long *)my_array; if (tid == 0) { int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; int elements_per_warp = elements_per_block / num_warps_per_block; //int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; for (block_id = 0; block_id < num_blocks_k; block_id++) { for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) { for (i = 0; i < elements_per_warp; i++) { index = (block_id * elements_per_block) + (warp_id * elements_per_warp); //index = (warp_id * elements_per_warp); ptr_array[index + i] = (void*)&array[(index + ((i + 48) % elements_per_warp))]; } } } /* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS]; } */ for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) { //array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS]; array[i] = (unsigned long long)ptr_array[i]; } } __syncthreads(); } __global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) { // unsigned long long int start_time, end_time; unsigned long long int sum_time = 0; int i, k; int tid = blockDim.x * blockIdx.x + threadIdx.x; int block_id = blockIdx.x; int warp_id = threadIdx.x / 32; int warp_thread_id = threadIdx.x % 32; int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k; int num_warps_per_block = num_threads_per_block_k / 32; int elements_per_warp = elements_per_block / num_warps_per_block; // int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block; int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id; // int index1 = (warp_id * elements_per_warp) + warp_thread_id; void **ptr_array = (void **)my_ptr_array; unsigned long long int *array = (unsigned long long int *)my_array; void **tmp_ptr; //tmp_ptr = (void *)sdata; //tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS])); //tmp_ptr = (void **)(&(ptr_array[index1])); tmp_ptr = (void **)(&(array[index1])); double f1, f2, f3; f1 = 1.1; f2 = 2.5; if (warp_thread_id < divergence) { /* __asm volatile ( ".reg .f32 %r14;\n\t" "mov.f32 %r14, 2.2;\n\t" ); */ for (int l = 0; l < iterations; l++) { f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = 
(void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); 
tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); f1 = f1 + (unsigned long long)(*tmp_ptr); tmp_ptr = (void**)(*tmp_ptr); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid); // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl; } void parametric_measure_shared(int N, int iterations, int stride) { cudaProfilerStop(); int i; unsigned long long int * h_a; unsigned long long int * d_a; unsigned long long ** h_ptr_a; unsigned long long ** d_ptr_a; unsigned long long * duration; unsigned long long * latency; cudaError_t error_id; /* allocate array on CPU */ h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N); h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N); latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks); /* initialize array elements on CPU */ for (i = 0; i < N; i++) { h_ptr_a[i] = (unsigned long long *)&h_a[i]; } for (i = 0; i < 
N; i++) { h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N]; } /* allocate arrays on GPU */ cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N ); cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N ); cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks); cudaThreadSynchronize (); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 1 is %s\n", cudaGetErrorString(error_id)); } /* copy array elements from CPU to GPU */ cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice); cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice); cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice); cudaThreadSynchronize (); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 2 is %s\n", cudaGetErrorString(error_id)); } init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block); cudaDeviceSynchronize(); /* launch kernel*/ //dim3 Db = dim3(13); //dim3 Dg = dim3(768,1,1); //printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride); // int sharedMemSize = sizeof(unsigned long long int) * N ; cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/data_movement_energy/dram/fadd_dram_20_80_64p_asm_power.txt &"; std::system(cmd.c_str()); std::system("sleep 5"); cudaEventRecord(start, 0); cudaProfilerStart(); cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1); //shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration); //shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence); shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block); cudaDeviceSynchronize(); ///cudaThreadSynchronize (); cudaProfilerStop(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); std::system("killall power_monitor"); error_id = cudaGetLastError(); if (error_id != cudaSuccess) { printf("Error 3 is %s\n", cudaGetErrorString(error_id)); } /* copy results from GPU to CPU */ cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost); cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost); cudaThreadSynchronize (); /* print results*/ unsigned long long max_dur = latency[0]; unsigned long long min_dur = latency[0]; unsigned long long avg_lat = latency[0]; for (int i = 1; i < num_threads_per_block * num_blocks; i++) { avg_lat += latency[i]; if (latency[i] > max_dur) { max_dur = latency[i]; } else if (latency[i] < min_dur) { min_dur = latency[i]; } } // printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time); printf("%f\n", time); /* free memory on GPU */ cudaFree(d_a); cudaFree(d_ptr_a); cudaFree(duration); cudaThreadSynchronize (); /*free memory on CPU */ free(h_a); free(h_ptr_a); free(latency); } int main(int argc, char **argv) { int N; if (argc != 6) { 
usage(); exit(1); } /* expects five arguments: num_blocks, num_threads_per_block, iterations, divergence, stride -- the usage() banner above omits the last two */ num_blocks = atoi(argv[1]); num_threads_per_block = atoi(argv[2]); num_iterations = atoi(argv[3]); divergence = atoi(argv[4]); int stride = atoi(argv[5]); N = GLOBAL_MEM_ELEMENTS; parametric_measure_shared(N, 10, stride); return 0; }
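The benchmark above relies on the classic pointer-chasing idiom: every load's address is the value returned by the previous load, so the loads cannot overlap and the elapsed cycles divided by the iteration count approximate a single load's latency (the long hand-unrolled chain is deliberate, to amortize loop overhead). A minimal self-contained sketch of that idiom follows; the kernel name, ring sizes, and clock64-based timing are illustrative assumptions, not taken from the file above.

#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// Dependent-load chain: p always points at the cell holding the next address,
// so the i-th load cannot issue before the (i-1)-th completes.
__global__ void chase(void **start, int iters,
                      unsigned long long *cycles, void **sink) {
    void **p = start;
    unsigned long long t0 = clock64();
    for (int i = 0; i < iters; ++i)
        p = (void **)*p;
    unsigned long long t1 = clock64();
    *cycles = t1 - t0;
    *sink = (void *)p;  // keep the chain observable so it is not optimized away
}

int main() {
    const int N = 1 << 20, iters = 1 << 16, stride = 64;  // illustrative sizes
    void **h = (void **)malloc(N * sizeof(void *));
    void **d;
    cudaMalloc((void **)&d, N * sizeof(void *));
    for (int i = 0; i < N; i++)  // ring of device addresses with a fixed stride
        h[i] = (void *)(d + (i + stride) % N);
    cudaMemcpy(d, h, N * sizeof(void *), cudaMemcpyHostToDevice);
    unsigned long long *c;
    void **s;
    cudaMalloc((void **)&c, sizeof(unsigned long long));
    cudaMalloc((void **)&s, sizeof(void *));
    chase<<<1, 1>>>(d, iters, c, s);
    unsigned long long cycles = 0;
    cudaMemcpy(&cycles, c, sizeof(cycles), cudaMemcpyDeviceToHost);
    printf("avg cycles per dependent load: %.2f\n", (double)cycles / iters);
    cudaFree(d); cudaFree(c); cudaFree(s); free(h);
    return 0;
}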
09d83a27386018263cac2d4602c28cb095bc2541.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <assert.h> #include <hip/hip_runtime.h> #include <hipblas.h> #define MAX_BLOCKS 8 #define MAX_THREADS 1024 // Multiply the arrays A and B on GPU and save the result in C // C(m,n) = A(m,k) * B(k,n) void cublas_matmul(const float *A, const float *B, float *C, const int m, const int k, const int n) { int lda=m,ldb=k,ldc=m; const float alf = 1; const float bet = 0; const float *alpha = &alf; const float *beta = &bet; // Create a handle for CUBLAS hipblasHandle_t handle; hipblasStatus_t ret = hipblasCreate(&handle); if (ret != HIPBLAS_STATUS_SUCCESS) { printf("hipblasCreate returned error code %d, line(%d)\n", ret, __LINE__); exit(EXIT_FAILURE); } // Do the actual multiplication hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); // Destroy the handle hipblasDestroy(handle); } void printMatrix(float *mat, int nRows, int nCols) { for (int i = 0; i < nRows; i++) { for (int j = 0; j < nCols; j++) { printf("%f\t", mat[i * nCols + j]); } printf("\n"); } } int main() { hipError_t c_e; hipEvent_t start_kernel, stop_kernel; float kernel_time; // Initialize the matrix int nRows_A = 3, nRows_B = 3; int nCols_A = 3, nCols_B = 3; assert(nCols_A == nRows_B); float *A, *B, *C; A = (float*) malloc(nRows_A * nCols_A * sizeof(float)); B = (float*) malloc(nRows_B * nCols_B * sizeof(float)); C = (float*) malloc(nRows_A * nCols_B * sizeof(float)); // Copy data to the matrix for (int i = 0; i < nRows_A; i++) { for (int j = 0; j < nCols_A; j++) { A[i * nCols_A + j] = i * nCols_A + j; } } for (int i = 0; i < nRows_B; i++) { for (int j = 0; j < nCols_B; j++) { B[i * nCols_B + j] = (nRows_B * nCols_B) - (i * nCols_A + j); } } printf("Matrix A\n"); printMatrix(A, nRows_A, nCols_A); printf("Matrix B\n"); printMatrix(B, nRows_B, nCols_B); printf("Matrix C\n"); printMatrix(C, nRows_A, nCols_B); float *dev_A, *dev_B, *dev_C; c_e = hipMalloc((void **)&dev_A, nRows_A * nCols_A * sizeof(float)); if(c_e!=hipSuccess) { printf("Error (dev_A allocation): %d\n",c_e); exit(-1); } c_e = hipMalloc((void **)&dev_B, nRows_A * nCols_A * sizeof(float)); if(c_e!=hipSuccess) { printf("Error (dev_B allocation): %d\n",c_e); exit(-1); } c_e = hipMalloc((void **)&dev_C, nRows_A * nCols_B * sizeof(float)); if(c_e!=hipSuccess) { printf("Error (dev_C allocation): %d\n",c_e); exit(-1); } // Copy matrices to device hipMemcpy(dev_A, A, nRows_A * nCols_A * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(dev_B, B, nRows_B * nCols_B * sizeof(float), hipMemcpyHostToDevice); // Execute kernel A hipEventCreate(&start_kernel); hipEventCreate(&stop_kernel); hipEventRecord(start_kernel, 0); // cublas_matmul<<<Blocks,Threads>>>(dev_A, dev_B, dev_C, nRows_A, nCols_A, nCols_B); cublas_matmul (dev_A, dev_B, dev_C, nRows_A, nCols_A, nCols_B); hipEventRecord(stop_kernel, 0); hipEventSynchronize(stop_kernel); c_e=hipDeviceSynchronize(); if(c_e!=hipSuccess) { printf("Error: %d\n",c_e); exit(-1); } // Copy output matrix to host hipMemcpy(C, dev_C, nRows_A * nCols_B * sizeof(float), hipMemcpyDeviceToHost); printf("Matrix C\n"); printMatrix(C, nRows_A, nCols_B); // Destroy events hipEventElapsedTime(&kernel_time, start_kernel, stop_kernel); hipEventDestroy(start_kernel); hipEventDestroy(stop_kernel); // Release device memory hipFree(dev_A); hipFree(dev_B); hipFree(dev_C); // Release host memory free(A); free(B); free(C); }
09d83a27386018263cac2d4602c28cb095bc2541.cu
#include <stdlib.h> #include <stdio.h> #include <assert.h> #include <cuda.h> #include <cublas_v2.h> #define MAX_BLOCKS 8 #define MAX_THREADS 1024 // Multiply the arrays A and B on GPU and save the result in C // C(m,n) = A(m,k) * B(k,n) void cublas_matmul(const float *A, const float *B, float *C, const int m, const int k, const int n) { int lda=m,ldb=k,ldc=m; const float alf = 1; const float bet = 0; const float *alpha = &alf; const float *beta = &bet; // Create a handle for CUBLAS cublasHandle_t handle; cublasStatus_t ret = cublasCreate(&handle); if (ret != CUBLAS_STATUS_SUCCESS) { printf("cublasCreate returned error code %d, line(%d)\n", ret, __LINE__); exit(EXIT_FAILURE); } // Do the actual multiplication cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc); // Destroy the handle cublasDestroy(handle); } void printMatrix(float *mat, int nRows, int nCols) { for (int i = 0; i < nRows; i++) { for (int j = 0; j < nCols; j++) { printf("%f\t", mat[i * nCols + j]); } printf("\n"); } } int main() { cudaError_t c_e; cudaEvent_t start_kernel, stop_kernel; float kernel_time; // Initialize the matrix int nRows_A = 3, nRows_B = 3; int nCols_A = 3, nCols_B = 3; assert(nCols_A == nRows_B); float *A, *B, *C; A = (float*) malloc(nRows_A * nCols_A * sizeof(float)); B = (float*) malloc(nRows_B * nCols_B * sizeof(float)); C = (float*) malloc(nRows_A * nCols_B * sizeof(float)); // Copy data to the matrix for (int i = 0; i < nRows_A; i++) { for (int j = 0; j < nCols_A; j++) { A[i * nCols_A + j] = i * nCols_A + j; } } for (int i = 0; i < nRows_B; i++) { for (int j = 0; j < nCols_B; j++) { B[i * nCols_B + j] = (nRows_B * nCols_B) - (i * nCols_A + j); } } printf("Matrix A\n"); printMatrix(A, nRows_A, nCols_A); printf("Matrix B\n"); printMatrix(B, nRows_B, nCols_B); printf("Matrix C\n"); printMatrix(C, nRows_A, nCols_B); float *dev_A, *dev_B, *dev_C; c_e = cudaMalloc((void **)&dev_A, nRows_A * nCols_A * sizeof(float)); if(c_e!=cudaSuccess) { printf("Error (dev_A allocation): %d\n",c_e); exit(-1); } c_e = cudaMalloc((void **)&dev_B, nRows_A * nCols_A * sizeof(float)); if(c_e!=cudaSuccess) { printf("Error (dev_B allocation): %d\n",c_e); exit(-1); } c_e = cudaMalloc((void **)&dev_C, nRows_A * nCols_B * sizeof(float)); if(c_e!=cudaSuccess) { printf("Error (dev_C allocation): %d\n",c_e); exit(-1); } // Copy matrices to device cudaMemcpy(dev_A, A, nRows_A * nCols_A * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dev_B, B, nRows_B * nCols_B * sizeof(float), cudaMemcpyHostToDevice); // Execute kernel A cudaEventCreate(&start_kernel); cudaEventCreate(&stop_kernel); cudaEventRecord(start_kernel, 0); // cublas_matmul<<<Blocks,Threads>>>(dev_A, dev_B, dev_C, nRows_A, nCols_A, nCols_B); cublas_matmul (dev_A, dev_B, dev_C, nRows_A, nCols_A, nCols_B); cudaEventRecord(stop_kernel, 0); cudaEventSynchronize(stop_kernel); c_e=cudaThreadSynchronize(); if(c_e!=cudaSuccess) { printf("Error: %d\n",c_e); exit(-1); } // Copy output matrix to host cudaMemcpy(C, dev_C, nRows_A * nCols_B * sizeof(float), cudaMemcpyDeviceToHost); printf("Matrix C\n"); printMatrix(C, nRows_A, nCols_B); // Destroy events cudaEventElapsedTime(&kernel_time, start_kernel, stop_kernel); cudaEventDestroy(start_kernel); cudaEventDestroy(stop_kernel); // Release device memory cudaFree(dev_A); cudaFree(dev_B); cudaFree(dev_C); // Release host memory free(A); free(B); free(C); }
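One subtlety in the pair above: cublasSgemm/hipblasSgemm assume column-major storage, while the host loops fill A and B row-major (index i * nCols + j), so the C that gets printed is effectively B*A in the row-major view rather than A*B. A hedged sketch of the standard fix follows -- not something the file above does -- exploiting the fact that a row-major buffer read column-major is the transpose, so C = A*B can be obtained by computing C^T = B^T * A^T with swapped operands:

#include <cublas_v2.h>

// Row-major C(m,n) = A(m,k) * B(k,n) using column-major cuBLAS.
void sgemm_row_major(cublasHandle_t handle, int m, int k, int n,
                     const float *A, const float *B, float *C) {
    const float alpha = 1.0f, beta = 0.0f;
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                n, m, k,      // dimensions of C^T
                &alpha,
                B, n,         // B read column-major acts as B^T (ld = n)
                A, k,         // A read column-major acts as A^T (ld = k)
                &beta,
                C, n);        // written as C^T column-major == C row-major
}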
a7d22e16c4b7d501d001f2a90c51e3ed54d884bf.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> int main(void) { int device_count = 0; hipGetDeviceCount(&device_count); if (device_count == 0){ printf("There are no available device(s) that support CUDA\n"); } else { printf("Detected %d CUDA capable device(s)\n", device_count); } }
a7d22e16c4b7d501d001f2a90c51e3ed54d884bf.cu
#include <stdio.h> #include <cuda_runtime.h> int main(void) { int device_count = 0; cudaGetDeviceCount(&device_count); if (device_count == 0){ printf("There are no available device(s) that support CUDA\n"); } else { printf("Detected %d CUDA capable device(s)\n", device_count); } }
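A small hedged variant of the pair above: cudaGetDeviceCount itself returns a cudaError_t that both programs discard, and when no driver or device is usable it reports an error rather than a zero count, so checking the return value distinguishes "zero devices" from "CUDA not usable at all".

#include <stdio.h>
#include <cuda_runtime.h>

int main(void) {
    int device_count = 0;
    cudaError_t err = cudaGetDeviceCount(&device_count);
    if (err != cudaSuccess) {  // e.g. no driver, or an incompatible driver
        printf("cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("Detected %d CUDA capable device(s)\n", device_count);
    return 0;
}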
9b71aeef529b6a00c1feb923628cb7722671d5da.hip
// !!! This is a file automatically generated by hipify!!! #include <Hornet.hpp> #include "StandardAPI.hpp" #include "Util/BatchFunctions.hpp" #include "Util/RandomGraphData.cuh" //#include <Device/Util/CudaUtil.cuh> //xlib::deviceInfo //#include <Device/Util/Timer.cuh> //xlib::Timer #include <string> #include <algorithm> //std::generate #include <Graph/GraphStd.hpp> #include <Host/Classes/Timer.hpp> #include <Device/Util/Timer.cuh> #include "Util/CommandLineParam.hpp" using namespace std::string_literals; using vert_t = int; using eoff_t = int; using HornetGPU = hornet::gpu::Hornet<vert_t>; using UpdatePtr = hornet::BatchUpdatePtr<vert_t, hornet::EMPTY, hornet::DeviceType::HOST>; using Update = hornet::gpu::BatchUpdate<vert_t>; using Init = hornet::HornetInit<vert_t>; using hornet::SoAData; using hornet::TypeList; using hornet::DeviceType; //#define RANDOM void deleteBatch(HornetGPU &hornet, vert_t * src, vert_t * dst, const int batch_size, const bool print_debug) { UpdatePtr ptr(batch_size, src, dst); Update batch_update(ptr); if (print_debug) { batch_update.print(); std::cout<<"ne: "<<hornet.nE()<<"\n=======\n"; hornet.print(); } hornet.erase(batch_update); if (print_debug) { std::cout<<"ne: "<<hornet.nE()<<"\n=======\n"; hornet.print(); } } void deleteBatchTest(HornetGPU &hornet, graph::GraphStd<vert_t, eoff_t> &graph, int batch_size, const bool print_debug) { #ifndef RANDOM vert_t batch_src[] = {1, 5, 2, 4}; vert_t batch_dst[] = {2, 4, 1, 5}; batch_size = 4; #else vert_t* batch_src, *batch_dst; host::allocatePageLocked(batch_src, batch_size); host::allocatePageLocked(batch_dst, batch_size); generateBatch(graph, batch_size, batch_src, batch_dst, BatchGenType::INSERT); #endif deleteBatch(hornet, batch_src, batch_dst, batch_size, print_debug); #ifndef RANDOM #else host::freePageLocked(batch_src, batch_dst); #endif } int exec(int argc, char* argv[]) { using namespace graph::structure_prop; using namespace graph::parsing_prop; graph::GraphStd<vert_t, vert_t> graph; graph.read(argv[1]); int batch_size = std::stoi(argv[2]); Init hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(), graph.csr_out_edges()); HornetGPU hornet_gpu(hornet_init); auto init_coo = hornet_gpu.getCOO(true); hornet::RandomGenTraits<hornet::EMPTY> cooGenTraits; auto randomBatch = hornet::selectRandom(init_coo, batch_size, cooGenTraits); Update batch_update(randomBatch); hornet_gpu.erase(batch_update); auto inst_coo = hornet_gpu.getCOO(true); inst_coo.append(randomBatch); inst_coo.sort(); hornet::COO<DeviceType::HOST, vert_t, hornet::EMPTY, eoff_t> host_init_coo = init_coo; hornet::COO<DeviceType::HOST, vert_t, hornet::EMPTY, eoff_t> host_inst_coo = inst_coo; auto *s = host_init_coo.srcPtr(); auto *d = host_init_coo.dstPtr(); auto *S = host_inst_coo.srcPtr(); auto *D = host_inst_coo.dstPtr(); auto len = host_init_coo.size(); bool err = false; if (host_inst_coo.size() != host_init_coo.size()) { err = true; std::cerr<<"\nInit Size "<<host_init_coo.size()<<" != Combined size "<<host_inst_coo.size()<<"\n"; len = ::min(host_init_coo.size(), host_inst_coo.size()); } for (int i = 0; i < len; ++i) { if ((s[i] != S[i]) || (d[i] != D[i])) { err = true; std::cout<<"ERR : "; std::cout<<s[i]<<" "<<d[i]<<"\t"; std::cout<<"\t\t"; std::cout<<S[i]<<" "<<D[i]; std::cout<<"\n"; } } if (!err) { std::cout<<"PASSED\n"; } else { std::cout<<"NOT PASSED\n"; } return 0; } int main(int argc, char* argv[]) { int ret = 0; { ret = exec(argc, argv); } return ret; }
9b71aeef529b6a00c1feb923628cb7722671d5da.cu
#include <Hornet.hpp> #include "StandardAPI.hpp" #include "Util/BatchFunctions.hpp" #include "Util/RandomGraphData.cuh" //#include <Device/Util/CudaUtil.cuh> //xlib::deviceInfo //#include <Device/Util/Timer.cuh> //xlib::Timer #include <string> #include <algorithm> //std::generate #include <Graph/GraphStd.hpp> #include <Host/Classes/Timer.hpp> #include <Device/Util/Timer.cuh> #include "Util/CommandLineParam.hpp" using namespace std::string_literals; using vert_t = int; using eoff_t = int; using HornetGPU = hornet::gpu::Hornet<vert_t>; using UpdatePtr = hornet::BatchUpdatePtr<vert_t, hornet::EMPTY, hornet::DeviceType::HOST>; using Update = hornet::gpu::BatchUpdate<vert_t>; using Init = hornet::HornetInit<vert_t>; using hornet::SoAData; using hornet::TypeList; using hornet::DeviceType; //#define RANDOM void deleteBatch(HornetGPU &hornet, vert_t * src, vert_t * dst, const int batch_size, const bool print_debug) { UpdatePtr ptr(batch_size, src, dst); Update batch_update(ptr); if (print_debug) { batch_update.print(); std::cout<<"ne: "<<hornet.nE()<<"\n=======\n"; hornet.print(); } hornet.erase(batch_update); if (print_debug) { std::cout<<"ne: "<<hornet.nE()<<"\n=======\n"; hornet.print(); } } void deleteBatchTest(HornetGPU &hornet, graph::GraphStd<vert_t, eoff_t> &graph, int batch_size, const bool print_debug) { #ifndef RANDOM vert_t batch_src[] = {1, 5, 2, 4}; vert_t batch_dst[] = {2, 4, 1, 5}; batch_size = 4; #else vert_t* batch_src, *batch_dst; host::allocatePageLocked(batch_src, batch_size); host::allocatePageLocked(batch_dst, batch_size); generateBatch(graph, batch_size, batch_src, batch_dst, BatchGenType::INSERT); #endif deleteBatch(hornet, batch_src, batch_dst, batch_size, print_debug); #ifndef RANDOM #else host::freePageLocked(batch_src, batch_dst); #endif } int exec(int argc, char* argv[]) { using namespace graph::structure_prop; using namespace graph::parsing_prop; graph::GraphStd<vert_t, vert_t> graph; graph.read(argv[1]); int batch_size = std::stoi(argv[2]); Init hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(), graph.csr_out_edges()); HornetGPU hornet_gpu(hornet_init); auto init_coo = hornet_gpu.getCOO(true); hornet::RandomGenTraits<hornet::EMPTY> cooGenTraits; auto randomBatch = hornet::selectRandom(init_coo, batch_size, cooGenTraits); Update batch_update(randomBatch); hornet_gpu.erase(batch_update); auto inst_coo = hornet_gpu.getCOO(true); inst_coo.append(randomBatch); inst_coo.sort(); hornet::COO<DeviceType::HOST, vert_t, hornet::EMPTY, eoff_t> host_init_coo = init_coo; hornet::COO<DeviceType::HOST, vert_t, hornet::EMPTY, eoff_t> host_inst_coo = inst_coo; auto *s = host_init_coo.srcPtr(); auto *d = host_init_coo.dstPtr(); auto *S = host_inst_coo.srcPtr(); auto *D = host_inst_coo.dstPtr(); auto len = host_init_coo.size(); bool err = false; if (host_inst_coo.size() != host_init_coo.size()) { err = true; std::cerr<<"\nInit Size "<<host_init_coo.size()<<" != Combined size "<<host_inst_coo.size()<<"\n"; len = std::min(host_init_coo.size(), host_inst_coo.size()); } for (int i = 0; i < len; ++i) { if ((s[i] != S[i]) || (d[i] != D[i])) { err = true; std::cout<<"ERR : "; std::cout<<s[i]<<" "<<d[i]<<"\t"; std::cout<<"\t\t"; std::cout<<S[i]<<" "<<D[i]; std::cout<<"\n"; } } if (!err) { std::cout<<"PASSED\n"; } else { std::cout<<"NOT PASSED\n"; } return 0; } int main(int argc, char* argv[]) { int ret = 0; { ret = exec(argc, argv); } return ret; }
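The check in exec() above reduces to one invariant: a batch sampled from the graph, erased, and then appended back must reproduce the initial edge list once both sides are sorted. A self-contained sketch of that round-trip test follows, with std::pair edges standing in for the Hornet COO containers (an illustrative assumption, not the library's types):

#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

using Edge = std::pair<int, int>;

// sorted(before) must equal sorted(after_erase + batch), assuming the batch
// was drawn from 'before' (as hornet::selectRandom does above).
bool erase_roundtrip_ok(std::vector<Edge> before,
                        std::vector<Edge> after_erase,
                        const std::vector<Edge> &batch) {
    after_erase.insert(after_erase.end(), batch.begin(), batch.end());
    std::sort(before.begin(), before.end());
    std::sort(after_erase.begin(), after_erase.end());
    return before == after_erase;
}

int main() {
    std::vector<Edge> before = {{1, 2}, {2, 1}, {4, 5}, {5, 4}};
    std::vector<Edge> batch  = {{1, 2}, {5, 4}};
    std::vector<Edge> after  = {{2, 1}, {4, 5}};  // 'before' with 'batch' erased
    printf(erase_roundtrip_ok(before, after, batch) ? "PASSED\n" : "NOT PASSED\n");
    return 0;
}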
f460a2e9236b8a7ff1e817584482ea4f0cc8ea1c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <arbor/gpu/gpu_common.hpp> #include <arbor/gpu/math_cu.hpp> #include <arbor/gpu/reduce_by_key.hpp> #include <arbor/mechanism_abi.h> namespace testing { #define PPACK_IFACE_BLOCK \ auto _pp_var_width __attribute__((unused)) = params_.width;\ auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\ auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\ auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\ auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\ auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\ auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\ auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\ auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\ auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\ auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\ auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\ auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\ auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\ auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\ auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\ auto* _pp_var_weight __attribute__((unused)) = params_.weight;\ auto& _pp_var_events __attribute__((unused)) = params_.events;\ auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\ auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\ auto _pp_var_x __attribute__((unused)) = params_.globals[0];\ auto _pp_var_y __attribute__((unused)) = params_.globals[1];\ auto _pp_var_z __attribute__((unused)) = params_.globals[2];\ auto _pp_var_w __attribute__((unused)) = params_.globals[3];\ auto _pp_var_s0 __attribute__((unused)) = params_.globals[4];\ auto _pp_var_s1 __attribute__((unused)) = params_.globals[5];\ auto* _pp_var_A __attribute__((unused)) = params_.state_vars[0];\ auto* _pp_var_B __attribute__((unused)) = params_.state_vars[1];\ auto* _pp_var_C __attribute__((unused)) = params_.state_vars[2];\ auto* _pp_var_d __attribute__((unused)) = params_.state_vars[3];\ auto* _pp_var_e __attribute__((unused)) = params_.state_vars[4];\ //End of IFACEBLOCK namespace { using ::arb::gpu::exprelr; using ::arb::gpu::safeinv; using ::arb::gpu::min; using ::arb::gpu::max; __global__ void init(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { _pp_var_A[tid_] = 4.5; _pp_var_B[tid_] = 6.5999999999999996; _pp_var_C[tid_] = 0.28000000000000003; _pp_var_d[tid_] = 2.0; _pp_var_e[tid_] = 0.; } } __global__ void multiply(arb_mechanism_ppack params_) { PPACK_IFACE_BLOCK; auto tid_ = threadIdx.x + blockDim.x*blockIdx.x; auto idx_ = blockIdx.y; if(tid_<_pp_var_width) { _pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_]; } } __global__ void advance_state(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { auto node_indexi_ = _pp_var_node_index[tid_]; arb_value_type dt = _pp_var_vec_dt[node_indexi_]; arb_value_type t_38_, t_36_, t_35_, t_34_, t_33_, t_31_, t_29_, t_26_, t_25_, t_24_, t_23_, t_21_, t_19_, t_16_, t_15_, t_13_, t_28_, t_11_, j_10_, t_12_, t_9_, t_18_, t_7_, t_6_, t_5_, j_16_, t_3_, j_15_, f_0_, f_4_, p_2_, j_9_, t_2_, f_2_, 
t_17_, j_7_, j_8_, j_12_, f_1_, t_30_, j_2_, p_1_, t_27_, s_3_, s_2_, s_0_, t_32_, t_14_, j_11_, j_3_, j_13_, t_20_, j_0_, p_0_, p_4_, j_5_, t_8_, j_1_, t_4_, j_14_, s_1_, s_4_, t_10_, f_3_, p_3_, j_6_, t_22_, j_4_, t_37_, t_1_, t_0_; p_0_ = _pp_var_A[tid_]; t_0_ = _pp_var_A[tid_]; p_1_ = _pp_var_B[tid_]; t_1_ = _pp_var_B[tid_]; p_2_ = _pp_var_C[tid_]; t_2_ = _pp_var_C[tid_]; p_3_ = _pp_var_d[tid_]; t_3_ = _pp_var_d[tid_]; p_4_ = _pp_var_e[tid_]; t_4_ = _pp_var_e[tid_]; s_0_ = 1.0/_pp_var_s0; s_1_ = 1.0/_pp_var_s0; s_2_ = 1.0/_pp_var_s0; s_3_ = 1.0/_pp_var_s1; s_4_ = 1.0/_pp_var_s1; f_0_ = t_0_-(p_0_+( -1.0*(t_1_*(t_0_*_pp_var_x)-t_2_*_pp_var_y)+ -1.0*(t_3_*(t_0_*_pp_var_z)-t_4_*_pp_var_w))*dt*s_0_); f_1_ = t_1_-(p_1_+ -1.0*(t_1_*(t_0_*_pp_var_x)-t_2_*_pp_var_y)*dt*s_1_); f_2_ = t_2_-(p_2_+(t_1_*(t_0_*_pp_var_x)-t_2_*_pp_var_y)*dt*s_2_); f_3_ = t_3_-(p_3_+ -1.0*(t_3_*(t_0_*_pp_var_z)-t_4_*_pp_var_w)*dt*s_3_); f_4_ = t_4_-(p_4_+(t_3_*(t_0_*_pp_var_z)-t_4_*_pp_var_w)*dt*s_4_); j_0_ = 1.0-( -1.0*(t_1_*_pp_var_x)+ -1.0*(t_3_*_pp_var_z))*dt*s_0_; j_1_ = -( -1.0*(t_0_*_pp_var_x)*dt*s_0_); j_2_ = -( -1.0* -_pp_var_y*dt*s_0_); j_3_ = -( -1.0*(t_0_*_pp_var_z)*dt*s_0_); j_4_ = -( -1.0* -_pp_var_w*dt*s_0_); j_5_ = -( -1.0*(t_1_*_pp_var_x)*dt*s_1_); j_6_ = 1.0- -1.0*(t_0_*_pp_var_x)*dt*s_1_; j_7_ = -( -1.0* -_pp_var_y*dt*s_1_); j_8_ = -(t_1_*_pp_var_x*dt*s_2_); j_9_ = -(t_0_*_pp_var_x*dt*s_2_); j_10_ = 1.0- -_pp_var_y*dt*s_2_; j_11_ = -( -1.0*(t_3_*_pp_var_z)*dt*s_3_); j_12_ = 1.0- -1.0*(t_0_*_pp_var_z)*dt*s_3_; j_13_ = -( -1.0* -_pp_var_w*dt*s_3_); j_14_ = -(t_3_*_pp_var_z*dt*s_4_); j_15_ = -(t_0_*_pp_var_z*dt*s_4_); j_16_ = 1.0- -_pp_var_w*dt*s_4_; t_5_ = j_16_*j_0_-j_4_*j_14_; t_6_ = j_16_*j_1_; t_7_ = j_16_*j_2_; t_8_ = j_16_*j_3_-j_4_*j_15_; t_9_ = j_16_*f_0_-j_4_*f_4_; t_10_ = j_16_*j_11_-j_13_*j_14_; t_11_ = j_16_*j_12_-j_13_*j_15_; t_12_ = j_16_*f_3_-j_13_*f_4_; t_13_ = t_11_*t_5_-t_8_*t_10_; t_14_ = t_11_*t_6_; t_15_ = t_11_*t_7_; t_16_ = t_11_*t_9_-t_8_*t_12_; t_17_ = t_11_*j_14_-j_15_*t_10_; t_18_ = t_11_*j_16_; t_19_ = t_11_*f_4_-j_15_*t_12_; t_20_ = j_10_*t_13_-t_15_*j_8_; t_21_ = j_10_*t_14_-t_15_*j_9_; t_22_ = j_10_*t_16_-t_15_*f_2_; t_23_ = j_10_*j_5_-j_7_*j_8_; t_24_ = j_10_*j_6_-j_7_*j_9_; t_25_ = j_10_*f_1_-j_7_*f_2_; t_26_ = t_24_*t_20_-t_21_*t_23_; t_27_ = t_24_*t_22_-t_21_*t_25_; t_28_ = t_24_*j_8_-j_9_*t_23_; t_29_ = t_24_*j_10_; t_30_ = t_24_*f_2_-j_9_*t_25_; t_31_ = t_26_*t_24_; t_32_ = t_26_*t_25_-t_23_*t_27_; t_33_ = t_26_*t_29_; t_34_ = t_26_*t_30_-t_28_*t_27_; t_35_ = t_26_*t_11_; t_36_ = t_26_*t_12_-t_10_*t_27_; t_37_ = t_26_*t_18_; t_38_ = t_26_*t_19_-t_17_*t_27_; t_0_ = t_0_-t_27_/t_26_; t_1_ = t_1_-t_32_/t_31_; t_2_ = t_2_-t_34_/t_33_; t_3_ = t_3_-t_36_/t_35_; t_4_ = t_4_-t_38_/t_37_; f_0_ = t_0_-(p_0_+( -1.0*(t_1_*(t_0_*_pp_var_x)-t_2_*_pp_var_y)+ -1.0*(t_3_*(t_0_*_pp_var_z)-t_4_*_pp_var_w))*dt*s_0_); f_1_ = t_1_-(p_1_+ -1.0*(t_1_*(t_0_*_pp_var_x)-t_2_*_pp_var_y)*dt*s_1_); f_2_ = t_2_-(p_2_+(t_1_*(t_0_*_pp_var_x)-t_2_*_pp_var_y)*dt*s_2_); f_3_ = t_3_-(p_3_+ -1.0*(t_3_*(t_0_*_pp_var_z)-t_4_*_pp_var_w)*dt*s_3_); f_4_ = t_4_-(p_4_+(t_3_*(t_0_*_pp_var_z)-t_4_*_pp_var_w)*dt*s_4_); j_0_ = 1.0-( -1.0*(t_1_*_pp_var_x)+ -1.0*(t_3_*_pp_var_z))*dt*s_0_; j_1_ = -( -1.0*(t_0_*_pp_var_x)*dt*s_0_); j_2_ = -( -1.0* -_pp_var_y*dt*s_0_); j_3_ = -( -1.0*(t_0_*_pp_var_z)*dt*s_0_); j_4_ = -( -1.0* -_pp_var_w*dt*s_0_); j_5_ = -( -1.0*(t_1_*_pp_var_x)*dt*s_1_); j_6_ = 1.0- -1.0*(t_0_*_pp_var_x)*dt*s_1_; j_7_ = -( -1.0* -_pp_var_y*dt*s_1_); j_8_ = -(t_1_*_pp_var_x*dt*s_2_); j_9_ = 
-(t_0_*_pp_var_x*dt*s_2_); j_10_ = 1.0- -_pp_var_y*dt*s_2_; j_11_ = -( -1.0*(t_3_*_pp_var_z)*dt*s_3_); j_12_ = 1.0- -1.0*(t_0_*_pp_var_z)*dt*s_3_; j_13_ = -( -1.0* -_pp_var_w*dt*s_3_); j_14_ = -(t_3_*_pp_var_z*dt*s_4_); j_15_ = -(t_0_*_pp_var_z*dt*s_4_); j_16_ = 1.0- -_pp_var_w*dt*s_4_; t_5_ = j_16_*j_0_-j_4_*j_14_; t_6_ = j_16_*j_1_; t_7_ = j_16_*j_2_; t_8_ = j_16_*j_3_-j_4_*j_15_; t_9_ = j_16_*f_0_-j_4_*f_4_; t_10_ = j_16_*j_11_-j_13_*j_14_; t_11_ = j_16_*j_12_-j_13_*j_15_; t_12_ = j_16_*f_3_-j_13_*f_4_; t_13_ = t_11_*t_5_-t_8_*t_10_; t_14_ = t_11_*t_6_; t_15_ = t_11_*t_7_; t_16_ = t_11_*t_9_-t_8_*t_12_; t_17_ = t_11_*j_14_-j_15_*t_10_; t_18_ = t_11_*j_16_; t_19_ = t_11_*f_4_-j_15_*t_12_; t_20_ = j_10_*t_13_-t_15_*j_8_; t_21_ = j_10_*t_14_-t_15_*j_9_; t_22_ = j_10_*t_16_-t_15_*f_2_; t_23_ = j_10_*j_5_-j_7_*j_8_; t_24_ = j_10_*j_6_-j_7_*j_9_; t_25_ = j_10_*f_1_-j_7_*f_2_; t_26_ = t_24_*t_20_-t_21_*t_23_; t_27_ = t_24_*t_22_-t_21_*t_25_; t_28_ = t_24_*j_8_-j_9_*t_23_; t_29_ = t_24_*j_10_; t_30_ = t_24_*f_2_-j_9_*t_25_; t_31_ = t_26_*t_24_; t_32_ = t_26_*t_25_-t_23_*t_27_; t_33_ = t_26_*t_29_; t_34_ = t_26_*t_30_-t_28_*t_27_; t_35_ = t_26_*t_11_; t_36_ = t_26_*t_12_-t_10_*t_27_; t_37_ = t_26_*t_18_; t_38_ = t_26_*t_19_-t_17_*t_27_; t_0_ = t_0_-t_27_/t_26_; t_1_ = t_1_-t_32_/t_31_; t_2_ = t_2_-t_34_/t_33_; t_3_ = t_3_-t_36_/t_35_; t_4_ = t_4_-t_38_/t_37_; f_0_ = t_0_-(p_0_+( -1.0*(t_1_*(t_0_*_pp_var_x)-t_2_*_pp_var_y)+ -1.0*(t_3_*(t_0_*_pp_var_z)-t_4_*_pp_var_w))*dt*s_0_); f_1_ = t_1_-(p_1_+ -1.0*(t_1_*(t_0_*_pp_var_x)-t_2_*_pp_var_y)*dt*s_1_); f_2_ = t_2_-(p_2_+(t_1_*(t_0_*_pp_var_x)-t_2_*_pp_var_y)*dt*s_2_); f_3_ = t_3_-(p_3_+ -1.0*(t_3_*(t_0_*_pp_var_z)-t_4_*_pp_var_w)*dt*s_3_); f_4_ = t_4_-(p_4_+(t_3_*(t_0_*_pp_var_z)-t_4_*_pp_var_w)*dt*s_4_); j_0_ = 1.0-( -1.0*(t_1_*_pp_var_x)+ -1.0*(t_3_*_pp_var_z))*dt*s_0_; j_1_ = -( -1.0*(t_0_*_pp_var_x)*dt*s_0_); j_2_ = -( -1.0* -_pp_var_y*dt*s_0_); j_3_ = -( -1.0*(t_0_*_pp_var_z)*dt*s_0_); j_4_ = -( -1.0* -_pp_var_w*dt*s_0_); j_5_ = -( -1.0*(t_1_*_pp_var_x)*dt*s_1_); j_6_ = 1.0- -1.0*(t_0_*_pp_var_x)*dt*s_1_; j_7_ = -( -1.0* -_pp_var_y*dt*s_1_); j_8_ = -(t_1_*_pp_var_x*dt*s_2_); j_9_ = -(t_0_*_pp_var_x*dt*s_2_); j_10_ = 1.0- -_pp_var_y*dt*s_2_; j_11_ = -( -1.0*(t_3_*_pp_var_z)*dt*s_3_); j_12_ = 1.0- -1.0*(t_0_*_pp_var_z)*dt*s_3_; j_13_ = -( -1.0* -_pp_var_w*dt*s_3_); j_14_ = -(t_3_*_pp_var_z*dt*s_4_); j_15_ = -(t_0_*_pp_var_z*dt*s_4_); j_16_ = 1.0- -_pp_var_w*dt*s_4_; t_5_ = j_16_*j_0_-j_4_*j_14_; t_6_ = j_16_*j_1_; t_7_ = j_16_*j_2_; t_8_ = j_16_*j_3_-j_4_*j_15_; t_9_ = j_16_*f_0_-j_4_*f_4_; t_10_ = j_16_*j_11_-j_13_*j_14_; t_11_ = j_16_*j_12_-j_13_*j_15_; t_12_ = j_16_*f_3_-j_13_*f_4_; t_13_ = t_11_*t_5_-t_8_*t_10_; t_14_ = t_11_*t_6_; t_15_ = t_11_*t_7_; t_16_ = t_11_*t_9_-t_8_*t_12_; t_17_ = t_11_*j_14_-j_15_*t_10_; t_18_ = t_11_*j_16_; t_19_ = t_11_*f_4_-j_15_*t_12_; t_20_ = j_10_*t_13_-t_15_*j_8_; t_21_ = j_10_*t_14_-t_15_*j_9_; t_22_ = j_10_*t_16_-t_15_*f_2_; t_23_ = j_10_*j_5_-j_7_*j_8_; t_24_ = j_10_*j_6_-j_7_*j_9_; t_25_ = j_10_*f_1_-j_7_*f_2_; t_26_ = t_24_*t_20_-t_21_*t_23_; t_27_ = t_24_*t_22_-t_21_*t_25_; t_28_ = t_24_*j_8_-j_9_*t_23_; t_29_ = t_24_*j_10_; t_30_ = t_24_*f_2_-j_9_*t_25_; t_31_ = t_26_*t_24_; t_32_ = t_26_*t_25_-t_23_*t_27_; t_33_ = t_26_*t_29_; t_34_ = t_26_*t_30_-t_28_*t_27_; t_35_ = t_26_*t_11_; t_36_ = t_26_*t_12_-t_10_*t_27_; t_37_ = t_26_*t_18_; t_38_ = t_26_*t_19_-t_17_*t_27_; t_0_ = t_0_-t_27_/t_26_; t_1_ = t_1_-t_32_/t_31_; t_2_ = t_2_-t_34_/t_33_; t_3_ = t_3_-t_36_/t_35_; t_4_ = t_4_-t_38_/t_37_; 
_pp_var_A[tid_] = t_0_; _pp_var_B[tid_] = t_1_; _pp_var_C[tid_] = t_2_; _pp_var_d[tid_] = t_3_; _pp_var_e[tid_] = t_4_; } } } // namespace void mechanism_test4_kin_compartment_gpu_init_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); hipLaunchKernelGGL(( init), dim3(grid_dim), dim3(block_dim), 0, 0, *p); if (!p->multiplicity) return; hipLaunchKernelGGL(( multiply), dim3(grid_dim, 5), dim3(block_dim), 0, 0, *p); } void mechanism_test4_kin_compartment_gpu_compute_currents_(arb_mechanism_ppack* p) {} void mechanism_test4_kin_compartment_gpu_advance_state_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); hipLaunchKernelGGL(( advance_state), dim3(grid_dim), dim3(block_dim), 0, 0, *p); } void mechanism_test4_kin_compartment_gpu_write_ions_(arb_mechanism_ppack* p) {} void mechanism_test4_kin_compartment_gpu_post_event_(arb_mechanism_ppack* p) {} void mechanism_test4_kin_compartment_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {} } // namespace testing
f460a2e9236b8a7ff1e817584482ea4f0cc8ea1c.cu
#include <arbor/gpu/gpu_common.hpp> #include <arbor/gpu/math_cu.hpp> #include <arbor/gpu/reduce_by_key.hpp> #include <arbor/mechanism_abi.h> namespace testing { #define PPACK_IFACE_BLOCK \ auto _pp_var_width __attribute__((unused)) = params_.width;\ auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\ auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\ auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\ auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\ auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\ auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\ auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\ auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\ auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\ auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\ auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\ auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\ auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\ auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\ auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\ auto* _pp_var_weight __attribute__((unused)) = params_.weight;\ auto& _pp_var_events __attribute__((unused)) = params_.events;\ auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\ auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\ auto _pp_var_x __attribute__((unused)) = params_.globals[0];\ auto _pp_var_y __attribute__((unused)) = params_.globals[1];\ auto _pp_var_z __attribute__((unused)) = params_.globals[2];\ auto _pp_var_w __attribute__((unused)) = params_.globals[3];\ auto _pp_var_s0 __attribute__((unused)) = params_.globals[4];\ auto _pp_var_s1 __attribute__((unused)) = params_.globals[5];\ auto* _pp_var_A __attribute__((unused)) = params_.state_vars[0];\ auto* _pp_var_B __attribute__((unused)) = params_.state_vars[1];\ auto* _pp_var_C __attribute__((unused)) = params_.state_vars[2];\ auto* _pp_var_d __attribute__((unused)) = params_.state_vars[3];\ auto* _pp_var_e __attribute__((unused)) = params_.state_vars[4];\ //End of IFACEBLOCK namespace { using ::arb::gpu::exprelr; using ::arb::gpu::safeinv; using ::arb::gpu::min; using ::arb::gpu::max; __global__ void init(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { _pp_var_A[tid_] = 4.5; _pp_var_B[tid_] = 6.5999999999999996; _pp_var_C[tid_] = 0.28000000000000003; _pp_var_d[tid_] = 2.0; _pp_var_e[tid_] = 0.; } } __global__ void multiply(arb_mechanism_ppack params_) { PPACK_IFACE_BLOCK; auto tid_ = threadIdx.x + blockDim.x*blockIdx.x; auto idx_ = blockIdx.y; if(tid_<_pp_var_width) { _pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_]; } } __global__ void advance_state(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { auto node_indexi_ = _pp_var_node_index[tid_]; arb_value_type dt = _pp_var_vec_dt[node_indexi_]; arb_value_type t_38_, t_36_, t_35_, t_34_, t_33_, t_31_, t_29_, t_26_, t_25_, t_24_, t_23_, t_21_, t_19_, t_16_, t_15_, t_13_, t_28_, t_11_, j_10_, t_12_, t_9_, t_18_, t_7_, t_6_, t_5_, j_16_, t_3_, j_15_, f_0_, f_4_, p_2_, j_9_, t_2_, f_2_, t_17_, j_7_, j_8_, j_12_, f_1_, t_30_, j_2_, p_1_, t_27_, s_3_, s_2_, s_0_, t_32_, 
t_14_, j_11_, j_3_, j_13_, t_20_, j_0_, p_0_, p_4_, j_5_, t_8_, j_1_, t_4_, j_14_, s_1_, s_4_, t_10_, f_3_, p_3_, j_6_, t_22_, j_4_, t_37_, t_1_, t_0_; p_0_ = _pp_var_A[tid_]; t_0_ = _pp_var_A[tid_]; p_1_ = _pp_var_B[tid_]; t_1_ = _pp_var_B[tid_]; p_2_ = _pp_var_C[tid_]; t_2_ = _pp_var_C[tid_]; p_3_ = _pp_var_d[tid_]; t_3_ = _pp_var_d[tid_]; p_4_ = _pp_var_e[tid_]; t_4_ = _pp_var_e[tid_]; s_0_ = 1.0/_pp_var_s0; s_1_ = 1.0/_pp_var_s0; s_2_ = 1.0/_pp_var_s0; s_3_ = 1.0/_pp_var_s1; s_4_ = 1.0/_pp_var_s1; f_0_ = t_0_-(p_0_+( -1.0*(t_1_*(t_0_*_pp_var_x)-t_2_*_pp_var_y)+ -1.0*(t_3_*(t_0_*_pp_var_z)-t_4_*_pp_var_w))*dt*s_0_); f_1_ = t_1_-(p_1_+ -1.0*(t_1_*(t_0_*_pp_var_x)-t_2_*_pp_var_y)*dt*s_1_); f_2_ = t_2_-(p_2_+(t_1_*(t_0_*_pp_var_x)-t_2_*_pp_var_y)*dt*s_2_); f_3_ = t_3_-(p_3_+ -1.0*(t_3_*(t_0_*_pp_var_z)-t_4_*_pp_var_w)*dt*s_3_); f_4_ = t_4_-(p_4_+(t_3_*(t_0_*_pp_var_z)-t_4_*_pp_var_w)*dt*s_4_); j_0_ = 1.0-( -1.0*(t_1_*_pp_var_x)+ -1.0*(t_3_*_pp_var_z))*dt*s_0_; j_1_ = -( -1.0*(t_0_*_pp_var_x)*dt*s_0_); j_2_ = -( -1.0* -_pp_var_y*dt*s_0_); j_3_ = -( -1.0*(t_0_*_pp_var_z)*dt*s_0_); j_4_ = -( -1.0* -_pp_var_w*dt*s_0_); j_5_ = -( -1.0*(t_1_*_pp_var_x)*dt*s_1_); j_6_ = 1.0- -1.0*(t_0_*_pp_var_x)*dt*s_1_; j_7_ = -( -1.0* -_pp_var_y*dt*s_1_); j_8_ = -(t_1_*_pp_var_x*dt*s_2_); j_9_ = -(t_0_*_pp_var_x*dt*s_2_); j_10_ = 1.0- -_pp_var_y*dt*s_2_; j_11_ = -( -1.0*(t_3_*_pp_var_z)*dt*s_3_); j_12_ = 1.0- -1.0*(t_0_*_pp_var_z)*dt*s_3_; j_13_ = -( -1.0* -_pp_var_w*dt*s_3_); j_14_ = -(t_3_*_pp_var_z*dt*s_4_); j_15_ = -(t_0_*_pp_var_z*dt*s_4_); j_16_ = 1.0- -_pp_var_w*dt*s_4_; t_5_ = j_16_*j_0_-j_4_*j_14_; t_6_ = j_16_*j_1_; t_7_ = j_16_*j_2_; t_8_ = j_16_*j_3_-j_4_*j_15_; t_9_ = j_16_*f_0_-j_4_*f_4_; t_10_ = j_16_*j_11_-j_13_*j_14_; t_11_ = j_16_*j_12_-j_13_*j_15_; t_12_ = j_16_*f_3_-j_13_*f_4_; t_13_ = t_11_*t_5_-t_8_*t_10_; t_14_ = t_11_*t_6_; t_15_ = t_11_*t_7_; t_16_ = t_11_*t_9_-t_8_*t_12_; t_17_ = t_11_*j_14_-j_15_*t_10_; t_18_ = t_11_*j_16_; t_19_ = t_11_*f_4_-j_15_*t_12_; t_20_ = j_10_*t_13_-t_15_*j_8_; t_21_ = j_10_*t_14_-t_15_*j_9_; t_22_ = j_10_*t_16_-t_15_*f_2_; t_23_ = j_10_*j_5_-j_7_*j_8_; t_24_ = j_10_*j_6_-j_7_*j_9_; t_25_ = j_10_*f_1_-j_7_*f_2_; t_26_ = t_24_*t_20_-t_21_*t_23_; t_27_ = t_24_*t_22_-t_21_*t_25_; t_28_ = t_24_*j_8_-j_9_*t_23_; t_29_ = t_24_*j_10_; t_30_ = t_24_*f_2_-j_9_*t_25_; t_31_ = t_26_*t_24_; t_32_ = t_26_*t_25_-t_23_*t_27_; t_33_ = t_26_*t_29_; t_34_ = t_26_*t_30_-t_28_*t_27_; t_35_ = t_26_*t_11_; t_36_ = t_26_*t_12_-t_10_*t_27_; t_37_ = t_26_*t_18_; t_38_ = t_26_*t_19_-t_17_*t_27_; t_0_ = t_0_-t_27_/t_26_; t_1_ = t_1_-t_32_/t_31_; t_2_ = t_2_-t_34_/t_33_; t_3_ = t_3_-t_36_/t_35_; t_4_ = t_4_-t_38_/t_37_; f_0_ = t_0_-(p_0_+( -1.0*(t_1_*(t_0_*_pp_var_x)-t_2_*_pp_var_y)+ -1.0*(t_3_*(t_0_*_pp_var_z)-t_4_*_pp_var_w))*dt*s_0_); f_1_ = t_1_-(p_1_+ -1.0*(t_1_*(t_0_*_pp_var_x)-t_2_*_pp_var_y)*dt*s_1_); f_2_ = t_2_-(p_2_+(t_1_*(t_0_*_pp_var_x)-t_2_*_pp_var_y)*dt*s_2_); f_3_ = t_3_-(p_3_+ -1.0*(t_3_*(t_0_*_pp_var_z)-t_4_*_pp_var_w)*dt*s_3_); f_4_ = t_4_-(p_4_+(t_3_*(t_0_*_pp_var_z)-t_4_*_pp_var_w)*dt*s_4_); j_0_ = 1.0-( -1.0*(t_1_*_pp_var_x)+ -1.0*(t_3_*_pp_var_z))*dt*s_0_; j_1_ = -( -1.0*(t_0_*_pp_var_x)*dt*s_0_); j_2_ = -( -1.0* -_pp_var_y*dt*s_0_); j_3_ = -( -1.0*(t_0_*_pp_var_z)*dt*s_0_); j_4_ = -( -1.0* -_pp_var_w*dt*s_0_); j_5_ = -( -1.0*(t_1_*_pp_var_x)*dt*s_1_); j_6_ = 1.0- -1.0*(t_0_*_pp_var_x)*dt*s_1_; j_7_ = -( -1.0* -_pp_var_y*dt*s_1_); j_8_ = -(t_1_*_pp_var_x*dt*s_2_); j_9_ = -(t_0_*_pp_var_x*dt*s_2_); j_10_ = 1.0- -_pp_var_y*dt*s_2_; j_11_ = -( 
-1.0*(t_3_*_pp_var_z)*dt*s_3_); j_12_ = 1.0- -1.0*(t_0_*_pp_var_z)*dt*s_3_; j_13_ = -( -1.0* -_pp_var_w*dt*s_3_); j_14_ = -(t_3_*_pp_var_z*dt*s_4_); j_15_ = -(t_0_*_pp_var_z*dt*s_4_); j_16_ = 1.0- -_pp_var_w*dt*s_4_; t_5_ = j_16_*j_0_-j_4_*j_14_; t_6_ = j_16_*j_1_; t_7_ = j_16_*j_2_; t_8_ = j_16_*j_3_-j_4_*j_15_; t_9_ = j_16_*f_0_-j_4_*f_4_; t_10_ = j_16_*j_11_-j_13_*j_14_; t_11_ = j_16_*j_12_-j_13_*j_15_; t_12_ = j_16_*f_3_-j_13_*f_4_; t_13_ = t_11_*t_5_-t_8_*t_10_; t_14_ = t_11_*t_6_; t_15_ = t_11_*t_7_; t_16_ = t_11_*t_9_-t_8_*t_12_; t_17_ = t_11_*j_14_-j_15_*t_10_; t_18_ = t_11_*j_16_; t_19_ = t_11_*f_4_-j_15_*t_12_; t_20_ = j_10_*t_13_-t_15_*j_8_; t_21_ = j_10_*t_14_-t_15_*j_9_; t_22_ = j_10_*t_16_-t_15_*f_2_; t_23_ = j_10_*j_5_-j_7_*j_8_; t_24_ = j_10_*j_6_-j_7_*j_9_; t_25_ = j_10_*f_1_-j_7_*f_2_; t_26_ = t_24_*t_20_-t_21_*t_23_; t_27_ = t_24_*t_22_-t_21_*t_25_; t_28_ = t_24_*j_8_-j_9_*t_23_; t_29_ = t_24_*j_10_; t_30_ = t_24_*f_2_-j_9_*t_25_; t_31_ = t_26_*t_24_; t_32_ = t_26_*t_25_-t_23_*t_27_; t_33_ = t_26_*t_29_; t_34_ = t_26_*t_30_-t_28_*t_27_; t_35_ = t_26_*t_11_; t_36_ = t_26_*t_12_-t_10_*t_27_; t_37_ = t_26_*t_18_; t_38_ = t_26_*t_19_-t_17_*t_27_; t_0_ = t_0_-t_27_/t_26_; t_1_ = t_1_-t_32_/t_31_; t_2_ = t_2_-t_34_/t_33_; t_3_ = t_3_-t_36_/t_35_; t_4_ = t_4_-t_38_/t_37_; f_0_ = t_0_-(p_0_+( -1.0*(t_1_*(t_0_*_pp_var_x)-t_2_*_pp_var_y)+ -1.0*(t_3_*(t_0_*_pp_var_z)-t_4_*_pp_var_w))*dt*s_0_); f_1_ = t_1_-(p_1_+ -1.0*(t_1_*(t_0_*_pp_var_x)-t_2_*_pp_var_y)*dt*s_1_); f_2_ = t_2_-(p_2_+(t_1_*(t_0_*_pp_var_x)-t_2_*_pp_var_y)*dt*s_2_); f_3_ = t_3_-(p_3_+ -1.0*(t_3_*(t_0_*_pp_var_z)-t_4_*_pp_var_w)*dt*s_3_); f_4_ = t_4_-(p_4_+(t_3_*(t_0_*_pp_var_z)-t_4_*_pp_var_w)*dt*s_4_); j_0_ = 1.0-( -1.0*(t_1_*_pp_var_x)+ -1.0*(t_3_*_pp_var_z))*dt*s_0_; j_1_ = -( -1.0*(t_0_*_pp_var_x)*dt*s_0_); j_2_ = -( -1.0* -_pp_var_y*dt*s_0_); j_3_ = -( -1.0*(t_0_*_pp_var_z)*dt*s_0_); j_4_ = -( -1.0* -_pp_var_w*dt*s_0_); j_5_ = -( -1.0*(t_1_*_pp_var_x)*dt*s_1_); j_6_ = 1.0- -1.0*(t_0_*_pp_var_x)*dt*s_1_; j_7_ = -( -1.0* -_pp_var_y*dt*s_1_); j_8_ = -(t_1_*_pp_var_x*dt*s_2_); j_9_ = -(t_0_*_pp_var_x*dt*s_2_); j_10_ = 1.0- -_pp_var_y*dt*s_2_; j_11_ = -( -1.0*(t_3_*_pp_var_z)*dt*s_3_); j_12_ = 1.0- -1.0*(t_0_*_pp_var_z)*dt*s_3_; j_13_ = -( -1.0* -_pp_var_w*dt*s_3_); j_14_ = -(t_3_*_pp_var_z*dt*s_4_); j_15_ = -(t_0_*_pp_var_z*dt*s_4_); j_16_ = 1.0- -_pp_var_w*dt*s_4_; t_5_ = j_16_*j_0_-j_4_*j_14_; t_6_ = j_16_*j_1_; t_7_ = j_16_*j_2_; t_8_ = j_16_*j_3_-j_4_*j_15_; t_9_ = j_16_*f_0_-j_4_*f_4_; t_10_ = j_16_*j_11_-j_13_*j_14_; t_11_ = j_16_*j_12_-j_13_*j_15_; t_12_ = j_16_*f_3_-j_13_*f_4_; t_13_ = t_11_*t_5_-t_8_*t_10_; t_14_ = t_11_*t_6_; t_15_ = t_11_*t_7_; t_16_ = t_11_*t_9_-t_8_*t_12_; t_17_ = t_11_*j_14_-j_15_*t_10_; t_18_ = t_11_*j_16_; t_19_ = t_11_*f_4_-j_15_*t_12_; t_20_ = j_10_*t_13_-t_15_*j_8_; t_21_ = j_10_*t_14_-t_15_*j_9_; t_22_ = j_10_*t_16_-t_15_*f_2_; t_23_ = j_10_*j_5_-j_7_*j_8_; t_24_ = j_10_*j_6_-j_7_*j_9_; t_25_ = j_10_*f_1_-j_7_*f_2_; t_26_ = t_24_*t_20_-t_21_*t_23_; t_27_ = t_24_*t_22_-t_21_*t_25_; t_28_ = t_24_*j_8_-j_9_*t_23_; t_29_ = t_24_*j_10_; t_30_ = t_24_*f_2_-j_9_*t_25_; t_31_ = t_26_*t_24_; t_32_ = t_26_*t_25_-t_23_*t_27_; t_33_ = t_26_*t_29_; t_34_ = t_26_*t_30_-t_28_*t_27_; t_35_ = t_26_*t_11_; t_36_ = t_26_*t_12_-t_10_*t_27_; t_37_ = t_26_*t_18_; t_38_ = t_26_*t_19_-t_17_*t_27_; t_0_ = t_0_-t_27_/t_26_; t_1_ = t_1_-t_32_/t_31_; t_2_ = t_2_-t_34_/t_33_; t_3_ = t_3_-t_36_/t_35_; t_4_ = t_4_-t_38_/t_37_; _pp_var_A[tid_] = t_0_; _pp_var_B[tid_] = t_1_; _pp_var_C[tid_] = t_2_; 
_pp_var_d[tid_] = t_3_; _pp_var_e[tid_] = t_4_; } } } // namespace void mechanism_test4_kin_compartment_gpu_init_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); init<<<grid_dim, block_dim>>>(*p); if (!p->multiplicity) return; multiply<<<dim3{grid_dim, 5}, block_dim>>>(*p); } void mechanism_test4_kin_compartment_gpu_compute_currents_(arb_mechanism_ppack* p) {} void mechanism_test4_kin_compartment_gpu_advance_state_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); advance_state<<<grid_dim, block_dim>>>(*p); } void mechanism_test4_kin_compartment_gpu_write_ions_(arb_mechanism_ppack* p) {} void mechanism_test4_kin_compartment_gpu_post_event_(arb_mechanism_ppack* p) {} void mechanism_test4_kin_compartment_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {} } // namespace testing
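The multiply kernel in the pair above is launched with a two-dimensional grid, dim3{grid_dim, 5}: threadIdx.x/blockIdx.x sweep the mechanism width while blockIdx.y selects which of the five state variables (A, B, C, d, e) the block scales by the multiplicity, so one launch covers all rows without a host-side loop. A minimal standalone sketch of that geometry, with illustrative names:

#include <cuda_runtime.h>

// rows[r][i] *= mult[i] for every row r and every column i, in one launch.
__global__ void scale_rows(double *const *rows, const double *mult, int width) {
    int i = threadIdx.x + blockDim.x * blockIdx.x;  // position along the width
    int r = blockIdx.y;                             // which row (state variable)
    if (i < width) rows[r][i] *= mult[i];
}

void launch_scale_rows(double *const *rows, const double *mult,
                       int width, int n_rows) {
    unsigned block_dim = 128;
    unsigned grid_dim = (width + block_dim - 1) / block_dim;
    scale_rows<<<dim3(grid_dim, n_rows), block_dim>>>(rows, mult, width);
}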
0cc2a45af5a24255bbdad717b9f9c567e19e345b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from magmablas/zgerbt_func_batched.cu, normal z -> c, Sun Nov 20 20:20:31 2016 @author Adrien Remy @author Azzam Haidar */ #include "magma_internal.h" #include "cgerbt.h" #define block_height 32 #define block_width 4 #define block_length 256 #define NB 64 /***************************************************************************//** Purpose ------- CPRBT_MVT compute B = UTB to randomize B Arguments --------- @param[in] n INTEGER The number of values of db. n >= 0. @param[in] du COMPLEX array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in,out] db COMPLEX array, dimension (n) The n vector db computed by CGESV_NOPIV_GPU On exit db = du*db @param[in] queue magma_queue_t Queue to execute in. *******************************************************************************/ extern "C" void magmablas_cprbt_mtv_batched( magma_int_t n, magmaFloatComplex *du, magmaFloatComplex **db_array, magma_int_t batchCount, magma_queue_t queue) { magma_int_t threads = block_length; dim3 grid( magma_ceildiv( n, 4*block_length ), batchCount); hipLaunchKernelGGL(( magmablas_capply_transpose_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, du, n, db_array, 0); hipLaunchKernelGGL(( magmablas_capply_transpose_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, du, n+n/2, db_array, n/2); threads = block_length; grid = magma_ceildiv( n, 2*block_length ); hipLaunchKernelGGL(( magmablas_capply_transpose_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, du, 0, db_array, 0); } /***************************************************************************//** Purpose ------- CPRBT_MV compute B = VB to obtain the non randomized solution Arguments --------- @param[in] n INTEGER The number of values of db. n >= 0. @param[in,out] db COMPLEX array, dimension (n) The n vector db computed by CGESV_NOPIV_GPU On exit db = dv*db @param[in] dv COMPLEX array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in] queue magma_queue_t Queue to execute in. *******************************************************************************/ extern "C" void magmablas_cprbt_mv_batched( magma_int_t n, magmaFloatComplex *dv, magmaFloatComplex **db_array, magma_int_t batchCount, magma_queue_t queue) { magma_int_t threads = block_length; dim3 grid ( magma_ceildiv( n, 2*block_length ), batchCount); hipLaunchKernelGGL(( magmablas_capply_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dv, 0, db_array, 0); threads = block_length; grid = magma_ceildiv( n, 4*block_length ); hipLaunchKernelGGL(( magmablas_capply_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dv, n, db_array, 0); hipLaunchKernelGGL(( magmablas_capply_vector_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dv, n+n/2, db_array, n/2); } /***************************************************************************//** Purpose ------- CPRBT randomize a square general matrix using partial randomized transformation Arguments --------- @param[in] n INTEGER The number of columns and rows of the matrix dA. n >= 0. 
@param[in,out] dA COMPLEX array, dimension (n,ldda) The n-by-n matrix dA On exit dA = duT*dA*d_V @param[in] ldda INTEGER The leading dimension of the array dA. LDA >= max(1,n). @param[in] du COMPLEX array, dimension (n,2) The 2*n vector representing the random butterfly matrix U @param[in] dv COMPLEX array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in] queue magma_queue_t Queue to execute in. *******************************************************************************/ extern "C" void magmablas_cprbt_batched( magma_int_t n, magmaFloatComplex **dA_array, magma_int_t ldda, magmaFloatComplex *du, magmaFloatComplex *dv, magma_int_t batchCount, magma_queue_t queue) { du += ldda; dv += ldda; dim3 threads(block_height, block_width); dim3 grid( magma_ceildiv( n, 4*block_height ), magma_ceildiv( n, 4*block_width ), batchCount ); hipLaunchKernelGGL(( magmablas_celementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA_array, 0, ldda, du, 0, dv, 0); hipLaunchKernelGGL(( magmablas_celementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA_array, ldda*n/2, ldda, du, 0, dv, n/2); hipLaunchKernelGGL(( magmablas_celementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA_array, n/2, ldda, du, n/2, dv, 0); hipLaunchKernelGGL(( magmablas_celementary_multiplication_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n/2, dA_array, ldda*n/2+n/2, ldda, du, n/2, dv, n/2); dim3 threads2(block_height, block_width); dim3 grid2( magma_ceildiv( n, 2*block_height ), magma_ceildiv( n, 2*block_width ), batchCount ); hipLaunchKernelGGL(( magmablas_celementary_multiplication_kernel_batched), dim3(grid2), dim3(threads2), 0, queue->cuda_stream() , n, dA_array, 0, ldda, du, -ldda, dv, -ldda); }
0cc2a45af5a24255bbdad717b9f9c567e19e345b.cu
/* -- MAGMA (version 2.2.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2016 @generated from magmablas/zgerbt_func_batched.cu, normal z -> c, Sun Nov 20 20:20:31 2016 @author Adrien Remy @author Azzam Haidar */ #include "magma_internal.h" #include "cgerbt.h" #define block_height 32 #define block_width 4 #define block_length 256 #define NB 64 /***************************************************************************//** Purpose ------- CPRBT_MVT compute B = UTB to randomize B Arguments --------- @param[in] n INTEGER The number of values of db. n >= 0. @param[in] du COMPLEX array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in,out] db COMPLEX array, dimension (n) The n vector db computed by CGESV_NOPIV_GPU On exit db = du*db @param[in] queue magma_queue_t Queue to execute in. *******************************************************************************/ extern "C" void magmablas_cprbt_mtv_batched( magma_int_t n, magmaFloatComplex *du, magmaFloatComplex **db_array, magma_int_t batchCount, magma_queue_t queue) { magma_int_t threads = block_length; dim3 grid( magma_ceildiv( n, 4*block_length ), batchCount); magmablas_capply_transpose_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, du, n, db_array, 0); magmablas_capply_transpose_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, du, n+n/2, db_array, n/2); threads = block_length; grid = magma_ceildiv( n, 2*block_length ); magmablas_capply_transpose_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n, du, 0, db_array, 0); } /***************************************************************************//** Purpose ------- CPRBT_MV compute B = VB to obtain the non randomized solution Arguments --------- @param[in] n INTEGER The number of values of db. n >= 0. @param[in,out] db COMPLEX array, dimension (n) The n vector db computed by CGESV_NOPIV_GPU On exit db = dv*db @param[in] dv COMPLEX array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in] queue magma_queue_t Queue to execute in. *******************************************************************************/ extern "C" void magmablas_cprbt_mv_batched( magma_int_t n, magmaFloatComplex *dv, magmaFloatComplex **db_array, magma_int_t batchCount, magma_queue_t queue) { magma_int_t threads = block_length; dim3 grid ( magma_ceildiv( n, 2*block_length ), batchCount); magmablas_capply_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n, dv, 0, db_array, 0); threads = block_length; grid = magma_ceildiv( n, 4*block_length ); magmablas_capply_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dv, n, db_array, 0); magmablas_capply_vector_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dv, n+n/2, db_array, n/2); } /***************************************************************************//** Purpose ------- CPRBT randomize a square general matrix using partial randomized transformation Arguments --------- @param[in] n INTEGER The number of columns and rows of the matrix dA. n >= 0. @param[in,out] dA COMPLEX array, dimension (n,ldda) The n-by-n matrix dA On exit dA = duT*dA*d_V @param[in] ldda INTEGER The leading dimension of the array dA. LDA >= max(1,n). 
@param[in] du COMPLEX array, dimension (n,2) The 2*n vector representing the random butterfly matrix U @param[in] dv COMPLEX array, dimension (n,2) The 2*n vector representing the random butterfly matrix V @param[in] queue magma_queue_t Queue to execute in. *******************************************************************************/ extern "C" void magmablas_cprbt_batched( magma_int_t n, magmaFloatComplex **dA_array, magma_int_t ldda, magmaFloatComplex *du, magmaFloatComplex *dv, magma_int_t batchCount, magma_queue_t queue) { du += ldda; dv += ldda; dim3 threads(block_height, block_width); dim3 grid( magma_ceildiv( n, 4*block_height ), magma_ceildiv( n, 4*block_width ), batchCount ); magmablas_celementary_multiplication_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA_array, 0, ldda, du, 0, dv, 0); magmablas_celementary_multiplication_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA_array, ldda*n/2, ldda, du, 0, dv, n/2); magmablas_celementary_multiplication_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA_array, n/2, ldda, du, n/2, dv, 0); magmablas_celementary_multiplication_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>(n/2, dA_array, ldda*n/2+n/2, ldda, du, n/2, dv, n/2); dim3 threads2(block_height, block_width); dim3 grid2( magma_ceildiv( n, 2*block_height ), magma_ceildiv( n, 2*block_width ), batchCount ); magmablas_celementary_multiplication_kernel_batched<<< grid2, threads2, 0, queue->cuda_stream() >>>(n, dA_array, 0, ldda, du, -ldda, dv, -ldda); }
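For reference, a hedged summary of the structure behind the kernel sequence above, following the standard partial random butterfly transformation (cf. Baboulin et al.; this restatement is not taken from the MAGMA sources). A size-n butterfly built from two random diagonal matrices R and S, and the depth-2 transform the routine applies, are

\[
B^{\langle n\rangle} = \tfrac{1}{\sqrt{2}}\begin{pmatrix} R & S \\ R & -S \end{pmatrix},
\qquad
U = \operatorname{diag}\!\big(B_1^{\langle n/2\rangle},\, B_2^{\langle n/2\rangle}\big)\, B_0^{\langle n\rangle},
\qquad
A_r = U^{T} A\, V ,
\]

with V built the same way. This is why magmablas_cprbt_batched first applies the elementary multiplication to the four n/2-by-n/2 quadrants (du/dv offsets 0 and n/2, the inner butterflies) and then once to the full n-by-n matrix (offsets -ldda, pointing at the outer butterfly's diagonals).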
c57fdc34543c485b4d0470139925b34a870a3783.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 ssymv_upper.cu is nearly identical to ssymv_upper.cu, just change names and drop MAGMA_S_CONJ. ssymv_kernel_U (upper) in ssymv_upper.cu is very similar to ssymv_kernel_L (lower) in ssymv.cu; diff the two files to compare. @generated from magmablas/zhemv_upper.cu, normal z -> s, Mon Jun 25 18:24:12 2018 @author Mark Gates */ #include "magma_internal.h" #include "commonblas_s.h" #define PRECISION_s #define NB_X 64 #define NB_Y 4 #define bank_shift 33 #define quarter_NB_X 16 #define half_NB_X 32 /***************************************************************************//** Upper case, compute block multiply, work = A*x, for any size n: [ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ] work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ] [ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ] The order is different from the lower case, because the upper case processes a block row from the diagonal to the right, whereas the lower case processes a block row from the diagonal to the left. Uses a 64x4 thread block. For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed). For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles. In both cases, each thread multiplies 4 elements. For rows past the bottom of the matrix, the A pointer is adjusted to be the last valid row of A, which multiple threads will read. Extra rows are ignored when saving results to work. Columns past the right edge are explicitly ignored when loading. x values past the bottom are set to zero, thus, extra columns are zeroed when multiplying. *******************************************************************************/ __global__ void ssymv_kernel_U( int n, float const * __restrict__ A, int lda, float const * __restrict__ x, int incx, float * __restrict__ work) { #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200) // treats sA as 16x64 block #define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ] // treats sA as 32x32 block #define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)]) // 64x4 thread block const int tx = threadIdx.x; const int ty = threadIdx.y; const int blk = blockIdx.x; const int blk_ind = NB_X * blk; const int td = NB_X * ty + tx; // 32x8 thread block const int tx2 = td % half_NB_X; const int ty2 = td / half_NB_X; // If this blk has fewer than NB_X rows, partial is the number of valid rows, // so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid. // Else, partial == 0. int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0); float psum, psum_t; float total = MAGMA_S_ZERO; // sA is used as a 32x32 block, sA32(i,j), // and as a 16x64 block, sA16(i,j), in different parts of the code. // sA must be at least half_NB_X*bank_shift = 32x33 = 1056; // quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056 __shared__ float sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? 
*/ __shared__ float sx_blk[NB_X]; // for x[ blk ] __shared__ float sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag float rA[4]; float psums_t[4]; // -------------------- // load 64x1 block x(blk_ind + 0:63) into sx_blk x += (blk_ind + tx)*incx; // x is x(blk_ind + tx) if ( ty == 0 ) { if ( partial == 0 || tx < partial ) { sx_blk[tx] = x[0]; } else { sx_blk[tx] = MAGMA_S_ZERO; } } // -------------------- // move to block row work += blk*lda; // work is work(0, blk) A += blk_ind; // A is A(blk_ind, 0) A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2) // move to 32x32 diag block A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2) // load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row } #pragma unroll for (int j=0; j < half_NB_X; j += 8) { if ( ty2+j < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } else { sA32(tx2, ty2 + j) = MAGMA_S_ZERO; } } if ( tx2 >= partial ) { A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2) } } else { #pragma unroll for (int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying upper to lower triangle, // as four 32x8 sections in parallel: // columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31 #pragma unroll for (int j=ty2*4; j < ty2*4 + 4; j++) { if ( j > tx2 ) { sA32(j, tx2) = MAGMA_S_CONJ( sA32(tx2, j) ); } } __syncthreads(); // multiply 32x32 diag block * x // each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3) psum = MAGMA_S_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 32x32 diag block, then repeat steps from first diag block A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2) // load 32x32 diag block A[block + 0:31, block + 0:31] into sA if ( partial ) { if ( tx2 + half_NB_X >= partial ) { A = A - (tx2 + half_NB_X) + (partial - 1); } #pragma unroll for (int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } else { sA32(tx2, ty2 + j) = MAGMA_S_ZERO; } } if ( tx2 + half_NB_X >= partial ) { A = A + (tx2 + half_NB_X) - (partial - 1); } } else { #pragma unroll for (int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying upper to lower triangle #pragma unroll for (int j=ty2*4; j < ty2*4 + 4; j++) { if ( j > tx2 ) { sA32(j, tx2) = MAGMA_S_CONJ( sA32(tx2, j) ); } } __syncthreads(); // multiply 32x32 diag block * x psum = MAGMA_S_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, 
tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to off-diag 32x32 block A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2) // load 32x32 block of A into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - (tx2) + (partial - 1); } #pragma unroll for (int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } else { sA32(tx2, ty2 + j) = MAGMA_S_ZERO; } } if ( tx2 >= partial ) { A = A + (tx2) - (partial - 1); } } else { #pragma unroll for (int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // multiply 32x32 block (below diag) psum = MAGMA_S_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum += MAGMA_S_CONJ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2]; } //__syncthreads(); // no sync needed here // multiply transposed 32x32 block (above diag) psum_t = MAGMA_S_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial sums for non-transposed 32x32 block sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // store partial sums for transposed 32x32 block sA32(ty2, tx2) = psum_t; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 64x64 block right of diag in block row, and // switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block A += half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + NB_X + ty2 ) A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind + NB_X ) A += 4*ty*lda + tx; // A is A(blk_ind + tx, blk_ind + 4*ty) // Unlike lower case, don't adjust A here for partial # of rows. // Since block is right of diagonal, it must have all NB rows, // but can have < NB columns, dealt with when loading below. x -= blk_ind*incx; // x is x(tx) // 16x16 thread block const int tx4 = td % quarter_NB_X; const int ty4 = td / quarter_NB_X; // cycle over blocks jj right of diagonal, in block row blk for (int jj=blk+1; jj < gridDim.x; ++jj) { partial = (jj == gridDim.x - 1 ? 
(n % NB_X) : 0); // load 64x1 block x(jj_ind + 0:63) into sx_jj if ( ty == 0 ) { if ( partial == 0 || tx < partial ) { sx_jj[tx] = x[jj*NB_X*incx]; } else { sx_jj[tx] = MAGMA_S_ZERO; } } __syncthreads(); for (int k=0; k < 4; k++) { // load 64x16 block of A into rA, 4 elements per thread, // as four 64x4 sections in parallel: // columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15 if ( partial ) { #pragma unroll for (int j=0; j < 4; j++) { if ( 4*ty + j + k*quarter_NB_X < partial ) { rA[j] = A[j*lda]; } else { rA[j] = MAGMA_S_ZERO; } } } else { #pragma unroll for (int j=0; j < 4; j++) { rA[j] = A[j*lda]; } } // 1) multiply 64x16 block A_{blk,jj} * x_jj // each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k) // 2) multiply 16x64 block A_{blk,jj} * x_blk, // storing each product Aji*xi to sA(j,i) #pragma unroll for (int j=0; j < 4; j++) { total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj sA16(ty*4 + j, tx) = MAGMA_S_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk } __syncthreads(); // do partial row sums for transposed 16x64 result // use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty) // sum sixteen 16x4 sections in parallel: // columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63 psum_t = MAGMA_S_ZERO; #pragma unroll for (int j=0; j < 4; j++) { psum_t += sA16(tx4, ty4*4 + j); } __syncthreads(); // store partial row sums of transposed result, y_jj (locally) psums_t[k] = psum_t; // move right to next 64x16 block A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty) } // already at next 64x64 block // A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty) // store partial row sums of transposed result, y_jj #pragma unroll for (int k=0; k < 4; k++) { sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k]; } __syncthreads(); // sum up partial row sums of transposed result, y_jj, and store final total to workspace // thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16 if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) { int ty4_nb4 = ty4*quarter_NB_X; psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4) + sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4) + sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4) + sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4) + sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4) + sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4) + sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4) + sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4); work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk ) } __syncthreads(); } // store row sums sA16(ty, tx) = total; __syncthreads(); partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0); // sum up final total, y_blk, for row tx if ( ty == 0 && (partial == 0 || tx < partial) ) { total = sA16(0, tx) + sA16(1, tx) + sA16(2, tx) + sA16(3, tx); work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk ) } #endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */ } // end ssymv_kernel_U /***************************************************************************//** Upper case, sum up final results Each block sums one block row; each thread sums one row. 
    On input (for 3 blocks):
           [ (A11*x1 + A12*x2 + A13*x3)   ---                      ---      ]
    work = [ (A12^H*x1)                   (A22*x2 + A23*x3)        ---      ]
           [ (A13^H*x1)                   (A23^H*x2)               (A33*x3) ]

    On output:
              [ (A11*x1 + A12*x2 + A13*x3)         ]
    y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3)     ] + beta*y
              [ (A13^H*x1) + (A23^H*x2) + (A33*x3) ]
*******************************************************************************/
__global__ void
ssymv_kernel_U_sum(
    int n,
    float alpha,
    int lda,
    float beta,
    float       * __restrict__ y, int incy,
    float const * __restrict__ work )
{
    int tx      = threadIdx.x;
    int blk     = blockIdx.x;
    int blk_ind = blk * NB_X;
    int ind     = blk_ind + tx;

    // Don't write outside [0, ..., n)
    if ( ind < n ) {
        work += ind;
        float Ax = MAGMA_S_ZERO;
        for (int j = 0; j <= blk; ++j) {
            Ax += work[0];
            work += lda;
        }
        y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
    }
}
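/*
    Host-side launch sketch -- an illustration only, not MAGMA's actual driver
    (the real wrapper is magmablas_ssymv; the helper name below is
    hypothetical).  It shows how the two kernels above compose:
    ssymv_kernel_U writes the partial products for block row blk into
    column blk of the workspace; ssymv_kernel_U_sum then reduces the block
    columns into y.  The workspace shares A's leading dimension, so it is
    assumed to hold lda * ceil(n/NB_X) floats.
*/
static void
ssymv_upper_sketch(
    int n, float alpha,
    const float *dA, int lda,
    const float *dx, int incx,
    float beta,
    float *dy, int incy,
    float *dwork )                          // lda * ceil(n/NB_X) floats
{
    int blocks = (n + NB_X - 1) / NB_X;     // one thread block per block row
    dim3 grid( blocks, 1, 1 );
    dim3 threads( NB_X, NB_Y, 1 );          // 64x4 for the multiply phase
    dim3 threads_sum( NB_X, 1, 1 );         // 64x1 for the reduction phase

    ssymv_kernel_U    <<< grid, threads     >>>( n, dA, lda, dx, incx, dwork );
    ssymv_kernel_U_sum<<< grid, threads_sum >>>( n, alpha, lda, beta, dy, incy, dwork );
}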
f9b7e7d58970b0791ada83c56b15778d3ff98226.hip
// !!! This is a file automatically generated by hipify!!! #include <fstream> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hiprand/hiprand_kernel.h> #include <iostream> #include <stdio.h> #include <stdlib.h> #include <malloc.h> #include <math.h> #include <iomanip> #include <afx.h> #include <time.h> #include"calRHS.h" #include"sor3D.h" #include"initialIntegration.h" #include "io.h" #include <list> #include <vector> using namespace std; #define weight1 0 #define weight2 0 #define weight 0.8 #define zero 1e-7 #define PI 3.1415926535897932384626433832795 using namespace std; using std::vector; //Get all the files in filepath // Returns false on success, true on error // Return true if the folder exists, false otherwise bool folderExists(CString folderName) //Examine if a folder exist { std::string s = CT2A(folderName); if (_access(s.c_str(), 0) == -1) { //File not found return false; } DWORD attr = GetFileAttributes(folderName); if (!(attr & FILE_ATTRIBUTE_DIRECTORY)) { // File is not a directory return false; } return true; } bool createFolder(CString folderName) { list<std::string> folderLevels; // char* c_str = (char*)folderName.c_str(); if (folderExists(folderName)) { return false; } else { CreateDirectory((LPCTSTR)folderName, NULL); return true; } /*Point to end of the string char* strPtr = &c_str[strlen(c_str) - 1]; // Create a list of the folders which do not currently exist do { if (folderExists(c_str)) { break; } // Break off the last folder name, store in folderLevels list do { strPtr--; } while ((*strPtr != '\\') && (*strPtr != '/') && (strPtr >= c_str)); folderLevels.push_front(string(strPtr + 1)); strPtr[1] = 0; } while (strPtr >= c_str); if (_chdir(c_str)) { return true; } // Create the folders iteratively for (list<std::string>::iterator it = folderLevels.begin(); it != folderLevels.end(); it++) { if (CreateDirectory((LPCTSTR)it->c_str(), NULL) == 0) { return true; } _chdir(it->c_str()); } return false;*/ } void getFiles(string path, vector<string> & f) { FILE* pipe = NULL; string pCmd = "dir /B /S " + string(path); string tmp; char buf[1024]; if (NULL == (pipe = _popen(pCmd.c_str(), "rt"))) { cout << "Shit" << endl; return; } while (!feof(pipe)) { if (fgets(buf, 1024, pipe) != NULL) { tmp = string(buf); tmp.erase(tmp.find_last_not_of("\n") + 1); f.push_back(tmp); } } _pclose(pipe); } __device__ __host__ void ntoijk(long Xsize, long Ysize, long Zsize, long nout, int* i, int*j, int*k) { int iout, jout, kout; if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= 
Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } i[0] = iout; j[0] = jout; k[0] = kout; } __device__ __host__ bool crosspoint(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, float k1, float k2, float k3, int* i, int* j, int* k) { int iout, jout, kout; float r, x, y, z; r = 0; x = 0; y = 0; z = 0; bool flag = 0; /////case 1, vertical to x-axis if (k1 == 0 && k2 != 0 && k3 != 0) { if (iin >= 0 && iin <= Xsize - 1) { ////four crossing point;y=0;y=max;z=0;z=max; r = (0 - jin) / k2; y = 0; z = kin + k3*r; if (z <= Zsize - 1 && z >= 0 && r != 0 && flag == 0)//cross y=0; { iout = iin; jout = 0; kout = floor(z + 0.5); flag = 1; } r = (Ysize - 1 - jin) / k2; y = Ysize - 1; z = kin + k3*r; if (z <= Zsize - 1 && z >= 0 && r != 0 && flag == 0)//y=max; { iout = iin; jout = Ysize - 1; kout = floor(z + 0.5); flag = 1; } r = (0 - kin) / k3; z = 0; y = jin + k2*r; if (y <= Ysize - 1 && y >= 0 && flag == 0 && r != 0)//z=0; { iout = iin; jout = floor(y + 0.5); kout = 0; flag = 1; } r = (Zsize - 1 - kin) / k3; z = Zsize - 1; y = jin + k2*r; if (y <= Ysize - 1 && y >= 0 && flag == 0 && r != 0) { iout = iin; jout = floor(y + 0.5); kout = Zsize - 1; flag = 1; } } if (iin == Xsize - 1 || iin == 0) { int jout1 = jin; int kout1 = kin; r = (0 - jin) / k2; y = 0; float z = kin + k3*r; bool flag2 = 0; if (z <= Zsize - 1 && z >= 0 && r != 0 && flag == 0)//cross y=0; { if (flag2 == 0){ iout = iin; jout = 0; kout = floor(z + 0.5); flag2 = 1; } else { iout = iin; jout1 = 0; kout1 = floor(z + 0.5); } flag = 1; } r = (Ysize - 1 - jin) / k2; y = Ysize - 1; z = kin + k3*r; if (z <= Zsize - 1 && z >= 0 && r != 0 && flag == 0)//y=max; { if (flag2 == 0) { iout = iin; jout = Ysize - 1; kout = floor(z + 0.5); flag2 = 1; } else { iout = iin; jout1 = Ysize - 1; kout1 = floor(z + 0.5); } flag = 1; } r = (0 - kin) / k3; z = 0; y = jin + k2*r; if (y <= Ysize - 1 && y >= 0 && flag == 0 && r != 0)//z=0; { if (flag2 == 0) { iout = iin; jout = floor(y + 0.5); kout = 0; flag2 = 1; } else { iout = iin; jout1 = floor(y + 0.5); kout1 = 0; } flag = 1; } r = (Zsize - 1 - kin) / k3; z = Zsize - 1; y = jin + k2*r; if (y <= Ysize - 1 && y >= 0 && flag == 0 && r != 0) { if (flag2 == 0) { iout = iin; jout = floor(y + 0.5); kout = Zsize - 1; flag2 = 1; } else { iout = iin; jout1 = floor(y + 0.5); kout1 = Zsize - 1; } flag = 1; } if ((jout1 - jin)*(jout1 - jin) + (kout1 - kin)*(kout1 - kin)>(jout - jin)*(jout - jin) + (kout - kin)*(kout - kin)) { jout = jout1; kout = kout1; } } } ///case 2, vertical to y-axis if (k1 != 0 && k2 == 0 && k3 != 0) { if (jin >= 0 && jin <= Ysize - 1) { ////four crossing point r = (0 - iin) / k1; x = 0; z = kin + k3*r;//x=0; if (z <= Zsize - 1 && z >= 0 && flag == 0 && r != 0) { iout = 0; jout = jin; kout = floor(z + 0.5); flag = 1; 
} r = (Xsize - 1 - iin) / k1; x = Xsize - 1; z = kin + k3*r;//x=max if (z <= Zsize - 1 && z >= 0 && flag == 0 && r != 0) { iout = Xsize - 1; jout = jin; kout = floor(z + 0.5); flag = 1; } r = (0 - kin) / k3; z = 0; x = iin + k1*r;//z=0; if (x <= Xsize - 1 && x >= 0 && r != 0 && flag == 0) { iout = floor(x + 0.5); jout = jin; kout = 0; flag = 1; } r = (Zsize - 1 - kin) / k3; z = Zsize - 1; x = iin + k1*r;//z=max; if (x <= Xsize - 1 && x >= 0 && r != 0 && flag == 0) { iout = floor(x + 0.5); jout = jin; kout = Zsize - 1; flag = 1; } } if (jin == 0 || jin == Ysize - 1) { int iout1 = iin; int kout1 = kin; bool flag2 = 0; r = (0 - iin) / k1; x = 0; z = kin + k3*r;//x=0; if (z <= Zsize - 1 && z >= 0 && flag == 0 && r != 0) { if (flag2 == 0) { iout = 0; jout = jin; kout = floor(z + 0.5); flag2 = 1; } else { iout1 = 0; jout = jin; kout1 = floor(z + 0.5); } flag = 1; } r = (Xsize - 1 - iin) / k1; x = Xsize - 1; z = kin + k3*r;//x=max if (z <= Zsize - 1 && z >= 0 && flag == 0 && r != 0) { if (flag2 == 0) { iout = Xsize - 1; jout = jin; kout = floor(z + 0.5); flag2 = 1; } else { iout1 = Xsize - 1; jout = jin; kout1 = floor(z + 0.5); } flag = 1; } r = (0 - kin) / k3; z = 0; x = iin + k1*r;//z=0; if (x <= Xsize - 1 && x >= 0 && r != 0 && flag == 0) { if (flag2 == 0) { iout = floor(x + 0.5); jout = jin; kout = 0; flag2 = 1; } else { iout1 = int(x + 0.5); jout = jin; kout1 = 0; } flag = 1; } r = (Zsize - 1 - kin) / k3; z = Zsize - 1; x = iin + k1*r;//z=max; if (x <= Xsize - 1 && x >= 0 && r != 0 && flag == 0) { if (flag2 == 0) { iout = floor(x + 0.5); jout = jin; kout = Zsize - 1; flag2 = 1; } else { iout1 = floor(x + 0.5); jout = jin; kout1 = Zsize - 1; } flag = 1; } if ((iout1 - iin)*(iout1 - iin) + (kout1 - kin)*(kout1 - kin)>(iout - iin)*(iout - iin) + (kout - kin)*(kout - kin)) { iout = iout1; kout = kout1; } } } ///case 3, vertical to z-axis if (k1 != 0 && k2 != 0 && k3 == 0) { if (kin >= 0 && kin <= Zsize - 1) { ////four crossing point r = (0 - iin) / k1; x = 0; y = jin + k2*r;//x=0; if (y <= Ysize - 1 && y >= 0 && flag == 0 && r != 0) { iout = 0; jout = floor(y + 0.5); kout = kin; flag = 1; } r = (Xsize - 1 - iin) / k1; x = Xsize - 1; y = jin + k2*r;//x=max; if (y <= Ysize - 1 && y >= 0 && r != 0 && flag == 0) { iout = Xsize - 1; jout = floor(y + 0.5); kout = kin; flag = 1; } r = (0 - jin) / k2; y = 0; x = iin + k1*r;//y=0; if (x <= Xsize - 1 && x >= 0 && r != 0 && flag == 0) { iout = floor(x + 0.5); jout = 0; kout = kin; flag = 1; } r = (Ysize - 1 - jin) / k2; y = Ysize - 1; x = iin + k1*r;//y=max; if (x <= Xsize - 1 && x >= 0 && flag == 0 && r != 0) { iout = floor(x + 0.5); jout = Ysize - 1; kout = kin; flag = 1; } } if (kin == 0 || kin == Zsize - 1) { int iout1 = iin; int jout1 = jin; bool flag2 = 0; r = (0 - iin) / k1; x = 0; y = jin + k2*r;//x=0; if (y <= Ysize - 1 && y >= 0 && flag == 0 && r != 0) { if (flag2 == 0) { iout = 0; jout = floor(y + 0.5); kout = kin; flag2 = 1; } else { iout1 = 0; jout1 = floor(y + 0.5); kout = kin; } flag = 1; } r = (Xsize - 1 - iin) / k1; x = Xsize - 1; y = jin + k2*r;//x=max; if (y <= Ysize - 1 && y >= 0 && r != 0 && flag == 0) { if (flag2 == 0) { iout = Xsize - 1; jout = floor(y + 0.5); kout = kin; flag2 = 1; } else { iout1 = Xsize - 1; jout1 = floor(y + 0.5); kout = kin; } flag = 1; } r = (0 - jin) / k2; y = 0; x = iin + k1*r;//y=0; if (x <= Xsize - 1 && x >= 0 && r != 0 && flag == 0) { if (flag == 0) { iout = floor(x + 0.5); jout = 0; kout = kin; flag2 = 1; } else { iout1 = floor(x + 0.5); jout1 = 0; kout = kin; } flag = 1; } r = (Ysize - 1 - jin) / k2; y = 
Ysize - 1; x = iin + k1*r;//y=max; if (x <= Xsize - 1 && x >= 0 && flag == 0 && r != 0) { if (flag2 == 0) { iout = floor(x + 0.5); jout = Ysize - 1; kout = kin; flag2 = 1; } else { iout1 = floor(x + 0.5); jout1 = Ysize - 1; kout = kin; } flag = 1; } if ((iout1 - iin)*(iout1 - iin) + (jout1 - jin)*(jout1 - jin)>(iout - iin)*(iout - iin) + (jout - jin)*(jout - jin)) { iout = iout1; jout = jout1; } } } ///case 4, vertical to plane IJ if (k1 == 0 && k2 == 0 && k3 != 0 && flag == 0) { if (iin <= Xsize - 1 && iin >= 0 && jin <= Ysize - 1 && jin >= 0) { iout = iin; jout = jin; if (kin<Zsize / 2) { kout = Zsize - 1; } else { kout = 0; } flag = 1; } } ///case 5, vertical to IK plane if (k1 == 0 && k2 != 0 && k3 == 0 && flag == 0) { if (iin >= 0 && iin <= Xsize - 1 && kin >= 0 && kin <= Zsize - 1) { iout = iin; kout = kin; if (jin<Ysize / 2) { jout = Ysize - 1; } else { jout = 0; } flag = 1; } } ///case 6, vertical to JK plane if (k1 != 0 && k2 == 0 && k3 == 0 && flag == 0) { if (jin >= 0 && jin <= Ysize - 1 && kin >= 0 && kin <= Zsize - 1) { jout = jin; kout = kin; if (iin<Xsize / 2) { iout = Xsize - 1; } else { iout = 0; } flag = 1; } } /// case 7, purely inclined if (k1 != 0 && k2 != 0 && k3 != 0 && flag == 0) { /// six crossing point r = (0 - iin) / k1; x = 0; y = jin + k2*r; z = kin + k3*r;//x=0 if (y <= Ysize - 1 && y >= 0 && z <= Zsize - 1 && z >= 0 && flag == 0 && r != 0) { iout = 0; jout = floor(y + 0.5); kout = floor(z + 0.5); flag = 1; } r = (Xsize - 1 - iin) / k1; x = Xsize - 1; y = jin + k2*r; z = kin + k3*r;//x=max if (y <= Ysize - 1 && y >= 0 && z <= Zsize - 1 && z >= 0 && flag == 0 && r != 0) { iout = Xsize - 1; jout = floor(y + 0.5); kout = floor(z + 0.5); flag = 1; } r = (0 - jin) / k2; x = iin + k1*r; y = 0; z = kin + k3*r;//y=0; if (x <= Xsize - 1 && x >= 0 && z <= Zsize - 1 && z >= 0 && flag == 0 && r != 0) { iout = floor(x + 0.5); jout = 0; kout = floor(z + 0.5); flag = 1; } r = (Ysize - 1 - jin) / k2; x = iin + k1*r; y = Ysize - 1; z = kin + k3*r;//y=max if (x <= Xsize - 1 && x >= 0 && z <= Zsize - 1 && z >= 0 && flag == 0 && r != 0) { iout = floor(x + 0.5); jout = Ysize - 1; kout = floor(z + 0.5); flag = 1; } r = (0 - kin) / k3; x = iin + k1*r; y = jin + k2*r; z = 0;//z=0; if (x <= Xsize - 1 && x >= 0 && y <= Ysize - 1 && y >= 0 && flag == 0 && r != 0) { iout = floor(x + 0.5); jout = floor(y + 0.5); kout = 0; flag = 1; } r = (Zsize - 1 - kin) / k3; x = iin + k1*r; y = jin + k2*r; z = Zsize - 1;//z=max if (x <= Xsize - 1 && x >= 0 && y <= Ysize - 1 && y >= 0 && flag == 0 && r != 0) { iout = floor(x + 0.5); jout = floor(y + 0.5); kout = Zsize - 1; flag = 1; } } if (flag == 1) { i[0] = iout; j[0] = jout; k[0] = kout; } else { i[0] = iin; j[0] = jin; k[0] = kin; } return flag; } __device__ __host__ bool cross2point(long Xsize, long Ysize, long Zsize, int *iin, int *jin, int *kin, float xin, float yin, float zin, float k1, float k2, float k3, int* iout, int* jout, int* kout) { float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; iin[0] = Xsize; jin[0] = Ysize; kin[0] = Zsize; iout[0] = Xsize; jout[0] = Ysize; kout[0] = Zsize; // printf("%f %f %f %f",xin,yin,zin,sqrt(xin*xin+yin*yin+zin*zin)); if (k1 == 0 && k2 != 0 && k3 != 0) { if (xin>-center_x&&xin<center_x) { ////four crossing point;y=0;y=max;z=0;z=max; float r = (-center_y - yin) / k2; float y1 = -center_y; float z1 = zin + k3*r; r = (center_y - yin) / k2; float y2 = center_y; float z2 = zin + k3*r; r = (-center_z - zin) / k3; float z3 = -center_z; float y3 = yin + k2*r; 
r = (center_z - zin) / k3; float z4 = center_z; float y4 = yin + k2*r; bool flag = 0; if (z1 <= center_z&&z1 >= -center_z&&flag == 0)//cross y=0; { if (flag == 0) { iin[0] = floor(xin + center_x + 0.5); jin[0] = 0; kin[0] = floor(z1 + center_z + 0.5); } if (flag == 1) { iout[0] = floor(xin + center_x + 0.5); jout[0] = 0; kout[0] = floor(z1 + center_z + 0.5); } flag = 1; } if (z2 <= center_z&&z2 >= -center_z)//y=max; { if (flag == 0) { iin[0] = floor(xin + center_x + 0.5); jin[0] = Ysize - 1; kin[0] = floor(z2 + center_z + 0.5); } if (flag == 1) { iout[0] = floor(xin + center_x + 0.5); jout[0] = Ysize - 1; kout[0] = floor(z2 + center_z + 0.5); } flag = 1; } if (y3 <= center_y&&y3 >= -center_y)//z=0; { if (flag == 0) { iin[0] = floor(xin + center_x + 0.5); jin[0] = floor(y3 + center_y + 0.5); kin[0] = 0; } if (flag == 1) { iout[0] = floor(xin + center_x + 0.5); jout[0] = floor(y3 + center_y + 0.5); kout[0] = 0; } flag = 1; } if (y4 <= center_y&&y4 >= -center_y) { if (flag == 0) { iin[0] = floor(xin + center_x + 0.5); jin[0] = floor(y4 + center_y + 0.5); kin[0] = Zsize - 1; } if (flag == 1) { iout[0] = floor(xin + center_x + 0.5); jout[0] = floor(y4 + center_y + 0.5); kout[0] = Zsize - 1; } } //sorting intersection point by in, out order if (flag != 0) { if ((jout[0] - jin[0])*k2 + (kout[0] - kin[0])*k3<0) { int temp; temp = jin[0]; jin[0] = jout[0]; jout[0] = temp; temp = kin[0]; kin[0] = kout[0]; kout[0] = temp; } } return true; } } ///case 2, vertical to y-axis if (k1 != 0 && k2 == 0 && k3 != 0) { if (yin>-center_y&&yin<center_y) { ////four crossing point float r = (-center_x - xin) / k1; float x1 = -center_x; float z1 = zin + k3*r;//x=0; r = (center_x - xin) / k1; float x2 = center_x; float z2 = zin + k3*r;//x=max r = (-center_z - zin) / k3; float z3 = -center_z; float x3 = xin + k1*r;//z=0; r = (center_z - zin) / k3; float z4 = center_z; float x4 = xin + k1*r;//z=max; bool flag = 0; if (z1 <= center_z&&z1 >= -center_z) { if (flag == 0) { iin[0] = 0; jin[0] = floor(yin + center_y + 0.5); kin[0] = floor(z1 + center_z + 0.5); } if (flag == 1) { iout[0] = 0; jout[0] = floor(yin + center_y + 0.5); kout[0] = floor(z1 + center_z + 0.5); } flag = 1; } if (z2 <= center_z&&z2 >= -center_z) { if (flag == 0) { iin[0] = Xsize - 1; jin[0] = floor(yin + center_y + 0.5); kin[0] = floor(z2 + center_z + 0.5); } if (flag == 1) { iout[0] = Xsize - 1; jout[0] = floor(yin + center_y + 0.5); kout[0] = floor(z2 + center_z + 0.5); } flag = 1; } if (x3 <= center_x&&x3 >= -center_x) { if (flag == 0) { iin[0] = floor(x3 + center_x + 0.5); jin[0] = floor(yin + center_y + 0.5); kin[0] = 0; } if (flag == 1) { iout[0] = floor(x3 + center_x + 0.5); jout[0] = floor(yin + center_y + 0.5); kout[0] = 0; } flag = 1; } if (x4 <= center_x&&x4 >= -center_x) { if (flag == 0) { iin[0] = floor(x4 + center_x + 0.5); jin[0] = floor(yin + center_y + 0.5); kin[0] = Zsize - 1; } if (flag == 1) { iout[0] = floor(x4 + center_x + 0.5); jout[0] = floor(yin + center_y + 0.5); kout[0] = Zsize - 1; } flag = 1; } //sorting intersection point by in, out order if (flag != 0) { if ((iout[0] - iin[0])*k1 + (kout[0] - kin[0])*k3<0) { int temp; temp = iin[0]; iin[0] = iout[0]; iout[0] = temp; temp = kin[0]; kin[0] = kout[0]; kout[0] = temp; } } return true; } } ///case 3, vertical to z-axis if (k1 != 0 && k2 != 0 && k3 == 0) { if (zin>-center_z&&zin<center_z) { ////four crossing point float r = (-center_x - xin) / k1; float x1 = -center_x; float y1 = yin + k2*r;//x=0; r = (center_x - xin) / k1; float x2 = center_x; float y2 = yin + k2*r;//x=max; r = 
(-center_y - zin) / k2; float y3 = -center_y; float x3 = xin + k1*r;//y=0; r = (center_y - zin) / k2; float y4 = center_y; float x4 = xin + k1*r;//y=max; bool flag = 0; if (y1 <= center_y&&y1 >= -center_y) { if (flag == 0) { iin[0] = 0; jin[0] = floor(y1 + center_y + 0.5); kin[0] = floor(zin + center_z + 0.5); } if (flag == 1) { iout[0] = 0; jout[0] = floor(y1 + center_y + 0.5); kout[0] = floor(zin + center_z + 0.5); } flag = 1; } if (y2 <= center_y&&y2 >= -center_y) { if (flag == 0) { iin[0] = Xsize - 1; jin[0] = floor(y2 + center_y + 0.5); kin[0] = floor(zin + center_z + 0.5); } if (flag == 1) { iout[0] = Xsize - 1; jout[0] = floor(y2 + center_y + 0.5); kout[0] = floor(zin + center_z + 0.5); } flag = 1; } if (x3 <= center_x&&x3 >= -center_x) { if (flag == 0) { iin[0] = floor(x3 + center_x + 0.5); jin[0] = 0; kin[0] = floor(zin + center_z + 0.5); } if (flag == 1) { iout[0] = floor(x3 + center_x + 0.5); jout[0] = 0; kout[0] = floor(zin + center_z + 0.5); } flag = 1; } if (x4 <= center_x&&x4 >= -center_x) { if (flag == 0) { iin[0] = floor(x4 + center_x + 0.5); jin[0] = Ysize - 1; kin[0] = floor(zin + center_z + 0.5); } if (flag == 1) { iout[0] = floor(x4 + center_x + 0.5); jout[0] = Ysize - 1; kout[0] = floor(zin + center_z + 0.5); } flag = 1; } //sorting intersection point by in, out order if (flag != 0) { if ((iout[0] - iin[0])*k1 + (jout[0] - jin[0])*k2<0) { int temp; temp = iin[0]; iin[0] = iout[0]; iout[0] = temp; temp = jin[0]; jin[0] = jout[0]; jout[0] = temp; } } return true; } } ///case 4, vertical to plane IJ if (abs(k1)<zero&&abs(k2)<zero&&abs(k3) >= zero) { if (xin<center_x&&xin>-center_x&&yin<center_y&&yin>-center_y) { iin[0] = floor(xin + center_x + 0.5); iout[0] = iin[0]; jin[0] = floor(yin + center_y + 0.5); jout[0] = jin[0]; if (k3>0) { kin[0] = 0; kout[0] = Zsize - 1; } else{ kin[0] = Zsize - 1; kout[0] = 0; } return true; } } ///case 5, vertical to IK plane if (abs(k1)<zero&&abs(k2) >= zero&&abs(k3)<zero) { if (xin>-center_x&&xin<center_x&&zin>-center_z&&zin<center_z) { iin[0] = floor(xin + center_x + 0.5); iout[0] = iin[0]; kin[0] = floor(zin + center_z + 0.5); kout[0] = kin[0]; if (k2>0) { jout[0] = Ysize - 1; jin[0] = 0; } else { jin[0] = Ysize - 1; jout[0] = 0; } return true; } } ///case 6, vertical to JK plane if (abs(k1) >= zero&&abs(k2)<zero&&abs(k3)<zero) { if (yin>-center_y&&yin<center_y&&zin>-center_z&&zin<center_z) { jin[0] = floor(yin + center_y + 0.5); jout[0] = jin[0]; kin[0] = floor(zin + center_z + 0.5); kout[0] = kin[0]; if (k1>0) { iout[0] = Xsize - 1; iin[0] = 0; } else { iin[0] = Xsize - 1; iout[0] = 0; } } return true; } /// case 7, purely inclined if (abs(k1) >= zero&&abs(k2) >= zero&&abs(k3) >= zero) { /// six crossing point float r; float x1, x2, x3, x4, x5, x6; float y1, y2, y3, y4, y5, y6; float z1, z2, z3, z4, z5, z6; r = (-center_x - xin) / k1; x1 = -center_x; y1 = yin + k2*r; z1 = zin + k3*r;//x=0 r = (center_x - xin) / k1; x2 = center_x; y2 = yin + k2*r; z2 = zin + k3*r;//x=max r = (-center_y - yin) / k2; x3 = xin + k1*r; y3 = -center_y; z3 = zin + k3*r;//y=0; r = (center_y - yin) / k2; x4 = xin + k1*r; y4 = center_y; z4 = zin + k3*r;//y=max r = (-center_z - zin) / k3; x5 = xin + k1*r; y5 = yin + k2*r; z5 = -center_z;//z=0; r = (center_z - zin) / k3; x6 = xin + k1*r; y6 = yin + k2*r; z6 = center_z;//z=max bool flag = 0; if (y1 <= center_y&&y1 >= -center_y&&z1 <= center_z&&z1 >= -center_z) { if (flag == 0) { iin[0] = 0; jin[0] = floor(y1 + center_y + 0.5); kin[0] = floor(z1 + center_z + 0.5); } if (flag == 1) { iout[0] = 0; jout[0] = floor(y1 
+ center_y + 0.5); kout[0] = floor(z1 + center_z + 0.5); } flag = 1; } if (y2 <= center_y&&y2 >= -center_y&&z2 <= center_z&&z2 >= -center_z) { if (flag == 0) { iin[0] = Xsize - 1; jin[0] = floor(y2 + center_y + 0.5); kin[0] = floor(z2 + center_z + 0.5); } if (flag == 1) { iout[0] = Xsize - 1; jout[0] = floor(y2 + center_y + 0.5); kout[0] = floor(z2 + center_z + 0.5); } flag = 1; } if (x3 <= center_x&&x3 >= -center_x&&z3 <= center_z&&z3 >= -center_z) { if (flag == 0) { iin[0] = floor(x3 + center_x + 0.5); jin[0] = 0; kin[0] = floor(z3 + center_z + 0.5); } if (flag == 1) { iout[0] = floor(x3 + center_x + 0.5); jout[0] = 0; kout[0] = floor(z3 + center_z + 0.5); } flag = 1; } if (x4 <= center_x&&x4 >= -center_x&&z4 <= center_z&&z4 >= -center_z) { if (flag == 0) { iin[0] = floor(x4 + center_x + 0.5); jin[0] = Ysize - 1; kin[0] = floor(z4 + center_z + 0.5); } if (flag == 1) { iout[0] = floor(x4 + center_x + 0.5); jout[0] = Ysize - 1; kout[0] = floor(z4 + center_z + 0.5); } flag = 1; } if (x5 <= center_x&&x5 >= -center_x&&y5 <= center_y&&y5 >= -center_y) { if (flag == 0) { iin[0] = floor(x5 + center_x + 0.5); jin[0] = floor(y5 + center_y + 0.5); kin[0] = 0; } if (flag == 1) { iout[0] = floor(x5 + center_x + 0.5); jout[0] = floor(y5 + center_y + 0.5); kout[0] = 0; } flag = 1; } if (x6 <= center_x&&x6 >= -center_x&&y6 <= center_y&&y6 >= -center_y) { if (flag == 0) { iin[0] = floor(x6 + center_x + 0.5); jin[0] = floor(y6 + center_y + 0.5); kin[0] = Zsize - 1; } if (flag == 1) { iout[0] = floor(x6 + center_x + 0.5); jout[0] = floor(y6 + center_y + 0.5); kout[0] = Zsize - 1; } flag = 1; } //sorting intersection point by in, out order if ((iout[0] - iin[0])*k1 + (jout[0] - jin[0])*k2 + (kout[0] - kin[0])*k3<0) { int temp; temp = iin[0]; iin[0] = iout[0]; iout[0] = temp; temp = jin[0]; jin[0] = jout[0]; jout[0] = temp; temp = kin[0]; kin[0] = kout[0]; kout[0] = temp; } return true; } return false; } __device__ __host__ bool crosspoint2d(long Xsize, long Ysize, int iin, int jin, float k1, float k2, int *i, int *j) { int iout, jout; bool flag = 0; if (k1 == 0 && k2 != 0) { iout = iin; if (jin == 0) { jout = Ysize - 1; } else { jout = 0; } if (iout == 0 || iout == Xsize - 1) { if (jin<Ysize / 2) { jout = Ysize - 1; } else { jout = 0; } } flag = 1; } if (k1 != 0 && k2 == 0) { jout = jin; if (iin == 0) { iout = Xsize - 1; } else { iout = 0; } if (jout == 0 || jout == Ysize - 1) { if (iin<Xsize / 2) { jout = Xsize - 1; } else { jout = 0; } } flag = 1; } if (k1 != 0 && k2 != 0) { float r, x, y; r = (0 - iin) / k1; y = k2*r + jin; if (y >= 0 && y <= Ysize - 1 && r != 0 && flag == 0) { iout = 0; jout = int(y + 0.5); flag = 1; } r = (Xsize - 1 - iin) / k1; y = k2*r + jin; if (y >= 0 && y <= Ysize - 1 && r != 0 && flag == 0) { iout = Xsize - 1; jout = int(y + 0.5); flag = 1; } r = (0 - jin) / k2; x = k1*r + iin; if (x >= 0 && x <= Xsize - 1 && r != 0 && flag == 0) { jout = 0; iout = int(x + 0.5); flag = 1; } r = (Ysize - 1 - jin) / k2; x = k1*r + iin; if (x >= 0 && x <= Xsize - 1 && r != 0 && flag == 0) { jout = Ysize - 1; iout = int(x + 0.5); flag = 1; } } if (flag == 1) { i[0] = iout; j[0] = jout; } return flag; } __device__ __host__ void ntoij2d(long Xsize, long Ysize, int nin, int *i, int *j) { int iin, jin; if (nin <= Xsize - 1) { iin = nin; jin = 0; } if (nin>Xsize - 1 && nin <= Xsize + Ysize - 2) { iin = Xsize - 1; jin = nin - (Xsize - 1); } if (nin>Xsize + Ysize - 2 && nin <= 2 * Xsize + Ysize - 3) { iin = Xsize - 1 - (nin - (Xsize + Ysize - 2)); jin = Ysize - 1; } if (nin>2 * Xsize + Ysize - 3) { iin = 0; 
jin = Ysize - 1 - (nin - (2 * Xsize + Ysize - 3)); } i[0] = iin; j[0] = jin; } __device__ __host__ void ij2dton(long Xsize, long Ysize, int *n, int i, int j) { if (j == 0) { n[0] = i; } if (i == Xsize - 1) { n[0] = i + j; } if (j == Ysize - 1) { n[0] = Xsize - 1 + Ysize - 1 + (Xsize - 1 - i); } if (i == 0 && j != 0) { n[0] = Xsize - 1 + Ysize - 1 + Xsize - 1 + (Ysize - 1 - j); } } __device__ __host__ float bodyIntegralFromCenter(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, int iout, int jout, int kout, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float k1, k2, k3; ilast = iin; jlast = jin; klast = kin; k1 = iout - jin; k2 = jout - jin; k3 = kout - kin; //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pint = 0; bool flag = 0; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path float r, d1, d2, d3, x, y, z; r = k1*inext1 - iin*k1 + k2*jnext1 - k2*jin + k3*knext1 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r; d1 = sqrt((x - inext1)*(x - inext1) + (y - jnext1)*(y - jnext1) + (z - knext1)*(z - knext1)); r = k1*inext2 - iin*k1 + k2*jnext2 - k2*jin + k3*knext2 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r; d2 = sqrt((x - inext2)*(x - inext2) + (y - jnext2)*(y - jnext2) + (z - knext2)*(z - knext2)); r = k1*inext3 - iin*k1 + k2*jnext3 - k2*jin + k3*knext3 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r; d3 = sqrt((x - inext3)*(x - inext3) + (y - jnext3)*(y - jnext3) + (z - knext3)*(z - knext3)); //////End of calculation distance/////////////// //path 1 flag = 0; if (d1 <= d2&&d1 <= d3&&inext1 >= 0 && inext1<Xsize) { pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); ilast = inext1; flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 >= 0 && jnext2<Ysize) { pint += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); jlast = jnext2; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { pint += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); klast = knext3; flag = 1; } if (flag == 0) { printf("Error! 
Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); return pint; } __device__ __host__ float bodyIntegral(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pcountinner) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pint = 0; bool flag = 0; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path //why we are not following the real integration path generated???????? 
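// Step selection: each axis-aligned candidate c = (inext,jnext,knext) is
// scored by its perpendicular distance to the ideal ray through (x,y,z) with
// direction (k1,k2,k3).  Below, r = dot(c - (x,y,z), k) is the parameter of
// the foot of the perpendicular (this assumes k is a unit vector, which the
// ray generation is expected to provide) and d = |c - ((x,y,z) + r*k)|.
// Walking to the nearest candidate traces a 6-connected staircase along the
// straight integration path, and each unit step adds the trapezoidal pressure
// increment -density * dl * 0.5 * (a_next + a_last) built from the material
// acceleration fields DuDt, DvDt, DwDt.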
float r, d1, d2, d3, xt, yt, zt; r = k1*inext1 - x*k1 + k2*jnext1 - k2*y + k3*knext1 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d1 = sqrt((xt - inext1)*(xt - inext1) + (yt - jnext1)*(yt - jnext1) + (zt - knext1)*(zt - knext1)); r = k1*inext2 - x*k1 + k2*jnext2 - k2*y + k3*knext2 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d2 = sqrt((xt - inext2)*(xt - inext2) + (yt - jnext2)*(yt - jnext2) + (zt - knext2)*(zt - knext2)); r = k1*inext3 - x*k1 + k2*jnext3 - k2*y + k3*knext3 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d3 = sqrt((xt - inext3)*(xt - inext3) + (yt - jnext3)*(yt - jnext3) + (zt - knext3)*(zt - knext3)); //////End of calculation distance/////////////// //path 1 flag = 0; if (d1 <= d2&&d1 <= d3&&inext1 >= 0 && inext1<Xsize) { pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); ilast = inext1; //pcountinner[inext1+jnext1*Xsize+knext1*Xsize*Ysize]++; flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 >= 0 && jnext2<Ysize) { pint += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); jlast = jnext2; //pcountinner[inext2+jnext2*Xsize+knext2*Xsize*Ysize]++; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { pint += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); klast = knext3; //pcountinner[inext3+jnext3*Xsize+knext3*Xsize*Ysize]++; flag = 1; } if (flag == 0) { printf("Error! Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); return pint; } __device__ __host__ float bodyIntegralWeighted(long Xsize, long Ysize, long Zsize, int nin, int nout, int n, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* curl, float* pcountinner, float* pint, float*pcount, float*pweight) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pinttmp = 0; float curltmp = 0; float counttmp = 0; bool flag = 0; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if 
(ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path //why we are not following the real integration path generated???????? float r, d1, d2, d3, xt, yt, zt; r = k1*inext1 - x*k1 + k2*jnext1 - k2*y + k3*knext1 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d1 = sqrt((xt - inext1)*(xt - inext1) + (yt - jnext1)*(yt - jnext1) + (zt - knext1)*(zt - knext1)); r = k1*inext2 - x*k1 + k2*jnext2 - k2*y + k3*knext2 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d2 = sqrt((xt - inext2)*(xt - inext2) + (yt - jnext2)*(yt - jnext2) + (zt - knext2)*(zt - knext2)); r = k1*inext3 - x*k1 + k2*jnext3 - k2*y + k3*knext3 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d3 = sqrt((xt - inext3)*(xt - inext3) + (yt - jnext3)*(yt - jnext3) + (zt - knext3)*(zt - knext3)); //////End of calculation distance/////////////// //path 1 flag = 0; if (d1 <= d2&&d1 <= d3&&inext1 >= 0 && inext1<Xsize) { pinttmp += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); ilast = inext1; curltmp += curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]; counttmp++; //pcountinner[inext1+jnext1*Xsize+knext1*Xsize*Ysize]++; flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 >= 0 && jnext2<Ysize) { pinttmp += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); jlast = jnext2; curltmp += curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]; counttmp++; //pcountinner[inext2+jnext2*Xsize+knext2*Xsize*Ysize]++; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { pinttmp += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); klast = knext3; curltmp += curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]; counttmp++; //pcountinner[inext3+jnext3*Xsize+knext3*Xsize*Ysize]++; flag = 1; } if (flag == 0) { printf("Error! 
Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); curltmp = curltmp / counttmp; if (curltmp != 0) { pweight[nin + nout*n] += 1 / curltmp; pcount[nin + nout*n]++; pint[nin + nout*n] += pinttmp; } return pinttmp; } __device__ __host__ float bodyIntegralWeightedMiniCurl(long Xsize, long Ysize, long Zsize, int nin, int nout, int n, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* curl, float* pcountinner, float* pint, float*pcount) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pinttmp = 0; float curltmp = 0; float counttmp = 0; bool flag = 0; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path //why we are not following the real integration path generated???????? 
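// Step selection here differs from bodyIntegral/bodyIntegralWeighted: the
// geometric-distance computation is disabled (see the commented-out block
// below) and d1..d3 instead hold the stored curl at each candidate voxel
// whose linear index is in range (the others stay at 1e10, so they are never
// chosen).  The walk therefore steps to the neighbor with the smallest curl,
// steering the path through low-curl regions where the measured material
// acceleration is presumably more reliable; the segment's mean curl then
// serves as an inverse weight for this path (1/curltmp is accumulated into
// pcount alongside pint).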
float r, d1, d2, d3, xt, yt, zt; /*r=k1*inext1-x*k1+k2*jnext1-k2*y+k3*knext1-k3*z; xt=x+k1*r;yt=y+k2*r;zt=z+k3*r; d1=sqrt((xt-inext1)*(xt-inext1)+(yt-jnext1)*(yt-jnext1)+(zt-knext1)*(zt-knext1)); r=k1*inext2-x*k1+k2*jnext2-k2*y+k3*knext2-k3*z; xt=x+k1*r;yt=y+k2*r;zt=z+k3*r; d2=sqrt((xt-inext2)*(xt-inext2)+(yt-jnext2)*(yt-jnext2)+(zt-knext2)*(zt-knext2)); r=k1*inext3-x*k1+k2*jnext3-k2*y+k3*knext3-k3*z; xt=x+k1*r;yt=y+k2*r;zt=z+k3*r; d3=sqrt((xt-inext3)*(xt-inext3)+(yt-jnext3)*(yt-jnext3)+(zt-knext3)*(zt-knext3));*/ //////End of calculation distance/////////////// ///***calculation of curl in three directions***////////////////// d1 = 1e10; d2 = 1e10; d3 = 1e10; if (inext1 + jnext1*Xsize + knext1*Xsize*Ysize >= 0 && inext1 + jnext1*Xsize + knext1*Xsize*Ysize<Xsize*Ysize*Zsize) { d1 = curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]; } if (inext2 + jnext2*Xsize + knext2*Xsize*Ysize >= 0 && inext2 + jnext2*Xsize + knext2*Xsize*Ysize<Xsize*Ysize*Zsize) { d2 = curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]; } if (inext3 + jnext3*Xsize + knext3*Xsize*Ysize >= 0 && inext3 + jnext3*Xsize + knext3*Xsize*Ysize<Xsize*Ysize*Zsize) { d3 = curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]; } //path 1 flag = 0; if (d1 <= d2&&d1 <= d3&&inext1 >= 0 && inext1<Xsize) { pinttmp += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); ilast = inext1; curltmp += curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]; counttmp++; //pcountinner[inext1+jnext1*Xsize+knext1*Xsize*Ysize]++; flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 >= 0 && jnext2<Ysize) { pinttmp += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); jlast = jnext2; curltmp += curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]; counttmp++; //pcountinner[inext2+jnext2*Xsize+knext2*Xsize*Ysize]++; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { pinttmp += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); klast = knext3; curltmp += curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]; counttmp++; //pcountinner[inext3+jnext3*Xsize+knext3*Xsize*Ysize]++; flag = 1; } if (flag == 0) { printf("Error! 
Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); curltmp = curltmp / counttmp; if (curltmp != 0) { pcount[nin + nout*n] += 1 / curltmp; pint[nin + nout*n] += pinttmp; } return pinttmp; } __device__ __host__ float bodyIntegralInner(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float *p, float*pn, float*pcountinner) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pint = 0; bool flag = 0; pn[ilast + jlast*Xsize + klast*Xsize*Ysize] += p[ilast + jlast*Xsize + klast*Xsize*Ysize]; pcountinner[ilast + jlast*Xsize + klast*Xsize*Ysize] += 1; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path float r, d1, d2, d3, xt, yt, zt; r = k1*inext1 - x*k1 + k2*jnext1 - k2*y + k3*knext1 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d1 = sqrt((xt - inext1)*(xt - inext1) + (yt - jnext1)*(yt - jnext1) + (zt - knext1)*(zt - knext1)); r = k1*inext2 - x*k1 + k2*jnext2 - k2*y + k3*knext2 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d2 = sqrt((xt - inext2)*(xt - inext2) + (yt - jnext2)*(yt - jnext2) + (zt - knext2)*(zt - knext2)); r = k1*inext3 - x*k1 + k2*jnext3 - k2*y + k3*knext3 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d3 = sqrt((xt - inext3)*(xt - inext3) + (yt - jnext3)*(yt - jnext3) + (zt - knext3)*(zt - knext3)); //////End of calculation distance/////////////// //path 1 flag = 0; if (d1 <= d2&&d1 <= d3&&inext1 >= 0 && inext1<Xsize) { pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + 
knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); //float w=sqrtf((inext1-iin)*(inext1-iin)+(jnext1-jin)*(jnext1-jin)+(knext1-kin)*(knext1-kin)); pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint); ilast = inext1; pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += 1; flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 >= 0 && jnext2<Ysize) { pint += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); //float w=sqrtf((inext2-iin)*(inext2-iin)+(jnext2-jin)*(jnext2-jin)+(knext2-kin)*(knext2-kin)); pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += p[iin + jin*Xsize + kin*Xsize*Ysize] + pint; jlast = jnext2; pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += 1; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { pint += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); //float w=sqrtf((inext3-iin)*(inext3-iin)+(jnext3-jin)*(jnext3-jin)+(knext3-kin)*(knext3-kin)); pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint); klast = knext3; pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += 1; flag = 1; } if (flag == 0) { printf("Error! Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); return pint; } __device__ __host__ float bodyIntegralInnerStepCount(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float *p, float*pn, float*pcountinner, long*IntegrationStep) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pint = 0; bool flag = 0; int steps = 0; pn[ilast + jlast*Xsize + klast*Xsize*Ysize] += p[ilast + jlast*Xsize + klast*Xsize*Ysize]; pcountinner[ilast + jlast*Xsize + klast*Xsize*Ysize] += 1; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 
= ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path float r, d1, d2, d3, xt, yt, zt; r = k1*inext1 - x*k1 + k2*jnext1 - k2*y + k3*knext1 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d1 = sqrt((xt - inext1)*(xt - inext1) + (yt - jnext1)*(yt - jnext1) + (zt - knext1)*(zt - knext1)); r = k1*inext2 - x*k1 + k2*jnext2 - k2*y + k3*knext2 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d2 = sqrt((xt - inext2)*(xt - inext2) + (yt - jnext2)*(yt - jnext2) + (zt - knext2)*(zt - knext2)); r = k1*inext3 - x*k1 + k2*jnext3 - k2*y + k3*knext3 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d3 = sqrt((xt - inext3)*(xt - inext3) + (yt - jnext3)*(yt - jnext3) + (zt - knext3)*(zt - knext3)); //////End of calculation distance/////////////// //path 1 flag = 0; if (d1 <= d2&&d1 <= d3&&inext1 >= 0 && inext1<Xsize) { pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); //float w=sqrtf((inext1-iin)*(inext1-iin)+(jnext1-jin)*(jnext1-jin)+(knext1-kin)*(knext1-kin)); pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint); ilast = inext1; pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += 1; flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 >= 0 && jnext2<Ysize) { pint += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); //float w=sqrtf((inext2-iin)*(inext2-iin)+(jnext2-jin)*(jnext2-jin)+(knext2-kin)*(knext2-kin)); pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += p[iin + jin*Xsize + kin*Xsize*Ysize] + pint; jlast = jnext2; pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += 1; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { pint += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); //float w=sqrtf((inext3-iin)*(inext3-iin)+(jnext3-jin)*(jnext3-jin)+(knext3-kin)*(knext3-kin)); pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint); klast = knext3; pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += 1; flag = 1; } if (flag == 0) { printf("Error! 
Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } } else { steps++; IntegrationStep[ilast + jlast*Xsize + klast*Xsize*Ysize] += steps; } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); return pint; } __device__ __host__ float bodyIntegralInnerWeighted(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* curl, float *p, float*pn, float*pcountinner) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pint = 0; float curltmp = 0; float counttmp = 0; bool flag = 0; //curltmp=curl[ilast+jlast*Xsize+klast*Xsize*Ysize]; //counttmp=1; //pn[ilast+jlast*Xsize+klast*Xsize*Ysize]+=p[ilast+jlast*Xsize+klast*Xsize*Ysize]*1/curltmp*counttmp; //pcountinner[ilast+jlast*Xsize+klast*Xsize*Ysize]+=1/curltmp*counttmp; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path float r, d1, d2, d3, xt, yt, zt; r = k1*inext1 - x*k1 + k2*jnext1 - k2*y + k3*knext1 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d1 = sqrt((xt - inext1)*(xt - inext1) + (yt - jnext1)*(yt - jnext1) + (zt - knext1)*(zt - knext1)); r = k1*inext2 - x*k1 + k2*jnext2 - k2*y + k3*knext2 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d2 = sqrt((xt - inext2)*(xt - inext2) + (yt - jnext2)*(yt - jnext2) + (zt - knext2)*(zt - knext2)); r = k1*inext3 - x*k1 + k2*jnext3 - k2*y + k3*knext3 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d3 = sqrt((xt - inext3)*(xt - inext3) + (yt - jnext3)*(yt - jnext3) + (zt - knext3)*(zt - knext3)); //////End of calculation distance/////////////// //path 1 flag = 0; if (d1 <= d2&&d1 <= d3&&inext1 
>= 0 && inext1 < Xsize) { pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]/10000; counttmp++; //float w=sqrtf((inext1-iin)*(inext1-iin)+(jnext1-jin)*(jnext1-jin)+(knext1-kin)*(knext1-kin)); ilast = inext1; if (curltmp != 0) { pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint) * 1 / curltmp*counttmp; pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += 1 / curltmp*counttmp; } flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 >= 0 && jnext2<Ysize) { pint += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] / 10000; counttmp++; //float w=sqrtf((inext2-iin)*(inext2-iin)+(jnext2-jin)*(jnext2-jin)+(knext2-kin)*(knext2-kin)); if (curltmp != 0) { pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint) * 1 / curltmp*counttmp; pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += 1 / curltmp*counttmp; } jlast = jnext2; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { pint += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]/10000; counttmp++; //float w=sqrtf((inext3-iin)*(inext3-iin)+(jnext3-jin)*(jnext3-jin)+(knext3-kin)*(knext3-kin)); klast = knext3; if(curltmp != 0) { pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint) * 1 / curltmp*counttmp; pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += 1 / curltmp*counttmp; } flag = 1; } if (flag == 0) { printf("Error! 
Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); return pint; } __device__ __host__ float bodyIntegralInnerWeightedFixedBC(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* curl, float *p, float*pn, float*pcountinner) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pint = 0; float curltmp = 0; float counttmp = 0; bool flag = 0; //curltmp=curl[ilast+jlast*Xsize+klast*Xsize*Ysize]; //counttmp=1; //pn[ilast+jlast*Xsize+klast*Xsize*Ysize]+=p[ilast+jlast*Xsize+klast*Xsize*Ysize]*1/curltmp*counttmp; //pcountinner[ilast+jlast*Xsize+klast*Xsize*Ysize]+=1/curltmp*counttmp; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path float r, d1, d2, d3, xt, yt, zt; r = k1*inext1 - x*k1 + k2*jnext1 - k2*y + k3*knext1 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d1 = sqrt((xt - inext1)*(xt - inext1) + (yt - jnext1)*(yt - jnext1) + (zt - knext1)*(zt - knext1)); r = k1*inext2 - x*k1 + k2*jnext2 - k2*y + k3*knext2 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d2 = sqrt((xt - inext2)*(xt - inext2) + (yt - jnext2)*(yt - jnext2) + (zt - knext2)*(zt - knext2)); r = k1*inext3 - x*k1 + k2*jnext3 - k2*y + k3*knext3 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d3 = sqrt((xt - inext3)*(xt - inext3) + (yt - jnext3)*(yt - jnext3) + (zt - knext3)*(zt - knext3)); //////End of calculation distance/////////////// //path 1 flag = 0; if (d1 <= d2&&d1 <= d3&&inext1 >= 0 && inext1 < Xsize) { pint += -density*(inext1 - 
ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] / 10000; counttmp++; //float w=sqrtf((inext1-iin)*(inext1-iin)+(jnext1-jin)*(jnext1-jin)+(knext1-kin)*(knext1-kin)); ilast = inext1; if (curltmp != 0) { pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint) * 1 / curltmp*counttmp; pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += 1 / curltmp*counttmp; } flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 > 0 && jnext2<Ysize) { pint += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] / 10000; counttmp++; //float w=sqrtf((inext2-iin)*(inext2-iin)+(jnext2-jin)*(jnext2-jin)+(knext2-kin)*(knext2-kin)); if (curltmp != 0) { pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint) * 1 / curltmp*counttmp; pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += 1 / curltmp*counttmp; } jlast = jnext2; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { pint += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] / 10000; counttmp++; //float w=sqrtf((inext3-iin)*(inext3-iin)+(jnext3-jin)*(jnext3-jin)+(knext3-kin)*(knext3-kin)); klast = knext3; if (curltmp != 0) { pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint) * 1 / curltmp*counttmp; pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += 1 / curltmp*counttmp; } flag = 1; } if (flag == 0) { printf("Error! 
Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); return pint; } __device__ __host__ float bodyIntegralInnerSelect(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float *p, float*pn, float *curl, float*pcountinner, float threshold) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pint = 0; float curltmp = 0; float counttmp = 0; bool flag = 0; bool outthreshold = 0; float p0=0; bool flag_p0set = 0; if (curl[iin + jin*Xsize + kin*Xsize*Ysize] == 0 || curl[iin + jin*Xsize + kin*Xsize*Ysize]>threshold) { flag_p0set = 0; } else { p0 = p[iin + jin*Xsize + kin*Xsize*Ysize]; flag_p0set = 1; curltmp = 0; counttmp = 0; } //curltmp=curl[ilast+jlast*Xsize+klast*Xsize*Ysize]; //counttmp=1; //pn[ilast+jlast*Xsize+klast*Xsize*Ysize]+=p[ilast+jlast*Xsize+klast*Xsize*Ysize]*1/curltmp*counttmp; //pcountinner[ilast+jlast*Xsize+klast*Xsize*Ysize]+=1/curltmp*counttmp; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path float r, d1, d2, d3, xt, yt, zt; r = k1*inext1 - x*k1 + k2*jnext1 - k2*y + k3*knext1 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d1 = sqrt((xt - inext1)*(xt - inext1) + (yt - jnext1)*(yt - jnext1) + (zt - knext1)*(zt - knext1)); r = k1*inext2 - x*k1 + k2*jnext2 - k2*y + k3*knext2 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d2 = sqrt((xt - inext2)*(xt - inext2) + (yt - jnext2)*(yt - jnext2) + (zt - knext2)*(zt - knext2)); r = k1*inext3 - x*k1 + k2*jnext3 - k2*y + k3*knext3 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + 
k3*r; d3 = sqrt((xt - inext3)*(xt - inext3) + (yt - jnext3)*(yt - jnext3) + (zt - knext3)*(zt - knext3)); //////End of calculation distance/////////////// //path 1 flag = 0; if (d1 <= d2&&d1 <= d3&&inext1 >= 0 && inext1<Xsize) { if (flag_p0set == 0) { if (curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] != 0 && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]<threshold) { p0 = p[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]; flag_p0set = 1; pint = 0; counttmp = 0; curltmp = 0; } } else { if (outthreshold == 0 && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] < threshold && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]!=0) { //passing through low error zone pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]/10000; counttmp++; pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += (p0 + pint) * 1 / curltmp*counttmp; pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += 1 / curltmp*counttmp; } if (outthreshold == 0 && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] >= threshold || curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]==0) { ///Entering high error zone //pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); outthreshold = 1; //pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += (p0 + pint); //pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += 1; } if (outthreshold == 1 && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] < threshold && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] != 0) { //reset the starting point of integration. if Exiting the higher error zone. //pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); p0 = p[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]; pint = 0; outthreshold = 0; counttmp = 0; curltmp = 0; } } ilast = inext1; flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 >= 0 && jnext2<Ysize) { if (flag_p0set == 0) { if (curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] != 0) { p0 = p[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]; flag_p0set = 1; pint = 0; counttmp = 0; curltmp = 0; } } else { if (outthreshold == 0 && curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] < threshold&& curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]!=0) { //passing through low error zone pint += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]/10000; counttmp++; pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += (p0 + pint) * 1 / curltmp*counttmp; pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += 1 / curltmp*counttmp; } if (outthreshold == 0 && curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] >= threshold) { ///Entering high error zone //pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); outthreshold = 1; //pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += (p0 + pint); //pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += 1; } if (outthreshold == 1 && curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] < threshold&& curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] != 0) { //reset the starting point of integration. if Exiting the higher error zone. 
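/* Zone logic along the path (Select variants): while curl, used as a local
   error measure, is nonzero and below threshold, the line integral pint is
   accumulated and each visited node receives a deposit with weight
   counttmp/curltmp; entering a high-error zone (curl >= threshold, or
   curl == 0, treated as invalid data) suspends deposition; on exiting, the
   integration is re-anchored to the directly available pressure p at the
   first low-error node and pint restarts from zero. */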
//pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); p0 = p[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]; pint = 0; outthreshold = 0; counttmp = 0; curltmp = 0; } } jlast = jnext2; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { if (flag_p0set == 0) { if (curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] != 0) { p0 = p[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]; flag_p0set = 1; pint = 0; counttmp = 0; curltmp = 0; } } else { if (outthreshold == 0 && curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] < threshold&&curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] !=0) { //passing through low error zone pint += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]/10000; counttmp++; pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += (p0 + pint) * 1 / curltmp*counttmp; pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += 1 / curltmp*counttmp; } if (outthreshold == 0 && curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] >= threshold) { ///Entering high error zone //pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); outthreshold = 1; //pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += (p0 + pint); //pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += 1; } if (outthreshold == 1 && curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] < threshold&&curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]!= 0) { //reset the starting point of integration. if Exiting the higher error zone. //pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); p0 = p[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]; pint = 0; outthreshold = 0; } } klast = knext3; flag = 1; } if (flag == 0) { printf("Error! 
Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } return 0; } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); return pint; } __device__ __host__ float bodyIntegralInnerSelectFixedBC(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float *p, float*pn, float *curl, float*pcountinner, float threshold) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pint = 0; float curltmp = 0; float counttmp = 0; bool flag = 0; bool outthreshold = 0; float p0 = 0; bool flag_p0set = 0; if (curl[iin + jin*Xsize + kin*Xsize*Ysize] == 0 || curl[iin + jin*Xsize + kin*Xsize*Ysize]>threshold) { flag_p0set = 0; } else { p0 = p[iin + jin*Xsize + kin*Xsize*Ysize]; flag_p0set = 1; curltmp = 0; counttmp = 0; } //curltmp=curl[ilast+jlast*Xsize+klast*Xsize*Ysize]; //counttmp=1; //pn[ilast+jlast*Xsize+klast*Xsize*Ysize]+=p[ilast+jlast*Xsize+klast*Xsize*Ysize]*1/curltmp*counttmp; //pcountinner[ilast+jlast*Xsize+klast*Xsize*Ysize]+=1/curltmp*counttmp; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path float r, d1, d2, d3, xt, yt, zt; r = k1*inext1 - x*k1 + k2*jnext1 - k2*y + k3*knext1 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d1 = sqrt((xt - inext1)*(xt - inext1) + (yt - jnext1)*(yt - jnext1) + (zt - knext1)*(zt - knext1)); r = k1*inext2 - x*k1 + k2*jnext2 - k2*y + k3*knext2 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d2 = sqrt((xt - inext2)*(xt - inext2) + (yt - jnext2)*(yt - jnext2) + (zt - knext2)*(zt - knext2)); r = k1*inext3 - x*k1 + k2*jnext3 - k2*y + k3*knext3 - k3*z; xt = x + k1*r; yt = y 
+ k2*r; zt = z + k3*r; d3 = sqrt((xt - inext3)*(xt - inext3) + (yt - jnext3)*(yt - jnext3) + (zt - knext3)*(zt - knext3)); //////End of calculation distance/////////////// //path 1 flag = 0; if (d1 < d2&&d1 <= d3&&inext1 >= 0 && inext1<Xsize &&jnext1 != 0) { if (flag_p0set == 0) { if (curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] != 0 && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]<threshold) { p0 = p[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]; flag_p0set = 1; pint = 0; counttmp = 0; curltmp = 0; } } else { if (outthreshold == 0 && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] < threshold && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] != 0) { //passing through low error zone pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] / 10000; counttmp++; pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += (p0 + pint) * 1 / curltmp*counttmp; pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += 1 / curltmp*counttmp; } if (outthreshold == 0 && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] >= threshold || curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] == 0) { ///Entering high error zone //pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); outthreshold = 1; //pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += (p0 + pint); //pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += 1; } if (outthreshold == 1 && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] < threshold && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] != 0) { //reset the starting point of integration. if Exiting the higher error zone. //pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); p0 = p[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]; pint = 0; outthreshold = 0; counttmp = 0; curltmp = 0; } } ilast = inext1; flag = 1; } if (d2 <= d1&&d2 <= d3&&jnext2 > 0 && jnext2<Ysize) { if (flag_p0set == 0) { if (curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] != 0) { p0 = p[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]; flag_p0set = 1; pint = 0; counttmp = 0; curltmp = 0; } } else { if (outthreshold == 0 && curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] < threshold&& curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] != 0) { //passing through low error zone pint += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] / 10000; counttmp++; pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += (p0 + pint) * 1 / curltmp*counttmp; pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += 1 / curltmp*counttmp; } if (outthreshold == 0 && curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] >= threshold) { ///Entering high error zone //pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); outthreshold = 1; //pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += (p0 + pint); //pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += 1; } if (outthreshold == 1 && curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] < threshold&& curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] != 0) { //reset the starting point of integration. if Exiting the higher error zone. 
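/* FixedBC variant: every candidate step additionally requires j != 0
   (jnext1 != 0, jnext2 > 0, jnext3 != 0), so the path never steps onto the
   j = 0 plane, which is treated as a fixed boundary; ties between d1 and d2
   are also broken in favour of the j direction here (d2 <= d1). */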
//pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); p0 = p[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]; pint = 0; outthreshold = 0; counttmp = 0; curltmp = 0; } } jlast = jnext2; flag = 1; } if (d3 < d1 && d3 < d2 && knext3 >= 0 && knext3<Zsize && jnext3 != 0) { if (flag_p0set == 0) { if (curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] != 0) { p0 = p[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]; flag_p0set = 1; pint = 0; counttmp = 0; curltmp = 0; } } else { if (outthreshold == 0 && curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] < threshold&&curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] != 0) { //passing through low error zone pint += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] / 10000; counttmp++; pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += (p0 + pint) * 1 / curltmp*counttmp; pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += 1 / curltmp*counttmp; } if (outthreshold == 0 && curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] >= threshold) { ///Entering high error zone //pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); outthreshold = 1; //pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += (p0 + pint); //pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += 1; } if (outthreshold == 1 && curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] < threshold&&curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] != 0) { //reset the starting point of integration. if Exiting the higher error zone. //pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); p0 = p[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]; pint = 0; outthreshold = 0; } } klast = knext3; flag = 1; } /*if (flag == 0) { printf("Error! 
Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } return 0; }*/ } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); return pint; } __device__ __host__ float bodyIntegralInnerMiniCurl(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* curl, float *p, float*pn, float*pcountinner) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pint = 0; bool flag = 0; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path float r, d1, d2, d3, xt, yt, zt; /*r=k1*inext1-x*k1+k2*jnext1-k2*y+k3*knext1-k3*z; xt=x+k1*r;yt=y+k2*r;zt=z+k3*r; d1=sqrt((xt-inext1)*(xt-inext1)+(yt-jnext1)*(yt-jnext1)+(zt-knext1)*(zt-knext1)); r=k1*inext2-x*k1+k2*jnext2-k2*y+k3*knext2-k3*z; xt=x+k1*r;yt=y+k2*r;zt=z+k3*r; d2=sqrt((xt-inext2)*(xt-inext2)+(yt-jnext2)*(yt-jnext2)+(zt-knext2)*(zt-knext2)); r=k1*inext3-x*k1+k2*jnext3-k2*y+k3*knext3-k3*z; xt=x+k1*r;yt=y+k2*r;zt=z+k3*r; d3=sqrt((xt-inext3)*(xt-inext3)+(yt-jnext3)*(yt-jnext3)+(zt-knext3)*(zt-knext3));*/ //////End of calculation distance/////////////// ///***calculation of curl in three directions***////////////////// d1 = 1e10; d2 = 1e10; d3 = 1e10; if (inext1 + jnext1*Xsize + knext1*Xsize*Ysize >= 0 && inext1 + jnext1*Xsize + knext1*Xsize*Ysize<Xsize*Ysize*Zsize) { d1 = curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]; } if (inext2 + jnext2*Xsize + knext2*Xsize*Ysize >= 0 && inext2 + jnext2*Xsize + knext2*Xsize*Ysize<Xsize*Ysize*Zsize) { d2 = curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]; } if (inext3 + jnext3*Xsize + knext3*Xsize*Ysize >= 0 && inext3 + jnext3*Xsize + 
knext3*Xsize*Ysize<Xsize*Ysize*Zsize) { d3 = curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]; } //path 1 flag = 0; if (d1 <= d2&&d1 <= d3&&inext1 >= 0 && inext1<Xsize) { pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); //float w=sqrtf((inext1-iin)*(inext1-iin)+(jnext1-jin)*(jnext1-jin)+(knext1-kin)*(knext1-kin)); pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint); ilast = inext1; pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]++; flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 >= 0 && jnext2<Ysize) { pint += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); //float w=sqrtf((inext2-iin)*(inext2-iin)+(jnext2-jin)*(jnext2-jin)+(knext2-kin)*(knext2-kin)); pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint); jlast = jnext2; pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]++; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { pint += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); //float w=sqrtf((inext3-iin)*(inext3-iin)+(jnext3-jin)*(jnext3-jin)+(knext3-kin)*(knext3-kin)); pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint); klast = knext3; pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]++; flag = 1; } if (flag == 0) { printf("Error! Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); return pint; } __device__ __host__ float bodyIntegralInnerWeightedMiniCurl(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* curl, float *p, float*pn, float*pcountinner) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pint = 0; float curltmp = 0; float counttmp = 0; bool flag = 0; curltmp = curl[ilast + jlast*Xsize + klast*Xsize*Ysize]; counttmp = 1; pn[ilast + jlast*Xsize + klast*Xsize*Ysize] += p[ilast + jlast*Xsize + klast*Xsize*Ysize] * 1 / curltmp*counttmp; pcountinner[ilast + jlast*Xsize 
+ klast*Xsize*Ysize] += 1 / curltmp*counttmp; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path float r, d1, d2, d3, xt, yt, zt; /*r=k1*inext1-x*k1+k2*jnext1-k2*y+k3*knext1-k3*z; xt=x+k1*r;yt=y+k2*r;zt=z+k3*r; d1=sqrt((xt-inext1)*(xt-inext1)+(yt-jnext1)*(yt-jnext1)+(zt-knext1)*(zt-knext1)); r=k1*inext2-x*k1+k2*jnext2-k2*y+k3*knext2-k3*z; xt=x+k1*r;yt=y+k2*r;zt=z+k3*r; d2=sqrt((xt-inext2)*(xt-inext2)+(yt-jnext2)*(yt-jnext2)+(zt-knext2)*(zt-knext2)); r=k1*inext3-x*k1+k2*jnext3-k2*y+k3*knext3-k3*z; xt=x+k1*r;yt=y+k2*r;zt=z+k3*r; d3=sqrt((xt-inext3)*(xt-inext3)+(yt-jnext3)*(yt-jnext3)+(zt-knext3)*(zt-knext3));*/ //////End of calculation distance/////////////// ///***calculation of curl in three directions***////////////////// d1 = 1e10; d2 = 1e10; d3 = 1e10; if (inext1 + jnext1*Xsize + knext1*Xsize*Ysize >= 0 && inext1 + jnext1*Xsize + knext1*Xsize*Ysize<Xsize*Ysize*Zsize) { d1 = curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]; } if (inext2 + jnext2*Xsize + knext2*Xsize*Ysize >= 0 && inext2 + jnext2*Xsize + knext2*Xsize*Ysize<Xsize*Ysize*Zsize) { d2 = curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]; } if (inext3 + jnext3*Xsize + knext3*Xsize*Ysize >= 0 && inext3 + jnext3*Xsize + knext3*Xsize*Ysize<Xsize*Ysize*Zsize) { d3 = curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]; } //path 1 flag = 0; if (d1 <= d2&&d1 <= d3&&inext1 >= 0 && inext1<Xsize) { pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]; counttmp++; //float w=sqrtf((inext1-iin)*(inext1-iin)+(jnext1-jin)*(jnext1-jin)+(knext1-kin)*(knext1-kin)); pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint) * 1 / curltmp*counttmp; ilast = inext1; pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += 1 / curltmp*counttmp; flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 >= 0 && jnext2<Ysize) { pint += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]; counttmp++; //float w=sqrtf((inext2-iin)*(inext2-iin)+(jnext2-jin)*(jnext2-jin)+(knext2-kin)*(knext2-kin)); pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint) * 1 / curltmp*counttmp; jlast = jnext2; pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += 1 / curltmp*counttmp; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { pint += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]; counttmp++; //float w=sqrtf((inext3-iin)*(inext3-iin)+(jnext3-jin)*(jnext3-jin)+(knext3-kin)*(knext3-kin)); pn[inext3 + 
jnext3*Xsize + knext3*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint) * 1 / curltmp*counttmp; klast = knext3; pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += 1 / curltmp*counttmp; flag = 1; } if (flag == 0) { printf("Error! Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); return pint; } __device__ __host__ float bodyIntegralInner2(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float *p, float*pn, float*pcountinner) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pint = 0; bool flag = 0; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path float r, d1, d2, d3; r = k1*inext1 - x*k1 + k2*jnext1 - k2*y + k3*knext1 - k3*z; x = x + k1*r; y = y + k2*r; z = z + k3*r; d1 = sqrt((x - inext1)*(x - inext1) + (y - jnext1)*(y - jnext1) + (z - knext1)*(z - knext1)); r = k1*inext2 - x*k1 + k2*jnext2 - k2*y + k3*knext2 - k3*z; x = x + k1*r; y = y + k2*r; z = z + k3*r; d2 = sqrt((x - inext2)*(x - inext2) + (y - jnext2)*(y - jnext2) + (z - knext2)*(z - knext2)); r = k1*inext3 - x*k1 + k2*jnext3 - k2*y + k3*knext3 - k3*z; x = x + k1*r; y = y + k2*r; z = z + k3*r; d3 = sqrt((x - inext3)*(x - inext3) + (y - jnext3)*(y - jnext3) + (z - knext3)*(z - knext3)); //////End of calculation distance/////////////// //path 1 flag = 0; if (d1 <= d2&&d1 <= d3&&inext1 >= 0 && inext1<Xsize) { float pinttmp = -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); 
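/* Inner2 deposits two pressure estimates at each visited node: one referenced
   to the path origin (p at (iin,jin,kin) plus the accumulated integral pint)
   and one referenced to the previous node (p at (ilast,jlast,klast) plus the
   single-step increment pinttmp), each counted once in pcountinner. */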
pint += pinttmp; //float w=sqrtf((inext1-iin)*(inext1-iin)+(jnext1-jin)*(jnext1-jin)+(knext1-kin)*(knext1-kin)); pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += p[iin + jin*Xsize + kin*Xsize*Ysize] + pint; pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += 1; pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += p[ilast + jlast*Xsize + klast*Xsize*Ysize] + pinttmp; pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += 1; ilast = inext1; flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 >= 0 && jnext2<Ysize) { float pinttmp = -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); pint += pinttmp; //float w=sqrtf((inext2-iin)*(inext2-iin)+(jnext2-jin)*(jnext2-jin)+(knext2-kin)*(knext2-kin)); pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += p[iin + jin*Xsize + kin*Xsize*Ysize] + pint; pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += 1; pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += p[ilast + jlast*Xsize + klast*Xsize*Ysize] + pinttmp; pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += 1; jlast = jnext2; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { float pinttmp = -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); pint += pinttmp; //float w=sqrtf((inext3-iin)*(inext3-iin)+(jnext3-jin)*(jnext3-jin)+(knext3-kin)*(knext3-kin)); pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint); pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += 1; pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += (p[ilast + jlast*Xsize + klast*Xsize*Ysize] + pinttmp); pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += 1; klast = knext3; flag = 1; } if (flag == 0) { printf("Error! 
Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); return pint; } __global__ void initialIntegration(long Xsize, long Ysize, long Zsize, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* p, float* pn) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; long nout = threadIdx.x + blockIdx.x*blockDim.x; while (nout<n) { int iout, jout, kout; ntoijk(Xsize, Ysize, Zsize, nout, &iout, &jout, &kout); p[nout] = bodyIntegralFromCenter(Xsize, Ysize, Zsize, Xsize / 2, Ysize / 2, Zsize / 2, iout, jout, kout, deltx, delty, deltz, density, DuDt, DvDt, DwDt); nout = nout + blockDim.x*gridDim.x; } } __global__ void omni3d(long Xsize, long Ysize, long Zsize, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pint, int*pcountinner) { long n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; long iin, jin, kin, iout, jout, kout, indexin, indexout; long nin = blockDim.x*blockIdx.x + threadIdx.x; long nout = blockDim.y*blockIdx.y + threadIdx.y; while (nin<n&&nout<n) { long iout, jout, kout; long facein, faceout; if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; faceout = 1; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; faceout = 2; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; faceout = 3; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; faceout = 4; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; faceout = 5; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 
1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; faceout = 6; } long iin, jin, kin; if (nin <= Xsize*Ysize - 1) { kin = 0; jin = nin / Xsize; iin = nin - Xsize*jin; facein = 1; } if (nin>Xsize*Ysize - 1 && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iin = Xsize - 1; jin = (nin - Xsize*Ysize) / (Zsize - 1); kin = nin - Xsize*Ysize - jin*(Zsize - 1) + 1; facein = 2; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kin = Zsize - 1; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - jin*(Xsize - 1); iin = Xsize - 2 - iin; facein = 3; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jin = 0; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kin*(Xsize - 1); iin = Xsize - 2 - iin; kin = kin + 1; facein = 4; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iin = 0; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jin*(Zsize - 2); kin = Zsize - 2 - kin; jin = jin + 1; facein = 5; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jin = Ysize - 1; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kin*(Xsize - 2); kin = Zsize - 2 - kin; iin = iin + 1; facein = 6; } long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; ilast = iin; jlast = jin; klast = kin; if (nin != nout&&nin >= 0 && nin<n&&nout >= 0 && nout<n) { float k1 = iout - iin; float k2 = jout - jin; float k3 = kout - kin; float l = sqrt(k1*k1 + k2*k2 + k3*k3); k1 = k1 / l; k2 = k2 / l; k3 = k3 / l; //cout<<"indexin: "<<nin<<" indexout:"<<nout<<endl; //cout<<'('<<iin<<','<<jin<<','<<kin<<") "<<'('<<iout<<','<<jout<<','<<kout<<") "<<endl; //log<<"indexin: "<<nin<<" indexout:"<<nout<<endl; //log<<'('<<iin<<','<<jin<<','<<kin<<") "<<'('<<iout<<','<<jout<<','<<kout<<") "<<endl; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 1e6; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 1e6; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1e6; } 
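// The large negative offsets (klast - 1e6 here, ilast - 60000 in the
// bodyIntegralInner* helpers) are sentinels: once a direction has reached its
// target index, its candidate point is pushed far off the line so it can never
// win the closest-point test below.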
if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; }
///determine which one is closer to integration path
float r, d1, d2, d3, x, y, z;
r = k1*inext1 - iin*k1 + k2*jnext1 - k2*jin + k3*knext1 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
d1 = sqrt((x - inext1)*(x - inext1) + (y - jnext1)*(y - jnext1) + (z - knext1)*(z - knext1));
r = k1*inext2 - iin*k1 + k2*jnext2 - k2*jin + k3*knext2 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
d2 = sqrt((x - inext2)*(x - inext2) + (y - jnext2)*(y - jnext2) + (z - knext2)*(z - knext2));
r = k1*inext3 - iin*k1 + k2*jnext3 - k2*jin + k3*knext3 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
d3 = sqrt((x - inext3)*(x - inext3) + (y - jnext3)*(y - jnext3) + (z - knext3)*(z - knext3));
//////End of calculation distance///////////////
//path 1
if (d1 <= d2&&d1 <= d3) { pint[nin + nout*n] += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); ilast = inext1; }
if (d2<d1&&d2 <= d3) { pint[nin + nout*n] += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); jlast = jnext2; }
if (d3<d1&&d3<d2) { pint[nin + nout*n] += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); klast = knext3; }
} while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5);
}
nin = nin + blockDim.x*gridDim.x;
nout = nout + blockDim.y*gridDim.y;
}
//////End of calculation of pressure increment////////////////
}

__global__ void omni3dparallellinesEqualSpacing(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pint, float* pcount, float* pcountinner)
{
int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2;
float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0;
int NoGrid = Xsize;
if (NoGrid<Ysize) { NoGrid = Ysize; }
if (NoGrid<Zsize) { NoGrid = Zsize; }
NoGrid = NoGrid*1.732 / linespacing;
long angle = threadIdx.y + blockDim.y*blockIdx.y;
int point = threadIdx.x + blockDim.x*blockIdx.x;
//float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid;
while (point<NoGrid*NoGrid&&angle<NoAngles) {
float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing;
float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing;
float k1, k2, k3;
k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle];
float theta = acosf(k3);
float phi = 0;
if (__sinf(theta) != 0) { phi = asinf(k2 / __sinf(theta)); if (k1 / __sinf(theta)<0) { phi = -phi + PI; } }
else { phi = 0; }
float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi);
float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi);
float z = -xprime*__sinf(theta);
//float k1=__sinf(theta)*__cosf(phi);
//float k2=__sinf(theta)*__sinf(phi);
//float k3=__cosf(theta);
int iin, jin, kin, iout, jout, kout;
cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout);
int nin, nout;
if (iin >= 0 &&
if (nin != nout) { float pincre = bodyIntegral(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, pcountinner); //float w=sqrtf((iout-iin)*(iout-iin)+(jout-jin)*(jout-jin)+(kout-kin)*(kout-kin)); pint[nin + nout*n] += pincre; //pcountinner[iin+jin*Xsize+kin*Xsize*Ysize]++; //pcountinner[iout+jout*Xsize+kout*Xsize*Ysize]++; pcount[nin + nout*n]++; //pint[nout+nin*n]+=-pincre; //pcount[nout+nin*n]++; } } point += blockDim.x*gridDim.x; angle += blockDim.y*gridDim.y; } } __global__ void omni3dparallellinesEqualSpacingWeighted(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pint, float*pweight, float* pcount, float* pcountinner, float*curl) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.732 / linespacing; long angle = threadIdx.y + blockDim.y*blockIdx.y; int point = threadIdx.x + blockDim.x*blockIdx.x; //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid*NoGrid&&angle<NoAngles) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = 0; if (__sinf(theta) != 0) { phi = asinf(k2 / __sinf(theta)); if (k1 / __sinf(theta)<0) { phi = -phi + PI; } } else { phi = 0; } float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi); float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi); float z = -xprime*__sinf(theta); //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { float pincre = bodyIntegralWeighted(Xsize, Ysize, Zsize, n, nin, nout, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, curl, pcountinner, pint, pcount, pweight); } } point += blockDim.x*gridDim.x; angle += blockDim.y*gridDim.y; } } __global__ void omni3dparallellinesEqualSpacingWeightedMiniCurl(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pint, float* pcount, float* pcountinner, float*curl) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.732 / linespacing; long angle = threadIdx.y + blockDim.y*blockIdx.y; int point = threadIdx.x + blockDim.x*blockIdx.x; //float 
spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid*NoGrid&&angle<NoAngles) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = 0; if (__sinf(theta) != 0) { phi = asinf(k2 / __sinf(theta)); if (k1 / __sinf(theta)<0) { phi = -phi + PI; } } else { phi = 0; } float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi); float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi); float z = -xprime*__sinf(theta); //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { float pincre = bodyIntegralWeightedMiniCurl(Xsize, Ysize, Zsize, n, nin, nout, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, curl, pcountinner, pint, pcount); } } point += blockDim.x*gridDim.x; angle += blockDim.y*gridDim.y; } } __global__ void omni3dparallellinesEqualSpacingSelect(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pint, float* pcount, float* pcountinner, float*curl, float threshold) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.732 / linespacing; long angle = threadIdx.y + blockDim.y*blockIdx.y; int point = threadIdx.x + blockDim.x*blockIdx.x; //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid*NoGrid&&angle<NoAngles) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = 0; if (__sinf(theta) != 0) { phi = asinf(k2 / __sinf(theta)); if (k1 / __sinf(theta)<0) { phi = -phi + PI; } } else { phi = 0; } float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi); float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi); float z = -xprime*__sinf(theta); //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && iin<Xsize&&jin >= 4 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<4 && kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { float pincre = bodyIntegral(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, 
density, DuDt, DvDt, DwDt, pcountinner); //float w=sqrtf((iout-iin)*(iout-iin)+(jout-jin)*(jout-jin)+(kout-kin)*(kout-kin)); pint[nin + nout*n] += pincre; //pcountinner[iin+jin*Xsize+kin*Xsize*Ysize]++; //pcountinner[iout+jout*Xsize+kout*Xsize*Ysize]++; pcount[nin + nout*n]++; //pint[nout+nin*n]+=-pincre; //pcount[nout+nin*n]++; } } if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<4 && kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<4 && kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { float pincre = bodyIntegral(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, pcountinner); //float w=sqrtf((iout-iin)*(iout-iin)+(jout-jin)*(jout-jin)+(kout-kin)*(kout-kin)); pint[nin + nout*n] += pincre; //pcountinner[iin+jin*Xsize+kin*Xsize*Ysize]++; //pcountinner[iout+jout*Xsize+kout*Xsize*Ysize]++; pcount[nin + nout*n]++; //pint[nout+nin*n]+=-pincre; //pcount[nout+nin*n]++; } } if (iin >= 0 && iin<Xsize&&jin >= 4 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 4 && jout<Ysize&&kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { float pincre = bodyIntegral(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, pcountinner); //float w=sqrtf((iout-iin)*(iout-iin)+(jout-jin)*(jout-jin)+(kout-kin)*(kout-kin)); pint[nin + nout*n] += pincre; //pcountinner[iin+jin*Xsize+kin*Xsize*Ysize]++; //pcountinner[iout+jout*Xsize+kout*Xsize*Ysize]++; pcount[nin + nout*n]++; //pint[nout+nin*n]+=-pincre; //pcount[nout+nin*n]++; } } point += blockDim.x*gridDim.x; angle += blockDim.y*gridDim.y; } } //select iout<iin __global__ void omni3dparallellinesEqualSpacingSelect2(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pint, float* pcount, float* pcountinner) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.732 / linespacing; long angle = threadIdx.y + blockDim.y*blockIdx.y; int point = threadIdx.x + blockDim.x*blockIdx.x; //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid*NoGrid&&angle<NoAngles) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = 0; if (__sinf(theta) != 0) { phi = asinf(k2 / __sinf(theta)); if (k1 / __sinf(theta)<0) { phi = -phi + PI; } } else { phi = 0; } float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi); float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi); float z = -xprime*__sinf(theta); //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && 
iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize && (phi<PI / 4 || phi>3 * PI / 4)) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { float pincre = bodyIntegral(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, pcountinner); //float w=sqrtf((iout-iin)*(iout-iin)+(jout-jin)*(jout-jin)+(kout-kin)*(kout-kin)); pint[nin + nout*n] += pincre; //pcountinner[iin+jin*Xsize+kin*Xsize*Ysize]++; //pcountinner[iout+jout*Xsize+kout*Xsize*Ysize]++; pcount[nin + nout*n]++; //pint[nout+nin*n]+=-pincre; //pcount[nout+nin*n]++; } } point += blockDim.x*gridDim.x; angle += blockDim.y*gridDim.y; } } __global__ void omni2dparallellinesOnFace(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pint, float* pcount, float* pcountinner) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.414 / linespacing; long angle = threadIdx.y + blockDim.y*blockIdx.y; int point = threadIdx.x + blockDim.x*blockIdx.x; //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid&&angle<NoAngles) { float theta = angle / NoAngles * 2 * PI; ///on XY face float k1 = __cosf(theta); float k2 = __sinf(theta); float k3 = 0; float x = __sinf(theta)*(point - NoGrid / 2)*linespacing; float y = __cosf(theta)*(point - NoGrid / 2)*linespacing; float z = -Zsize / 2.0; int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { float pincre = bodyIntegral(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, pcountinner); //float w=sqrtf((iout-iin)*(iout-iin)+(jout-jin)*(jout-jin)+(kout-kin)*(kout-kin)); pint[nin + nout*n] += pincre; //pcountinner[iin+jin*Xsize+kin*Xsize*Ysize]++; //pcountinner[iout+jout*Xsize+kout*Xsize*Ysize]++; pcount[nin + nout*n]++; //pint[nout+nin*n]+=-pincre; //pcount[nout+nin*n]++; } } ///on XY face 2 z = Zsize / 2.0; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { float pincre = bodyIntegral(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, pcountinner); //float w=sqrtf((iout-iin)*(iout-iin)+(jout-jin)*(jout-jin)+(kout-kin)*(kout-kin)); pint[nin + nout*n] += pincre; //pcountinner[iin+jin*Xsize+kin*Xsize*Ysize]++; //pcountinner[iout+jout*Xsize+kout*Xsize*Ysize]++; pcount[nin + nout*n]++; 
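// pint[nin + nout*n] accumulates the line integrals from boundary node nin to
// boundary node nout, and pcount[nin + nout*n] the number of lines that
// contributed; devidecount() later divides the sums by the counts. A possible
// host-side sequence (an illustrative sketch only; grid/block sizes and the
// _d buffer names are placeholders, not prescribed by this file):
//   omni2dparallellinesOnFace<<<dim3(16, 16), dim3(16, 16)>>>(Xsize, Ysize, Zsize, NoAngles, linespacing, index_d, deltx, delty, deltz, density, DuDt_d, DvDt_d, DwDt_d, pint_d, pcount_d, pcountinner_d);
//   devidecount<<<256, 256>>>(Xsize, Ysize, Zsize, pint_d, pcount_d);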
//pint[nout+nin*n]+=-pincre; //pcount[nout+nin*n]++; } } point += blockDim.x*gridDim.x; angle += blockDim.y*gridDim.y; } } __global__ void devidecount(long Xsize, long Ysize, long Zsize, float* pint, float* pcount) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; long tid = threadIdx.x + blockDim.x*blockIdx.x; while (tid<n*n) { if (pcount[tid]>0) { pint[tid] /= pcount[tid]; } tid += blockDim.x*gridDim.x; } } __global__ void devidecountWeight(long Xsize, long Ysize, long Zsize, float* pint, float* pcount, float*pweight) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; long tid = threadIdx.x + blockDim.x*blockIdx.x; while (tid<n*n) { if (pcount[tid]>0) { pint[tid] /= pcount[tid]; pweight[tid] /= pcount[tid]; } tid += blockDim.x*gridDim.x; } } __global__ void omni3dvirtual(long Xsize, long Ysize, long Zsize, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pint, float* pcount) { float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; //virtual boundary an ellipsoid int a = Xsize - 1; int b = Ysize - 1; int c = Zsize - 1; float delttheta = PI / Zsize / 2; float deltbeta = PI / Xsize / 2; float xin, yin, zin, xout, yout, zout, k1, k2, k3, x, y, z; int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; int iin, jin, kin, iout, jout, kout, indexin, indexout; indexin = blockDim.x*blockIdx.x + threadIdx.x; float thetain = (indexin / (2 * Zsize))*delttheta; float betain = (blockDim.x*blockIdx.x + threadIdx.x - 2 * Zsize*(indexin / (2 * Zsize)))*deltbeta; indexout = blockDim.y*blockIdx.y + threadIdx.y; float thetaout = (indexout / (2 * Xsize))*delttheta; float betaout = (blockDim.y*blockIdx.y + threadIdx.y - 2 * Xsize*(indexout / (2 * Xsize)))*deltbeta; while (indexin<int(PI / delttheta)*int(PI / deltbeta) * 2 && indexout<int(PI / delttheta)*int(PI / deltbeta) * 2) { xin = a*sin(thetain)*cos(betain); yin = b*sin(thetain)*sin(betain); zin = c*cos(thetain); xout = a*sin(thetaout)*cos(betaout); yout = b*sin(thetaout)*sin(betaout); zout = c*cos(thetaout); k1 = xout - xin; k2 = yout - yin; k3 = zout - zin; /////case 1, vertical to x-axis if (k1 == 0 && k2 != 0 && k3 != 0) { if (xin >= -center_x&&xin <= center_x) { ////four crossing point;y=0;y=max;z=0;z=max; float r = (-center_y - yin) / k2; float y1 = -center_y; float z1 = zin + k3*r; r = (center_y - yin) / k2; float y2 = center_y; float z2 = zin + k3*r; r = (-center_z - zin) / k3; float z3 = -center_z; float y3 = yin + k2*r; r = (center_z - zin) / k3; float z4 = center_z; float y4 = yin + k2*r; bool flag = 0; if (z1 <= center_z&&z1 >= -center_z&&flag == 0)//cross y=0; { if (flag == 0) { iin = int(xin + center_x + 0.5); jin = 0; kin = int(z1 + center_z + 0.5); } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = 0; kout = int(z1 + center_z + 0.5); } flag = 1; } if (z2 <= center_z&&z2 >= -center_z)//y=max; { if (flag == 0) { iin = int(xin + center_x + 0.5); jin = Ysize - 1; kin = int(z2 + center_z + 0.5); } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = Ysize - 1; kout = int(z2 + center_z + 0.5); } flag = 1; } if (y3 <= center_y&&y3 >= -center_y)//z=0; { if (flag == 0) { iin = int(xin + center_x + 0.5); jin = int(y3 + center_y + 0.5); kin = 0; } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = int(y3 + center_y + 0.5); kout = 0; } flag = 1; } if (y4 <= center_y&&y4 >= -center_y) { if (flag == 0) { iin = int(xin + center_x + 0.5); 
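// int(v + center + 0.5) rounds the real-valued crossing coordinate to the
// nearest grid index after shifting from the centered frame used for the line
// geometry back to array coordinates.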
jin = int(y4 + center_y + 0.5); kin = Zsize - 1; }
if (flag == 1) { iout = int(xin + center_x + 0.5); jout = int(y4 + center_y + 0.5); kout = Zsize - 1; }
}
//sorting intersection point by in, out order
if (flag != 0) { if ((jout - jin)*k2 + (kout - kin)*k3<0) { int temp; temp = jin; jin = jout; jout = temp; temp = kin; kin = kout; kout = temp; } }
}
}
///case 2, vertical to y-axis
if (k1 != 0 && k2 == 0 && k3 != 0) {
if (yin >= -center_y&&yin <= center_y) {
////four crossing point
float r = (-center_x - xin) / k1; float x1 = -center_x; float z1 = zin + k3*r; //x=0;
r = (center_x - xin) / k1; float x2 = center_x; float z2 = zin + k3*r; //x=max
r = (-center_z - zin) / k3; float z3 = -center_z; float x3 = xin + k1*r; //z=0;
r = (center_z - zin) / k3; float z4 = center_z; float x4 = xin + k1*r; //z=max;
bool flag = 0;
if (z1 <= center_z&&z1 >= -center_z) { if (flag == 0) { iin = 0; jin = int(yin + center_y + 0.5); kin = int(z1 + center_z + 0.5); } if (flag == 1) { iout = 0; jout = int(yin + center_y + 0.5); kout = int(z1 + center_z + 0.5); } flag = 1; }
if (z2 <= center_z&&z2 >= -center_z) { if (flag == 0) { iin = Xsize - 1; jin = int(yin + center_y + 0.5); kin = int(z2 + center_z + 0.5); } if (flag == 1) { iout = Xsize - 1; jout = int(yin + center_y + 0.5); kout = int(z2 + center_z + 0.5); } flag = 1; }
if (x3 <= center_x&&x3 >= -center_x) { if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = int(yin + center_y + 0.5); kin = 0; } if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = int(yin + center_y + 0.5); kout = 0; } flag = 1; }
if (x4 <= center_x&&x4 >= -center_x) { if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = int(yin + center_y + 0.5); kin = Zsize - 1; } if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = int(yin + center_y + 0.5); kout = Zsize - 1; } flag = 1; }
//sorting intersection point by in, out order
if (flag != 0) { if ((iout - iin)*k1 + (kout - kin)*k3<0) { int temp; temp = iin; iin = iout; iout = temp; temp = kin; kin = kout; kout = temp; } }
}
}
///case 3, vertical to z-axis
if (k1 != 0 && k2 != 0 && k3 == 0) {
if (zin >= -center_z&&zin <= center_z) {
////four crossing point
float r = (-center_x - xin) / k1; float x1 = -center_x; float y1 = yin + k2*r; //x=0;
r = (center_x - xin) / k1; float x2 = center_x; float y2 = yin + k2*r; //x=max;
r = (-center_y - yin) / k2; float y3 = -center_y; float x3 = xin + k1*r; //y=0;
r = (center_y - yin) / k2; float y4 = center_y; float x4 = xin + k1*r; //y=max;
bool flag = 0;
if (y1 <= center_y&&y1 >= -center_y) { if (flag == 0) { iin = 0; jin = int(y1 + center_y + 0.5); kin = int(zin + center_z + 0.5); } if (flag == 1) { iout = 0; jout = int(y1 + center_y + 0.5); kout = int(zin + center_z + 0.5); } flag = 1; }
if (y2 <= center_y&&y2 >= -center_y) { if (flag == 0) { iin = Xsize - 1; jin = int(y2 + center_y + 0.5); kin = int(zin + center_z + 0.5); } if (flag == 1) { iout = Xsize - 1; jout = int(y2 + center_y + 0.5); kout = int(zin + center_z + 0.5); } flag = 1; }
if (x3 <= center_x&&x3 >= -center_x) { if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = 0; kin = int(zin + center_z + 0.5); } if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = 0; kout = int(zin + center_z + 0.5); } flag = 1; }
if (x4 <= center_x&&x4 >= -center_x) { if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = Ysize - 1; kin = int(zin + center_z + 0.5); } if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = Ysize - 1; kout = int(zin + center_z + 0.5); } flag = 1; }
//sorting intersection point by in, out order
if (flag != 0) { if ((iout -
iin)*k1 + (jout - jin)*k2<0) { int temp; temp = iin; iin = iout; iout = temp; temp = jin; jin = jout; jout = temp; } } } } ///case 4, vertical to plane IJ if (abs(k1)<zero&&abs(k2)<zero&&abs(k3) >= zero) { if (xin <= center_x&&xin >= -center_x&&yin <= center_y&&yin >= -center_y) { iin = int(xin + center_x + 0.5); iout = iin; jin = int(yin + center_y + 0.5); jout = jin; if (k3>0) { kin = 0; kout = Zsize - 1; } else{ kin = Zsize - 1; kout = 0; } } } ///case 5, vertical to IK plane if (abs(k1)<zero&&abs(k2) >= zero&&abs(k3)<zero) { if (xin >= -center_x&&xin <= center_x&&zin >= -center_z&&zin <= center_z) { iin = int(xin + center_x + 0.5); iout = iin; kin = int(zin + center_z + 0.5); kout = kin; if (k2>0) { jout = Ysize - 1; jin = 0; } else { jin = Ysize - 1; jout = 0; } } } ///case 6, vertical to JK plane if (abs(k1) >= zero&&abs(k2)<zero&&abs(k3)<zero) { if (yin >= -center_y&&yin<center_y&&zin >= -center_z&&zin <= center_z) { jin = int(yin + center_y + 0.5); jout = jin; kin = int(zin + center_z + 0.5); kout = kin; if (k1>0) { iout = Xsize - 1; iin = 0; } else { iin = Xsize - 1; iout = 0; } } } /// case 7, purely inclined if (abs(k1) >= zero&&abs(k2) >= zero&&abs(k3) >= zero) { /// six crossing point float r; float x1, x2, x3, x4, x5, x6; float y1, y2, y3, y4, y5, y6; float z1, z2, z3, z4, z5, z6; r = (-center_x - xin) / k1; x1 = -center_x; y1 = yin + k2*r; z1 = zin + k3*r;//x=0 r = (center_x - xin) / k1; x2 = center_x; y2 = yin + k2*r; z2 = zin + k3*r;//x=max r = (-center_y - yin) / k2; x3 = xin + k1*r; y3 = -center_y; z3 = zin + k3*r;//y=0; r = (center_y - yin) / k2; x4 = xin + k1*r; y4 = center_y; z4 = zin + k3*r;//y=max r = (-center_z - zin) / k3; x5 = xin + k1*r; y5 = yin + k2*r; z5 = -center_z;//z=0; r = (center_z - zin) / k3; x6 = xin + k1*r; y6 = yin + k2*r; z6 = center_z;//z=max bool flag = 0; if (y1 <= center_y&&y1 >= -center_y&&z1 <= center_z&&z1 >= -center_z) { if (flag == 0) { iin = 0; jin = int(y1 + center_y + 0.5); kin = int(z1 + center_z + 0.5); } if (flag == 1) { iout = 0; jout = int(y1 + center_y + 0.5); kout = int(z1 + center_z + 0.5); } flag = 1; } if (y2 <= center_y&&y2 >= -center_y&&z2 <= center_z&&z2 >= -center_z) { if (flag == 0) { iin = Xsize - 1; jin = int(y2 + center_y + 0.5); kin = int(z2 + center_z + 0.5); } if (flag == 1) { iout = Xsize - 1; jout = int(y2 + center_y + 0.5); kout = int(z2 + center_z + 0.5); } flag = 1; } if (x3 <= center_x&&x3 >= -center_x&&z3 <= center_z&&z3 >= -center_z) { if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = 0; kin = int(z3 + center_z + 0.5); } if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = 0; kout = int(z3 + center_z + 0.5); } flag = 1; } if (x4 <= center_x&&x4 >= -center_x&&z4 <= center_z&&z4 >= -center_z) { if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = Ysize - 1; kin = int(z4 + center_z + 0.5); } if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = Ysize - 1; kout = int(z4 + center_z + 0.5); } flag = 1; } if (x5 <= center_x&&x5 >= -center_x&&y5 <= center_y&&y5 >= -center_y) { if (flag == 0) { iin = int(x5 + center_x + 0.5); jin = int(y5 + center_y + 0.5); kin = 0; } if (flag == 1) { iout = int(x5 + center_x + 0.5); jout = int(y5 + center_y + 0.5); kout = 0; } flag = 1; } if (x6 <= center_x&&x6 >= -center_x&&y6 <= center_y&&y6 >= -center_y) { if (flag == 0) { iin = int(x6 + center_x + 0.5); jin = int(y6 + center_y + 0.5); kin = Zsize - 1; } if (flag == 1) { iout = int(x6 + center_x + 0.5); jout = int(y6 + center_y + 0.5); kout = Zsize - 1; } flag = 1; } //sorting intersection point by in, out order if 
if ((iout - iin)*k1 + (jout - jin)*k2 + (kout - kin)*k3<0) { int temp; temp = iin; iin = iout; iout = temp; temp = jin; jin = jout; jout = temp; temp = kin; kin = kout; kout = temp; }
}
//////////////////////////////END OF CALCULATING IN AND OUT POINT ON REAL BOUNDARY////////////////////////////////
if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize && (iin - center_x - xin)*(iin - center_x - xout) + (jin - center_y - yin)*(jin - center_y - yout) + (kin - center_z - zin)*(kin - center_z - zout)<0 && (iin + jin + kin + iout + jout + kout) != 0 && !(iin == iout&&jin == jout&&kin == kout)) {
int nin, nout;
long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3;
ilast = iin; jlast = jin; klast = kin;
nin = index[iin + jin*Xsize + kin*Xsize*Ysize];
nout = index[iout + jout*Xsize + kout*Xsize*Ysize];
if (nin != nout&&nin<n&&nout<n) {
do {
if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; }
if (ilast == iout) { inext1 = ilast - 1e6; jnext1 = jlast; knext1 = klast; }
if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; }
if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; }
if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 1e6; knext2 = klast; }
if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; }
if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; }
if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1e6; }
if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; }
///determine which one is closer to integration path
float r, d1, d2, d3;
r = k1*inext1 - iin*k1 + k2*jnext1 - k2*jin + k3*knext1 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
d1 = sqrt((x - inext1)*(x - inext1) + (y - jnext1)*(y - jnext1) + (z - knext1)*(z - knext1));
r = k1*inext2 - iin*k1 + k2*jnext2 - k2*jin + k3*knext2 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
d2 = sqrt((x - inext2)*(x - inext2) + (y - jnext2)*(y - jnext2) + (z - knext2)*(z - knext2));
r = k1*inext3 - iin*k1 + k2*jnext3 - k2*jin + k3*knext3 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
d3 = sqrt((x - inext3)*(x - inext3) + (y - jnext3)*(y - jnext3) + (z - knext3)*(z - knext3));
//////End of calculation distance///////////////
if (d1 <= d2&&d1 <= d3&&inext1 >= 0 && inext1<Xsize&&jnext1 >= 0 && jnext1<Ysize&&knext1 >= 0 && knext1<Zsize) { pint[nin + nout*n] += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); ilast = inext1; }
if (d2<d1&&d2 <= d3&&inext2<Xsize&&jnext2 >= 0 && jnext2<Ysize&&knext2 >= 0 && knext2<Zsize) { pint[nin + nout*n] += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); jlast = jnext2; }
if (d3<d1&&d3<d2&&inext3<Xsize&&jnext3 >= 0 && jnext3<Ysize&&knext3 >= 0 && knext3<Zsize) { pint[nin + nout*n] += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); klast = knext3; }
} while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5);
pcount[nin + nout*n]++;
}
}
indexin = indexin + blockDim.x*gridDim.x;
indexout = indexout + blockDim.y*gridDim.y;
}
}

__global__ void omni3virtualgrid(long Xsize, long Ysize, long Zsize, int NoTheta, int NoBeta, long* index, long* ninvir, long *noutvir, float
deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pintvir) { float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; //virtual boundary an ellipsoid int a = Xsize - 1; int b = Ysize - 1; int c = Zsize - 1; float delttheta = PI / NoTheta; float deltbeta = 2 * PI / NoBeta; float xin, yin, zin, xout, yout, zout, k1, k2, k3, x, y, z; //int n=Xsize*Ysize*2+(Zsize-2)*Ysize*2+(Xsize-2)*(Zsize-2)*2; int iin, jin, kin, iout, jout, kout, indexin, indexout; indexin = blockDim.x*blockIdx.x + threadIdx.x; float thetain = (indexin / (NoBeta))*delttheta; float betain = (blockDim.x*blockIdx.x + threadIdx.x - NoBeta*(indexin / (NoBeta)))*deltbeta; indexout = blockDim.y*blockIdx.y + threadIdx.y; float thetaout = (indexout / (NoBeta))*delttheta; float betaout = (blockDim.y*blockIdx.y + threadIdx.y - NoBeta*(indexout / (NoBeta)))*deltbeta; while (indexin<NoTheta*NoBeta&&indexout<NoTheta*NoBeta) { xin = a*sin(thetain)*cos(betain); yin = b*sin(thetain)*sin(betain); zin = c*cos(thetain); xout = a*sin(thetaout)*cos(betaout); yout = b*sin(thetaout)*sin(betaout); zout = c*cos(thetaout); k1 = xout - xin; k2 = yout - yin; k3 = zout - zin; /////case 1, vertical to x-axis if (k1 == 0 && k2 != 0 && k3 != 0) { if (xin >= -center_x&&xin <= center_x) { ////four crossing point;y=0;y=max;z=0;z=max; float r = (-center_y - yin) / k2; float y1 = -center_y; float z1 = zin + k3*r; r = (center_y - yin) / k2; float y2 = center_y; float z2 = zin + k3*r; r = (-center_z - zin) / k3; float z3 = -center_z; float y3 = yin + k2*r; r = (center_z - zin) / k3; float z4 = center_z; float y4 = yin + k2*r; bool flag = 0; if (z1 <= center_z&&z1 >= -center_z)//cross y=0; { if (flag == 0) { iin = int(xin + center_x + 0.5); jin = 0; kin = int(z1 + center_z + 0.5); } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = 0; kout = int(z1 + center_z + 0.5); } flag = 1; } if (z2 <= center_z&&z2 >= -center_z)//y=max; { if (flag == 0) { iin = int(xin + center_x + 0.5); jin = Ysize - 1; kin = int(z2 + center_z + 0.5); } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = Ysize - 1; kout = int(z2 + center_z + 0.5); } flag = 1; } if (y3 <= center_y&&y3 >= -center_y)//z=0; { if (flag == 0) { iin = int(xin + center_x + 0.5); jin = int(y3 + center_y + 0.5); kin = 0; } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = int(y3 + center_y + 0.5); kout = 0; } flag = 1; } if (y4 <= center_y&&y4 >= -center_y) { if (flag == 0) { iin = int(xin + center_x + 0.5); jin = int(y4 + center_y + 0.5); kin = Zsize - 1; } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = int(y4 + center_y + 0.5); kout = Zsize - 1; } } //sorting intersection point by in, out order if (flag != 0) { if ((jout - jin)*k2 + (kout - kin)*k3<0) { int temp; temp = jin; jin = jout; jout = temp; temp = kin; kin = kout; kout = temp; } } } } ///case 2, vertical to y-axis if (k1 != 0 && k2 == 0 && k3 != 0) { if (yin >= -center_y&&yin <= center_y) { ////four crossing point float r = (-center_x - xin) / k1; float x1 = -center_x; float z1 = zin + k3*r;//x=0; r = (center_x - xin) / k1; float x2 = center_x; float z2 = zin + k3*r;//x=max r = (-center_z - zin) / k3; float z3 = -center_z; float x3 = xin + k1*r;//z=0; r = (center_z - zin) / k3; float z4 = center_z; float x4 = xin + k1*r;//z=max; bool flag = 0; if (z1 <= center_z&&z1 >= -center_z) { if (flag == 0) { iin = 0; jin = int(yin + center_y + 0.5); kin = int(z1 + center_z + 0.5); } if (flag == 1) { iout = 0; jout = int(yin + center_y + 
0.5); kout = int(z1 + center_z + 0.5); } flag = 1; }
if (z2 <= center_z&&z2 >= -center_z) { if (flag == 0) { iin = Xsize - 1; jin = int(yin + center_y + 0.5); kin = int(z2 + center_z + 0.5); } if (flag == 1) { iout = Xsize - 1; jout = int(yin + center_y + 0.5); kout = int(z2 + center_z + 0.5); } flag = 1; }
if (x3 <= center_x&&x3 >= -center_x) { if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = int(yin + center_y + 0.5); kin = 0; } if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = int(yin + center_y + 0.5); kout = 0; } flag = 1; }
if (x4 <= center_x&&x4 >= -center_x) { if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = int(yin + center_y + 0.5); kin = Zsize - 1; } if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = int(yin + center_y + 0.5); kout = Zsize - 1; } flag = 1; }
//sorting intersection point by in, out order
if (flag != 0) { if ((iout - iin)*k1 + (kout - kin)*k3<0) { int temp; temp = iin; iin = iout; iout = temp; temp = kin; kin = kout; kout = temp; } }
}
}
///case 3, vertical to z-axis
if (k1 != 0 && k2 != 0 && k3 == 0) {
if (zin >= -center_z&&zin <= center_z) {
////four crossing point
float r = (-center_x - xin) / k1; float x1 = -center_x; float y1 = yin + k2*r; //x=0;
r = (center_x - xin) / k1; float x2 = center_x; float y2 = yin + k2*r; //x=max;
r = (-center_y - yin) / k2; float y3 = -center_y; float x3 = xin + k1*r; //y=0;
r = (center_y - yin) / k2; float y4 = center_y; float x4 = xin + k1*r; //y=max;
bool flag = 0;
if (y1 <= center_y&&y1 >= -center_y) { if (flag == 0) { iin = 0; jin = int(y1 + center_y + 0.5); kin = int(zin + center_z + 0.5); } if (flag == 1) { iout = 0; jout = int(y1 + center_y + 0.5); kout = int(zin + center_z + 0.5); } flag = 1; }
if (y2 <= center_y&&y2 >= -center_y) { if (flag == 0) { iin = Xsize - 1; jin = int(y2 + center_y + 0.5); kin = int(zin + center_z + 0.5); } if (flag == 1) { iout = Xsize - 1; jout = int(y2 + center_y + 0.5); kout = int(zin + center_z + 0.5); } flag = 1; }
if (x3 <= center_x&&x3 >= -center_x) { if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = 0; kin = int(zin + center_z + 0.5); } if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = 0; kout = int(zin + center_z + 0.5); } flag = 1; }
if (x4 <= center_x&&x4 >= -center_x) { if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = Ysize - 1; kin = int(zin + center_z + 0.5); } if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = Ysize - 1; kout = int(zin + center_z + 0.5); } flag = 1; }
//sorting intersection point by in, out order
if (flag != 0) { if ((iout - iin)*k1 + (jout - jin)*k2<0) { int temp; temp = iin; iin = iout; iout = temp; temp = jin; jin = jout; jout = temp; } }
}
}
///case 4, vertical to plane IJ
if (abs(k1)<zero&&abs(k2)<zero&&abs(k3) >= zero) {
if (xin <= center_x&&xin >= -center_x&&yin <= center_y&&yin >= -center_y) { iin = int(xin + center_x + 0.5); iout = iin; jin = int(yin + center_y + 0.5); jout = jin; if (k3>0) { kin = 0; kout = Zsize - 1; } else { kin = Zsize - 1; kout = 0; } }
}
///case 5, vertical to IK plane
if (abs(k1)<zero&&abs(k2) >= zero&&abs(k3)<zero) {
if (xin >= -center_x&&xin <= center_x&&zin >= -center_z&&zin <= center_z) { iin = int(xin + center_x + 0.5); iout = iin; kin = int(zin + center_z + 0.5); kout = kin; if (k2>0) { jout = Ysize - 1; jin = 0; } else { jin = Ysize - 1; jout = 0; } }
}
///case 6, vertical to JK plane
if (abs(k1) >= zero&&abs(k2)<zero&&abs(k3)<zero) {
if (yin >= -center_y&&yin<center_y&&zin >= -center_z&&zin <= center_z) { jin = int(yin + center_y + 0.5); jout = jin; kin = int(zin + center_z + 0.5);
kout = kin; if (k1>0) { iout = Xsize - 1; iin = 0; } else { iin = Xsize - 1; iout = 0; } }
}
/// case 7, purely inclined
if (abs(k1) >= zero&&abs(k2) >= zero&&abs(k3) >= zero) {
/// six crossing point
float r;
float x1, x2, x3, x4, x5, x6; float y1, y2, y3, y4, y5, y6; float z1, z2, z3, z4, z5, z6;
r = (-center_x - xin) / k1; x1 = -center_x; y1 = yin + k2*r; z1 = zin + k3*r; //x=0
r = (center_x - xin) / k1; x2 = center_x; y2 = yin + k2*r; z2 = zin + k3*r; //x=max
r = (-center_y - yin) / k2; x3 = xin + k1*r; y3 = -center_y; z3 = zin + k3*r; //y=0;
r = (center_y - yin) / k2; x4 = xin + k1*r; y4 = center_y; z4 = zin + k3*r; //y=max
r = (-center_z - zin) / k3; x5 = xin + k1*r; y5 = yin + k2*r; z5 = -center_z; //z=0;
r = (center_z - zin) / k3; x6 = xin + k1*r; y6 = yin + k2*r; z6 = center_z; //z=max
bool flag = 0;
if (y1 <= center_y&&y1 >= -center_y&&z1 <= center_z&&z1 >= -center_z) { if (flag == 0) { iin = 0; jin = int(y1 + center_y + 0.5); kin = int(z1 + center_z + 0.5); } if (flag == 1) { iout = 0; jout = int(y1 + center_y + 0.5); kout = int(z1 + center_z + 0.5); } flag = 1; }
if (y2 <= center_y&&y2 >= -center_y&&z2 <= center_z&&z2 >= -center_z) { if (flag == 0) { iin = Xsize - 1; jin = int(y2 + center_y + 0.5); kin = int(z2 + center_z + 0.5); } if (flag == 1) { iout = Xsize - 1; jout = int(y2 + center_y + 0.5); kout = int(z2 + center_z + 0.5); } flag = 1; }
if (x3 <= center_x&&x3 >= -center_x&&z3 <= center_z&&z3 >= -center_z) { if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = 0; kin = int(z3 + center_z + 0.5); } if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = 0; kout = int(z3 + center_z + 0.5); } flag = 1; }
if (x4 <= center_x&&x4 >= -center_x&&z4 <= center_z&&z4 >= -center_z) { if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = Ysize - 1; kin = int(z4 + center_z + 0.5); } if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = Ysize - 1; kout = int(z4 + center_z + 0.5); } flag = 1; }
if (x5 <= center_x&&x5 >= -center_x&&y5 <= center_y&&y5 >= -center_y) { if (flag == 0) { iin = int(x5 + center_x + 0.5); jin = int(y5 + center_y + 0.5); kin = 0; } if (flag == 1) { iout = int(x5 + center_x + 0.5); jout = int(y5 + center_y + 0.5); kout = 0; } flag = 1; }
if (x6 <= center_x&&x6 >= -center_x&&y6 <= center_y&&y6 >= -center_y) { if (flag == 0) { iin = int(x6 + center_x + 0.5); jin = int(y6 + center_y + 0.5); kin = Zsize - 1; } if (flag == 1) { iout = int(x6 + center_x + 0.5); jout = int(y6 + center_y + 0.5); kout = Zsize - 1; } flag = 1; }
//sorting intersection point by in, out order
if ((iout - iin)*k1 + (jout - jin)*k2 + (kout - kin)*k3<0) { int temp; temp = iin; iin = iout; iout = temp; temp = jin; jin = jout; jout = temp; temp = kin; kin = kout; kout = temp; }
}
//////////////////////////////END OF CALCULATING IN AND OUT POINT ON REAL BOUNDARY////////////////////////////////
if ((iin - center_x - xin)*(iin - center_x - xout) + (jin - center_y - yin)*(jin - center_y - yout) + (kin - center_z - zin)*(kin - center_z - zout)<0 && (iin + jin + kin + iout + jout + kout) != 0 && !(iin == iout&&jin == jout&&kin == kout)) {
long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3;
ilast = iin; jlast = jin; klast = kin;
do {
if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; }
if (ilast == iout) { inext1 = ilast - 1e6; jnext1 = jlast; knext1 = klast; }
if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; }
if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; }
if (jlast == jout) { inext2 = ilast;
jnext2 = jlast - 1e6; knext2 = klast; }
if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; }
if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; }
if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1e6; }
if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; }
///determine which one is closer to integration path
float r, d1, d2, d3;
r = k1*inext1 - iin*k1 + k2*jnext1 - k2*jin + k3*knext1 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
d1 = sqrt((x - inext1)*(x - inext1) + (y - jnext1)*(y - jnext1) + (z - knext1)*(z - knext1));
r = k1*inext2 - iin*k1 + k2*jnext2 - k2*jin + k3*knext2 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
d2 = sqrt((x - inext2)*(x - inext2) + (y - jnext2)*(y - jnext2) + (z - knext2)*(z - knext2));
r = k1*inext3 - iin*k1 + k2*jnext3 - k2*jin + k3*knext3 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
d3 = sqrt((x - inext3)*(x - inext3) + (y - jnext3)*(y - jnext3) + (z - knext3)*(z - knext3));
//////End of calculation distance///////////////
ninvir[indexin + indexout*NoTheta*NoBeta] = index[iin + jin*Xsize + kin*Xsize*Ysize];
noutvir[indexin + indexout*NoTheta*NoBeta] = index[iout + jout*Xsize + kout*Xsize*Ysize];
if (d1 <= d2&&d1 <= d3) { pintvir[indexin + indexout*NoTheta*NoBeta] += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); ilast = inext1; }
if (d2<d1&&d2 <= d3) { pintvir[indexin + indexout*NoTheta*NoBeta] += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); jlast = jnext2; }
if (d3<d1&&d3<d2) { pintvir[indexin + indexout*NoTheta*NoBeta] += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); klast = knext3; }
} while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5);
}
indexin = indexin + blockDim.x*gridDim.x;
indexout = indexout + blockDim.y*gridDim.y;
}
}

__global__ void omni3dvirtual2(long Xsize, long Ysize, long Zsize, long* index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pint)
{
float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0;
//virtual boundary: an ellipsoid
int a = Xsize - 1; int b = Ysize - 1; int c = Zsize - 1;
float delttheta = PI / Zsize / 2; float deltbeta = PI / Xsize / 2;
float xin, yin, zin, xout, yout, zout, k1, k2, k3, x, y, z;
float r, d1, d2, d3;
int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2;
int iin, jin, kin, iout, jout, kout, indexin;
indexin = blockDim.x*blockIdx.x + threadIdx.x;
float thetain = (indexin / (2 * Zsize) - 1)*delttheta;
float betain = (blockDim.x*blockIdx.x + threadIdx.x - 2 * Zsize*(indexin / (2 * Zsize)) - 1)*deltbeta;
for (float thetaout = 0; thetaout<PI; thetaout += delttheta) {
for (float betaout = 0; betaout<2 * PI; betaout += deltbeta) {
xin = a*sin(thetain)*cos(betain); yin = b*sin(thetain)*sin(betain); zin = c*cos(thetain);
xout = a*sin(thetaout)*cos(betaout); yout = b*sin(thetaout)*sin(betaout); zout = c*cos(thetaout);
k1 = xout - xin; k2 = yout - yin; k3 = zout - zin;
/////case 1, vertical to x-axis
if (k1 == 0 && k2 != 0 && k3 != 0) {
if (xin >= -center_x&&xin <= center_x) {
////four crossing point;y=0;y=max;z=0;z=max;
r = (-center_y - yin) / k2; float y1 = -center_y; float z1 = zin + k3*r;
r =
(center_y - yin) / k2; float y2 = center_y; float z2 = zin + k3*r;
r = (-center_z - zin) / k3; float z3 = -center_z; float y3 = yin + k2*r;
r = (center_z - zin) / k3; float z4 = center_z; float y4 = yin + k2*r;
bool flag = 0;
if (z1 <= center_z&&z1 >= -center_z) //cross y=0;
{ if (flag == 0) { iin = int(xin + center_x + 0.5); jin = 0; kin = int(z1 + center_z + 0.5); } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = 0; kout = int(z1 + center_z + 0.5); } flag = 1; }
if (z2 <= center_z&&z2 >= -center_z) //y=max;
{ if (flag == 0) { iin = int(xin + center_x + 0.5); jin = Ysize - 1; kin = int(z2 + center_z + 0.5); } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = Ysize - 1; kout = int(z2 + center_z + 0.5); } flag = 1; }
if (y3 <= center_y&&y3 >= -center_y) //z=0;
{ if (flag == 0) { iin = int(xin + center_x + 0.5); jin = int(y3 + center_y + 0.5); kin = 0; } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = int(y3 + center_y + 0.5); kout = 0; } flag = 1; }
if (y4 <= center_y&&y4 >= -center_y) { if (flag == 0) { iin = int(xin + center_x + 0.5); jin = int(y4 + center_y + 0.5); kin = Zsize - 1; } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = int(y4 + center_y + 0.5); kout = Zsize - 1; } }
//sorting intersection point by in, out order
if (flag != 0) { if ((jout - jin)*k2 + (kout - kin)*k3<0) { int temp; temp = jin; jin = jout; jout = temp; temp = kin; kin = kout; kout = temp; } }
}
}
///case 2, vertical to y-axis
if (k1 != 0 && k2 == 0 && k3 != 0) {
if (yin >= -center_y&&yin <= center_y) {
////four crossing point
r = (-center_x - xin) / k1; float x1 = -center_x; float z1 = zin + k3*r; //x=0;
r = (center_x - xin) / k1; float x2 = center_x; float z2 = zin + k3*r; //x=max
r = (-center_z - zin) / k3; float z3 = -center_z; float x3 = xin + k1*r; //z=0;
r = (center_z - zin) / k3; float z4 = center_z; float x4 = xin + k1*r; //z=max;
bool flag = 0;
if (z1 <= center_z&&z1 >= -center_z) { if (flag == 0) { iin = 0; jin = int(yin + center_y + 0.5); kin = int(z1 + center_z + 0.5); } if (flag == 1) { iout = 0; jout = int(yin + center_y + 0.5); kout = int(z1 + center_z + 0.5); } flag = 1; }
if (z2 <= center_z&&z2 >= -center_z) { if (flag == 0) { iin = Xsize - 1; jin = int(yin + center_y + 0.5); kin = int(z2 + center_z + 0.5); } if (flag == 1) { iout = Xsize - 1; jout = int(yin + center_y + 0.5); kout = int(z2 + center_z + 0.5); } flag = 1; }
if (x3 <= center_x&&x3 >= -center_x) { if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = int(yin + center_y + 0.5); kin = 0; } if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = int(yin + center_y + 0.5); kout = 0; } flag = 1; }
if (x4 <= center_x&&x4 >= -center_x) { if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = int(yin + center_y + 0.5); kin = Zsize - 1; } if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = int(yin + center_y + 0.5); kout = Zsize - 1; } flag = 1; }
//sorting intersection point by in, out order
if (flag != 0) { if ((iout - iin)*k1 + (kout - kin)*k3<0) { int temp; temp = iin; iin = iout; iout = temp; temp = kin; kin = kout; kout = temp; } }
}
}
///case 3, vertical to z-axis
if (k1 != 0 && k2 != 0 && k3 == 0) {
if (zin >= -center_z&&zin <= center_z) {
////four crossing point
r = (-center_x - xin) / k1; float x1 = -center_x; float y1 = yin + k2*r; //x=0;
r = (center_x - xin) / k1; float x2 = center_x; float y2 = yin + k2*r; //x=max;
r = (-center_y - yin) / k2; float y3 = -center_y; float x3 = xin + k1*r; //y=0;
r = (center_y - yin) / k2; float y4 = center_y; float x4 = xin + k1*r; //y=max;
bool flag = 0;
if (y1
<= center_y&&y1 >= -center_y) { if (flag == 0) { iin = 0; jin = int(y1 + center_y + 0.5); kin = int(zin + center_z + 0.5); } if (flag == 1) { iout = 0; jout = int(y1 + center_y + 0.5); kout = int(zin + center_z + 0.5); } flag = 1; }
if (y2 <= center_y&&y2 >= -center_y) { if (flag == 0) { iin = Xsize - 1; jin = int(y2 + center_y + 0.5); kin = int(zin + center_z + 0.5); } if (flag == 1) { iout = Xsize - 1; jout = int(y2 + center_y + 0.5); kout = int(zin + center_z + 0.5); } flag = 1; }
if (x3 <= center_x&&x3 >= -center_x) { if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = 0; kin = int(zin + center_z + 0.5); } if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = 0; kout = int(zin + center_z + 0.5); } flag = 1; }
if (x4 <= center_x&&x4 >= -center_x) { if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = Ysize - 1; kin = int(zin + center_z + 0.5); } if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = Ysize - 1; kout = int(zin + center_z + 0.5); } flag = 1; }
//sorting intersection point by in, out order
if (flag != 0) { if ((iout - iin)*k1 + (jout - jin)*k2<0) { int temp; temp = iin; iin = iout; iout = temp; temp = jin; jin = jout; jout = temp; } }
}
}
///case 4, vertical to plane IJ
if (abs(k1)<zero&&abs(k2)<zero&&abs(k3) >= zero) {
if (xin <= center_x&&xin >= -center_x&&yin <= center_y&&yin >= -center_y) { iin = int(xin + center_x + 0.5); iout = iin; jin = int(yin + center_y + 0.5); jout = jin; if (k3>0) { kin = 0; kout = Zsize - 1; } else { kin = Zsize - 1; kout = 0; } }
}
///case 5, vertical to IK plane
if (abs(k1)<zero&&abs(k2) >= zero&&abs(k3)<zero) {
if (xin >= -center_x&&xin <= center_x&&zin >= -center_z&&zin <= center_z) { iin = int(xin + center_x + 0.5); iout = iin; kin = int(zin + center_z + 0.5); kout = kin; if (k2>0) { jout = Ysize - 1; jin = 0; } else { jin = Ysize - 1; jout = 0; } }
}
///case 6, vertical to JK plane
if (abs(k1) >= zero&&abs(k2)<zero&&abs(k3)<zero) {
if (yin >= -center_y&&yin<center_y&&zin >= -center_z&&zin <= center_z) { jin = int(yin + center_y + 0.5); jout = jin; kin = int(zin + center_z + 0.5); kout = kin; if (k1>0) { iout = Xsize - 1; iin = 0; } else { iin = Xsize - 1; iout = 0; } }
}
/// case 7, purely inclined
if (abs(k1) >= zero&&abs(k2) >= zero&&abs(k3) >= zero) {
/// six crossing point
float x1, x2, x3, x4, x5, x6; float y1, y2, y3, y4, y5, y6; float z1, z2, z3, z4, z5, z6;
r = (-center_x - xin) / k1; x1 = -center_x; y1 = yin + k2*r; z1 = zin + k3*r; //x=0
r = (center_x - xin) / k1; x2 = center_x; y2 = yin + k2*r; z2 = zin + k3*r; //x=max
r = (-center_y - yin) / k2; x3 = xin + k1*r; y3 = -center_y; z3 = zin + k3*r; //y=0;
r = (center_y - yin) / k2; x4 = xin + k1*r; y4 = center_y; z4 = zin + k3*r; //y=max
r = (-center_z - zin) / k3; x5 = xin + k1*r; y5 = yin + k2*r; z5 = -center_z; //z=0;
r = (center_z - zin) / k3; x6 = xin + k1*r; y6 = yin + k2*r; z6 = center_z; //z=max
bool flag = 0;
if (y1 <= center_y&&y1 >= -center_y&&z1 <= center_z&&z1 >= -center_z) { if (flag == 0) { iin = 0; jin = int(y1 + center_y + 0.5); kin = int(z1 + center_z + 0.5); } if (flag == 1) { iout = 0; jout = int(y1 + center_y + 0.5); kout = int(z1 + center_z + 0.5); } flag = 1; }
if (y2 <= center_y&&y2 >= -center_y&&z2 <= center_z&&z2 >= -center_z) { if (flag == 0) { iin = Xsize - 1; jin = int(y2 + center_y + 0.5); kin = int(z2 + center_z + 0.5); } if (flag == 1) { iout = Xsize - 1; jout = int(y2 + center_y + 0.5); kout = int(z2 + center_z + 0.5); } flag = 1; }
if (x3 <= center_x&&x3 >= -center_x&&z3 <= center_z&&z3 >= -center_z) { if (flag == 0) { iin = int(x3 + center_x +
0.5); jin = 0; kin = int(z3 + center_z + 0.5); } if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = 0; kout = int(z3 + center_z + 0.5); } flag = 1; } if (x4 <= center_x&&x4 >= -center_x&&z4 <= center_z&&z4 >= -center_z) { if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = Ysize - 1; kin = int(z4 + center_z + 0.5); } if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = Ysize - 1; kout = int(z4 + center_z + 0.5); } flag = 1; } if (x5 <= center_x&&x5 >= -center_x&&y5 <= center_y&&y5 >= -center_y) { if (flag == 0) { iin = int(x5 + center_x + 0.5); jin = int(y5 + center_y + 0.5); kin = 0; } if (flag == 1) { iout = int(x5 + center_x + 0.5); jout = int(y5 + center_y + 0.5); kout = 0; } flag = 1; } if (x6 <= center_x&&x6 >= -center_x&&y6 <= center_y&&y6 >= -center_y) { if (flag == 0) { iin = int(x6 + center_x + 0.5); jin = int(y6 + center_y + 0.5); kin = Zsize - 1; } if (flag == 1) { iout = int(x6 + center_x + 0.5); jout = int(y6 + center_y + 0.5); kout = Zsize - 1; } flag = 1; } //sorting intersection point by in, out order if ((iout - iin)*k1 + (jout - jin)*k2 + (kout - kin)*k3<0) { int temp; temp = iin; iin = temp; iout = temp; temp = jin; jin = jout; jout = temp; temp = kin; kin = kout; kout = temp; } } //////////////////////////////END OF CALCULATING IN AND OUT POINT ON REAL BOUNDARY//////////////////////////////// if ((iin - center_x - xin)*(iin - center_x - xout) + (jin - center_y - yin)*(jin - center_y - yout) + (kin - center_z - zin)*(kin - center_z - zout)<0 && (iin + jin + kin + iout + jout + kout) != 0 && !(iin == iout&&jin == jout&&kin == kout)) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; ilast = iin; jlast = jin; klast = kin; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 1e6; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 1e6; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1e6; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to longegration path r = k1*inext1 - iin*k1 + k2*jnext1 - k2*jin + k3*knext1 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r; d1 = sqrt((x - inext1)*(x - inext1) + (y - jnext1)*(y - jnext1) + (z - knext1)*(z - knext1)); r = k1*inext2 - iin*k1 + k2*jnext2 - k2*jin + k3*knext2 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r; d2 = sqrt((x - inext2)*(x - inext2) + (y - jnext2)*(y - jnext2) + (z - knext2)*(z - knext2)); r = k1*inext3 - iin*k1 + k2*jnext3 - k2*jin + k3*knext3 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r; d3 = sqrt((x - inext3)*(x - inext3) + (y - jnext3)*(y - jnext3) + (z - knext3)*(z - knext3)); //////End of calculation distance/////////////// int nin, nout; nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize*kout*Xsize*Ysize]; if (d1 <= d2&&d1 <= d3) { pint[nin + nout*n] += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); ilast = inext1; } if (d2<d1&&d2 <= d3) { pint[nin + nout*n] += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + 
knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); jlast = jnext2; } if (d3<d1&&d3<d2) { pint[nin + nout*n] += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); klast = knext3; } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5); } } } } __global__ void BCiteration(long Xsize, long Ysize, long Zsize, float* pint, float *pcount, float *p, float* pn, int itrNo) { long n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; int iin, jin, kin, iout, jout, kout, indexin, indexout; long nout = blockDim.x*blockIdx.x + threadIdx.x; for (int iteration = 0; iteration<itrNo; iteration++) { nout = blockDim.x*blockIdx.x + threadIdx.x; while (nout<n) { if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } pn[iout + jout*Xsize + kout*Xsize*Ysize] = 0; pcount[nout] = 0; for (int nin = 0; nin<n; nin++) { if (nin <= Xsize*Ysize - 1) { kin = 0; jin = nin / Xsize; iin = nin - Xsize*jin; } if (nin>Xsize*Ysize - 1 && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iin = Xsize - 1; jin = (nin - Xsize*Ysize) / (Zsize - 1); kin = nin - Xsize*Ysize - jin*(Zsize - 1) + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kin = Zsize - 1; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - jin*(Xsize - 1); iin = Xsize - 2 - iin; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jin = 0; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 
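////////////////////////////////////////////////////////////////////
//                                                                  //
// boundary-pressure iteration kernels (BCiteration and variants)   //
//                                                                  //
// The six box faces are flattened into one linear boundary index   //
// in [0, n), n = 2*Xsize*Ysize + 2*Ysize*(Zsize-2) +               //
// 2*(Xsize-2)*(Zsize-2). The repeated if-chains decode that index  //
// back to (i,j,k), walking the faces in the order k=0, i=Xsize-1,  //
// k=Zsize-1, j=0, i=0, j=Ysize-1. Each boundary node averages      //
// p(in) + pint(in,out) over every path that reaches it, Jacobi     //
// style: pn accumulates the new value, then p is overwritten from  //
// pn. Note that __syncthreads() synchronises within a block only,  //
// so the sweep over iterations assumes the launch keeps the two    //
// phases consistent (e.g. a single block) or tolerates stale p.    //
//                                                                  //
////////////////////////////////////////////////////////////////////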
1)*Ysize) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kin*(Xsize - 1); iin = Xsize - 2 - iin; kin = kin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iin = 0; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jin*(Zsize - 2); kin = Zsize - 2 - kin; jin = jin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jin = Ysize - 1; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kin*(Xsize - 2); kin = Zsize - 2 - kin; iin = iin + 1; } /////////// if (pint[nin + nout*n] != 0) { pn[iout + jout*Xsize + kout*Xsize*Ysize] += p[iin + jin*Xsize + kin*Xsize*Ysize] + pint[nin + nout*n]; pcount[nout]++; } } pn[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize] / pcount[nout]; //p[iout+jout*Xsize+kout*Xsize*Ysize]=pn[iout+jout*Xsize+kout*Xsize*Ysize]; //pn[iout+jout*Xsize+kout*Xsize*Ysize]=0; nout = nout + blockDim.x*gridDim.x; //nin=nin+blockDim.y*gridDim.y; } nout = blockDim.x*blockIdx.x + threadIdx.x; while (nout<n) { if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } p[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + 
kout*Xsize*Ysize]; nout = nout + blockDim.x*gridDim.x; } __syncthreads(); } } __global__ void BCiterationFixedBC(long Xsize, long Ysize, long Zsize, float* pint, float *pcount, float *p, float* pn, int itrNo) { long n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; int iin, jin, kin, iout, jout, kout, indexin, indexout; long nout = blockDim.x*blockIdx.x + threadIdx.x; for (int iteration = 0; iteration<itrNo; iteration++) { nout = blockDim.x*blockIdx.x + threadIdx.x; while (nout<n) { if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } pn[iout + jout*Xsize + kout*Xsize*Ysize] = 0; pcount[nout] = 0; for (int nin = 0; nin<n; nin++) { if (nin <= Xsize*Ysize - 1) { kin = 0; jin = nin / Xsize; iin = nin - Xsize*jin; } if (nin>Xsize*Ysize - 1 && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iin = Xsize - 1; jin = (nin - Xsize*Ysize) / (Zsize - 1); kin = nin - Xsize*Ysize - jin*(Zsize - 1) + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kin = Zsize - 1; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - jin*(Xsize - 1); iin = Xsize - 2 - iin; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jin = 0; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kin*(Xsize - 1); iin = Xsize - 2 - iin; kin = kin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + 
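// BCiterationFixedBC: identical update to BCiteration, except that the
// accumulation skips contributions when jout == Ysize - 1 (the in-code
// comment calls this keeping the boundary pressure fixed on the top surface).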
(Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iin = 0; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jin*(Zsize - 2); kin = Zsize - 2 - kin; jin = jin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jin = Ysize - 1; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kin*(Xsize - 2); kin = Zsize - 2 - kin; iin = iin + 1; } /////////// /// boundary pressure fixed on the top surface.... if (pint[nin + nout*n] != 0&&jout!=Ysize-1) { pn[iout + jout*Xsize + kout*Xsize*Ysize] += p[iin + jin*Xsize + kin*Xsize*Ysize] + pint[nin + nout*n]; pcount[nout]++; } } pn[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize] / pcount[nout]; //p[iout+jout*Xsize+kout*Xsize*Ysize]=pn[iout+jout*Xsize+kout*Xsize*Ysize]; //pn[iout+jout*Xsize+kout*Xsize*Ysize]=0; nout = nout + blockDim.x*gridDim.x; //nin=nin+blockDim.y*gridDim.y; } nout = blockDim.x*blockIdx.x + threadIdx.x; while (nout<n) { if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } p[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize]; nout = nout + blockDim.x*gridDim.x; } __syncthreads(); } } __global__ void BCiterationWeighted(long Xsize, long Ysize, long Zsize, float* pint, float *pweight, float *p, float* pn, int itrNo) { long n = Xsize*Ysize 
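// BCiterationWeighted: as BCiteration, but each path's contribution is
// multiplied by pweight[nin + nout*n] and the average is normalised by the
// summed weights (guarded against a zero weight sum).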
* 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; int iin, jin, kin, iout, jout, kout, indexin, indexout; long nout = blockDim.x*blockIdx.x + threadIdx.x; for (int iteration = 0; iteration<itrNo; iteration++) { nout = blockDim.x*blockIdx.x + threadIdx.x; while (nout<n) { if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } pn[iout + jout*Xsize + kout*Xsize*Ysize] = 0; float pcounttmp = 0; for (int nin = 0; nin<n; nin++) { if (nin <= Xsize*Ysize - 1) { kin = 0; jin = nin / Xsize; iin = nin - Xsize*jin; } if (nin>Xsize*Ysize - 1 && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iin = Xsize - 1; jin = (nin - Xsize*Ysize) / (Zsize - 1); kin = nin - Xsize*Ysize - jin*(Zsize - 1) + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kin = Zsize - 1; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - jin*(Xsize - 1); iin = Xsize - 2 - iin; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jin = 0; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kin*(Xsize - 1); iin = Xsize - 2 - iin; kin = kin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iin = 0; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kin = nin - Xsize*Ysize - Ysize*(Zsize - 
1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jin*(Zsize - 2); kin = Zsize - 2 - kin; jin = jin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jin = Ysize - 1; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kin*(Xsize - 2); kin = Zsize - 2 - kin; iin = iin + 1; } /////////// if (pint[nin + nout*n] != 0) { pn[iout + jout*Xsize + kout*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint[nin + nout*n])*pweight[nin + nout*n]; pcounttmp += pweight[nin + nout*n]; } } if (pcounttmp != 0) { pn[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize] / pcounttmp; } //p[iout+jout*Xsize+kout*Xsize*Ysize]=pn[iout+jout*Xsize+kout*Xsize*Ysize]; //pn[iout+jout*Xsize+kout*Xsize*Ysize]=0; nout = nout + blockDim.x*gridDim.x; //nin=nin+blockDim.y*gridDim.y; } nout = blockDim.x*blockIdx.x + threadIdx.x; while (nout<n) { if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } p[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize]; pn[iout + jout*Xsize + kout*Xsize*Ysize] = 0; nout = nout + blockDim.x*gridDim.x; } __syncthreads(); } } __global__ void BCiterationWeightedFixedBC(long Xsize, long Ysize, long Zsize, float* pint, float *pweight, float *p, float* pn, int itrNo) { long n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; int iin, jin, kin, iout, jout, kout, indexin, indexout; long nout = blockDim.x*blockIdx.x + threadIdx.x; for (int 
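// BCiterationWeightedFixedBC: weighted variant with a fixed-face condition;
// here the skipped face is jout == 0 (the plain FixedBC kernel skips
// jout == Ysize - 1 instead).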
iteration = 0; iteration<itrNo; iteration++) { nout = blockDim.x*blockIdx.x + threadIdx.x; while (nout<n) { if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } pn[iout + jout*Xsize + kout*Xsize*Ysize] = 0; float pcounttmp = 0; for (int nin = 0; nin<n; nin++) { if (nin <= Xsize*Ysize - 1) { kin = 0; jin = nin / Xsize; iin = nin - Xsize*jin; } if (nin>Xsize*Ysize - 1 && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iin = Xsize - 1; jin = (nin - Xsize*Ysize) / (Zsize - 1); kin = nin - Xsize*Ysize - jin*(Zsize - 1) + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kin = Zsize - 1; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - jin*(Xsize - 1); iin = Xsize - 2 - iin; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jin = 0; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kin*(Xsize - 1); iin = Xsize - 2 - iin; kin = kin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iin = 0; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jin*(Zsize - 2); kin = Zsize - 2 - kin; jin = jin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + 
(Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jin = Ysize - 1; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kin*(Xsize - 2); kin = Zsize - 2 - kin; iin = iin + 1; } /////////// if (pint[nin + nout*n] != 0&&jout!=0) { pn[iout + jout*Xsize + kout*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint[nin + nout*n])*pweight[nin + nout*n]; pcounttmp += pweight[nin + nout*n]; } } if (pcounttmp != 0) { pn[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize] / pcounttmp; } //p[iout+jout*Xsize+kout*Xsize*Ysize]=pn[iout+jout*Xsize+kout*Xsize*Ysize]; //pn[iout+jout*Xsize+kout*Xsize*Ysize]=0; nout = nout + blockDim.x*gridDim.x; //nin=nin+blockDim.y*gridDim.y; } nout = blockDim.x*blockIdx.x + threadIdx.x; while (nout<n) { if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } p[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize]; pn[iout + jout*Xsize + kout*Xsize*Ysize] = 0; nout = nout + blockDim.x*gridDim.x; } __syncthreads(); } } __global__ void omni3dparallellinesESInner(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float*p, float*pn, float*pcountinner) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; long angle = 
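////////////////////////////////////////////////////////////////////
//                                                                  //
// omnidirectional parallel-line integration kernels                //
//                                                                  //
// For every direction (k1,k2,k3) read from k1_d/k2_d/k3_d, a 2-D   //
// raster of parallel lines with pitch `linespacing` is swept       //
// across the volume (NoGrid is scaled by 1.732 ~ sqrt(3) so the    //
// raster covers the box diagonal). Each line is clipped to the     //
// box by cross2point() and the material acceleration DuDt/DvDt/    //
// DwDt is integrated along it by the bodyIntegral* helpers to      //
// update the interior pressure estimate in p/pn.                   //
//                                                                  //
// One possible launch (a sketch only; block/grid sizes are not     //
// prescribed anywhere in this file):                               //
//   dim3 block(256, 1);                                            //
//   dim3 grid((NoGrid*NoGrid + 255) / 256, NoAngles);              //
//   omni3dparallellinesESInner<<<grid, block>>>(...);              //
//                                                                  //
////////////////////////////////////////////////////////////////////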
threadIdx.y + blockDim.y*blockIdx.y; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.732 / linespacing; int point = threadIdx.x + blockDim.x*blockIdx.x; //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid*NoGrid&&angle<NoAngles) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = 0; if (__sinf(theta) != 0) { phi = asinf(k2 / __sinf(theta)); if (k1 / __sinf(theta)<0) { phi = -phi + PI; } } else { phi = 0; } float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi); float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi); float z = -xprime*__sinf(theta); //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { bodyIntegralInner(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, p, pn, pcountinner); } } point += blockDim.x*gridDim.x; angle += blockDim.y*gridDim.y; } } __global__ void omni3dparallellinesESInnerStepCount(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float*p, float*pn, float*pcountinner, long* IntegrationSteps) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; long angle = threadIdx.y + blockDim.y*blockIdx.y; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.732 / linespacing; int point = threadIdx.x + blockDim.x*blockIdx.x; //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid*NoGrid&&angle<NoAngles) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = 0; if (__sinf(theta) != 0) { phi = asinf(k2 / __sinf(theta)); if (k1 / __sinf(theta)<0) { phi = -phi + PI; } } else { phi = 0; } float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi); float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi); float z = -xprime*__sinf(theta); //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + 
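// omni3dparallellinesESInnerStepCount: same as omni3dparallellinesESInner,
// but bodyIntegralInnerStepCount additionally records the number of
// integration steps taken on each path in IntegrationSteps.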
kout*Xsize*Ysize]; if (nin != nout) { bodyIntegralInnerStepCount(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, p, pn, pcountinner, IntegrationSteps); } } point += blockDim.x*gridDim.x; } } __global__ void omni3dparallellinesESInnerWeighted(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* curl, float*p, float*pn, float*pcountinner) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; long angle = threadIdx.y + blockDim.y*blockIdx.y; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.732 / linespacing; int point = threadIdx.x + blockDim.x*blockIdx.x; //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid*NoGrid&&angle<NoAngles) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = 0; if (__sinf(theta) != 0) { phi = asinf(k2 / __sinf(theta)); if (k1 / __sinf(theta)<0) { phi = -phi + PI; } } else { phi = 0; } float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi); float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi); float z = -xprime*__sinf(theta); //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { bodyIntegralInnerWeighted(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, curl, p, pn, pcountinner); } } point += blockDim.x*gridDim.x; angle += blockDim.y*gridDim.y; } } __global__ void omni3dparallellinesESInnerWeightedMiniCurl(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* curl, float*p, float*pn, float*pcountinner) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; long angle = threadIdx.y + blockDim.y*blockIdx.y; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.732 / linespacing; int point = threadIdx.x + blockDim.x*blockIdx.x; //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid*NoGrid&&angle<NoAngles) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = 
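// omni3dparallellinesESInnerWeighted / ...WeightedMiniCurl: same line
// generation, but the curl of the material acceleration is passed through to
// the bodyIntegral* helpers, presumably to down-weight (or locally reroute)
// paths crossing strongly rotational regions; see calCurlofMaterialAcc below.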
acosf(k3);
float phi = 0;
if (__sinf(theta) != 0) {
    phi = asinf(k2 / __sinf(theta));
    if (k1 / __sinf(theta) < 0) { phi = -phi + PI; }
}
else { phi = 0; }
float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi);
float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi);
float z = -xprime*__sinf(theta);
//float k1=__sinf(theta)*__cosf(phi);
//float k2=__sinf(theta)*__sinf(phi);
//float k3=__cosf(theta);
int iin, jin, kin, iout, jout, kout;
cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout);
int nin, nout;
if (iin >= 0 && iin < Xsize && jin >= 0 && jin < Ysize && kin >= 0 && kin < Zsize && iout >= 0 && iout < Xsize && jout >= 0 && jout < Ysize && kout >= 0 && kout < Zsize) {
    nin = index[iin + jin*Xsize + kin*Xsize*Ysize];
    nout = index[iout + jout*Xsize + kout*Xsize*Ysize];
    if (nin != nout) {
        bodyIntegralInnerMiniCurl(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, curl, p, pn, pcountinner);
    }
}
point += blockDim.x*gridDim.x;
angle += blockDim.y*gridDim.y;
}
}

__global__ void omni3dparallellinesESInnerSelect(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* curl, float*p, float*pn, float*pcountinner, float threshold)
{
    int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2;
    float center_x = (Xsize - 1) / 2.0;
    float center_y = (Ysize - 1) / 2.0;
    float center_z = (Zsize - 1) / 2.0;
    long angle = threadIdx.y + blockDim.y*blockIdx.y;
    int NoGrid = Xsize;
    if (NoGrid < Ysize) { NoGrid = Ysize; }
    if (NoGrid < Zsize) { NoGrid = Zsize; }
    NoGrid = NoGrid*1.732 / linespacing;
    int point = threadIdx.x + blockDim.x*blockIdx.x;
    //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid;
    while (point < NoGrid*NoGrid && angle < NoAngles) {
        float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing;
        float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing;
        float k1, k2, k3;
        k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle];
        float theta = acosf(k3);
        float phi = 0;
        if (__sinf(theta) != 0) {
            phi = asinf(k2 / __sinf(theta));
            if (k1 / __sinf(theta) < 0) { phi = -phi + PI; }
        }
        else { phi = 0; }
        float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi);
        float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi);
        float z = -xprime*__sinf(theta);
        //float k1=__sinf(theta)*__cosf(phi);
        //float k2=__sinf(theta)*__sinf(phi);
        //float k3=__cosf(theta);
        int iin, jin, kin, iout, jout, kout;
        cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout);
        int nin, nout;
        if (iin >= 0 && iin < Xsize && jin >= 0 && jin < Ysize && kin >= 0 && kin < Zsize && iout >= 0 && iout < Xsize && jout >= 0 && jout < Ysize && kout >= 0 && kout < Zsize) {
            nin = index[iin + jin*Xsize + kin*Xsize*Ysize];
            nout = index[iout + jout*Xsize + kout*Xsize*Ysize];
            if (nin != nout) {
                bodyIntegralInnerSelect(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, p, pn, curl, pcountinner, threshold);
            }
        }
        point += blockDim.x*gridDim.x;
        angle += blockDim.y*gridDim.y;
    }
}

__global__ void omni3dparallellinesESInnerSelectFixedBC(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* curl,
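// omni3dparallellinesESInnerSelect / ...SelectFixedBC: as the weighted
// variants, with an explicit curl threshold forwarded to
// bodyIntegralInnerSelect*, presumably to reject integration paths whose
// local curl exceeds it.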
float*p, float*pn, float*pcountinner, float threshold) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; long angle = threadIdx.y + blockDim.y*blockIdx.y; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.732 / linespacing; int point = threadIdx.x + blockDim.x*blockIdx.x; //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid*NoGrid&&angle<NoAngles) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = 0; if (__sinf(theta) != 0) { phi = asinf(k2 / __sinf(theta)); if (k1 / __sinf(theta)<0) { phi = -phi + PI; } } else { phi = 0; } float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi); float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi); float z = -xprime*__sinf(theta); //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && iin<Xsize&&jin>=0&&jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize && kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { bodyIntegralInnerSelectFixedBC(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, p, pn, curl, pcountinner, threshold); } } point += blockDim.x*gridDim.x; angle += blockDim.y*gridDim.y; } } __global__ void omni3dparallellinesESInnerSelect2(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float*p, float*pn, float*pcountinner) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; long angle = threadIdx.y + blockDim.y*blockIdx.y; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.732 / linespacing; int point = threadIdx.x + blockDim.x*blockIdx.x; //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid*NoGrid&&angle<NoAngles) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = 0; if (__sinf(theta) != 0) { phi = asinf(k2 / __sinf(theta)); if (k1 / __sinf(theta)<0) { phi = -phi + PI; } } else { phi = 0; } float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi); float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi); float z = -xprime*__sinf(theta); //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, 
&kout);
int nin, nout;
if (iin >= 0 && iin < Xsize && jin >= 0 && jin < Ysize && kin >= 0 && kin < Zsize && iout >= 0 && iout < Xsize && jout >= 0 && jout < Ysize && kout >= 0 && kout < Zsize && (phi < PI / 4 || phi > 3 * PI / 4)) {
    nin = index[iin + jin*Xsize + kin*Xsize*Ysize];
    nout = index[iout + jout*Xsize + kout*Xsize*Ysize];
    if (nin != nout) {
        bodyIntegralInner(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, p, pn, pcountinner);
    }
}
point += blockDim.x*gridDim.x;
angle += blockDim.y*gridDim.y;
}
}

//omni2dparallellinesOnFaceInner: sweeps in-plane lines (k3 = 0) across the
//two z-faces of the volume only
__global__ void omni2dparallellinesOnFaceInner(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float*p, float*pn, float*pcountinner)
{
    int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2;
    float center_x = (Xsize - 1) / 2.0;
    float center_y = (Ysize - 1) / 2.0;
    float center_z = (Zsize - 1) / 2.0;
    int NoGrid = Xsize;
    if (NoGrid < Ysize) { NoGrid = Ysize; }
    if (NoGrid < Zsize) { NoGrid = Zsize; }
    NoGrid = NoGrid*1.414 / linespacing;
    long angle = threadIdx.y + blockDim.y*blockIdx.y;
    int point = threadIdx.x + blockDim.x*blockIdx.x;
    //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid;
    while (point < NoGrid && angle < NoAngles) {
        float theta = angle * 2 * PI / NoAngles;
        ///on XY face
        float k1 = __cosf(theta);
        float k2 = __sinf(theta);
        float k3 = 0;
        float x = __sinf(theta)*(point - NoGrid / 2)*linespacing;
        float y = __cosf(theta)*(point - NoGrid / 2)*linespacing;
        float z = -Zsize / 2.0;
        int iin, jin, kin, iout, jout, kout;
        cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout);
        int nin, nout;
        if (iin >= 0 && iin < Xsize && jin >= 0 && jin < Ysize && kin >= 0 && kin < Zsize && iout >= 0 && iout < Xsize && jout >= 0 && jout < Ysize && kout >= 0 && kout < Zsize) {
            nin = index[iin + jin*Xsize + kin*Xsize*Ysize];
            nout = index[iout + jout*Xsize + kout*Xsize*Ysize];
            if (nin != nout) {
                bodyIntegralInner(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, p, pn, pcountinner);
            }
        }
        ///on XY face 2
        z = Zsize / 2.0;
        cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout);
        if (iin >= 0 && iin < Xsize && jin >= 0 && jin < Ysize && kin >= 0 && kin < Zsize && iout >= 0 && iout < Xsize && jout >= 0 && jout < Ysize && kout >= 0 && kout < Zsize) {
            nin = index[iin + jin*Xsize + kin*Xsize*Ysize];
            nout = index[iout + jout*Xsize + kout*Xsize*Ysize];
            if (nin != nout) {
                bodyIntegralInner(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, p, pn, pcountinner);
            }
        }
        point += blockDim.x*gridDim.x;
    }
}

__global__ void omni3dparallellinesESInner2(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float*p, float*pn, float*pcountinner)
{
    int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2;
    float center_x = (Xsize - 1) / 2.0;
    float center_y = (Ysize - 1) / 2.0;
    float center_z = (Zsize - 1) / 2.0;
    long angle = threadIdx.y + blockDim.y*blockIdx.y;
    int NoGrid = Xsize;
    if (NoGrid < Ysize) { NoGrid = Ysize; }
    if (NoGrid < Zsize) { NoGrid = Zsize; }
    NoGrid = NoGrid*1.732 / linespacing;
    int point = threadIdx.x + blockDim.x*blockIdx.x;
    //float
spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid*NoGrid&&angle<NoAngles) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = 0; if (__sinf(theta) != 0) { phi = asinf(k2 / __sinf(theta)); if (k1 / __sinf(theta)<0) { phi = -phi + PI; } } else { phi = 0; } float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi); float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi); float z = -xprime*__sinf(theta); //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { bodyIntegralInner2(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, p, pn, pcountinner); } } point += blockDim.x*gridDim.x; angle += blockDim.y*gridDim.y; } } __global__ void devidecountInner(long Xsize, long Ysize, long Zsize, float* p, float* pn, float* pcountinner) { long tid = threadIdx.x + blockDim.x*blockIdx.x; while (tid<Xsize*Ysize*Zsize) { if (pcountinner[tid]>0) { pn[tid] = pn[tid] / pcountinner[tid]; p[tid] = pn[tid]; pn[tid] = 0; } tid += blockDim.x*gridDim.x; } } __global__ void BCiterationvirtualgrid(long Xsize, long Ysize, long Zsize, int NoTheta, int NoBeta, long* index, long* ninvir, long *noutvir, float* pintvir, float*p, float *pn, int Noitr) { float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; //virtual boundary an ellipsoid int a = Xsize - 1; int b = Ysize - 1; int c = Zsize - 1; float delttheta = PI / NoTheta; float deltbeta = 2 * PI / NoBeta; float xin, yin, zin, xout, yout, zout, k1, k2, k3, x, y, z; int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; int iin, jin, kin, iout, jout, kout, indexin, indexout; for (int iteration = 0; iteration<Noitr; iteration++) { indexin = blockDim.x*blockIdx.x + threadIdx.x; indexout = blockDim.y*blockIdx.y + threadIdx.y; while (indexin<int(PI / delttheta)*int(PI / deltbeta) * 2 && indexout<int(PI / delttheta)*int(PI / deltbeta) * 2) { int nin, nout; nin = ninvir[indexin + indexout*NoTheta*NoBeta]; nout = noutvir[indexin + indexout*NoTheta*NoBeta]; long iout, jout, kout, iin, jin, kin; if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = 
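// BCiterationvirtualgrid: boundary iteration driven by chords of a virtual
// ellipsoid boundary (see omni3virtualcpu below); ninvir/noutvir appear to
// give, for each (theta, beta) chord, the real-boundary entry and exit
// nodes, and pintvir holds the integral along that chord.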
(nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } if (nin <= Xsize*Ysize - 1) { kin = 0; jin = nin / Xsize; iin = nin - Xsize*jin; } if (nin>Xsize*Ysize - 1 && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iin = Xsize - 1; jin = (nin - Xsize*Ysize) / (Zsize - 1); kin = nin - Xsize*Ysize - jin*(Zsize - 1) + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kin = Zsize - 1; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - jin*(Xsize - 1); iin = Xsize - 2 - iin; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jin = 0; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kin*(Xsize - 1); iin = Xsize - 2 - iin; kin = kin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iin = 0; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jin*(Zsize - 2); kin = Zsize - 2 - kin; jin = jin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jin = Ysize - 1; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kin*(Xsize - 2); kin = Zsize - 2 - kin; iin = iin + 1; } int beta = 0; if (pintvir[indexin + indexout*NoTheta*NoBeta] != 0) { pn[iout + jout*Xsize + kout*Xsize*Ysize] = (pn[iout + jout*Xsize + kout*Xsize*Ysize] + p[iin + jin*Xsize + kin*Xsize*Ysize] + pintvir[indexin + indexout*NoTheta*NoBeta])*0.5; } indexin = indexin + blockDim.x*gridDim.x; indexout = indexout + blockDim.y*gridDim.y; } } } __global__ void calCurlofMaterialAcc(long Xsize, long Ysize, long Zsize, float deltx, float delty, float deltz, float* DuDt, float * DvDt, float * DwDt, float * curl) { int i = blockDim.x*blockIdx.x + threadIdx.x; int j = 
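// calCurlofMaterialAcc: magnitude of the curl of the material-acceleration
// field, |curl(Du/Dt)|, from central differences (falling back to one-sided
// differences on the domain faces). For irrotational, correctly measured
// data this should vanish, so it is likely the per-cell error indicator
// behind the weighted/select kernels above.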
blockDim.y*blockIdx.y + threadIdx.y; int k = blockDim.z*blockIdx.z + threadIdx.z; while (i<Xsize&&j<Ysize&&k<Zsize) { int i0 = i - 1 >= 0 ? i - 1 : i; int j0 = j - 1 >= 0 ? j - 1 : j; int k0 = k - 1 >= 0 ? k - 1 : k; int ie = i + 1 <= Xsize - 1 ? i + 1 : i; int je = j + 1 <= Ysize - 1 ? j + 1 : j; int ke = k + 1 <= Zsize - 1 ? k + 1 : k; float curlx = (DwDt[i + je*Xsize + k*Xsize*Ysize] - DwDt[i + j0*Xsize + k*Xsize*Ysize]) / (je - j0) / delty; curlx += -(DvDt[i + j*Xsize + ke*Xsize*Ysize] - DvDt[i + j*Xsize + k0*Xsize*Ysize]) / (ke - k0) / deltz; float curly = -(DwDt[ie + j*Xsize + k*Xsize*Ysize] - DwDt[i0 + j*Xsize + k*Xsize*Ysize]) / (ie - i0) / deltx; curly += (DuDt[i + j*Xsize + ke*Xsize*Ysize] - DuDt[i + j*Xsize + k0*Xsize*Ysize]) / (ke - k0) / deltz; float curlz = (DvDt[ie + j*Xsize + k*Xsize*Ysize] - DvDt[i0 + j*Xsize + k*Xsize*Ysize]) / (ie - i0) / deltx; curlz += -(DuDt[i + je*Xsize + k*Xsize*Ysize] - DuDt[i + j0*Xsize + k*Xsize*Ysize]) / (je - j0) / delty; curl[i + j*Xsize + k*Xsize*Ysize] = sqrt(curlx*curlx + curly*curly + curlz*curlz); i += blockDim.x*gridDim.x; j += blockDim.y*gridDim.y; k += blockDim.z*gridDim.z; } } void omni3virtualcpu(long Xsize, long Ysize, long Zsize, long *index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pint, long *pcount) { float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; //virtual boundary an ellipsoid int a = Xsize - 1; int b = Ysize - 1; int c = Zsize - 1; float delttheta = PI / 16; float deltbeta = PI / 16; float xin, yin, zin, xout, yout, zout, k1, k2, k3, x, y, z; int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; int iin, jin, kin, iout, jout, kout, indexin, indexout; CStdioFile log; log.Open(_T("log.dat"), CFile::modeCreate | CFile::modeWrite); for (float thetaout = 0; thetaout<PI; thetaout += delttheta) { for (float betaout = 0; betaout<2 * PI; betaout += deltbeta) { for (float thetain = 0; thetain<PI; thetain += delttheta) { for (float betain = 0; betain<PI; betain += deltbeta) { xin = a*sin(thetain)*cos(betain); yin = b*sin(thetain)*sin(betain); zin = c*cos(thetain); xout = a*sin(thetaout)*cos(betaout); yout = b*sin(thetaout)*sin(betaout); zout = c*cos(thetaout); k1 = xout - xin; k2 = yout - yin; k3 = zout - zin; iin = 0; iout = 0; jin = 0; jout = 0; kin = 0; kout = 0; if (!(k1 == 0 && k2 == 0 && k3 == 0)) { /////case 1, vertical to x-axis if (k1 == 0 && k2 != 0 && k3 != 0) { if (xin >= -center_x&&xin <= center_x) { ////four crossing point;y=0;y=max;z=0;z=max; float r = (-center_y - yin) / k2; float y1 = -center_y; float z1 = zin + k3*r; r = (center_y - yin) / k2; float y2 = center_y; float z2 = zin + k3*r; r = (-center_z - zin) / k3; float z3 = -center_z; float y3 = yin + k2*r; r = (center_z - zin) / k3; float z4 = center_z; float y4 = yin + k2*r; bool flag = 0; if (z1 <= center_z&&z1 >= -center_z)//cross y=0; { if (flag == 0) { iin = int(xin + center_x + 0.5); jin = 0; kin = int(z1 + center_z + 0.5); } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = 0; kout = int(z1 + center_z + 0.5); } flag = 1; } if (z2 <= center_z&&z2 >= -center_z)//y=max; { if (flag == 0) { iin = int(xin + center_x + 0.5); jin = Ysize - 1; kin = int(z2 + center_z + 0.5); } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = Ysize - 1; kout = int(z2 + center_z + 0.5); } flag = 1; } if (y3 <= center_y&&y3 >= -center_y)//z=0; { if (flag == 0) { iin = int(xin + center_x + 0.5); jin = int(y3 + center_y + 0.5); kin 
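// omni3virtualcpu: host-side reference that fills pint/pcount by integrating
// between pairs of points on a virtual ellipsoid boundary,
// (x,y,z) = (a sin(theta) cos(beta), b sin(theta) sin(beta), c cos(theta)),
// swept in steps of PI/16. Note it logs through MFC's CStdioFile, so this
// routine is Windows-only.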
= 0; } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = int(y3 + center_y + 0.5); kout = 0; } flag = 1; } if (y4 <= center_y&&y4 >= -center_y) { if (flag == 0) { iin = int(xin + center_x + 0.5); jin = int(y4 + center_y + 0.5); kin = Zsize - 1; } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = int(y4 + center_y + 0.5); kout = Zsize - 1; } } //sorting intersection point by in, out order if (flag != 0) { if ((jout - jin)*k2 + (kout - kin)*k3<0) { int temp; temp = jin; jin = jout; jout = temp; temp = kin; kin = kout; kout = temp; } } } } ///case 2, vertical to y-axis if (k1 != 0 && k2 == 0 && k3 != 0) { if (yin >= -center_y&&yin <= center_y) { ////four crossing point float r = (-center_x - xin) / k1; float x1 = -center_x; float z1 = zin + k3*r;//x=0; r = (center_x - xin) / k1; float x2 = center_x; float z2 = zin + k3*r;//x=max r = (-center_z - zin) / k3; float z3 = -center_z; float x3 = xin + k1*r;//z=0; r = (center_z - zin) / k3; float z4 = center_z; float x4 = xin + k1*r;//z=max; bool flag = 0; if (z1 <= center_z&&z1 >= -center_z) { if (flag == 0) { iin = 0; jin = int(yin + center_y + 0.5); kin = int(z1 + center_z + 0.5); } if (flag == 1) { iout = 0; jout = int(yin + center_y + 0.5); kout = int(z1 + center_z + 0.5); } flag = 1; } if (z2 <= center_z&&z2 >= -center_z) { if (flag == 0) { iin = Xsize - 1; jin = int(yin + center_y + 0.5); kin = int(z2 + center_z + 0.5); } if (flag == 1) { iout = Xsize - 1; jout = int(yin + center_y + 0.5); kout = int(z2 + center_z + 0.5); } flag = 1; } if (x3 <= center_x&&x3 >= -center_x) { if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = int(yin + center_y + 0.5); kin = 0; } if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = int(yin + center_y + 0.5); kout = 0; } flag = 1; } if (x4 <= center_x&&x4 >= -center_x) { if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = int(yin + center_y + 0.5); kin = Zsize - 1; } if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = int(yin + center_y + 0.5); kout = Zsize - 1; } flag = 1; } //sorting intersection point by in, out order if (flag != 0) { if ((iout - iin)*k1 + (kout - kin)*k3<0) { int temp; temp = iin; iin = iout; iout = temp; temp = kin; kin = kout; kout = temp; } } } } ///case 3, vertical to z-axis if (k1 != 0 && k2 != 0 && k3 == 0) { if (zin >= -center_z&&zin <= center_z) { ////four crossing point float r = (-center_x - xin) / k1; float x1 = -center_x; float y1 = yin + k2*r;//x=0; r = (center_x - xin) / k1; float x2 = center_x; float y2 = yin + k2*r;//x=max; r = (-center_y - zin) / k2; float y3 = -center_y; float x3 = xin + k1*r;//y=0; r = (center_y - zin) / k2; float y4 = center_y; float x4 = xin + k1*r;//y=max; bool flag = 0; if (y1 <= center_y&&y1 >= -center_y) { if (flag == 0) { iin = 0; jin = int(y1 + center_y + 0.5); kin = int(zin + center_z + 0.5); } if (flag == 1) { iout = 0; jout = int(y1 + center_y + 0.5); kout = int(zin + center_z + 0.5); } flag = 1; } if (y2 <= center_y&&y2 >= -center_y) { if (flag == 0) { iin = Xsize - 1; jin = int(y2 + center_y + 0.5); kin = int(zin + center_z + 0.5); } if (flag == 1) { iout = Xsize - 1; jout = int(y2 + center_y + 0.5); kout = int(zin + center_z + 0.5); } flag = 1; } if (x3 <= center_x&&x3 >= -center_x) { if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = 0; kin = int(zin + center_z + 0.5); } if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = 0; kout = int(zin + center_z + 0.5); } flag = 1; } if (x4 <= center_x&&x4 >= -center_x) { if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = Ysize - 1; kin = int(zin + center_z + 
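      // Cases 2-7 below follow the same pattern as case 1: find where the segment
      // (xin,yin,zin) -> (xout,yout,zout) crosses each candidate box face, keep the
      // crossings that actually land on a face, then order the pair so that
      // (iout-iin, jout-jin, kout-kin) points in the direction of travel (k1,k2,k3).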
      ///case 2, vertical to y-axis
      if (k1 != 0 && k2 == 0 && k3 != 0) {
        if (yin >= -center_y&&yin <= center_y) {
          ////four crossing points
          float r = (-center_x - xin) / k1; float x1 = -center_x; float z1 = zin + k3*r; //x=0
          r = (center_x - xin) / k1; float x2 = center_x; float z2 = zin + k3*r; //x=max
          r = (-center_z - zin) / k3; float z3 = -center_z; float x3 = xin + k1*r; //z=0
          r = (center_z - zin) / k3; float z4 = center_z; float x4 = xin + k1*r; //z=max
          bool flag = 0;
          if (z1 <= center_z&&z1 >= -center_z) {
            if (flag == 0) { iin = 0; jin = int(yin + center_y + 0.5); kin = int(z1 + center_z + 0.5); }
            if (flag == 1) { iout = 0; jout = int(yin + center_y + 0.5); kout = int(z1 + center_z + 0.5); }
            flag = 1;
          }
          if (z2 <= center_z&&z2 >= -center_z) {
            if (flag == 0) { iin = Xsize - 1; jin = int(yin + center_y + 0.5); kin = int(z2 + center_z + 0.5); }
            if (flag == 1) { iout = Xsize - 1; jout = int(yin + center_y + 0.5); kout = int(z2 + center_z + 0.5); }
            flag = 1;
          }
          if (x3 <= center_x&&x3 >= -center_x) {
            if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = int(yin + center_y + 0.5); kin = 0; }
            if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = int(yin + center_y + 0.5); kout = 0; }
            flag = 1;
          }
          if (x4 <= center_x&&x4 >= -center_x) {
            if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = int(yin + center_y + 0.5); kin = Zsize - 1; }
            if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = int(yin + center_y + 0.5); kout = Zsize - 1; }
            flag = 1;
          }
          //sorting intersection point by in, out order
          if (flag != 0) {
            if ((iout - iin)*k1 + (kout - kin)*k3<0) {
              int temp;
              temp = iin; iin = iout; iout = temp;
              temp = kin; kin = kout; kout = temp;
            }
          }
        }
      }
      ///case 3, vertical to z-axis
      if (k1 != 0 && k2 != 0 && k3 == 0) {
        if (zin >= -center_z&&zin <= center_z) {
          ////four crossing points
          float r = (-center_x - xin) / k1; float x1 = -center_x; float y1 = yin + k2*r; //x=0
          r = (center_x - xin) / k1; float x2 = center_x; float y2 = yin + k2*r; //x=max
          r = (-center_y - yin) / k2; float y3 = -center_y; float x3 = xin + k1*r; //y=0
          r = (center_y - yin) / k2; float y4 = center_y; float x4 = xin + k1*r; //y=max
          bool flag = 0;
          if (y1 <= center_y&&y1 >= -center_y) {
            if (flag == 0) { iin = 0; jin = int(y1 + center_y + 0.5); kin = int(zin + center_z + 0.5); }
            if (flag == 1) { iout = 0; jout = int(y1 + center_y + 0.5); kout = int(zin + center_z + 0.5); }
            flag = 1;
          }
          if (y2 <= center_y&&y2 >= -center_y) {
            if (flag == 0) { iin = Xsize - 1; jin = int(y2 + center_y + 0.5); kin = int(zin + center_z + 0.5); }
            if (flag == 1) { iout = Xsize - 1; jout = int(y2 + center_y + 0.5); kout = int(zin + center_z + 0.5); }
            flag = 1;
          }
          if (x3 <= center_x&&x3 >= -center_x) {
            if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = 0; kin = int(zin + center_z + 0.5); }
            if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = 0; kout = int(zin + center_z + 0.5); }
            flag = 1;
          }
          if (x4 <= center_x&&x4 >= -center_x) {
            if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = Ysize - 1; kin = int(zin + center_z + 0.5); }
            if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = Ysize - 1; kout = int(zin + center_z + 0.5); }
            flag = 1;
          }
          //sorting intersection point by in, out order
          if (flag != 0) {
            if ((iout - iin)*k1 + (jout - jin)*k2<0) {
              int temp;
              temp = iin; iin = iout; iout = temp;
              temp = jin; jin = jout; jout = temp;
            }
          }
        }
      }
      ///case 4, vertical to plane IJ
      if (abs(k1)<zero&&abs(k2)<zero&&abs(k3) >= zero) {
        if (xin <= center_x&&xin >= -center_x&&yin <= center_y&&yin >= -center_y) {
          iin = int(xin + center_x + 0.5); iout = iin;
          jin = int(yin + center_y + 0.5); jout = jin;
          if (k3>0) { kin = 0; kout = Zsize - 1; }
          else { kin = Zsize - 1; kout = 0; }
        }
      }
      ///case 5, vertical to IK plane
      if (abs(k1)<zero&&abs(k2) >= zero&&abs(k3)<zero) {
        if (xin >= -center_x&&xin <= center_x&&zin >= -center_z&&zin <= center_z) {
          iin = int(xin + center_x + 0.5); iout = iin;
          kin = int(zin + center_z + 0.5); kout = kin;
          if (k2>0) { jout = Ysize - 1; jin = 0; }
          else { jin = Ysize - 1; jout = 0; }
        }
      }
      ///case 6, vertical to JK plane
      if (abs(k1) >= zero&&abs(k2)<zero&&abs(k3)<zero) {
        if (yin >= -center_y&&yin <= center_y&&zin >= -center_z&&zin <= center_z) {
          jin = int(yin + center_y + 0.5); jout = jin;
          kin = int(zin + center_z + 0.5); kout = kin;
          if (k1>0) { iout = Xsize - 1; iin = 0; }
          else { iin = Xsize - 1; iout = 0; }
        }
      }
      /// case 7, purely inclined
      if (abs(k1) >= zero&&abs(k2) >= zero&&abs(k3) >= zero) {
        /// six crossing points
        float r;
        float x1, x2, x3, x4, x5, x6;
        float y1, y2, y3, y4, y5, y6;
        float z1, z2, z3, z4, z5, z6;
        r = (-center_x - xin) / k1; x1 = -center_x; y1 = yin + k2*r; z1 = zin + k3*r; //x=0
        r = (center_x - xin) / k1; x2 = center_x; y2 = yin + k2*r; z2 = zin + k3*r; //x=max
        r = (-center_y - yin) / k2; x3 = xin + k1*r; y3 = -center_y; z3 = zin + k3*r; //y=0
        r = (center_y - yin) / k2; x4 = xin + k1*r; y4 = center_y; z4 = zin + k3*r; //y=max
        r = (-center_z - zin) / k3; x5 = xin + k1*r; y5 = yin + k2*r; z5 = -center_z; //z=0
        r = (center_z - zin) / k3; x6 = xin + k1*r; y6 = yin + k2*r; z6 = center_z; //z=max
        bool flag = 0;
        if (y1 <= center_y&&y1 >= -center_y&&z1 <= center_z&&z1 >= -center_z) {
          if (flag == 0) { iin = 0; jin = int(y1 + center_y + 0.5); kin = int(z1 + center_z + 0.5); }
          if (flag == 1) { iout = 0; jout = int(y1 + center_y + 0.5); kout = int(z1 + center_z + 0.5); }
          flag = 1;
        }
        if (y2 <= center_y&&y2 >= -center_y&&z2 <= center_z&&z2 >= -center_z) {
          if (flag == 0) { iin = Xsize - 1; jin = int(y2 + center_y + 0.5); kin = int(z2 + center_z + 0.5); }
          if (flag == 1) { iout = Xsize - 1; jout = int(y2 + center_y + 0.5); kout = int(z2 + center_z + 0.5); }
          flag = 1;
        }
        if (x3 <= center_x&&x3 >= -center_x&&z3 <= center_z&&z3 >= -center_z) {
          if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = 0; kin = int(z3 + center_z + 0.5); }
          if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = 0; kout = int(z3 + center_z + 0.5); }
          flag = 1;
        }
        if (x4 <= center_x&&x4 >= -center_x&&z4 <= center_z&&z4 >= -center_z) {
          if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = Ysize - 1; kin = int(z4 + center_z + 0.5); }
          if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = Ysize - 1; kout = int(z4 + center_z + 0.5); }
          flag = 1;
        }
        if (x5 <= center_x&&x5 >= -center_x&&y5 <= center_y&&y5 >= -center_y) {
          if (flag == 0) { iin = int(x5 + center_x + 0.5); jin = int(y5 + center_y + 0.5); kin = 0; }
          if (flag == 1) { iout = int(x5 + center_x + 0.5); jout = int(y5 + center_y + 0.5); kout = 0; }
          flag = 1;
        }
        if (x6 <= center_x&&x6 >= -center_x&&y6 <= center_y&&y6 >= -center_y) {
          if (flag == 0) { iin = int(x6 + center_x + 0.5); jin = int(y6 + center_y + 0.5); kin = Zsize - 1; }
          if (flag == 1) { iout = int(x6 + center_x + 0.5); jout = int(y6 + center_y + 0.5); kout = Zsize - 1; }
          flag = 1;
        }
        //sorting intersection point by in, out order
        if (flag != 0) {
          if ((iout - iin)*k1 + (jout - jin)*k2 + (kout - kin)*k3<0) {
            int temp;
            temp = iin; iin = iout; iout = temp;
            temp = jin; jin = jout; jout = temp;
            temp = kin; kin = kout; kout = temp;
          }
        }
      }
      //////////////////////////////END OF CALCULATING IN AND OUT POINT ON REAL BOUNDARY////////////////////////////////
      if ((iin - center_x - xin)*(iin - center_x - xout) + (jin - center_y - yin)*(jin - center_y - yout) + (kin - center_z - zin)*(kin - center_z - zout)<0 && (iin + jin + kin + iout + jout + kout) != 0 && !(iin == iout&&jin == jout&&kin == kout)) {
        int nin, nout;
        long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3;
        ilast = iin; jlast = jin; klast = kin;
        do {
          if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; }
          if (ilast == iout) { inext1 = ilast - 1e6; jnext1 = jlast; knext1 = klast; }
          if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; }
          if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; }
          if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 1e6; knext2 = klast; }
          if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; }
          if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; }
          if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1e6; }
          if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; }
          ///determine which candidate is closer to the integration path
          float r, d1, d2, d3;
          r = k1*inext1 - iin*k1 + k2*jnext1 - k2*jin + k3*knext1 - k3*kin;
          x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
          d1 = sqrt((x - inext1)*(x - inext1) + (y - jnext1)*(y - jnext1) + (z - knext1)*(z - knext1));
          r = k1*inext2 - iin*k1 + k2*jnext2 - k2*jin + k3*knext2 - k3*kin;
          x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
          d2 = sqrt((x - inext2)*(x - inext2) + (y - jnext2)*(y - jnext2) + (z - knext2)*(z - knext2));
          r = k1*inext3 - iin*k1 + k2*jnext3 - k2*jin + k3*knext3 - k3*kin;
          x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
          d3 = sqrt((x - inext3)*(x - inext3) + (y - jnext3)*(y - jnext3) + (z - knext3)*(z - knext3));
          //////End of calculating distances///////////////
          nin = index[iin + jin*Xsize + kin*Xsize*Ysize];
          nout = index[iout + jout*Xsize + kout*Xsize*Ysize];
          /*if(kin==0)
          {
            nin=iin+jin*Xsize;
          }
          if(iin==Xsize-1&&kin!=0)
          {
            nin=Xsize*Ysize-1+kin+(Ysize-1-jin)*(Zsize-1);
          }
          if(kin==Zsize-1&&iin!=Xsize-1)
          {
            nin=Xsize*Ysize-1+(Zsize-1)*Ysize+Xsize-1-iin+jin*(Xsize-1);
          }
          if(jin==0&&iin!=Xsize-1&&kin!=0&&kin!=Zsize-1)
          {
            nin=Xsize*Ysize-1+(Zsize-1)*Ysize+Ysize*(Xsize-1)+Xsize-1-iin+(kin-1)*(Xsize-1);//????
} if(iin==0&&jin!=0&&kin!=0&&kin!=Zsize-1) { nin=Xsize*Ysize-1+(Zsize-1)*Ysize+Ysize*(Xsize-1)+(Xsize-1)*(Zsize-2)+Zsize-1-kin+(jin-1)*(Zsize-2); } if(jin==Ysize-1&&iin!=0&&iin!=Xsize-1&&kin!=0&&kin!=Zsize-1) { nin=Xsize*Ysize-1+(Zsize-1)*Ysize+Ysize*(Xsize-1)+(Xsize-1)*(Zsize-2)+(Ysize-1)*(Zsize-2)+iin+(kin-1)*(Xsize-2); } if(kout==0) { nout=iout+jout*Xsize; } if(iout==Xsize-1&&kout!=0) { nout=Xsize*Ysize-1+kout+(Ysize-1-jout)*(Zsize-1); } if(kout==Zsize-1&&iout!=Xsize-1) { nout=Xsize*Ysize-1+(Zsize-1)*Ysize+Xsize-1-iout+jout*(Xsize-1); } if(jout==0&&iout!=Xsize-1&&kout!=0&&kout!=Zsize-1) { nout=Xsize*Ysize-1+(Zsize-1)*Ysize+Ysize*(Xsize-1)+Xsize-1-iout+(kout-1)*(Xsize-1); } if(iout==0&&jout!=0&&kout!=0&&kout!=Zsize-1) { nout=Xsize*Ysize-1+(Zsize-1)*Ysize+Ysize*(Xsize-1)+(Xsize-1)*(Zsize-2)+Zsize-1-kout+(jout-1)*(Zsize-2); } if(jout==Ysize-1&&iout!=0&&iout!=Xsize-1&&kout!=0&&kout!=Zsize-1) { nout=Xsize*Ysize-1+(Zsize-1)*Ysize+Ysize*(Xsize-1)+(Xsize-1)*(Zsize-2)+(Ysize-1)*(Zsize-2)+iout+(kout-1)*(Xsize-2); }*/ if (d1 <= d2&&d1 <= d3) { pint[nin + nout*n] += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); ilast = inext1; } if (d2<d1&&d2 <= d3) { pint[nin + nout*n] += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); jlast = jnext2; } if (d3<d1&&d3<d2) { pint[nin + nout*n] += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); klast = knext3; } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5); pcount[nin + nout*n]++; CString str; str.Format(_T("%04d--%04d (%02d,%02d,%02d) (%02d,%02d,%02d) %10.8f %02d\n"), nin, nout, iin, jin, kin, iout, jout, kout, pint[nin + nout*n], pcount[nin + nout*n]); cout << str; log.WriteString(str); } } } } } } int no = 0; for (int k = 0; k<n*n; k++) { if (pcount[k]>0) { pint[k] = pint[k] / pcount[k]; no++; } } cout << no << endl; log.Close(); } float BCIterationCPU(long Xsize, long Ysize, long Zsize, float* pint, float *p, float* pn, float eps, int Noitr) { long n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float pdiffold = 0; float pdiffnew = 0; float pdiffrela = 100; float meanp = 0; long iteration = 0; while (iteration<Noitr&&pdiffrela>eps) { meanp = 0; for (long nout = n - 1; nout >= 0; nout--) { long iout, jout, kout; if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= 
Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } long beta = 0; for (long nin = 0; nin<n; nin++) { long iin, jin, kin; if (nin <= Xsize*Ysize - 1) { kin = 0; jin = nin / Xsize; iin = nin - Xsize*jin; } if (nin>Xsize*Ysize - 1 && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iin = Xsize - 1; jin = (nin - Xsize*Ysize) / (Zsize - 1); kin = nin - Xsize*Ysize - jin*(Zsize - 1) + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kin = Zsize - 1; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - jin*(Xsize - 1); iin = Xsize - 2 - iin; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jin = 0; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kin*(Xsize - 1); iin = Xsize - 2 - iin; kin = kin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iin = 0; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jin*(Zsize - 2); kin = Zsize - 2 - kin; jin = jin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jin = Ysize - 1; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kin*(Xsize - 2); kin = Zsize - 2 - kin; iin = iin + 1; } /////////// if (pint[nin + nout*n] != 0) { pn[iout + jout*Xsize + kout*Xsize*Ysize] += p[iin + jin*Xsize + kin*Xsize*Ysize] + pint[nin + nout*n]; beta++; } } pn[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize] / (beta + 1); p[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize]; //cout<<pn[iout+jout*Xsize+kout*Xsize*Ysize]<<endl; } iteration++; for (long nout = 0; nout<n; nout++) { long iout, jout, kout; if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= 
Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } meanp += pn[iout + jout*Xsize + kout*Xsize*Ysize]; pdiffnew += abs(p[iout + jout*Xsize + kout*Xsize*Ysize] - pn[iout + jout*Xsize + kout*Xsize*Ysize]); p[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize]; //pn[iout+jout*Xsize+kout*Xsize*Ysize]=0; } meanp = meanp / n; pdiffnew = pdiffnew / n; pdiffrela = abs(pdiffnew - pdiffold); pdiffold = pdiffnew; pdiffnew = 0; } return meanp; } float BCIterationCPUFixBC(long Xsize, long Ysize, long Zsize, float* pint, float *p, float* pn, float eps, int Noitr) { long n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float pdiffold = 0; float pdiffnew = 0; float pdiffrela = 100; float meanp = 0; long iteration = 0; while (iteration<Noitr&&pdiffrela>eps) { meanp = 0; for (long nout = n - 1; nout >= 0; nout--) { long iout, jout, kout; if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { 
iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } long beta = 0; for (long nin = 0; nin<n; nin++) { long iin, jin, kin; if (nin <= Xsize*Ysize - 1) { kin = 0; jin = nin / Xsize; iin = nin - Xsize*jin; } if (nin>Xsize*Ysize - 1 && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iin = Xsize - 1; jin = (nin - Xsize*Ysize) / (Zsize - 1); kin = nin - Xsize*Ysize - jin*(Zsize - 1) + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kin = Zsize - 1; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - jin*(Xsize - 1); iin = Xsize - 2 - iin; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jin = 0; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kin*(Xsize - 1); iin = Xsize - 2 - iin; kin = kin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iin = 0; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jin*(Zsize - 2); kin = Zsize - 2 - kin; jin = jin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jin = Ysize - 1; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kin*(Xsize - 2); kin = Zsize - 2 - kin; iin = iin + 1; } /////////// if (pint[nin + nout*n] != 0 && jout != Ysize - 1) { pn[iout + jout*Xsize + kout*Xsize*Ysize] += p[iin + jin*Xsize + kin*Xsize*Ysize] + pint[nin + nout*n]; beta++; } } pn[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize] / (beta + 1); p[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize]; //cout<<pn[iout+jout*Xsize+kout*Xsize*Ysize]<<endl; } iteration++; for (long nout = 0; nout<n; nout++) { long iout, jout, kout; if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - 
Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } meanp += pn[iout + jout*Xsize + kout*Xsize*Ysize]; pdiffnew += abs(p[iout + jout*Xsize + kout*Xsize*Ysize] - pn[iout + jout*Xsize + kout*Xsize*Ysize]); p[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize]; //pn[iout+jout*Xsize+kout*Xsize*Ysize]=0; } meanp = meanp / n; pdiffnew = pdiffnew / n; pdiffrela = abs(pdiffnew - pdiffold); pdiffold = pdiffnew; pdiffnew = 0; } return meanp; } float BCIterationCPUFixPoint(long Xsize, long Ysize, long Zsize, float* pint, float *p, float* pn, float eps, int Noitr) { long n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float pdiffold = 0; float pdiffnew = 0; float pdiffrela = 100; float meanp = 0; long iteration = 0; while (iteration<Noitr&&pdiffrela>eps) { meanp = 0; for (long nout = n - 1; nout >= 0; nout--) { long iout, jout, kout; if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 
1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } long beta = 0; for (long nin = 0; nin<n; nin++) { long iin, jin, kin; if (nin <= Xsize*Ysize - 1) { kin = 0; jin = nin / Xsize; iin = nin - Xsize*jin; } if (nin>Xsize*Ysize - 1 && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iin = Xsize - 1; jin = (nin - Xsize*Ysize) / (Zsize - 1); kin = nin - Xsize*Ysize - jin*(Zsize - 1) + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kin = Zsize - 1; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - jin*(Xsize - 1); iin = Xsize - 2 - iin; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jin = 0; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kin*(Xsize - 1); iin = Xsize - 2 - iin; kin = kin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iin = 0; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jin*(Zsize - 2); kin = Zsize - 2 - kin; jin = jin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jin = Ysize - 1; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kin*(Xsize - 2); kin = Zsize - 2 - kin; iin = iin + 1; } /////////// if (pint[nin + nout*n] != 0 && !(jout == Ysize - 1 && iout == 0 && kout == 0)) { pn[iout + jout*Xsize + kout*Xsize*Ysize] += p[iin + jin*Xsize + kin*Xsize*Ysize] + pint[nin + nout*n]; beta++; } } pn[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize] / (beta + 1); p[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize]; //cout<<pn[iout+jout*Xsize+kout*Xsize*Ysize]<<endl; } iteration++; for (long nout = 0; nout<n; nout++) { long iout, jout, kout; if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - 
Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } meanp += pn[iout + jout*Xsize + kout*Xsize*Ysize]; pdiffnew += abs(p[iout + jout*Xsize + kout*Xsize*Ysize] - pn[iout + jout*Xsize + kout*Xsize*Ysize]); p[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize]; //pn[iout+jout*Xsize+kout*Xsize*Ysize]=0; } meanp = meanp / n; pdiffnew = pdiffnew / n; pdiffrela = abs(pdiffnew - pdiffold); pdiffold = pdiffnew; pdiffnew = 0; } return meanp; } void omni3Dinner(long Xsize, long Ysize, long Zsize, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, long *pcount, float *p, float* pn, int itrNo) { int iteration = 0; float rms = 0; long n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; while (iteration<itrNo) { for (int nin = 0; nin<n; nin = nin + 1) { for (int nout = 0; nout<n; nout = nout + 1) { int iout, jout, kout; int facein, faceout; if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; faceout = 1; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; faceout = 2; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; faceout = 3; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; faceout = 4; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) 
- (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; faceout = 5; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; faceout = 6; } int iin, jin, kin; if (nin <= Xsize*Ysize - 1) { kin = 0; jin = nin / Xsize; iin = nin - Xsize*jin; facein = 1; } if (nin>Xsize*Ysize - 1 && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iin = Xsize - 1; jin = (nin - Xsize*Ysize) / (Zsize - 1); kin = nin - Xsize*Ysize - jin*(Zsize - 1) + 1; facein = 2; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kin = Zsize - 1; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - jin*(Xsize - 1); iin = Xsize - 2 - iin; facein = 3; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jin = 0; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kin*(Xsize - 1); iin = Xsize - 2 - iin; kin = kin + 1; facein = 4; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iin = 0; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jin*(Zsize - 2); kin = Zsize - 2 - kin; jin = jin + 1; facein = 5; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jin = Ysize - 1; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kin*(Xsize - 2); kin = Zsize - 2 - kin; iin = iin + 1; facein = 6; } int ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; ilast = iin; jlast = jin; klast = kin; if (nin != nout&&nin >= 0 && nin<n&&nout >= 0 && nout<n) { float k1 = iout - iin; float k2 = jout - jin; float k3 = kout - kin; float l = sqrt(k1*k1 + k2*k2 + k3*k3); k1 = k1 / l; k2 = k2 / l; k3 = k3 / l; //cout<<"indexin: "<<nin<<" indexout:"<<nout<<endl; //cout<<'('<<iin<<','<<jin<<','<<kin<<") "<<'('<<iout<<','<<jout<<','<<kout<<") "<<endl; //log<<"indexin: "<<nin<<" indexout:"<<nout<<endl; //log<<'('<<iin<<','<<jin<<','<<kin<<") "<<'('<<iout<<','<<jout<<','<<kout<<") "<<endl; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 1e6; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == 
jout) { inext2 = ilast; jnext2 = jlast - 1e6; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1e6; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path float r, d1, d2, d3, x, y, z; r = k1*inext1 - iin*k1 + k2*jnext1 - k2*jin + k3*knext1 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r; d1 = sqrt((x - inext1)*(x - inext1) + (y - jnext1)*(y - jnext1) + (z - knext1)*(z - knext1)); r = k1*inext2 - iin*k1 + k2*jnext2 - k2*jin + k3*knext2 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r; d2 = sqrt((x - inext2)*(x - inext2) + (y - jnext2)*(y - jnext2) + (z - knext2)*(z - knext2)); r = k1*inext3 - iin*k1 + k2*jnext3 - k2*jin + k3*knext3 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r; d3 = sqrt((x - inext3)*(x - inext3) + (y - jnext3)*(y - jnext3) + (z - knext3)*(z - knext3)); //////End of calculation distance/////////////// //path 1 if (d1 <= d2&&d1 <= d3) { pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += p[ilast + jlast*Xsize + klast*Xsize*Ysize] - density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); pcount[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]++; //pint[nin+nout*n]+=-density*(inext1-ilast)*deltx*0.5*(DuDt[inext1+jnext1*Xsize+knext1*Xsize*Ysize]+DuDt[ilast+jlast*Xsize+klast*Xsize*Ysize]); ilast = inext1; } if (d2<d1&&d2 <= d3) { pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += p[ilast + jlast*Xsize + klast*Xsize*Ysize] - density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); pcount[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]++; //pint[nin+nout*n]+=-density*(jnext2-jlast)*delty*0.5*(DvDt[inext2+jnext2*Xsize+knext2*Xsize*Ysize]+DvDt[ilast+jlast*Xsize+klast*Xsize*Ysize]); jlast = jnext2; } if (d3<d1&&d3<d2) { pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += p[ilast + jlast*Xsize + klast*Xsize*Ysize] - density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); pcount[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]++; //pint[nin+nout*n]+=-density*(knext3-klast)*deltz*0.5*(DwDt[inext3+jnext3*Xsize+knext3*Xsize*Ysize]+DwDt[ilast+jlast*Xsize+klast*Xsize*Ysize]); klast = knext3; } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-3); } //cout<<thetain<<' '<<betain<<endl; //cout<<thetaout<<' '<<betaout<<endl; //cout<<"k1="<<k1<<" k2="<<k2<<" k3="<<k3<<endl; //cout<<indexin<<" "<<indexout<<endl; } } rms = 0; for (int k = 0; k<Xsize*Ysize*Zsize; k++) { pn[k] = pn[k] / pcount[k]; pcount[k] = 0; rms += (p[k] - pn[k])*(p[k] - pn[k]); } rms = sqrt(rms / Xsize / Ysize / Zsize); cout << "Iteration: " << iteration << " rms: " << rms << endl; memcpy(p, pn, sizeof(float)*Xsize*Ysize*Zsize); memset(pn, 0, sizeof(float)*Xsize*Ysize*Zsize); iteration++; } } void calIndex(long*index, long Xsize, long Ysize, long Zsize) { long n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; for (long nout = n - 1; nout >= 0; nout--) { long iout, jout, kout; if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize 
- 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } index[iout + jout*Xsize + kout*Xsize*Ysize] = nout; } } void omni3dparallellinesEqualSpacingCPU(long Xsize, long Ysize, long Zsize, int NoAngles, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pint, float* pcount, float* pcountinner) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; int NoGrid = 16; float spacing = 1; //CStdioFile log; //log.Open("log.dat",CFile::modeCreate|CFile::modeWrite); //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; for (int angle = 0; angle<NoAngles; angle++) { for (int point = 0; point<NoGrid*NoGrid; point++) { float xprime = (float(point / NoGrid) - 0.5*NoGrid)*spacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*NoGrid)*spacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = asinf(k2 / sinf(theta)); if (k1 / sinf(theta)<0) { phi = -phi + PI; } float x = xprime*cosf(theta)*cosf(phi) - yprime*sinf(phi); float y = xprime*cosf(theta)*sinf(phi) + yprime*cosf(phi); float z = -xprime*sinf(theta); //float k1=sinf(theta)*cosf(phi); //float k2=sinf(theta)*sinf(phi); //float k3=cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { pint[nin + nout*n] += bodyIntegral(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, 
k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, pcountinner); pcount[nin + nout*n]++; } //CString str; //str.Format(_T("%6.4f %6.4f %6.4f %6.4f %6.4f %6.4f %6.4f %6.4f %02d %02d %02d %02d %02d %02d\n"),theta,phi,k1,k2,k3,x,y,z,iin,jin,kin,iout,jout,kout); //if(angle==10000/2-1) //{ // log.WriteString(str); //} } } } //log.Close(); } void omni3dparallellinesESInnerCPU(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float*p, float*pn, float*pcountinner) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.732; //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; for (int angle = 0; angle<NoAngles; angle++) { for (int point = 0; point<NoGrid*NoGrid; point++) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = asinf(k2 / sinf(theta)); if (k1 / sinf(theta)<0) { phi = -phi + PI; } float x = xprime*cosf(theta)*cosf(phi) - yprime*sinf(phi); float y = xprime*cosf(theta)*sinf(phi) + yprime*cosf(phi); float z = -xprime*sinf(theta); //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { bodyIntegralInner(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, p, pn, pcountinner); } } } } } void devidecountCPU(long Xsize, long Ysize, long Zsize, float* pint, float* pcount) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; for (int tid = 0; tid<n*n; tid++) { if (pcount[tid]>1) { pint[tid] /= pcount[tid]; } } } void devidecountInnerCPU(long Xsize, long Ysize, long Zsize, float* p, float* pn, float* pcountinner) { for (int tid = 0; tid<Xsize*Ysize*Zsize; tid++) { if (pcountinner[tid]>1) { p[tid] = pn[tid] / pcountinner[tid]; pn[tid] = 0; } } } void calCurlofMaterialAccCPU(long Xsize, long Ysize, long Zsize, float deltx, float delty, float deltz, float* DuDt, float * DvDt, float * DwDt, float * curl,float* mask) { for (int k = 0; k<Zsize; k++) { for (int j = 0; j<Ysize; j++) { for (int i = 0; i<Xsize; i++) { int i0 = i - 1 >= 0 ? i - 1 : i; int j0 = j - 1 >= 0 ? j - 1 : j; int k0 = k - 1 >= 0 ? k - 1 : k; int ie = i + 1 <= Xsize - 1 ? i + 1 : i; int je = j + 1 <= Ysize - 1 ? j + 1 : j; int ke = k + 1 <= Zsize - 1 ? 
k + 1 : k;
        float curlx = (DwDt[i + je*Xsize + k*Xsize*Ysize] - DwDt[i + j0*Xsize + k*Xsize*Ysize]) / (je - j0) / delty;
        curlx += -(DvDt[i + j*Xsize + ke*Xsize*Ysize] - DvDt[i + j*Xsize + k0*Xsize*Ysize]) / (ke - k0) / deltz;
        float curly = -(DwDt[ie + j*Xsize + k*Xsize*Ysize] - DwDt[i0 + j*Xsize + k*Xsize*Ysize]) / (ie - i0) / deltx;
        curly += (DuDt[i + j*Xsize + ke*Xsize*Ysize] - DuDt[i + j*Xsize + k0*Xsize*Ysize]) / (ke - k0) / deltz;
        float curlz = (DvDt[ie + j*Xsize + k*Xsize*Ysize] - DvDt[i0 + j*Xsize + k*Xsize*Ysize]) / (ie - i0) / deltx;
        curlz += -(DuDt[i + je*Xsize + k*Xsize*Ysize] - DuDt[i + j0*Xsize + k*Xsize*Ysize]) / (je - j0) / delty;
        curl[i + j*Xsize + k*Xsize*Ysize] = sqrt(curlx*curlx + curly*curly + curlz*curlz)*mask[i + j*Xsize + k*Xsize*Ysize];
      }
    }
  }
}

void thredholdHistMaterialAccCPU(int Imax, int Jmax, int Kmax, float* curl, float percentage, float* threshold)
{
  //get min and max values of curl
  float minv = 1e4;
  float maxv = -1e4;
  for (int i = 0; i<Imax*Jmax*Kmax; i++) {
    if (minv>curl[i]) { minv = curl[i]; }
    if (maxv<curl[i]) { maxv = curl[i]; }
  }
  //accumulate an N-bin histogram of curl
  int * hist;
  int N = 10000;
  hist = new int[N];
  memset(hist, 0, sizeof(int)*N);
  for (int i = 0; i<Imax*Jmax*Kmax; i++) {
    int ind = (int)((curl[i] - minv) / (maxv - minv)*N);
    if (ind > N - 1) { ind = N - 1; } //curl[i] == maxv would otherwise index one past the last bin
    hist[ind]++;
  }
  //walk the bins from the top down until the requested fraction of voxels is covered
  float totnum = 0;
  threshold[0] = maxv;
  for (int j = N - 1; j >= 0; j--) {
    totnum += hist[j];
    if (totnum >= Imax*Jmax*Kmax*percentage) { threshold[0] = float(j) / N*(maxv - minv) + minv; break; }
  }
  delete[] hist;
}
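// Boundary-node linearization used by calIndexGPU below and by the BCIteration*/
// omni3D* routines. A boundary index nout in [0, n) walks the six faces of the
// X*Y*Z grid in a fixed order (ranges read off the decode branches):
//   face 1: k = 0    i in [0,X-1], j in [0,Y-1]  -> X*Y nodes
//   face 2: i = X-1  j in [0,Y-1], k in [1,Z-1]  -> Y*(Z-1) nodes
//   face 3: k = Z-1  i in [0,X-2], j in [0,Y-1]  -> (X-1)*Y nodes
//   face 4: j = 0    i in [0,X-2], k in [1,Z-2]  -> (X-1)*(Z-2) nodes
//   face 5: i = 0    j in [1,Y-1], k in [1,Z-2]  -> (Y-1)*(Z-2) nodes
//   face 6: j = Y-1  i in [1,X-2], k in [1,Z-2]  -> (X-2)*(Z-2) nodes
// The counts sum to n = 2*X*Y + 2*Y*(Z-2) + 2*(X-2)*(Z-2), the number of nodes on
// the surface of the grid (e.g. 64^3 -> 8192 + 7936 + 7688 = 23816).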
__global__ void calIndexGPU(long*index, long Xsize, long Ysize, long Zsize)
{
  long nout = threadIdx.x + blockIdx.x*blockDim.x;
  long n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2;
  while (nout<n) {
    long iout, jout, kout;
    if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; }
    if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) {
      iout = Xsize - 1;
      jout = (nout - Xsize*Ysize) / (Zsize - 1);
      kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1;
    }
    if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) {
      kout = Zsize - 1;
      jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1);
      iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1);
      iout = Xsize - 2 - iout;
    }
    if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) {
      jout = 0;
      kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1);
      iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1);
      iout = Xsize - 2 - iout;
      kout = kout + 1;
    }
    if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) {
      iout = 0;
      jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2);
      kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2);
      kout = Zsize - 2 - kout;
      jout = jout + 1;
    }
    if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) {
      jout = Ysize - 1;
      kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2);
      iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2);
      kout = Zsize - 2 - kout;
      iout = iout + 1;
    }
    index[iout + jout*Xsize + kout*Xsize*Ysize] = nout;
    nout += blockDim.x*gridDim.x;
  }
}

/////////////////////////////For Experimental Data Version///////////////////////////////
int main()
{
  hipDeviceProp_t prop;
  long DeviceNo=0;
  hipSetDevice(DeviceNo);
  hipGetDeviceProperties(&prop, DeviceNo);
  printf( " ----------- General Information for device %d --------\n", DeviceNo );
  printf( "Name: %s\n", prop.name );
  printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
  printf( "Clock rate: %d\n", prop.clockRate );
  printf( "Device copy overlap: " );
  if (prop.deviceOverlap) printf( "Enabled\n" );
  else printf( "Disabled\n" );
  printf( "Kernel execution timeout : " );
  if (prop.kernelExecTimeoutEnabled) printf( "Enabled\n" );
  else printf( "Disabled\n" );
  printf( " ------------ Memory Information for device %d ---------\n", DeviceNo );
  printf( "Total global mem: %ld\n", prop.totalGlobalMem );
  printf( "Total constant Mem: %ld\n", prop.totalConstMem );
  printf( "Max mem pitch: %ld\n", prop.memPitch );
  printf( "Texture Alignment: %ld\n", prop.textureAlignment );
  printf( " --- MP Information for device %d ---\n", DeviceNo );
  printf( "Multiprocessor count: %d\n", prop.multiProcessorCount );
  printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
  printf( "Registers per mp: %d\n", prop.regsPerBlock );
  printf( "Threads in warp: %d\n", prop.warpSize );
  printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock );
  printf( "Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] );
  printf( "Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] );
  printf( "\n" );
  ofstream log;
  log.open("log.dat");
  long ImaxOrg,JmaxOrg,KmaxOrg,Imax,Jmax,Kmax,n,PlaneSt,Planedt,PlaneEnd,FileNumSt,FileNumDelt,FileNumEnd;
  float rho,scale,linespacing;
  float density=1;
  float eps=1e-10;
  float meanpcal=0;
  float meanpdns=0;
  float* x,*y,*z,*u,*v,*w,*dudt,*dvdt,*dwdt,*pint,*p,*pn,*pdns,*RHS,*curl,*mask;
  float* k1,*k2,*k3;
  float* dudt_d,*dvdt_d,*dwdt_d,*pint_d,*p_d,*pn_d,*curl_d;
  float* k1_d,*k2_d,*k3_d;
  long *index,*index_d;
  float *pcountinner,*pcountinner_d;
  float* pcount_d,*pcountitr_d,*pcount;
  float* pweight_d;
  float threshold;
  float pref=0;
  int NoAngles=10000;
  int NoItr=100;
  int cutzs,cutze;
  int cutxs,cutxe;
  int cutys,cutye;
  Imax=64; Jmax=64; Kmax=64;
  float deltx=0.006135923151543;
  float delty=0.006135923151543;
  float deltz=0.006135923151543;
  CString pathpressure,pathacc,fileacc,basefile;
  CString filegrid;
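  // Parameter_Omni3D.dat is consumed one value per line by the ReadString()
  // calls below. The expected order (inferred from the read sequence, not from
  // a written spec) is:
  //   ImaxOrg, JmaxOrg, KmaxOrg, deltx, delty, deltz, density, scale,
  //   linespacing, NoAngles, filegrid, pathacc, pathpressure, NoItr,
  //   threshold, pref, cutxs, cutxe, cutys, cutye, cutzs, cutze
  // (trailing pref/cut* lines may be empty, in which case defaults are used).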
  ///////////////////Reading parameters//////////////////////////////
  CStdioFile par;
  CString str;
  if (!par.Open(_T("Parameter_Omni3D.dat"), CFile::modeRead))
  {
    cout << "Parameter input file: \"Parameter_Omni3D.dat\" open error" << endl;
    //MessageBox(NULL, _T("Parameter input file: \"Parameter_Omni3D.dat\" open error"), _T("Omni3D Message"), MB_OK);
    cin >> Imax;
    return 1;
  }
  par.ReadString(str);ImaxOrg=_wtoi(str);
  par.ReadString(str);JmaxOrg=_wtoi(str);
  par.ReadString(str);KmaxOrg=_wtoi(str);
  par.ReadString(str);deltx=_wtof(str);
  par.ReadString(str);delty=_wtof(str);
  par.ReadString(str);deltz=_wtof(str);
  par.ReadString(str);density=_wtof(str);
  par.ReadString(str);scale=_wtof(str); //scale is declared float, so read it as one
  par.ReadString(str);linespacing=_wtof(str);
  par.ReadString(str);NoAngles=_wtoi(str);
  par.ReadString(filegrid);
  par.ReadString(pathacc);
  par.ReadString(pathpressure);
  par.ReadString(str);NoItr=_wtoi(str);
  par.ReadString(str); threshold = _wtof(str);
  par.ReadString(str); pref = _wtof(str); if (str == ""){ pref = 0; }
  par.ReadString(str); cutxs = _wtoi(str); if (str == ""){ cutxs = 0; }
  par.ReadString(str); cutxe = _wtoi(str); if (str == ""){ cutxe = ImaxOrg-1; }
  par.ReadString(str); cutys = _wtoi(str); if (str == ""){ cutys = 0; }
  par.ReadString(str); cutye = _wtoi(str); if (str == ""){ cutye = JmaxOrg-1; }
  par.ReadString(str); cutzs = _wtoi(str); if (str == ""){ cutzs = 0; }
  par.ReadString(str); cutze = _wtoi(str); if (str == ""){ cutze = KmaxOrg-1; }
  par.Close();
  ////////////////////////////////Reading parameter completed////////////////////////
  Imax=cutxe-cutxs+1;
  Jmax=cutye-cutys+1;
  Kmax=cutze-cutzs+1;
  x=new float[Imax*Jmax*Kmax];
  y=new float[Imax*Jmax*Kmax];
  z=new float[Imax*Jmax*Kmax];
  u=new float[Imax*Jmax*Kmax];
  v=new float[Imax*Jmax*Kmax];
  w=new float[Imax*Jmax*Kmax];
  dudt=new float[Imax*Jmax*Kmax];
  dvdt=new float[Imax*Jmax*Kmax];
  dwdt=new float[Imax*Jmax*Kmax];
  //boundary-node count; must match the face ordering used by calIndexGPU: 2*I*J + 2*J*(K-2) + 2*(I-2)*(K-2)
  n=Imax*Jmax*2+(Kmax-2)*Jmax*2+(Imax-2)*(Kmax-2)*2;
  p=new float[Imax*Jmax*Kmax];
  pn=new float[Imax*Jmax*Kmax];
  pdns=new float[Imax*Jmax*Kmax];
  RHS=new float[Imax*Jmax*Kmax];
  curl = new float[Imax*Jmax*Kmax];
  mask = new float[Imax*Jmax*Kmax];
  pint=new float[n*n];
  pcountinner=new float[Imax*Jmax*Kmax];
  pcount=new float[n*n];
  index=new long[Imax*Jmax*Kmax];
  k1=new float[NoAngles];
  k2=new float[NoAngles];
  k3=new float[NoAngles];
  memset(p,0,sizeof(float)*Imax*Jmax*Kmax);
  memset(pn,0,sizeof(float)*Imax*Jmax*Kmax);
  memset(RHS,0,sizeof(float)*Imax*Jmax*Kmax);
  memset(curl, 0, sizeof(float)*Imax*Jmax*Kmax);
  memset(mask, 0, sizeof(float)*Imax*Jmax*Kmax);
  memset(pint,0,sizeof(float)*n*n);
  memset(pcountinner,0,sizeof(float)*Imax*Jmax*Kmax);
  memset(pcount,0,sizeof(float)*n*n);
  //calIndex(index,Imax,Jmax,Kmax);
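  // The virtual-grid file (filegrid) should contain NoAngles lines, each holding
  // three whitespace-separated direction components "k1 k2 k3"; the loop below
  // parses each line right to left (k3 first). This layout is inferred from the
  // parsing code, not from a separate file specification.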
    hipMemcpy(k2_d, k2, sizeof(float)*NoAngles, hipMemcpyHostToDevice);
    hipMemcpy(k3_d, k3, sizeof(float)*NoAngles, hipMemcpyHostToDevice);
    ///Read all the files inside a folder////////////////////////////
    //Step.1 judge whether the folder exists/////////////////////////
    if (!folderExists(pathacc))
    {
        cout << "Acceleration folder does not exist: " << CT2A(pathacc) << endl;
        //MessageBox(NULL, _T("Parameter input file: \"Parameter_Omni3D.dat\" open error"), _T("Omni3D Message"), MB_OK);
        cin >> Imax;
        //MessageBox(NULL, _T("Acceleration folder does not exist"), _T("Omni3D Message"), MB_OK);
        return -1;
    }
    if (!folderExists(pathpressure))
    {
        createFolder(pathpressure);
    }
    // Step.2 Read all the acceleration filenames////
    vector<string> filesacc;
    vector<string> filesvel;
    string s = CT2A(pathacc);
    getFiles(s, filesacc);
    /////////////////////////////////////////////////////////////////
    int size = filesacc.size();
    cout << "Processing Starts" << endl;
    log << "Processing Starts" << endl;
    for (int FileNum = 0; FileNum < size; FileNum++)
    {
        fileacc = filesacc[FileNum].c_str();
        int pos = fileacc.ReverseFind('\\');
        cout << CT2A(fileacc.Right(fileacc.GetLength() - pos - 1)) << endl;
        log << CT2A(fileacc.Right(fileacc.GetLength() - pos - 1));
        if (!fin.Open(fileacc, CFile::modeRead)) { log << " failed to open" << endl; continue; }
        fin.ReadString(str); fin.ReadString(str); fin.ReadString(str);
        //fin.ReadString(str);fin.ReadString(str);fin.ReadString(str);
        //With mask files...
        for (long k = 0; k < KmaxOrg; k++)
        {
            for (long j = 0; j < JmaxOrg; j++)
            {
                for (long i = 0; i < ImaxOrg; i++)
                {
                    long pos;
                    int ind = i - cutxs + (j - cutys)*(cutxe - cutxs + 1) + (k - cutzs)*(cutxe - cutxs + 1)*(cutye - cutys + 1);
                    fin.ReadString(str);
                    str = str.TrimRight();
                    if (i >= cutxs && i <= cutxe && j >= cutys && j <= cutye && k >= cutzs && k <= cutze)
                    {
                        pos = str.ReverseFind(' ');
                        mask[ind] = _wtof(str.Right(str.GetLength() - pos - 1));
                        for (long m = 0; m < 1; m++) { str = str.Left(pos); str = str.TrimRight(); pos = str.ReverseFind(' '); }
                        pos = str.ReverseFind(' ');
                        dwdt[ind] = _wtof(str.Right(str.GetLength() - pos - 1));
                        for (long m = 0; m < 1; m++) { str = str.Left(pos); str = str.TrimRight(); pos = str.ReverseFind(' '); }
                        dvdt[ind] = _wtof(str.Right(str.GetLength() - pos - 1));
                        for (long m = 0; m < 1; m++) { str = str.Left(pos); str = str.TrimRight(); pos = str.ReverseFind(' '); }
                        dudt[ind] = _wtof(str.Right(str.GetLength() - pos - 1));
                        for (long m = 0; m < 1; m++) { str = str.Left(pos); str = str.TrimRight(); pos = str.ReverseFind(' '); }
                        w[ind] = _wtof(str.Right(str.GetLength() - pos - 1));
                        str = str.Left(pos); pos = str.ReverseFind(' ');
                        v[ind] = _wtof(str.Right(str.GetLength() - pos - 1));
                        str = str.Left(pos); pos = str.ReverseFind(' ');
                        u[ind] = _wtof(str.Right(str.GetLength() - pos - 1));
                        str = str.Left(pos); pos = str.ReverseFind(' ');
                        z[ind] = _wtof(str.Right(str.GetLength() - pos - 1));
                        for (long m = 0; m < 1; m++) { str = str.Left(pos); str = str.TrimRight(); pos = str.ReverseFind(' '); }
                        y[ind] = _wtof(str.Right(str.GetLength() - pos - 1));
                        for (long m = 0; m < 1; m++) { str = str.Left(pos); str = str.TrimRight(); pos = str.ReverseFind(' '); }
                        x[ind] = _wtof(str.Right(str.GetLength() - pos - 1));
                    }
                }
            }
        }
        fin.Close();
        hipMemset(p_d, 0, sizeof(float)*Imax*Jmax*Kmax);
        hipMemset(pn_d, 0, sizeof(float)*Imax*Jmax*Kmax);
        hipMemset(curl_d, 0, sizeof(float)*Imax*Jmax*Kmax);
        hipMemset(pint_d, 0, sizeof(float)*n*n);
        hipMemset(pcount_d, 0, sizeof(float)*n*n);
        hipMemset(pcountinner_d, 0, sizeof(float)*Imax*Jmax*Kmax);
        hipMemset(pweight_d, 0, sizeof(float)*n*n);
        //hipMemset(pcountitr_d,0,sizeof(int)*n);
        hipMemcpy(dudt_d, dudt, sizeof(float)*Imax*Jmax*Kmax, hipMemcpyHostToDevice);
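        // The j = 0 face of each snapshot is seeded below with a Bernoulli estimate,
        // p = pref - 0.5*density*|u|^2, which anchors the omnidirectional line
        // integration before the iterative sweeps refine the interior field.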
hipMemcpy(dvdt_d,dvdt,sizeof(float)*Imax*Jmax*Kmax,hipMemcpyHostToDevice); hipMemcpy(dwdt_d,dwdt,sizeof(float)*Imax*Jmax*Kmax,hipMemcpyHostToDevice); //set boundary pressure as from Bernoulli equation for (int i = 0; i < Imax; i++){ for (int k = 0; k < Kmax; k++){ int ind = i + 0*Imax + k*Imax*Jmax; p[ind] = pref-0.5*density*(u[ind] * u[ind] + v[ind] * v[ind] + w[ind] * w[ind]); } } hipMemcpy(p_d,p,sizeof(float)*Imax*Jmax*Kmax,hipMemcpyHostToDevice); hipMemcpy(pn_d, p, sizeof(float)*Imax*Jmax*Kmax, hipMemcpyHostToDevice); //////////////////////End of allocate memory on GPU////////////// dim3 threadPerBlock(8,8); dim3 blockPerGrid(512,512); dim3 threadPerBlock1(8,8,8); dim3 blockPerGrid1(256, 256,256); //calCurlofMaterialAcc <<<blockPerGrid1, threadPerBlock1>>>(Imax, Jmax, Kmax, deltx, delty, deltz, dudt_d, dvdt_d, dwdt_d, curl_d); calCurlofMaterialAccCPU(Imax, Jmax, Kmax, deltx, delty, deltz, dudt, dvdt, dwdt, curl,mask); hipMemcpy(curl_d, curl, sizeof(float)*Imax*Jmax*Kmax, hipMemcpyHostToDevice); // thredholdHistMaterialAccCPU(Imax,Jmax,Kmax,curl1,percentage,&threshold); ////////////////////Start Kernels on GPU//////////////////////////////////////////// hipLaunchKernelGGL(( calIndexGPU) , dim3(n/512), dim3(512) , 0, 0, index_d, Imax, Jmax, Kmax); //omni3dparallellinesEqualSpacing <<<blockPerGrid, threadPerBlock >>>(Imax, Jmax, Kmax, NoAngles, linespacing, k1_d, k2_d, k3_d, index_d, deltx, delty, deltz, density, dudt_d, dvdt_d, dwdt_d, pint_d, pcount_d, pcountinner_d); //omni3dparallellinesEqualSpacingWeighted<<<blockPerGrid,threadPerBlock>>>(Imax,Jmax,Kmax,NoAngles,linespacing,k1_d,k2_d,k3_d,index_d,deltx,delty,deltz,density,dudt_d,dvdt_d,dwdt_d,pint_d,pweight_d,pcount_d,pcountinner_d,curl); //omni3dparallellinesEqualSpacingSelect<<<blockPerGrid,threadPerBlock>>>(Imax,Jmax,Kmax,NoAngles,linespacing,k1_d,k2_d,k3_d,index_d,deltx,delty,deltz,density,dudt_d,dvdt_d,dwdt_d,pint_d,pcount_d,pcountinner_d); // devidecount <<<n/ 512, 512 >>>(Imax, Jmax, Kmax, pint_d, pcount_d); //BCiteration <<<n / 512, 512 >>>(Imax, Jmax, Kmax, pint_d, pcount_d, p_d, pn_d, NoItr); /////-------------couting time----------------//////////// //omni3dparallellinesESInner<<<blockPerGrid,threadPerBlock>>>(Imax,Jmax,Kmax,NoAngles,linespacing,k1_d,k2_d,k3_d,index_d,deltx,delty,deltz,density,dudt_d,dvdt_d,dwdt_d,p_d,pn_d,pcountinner_d); //omni3dparallellinesESInnerSelect<<<blockPerGrid,threadPerBlock>>>(Imax,Jmax,Kmax,NoAngles,linespacing,k1_d,k2_d,k3_d,index_d,deltx,delty,deltz,density,dudt_d,dvdt_d,dwdt_d,p_d,pn_d,pcountinner_d); for (int i = 0; i<NoItr; i++) { //omni3dparallellinesESInnerWeighted<<<blockPerGrid,threadPerBlock>>>(Imax,Jmax,Kmax,NoAngles,linespacing,k1_d,k2_d,k3_d,index_d,deltx,delty,deltz,density,dudt_d,dvdt_d,dwdt_d,curl_d,p_d,pn_d,pcountinner_d); //omni3dparallellinesESInnerStepCount<<<blockPerGrid,threadPerBlock>>>(Imax,Jmax,Kmax,NoAngles,linespacing,k1_d,k2_d,k3_d,index_d,deltx,delty,deltz,density,dudt_d,dvdt_d,dwdt_d,p_d,pn_d,pcountinner_d,IntegrationSteps_d); //omni3dparallellinesESInnerWeightedMiniCurl<<<blockPerGrid,threadPerBlock>>>(Imax,Jmax,Kmax,NoAngles,linespacing,k1_d,k2_d,k3_d,index_d,deltx,delty,deltz,density,dudt_d,dvdt_d,dwdt_d,curl,p_d,pn_d,pcountinner_d); hipLaunchKernelGGL(( omni3dparallellinesESInnerSelect) , dim3(blockPerGrid), dim3(threadPerBlock) , 0, 0, Imax, Jmax, Kmax, NoAngles, linespacing, k1_d, k2_d, k3_d, index_d, deltx, delty, deltz, density, dudt_d, dvdt_d, dwdt_d, curl_d, p_d, pn_d, pcountinner_d, threshold); //omni3dparallellinesESInnerSelectFixedBC << <blockPerGrid, 
            //    threadPerBlock >> >(Imax, Jmax, Kmax, NoAngles, linespacing, k1_d, k2_d, k3_d, index_d, deltx, delty, deltz, density, dudt_d, dvdt_d, dwdt_d, curl_d, p_d, pn_d, pcountinner_d, threshold);
            //omni2dparallellinesOnFaceInner<<<blockPerGrid,threadPerBlock>>>(Imax,Jmax,Kmax,10000,linespacing,index_d,deltx,delty,deltz,density,dudt_d,dvdt_d,dwdt_d,p_d,pn_d,pcountinner_d);
            //omni3dparallellinesInner<<<blockPerGrid,threadPerBlock>>>(Imax,Jmax,Kmax,NoAngles,k1_d,k2_d,k3_d,index_d,deltx,delty,deltz,density,dudt_d,dvdt_d,dwdt_d,p_d,pn_d);
            hipLaunchKernelGGL(( devidecountInner) , dim3(n/512), dim3(512) , 0, 0, Imax, Jmax, Kmax, p_d, pn_d, pcountinner_d);
            if (i == NoItr - 1)
            {
                hipMemcpy(pcountinner, pcountinner_d, sizeof(float)*Imax*Jmax*Kmax, hipMemcpyDeviceToHost);
            }
            //hipMemset(pcountinner_d,1.0,sizeof(float)*Imax*Jmax*Kmax);
            hipMemset(pcountinner_d, 0, sizeof(float)*Imax*Jmax*Kmax);
        }
        //devidecountInner<<<n/512,512>>>(Imax,Jmax,Kmax,p_d,pn_d,pcountinner_d);
        hipMemcpy(p, p_d, sizeof(float)*Imax*Jmax*Kmax, hipMemcpyDeviceToHost);
        //hipMemcpy(pcountinner, pcountinner_d, sizeof(float)*Imax*Jmax*Kmax, hipMemcpyDeviceToHost);
        //hipMemcpy(curl1,curl,sizeof(float)*Imax*Jmax*Kmax,hipMemcpyDeviceToHost);
        // check for error (processing continues with the next file)
        hipError_t error = hipGetLastError();
        if (error != hipSuccess)
        {
            // print the runtime error message
            printf("HIP error: %s\n", hipGetErrorString(error));
        }
        CStdioFile fout;
        CString outfile = pathpressure;
        outfile.AppendFormat(_T("PressureOmni3D_%05d.dat"), FileNum);
        fout.Open(outfile, CFile::modeWrite | CFile::modeCreate);
        ////////////////////Write Data to file/////////////////////
        /*
        meanpcal=0;
        int count = 0;
        for(long k=0;k<Kmax;k++) { for(long j=0;j<Jmax;j++) { for(long i=0;i<Imax;i++) {
            int ind=i+j*Imax+k*Imax*Jmax;
            if (curl[ind] != 0) { meanpcal += p[ind]; count++; }
        } } }
        //meanpcal=meanpcal/n;
        meanpcal=meanpcal/count;
        for(long k=0;k<Kmax;k++) { for(long j=0;j<Jmax;j++) { for(long i=0;i<Imax;i++) {
            int ind=i+j*Imax+k*Imax*Jmax;
            if (curl[ind] != 0) { p[ind]=p[ind]-meanpcal; }
        } } }
        */
        fout.WriteString(_T("TITLE = \"Pressure Integrated From GPU Based Omni 3D Method\"\n"));
        fout.WriteString(_T("VARIABLES = \"X\",\"Y\",\"Z\",\"P\",\"Count\"\n"));
        str.Format(_T("ZONE I=%i, J=%i, K=%i,F=POINT\n"), Imax, Jmax, Kmax);
        fout.WriteString(str);
        //pmax=1;meanpdns=0;
        for (long k = 0; k < Kmax; k++)
        {
            for (long j = 0; j < Jmax; j++)
            {
                for (long i = 0; i < Imax; i++)
                {
                    int ind = i + j*Imax + k*Imax*Jmax;
                    str.Format(_T("%15.9f %15.9f %15.9f %15.9f %15.9f\n"), x[ind], y[ind], z[ind], p[ind], pcountinner[ind]);
                    fout.WriteString(str);
                }
            }
        }
        fout.Close();
        /////////////Iteration completed/////////////////////////////////////////////
        if (FileNum == 0)
        {
            fout.Open(_T("CurlofMaterialAcc_Sample.dat"), CFile::modeWrite | CFile::modeCreate);
            fout.WriteString(_T("TITLE = \"Curl of Material Acceleration Multiplied by the Mask\"\n"));
            fout.WriteString(_T("VARIABLES = \"X\",\"Y\",\"Z\",\"Curl of Acceleration\"\n"));
            str.Format(_T("ZONE I=%i, J=%i, K=%i,F=POINT\n"), Imax, Jmax, Kmax);
            fout.WriteString(str);
            //pmax=1;meanpdns=0;
            for (long k = 0; k < Kmax; k++)
            {
                for (long j = 0; j < Jmax; j++)
                {
                    for (long i = 0; i < Imax; i++)
                    {
                        int ind = i + j*Imax + k*Imax*Jmax;
                        str.Format(_T("%15.9f %15.9f %15.9f %15.9f\n"), x[ind], y[ind], z[ind], curl[ind]);
                        fout.WriteString(str);
                    }
                }
            }
            fout.Close();
        }
    }
    // Release host buffers; each array must be deleted individually
    // (a comma-separated list after delete[] only frees the first name).
    delete[] x; delete[] y; delete[] z;
    delete[] u; delete[] v; delete[] w;
    delete[] dudt; delete[] dvdt; delete[] dwdt;
    delete[] pint; delete[] p; delete[] pn; delete[] pdns; delete[] RHS;
    delete[] pcount; delete[] pcountinner; delete[] index;
    delete[] k1; delete[] k2; delete[] k3;
    delete[] curl; delete[] mask;
    log.close();
    hipFree(dudt_d);
    hipFree(dvdt_d);
    hipFree(dwdt_d);
    hipFree(pint_d);
    hipFree(pcount_d);
    hipFree(p_d);
    hipFree(pn_d);
    hipFree(pcountinner_d);
    hipFree(k1_d);
    hipFree(k2_d);
    hipFree(k3_d);
    hipFree(pweight_d);
    hipFree(curl_d);
    hipDeviceReset();
    return 0;
}
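// The per-iteration pattern above accumulates line-integral pressure estimates
// into pn_d, with visit counts in pcountinner_d, and then averages them with
// devidecountInner (whose definition lives elsewhere in this project). A minimal
// sketch of that averaging step, assuming pn holds the accumulated sums and
// pcount the number of contributing paths (hypothetical helper, not the
// project's kernel):
__global__ void averagePathEstimates(long N, float* p, float* pn, float* pcount)
{
    long idx = blockIdx.x*(long)blockDim.x + threadIdx.x;
    if (idx < N)
    {
        if (pcount[idx] > 0.0f) p[idx] = pn[idx] / pcount[idx]; // mean over all visiting paths
        pn[idx] = 0.0f; // clear the accumulator for the next sweep
    }
}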
f9b7e7d58970b0791ada83c56b15778d3ff98226.cu
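// Original CUDA version of the omnidirectional pressure solver. This unit
// provides the boundary-index <-> (i,j,k) mappings (ntoijk, ntoij2d, ij2dton),
// the line/box intersection helpers (crosspoint, cross2point, crosspoint2d),
// and the staircase line integrals of the material acceleration (the
// bodyIntegral* family) used to reconstruct pressure from PIV data.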
#include <fstream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <curand_kernel.h>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <math.h>
#include <iomanip>
#include <afx.h>
#include <time.h>
#include "calRHS.h"
#include "sor3D.h"
#include "initialIntegration.h"
#include "io.h"
#include <list>
#include <vector>
using namespace std;
#define weight1 0
#define weight2 0
#define weight 0.8
#define zero 1e-7
#define PI 3.1415926535897932384626433832795
// Return true if the folder exists, false otherwise
bool folderExists(CString folderName)
{
    std::string s = CT2A(folderName);
    if (_access(s.c_str(), 0) == -1)
    {
        //File not found
        return false;
    }
    DWORD attr = GetFileAttributes(folderName);
    if (!(attr & FILE_ATTRIBUTE_DIRECTORY))
    {
        // File is not a directory
        return false;
    }
    return true;
}
// Returns false if the folder already exists, true after creating it
bool createFolder(CString folderName)
{
    list<std::string> folderLevels;
    // char* c_str = (char*)folderName.c_str();
    if (folderExists(folderName))
    {
        return false;
    }
    else
    {
        CreateDirectory((LPCTSTR)folderName, NULL);
        return true;
    }
    /*Point to end of the string
    char* strPtr = &c_str[strlen(c_str) - 1];
    // Create a list of the folders which do not currently exist
    do {
        if (folderExists(c_str)) { break; }
        // Break off the last folder name, store in folderLevels list
        do { strPtr--; } while ((*strPtr != '\\') && (*strPtr != '/') && (strPtr >= c_str));
        folderLevels.push_front(string(strPtr + 1));
        strPtr[1] = 0;
    } while (strPtr >= c_str);
    if (_chdir(c_str)) { return true; }
    // Create the folders iteratively
    for (list<std::string>::iterator it = folderLevels.begin(); it != folderLevels.end(); it++)
    {
        if (CreateDirectory((LPCTSTR)it->c_str(), NULL) == 0) { return true; }
        _chdir(it->c_str());
    }
    return false;*/
}
//Get all the files in filepath
void getFiles(string path, vector<string> & f)
{
    FILE* pipe = NULL;
    string pCmd = "dir /B /S " + string(path);
    string tmp;
    char buf[1024];
    if (NULL == (pipe = _popen(pCmd.c_str(), "rt")))
    {
        cout << "Failed to list files with: " << pCmd << endl;
        return;
    }
    while (!feof(pipe))
    {
        if (fgets(buf, 1024, pipe) != NULL)
        {
            tmp = string(buf);
            tmp.erase(tmp.find_last_not_of("\n") + 1);
            f.push_back(tmp);
        }
    }
    _pclose(pipe);
}
__device__ __host__ void ntoijk(long Xsize, long Ysize, long Zsize, long nout, int* i, int* j, int* k)
{
    int iout, jout, kout;
    if (nout <= Xsize*Ysize - 1)
    {
        kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout;
    }
    if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1))
    {
        iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1;
    }
    if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize)
    {
        kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout;
    }
    if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2))
    {
        jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1;
    }
    if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize -
2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } i[0] = iout; j[0] = jout; k[0] = kout; } __device__ __host__ bool crosspoint(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, float k1, float k2, float k3, int* i, int* j, int* k) { int iout, jout, kout; float r, x, y, z; r = 0; x = 0; y = 0; z = 0; bool flag = 0; /////case 1, vertical to x-axis if (k1 == 0 && k2 != 0 && k3 != 0) { if (iin >= 0 && iin <= Xsize - 1) { ////four crossing point;y=0;y=max;z=0;z=max; r = (0 - jin) / k2; y = 0; z = kin + k3*r; if (z <= Zsize - 1 && z >= 0 && r != 0 && flag == 0)//cross y=0; { iout = iin; jout = 0; kout = floor(z + 0.5); flag = 1; } r = (Ysize - 1 - jin) / k2; y = Ysize - 1; z = kin + k3*r; if (z <= Zsize - 1 && z >= 0 && r != 0 && flag == 0)//y=max; { iout = iin; jout = Ysize - 1; kout = floor(z + 0.5); flag = 1; } r = (0 - kin) / k3; z = 0; y = jin + k2*r; if (y <= Ysize - 1 && y >= 0 && flag == 0 && r != 0)//z=0; { iout = iin; jout = floor(y + 0.5); kout = 0; flag = 1; } r = (Zsize - 1 - kin) / k3; z = Zsize - 1; y = jin + k2*r; if (y <= Ysize - 1 && y >= 0 && flag == 0 && r != 0) { iout = iin; jout = floor(y + 0.5); kout = Zsize - 1; flag = 1; } } if (iin == Xsize - 1 || iin == 0) { int jout1 = jin; int kout1 = kin; r = (0 - jin) / k2; y = 0; float z = kin + k3*r; bool flag2 = 0; if (z <= Zsize - 1 && z >= 0 && r != 0 && flag == 0)//cross y=0; { if (flag2 == 0){ iout = iin; jout = 0; kout = floor(z + 0.5); flag2 = 1; } else { iout = iin; jout1 = 0; kout1 = floor(z + 0.5); } flag = 1; } r = (Ysize - 1 - jin) / k2; y = Ysize - 1; z = kin + k3*r; if (z <= Zsize - 1 && z >= 0 && r != 0 && flag == 0)//y=max; { if (flag2 == 0) { iout = iin; jout = Ysize - 1; kout = floor(z + 0.5); flag2 = 1; } else { iout = iin; jout1 = Ysize - 1; kout1 = floor(z + 0.5); } flag = 1; } r = (0 - kin) / k3; z = 0; y = jin + k2*r; if (y <= Ysize - 1 && y >= 0 && flag == 0 && r != 0)//z=0; { if (flag2 == 0) { iout = iin; jout = floor(y + 0.5); kout = 0; flag2 = 1; } else { iout = iin; jout1 = floor(y + 0.5); kout1 = 0; } flag = 1; } r = (Zsize - 1 - kin) / k3; z = Zsize - 1; y = jin + k2*r; if (y <= Ysize - 1 && y >= 0 && flag == 0 && r != 0) { if (flag2 == 0) { iout = iin; jout = floor(y + 0.5); kout = Zsize - 1; flag2 = 1; } else { iout = iin; jout1 = floor(y + 0.5); kout1 = Zsize - 1; } flag = 1; } if ((jout1 - jin)*(jout1 - jin) + (kout1 - kin)*(kout1 - kin)>(jout - jin)*(jout - jin) + (kout - kin)*(kout - kin)) { jout = jout1; kout = kout1; } } } ///case 2, vertical to y-axis if (k1 != 0 && k2 == 0 && k3 != 0) { if (jin >= 0 && jin <= Ysize - 1) { ////four crossing point r = (0 - iin) / k1; x = 0; z = kin + k3*r;//x=0; if (z <= Zsize - 1 && z >= 0 && flag == 0 && r != 0) { iout = 0; jout = jin; kout = floor(z + 0.5); flag = 1; } r = (Xsize - 1 - iin) / k1; x = Xsize - 1; z = kin + k3*r;//x=max if (z <= 
Zsize - 1 && z >= 0 && flag == 0 && r != 0) { iout = Xsize - 1; jout = jin; kout = floor(z + 0.5); flag = 1; } r = (0 - kin) / k3; z = 0; x = iin + k1*r;//z=0; if (x <= Xsize - 1 && x >= 0 && r != 0 && flag == 0) { iout = floor(x + 0.5); jout = jin; kout = 0; flag = 1; } r = (Zsize - 1 - kin) / k3; z = Zsize - 1; x = iin + k1*r;//z=max; if (x <= Xsize - 1 && x >= 0 && r != 0 && flag == 0) { iout = floor(x + 0.5); jout = jin; kout = Zsize - 1; flag = 1; } } if (jin == 0 || jin == Ysize - 1) { int iout1 = iin; int kout1 = kin; bool flag2 = 0; r = (0 - iin) / k1; x = 0; z = kin + k3*r;//x=0; if (z <= Zsize - 1 && z >= 0 && flag == 0 && r != 0) { if (flag2 == 0) { iout = 0; jout = jin; kout = floor(z + 0.5); flag2 = 1; } else { iout1 = 0; jout = jin; kout1 = floor(z + 0.5); } flag = 1; } r = (Xsize - 1 - iin) / k1; x = Xsize - 1; z = kin + k3*r;//x=max if (z <= Zsize - 1 && z >= 0 && flag == 0 && r != 0) { if (flag2 == 0) { iout = Xsize - 1; jout = jin; kout = floor(z + 0.5); flag2 = 1; } else { iout1 = Xsize - 1; jout = jin; kout1 = floor(z + 0.5); } flag = 1; } r = (0 - kin) / k3; z = 0; x = iin + k1*r;//z=0; if (x <= Xsize - 1 && x >= 0 && r != 0 && flag == 0) { if (flag2 == 0) { iout = floor(x + 0.5); jout = jin; kout = 0; flag2 = 1; } else { iout1 = int(x + 0.5); jout = jin; kout1 = 0; } flag = 1; } r = (Zsize - 1 - kin) / k3; z = Zsize - 1; x = iin + k1*r;//z=max; if (x <= Xsize - 1 && x >= 0 && r != 0 && flag == 0) { if (flag2 == 0) { iout = floor(x + 0.5); jout = jin; kout = Zsize - 1; flag2 = 1; } else { iout1 = floor(x + 0.5); jout = jin; kout1 = Zsize - 1; } flag = 1; } if ((iout1 - iin)*(iout1 - iin) + (kout1 - kin)*(kout1 - kin)>(iout - iin)*(iout - iin) + (kout - kin)*(kout - kin)) { iout = iout1; kout = kout1; } } } ///case 3, vertical to z-axis if (k1 != 0 && k2 != 0 && k3 == 0) { if (kin >= 0 && kin <= Zsize - 1) { ////four crossing point r = (0 - iin) / k1; x = 0; y = jin + k2*r;//x=0; if (y <= Ysize - 1 && y >= 0 && flag == 0 && r != 0) { iout = 0; jout = floor(y + 0.5); kout = kin; flag = 1; } r = (Xsize - 1 - iin) / k1; x = Xsize - 1; y = jin + k2*r;//x=max; if (y <= Ysize - 1 && y >= 0 && r != 0 && flag == 0) { iout = Xsize - 1; jout = floor(y + 0.5); kout = kin; flag = 1; } r = (0 - jin) / k2; y = 0; x = iin + k1*r;//y=0; if (x <= Xsize - 1 && x >= 0 && r != 0 && flag == 0) { iout = floor(x + 0.5); jout = 0; kout = kin; flag = 1; } r = (Ysize - 1 - jin) / k2; y = Ysize - 1; x = iin + k1*r;//y=max; if (x <= Xsize - 1 && x >= 0 && flag == 0 && r != 0) { iout = floor(x + 0.5); jout = Ysize - 1; kout = kin; flag = 1; } } if (kin == 0 || kin == Zsize - 1) { int iout1 = iin; int jout1 = jin; bool flag2 = 0; r = (0 - iin) / k1; x = 0; y = jin + k2*r;//x=0; if (y <= Ysize - 1 && y >= 0 && flag == 0 && r != 0) { if (flag2 == 0) { iout = 0; jout = floor(y + 0.5); kout = kin; flag2 = 1; } else { iout1 = 0; jout1 = floor(y + 0.5); kout = kin; } flag = 1; } r = (Xsize - 1 - iin) / k1; x = Xsize - 1; y = jin + k2*r;//x=max; if (y <= Ysize - 1 && y >= 0 && r != 0 && flag == 0) { if (flag2 == 0) { iout = Xsize - 1; jout = floor(y + 0.5); kout = kin; flag2 = 1; } else { iout1 = Xsize - 1; jout1 = floor(y + 0.5); kout = kin; } flag = 1; } r = (0 - jin) / k2; y = 0; x = iin + k1*r;//y=0; if (x <= Xsize - 1 && x >= 0 && r != 0 && flag == 0) { if (flag == 0) { iout = floor(x + 0.5); jout = 0; kout = kin; flag2 = 1; } else { iout1 = floor(x + 0.5); jout1 = 0; kout = kin; } flag = 1; } r = (Ysize - 1 - jin) / k2; y = Ysize - 1; x = iin + k1*r;//y=max; if (x <= Xsize - 1 && x >= 0 && flag == 0 
&& r != 0) { if (flag2 == 0) { iout = floor(x + 0.5); jout = Ysize - 1; kout = kin; flag2 = 1; } else { iout1 = floor(x + 0.5); jout1 = Ysize - 1; kout = kin; } flag = 1; } if ((iout1 - iin)*(iout1 - iin) + (jout1 - jin)*(jout1 - jin)>(iout - iin)*(iout - iin) + (jout - jin)*(jout - jin)) { iout = iout1; jout = jout1; } } } ///case 4, vertical to plane IJ if (k1 == 0 && k2 == 0 && k3 != 0 && flag == 0) { if (iin <= Xsize - 1 && iin >= 0 && jin <= Ysize - 1 && jin >= 0) { iout = iin; jout = jin; if (kin<Zsize / 2) { kout = Zsize - 1; } else { kout = 0; } flag = 1; } } ///case 5, vertical to IK plane if (k1 == 0 && k2 != 0 && k3 == 0 && flag == 0) { if (iin >= 0 && iin <= Xsize - 1 && kin >= 0 && kin <= Zsize - 1) { iout = iin; kout = kin; if (jin<Ysize / 2) { jout = Ysize - 1; } else { jout = 0; } flag = 1; } } ///case 6, vertical to JK plane if (k1 != 0 && k2 == 0 && k3 == 0 && flag == 0) { if (jin >= 0 && jin <= Ysize - 1 && kin >= 0 && kin <= Zsize - 1) { jout = jin; kout = kin; if (iin<Xsize / 2) { iout = Xsize - 1; } else { iout = 0; } flag = 1; } } /// case 7, purely inclined if (k1 != 0 && k2 != 0 && k3 != 0 && flag == 0) { /// six crossing point r = (0 - iin) / k1; x = 0; y = jin + k2*r; z = kin + k3*r;//x=0 if (y <= Ysize - 1 && y >= 0 && z <= Zsize - 1 && z >= 0 && flag == 0 && r != 0) { iout = 0; jout = floor(y + 0.5); kout = floor(z + 0.5); flag = 1; } r = (Xsize - 1 - iin) / k1; x = Xsize - 1; y = jin + k2*r; z = kin + k3*r;//x=max if (y <= Ysize - 1 && y >= 0 && z <= Zsize - 1 && z >= 0 && flag == 0 && r != 0) { iout = Xsize - 1; jout = floor(y + 0.5); kout = floor(z + 0.5); flag = 1; } r = (0 - jin) / k2; x = iin + k1*r; y = 0; z = kin + k3*r;//y=0; if (x <= Xsize - 1 && x >= 0 && z <= Zsize - 1 && z >= 0 && flag == 0 && r != 0) { iout = floor(x + 0.5); jout = 0; kout = floor(z + 0.5); flag = 1; } r = (Ysize - 1 - jin) / k2; x = iin + k1*r; y = Ysize - 1; z = kin + k3*r;//y=max if (x <= Xsize - 1 && x >= 0 && z <= Zsize - 1 && z >= 0 && flag == 0 && r != 0) { iout = floor(x + 0.5); jout = Ysize - 1; kout = floor(z + 0.5); flag = 1; } r = (0 - kin) / k3; x = iin + k1*r; y = jin + k2*r; z = 0;//z=0; if (x <= Xsize - 1 && x >= 0 && y <= Ysize - 1 && y >= 0 && flag == 0 && r != 0) { iout = floor(x + 0.5); jout = floor(y + 0.5); kout = 0; flag = 1; } r = (Zsize - 1 - kin) / k3; x = iin + k1*r; y = jin + k2*r; z = Zsize - 1;//z=max if (x <= Xsize - 1 && x >= 0 && y <= Ysize - 1 && y >= 0 && flag == 0 && r != 0) { iout = floor(x + 0.5); jout = floor(y + 0.5); kout = Zsize - 1; flag = 1; } } if (flag == 1) { i[0] = iout; j[0] = jout; k[0] = kout; } else { i[0] = iin; j[0] = jin; k[0] = kin; } return flag; } __device__ __host__ bool cross2point(long Xsize, long Ysize, long Zsize, int *iin, int *jin, int *kin, float xin, float yin, float zin, float k1, float k2, float k3, int* iout, int* jout, int* kout) { float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; iin[0] = Xsize; jin[0] = Ysize; kin[0] = Zsize; iout[0] = Xsize; jout[0] = Ysize; kout[0] = Zsize; // printf("%f %f %f %f",xin,yin,zin,sqrt(xin*xin+yin*yin+zin*zin)); if (k1 == 0 && k2 != 0 && k3 != 0) { if (xin>-center_x&&xin<center_x) { ////four crossing point;y=0;y=max;z=0;z=max; float r = (-center_y - yin) / k2; float y1 = -center_y; float z1 = zin + k3*r; r = (center_y - yin) / k2; float y2 = center_y; float z2 = zin + k3*r; r = (-center_z - zin) / k3; float z3 = -center_z; float y3 = yin + k2*r; r = (center_z - zin) / k3; float z4 = center_z; float y4 = yin + k2*r; bool 
flag = 0; if (z1 <= center_z&&z1 >= -center_z&&flag == 0)//cross y=0; { if (flag == 0) { iin[0] = floor(xin + center_x + 0.5); jin[0] = 0; kin[0] = floor(z1 + center_z + 0.5); } if (flag == 1) { iout[0] = floor(xin + center_x + 0.5); jout[0] = 0; kout[0] = floor(z1 + center_z + 0.5); } flag = 1; } if (z2 <= center_z&&z2 >= -center_z)//y=max; { if (flag == 0) { iin[0] = floor(xin + center_x + 0.5); jin[0] = Ysize - 1; kin[0] = floor(z2 + center_z + 0.5); } if (flag == 1) { iout[0] = floor(xin + center_x + 0.5); jout[0] = Ysize - 1; kout[0] = floor(z2 + center_z + 0.5); } flag = 1; } if (y3 <= center_y&&y3 >= -center_y)//z=0; { if (flag == 0) { iin[0] = floor(xin + center_x + 0.5); jin[0] = floor(y3 + center_y + 0.5); kin[0] = 0; } if (flag == 1) { iout[0] = floor(xin + center_x + 0.5); jout[0] = floor(y3 + center_y + 0.5); kout[0] = 0; } flag = 1; } if (y4 <= center_y&&y4 >= -center_y) { if (flag == 0) { iin[0] = floor(xin + center_x + 0.5); jin[0] = floor(y4 + center_y + 0.5); kin[0] = Zsize - 1; } if (flag == 1) { iout[0] = floor(xin + center_x + 0.5); jout[0] = floor(y4 + center_y + 0.5); kout[0] = Zsize - 1; } } //sorting intersection point by in, out order if (flag != 0) { if ((jout[0] - jin[0])*k2 + (kout[0] - kin[0])*k3<0) { int temp; temp = jin[0]; jin[0] = jout[0]; jout[0] = temp; temp = kin[0]; kin[0] = kout[0]; kout[0] = temp; } } return true; } } ///case 2, vertical to y-axis if (k1 != 0 && k2 == 0 && k3 != 0) { if (yin>-center_y&&yin<center_y) { ////four crossing point float r = (-center_x - xin) / k1; float x1 = -center_x; float z1 = zin + k3*r;//x=0; r = (center_x - xin) / k1; float x2 = center_x; float z2 = zin + k3*r;//x=max r = (-center_z - zin) / k3; float z3 = -center_z; float x3 = xin + k1*r;//z=0; r = (center_z - zin) / k3; float z4 = center_z; float x4 = xin + k1*r;//z=max; bool flag = 0; if (z1 <= center_z&&z1 >= -center_z) { if (flag == 0) { iin[0] = 0; jin[0] = floor(yin + center_y + 0.5); kin[0] = floor(z1 + center_z + 0.5); } if (flag == 1) { iout[0] = 0; jout[0] = floor(yin + center_y + 0.5); kout[0] = floor(z1 + center_z + 0.5); } flag = 1; } if (z2 <= center_z&&z2 >= -center_z) { if (flag == 0) { iin[0] = Xsize - 1; jin[0] = floor(yin + center_y + 0.5); kin[0] = floor(z2 + center_z + 0.5); } if (flag == 1) { iout[0] = Xsize - 1; jout[0] = floor(yin + center_y + 0.5); kout[0] = floor(z2 + center_z + 0.5); } flag = 1; } if (x3 <= center_x&&x3 >= -center_x) { if (flag == 0) { iin[0] = floor(x3 + center_x + 0.5); jin[0] = floor(yin + center_y + 0.5); kin[0] = 0; } if (flag == 1) { iout[0] = floor(x3 + center_x + 0.5); jout[0] = floor(yin + center_y + 0.5); kout[0] = 0; } flag = 1; } if (x4 <= center_x&&x4 >= -center_x) { if (flag == 0) { iin[0] = floor(x4 + center_x + 0.5); jin[0] = floor(yin + center_y + 0.5); kin[0] = Zsize - 1; } if (flag == 1) { iout[0] = floor(x4 + center_x + 0.5); jout[0] = floor(yin + center_y + 0.5); kout[0] = Zsize - 1; } flag = 1; } //sorting intersection point by in, out order if (flag != 0) { if ((iout[0] - iin[0])*k1 + (kout[0] - kin[0])*k3<0) { int temp; temp = iin[0]; iin[0] = iout[0]; iout[0] = temp; temp = kin[0]; kin[0] = kout[0]; kout[0] = temp; } } return true; } } ///case 3, vertical to z-axis if (k1 != 0 && k2 != 0 && k3 == 0) { if (zin>-center_z&&zin<center_z) { ////four crossing point float r = (-center_x - xin) / k1; float x1 = -center_x; float y1 = yin + k2*r;//x=0; r = (center_x - xin) / k1; float x2 = center_x; float y2 = yin + k2*r;//x=max; r = (-center_y - zin) / k2; float y3 = -center_y; float x3 = xin + k1*r;//y=0; 
r = (center_y - zin) / k2; float y4 = center_y; float x4 = xin + k1*r;//y=max; bool flag = 0; if (y1 <= center_y&&y1 >= -center_y) { if (flag == 0) { iin[0] = 0; jin[0] = floor(y1 + center_y + 0.5); kin[0] = floor(zin + center_z + 0.5); } if (flag == 1) { iout[0] = 0; jout[0] = floor(y1 + center_y + 0.5); kout[0] = floor(zin + center_z + 0.5); } flag = 1; } if (y2 <= center_y&&y2 >= -center_y) { if (flag == 0) { iin[0] = Xsize - 1; jin[0] = floor(y2 + center_y + 0.5); kin[0] = floor(zin + center_z + 0.5); } if (flag == 1) { iout[0] = Xsize - 1; jout[0] = floor(y2 + center_y + 0.5); kout[0] = floor(zin + center_z + 0.5); } flag = 1; } if (x3 <= center_x&&x3 >= -center_x) { if (flag == 0) { iin[0] = floor(x3 + center_x + 0.5); jin[0] = 0; kin[0] = floor(zin + center_z + 0.5); } if (flag == 1) { iout[0] = floor(x3 + center_x + 0.5); jout[0] = 0; kout[0] = floor(zin + center_z + 0.5); } flag = 1; } if (x4 <= center_x&&x4 >= -center_x) { if (flag == 0) { iin[0] = floor(x4 + center_x + 0.5); jin[0] = Ysize - 1; kin[0] = floor(zin + center_z + 0.5); } if (flag == 1) { iout[0] = floor(x4 + center_x + 0.5); jout[0] = Ysize - 1; kout[0] = floor(zin + center_z + 0.5); } flag = 1; } //sorting intersection point by in, out order if (flag != 0) { if ((iout[0] - iin[0])*k1 + (jout[0] - jin[0])*k2<0) { int temp; temp = iin[0]; iin[0] = iout[0]; iout[0] = temp; temp = jin[0]; jin[0] = jout[0]; jout[0] = temp; } } return true; } } ///case 4, vertical to plane IJ if (abs(k1)<zero&&abs(k2)<zero&&abs(k3) >= zero) { if (xin<center_x&&xin>-center_x&&yin<center_y&&yin>-center_y) { iin[0] = floor(xin + center_x + 0.5); iout[0] = iin[0]; jin[0] = floor(yin + center_y + 0.5); jout[0] = jin[0]; if (k3>0) { kin[0] = 0; kout[0] = Zsize - 1; } else{ kin[0] = Zsize - 1; kout[0] = 0; } return true; } } ///case 5, vertical to IK plane if (abs(k1)<zero&&abs(k2) >= zero&&abs(k3)<zero) { if (xin>-center_x&&xin<center_x&&zin>-center_z&&zin<center_z) { iin[0] = floor(xin + center_x + 0.5); iout[0] = iin[0]; kin[0] = floor(zin + center_z + 0.5); kout[0] = kin[0]; if (k2>0) { jout[0] = Ysize - 1; jin[0] = 0; } else { jin[0] = Ysize - 1; jout[0] = 0; } return true; } } ///case 6, vertical to JK plane if (abs(k1) >= zero&&abs(k2)<zero&&abs(k3)<zero) { if (yin>-center_y&&yin<center_y&&zin>-center_z&&zin<center_z) { jin[0] = floor(yin + center_y + 0.5); jout[0] = jin[0]; kin[0] = floor(zin + center_z + 0.5); kout[0] = kin[0]; if (k1>0) { iout[0] = Xsize - 1; iin[0] = 0; } else { iin[0] = Xsize - 1; iout[0] = 0; } } return true; } /// case 7, purely inclined if (abs(k1) >= zero&&abs(k2) >= zero&&abs(k3) >= zero) { /// six crossing point float r; float x1, x2, x3, x4, x5, x6; float y1, y2, y3, y4, y5, y6; float z1, z2, z3, z4, z5, z6; r = (-center_x - xin) / k1; x1 = -center_x; y1 = yin + k2*r; z1 = zin + k3*r;//x=0 r = (center_x - xin) / k1; x2 = center_x; y2 = yin + k2*r; z2 = zin + k3*r;//x=max r = (-center_y - yin) / k2; x3 = xin + k1*r; y3 = -center_y; z3 = zin + k3*r;//y=0; r = (center_y - yin) / k2; x4 = xin + k1*r; y4 = center_y; z4 = zin + k3*r;//y=max r = (-center_z - zin) / k3; x5 = xin + k1*r; y5 = yin + k2*r; z5 = -center_z;//z=0; r = (center_z - zin) / k3; x6 = xin + k1*r; y6 = yin + k2*r; z6 = center_z;//z=max bool flag = 0; if (y1 <= center_y&&y1 >= -center_y&&z1 <= center_z&&z1 >= -center_z) { if (flag == 0) { iin[0] = 0; jin[0] = floor(y1 + center_y + 0.5); kin[0] = floor(z1 + center_z + 0.5); } if (flag == 1) { iout[0] = 0; jout[0] = floor(y1 + center_y + 0.5); kout[0] = floor(z1 + center_z + 0.5); } flag = 1; } if 
(y2 <= center_y&&y2 >= -center_y&&z2 <= center_z&&z2 >= -center_z) { if (flag == 0) { iin[0] = Xsize - 1; jin[0] = floor(y2 + center_y + 0.5); kin[0] = floor(z2 + center_z + 0.5); } if (flag == 1) { iout[0] = Xsize - 1; jout[0] = floor(y2 + center_y + 0.5); kout[0] = floor(z2 + center_z + 0.5); } flag = 1; } if (x3 <= center_x&&x3 >= -center_x&&z3 <= center_z&&z3 >= -center_z) { if (flag == 0) { iin[0] = floor(x3 + center_x + 0.5); jin[0] = 0; kin[0] = floor(z3 + center_z + 0.5); } if (flag == 1) { iout[0] = floor(x3 + center_x + 0.5); jout[0] = 0; kout[0] = floor(z3 + center_z + 0.5); } flag = 1; } if (x4 <= center_x&&x4 >= -center_x&&z4 <= center_z&&z4 >= -center_z) { if (flag == 0) { iin[0] = floor(x4 + center_x + 0.5); jin[0] = Ysize - 1; kin[0] = floor(z4 + center_z + 0.5); } if (flag == 1) { iout[0] = floor(x4 + center_x + 0.5); jout[0] = Ysize - 1; kout[0] = floor(z4 + center_z + 0.5); } flag = 1; } if (x5 <= center_x&&x5 >= -center_x&&y5 <= center_y&&y5 >= -center_y) { if (flag == 0) { iin[0] = floor(x5 + center_x + 0.5); jin[0] = floor(y5 + center_y + 0.5); kin[0] = 0; } if (flag == 1) { iout[0] = floor(x5 + center_x + 0.5); jout[0] = floor(y5 + center_y + 0.5); kout[0] = 0; } flag = 1; } if (x6 <= center_x&&x6 >= -center_x&&y6 <= center_y&&y6 >= -center_y) { if (flag == 0) { iin[0] = floor(x6 + center_x + 0.5); jin[0] = floor(y6 + center_y + 0.5); kin[0] = Zsize - 1; } if (flag == 1) { iout[0] = floor(x6 + center_x + 0.5); jout[0] = floor(y6 + center_y + 0.5); kout[0] = Zsize - 1; } flag = 1; } //sorting intersection point by in, out order if ((iout[0] - iin[0])*k1 + (jout[0] - jin[0])*k2 + (kout[0] - kin[0])*k3<0) { int temp; temp = iin[0]; iin[0] = iout[0]; iout[0] = temp; temp = jin[0]; jin[0] = jout[0]; jout[0] = temp; temp = kin[0]; kin[0] = kout[0]; kout[0] = temp; } return true; } return false; } __device__ __host__ bool crosspoint2d(long Xsize, long Ysize, int iin, int jin, float k1, float k2, int *i, int *j) { int iout, jout; bool flag = 0; if (k1 == 0 && k2 != 0) { iout = iin; if (jin == 0) { jout = Ysize - 1; } else { jout = 0; } if (iout == 0 || iout == Xsize - 1) { if (jin<Ysize / 2) { jout = Ysize - 1; } else { jout = 0; } } flag = 1; } if (k1 != 0 && k2 == 0) { jout = jin; if (iin == 0) { iout = Xsize - 1; } else { iout = 0; } if (jout == 0 || jout == Ysize - 1) { if (iin<Xsize / 2) { jout = Xsize - 1; } else { jout = 0; } } flag = 1; } if (k1 != 0 && k2 != 0) { float r, x, y; r = (0 - iin) / k1; y = k2*r + jin; if (y >= 0 && y <= Ysize - 1 && r != 0 && flag == 0) { iout = 0; jout = int(y + 0.5); flag = 1; } r = (Xsize - 1 - iin) / k1; y = k2*r + jin; if (y >= 0 && y <= Ysize - 1 && r != 0 && flag == 0) { iout = Xsize - 1; jout = int(y + 0.5); flag = 1; } r = (0 - jin) / k2; x = k1*r + iin; if (x >= 0 && x <= Xsize - 1 && r != 0 && flag == 0) { jout = 0; iout = int(x + 0.5); flag = 1; } r = (Ysize - 1 - jin) / k2; x = k1*r + iin; if (x >= 0 && x <= Xsize - 1 && r != 0 && flag == 0) { jout = Ysize - 1; iout = int(x + 0.5); flag = 1; } } if (flag == 1) { i[0] = iout; j[0] = jout; } return flag; } __device__ __host__ void ntoij2d(long Xsize, long Ysize, int nin, int *i, int *j) { int iin, jin; if (nin <= Xsize - 1) { iin = nin; jin = 0; } if (nin>Xsize - 1 && nin <= Xsize + Ysize - 2) { iin = Xsize - 1; jin = nin - (Xsize - 1); } if (nin>Xsize + Ysize - 2 && nin <= 2 * Xsize + Ysize - 3) { iin = Xsize - 1 - (nin - (Xsize + Ysize - 2)); jin = Ysize - 1; } if (nin>2 * Xsize + Ysize - 3) { iin = 0; jin = Ysize - 1 - (nin - (2 * Xsize + Ysize - 3)); } i[0] = iin; j[0] = 
jin;
}
__device__ __host__ void ij2dton(long Xsize, long Ysize, int *n, int i, int j)
{
    if (j == 0) { n[0] = i; }
    if (i == Xsize - 1) { n[0] = i + j; }
    if (j == Ysize - 1) { n[0] = Xsize - 1 + Ysize - 1 + (Xsize - 1 - i); }
    if (i == 0 && j != 0) { n[0] = Xsize - 1 + Ysize - 1 + Xsize - 1 + (Ysize - 1 - j); }
}
__device__ __host__ float bodyIntegralFromCenter(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, int iout, int jout, int kout, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt)
{
    long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3;
    float k1, k2, k3;
    ilast = iin; jlast = jin; klast = kin;
    k1 = iout - iin; k2 = jout - jin; k3 = kout - kin;
    float pint = 0;
    bool flag = 0;
    do
    {
        if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; }
        if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; }
        if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; }
        if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; }
        if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; }
        if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; }
        if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; }
        if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; }
        if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; }
        ///determine which one is closer to integration path
        float r, d1, d2, d3, x, y, z;
        r = k1*inext1 - iin*k1 + k2*jnext1 - k2*jin + k3*knext1 - k3*kin;
        x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
        d1 = sqrt((x - inext1)*(x - inext1) + (y - jnext1)*(y - jnext1) + (z - knext1)*(z - knext1));
        r = k1*inext2 - iin*k1 + k2*jnext2 - k2*jin + k3*knext2 - k3*kin;
        x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
        d2 = sqrt((x - inext2)*(x - inext2) + (y - jnext2)*(y - jnext2) + (z - knext2)*(z - knext2));
        r = k1*inext3 - iin*k1 + k2*jnext3 - k2*jin + k3*knext3 - k3*kin;
        x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
        d3 = sqrt((x - inext3)*(x - inext3) + (y - jnext3)*(y - jnext3) + (z - knext3)*(z - knext3));
        //////End of calculation distance///////////////
        //path 1
        flag = 0;
        if (d1 <= d2 && d1 <= d3 && inext1 >= 0 && inext1<Xsize)
        {
            pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]);
            ilast = inext1;
            flag = 1;
        }
        if (d2<d1 && d2 <= d3 && jnext2 >= 0 && jnext2<Ysize)
        {
            pint += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]);
            jlast = jnext2;
            flag = 1;
        }
        if (d3<d1 && d3<d2 && knext3 >= 0 && knext3<Zsize)
        {
            pint += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]);
            klast = knext3;
            flag = 1;
        }
        if (flag == 0) { printf("Error!
Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); return pint; } __device__ __host__ float bodyIntegral(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pcountinner) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pint = 0; bool flag = 0; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path //why we are not following the real integration path generated???????? 
float r, d1, d2, d3, xt, yt, zt; r = k1*inext1 - x*k1 + k2*jnext1 - k2*y + k3*knext1 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d1 = sqrt((xt - inext1)*(xt - inext1) + (yt - jnext1)*(yt - jnext1) + (zt - knext1)*(zt - knext1)); r = k1*inext2 - x*k1 + k2*jnext2 - k2*y + k3*knext2 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d2 = sqrt((xt - inext2)*(xt - inext2) + (yt - jnext2)*(yt - jnext2) + (zt - knext2)*(zt - knext2)); r = k1*inext3 - x*k1 + k2*jnext3 - k2*y + k3*knext3 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d3 = sqrt((xt - inext3)*(xt - inext3) + (yt - jnext3)*(yt - jnext3) + (zt - knext3)*(zt - knext3)); //////End of calculation distance/////////////// //path 1 flag = 0; if (d1 <= d2&&d1 <= d3&&inext1 >= 0 && inext1<Xsize) { pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); ilast = inext1; //pcountinner[inext1+jnext1*Xsize+knext1*Xsize*Ysize]++; flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 >= 0 && jnext2<Ysize) { pint += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); jlast = jnext2; //pcountinner[inext2+jnext2*Xsize+knext2*Xsize*Ysize]++; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { pint += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); klast = knext3; //pcountinner[inext3+jnext3*Xsize+knext3*Xsize*Ysize]++; flag = 1; } if (flag == 0) { printf("Error! Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); return pint; } __device__ __host__ float bodyIntegralWeighted(long Xsize, long Ysize, long Zsize, int nin, int nout, int n, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* curl, float* pcountinner, float* pint, float*pcount, float*pweight) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pinttmp = 0; float curltmp = 0; float counttmp = 0; bool flag = 0; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if 
(ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path //why we are not following the real integration path generated???????? float r, d1, d2, d3, xt, yt, zt; r = k1*inext1 - x*k1 + k2*jnext1 - k2*y + k3*knext1 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d1 = sqrt((xt - inext1)*(xt - inext1) + (yt - jnext1)*(yt - jnext1) + (zt - knext1)*(zt - knext1)); r = k1*inext2 - x*k1 + k2*jnext2 - k2*y + k3*knext2 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d2 = sqrt((xt - inext2)*(xt - inext2) + (yt - jnext2)*(yt - jnext2) + (zt - knext2)*(zt - knext2)); r = k1*inext3 - x*k1 + k2*jnext3 - k2*y + k3*knext3 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d3 = sqrt((xt - inext3)*(xt - inext3) + (yt - jnext3)*(yt - jnext3) + (zt - knext3)*(zt - knext3)); //////End of calculation distance/////////////// //path 1 flag = 0; if (d1 <= d2&&d1 <= d3&&inext1 >= 0 && inext1<Xsize) { pinttmp += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); ilast = inext1; curltmp += curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]; counttmp++; //pcountinner[inext1+jnext1*Xsize+knext1*Xsize*Ysize]++; flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 >= 0 && jnext2<Ysize) { pinttmp += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); jlast = jnext2; curltmp += curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]; counttmp++; //pcountinner[inext2+jnext2*Xsize+knext2*Xsize*Ysize]++; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { pinttmp += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); klast = knext3; curltmp += curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]; counttmp++; //pcountinner[inext3+jnext3*Xsize+knext3*Xsize*Ysize]++; flag = 1; } if (flag == 0) { printf("Error! 
Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); curltmp = curltmp / counttmp; if (curltmp != 0) { pweight[nin + nout*n] += 1 / curltmp; pcount[nin + nout*n]++; pint[nin + nout*n] += pinttmp; } return pinttmp; } __device__ __host__ float bodyIntegralWeightedMiniCurl(long Xsize, long Ysize, long Zsize, int nin, int nout, int n, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* curl, float* pcountinner, float* pint, float*pcount) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pinttmp = 0; float curltmp = 0; float counttmp = 0; bool flag = 0; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path //why we are not following the real integration path generated???????? 
float r, d1, d2, d3, xt, yt, zt; /*r=k1*inext1-x*k1+k2*jnext1-k2*y+k3*knext1-k3*z; xt=x+k1*r;yt=y+k2*r;zt=z+k3*r; d1=sqrt((xt-inext1)*(xt-inext1)+(yt-jnext1)*(yt-jnext1)+(zt-knext1)*(zt-knext1)); r=k1*inext2-x*k1+k2*jnext2-k2*y+k3*knext2-k3*z; xt=x+k1*r;yt=y+k2*r;zt=z+k3*r; d2=sqrt((xt-inext2)*(xt-inext2)+(yt-jnext2)*(yt-jnext2)+(zt-knext2)*(zt-knext2)); r=k1*inext3-x*k1+k2*jnext3-k2*y+k3*knext3-k3*z; xt=x+k1*r;yt=y+k2*r;zt=z+k3*r; d3=sqrt((xt-inext3)*(xt-inext3)+(yt-jnext3)*(yt-jnext3)+(zt-knext3)*(zt-knext3));*/ //////End of calculation distance/////////////// ///***calculation of curl in three directions***////////////////// d1 = 1e10; d2 = 1e10; d3 = 1e10; if (inext1 + jnext1*Xsize + knext1*Xsize*Ysize >= 0 && inext1 + jnext1*Xsize + knext1*Xsize*Ysize<Xsize*Ysize*Zsize) { d1 = curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]; } if (inext2 + jnext2*Xsize + knext2*Xsize*Ysize >= 0 && inext2 + jnext2*Xsize + knext2*Xsize*Ysize<Xsize*Ysize*Zsize) { d2 = curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]; } if (inext3 + jnext3*Xsize + knext3*Xsize*Ysize >= 0 && inext3 + jnext3*Xsize + knext3*Xsize*Ysize<Xsize*Ysize*Zsize) { d3 = curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]; } //path 1 flag = 0; if (d1 <= d2&&d1 <= d3&&inext1 >= 0 && inext1<Xsize) { pinttmp += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); ilast = inext1; curltmp += curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]; counttmp++; //pcountinner[inext1+jnext1*Xsize+knext1*Xsize*Ysize]++; flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 >= 0 && jnext2<Ysize) { pinttmp += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); jlast = jnext2; curltmp += curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]; counttmp++; //pcountinner[inext2+jnext2*Xsize+knext2*Xsize*Ysize]++; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { pinttmp += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); klast = knext3; curltmp += curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]; counttmp++; //pcountinner[inext3+jnext3*Xsize+knext3*Xsize*Ysize]++; flag = 1; } if (flag == 0) { printf("Error! 
Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); curltmp = curltmp / counttmp; if (curltmp != 0) { pcount[nin + nout*n] += 1 / curltmp; pint[nin + nout*n] += pinttmp; } return pinttmp; } __device__ __host__ float bodyIntegralInner(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float *p, float*pn, float*pcountinner) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pint = 0; bool flag = 0; pn[ilast + jlast*Xsize + klast*Xsize*Ysize] += p[ilast + jlast*Xsize + klast*Xsize*Ysize]; pcountinner[ilast + jlast*Xsize + klast*Xsize*Ysize] += 1; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path float r, d1, d2, d3, xt, yt, zt; r = k1*inext1 - x*k1 + k2*jnext1 - k2*y + k3*knext1 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d1 = sqrt((xt - inext1)*(xt - inext1) + (yt - jnext1)*(yt - jnext1) + (zt - knext1)*(zt - knext1)); r = k1*inext2 - x*k1 + k2*jnext2 - k2*y + k3*knext2 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d2 = sqrt((xt - inext2)*(xt - inext2) + (yt - jnext2)*(yt - jnext2) + (zt - knext2)*(zt - knext2)); r = k1*inext3 - x*k1 + k2*jnext3 - k2*y + k3*knext3 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d3 = sqrt((xt - inext3)*(xt - inext3) + (yt - jnext3)*(yt - jnext3) + (zt - knext3)*(zt - knext3)); //////End of calculation distance/////////////// //path 1 flag = 0; if (d1 <= d2&&d1 <= d3&&inext1 >= 0 && inext1<Xsize) { pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + 
knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); //float w=sqrtf((inext1-iin)*(inext1-iin)+(jnext1-jin)*(jnext1-jin)+(knext1-kin)*(knext1-kin)); pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint); ilast = inext1; pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += 1; flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 >= 0 && jnext2<Ysize) { pint += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); //float w=sqrtf((inext2-iin)*(inext2-iin)+(jnext2-jin)*(jnext2-jin)+(knext2-kin)*(knext2-kin)); pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += p[iin + jin*Xsize + kin*Xsize*Ysize] + pint; jlast = jnext2; pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += 1; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { pint += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); //float w=sqrtf((inext3-iin)*(inext3-iin)+(jnext3-jin)*(jnext3-jin)+(knext3-kin)*(knext3-kin)); pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint); klast = knext3; pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += 1; flag = 1; } if (flag == 0) { printf("Error! Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); return pint; } __device__ __host__ float bodyIntegralInnerStepCount(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float *p, float*pn, float*pcountinner, long*IntegrationStep) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pint = 0; bool flag = 0; int steps = 0; pn[ilast + jlast*Xsize + klast*Xsize*Ysize] += p[ilast + jlast*Xsize + klast*Xsize*Ysize]; pcountinner[ilast + jlast*Xsize + klast*Xsize*Ysize] += 1; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 
= ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path float r, d1, d2, d3, xt, yt, zt; r = k1*inext1 - x*k1 + k2*jnext1 - k2*y + k3*knext1 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d1 = sqrt((xt - inext1)*(xt - inext1) + (yt - jnext1)*(yt - jnext1) + (zt - knext1)*(zt - knext1)); r = k1*inext2 - x*k1 + k2*jnext2 - k2*y + k3*knext2 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d2 = sqrt((xt - inext2)*(xt - inext2) + (yt - jnext2)*(yt - jnext2) + (zt - knext2)*(zt - knext2)); r = k1*inext3 - x*k1 + k2*jnext3 - k2*y + k3*knext3 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d3 = sqrt((xt - inext3)*(xt - inext3) + (yt - jnext3)*(yt - jnext3) + (zt - knext3)*(zt - knext3)); //////End of calculation distance/////////////// //path 1 flag = 0; if (d1 <= d2&&d1 <= d3&&inext1 >= 0 && inext1<Xsize) { pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); //float w=sqrtf((inext1-iin)*(inext1-iin)+(jnext1-jin)*(jnext1-jin)+(knext1-kin)*(knext1-kin)); pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint); ilast = inext1; pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += 1; flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 >= 0 && jnext2<Ysize) { pint += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); //float w=sqrtf((inext2-iin)*(inext2-iin)+(jnext2-jin)*(jnext2-jin)+(knext2-kin)*(knext2-kin)); pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += p[iin + jin*Xsize + kin*Xsize*Ysize] + pint; jlast = jnext2; pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += 1; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { pint += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); //float w=sqrtf((inext3-iin)*(inext3-iin)+(jnext3-jin)*(jnext3-jin)+(knext3-kin)*(knext3-kin)); pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint); klast = knext3; pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += 1; flag = 1; } if (flag == 0) { printf("Error! 
Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } } else { steps++; IntegrationStep[ilast + jlast*Xsize + klast*Xsize*Ysize] += steps; } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); return pint; } __device__ __host__ float bodyIntegralInnerWeighted(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* curl, float *p, float*pn, float*pcountinner) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pint = 0; float curltmp = 0; float counttmp = 0; bool flag = 0; //curltmp=curl[ilast+jlast*Xsize+klast*Xsize*Ysize]; //counttmp=1; //pn[ilast+jlast*Xsize+klast*Xsize*Ysize]+=p[ilast+jlast*Xsize+klast*Xsize*Ysize]*1/curltmp*counttmp; //pcountinner[ilast+jlast*Xsize+klast*Xsize*Ysize]+=1/curltmp*counttmp; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path float r, d1, d2, d3, xt, yt, zt; r = k1*inext1 - x*k1 + k2*jnext1 - k2*y + k3*knext1 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d1 = sqrt((xt - inext1)*(xt - inext1) + (yt - jnext1)*(yt - jnext1) + (zt - knext1)*(zt - knext1)); r = k1*inext2 - x*k1 + k2*jnext2 - k2*y + k3*knext2 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d2 = sqrt((xt - inext2)*(xt - inext2) + (yt - jnext2)*(yt - jnext2) + (zt - knext2)*(zt - knext2)); r = k1*inext3 - x*k1 + k2*jnext3 - k2*y + k3*knext3 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d3 = sqrt((xt - inext3)*(xt - inext3) + (yt - jnext3)*(yt - jnext3) + (zt - knext3)*(zt - knext3)); //////End of calculation distance/////////////// //path 1 flag = 0; if (d1 <= d2&&d1 <= d3&&inext1 
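// curl-weighted deposit: each visited node contributes with weight counttmp/curltmp,
// so estimates carried along low-curl (low momentum-error) paths count for more in the
// final pn/pcountinner average; the /10000 below only rescales curl into a workable range.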
>= 0 && inext1 < Xsize) { pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]/10000; counttmp++; //float w=sqrtf((inext1-iin)*(inext1-iin)+(jnext1-jin)*(jnext1-jin)+(knext1-kin)*(knext1-kin)); ilast = inext1; if (curltmp != 0) { pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint) * 1 / curltmp*counttmp; pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += 1 / curltmp*counttmp; } flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 >= 0 && jnext2<Ysize) { pint += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] / 10000; counttmp++; //float w=sqrtf((inext2-iin)*(inext2-iin)+(jnext2-jin)*(jnext2-jin)+(knext2-kin)*(knext2-kin)); if (curltmp != 0) { pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint) * 1 / curltmp*counttmp; pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += 1 / curltmp*counttmp; } jlast = jnext2; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { pint += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]/10000; counttmp++; //float w=sqrtf((inext3-iin)*(inext3-iin)+(jnext3-jin)*(jnext3-jin)+(knext3-kin)*(knext3-kin)); klast = knext3; if(curltmp != 0) { pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint) * 1 / curltmp*counttmp; pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += 1 / curltmp*counttmp; } flag = 1; } if (flag == 0) { printf("Error! 
Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); return pint; } __device__ __host__ float bodyIntegralInnerWeightedFixedBC(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* curl, float *p, float*pn, float*pcountinner) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pint = 0; float curltmp = 0; float counttmp = 0; bool flag = 0; //curltmp=curl[ilast+jlast*Xsize+klast*Xsize*Ysize]; //counttmp=1; //pn[ilast+jlast*Xsize+klast*Xsize*Ysize]+=p[ilast+jlast*Xsize+klast*Xsize*Ysize]*1/curltmp*counttmp; //pcountinner[ilast+jlast*Xsize+klast*Xsize*Ysize]+=1/curltmp*counttmp; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path float r, d1, d2, d3, xt, yt, zt; r = k1*inext1 - x*k1 + k2*jnext1 - k2*y + k3*knext1 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d1 = sqrt((xt - inext1)*(xt - inext1) + (yt - jnext1)*(yt - jnext1) + (zt - knext1)*(zt - knext1)); r = k1*inext2 - x*k1 + k2*jnext2 - k2*y + k3*knext2 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d2 = sqrt((xt - inext2)*(xt - inext2) + (yt - jnext2)*(yt - jnext2) + (zt - knext2)*(zt - knext2)); r = k1*inext3 - x*k1 + k2*jnext3 - k2*y + k3*knext3 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d3 = sqrt((xt - inext3)*(xt - inext3) + (yt - jnext3)*(yt - jnext3) + (zt - knext3)*(zt - knext3)); //////End of calculation distance/////////////// //path 1 flag = 0; if (d1 <= d2&&d1 <= d3&&inext1 >= 0 && inext1 < Xsize) { pint += -density*(inext1 - 
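// trapezoidal rule along the unit step: average the material acceleration DuDt at the
// two endpoints and multiply by the signed step length.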
ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] / 10000; counttmp++; //float w=sqrtf((inext1-iin)*(inext1-iin)+(jnext1-jin)*(jnext1-jin)+(knext1-kin)*(knext1-kin)); ilast = inext1; if (curltmp != 0) { pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint) * 1 / curltmp*counttmp; pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += 1 / curltmp*counttmp; } flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 > 0 && jnext2<Ysize) { pint += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] / 10000; counttmp++; //float w=sqrtf((inext2-iin)*(inext2-iin)+(jnext2-jin)*(jnext2-jin)+(knext2-kin)*(knext2-kin)); if (curltmp != 0) { pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint) * 1 / curltmp*counttmp; pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += 1 / curltmp*counttmp; } jlast = jnext2; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { pint += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] / 10000; counttmp++; //float w=sqrtf((inext3-iin)*(inext3-iin)+(jnext3-jin)*(jnext3-jin)+(knext3-kin)*(knext3-kin)); klast = knext3; if (curltmp != 0) { pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint) * 1 / curltmp*counttmp; pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += 1 / curltmp*counttmp; } flag = 1; } if (flag == 0) { printf("Error! 
Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); return pint; } __device__ __host__ float bodyIntegralInnerSelect(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float *p, float*pn, float *curl, float*pcountinner, float threshold) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pint = 0; float curltmp = 0; float counttmp = 0; bool flag = 0; bool outthreshold = 0; float p0=0; bool flag_p0set = 0; if (curl[iin + jin*Xsize + kin*Xsize*Ysize] == 0 || curl[iin + jin*Xsize + kin*Xsize*Ysize]>threshold) { flag_p0set = 0; } else { p0 = p[iin + jin*Xsize + kin*Xsize*Ysize]; flag_p0set = 1; curltmp = 0; counttmp = 0; } //curltmp=curl[ilast+jlast*Xsize+klast*Xsize*Ysize]; //counttmp=1; //pn[ilast+jlast*Xsize+klast*Xsize*Ysize]+=p[ilast+jlast*Xsize+klast*Xsize*Ysize]*1/curltmp*counttmp; //pcountinner[ilast+jlast*Xsize+klast*Xsize*Ysize]+=1/curltmp*counttmp; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path float r, d1, d2, d3, xt, yt, zt; r = k1*inext1 - x*k1 + k2*jnext1 - k2*y + k3*knext1 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d1 = sqrt((xt - inext1)*(xt - inext1) + (yt - jnext1)*(yt - jnext1) + (zt - knext1)*(zt - knext1)); r = k1*inext2 - x*k1 + k2*jnext2 - k2*y + k3*knext2 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d2 = sqrt((xt - inext2)*(xt - inext2) + (yt - jnext2)*(yt - jnext2) + (zt - knext2)*(zt - knext2)); r = k1*inext3 - x*k1 + k2*jnext3 - k2*y + k3*knext3 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + 
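// r is the signed arclength of the candidate's projection onto the unit ray (k1,k2,k3);
// (xt,yt,zt) is the foot of that projection, so each d measures how far the candidate
// strays from the straight integration line.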
k3*r; d3 = sqrt((xt - inext3)*(xt - inext3) + (yt - jnext3)*(yt - jnext3) + (zt - knext3)*(zt - knext3)); //////End of calculation distance/////////////// //path 1 flag = 0; if (d1 <= d2&&d1 <= d3&&inext1 >= 0 && inext1<Xsize) { if (flag_p0set == 0) { if (curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] != 0 && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]<threshold) { p0 = p[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]; flag_p0set = 1; pint = 0; counttmp = 0; curltmp = 0; } } else { if (outthreshold == 0 && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] < threshold && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]!=0) { //passing through low error zone pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]/10000; counttmp++; pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += (p0 + pint) * 1 / curltmp*counttmp; pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += 1 / curltmp*counttmp; } if (outthreshold == 0 && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] >= threshold || curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]==0) { ///Entering high error zone //pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); outthreshold = 1; //pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += (p0 + pint); //pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += 1; } if (outthreshold == 1 && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] < threshold && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] != 0) { //reset the starting point of integration. if Exiting the higher error zone. //pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); p0 = p[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]; pint = 0; outthreshold = 0; counttmp = 0; curltmp = 0; } } ilast = inext1; flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 >= 0 && jnext2<Ysize) { if (flag_p0set == 0) { if (curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] != 0) { p0 = p[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]; flag_p0set = 1; pint = 0; counttmp = 0; curltmp = 0; } } else { if (outthreshold == 0 && curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] < threshold&& curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]!=0) { //passing through low error zone pint += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]/10000; counttmp++; pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += (p0 + pint) * 1 / curltmp*counttmp; pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += 1 / curltmp*counttmp; } if (outthreshold == 0 && curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] >= threshold) { ///Entering high error zone //pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); outthreshold = 1; //pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += (p0 + pint); //pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += 1; } if (outthreshold == 1 && curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] < threshold&& curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] != 0) { //reset the starting point of integration. if Exiting the higher error zone. 
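// Re-entering a low-curl region: restart the running integral from the locally stored
// pressure so errors accumulated inside the high-curl zone are discarded. Note that,
// unlike the x branch, this branch seeds p0 whenever curl != 0 without also requiring
// curl < threshold.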
//pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); p0 = p[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]; pint = 0; outthreshold = 0; counttmp = 0; curltmp = 0; } } jlast = jnext2; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { if (flag_p0set == 0) { if (curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] != 0) { p0 = p[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]; flag_p0set = 1; pint = 0; counttmp = 0; curltmp = 0; } } else { if (outthreshold == 0 && curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] < threshold&&curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] !=0) { //passing through low error zone pint += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]/10000; counttmp++; pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += (p0 + pint) * 1 / curltmp*counttmp; pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += 1 / curltmp*counttmp; } if (outthreshold == 0 && curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] >= threshold) { ///Entering high error zone //pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); outthreshold = 1; //pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += (p0 + pint); //pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += 1; } if (outthreshold == 1 && curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] < threshold&&curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]!= 0) { //reset the starting point of integration. if Exiting the higher error zone. //pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); p0 = p[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]; pint = 0; outthreshold = 0; } } klast = knext3; flag = 1; } if (flag == 0) { printf("Error! 
Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } return 0; } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); return pint; } __device__ __host__ float bodyIntegralInnerSelectFixedBC(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float *p, float*pn, float *curl, float*pcountinner, float threshold) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pint = 0; float curltmp = 0; float counttmp = 0; bool flag = 0; bool outthreshold = 0; float p0 = 0; bool flag_p0set = 0; if (curl[iin + jin*Xsize + kin*Xsize*Ysize] == 0 || curl[iin + jin*Xsize + kin*Xsize*Ysize]>threshold) { flag_p0set = 0; } else { p0 = p[iin + jin*Xsize + kin*Xsize*Ysize]; flag_p0set = 1; curltmp = 0; counttmp = 0; } //curltmp=curl[ilast+jlast*Xsize+klast*Xsize*Ysize]; //counttmp=1; //pn[ilast+jlast*Xsize+klast*Xsize*Ysize]+=p[ilast+jlast*Xsize+klast*Xsize*Ysize]*1/curltmp*counttmp; //pcountinner[ilast+jlast*Xsize+klast*Xsize*Ysize]+=1/curltmp*counttmp; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path float r, d1, d2, d3, xt, yt, zt; r = k1*inext1 - x*k1 + k2*jnext1 - k2*y + k3*knext1 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d1 = sqrt((xt - inext1)*(xt - inext1) + (yt - jnext1)*(yt - jnext1) + (zt - knext1)*(zt - knext1)); r = k1*inext2 - x*k1 + k2*jnext2 - k2*y + k3*knext2 - k3*z; xt = x + k1*r; yt = y + k2*r; zt = z + k3*r; d2 = sqrt((xt - inext2)*(xt - inext2) + (yt - jnext2)*(yt - jnext2) + (zt - knext2)*(zt - knext2)); r = k1*inext3 - x*k1 + k2*jnext3 - k2*y + k3*knext3 - k3*z; xt = x + k1*r; yt = y 
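// FixedBC variant: path selection is identical to bodyIntegralInnerSelect, but steps
// that would land on the j = 0 wall are excluded by the extra jnext checks below.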
+ k2*r; zt = z + k3*r; d3 = sqrt((xt - inext3)*(xt - inext3) + (yt - jnext3)*(yt - jnext3) + (zt - knext3)*(zt - knext3)); //////End of calculation distance/////////////// //path 1 flag = 0; if (d1 < d2&&d1 <= d3&&inext1 >= 0 && inext1<Xsize &&jnext1 != 0) { if (flag_p0set == 0) { if (curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] != 0 && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]<threshold) { p0 = p[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]; flag_p0set = 1; pint = 0; counttmp = 0; curltmp = 0; } } else { if (outthreshold == 0 && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] < threshold && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] != 0) { //passing through low error zone pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] / 10000; counttmp++; pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += (p0 + pint) * 1 / curltmp*counttmp; pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += 1 / curltmp*counttmp; } if (outthreshold == 0 && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] >= threshold || curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] == 0) { ///Entering high error zone //pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); outthreshold = 1; //pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += (p0 + pint); //pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += 1; } if (outthreshold == 1 && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] < threshold && curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] != 0) { //reset the starting point of integration. if Exiting the higher error zone. //pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); p0 = p[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]; pint = 0; outthreshold = 0; counttmp = 0; curltmp = 0; } } ilast = inext1; flag = 1; } if (d2 <= d1&&d2 <= d3&&jnext2 > 0 && jnext2<Ysize) { if (flag_p0set == 0) { if (curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] != 0) { p0 = p[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]; flag_p0set = 1; pint = 0; counttmp = 0; curltmp = 0; } } else { if (outthreshold == 0 && curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] < threshold&& curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] != 0) { //passing through low error zone pint += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] / 10000; counttmp++; pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += (p0 + pint) * 1 / curltmp*counttmp; pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += 1 / curltmp*counttmp; } if (outthreshold == 0 && curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] >= threshold) { ///Entering high error zone //pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); outthreshold = 1; //pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += (p0 + pint); //pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += 1; } if (outthreshold == 1 && curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] < threshold&& curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] != 0) { //reset the starting point of integration. if Exiting the higher error zone. 
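// Same restart-from-local-pressure bookkeeping as in bodyIntegralInnerSelect.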
//pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); p0 = p[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]; pint = 0; outthreshold = 0; counttmp = 0; curltmp = 0; } } jlast = jnext2; flag = 1; } if (d3 < d1 && d3 < d2 && knext3 >= 0 && knext3<Zsize && jnext3 != 0) { if (flag_p0set == 0) { if (curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] != 0) { p0 = p[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]; flag_p0set = 1; pint = 0; counttmp = 0; curltmp = 0; } } else { if (outthreshold == 0 && curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] < threshold&&curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] != 0) { //passing through low error zone pint += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] / 10000; counttmp++; pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += (p0 + pint) * 1 / curltmp*counttmp; pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += 1 / curltmp*counttmp; } if (outthreshold == 0 && curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] >= threshold) { ///Entering high error zone //pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); outthreshold = 1; //pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += (p0 + pint); //pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += 1; } if (outthreshold == 1 && curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] < threshold&&curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] != 0) { //reset the starting point of integration. if Exiting the higher error zone. //pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); p0 = p[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]; pint = 0; outthreshold = 0; } } klast = knext3; flag = 1; } /*if (flag == 0) { printf("Error! 
Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } return 0; }*/ } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); return pint; } __device__ __host__ float bodyIntegralInnerMiniCurl(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* curl, float *p, float*pn, float*pcountinner) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pint = 0; bool flag = 0; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path float r, d1, d2, d3, xt, yt, zt; /*r=k1*inext1-x*k1+k2*jnext1-k2*y+k3*knext1-k3*z; xt=x+k1*r;yt=y+k2*r;zt=z+k3*r; d1=sqrt((xt-inext1)*(xt-inext1)+(yt-jnext1)*(yt-jnext1)+(zt-knext1)*(zt-knext1)); r=k1*inext2-x*k1+k2*jnext2-k2*y+k3*knext2-k3*z; xt=x+k1*r;yt=y+k2*r;zt=z+k3*r; d2=sqrt((xt-inext2)*(xt-inext2)+(yt-jnext2)*(yt-jnext2)+(zt-knext2)*(zt-knext2)); r=k1*inext3-x*k1+k2*jnext3-k2*y+k3*knext3-k3*z; xt=x+k1*r;yt=y+k2*r;zt=z+k3*r; d3=sqrt((xt-inext3)*(xt-inext3)+(yt-jnext3)*(yt-jnext3)+(zt-knext3)*(zt-knext3));*/ //////End of calculation distance/////////////// ///***calculation of curl in three directions***////////////////// d1 = 1e10; d2 = 1e10; d3 = 1e10; if (inext1 + jnext1*Xsize + knext1*Xsize*Ysize >= 0 && inext1 + jnext1*Xsize + knext1*Xsize*Ysize<Xsize*Ysize*Zsize) { d1 = curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]; } if (inext2 + jnext2*Xsize + knext2*Xsize*Ysize >= 0 && inext2 + jnext2*Xsize + knext2*Xsize*Ysize<Xsize*Ysize*Zsize) { d2 = curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]; } if (inext3 + jnext3*Xsize + knext3*Xsize*Ysize >= 0 && inext3 + jnext3*Xsize + 
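// MiniCurl variant: d1..d3 are loaded with the candidates' local curl magnitude rather
// than a geometric distance, so the march follows the path of least curl (least
// momentum-equation error) towards (iout,jout,kout); out-of-range candidates keep the
// 1e10 sentinel and are never chosen.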
knext3*Xsize*Ysize<Xsize*Ysize*Zsize) { d3 = curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]; } //path 1 flag = 0; if (d1 <= d2&&d1 <= d3&&inext1 >= 0 && inext1<Xsize) { pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); //float w=sqrtf((inext1-iin)*(inext1-iin)+(jnext1-jin)*(jnext1-jin)+(knext1-kin)*(knext1-kin)); pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint); ilast = inext1; pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]++; flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 >= 0 && jnext2<Ysize) { pint += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); //float w=sqrtf((inext2-iin)*(inext2-iin)+(jnext2-jin)*(jnext2-jin)+(knext2-kin)*(knext2-kin)); pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint); jlast = jnext2; pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]++; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { pint += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); //float w=sqrtf((inext3-iin)*(inext3-iin)+(jnext3-jin)*(jnext3-jin)+(knext3-kin)*(knext3-kin)); pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint); klast = knext3; pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]++; flag = 1; } if (flag == 0) { printf("Error! Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); return pint; } __device__ __host__ float bodyIntegralInnerWeightedMiniCurl(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* curl, float *p, float*pn, float*pcountinner) { long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; x = x + center_x; y = y + center_y; z = z + center_z; ilast = iin; jlast = jin; klast = kin; if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin)<0) { k1 = -k1; k2 = -k2; k3 = -k3; } //k1=iout-iin;k2=jout-jin;k3=kout-kin; float pint = 0; float curltmp = 0; float counttmp = 0; bool flag = 0; curltmp = curl[ilast + jlast*Xsize + klast*Xsize*Ysize]; counttmp = 1; pn[ilast + jlast*Xsize + klast*Xsize*Ysize] += p[ilast + jlast*Xsize + klast*Xsize*Ysize] * 1 / curltmp*counttmp; pcountinner[ilast + jlast*Xsize 
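// NOTE: unlike the other weighted variants, this seed deposit divides by curltmp with
// no zero guard; a zero curl value at the entry node would produce an inf/NaN weight.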
+ klast*Xsize*Ysize] += 1 / curltmp*counttmp; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path float r, d1, d2, d3, xt, yt, zt; /*r=k1*inext1-x*k1+k2*jnext1-k2*y+k3*knext1-k3*z; xt=x+k1*r;yt=y+k2*r;zt=z+k3*r; d1=sqrt((xt-inext1)*(xt-inext1)+(yt-jnext1)*(yt-jnext1)+(zt-knext1)*(zt-knext1)); r=k1*inext2-x*k1+k2*jnext2-k2*y+k3*knext2-k3*z; xt=x+k1*r;yt=y+k2*r;zt=z+k3*r; d2=sqrt((xt-inext2)*(xt-inext2)+(yt-jnext2)*(yt-jnext2)+(zt-knext2)*(zt-knext2)); r=k1*inext3-x*k1+k2*jnext3-k2*y+k3*knext3-k3*z; xt=x+k1*r;yt=y+k2*r;zt=z+k3*r; d3=sqrt((xt-inext3)*(xt-inext3)+(yt-jnext3)*(yt-jnext3)+(zt-knext3)*(zt-knext3));*/ //////End of calculation distance/////////////// ///***calculation of curl in three directions***////////////////// d1 = 1e10; d2 = 1e10; d3 = 1e10; if (inext1 + jnext1*Xsize + knext1*Xsize*Ysize >= 0 && inext1 + jnext1*Xsize + knext1*Xsize*Ysize<Xsize*Ysize*Zsize) { d1 = curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]; } if (inext2 + jnext2*Xsize + knext2*Xsize*Ysize >= 0 && inext2 + jnext2*Xsize + knext2*Xsize*Ysize<Xsize*Ysize*Zsize) { d2 = curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]; } if (inext3 + jnext3*Xsize + knext3*Xsize*Ysize >= 0 && inext3 + jnext3*Xsize + knext3*Xsize*Ysize<Xsize*Ysize*Zsize) { d3 = curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]; } //path 1 flag = 0; if (d1 <= d2&&d1 <= d3&&inext1 >= 0 && inext1<Xsize) { pint += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]; counttmp++; //float w=sqrtf((inext1-iin)*(inext1-iin)+(jnext1-jin)*(jnext1-jin)+(knext1-kin)*(knext1-kin)); pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint) * 1 / curltmp*counttmp; ilast = inext1; pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += 1 / curltmp*counttmp; flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 >= 0 && jnext2<Ysize) { pint += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]; counttmp++; //float w=sqrtf((inext2-iin)*(inext2-iin)+(jnext2-jin)*(jnext2-jin)+(knext2-kin)*(knext2-kin)); pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint) * 1 / curltmp*counttmp; jlast = jnext2; pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += 1 / curltmp*counttmp; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { pint += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); curltmp += curl[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]; counttmp++; //float w=sqrtf((inext3-iin)*(inext3-iin)+(jnext3-jin)*(jnext3-jin)+(knext3-kin)*(knext3-kin)); pn[inext3 + 
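// weight = counttmp / curltmp: nodes reached through long low-curl stretches receive
// the largest weight when pn is later normalised by pcountinner.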
jnext3*Xsize + knext3*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint) * 1 / curltmp*counttmp;
            klast = knext3;
            pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += 1 / curltmp*counttmp;
            flag = 1;
        }
        if (flag == 0) {
            printf("Error! Wrong Point Found!\n");
            if (d3 < d1 && d3 < d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); }
            if (d2 < d1 && d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); }
            if (d1 <= d2 && d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); }
        }
    } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout) > 1e-5 && flag == 1);
    return pint;
}

// Marches the staircase path from (iin,jin,kin) to (iout,jout,kout) and, at every visited
// node, deposits two pressure estimates: the running integral referenced to the seed point
// and the one-step increment referenced to the previous node (see the branches below).
__device__ __host__ float bodyIntegralInner2(long Xsize, long Ysize, long Zsize, int iin, int jin, int kin, int iout, int jout, int kout, float x, float y, float z, float k1, float k2, float k3, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float *p, float*pn, float*pcountinner)
{
    long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3;
    float center_x = (Xsize - 1) / 2.0;
    float center_y = (Ysize - 1) / 2.0;
    float center_z = (Zsize - 1) / 2.0;
    x = x + center_x; y = y + center_y; z = z + center_z;
    ilast = iin; jlast = jin; klast = kin;
    // orient the line direction from the entry point towards the exit point
    if (k1*(iout - iin) + k2*(jout - jin) + k3*(kout - kin) < 0) { k1 = -k1; k2 = -k2; k3 = -k3; }
    //k1=iout-iin;k2=jout-jin;k3=kout-kin;
    float pint = 0;
    bool flag = 0;
    do {
        if (ilast < iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; }
        if (ilast == iout) { inext1 = ilast - 60000; jnext1 = jlast; knext1 = klast; }
        if (ilast > iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; }
        if (jlast < jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; }
        if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 60000; knext2 = klast; }
        if (jlast > jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; }
        if (klast < kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; }
        if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 60000; }
        if (klast > kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; }
        ///determine which one is closer to integration path
        // Use temporaries (xt,yt,zt) for the projections, as the other variants do; the
        // original updated (x,y,z) in place, which stays on the ray but obscures the
        // projection and needlessly accumulates rounding error.
        float r, d1, d2, d3, xt, yt, zt;
        r = k1*inext1 - x*k1 + k2*jnext1 - k2*y + k3*knext1 - k3*z;
        xt = x + k1*r; yt = y + k2*r; zt = z + k3*r;
        d1 = sqrt((xt - inext1)*(xt - inext1) + (yt - jnext1)*(yt - jnext1) + (zt - knext1)*(zt - knext1));
        r = k1*inext2 - x*k1 + k2*jnext2 - k2*y + k3*knext2 - k3*z;
        xt = x + k1*r; yt = y + k2*r; zt = z + k3*r;
        d2 = sqrt((xt - inext2)*(xt - inext2) + (yt - jnext2)*(yt - jnext2) + (zt - knext2)*(zt - knext2));
        r = k1*inext3 - x*k1 + k2*jnext3 - k2*y + k3*knext3 - k3*z;
        xt = x + k1*r; yt = y + k2*r; zt = z + k3*r;
        d3 = sqrt((xt - inext3)*(xt - inext3) + (yt - jnext3)*(yt - jnext3) + (zt - knext3)*(zt - knext3));
        //////End of calculation distance///////////////
        //path 1
        flag = 0;
        if (d1 <= d2 && d1 <= d3 && inext1 >= 0 && inext1 < Xsize) {
            float pinttmp = -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]);
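// pinttmp is the single-step increment; each branch below deposits both the
// seed-referenced running integral (p[iin..] + pint) and the neighbour-referenced
// one-step estimate (p[ilast..] + pinttmp), doubling the samples per node.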
pint += pinttmp; //float w=sqrtf((inext1-iin)*(inext1-iin)+(jnext1-jin)*(jnext1-jin)+(knext1-kin)*(knext1-kin)); pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += p[iin + jin*Xsize + kin*Xsize*Ysize] + pint; pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += 1; pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += p[ilast + jlast*Xsize + klast*Xsize*Ysize] + pinttmp; pcountinner[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += 1; ilast = inext1; flag = 1; } if (d2<d1&&d2 <= d3&&jnext2 >= 0 && jnext2<Ysize) { float pinttmp = -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); pint += pinttmp; //float w=sqrtf((inext2-iin)*(inext2-iin)+(jnext2-jin)*(jnext2-jin)+(knext2-kin)*(knext2-kin)); pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += p[iin + jin*Xsize + kin*Xsize*Ysize] + pint; pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += 1; pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += p[ilast + jlast*Xsize + klast*Xsize*Ysize] + pinttmp; pcountinner[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += 1; jlast = jnext2; flag = 1; } if (d3<d1&&d3<d2&&knext3 >= 0 && knext3<Zsize) { float pinttmp = -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); pint += pinttmp; //float w=sqrtf((inext3-iin)*(inext3-iin)+(jnext3-jin)*(jnext3-jin)+(knext3-kin)*(knext3-kin)); pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint); pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += 1; pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += (p[ilast + jlast*Xsize + klast*Xsize*Ysize] + pinttmp); pcountinner[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += 1; klast = knext3; flag = 1; } if (flag == 0) { printf("Error! 
Wrong Point Found!\n"); if (d3<d1&&d3<d2) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext3, jnext3, knext3, inext1, jnext1, knext1, inext2, jnext2, knext2); } if (d2<d1&&d2 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext2, jnext2, knext2, inext1, jnext1, knext1, inext3, jnext3, knext3); } if (d1 <= d2&&d1 <= d3) { printf("%6.5f %6.5f %6.5f (%02d %02d %02d) (%02d %02d %02d) (%02d %02d %02d)\n(%02d %02d %02d) (%02d %02d %02d)\n", d1, d2, d3, iin, jin, kin, iout, jout, kout, inext1, jnext1, knext1, inext3, jnext3, knext3, inext2, jnext2, knext2); } } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5&&flag == 1); return pint; } __global__ void initialIntegration(long Xsize, long Ysize, long Zsize, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* p, float* pn) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; long nout = threadIdx.x + blockIdx.x*blockDim.x; while (nout<n) { int iout, jout, kout; ntoijk(Xsize, Ysize, Zsize, nout, &iout, &jout, &kout); p[nout] = bodyIntegralFromCenter(Xsize, Ysize, Zsize, Xsize / 2, Ysize / 2, Zsize / 2, iout, jout, kout, deltx, delty, deltz, density, DuDt, DvDt, DwDt); nout = nout + blockDim.x*gridDim.x; } } __global__ void omni3d(long Xsize, long Ysize, long Zsize, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pint, int*pcountinner) { long n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; long iin, jin, kin, iout, jout, kout, indexin, indexout; long nin = blockDim.x*blockIdx.x + threadIdx.x; long nout = blockDim.y*blockIdx.y + threadIdx.y; while (nin<n&&nout<n) { long iout, jout, kout; long facein, faceout; if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; faceout = 1; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; faceout = 2; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; faceout = 3; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; faceout = 4; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; faceout = 5; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 
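// boundary unwrap: the nout bands map back to (iout,jout,kout) face by face, in the
// order k = 0, i = Xsize-1, k = Zsize-1, j = 0, i = 0, j = Ysize-1 (faces 1..6).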
1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; faceout = 6; } long iin, jin, kin; if (nin <= Xsize*Ysize - 1) { kin = 0; jin = nin / Xsize; iin = nin - Xsize*jin; facein = 1; } if (nin>Xsize*Ysize - 1 && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iin = Xsize - 1; jin = (nin - Xsize*Ysize) / (Zsize - 1); kin = nin - Xsize*Ysize - jin*(Zsize - 1) + 1; facein = 2; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kin = Zsize - 1; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - jin*(Xsize - 1); iin = Xsize - 2 - iin; facein = 3; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jin = 0; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kin*(Xsize - 1); iin = Xsize - 2 - iin; kin = kin + 1; facein = 4; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iin = 0; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jin*(Zsize - 2); kin = Zsize - 2 - kin; jin = jin + 1; facein = 5; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jin = Ysize - 1; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kin*(Xsize - 2); kin = Zsize - 2 - kin; iin = iin + 1; facein = 6; } long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; ilast = iin; jlast = jin; klast = kin; if (nin != nout&&nin >= 0 && nin<n&&nout >= 0 && nout<n) { float k1 = iout - iin; float k2 = jout - jin; float k3 = kout - kin; float l = sqrt(k1*k1 + k2*k2 + k3*k3); k1 = k1 / l; k2 = k2 / l; k3 = k3 / l; //cout<<"indexin: "<<nin<<" indexout:"<<nout<<endl; //cout<<'('<<iin<<','<<jin<<','<<kin<<") "<<'('<<iout<<','<<jout<<','<<kout<<") "<<endl; //log<<"indexin: "<<nin<<" indexout:"<<nout<<endl; //log<<'('<<iin<<','<<jin<<','<<kin<<") "<<'('<<iout<<','<<jout<<','<<kout<<") "<<endl; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 1e6; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 1e6; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1e6; } 
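// -1e6 is the same out-of-range sentinel trick used (as -60000) in the per-point
// integrators: a coordinate that has reached its target is pushed far off the ray
// so its candidate never wins the distance comparison.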
if (klast > kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; }
                ///determine which one is closer to integration path
                float r, d1, d2, d3, x, y, z;
                r = k1*inext1 - iin*k1 + k2*jnext1 - k2*jin + k3*knext1 - k3*kin;
                x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
                d1 = sqrt((x - inext1)*(x - inext1) + (y - jnext1)*(y - jnext1) + (z - knext1)*(z - knext1));
                r = k1*inext2 - iin*k1 + k2*jnext2 - k2*jin + k3*knext2 - k3*kin;
                x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
                d2 = sqrt((x - inext2)*(x - inext2) + (y - jnext2)*(y - jnext2) + (z - knext2)*(z - knext2));
                r = k1*inext3 - iin*k1 + k2*jnext3 - k2*jin + k3*knext3 - k3*kin;
                x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
                d3 = sqrt((x - inext3)*(x - inext3) + (y - jnext3)*(y - jnext3) + (z - knext3)*(z - knext3));
                //////End of calculation distance///////////////
                //path 1
                if (d1 <= d2 && d1 <= d3) { pint[nin + nout*n] += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); ilast = inext1; }
                if (d2 < d1 && d2 <= d3) { pint[nin + nout*n] += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); jlast = jnext2; }
                if (d3 < d1 && d3 < d2) { pint[nin + nout*n] += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); klast = knext3; }
            } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout) > 1e-5);
        }
        nin = nin + blockDim.x*gridDim.x;
        nout = nout + blockDim.y*gridDim.y;
    }
    //////End of calculation of pressure increment////////////////
}

__global__ void omni3dparallellinesEqualSpacing(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pint, float* pcount, float* pcountinner)
{
    int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2;
    float center_x = (Xsize - 1) / 2.0;
    float center_y = (Ysize - 1) / 2.0;
    float center_z = (Zsize - 1) / 2.0;
    int NoGrid = Xsize;
    if (NoGrid < Ysize) { NoGrid = Ysize; }
    if (NoGrid < Zsize) { NoGrid = Zsize; }
    // 1.732 ~ sqrt(3): make the line raster wide enough to cover the box diagonal
    NoGrid = NoGrid*1.732 / linespacing;
    long angle = threadIdx.y + blockDim.y*blockIdx.y;
    int point = threadIdx.x + blockDim.x*blockIdx.x;
    //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid;
    while (point < NoGrid*NoGrid && angle < NoAngles) {
        float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing;
        float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing;
        float k1, k2, k3;
        k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle];
        // recover the spherical angles of the line direction, then rotate the in-plane
        // offset (xprime, yprime) into lab coordinates
        float theta = acosf(k3);
        float phi = 0;
        if (__sinf(theta) != 0) { phi = asinf(k2 / __sinf(theta)); if (k1 / __sinf(theta) < 0) { phi = -phi + PI; } }
        else { phi = 0; }
        float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi);
        float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi);
        float z = -xprime*__sinf(theta);
        //float k1=__sinf(theta)*__cosf(phi);
        //float k2=__sinf(theta)*__sinf(phi);
        //float k3=__cosf(theta);
        int iin, jin, kin, iout, jout, kout;
        cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout);
        int nin, nout;
        if (iin >= 0 && iin < Xsize && jin >= 0 && jin < Ysize && kin >= 0 && kin < Zsize && iout >= 0 && iout < Xsize && jout >= 0 && jout < Ysize && kout >= 0 && kout < Zsize) {
            nin = index[iin + jin*Xsize + kin*Xsize*Ysize];
            nout = index[iout + jout*Xsize + kout*Xsize*Ysize];
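// index[] maps a boundary voxel back to its flat boundary id in [0, n); interior or
// out-of-box intersections were rejected above, and degenerate lines that enter and
// leave through the same voxel (nin == nout) are skipped.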
if (nin != nout) { float pincre = bodyIntegral(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, pcountinner); //float w=sqrtf((iout-iin)*(iout-iin)+(jout-jin)*(jout-jin)+(kout-kin)*(kout-kin)); pint[nin + nout*n] += pincre; //pcountinner[iin+jin*Xsize+kin*Xsize*Ysize]++; //pcountinner[iout+jout*Xsize+kout*Xsize*Ysize]++; pcount[nin + nout*n]++; //pint[nout+nin*n]+=-pincre; //pcount[nout+nin*n]++; } } point += blockDim.x*gridDim.x; angle += blockDim.y*gridDim.y; } } __global__ void omni3dparallellinesEqualSpacingWeighted(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pint, float*pweight, float* pcount, float* pcountinner, float*curl) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.732 / linespacing; long angle = threadIdx.y + blockDim.y*blockIdx.y; int point = threadIdx.x + blockDim.x*blockIdx.x; //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid*NoGrid&&angle<NoAngles) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = 0; if (__sinf(theta) != 0) { phi = asinf(k2 / __sinf(theta)); if (k1 / __sinf(theta)<0) { phi = -phi + PI; } } else { phi = 0; } float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi); float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi); float z = -xprime*__sinf(theta); //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { float pincre = bodyIntegralWeighted(Xsize, Ysize, Zsize, n, nin, nout, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, curl, pcountinner, pint, pcount, pweight); } } point += blockDim.x*gridDim.x; angle += blockDim.y*gridDim.y; } } __global__ void omni3dparallellinesEqualSpacingWeightedMiniCurl(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pint, float* pcount, float* pcountinner, float*curl) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.732 / linespacing; long angle = threadIdx.y + blockDim.y*blockIdx.y; int point = threadIdx.x + blockDim.x*blockIdx.x; //float 
spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid*NoGrid&&angle<NoAngles) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = 0; if (__sinf(theta) != 0) { phi = asinf(k2 / __sinf(theta)); if (k1 / __sinf(theta)<0) { phi = -phi + PI; } } else { phi = 0; } float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi); float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi); float z = -xprime*__sinf(theta); //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { float pincre = bodyIntegralWeightedMiniCurl(Xsize, Ysize, Zsize, n, nin, nout, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, curl, pcountinner, pint, pcount); } } point += blockDim.x*gridDim.x; angle += blockDim.y*gridDim.y; } } __global__ void omni3dparallellinesEqualSpacingSelect(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pint, float* pcount, float* pcountinner, float*curl, float threshold) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.732 / linespacing; long angle = threadIdx.y + blockDim.y*blockIdx.y; int point = threadIdx.x + blockDim.x*blockIdx.x; //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid*NoGrid&&angle<NoAngles) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = 0; if (__sinf(theta) != 0) { phi = asinf(k2 / __sinf(theta)); if (k1 / __sinf(theta)<0) { phi = -phi + PI; } } else { phi = 0; } float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi); float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi); float z = -xprime*__sinf(theta); //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && iin<Xsize&&jin >= 4 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<4 && kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { float pincre = bodyIntegral(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, 
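// Select variant: the bounds tests in this kernel additionally filter chord
// endpoints by j-band (entry at jin >= 4 paired with exit at jout < 4, or
// both endpoints inside the same band), so only chords connecting, or
// confined to, the chosen wall-parallel slabs contribute.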
density, DuDt, DvDt, DwDt, pcountinner); //float w=sqrtf((iout-iin)*(iout-iin)+(jout-jin)*(jout-jin)+(kout-kin)*(kout-kin)); pint[nin + nout*n] += pincre; //pcountinner[iin+jin*Xsize+kin*Xsize*Ysize]++; //pcountinner[iout+jout*Xsize+kout*Xsize*Ysize]++; pcount[nin + nout*n]++; //pint[nout+nin*n]+=-pincre; //pcount[nout+nin*n]++; } } if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<4 && kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<4 && kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { float pincre = bodyIntegral(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, pcountinner); //float w=sqrtf((iout-iin)*(iout-iin)+(jout-jin)*(jout-jin)+(kout-kin)*(kout-kin)); pint[nin + nout*n] += pincre; //pcountinner[iin+jin*Xsize+kin*Xsize*Ysize]++; //pcountinner[iout+jout*Xsize+kout*Xsize*Ysize]++; pcount[nin + nout*n]++; //pint[nout+nin*n]+=-pincre; //pcount[nout+nin*n]++; } } if (iin >= 0 && iin<Xsize&&jin >= 4 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 4 && jout<Ysize&&kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { float pincre = bodyIntegral(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, pcountinner); //float w=sqrtf((iout-iin)*(iout-iin)+(jout-jin)*(jout-jin)+(kout-kin)*(kout-kin)); pint[nin + nout*n] += pincre; //pcountinner[iin+jin*Xsize+kin*Xsize*Ysize]++; //pcountinner[iout+jout*Xsize+kout*Xsize*Ysize]++; pcount[nin + nout*n]++; //pint[nout+nin*n]+=-pincre; //pcount[nout+nin*n]++; } } point += blockDim.x*gridDim.x; angle += blockDim.y*gridDim.y; } } //select iout<iin __global__ void omni3dparallellinesEqualSpacingSelect2(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pint, float* pcount, float* pcountinner) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.732 / linespacing; long angle = threadIdx.y + blockDim.y*blockIdx.y; int point = threadIdx.x + blockDim.x*blockIdx.x; //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid*NoGrid&&angle<NoAngles) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = 0; if (__sinf(theta) != 0) { phi = asinf(k2 / __sinf(theta)); if (k1 / __sinf(theta)<0) { phi = -phi + PI; } } else { phi = 0; } float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi); float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi); float z = -xprime*__sinf(theta); //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && 
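// Select2 variant (see the author's "//select iout<iin" note above): only
// chords whose azimuth satisfies phi < PI/4 or phi > 3*PI/4, i.e. roughly
// aligned with the x-axis, pass the combined bounds test below.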
iin<Xsize && jin >= 0 && jin<Ysize && kin >= 0 && kin<Zsize && iout >= 0 && iout<Xsize && jout >= 0 && jout<Ysize && kout >= 0 && kout<Zsize && (phi<PI / 4 || phi>3 * PI / 4)) {
      nin = index[iin + jin*Xsize + kin*Xsize*Ysize];
      nout = index[iout + jout*Xsize + kout*Xsize*Ysize];
      if (nin != nout) {
        float pincre = bodyIntegral(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, pcountinner);
        //float w=sqrtf((iout-iin)*(iout-iin)+(jout-jin)*(jout-jin)+(kout-kin)*(kout-kin));
        pint[nin + nout*n] += pincre;
        //pcountinner[iin+jin*Xsize+kin*Xsize*Ysize]++;
        //pcountinner[iout+jout*Xsize+kout*Xsize*Ysize]++;
        pcount[nin + nout*n]++;
        //pint[nout+nin*n]+=-pincre;
        //pcount[nout+nin*n]++;
      }
    }
    point += blockDim.x*gridDim.x;
    angle += blockDim.y*gridDim.y;
  }
}

__global__ void omni2dparallellinesOnFace(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pint, float* pcount, float* pcountinner)
{
  int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2;
  float center_x = (Xsize - 1) / 2.0;
  float center_y = (Ysize - 1) / 2.0;
  float center_z = (Zsize - 1) / 2.0;
  int NoGrid = Xsize;
  if (NoGrid<Ysize) { NoGrid = Ysize; }
  if (NoGrid<Zsize) { NoGrid = Zsize; }
  NoGrid = NoGrid*1.414 / linespacing;
  long angle = threadIdx.y + blockDim.y*blockIdx.y;
  int point = threadIdx.x + blockDim.x*blockIdx.x;
  //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid;
  while (point<NoGrid && angle<NoAngles) {
    // promote to float: integer division (angle / NoAngles) would make theta identically zero
    float theta = float(angle) / NoAngles * 2 * PI;
    ///on XY face
    float k1 = __cosf(theta);
    float k2 = __sinf(theta);
    float k3 = 0;
    float x = __sinf(theta)*(point - NoGrid / 2)*linespacing;
    float y = __cosf(theta)*(point - NoGrid / 2)*linespacing;
    float z = -Zsize / 2.0;
    int iin, jin, kin, iout, jout, kout;
    cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout);
    int nin, nout;
    if (iin >= 0 && iin<Xsize && jin >= 0 && jin<Ysize && kin >= 0 && kin<Zsize && iout >= 0 && iout<Xsize && jout >= 0 && jout<Ysize && kout >= 0 && kout<Zsize) {
      nin = index[iin + jin*Xsize + kin*Xsize*Ysize];
      nout = index[iout + jout*Xsize + kout*Xsize*Ysize];
      if (nin != nout) {
        float pincre = bodyIntegral(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, pcountinner);
        //float w=sqrtf((iout-iin)*(iout-iin)+(jout-jin)*(jout-jin)+(kout-kin)*(kout-kin));
        pint[nin + nout*n] += pincre;
        //pcountinner[iin+jin*Xsize+kin*Xsize*Ysize]++;
        //pcountinner[iout+jout*Xsize+kout*Xsize*Ysize]++;
        pcount[nin + nout*n]++;
        //pint[nout+nin*n]+=-pincre;
        //pcount[nout+nin*n]++;
      }
    }
    ///on XY face 2
    z = Zsize / 2.0;
    cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout);
    if (iin >= 0 && iin<Xsize && jin >= 0 && jin<Ysize && kin >= 0 && kin<Zsize && iout >= 0 && iout<Xsize && jout >= 0 && jout<Ysize && kout >= 0 && kout<Zsize) {
      nin = index[iin + jin*Xsize + kin*Xsize*Ysize];
      nout = index[iout + jout*Xsize + kout*Xsize*Ysize];
      if (nin != nout) {
        float pincre = bodyIntegral(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, pcountinner);
        //float w=sqrtf((iout-iin)*(iout-iin)+(jout-jin)*(jout-jin)+(kout-kin)*(kout-kin));
        pint[nin + nout*n] += pincre;
        //pcountinner[iin+jin*Xsize+kin*Xsize*Ysize]++;
        //pcountinner[iout+jout*Xsize+kout*Xsize*Ysize]++;
        pcount[nin + nout*n]++;
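// (Same accumulation as the first z-face above: this kernel only casts planar
// fans of chords lying on the two z = -Zsize/2 and z = +Zsize/2 bounding
// faces, with k3 = 0, so it couples boundary nodes within each XY face rather
// than across the volume.)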
//pint[nout+nin*n]+=-pincre; //pcount[nout+nin*n]++; } } point += blockDim.x*gridDim.x; angle += blockDim.y*gridDim.y; } } __global__ void devidecount(long Xsize, long Ysize, long Zsize, float* pint, float* pcount) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; long tid = threadIdx.x + blockDim.x*blockIdx.x; while (tid<n*n) { if (pcount[tid]>0) { pint[tid] /= pcount[tid]; } tid += blockDim.x*gridDim.x; } } __global__ void devidecountWeight(long Xsize, long Ysize, long Zsize, float* pint, float* pcount, float*pweight) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; long tid = threadIdx.x + blockDim.x*blockIdx.x; while (tid<n*n) { if (pcount[tid]>0) { pint[tid] /= pcount[tid]; pweight[tid] /= pcount[tid]; } tid += blockDim.x*gridDim.x; } } __global__ void omni3dvirtual(long Xsize, long Ysize, long Zsize, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pint, float* pcount) { float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; //virtual boundary an ellipsoid int a = Xsize - 1; int b = Ysize - 1; int c = Zsize - 1; float delttheta = PI / Zsize / 2; float deltbeta = PI / Xsize / 2; float xin, yin, zin, xout, yout, zout, k1, k2, k3, x, y, z; int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; int iin, jin, kin, iout, jout, kout, indexin, indexout; indexin = blockDim.x*blockIdx.x + threadIdx.x; float thetain = (indexin / (2 * Zsize))*delttheta; float betain = (blockDim.x*blockIdx.x + threadIdx.x - 2 * Zsize*(indexin / (2 * Zsize)))*deltbeta; indexout = blockDim.y*blockIdx.y + threadIdx.y; float thetaout = (indexout / (2 * Xsize))*delttheta; float betaout = (blockDim.y*blockIdx.y + threadIdx.y - 2 * Xsize*(indexout / (2 * Xsize)))*deltbeta; while (indexin<int(PI / delttheta)*int(PI / deltbeta) * 2 && indexout<int(PI / delttheta)*int(PI / deltbeta) * 2) { xin = a*sin(thetain)*cos(betain); yin = b*sin(thetain)*sin(betain); zin = c*cos(thetain); xout = a*sin(thetaout)*cos(betaout); yout = b*sin(thetaout)*sin(betaout); zout = c*cos(thetaout); k1 = xout - xin; k2 = yout - yin; k3 = zout - zin; /////case 1, vertical to x-axis if (k1 == 0 && k2 != 0 && k3 != 0) { if (xin >= -center_x&&xin <= center_x) { ////four crossing point;y=0;y=max;z=0;z=max; float r = (-center_y - yin) / k2; float y1 = -center_y; float z1 = zin + k3*r; r = (center_y - yin) / k2; float y2 = center_y; float z2 = zin + k3*r; r = (-center_z - zin) / k3; float z3 = -center_z; float y3 = yin + k2*r; r = (center_z - zin) / k3; float z4 = center_z; float y4 = yin + k2*r; bool flag = 0; if (z1 <= center_z&&z1 >= -center_z&&flag == 0)//cross y=0; { if (flag == 0) { iin = int(xin + center_x + 0.5); jin = 0; kin = int(z1 + center_z + 0.5); } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = 0; kout = int(z1 + center_z + 0.5); } flag = 1; } if (z2 <= center_z&&z2 >= -center_z)//y=max; { if (flag == 0) { iin = int(xin + center_x + 0.5); jin = Ysize - 1; kin = int(z2 + center_z + 0.5); } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = Ysize - 1; kout = int(z2 + center_z + 0.5); } flag = 1; } if (y3 <= center_y&&y3 >= -center_y)//z=0; { if (flag == 0) { iin = int(xin + center_x + 0.5); jin = int(y3 + center_y + 0.5); kin = 0; } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = int(y3 + center_y + 0.5); kout = 0; } flag = 1; } if (y4 <= center_y&&y4 >= -center_y) { if (flag == 0) { iin = int(xin + center_x + 0.5); 
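// The flag machinery used here and in cases 2-7: the first boundary-plane hit
// that lies inside the corresponding box face is stored as the entry point
// (flag 0 -> in), the second as the exit point (flag 1 -> out); afterwards the
// pair is swapped if (out - in) . k < 0 so that (iin,jin,kin) is always the
// upstream end of the chord.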
jin = int(y4 + center_y + 0.5); kin = Zsize - 1; }
        if (flag == 1) { iout = int(xin + center_x + 0.5); jout = int(y4 + center_y + 0.5); kout = Zsize - 1; }
      }
      //sorting intersection point by in, out order
      if (flag != 0) {
        if ((jout - jin)*k2 + (kout - kin)*k3<0) { int temp; temp = jin; jin = jout; jout = temp; temp = kin; kin = kout; kout = temp; }
      }
    }
  }
  ///case 2, vertical to y-axis
  if (k1 != 0 && k2 == 0 && k3 != 0) {
    if (yin >= -center_y && yin <= center_y) {
      ////four crossing point
      float r = (-center_x - xin) / k1; float x1 = -center_x; float z1 = zin + k3*r; //x=0;
      r = (center_x - xin) / k1; float x2 = center_x; float z2 = zin + k3*r; //x=max
      r = (-center_z - zin) / k3; float z3 = -center_z; float x3 = xin + k1*r; //z=0;
      r = (center_z - zin) / k3; float z4 = center_z; float x4 = xin + k1*r; //z=max;
      bool flag = 0;
      if (z1 <= center_z && z1 >= -center_z) {
        if (flag == 0) { iin = 0; jin = int(yin + center_y + 0.5); kin = int(z1 + center_z + 0.5); }
        if (flag == 1) { iout = 0; jout = int(yin + center_y + 0.5); kout = int(z1 + center_z + 0.5); }
        flag = 1;
      }
      if (z2 <= center_z && z2 >= -center_z) {
        if (flag == 0) { iin = Xsize - 1; jin = int(yin + center_y + 0.5); kin = int(z2 + center_z + 0.5); }
        if (flag == 1) { iout = Xsize - 1; jout = int(yin + center_y + 0.5); kout = int(z2 + center_z + 0.5); }
        flag = 1;
      }
      if (x3 <= center_x && x3 >= -center_x) {
        if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = int(yin + center_y + 0.5); kin = 0; }
        if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = int(yin + center_y + 0.5); kout = 0; }
        flag = 1;
      }
      if (x4 <= center_x && x4 >= -center_x) {
        if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = int(yin + center_y + 0.5); kin = Zsize - 1; }
        if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = int(yin + center_y + 0.5); kout = Zsize - 1; }
        flag = 1;
      }
      //sorting intersection point by in, out order
      if (flag != 0) {
        if ((iout - iin)*k1 + (kout - kin)*k3<0) { int temp; temp = iin; iin = iout; iout = temp; temp = kin; kin = kout; kout = temp; }
      }
    }
  }
  ///case 3, vertical to z-axis
  if (k1 != 0 && k2 != 0 && k3 == 0) {
    if (zin >= -center_z && zin <= center_z) {
      ////four crossing point
      float r = (-center_x - xin) / k1; float x1 = -center_x; float y1 = yin + k2*r; //x=0;
      r = (center_x - xin) / k1; float x2 = center_x; float y2 = yin + k2*r; //x=max;
      r = (-center_y - yin) / k2; float y3 = -center_y; float x3 = xin + k1*r; //y=0;
      r = (center_y - yin) / k2; float y4 = center_y; float x4 = xin + k1*r; //y=max;
      bool flag = 0;
      if (y1 <= center_y && y1 >= -center_y) {
        if (flag == 0) { iin = 0; jin = int(y1 + center_y + 0.5); kin = int(zin + center_z + 0.5); }
        if (flag == 1) { iout = 0; jout = int(y1 + center_y + 0.5); kout = int(zin + center_z + 0.5); }
        flag = 1;
      }
      if (y2 <= center_y && y2 >= -center_y) {
        if (flag == 0) { iin = Xsize - 1; jin = int(y2 + center_y + 0.5); kin = int(zin + center_z + 0.5); }
        if (flag == 1) { iout = Xsize - 1; jout = int(y2 + center_y + 0.5); kout = int(zin + center_z + 0.5); }
        flag = 1;
      }
      if (x3 <= center_x && x3 >= -center_x) {
        if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = 0; kin = int(zin + center_z + 0.5); }
        if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = 0; kout = int(zin + center_z + 0.5); }
        flag = 1;
      }
      if (x4 <= center_x && x4 >= -center_x) {
        if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = Ysize - 1; kin = int(zin + center_z + 0.5); }
        if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = Ysize - 1; kout = int(zin + center_z + 0.5); }
        flag = 1;
      }
      //sorting intersection point by in, out order
      if (flag != 0) {
        if ((iout -
iin)*k1 + (jout - jin)*k2<0) { int temp; temp = iin; iin = iout; iout = temp; temp = jin; jin = jout; jout = temp; } } } } ///case 4, vertical to plane IJ if (abs(k1)<zero&&abs(k2)<zero&&abs(k3) >= zero) { if (xin <= center_x&&xin >= -center_x&&yin <= center_y&&yin >= -center_y) { iin = int(xin + center_x + 0.5); iout = iin; jin = int(yin + center_y + 0.5); jout = jin; if (k3>0) { kin = 0; kout = Zsize - 1; } else{ kin = Zsize - 1; kout = 0; } } } ///case 5, vertical to IK plane if (abs(k1)<zero&&abs(k2) >= zero&&abs(k3)<zero) { if (xin >= -center_x&&xin <= center_x&&zin >= -center_z&&zin <= center_z) { iin = int(xin + center_x + 0.5); iout = iin; kin = int(zin + center_z + 0.5); kout = kin; if (k2>0) { jout = Ysize - 1; jin = 0; } else { jin = Ysize - 1; jout = 0; } } } ///case 6, vertical to JK plane if (abs(k1) >= zero&&abs(k2)<zero&&abs(k3)<zero) { if (yin >= -center_y&&yin<center_y&&zin >= -center_z&&zin <= center_z) { jin = int(yin + center_y + 0.5); jout = jin; kin = int(zin + center_z + 0.5); kout = kin; if (k1>0) { iout = Xsize - 1; iin = 0; } else { iin = Xsize - 1; iout = 0; } } } /// case 7, purely inclined if (abs(k1) >= zero&&abs(k2) >= zero&&abs(k3) >= zero) { /// six crossing point float r; float x1, x2, x3, x4, x5, x6; float y1, y2, y3, y4, y5, y6; float z1, z2, z3, z4, z5, z6; r = (-center_x - xin) / k1; x1 = -center_x; y1 = yin + k2*r; z1 = zin + k3*r;//x=0 r = (center_x - xin) / k1; x2 = center_x; y2 = yin + k2*r; z2 = zin + k3*r;//x=max r = (-center_y - yin) / k2; x3 = xin + k1*r; y3 = -center_y; z3 = zin + k3*r;//y=0; r = (center_y - yin) / k2; x4 = xin + k1*r; y4 = center_y; z4 = zin + k3*r;//y=max r = (-center_z - zin) / k3; x5 = xin + k1*r; y5 = yin + k2*r; z5 = -center_z;//z=0; r = (center_z - zin) / k3; x6 = xin + k1*r; y6 = yin + k2*r; z6 = center_z;//z=max bool flag = 0; if (y1 <= center_y&&y1 >= -center_y&&z1 <= center_z&&z1 >= -center_z) { if (flag == 0) { iin = 0; jin = int(y1 + center_y + 0.5); kin = int(z1 + center_z + 0.5); } if (flag == 1) { iout = 0; jout = int(y1 + center_y + 0.5); kout = int(z1 + center_z + 0.5); } flag = 1; } if (y2 <= center_y&&y2 >= -center_y&&z2 <= center_z&&z2 >= -center_z) { if (flag == 0) { iin = Xsize - 1; jin = int(y2 + center_y + 0.5); kin = int(z2 + center_z + 0.5); } if (flag == 1) { iout = Xsize - 1; jout = int(y2 + center_y + 0.5); kout = int(z2 + center_z + 0.5); } flag = 1; } if (x3 <= center_x&&x3 >= -center_x&&z3 <= center_z&&z3 >= -center_z) { if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = 0; kin = int(z3 + center_z + 0.5); } if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = 0; kout = int(z3 + center_z + 0.5); } flag = 1; } if (x4 <= center_x&&x4 >= -center_x&&z4 <= center_z&&z4 >= -center_z) { if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = Ysize - 1; kin = int(z4 + center_z + 0.5); } if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = Ysize - 1; kout = int(z4 + center_z + 0.5); } flag = 1; } if (x5 <= center_x&&x5 >= -center_x&&y5 <= center_y&&y5 >= -center_y) { if (flag == 0) { iin = int(x5 + center_x + 0.5); jin = int(y5 + center_y + 0.5); kin = 0; } if (flag == 1) { iout = int(x5 + center_x + 0.5); jout = int(y5 + center_y + 0.5); kout = 0; } flag = 1; } if (x6 <= center_x&&x6 >= -center_x&&y6 <= center_y&&y6 >= -center_y) { if (flag == 0) { iin = int(x6 + center_x + 0.5); jin = int(y6 + center_y + 0.5); kin = Zsize - 1; } if (flag == 1) { iout = int(x6 + center_x + 0.5); jout = int(y6 + center_y + 0.5); kout = Zsize - 1; } flag = 1; } //sorting intersection point by in, out order if 
((iout - iin)*k1 + (jout - jin)*k2 + (kout - kin)*k3<0) {
      int temp;
      temp = iin; iin = iout; iout = temp;
      temp = jin; jin = jout; jout = temp;
      temp = kin; kin = kout; kout = temp;
    }
  }
  //////////////////////////////END OF CALCULATING IN AND OUT POINT ON REAL BOUNDARY////////////////////////////////
  if (iin >= 0 && iin<Xsize && jin >= 0 && jin<Ysize && kin >= 0 && kin<Zsize && iout >= 0 && iout<Xsize && jout >= 0 && jout<Ysize && kout >= 0 && kout<Zsize && (iin - center_x - xin)*(iin - center_x - xout) + (jin - center_y - yin)*(jin - center_y - yout) + (kin - center_z - zin)*(kin - center_z - zout)<0 && (iin + jin + kin + iout + jout + kout) != 0 && !(iin == iout && jin == jout && kin == kout)) {
    int nin, nout;
    long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3;
    ilast = iin; jlast = jin; klast = kin;
    nin = index[iin + jin*Xsize + kin*Xsize*Ysize];
    nout = index[iout + jout*Xsize + kout*Xsize*Ysize];
    if (nin != nout && nin<n && nout<n) {
      do {
        if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; }
        if (ilast == iout) { inext1 = ilast - 1e6; jnext1 = jlast; knext1 = klast; }
        if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; }
        if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; }
        if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 1e6; knext2 = klast; }
        if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; }
        if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; }
        if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1e6; }
        if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; }
        ///determine which one is closer to integration path
        float r, d1, d2, d3;
        r = k1*inext1 - iin*k1 + k2*jnext1 - k2*jin + k3*knext1 - k3*kin;
        x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
        d1 = sqrt((x - inext1)*(x - inext1) + (y - jnext1)*(y - jnext1) + (z - knext1)*(z - knext1));
        r = k1*inext2 - iin*k1 + k2*jnext2 - k2*jin + k3*knext2 - k3*kin;
        x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
        d2 = sqrt((x - inext2)*(x - inext2) + (y - jnext2)*(y - jnext2) + (z - knext2)*(z - knext2));
        r = k1*inext3 - iin*k1 + k2*jnext3 - k2*jin + k3*knext3 - k3*kin;
        x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
        d3 = sqrt((x - inext3)*(x - inext3) + (y - jnext3)*(y - jnext3) + (z - knext3)*(z - knext3));
        //////End of calculation distance///////////////
        if (d1 <= d2 && d1 <= d3 && inext1 >= 0 && inext1<Xsize && jnext1 >= 0 && jnext1<Ysize && knext1 >= 0 && knext1<Zsize) {
          pint[nin + nout*n] += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]);
          ilast = inext1;
        }
        if (d2<d1 && d2 <= d3 && inext2<Xsize && jnext2 >= 0 && jnext2<Ysize && knext2 >= 0 && knext2<Zsize) {
          pint[nin + nout*n] += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]);
          jlast = jnext2;
        }
        if (d3<d1 && d3<d2 && inext3<Xsize && jnext3 >= 0 && jnext3<Ysize && knext3 >= 0 && knext3<Zsize) {
          pint[nin + nout*n] += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]);
          klast = knext3;
        }
      } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5);
      pcount[nin + nout*n]++;
    }
  }
  indexin = indexin + blockDim.x*gridDim.x;
  indexout = indexout + blockDim.y*gridDim.y;
  }
}

__global__ void omni3virtualgrid(long Xsize, long Ysize, long Zsize, int NoTheta, int NoBeta, long* index, long* ninvir, long *noutvir, float
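// omni3virtualgrid: same virtual-ellipsoid chord construction as above, but
// the angular resolution is an explicit (NoTheta, NoBeta) argument and, per
// virtual pair, it records the entry/exit boundary nodes (ninvir/noutvir) and
// the path integral (pintvir) instead of accumulating into the n*n pair
// matrix; presumably these are assembled later by a consumer not shown here.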
deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pintvir) { float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; //virtual boundary an ellipsoid int a = Xsize - 1; int b = Ysize - 1; int c = Zsize - 1; float delttheta = PI / NoTheta; float deltbeta = 2 * PI / NoBeta; float xin, yin, zin, xout, yout, zout, k1, k2, k3, x, y, z; //int n=Xsize*Ysize*2+(Zsize-2)*Ysize*2+(Xsize-2)*(Zsize-2)*2; int iin, jin, kin, iout, jout, kout, indexin, indexout; indexin = blockDim.x*blockIdx.x + threadIdx.x; float thetain = (indexin / (NoBeta))*delttheta; float betain = (blockDim.x*blockIdx.x + threadIdx.x - NoBeta*(indexin / (NoBeta)))*deltbeta; indexout = blockDim.y*blockIdx.y + threadIdx.y; float thetaout = (indexout / (NoBeta))*delttheta; float betaout = (blockDim.y*blockIdx.y + threadIdx.y - NoBeta*(indexout / (NoBeta)))*deltbeta; while (indexin<NoTheta*NoBeta&&indexout<NoTheta*NoBeta) { xin = a*sin(thetain)*cos(betain); yin = b*sin(thetain)*sin(betain); zin = c*cos(thetain); xout = a*sin(thetaout)*cos(betaout); yout = b*sin(thetaout)*sin(betaout); zout = c*cos(thetaout); k1 = xout - xin; k2 = yout - yin; k3 = zout - zin; /////case 1, vertical to x-axis if (k1 == 0 && k2 != 0 && k3 != 0) { if (xin >= -center_x&&xin <= center_x) { ////four crossing point;y=0;y=max;z=0;z=max; float r = (-center_y - yin) / k2; float y1 = -center_y; float z1 = zin + k3*r; r = (center_y - yin) / k2; float y2 = center_y; float z2 = zin + k3*r; r = (-center_z - zin) / k3; float z3 = -center_z; float y3 = yin + k2*r; r = (center_z - zin) / k3; float z4 = center_z; float y4 = yin + k2*r; bool flag = 0; if (z1 <= center_z&&z1 >= -center_z)//cross y=0; { if (flag == 0) { iin = int(xin + center_x + 0.5); jin = 0; kin = int(z1 + center_z + 0.5); } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = 0; kout = int(z1 + center_z + 0.5); } flag = 1; } if (z2 <= center_z&&z2 >= -center_z)//y=max; { if (flag == 0) { iin = int(xin + center_x + 0.5); jin = Ysize - 1; kin = int(z2 + center_z + 0.5); } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = Ysize - 1; kout = int(z2 + center_z + 0.5); } flag = 1; } if (y3 <= center_y&&y3 >= -center_y)//z=0; { if (flag == 0) { iin = int(xin + center_x + 0.5); jin = int(y3 + center_y + 0.5); kin = 0; } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = int(y3 + center_y + 0.5); kout = 0; } flag = 1; } if (y4 <= center_y&&y4 >= -center_y) { if (flag == 0) { iin = int(xin + center_x + 0.5); jin = int(y4 + center_y + 0.5); kin = Zsize - 1; } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = int(y4 + center_y + 0.5); kout = Zsize - 1; } } //sorting intersection point by in, out order if (flag != 0) { if ((jout - jin)*k2 + (kout - kin)*k3<0) { int temp; temp = jin; jin = jout; jout = temp; temp = kin; kin = kout; kout = temp; } } } } ///case 2, vertical to y-axis if (k1 != 0 && k2 == 0 && k3 != 0) { if (yin >= -center_y&&yin <= center_y) { ////four crossing point float r = (-center_x - xin) / k1; float x1 = -center_x; float z1 = zin + k3*r;//x=0; r = (center_x - xin) / k1; float x2 = center_x; float z2 = zin + k3*r;//x=max r = (-center_z - zin) / k3; float z3 = -center_z; float x3 = xin + k1*r;//z=0; r = (center_z - zin) / k3; float z4 = center_z; float x4 = xin + k1*r;//z=max; bool flag = 0; if (z1 <= center_z&&z1 >= -center_z) { if (flag == 0) { iin = 0; jin = int(yin + center_y + 0.5); kin = int(z1 + center_z + 0.5); } if (flag == 1) { iout = 0; jout = int(yin + center_y + 
0.5); kout = int(z1 + center_z + 0.5); }
        flag = 1;
      }
      if (z2 <= center_z && z2 >= -center_z) {
        if (flag == 0) { iin = Xsize - 1; jin = int(yin + center_y + 0.5); kin = int(z2 + center_z + 0.5); }
        if (flag == 1) { iout = Xsize - 1; jout = int(yin + center_y + 0.5); kout = int(z2 + center_z + 0.5); }
        flag = 1;
      }
      if (x3 <= center_x && x3 >= -center_x) {
        if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = int(yin + center_y + 0.5); kin = 0; }
        if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = int(yin + center_y + 0.5); kout = 0; }
        flag = 1;
      }
      if (x4 <= center_x && x4 >= -center_x) {
        if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = int(yin + center_y + 0.5); kin = Zsize - 1; }
        if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = int(yin + center_y + 0.5); kout = Zsize - 1; }
        flag = 1;
      }
      //sorting intersection point by in, out order
      if (flag != 0) {
        if ((iout - iin)*k1 + (kout - kin)*k3<0) { int temp; temp = iin; iin = iout; iout = temp; temp = kin; kin = kout; kout = temp; }
      }
    }
  }
  ///case 3, vertical to z-axis
  if (k1 != 0 && k2 != 0 && k3 == 0) {
    if (zin >= -center_z && zin <= center_z) {
      ////four crossing point
      float r = (-center_x - xin) / k1; float x1 = -center_x; float y1 = yin + k2*r; //x=0;
      r = (center_x - xin) / k1; float x2 = center_x; float y2 = yin + k2*r; //x=max;
      r = (-center_y - yin) / k2; float y3 = -center_y; float x3 = xin + k1*r; //y=0;
      r = (center_y - yin) / k2; float y4 = center_y; float x4 = xin + k1*r; //y=max;
      bool flag = 0;
      if (y1 <= center_y && y1 >= -center_y) {
        if (flag == 0) { iin = 0; jin = int(y1 + center_y + 0.5); kin = int(zin + center_z + 0.5); }
        if (flag == 1) { iout = 0; jout = int(y1 + center_y + 0.5); kout = int(zin + center_z + 0.5); }
        flag = 1;
      }
      if (y2 <= center_y && y2 >= -center_y) {
        if (flag == 0) { iin = Xsize - 1; jin = int(y2 + center_y + 0.5); kin = int(zin + center_z + 0.5); }
        if (flag == 1) { iout = Xsize - 1; jout = int(y2 + center_y + 0.5); kout = int(zin + center_z + 0.5); }
        flag = 1;
      }
      if (x3 <= center_x && x3 >= -center_x) {
        if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = 0; kin = int(zin + center_z + 0.5); }
        if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = 0; kout = int(zin + center_z + 0.5); }
        flag = 1;
      }
      if (x4 <= center_x && x4 >= -center_x) {
        if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = Ysize - 1; kin = int(zin + center_z + 0.5); }
        if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = Ysize - 1; kout = int(zin + center_z + 0.5); }
        flag = 1;
      }
      //sorting intersection point by in, out order
      if (flag != 0) {
        if ((iout - iin)*k1 + (jout - jin)*k2<0) { int temp; temp = iin; iin = iout; iout = temp; temp = jin; jin = jout; jout = temp; }
      }
    }
  }
  ///case 4, vertical to plane IJ
  if (abs(k1)<zero && abs(k2)<zero && abs(k3) >= zero) {
    if (xin <= center_x && xin >= -center_x && yin <= center_y && yin >= -center_y) {
      iin = int(xin + center_x + 0.5); iout = iin;
      jin = int(yin + center_y + 0.5); jout = jin;
      if (k3>0) { kin = 0; kout = Zsize - 1; }
      else { kin = Zsize - 1; kout = 0; }
    }
  }
  ///case 5, vertical to IK plane
  if (abs(k1)<zero && abs(k2) >= zero && abs(k3)<zero) {
    if (xin >= -center_x && xin <= center_x && zin >= -center_z && zin <= center_z) {
      iin = int(xin + center_x + 0.5); iout = iin;
      kin = int(zin + center_z + 0.5); kout = kin;
      if (k2>0) { jout = Ysize - 1; jin = 0; }
      else { jin = Ysize - 1; jout = 0; }
    }
  }
  ///case 6, vertical to JK plane
  if (abs(k1) >= zero && abs(k2)<zero && abs(k3)<zero) {
    if (yin >= -center_y && yin<center_y && zin >= -center_z && zin <= center_z) {
      jin = int(yin + center_y + 0.5); jout = jin;
      kin = int(zin + center_z + 0.5); kout = kin;
      if (k1>0) { iout = Xsize - 1; iin = 0; }
      else { iin = Xsize - 1; iout = 0; }
    }
  }
  /// case 7, purely inclined
  if (abs(k1) >= zero && abs(k2) >= zero && abs(k3) >= zero) {
    /// six crossing point
    float r;
    float x1, x2, x3, x4, x5, x6;
    float y1, y2, y3, y4, y5, y6;
    float z1, z2, z3, z4, z5, z6;
    r = (-center_x - xin) / k1; x1 = -center_x; y1 = yin + k2*r; z1 = zin + k3*r; //x=0
    r = (center_x - xin) / k1; x2 = center_x; y2 = yin + k2*r; z2 = zin + k3*r; //x=max
    r = (-center_y - yin) / k2; x3 = xin + k1*r; y3 = -center_y; z3 = zin + k3*r; //y=0;
    r = (center_y - yin) / k2; x4 = xin + k1*r; y4 = center_y; z4 = zin + k3*r; //y=max
    r = (-center_z - zin) / k3; x5 = xin + k1*r; y5 = yin + k2*r; z5 = -center_z; //z=0;
    r = (center_z - zin) / k3; x6 = xin + k1*r; y6 = yin + k2*r; z6 = center_z; //z=max
    bool flag = 0;
    if (y1 <= center_y && y1 >= -center_y && z1 <= center_z && z1 >= -center_z) {
      if (flag == 0) { iin = 0; jin = int(y1 + center_y + 0.5); kin = int(z1 + center_z + 0.5); }
      if (flag == 1) { iout = 0; jout = int(y1 + center_y + 0.5); kout = int(z1 + center_z + 0.5); }
      flag = 1;
    }
    if (y2 <= center_y && y2 >= -center_y && z2 <= center_z && z2 >= -center_z) {
      if (flag == 0) { iin = Xsize - 1; jin = int(y2 + center_y + 0.5); kin = int(z2 + center_z + 0.5); }
      if (flag == 1) { iout = Xsize - 1; jout = int(y2 + center_y + 0.5); kout = int(z2 + center_z + 0.5); }
      flag = 1;
    }
    if (x3 <= center_x && x3 >= -center_x && z3 <= center_z && z3 >= -center_z) {
      if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = 0; kin = int(z3 + center_z + 0.5); }
      if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = 0; kout = int(z3 + center_z + 0.5); }
      flag = 1;
    }
    if (x4 <= center_x && x4 >= -center_x && z4 <= center_z && z4 >= -center_z) {
      if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = Ysize - 1; kin = int(z4 + center_z + 0.5); }
      if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = Ysize - 1; kout = int(z4 + center_z + 0.5); }
      flag = 1;
    }
    if (x5 <= center_x && x5 >= -center_x && y5 <= center_y && y5 >= -center_y) {
      if (flag == 0) { iin = int(x5 + center_x + 0.5); jin = int(y5 + center_y + 0.5); kin = 0; }
      if (flag == 1) { iout = int(x5 + center_x + 0.5); jout = int(y5 + center_y + 0.5); kout = 0; }
      flag = 1;
    }
    if (x6 <= center_x && x6 >= -center_x && y6 <= center_y && y6 >= -center_y) {
      if (flag == 0) { iin = int(x6 + center_x + 0.5); jin = int(y6 + center_y + 0.5); kin = Zsize - 1; }
      if (flag == 1) { iout = int(x6 + center_x + 0.5); jout = int(y6 + center_y + 0.5); kout = Zsize - 1; }
      flag = 1;
    }
    //sorting intersection point by in, out order
    if ((iout - iin)*k1 + (jout - jin)*k2 + (kout - kin)*k3<0) {
      int temp;
      temp = iin; iin = iout; iout = temp;
      temp = jin; jin = jout; jout = temp;
      temp = kin; kin = kout; kout = temp;
    }
  }
  //////////////////////////////END OF CALCULATING IN AND OUT POINT ON REAL BOUNDARY////////////////////////////////
  if ((iin - center_x - xin)*(iin - center_x - xout) + (jin - center_y - yin)*(jin - center_y - yout) + (kin - center_z - zin)*(kin - center_z - zout)<0 && (iin + jin + kin + iout + jout + kout) != 0 && !(iin == iout && jin == jout && kin == kout)) {
    long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3;
    ilast = iin; jlast = jin; klast = kin;
    do {
      if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; }
      if (ilast == iout) { inext1 = ilast - 1e6; jnext1 = jlast; knext1 = klast; }
      if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; }
      if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; }
      if (jlast == jout) { inext2 = ilast;
jnext2 = jlast - 1e6; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1e6; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to longegration path float r, d1, d2, d3; r = k1*inext1 - iin*k1 + k2*jnext1 - k2*jin + k3*knext1 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r; d1 = sqrt((x - inext1)*(x - inext1) + (y - jnext1)*(y - jnext1) + (z - knext1)*(z - knext1)); r = k1*inext2 - iin*k1 + k2*jnext2 - k2*jin + k3*knext2 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r; d2 = sqrt((x - inext2)*(x - inext2) + (y - jnext2)*(y - jnext2) + (z - knext2)*(z - knext2)); r = k1*inext3 - iin*k1 + k2*jnext3 - k2*jin + k3*knext3 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r; d3 = sqrt((x - inext3)*(x - inext3) + (y - jnext3)*(y - jnext3) + (z - knext3)*(z - knext3)); //////End of calculation distance/////////////// ninvir[indexin + indexout*NoTheta*NoBeta] = index[iin + jin*Xsize + kin*Xsize*Ysize]; noutvir[indexin + indexout*NoTheta*NoBeta] = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (d1 <= d2&&d1 <= d3) { pintvir[indexin + indexout*NoTheta*NoBeta] += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); ilast = inext1; } if (d2<d1&&d2 <= d3) { pintvir[indexin + indexout*NoTheta*NoBeta] += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); jlast = jnext2; } if (d3<d1&&d3<d2) { pintvir[indexin + indexout*NoTheta*NoBeta] += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); klast = knext3; } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5); } indexin = indexin + blockDim.x*gridDim.x; indexout = indexout + blockDim.y*gridDim.y; } } __global__ void omni3dvirtual2(long Xsize, long Ysize, long Zsize, long* index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pint) { float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; //virtual boundary an ellipsoid int a = Xsize - 1; int b = Ysize - 1; int c = Zsize - 1; float delttheta = PI / Zsize / 2; float deltbeta = PI / Xsize / 2; float xin, yin, zin, xout, yout, zout, k1, k2, k3, x, y, z; float r, d1, d2, d3; int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; int iin, jin, kin, iout, jout, kout, indexin; indexin = blockDim.x*blockIdx.x + threadIdx.x; float thetain = (indexin / (2 * Zsize) - 1)*delttheta; float betain = (blockDim.x*blockIdx.x + threadIdx.x - 2 * Zsize*(indexin / (2 * Zsize)) - 1)*deltbeta; for (float thetaout = 0; thetaout<PI; thetaout += delttheta) { for (float betaout = 0; betaout<2 * PI; betaout += deltbeta) { xin = a*sin(thetain)*cos(betain); yin = b*sin(thetain)*sin(betain); zin = c*cos(thetain); xout = a*sin(thetaout)*cos(betaout); yout = b*sin(thetaout)*sin(betaout); zout = c*cos(thetaout); k1 = xout - xin; k2 = yout - yin; k3 = zout - zin; /////case 1, vertical to x-axis if (k1 == 0 && k2 != 0 && k3 != 0) { if (xin >= -center_x&&xin <= center_x) { ////four crossing point;y=0;y=max;z=0;z=max; r = (-center_y - yin) / k2; float y1 = -center_y; float z1 = zin + k3*r; r = 
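// omni3dvirtual2 splits the work differently: each thread fixes one virtual
// entry point and loops serially over every virtual exit point, accumulating
// straight into the real-boundary pair matrix pint.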
(center_y - yin) / k2; float y2 = center_y; float z2 = zin + k3*r;
      r = (-center_z - zin) / k3; float z3 = -center_z; float y3 = yin + k2*r;
      r = (center_z - zin) / k3; float z4 = center_z; float y4 = yin + k2*r;
      bool flag = 0;
      if (z1 <= center_z && z1 >= -center_z) { //cross y=0;
        if (flag == 0) { iin = int(xin + center_x + 0.5); jin = 0; kin = int(z1 + center_z + 0.5); }
        if (flag == 1) { iout = int(xin + center_x + 0.5); jout = 0; kout = int(z1 + center_z + 0.5); }
        flag = 1;
      }
      if (z2 <= center_z && z2 >= -center_z) { //y=max;
        if (flag == 0) { iin = int(xin + center_x + 0.5); jin = Ysize - 1; kin = int(z2 + center_z + 0.5); }
        if (flag == 1) { iout = int(xin + center_x + 0.5); jout = Ysize - 1; kout = int(z2 + center_z + 0.5); }
        flag = 1;
      }
      if (y3 <= center_y && y3 >= -center_y) { //z=0;
        if (flag == 0) { iin = int(xin + center_x + 0.5); jin = int(y3 + center_y + 0.5); kin = 0; }
        if (flag == 1) { iout = int(xin + center_x + 0.5); jout = int(y3 + center_y + 0.5); kout = 0; }
        flag = 1;
      }
      if (y4 <= center_y && y4 >= -center_y) {
        if (flag == 0) { iin = int(xin + center_x + 0.5); jin = int(y4 + center_y + 0.5); kin = Zsize - 1; }
        if (flag == 1) { iout = int(xin + center_x + 0.5); jout = int(y4 + center_y + 0.5); kout = Zsize - 1; }
      }
      //sorting intersection point by in, out order
      if (flag != 0) {
        if ((jout - jin)*k2 + (kout - kin)*k3<0) { int temp; temp = jin; jin = jout; jout = temp; temp = kin; kin = kout; kout = temp; }
      }
    }
  }
  ///case 2, vertical to y-axis
  if (k1 != 0 && k2 == 0 && k3 != 0) {
    if (yin >= -center_y && yin <= center_y) {
      ////four crossing point
      r = (-center_x - xin) / k1; float x1 = -center_x; float z1 = zin + k3*r; //x=0;
      r = (center_x - xin) / k1; float x2 = center_x; float z2 = zin + k3*r; //x=max
      r = (-center_z - zin) / k3; float z3 = -center_z; float x3 = xin + k1*r; //z=0;
      r = (center_z - zin) / k3; float z4 = center_z; float x4 = xin + k1*r; //z=max;
      bool flag = 0;
      if (z1 <= center_z && z1 >= -center_z) {
        if (flag == 0) { iin = 0; jin = int(yin + center_y + 0.5); kin = int(z1 + center_z + 0.5); }
        if (flag == 1) { iout = 0; jout = int(yin + center_y + 0.5); kout = int(z1 + center_z + 0.5); }
        flag = 1;
      }
      if (z2 <= center_z && z2 >= -center_z) {
        if (flag == 0) { iin = Xsize - 1; jin = int(yin + center_y + 0.5); kin = int(z2 + center_z + 0.5); }
        if (flag == 1) { iout = Xsize - 1; jout = int(yin + center_y + 0.5); kout = int(z2 + center_z + 0.5); }
        flag = 1;
      }
      if (x3 <= center_x && x3 >= -center_x) {
        if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = int(yin + center_y + 0.5); kin = 0; }
        if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = int(yin + center_y + 0.5); kout = 0; }
        flag = 1;
      }
      if (x4 <= center_x && x4 >= -center_x) {
        if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = int(yin + center_y + 0.5); kin = Zsize - 1; }
        if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = int(yin + center_y + 0.5); kout = Zsize - 1; }
        flag = 1;
      }
      //sorting intersection point by in, out order
      if (flag != 0) {
        if ((iout - iin)*k1 + (kout - kin)*k3<0) { int temp; temp = iin; iin = iout; iout = temp; temp = kin; kin = kout; kout = temp; }
      }
    }
  }
  ///case 3, vertical to z-axis
  if (k1 != 0 && k2 != 0 && k3 == 0) {
    if (zin >= -center_z && zin <= center_z) {
      ////four crossing point
      r = (-center_x - xin) / k1; float x1 = -center_x; float y1 = yin + k2*r; //x=0;
      r = (center_x - xin) / k1; float x2 = center_x; float y2 = yin + k2*r; //x=max;
      r = (-center_y - yin) / k2; float y3 = -center_y; float x3 = xin + k1*r; //y=0;
      r = (center_y - yin) / k2; float y4 = center_y; float x4 = xin + k1*r; //y=max;
      bool flag = 0;
      if (y1 <= center_y && y1 >= -center_y) {
        if (flag == 0) { iin = 0; jin = int(y1 + center_y + 0.5); kin = int(zin + center_z + 0.5); }
        if (flag == 1) { iout = 0; jout = int(y1 + center_y + 0.5); kout = int(zin + center_z + 0.5); }
        flag = 1;
      }
      if (y2 <= center_y && y2 >= -center_y) {
        if (flag == 0) { iin = Xsize - 1; jin = int(y2 + center_y + 0.5); kin = int(zin + center_z + 0.5); }
        if (flag == 1) { iout = Xsize - 1; jout = int(y2 + center_y + 0.5); kout = int(zin + center_z + 0.5); }
        flag = 1;
      }
      if (x3 <= center_x && x3 >= -center_x) {
        if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = 0; kin = int(zin + center_z + 0.5); }
        if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = 0; kout = int(zin + center_z + 0.5); }
        flag = 1;
      }
      if (x4 <= center_x && x4 >= -center_x) {
        if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = Ysize - 1; kin = int(zin + center_z + 0.5); }
        if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = Ysize - 1; kout = int(zin + center_z + 0.5); }
        flag = 1;
      }
      //sorting intersection point by in, out order
      if (flag != 0) {
        if ((iout - iin)*k1 + (jout - jin)*k2<0) { int temp; temp = iin; iin = iout; iout = temp; temp = jin; jin = jout; jout = temp; }
      }
    }
  }
  ///case 4, vertical to plane IJ
  if (abs(k1)<zero && abs(k2)<zero && abs(k3) >= zero) {
    if (xin <= center_x && xin >= -center_x && yin <= center_y && yin >= -center_y) {
      iin = int(xin + center_x + 0.5); iout = iin;
      jin = int(yin + center_y + 0.5); jout = jin;
      if (k3>0) { kin = 0; kout = Zsize - 1; }
      else { kin = Zsize - 1; kout = 0; }
    }
  }
  ///case 5, vertical to IK plane
  if (abs(k1)<zero && abs(k2) >= zero && abs(k3)<zero) {
    if (xin >= -center_x && xin <= center_x && zin >= -center_z && zin <= center_z) {
      iin = int(xin + center_x + 0.5); iout = iin;
      kin = int(zin + center_z + 0.5); kout = kin;
      if (k2>0) { jout = Ysize - 1; jin = 0; }
      else { jin = Ysize - 1; jout = 0; }
    }
  }
  ///case 6, vertical to JK plane
  if (abs(k1) >= zero && abs(k2)<zero && abs(k3)<zero) {
    if (yin >= -center_y && yin<center_y && zin >= -center_z && zin <= center_z) {
      jin = int(yin + center_y + 0.5); jout = jin;
      kin = int(zin + center_z + 0.5); kout = kin;
      if (k1>0) { iout = Xsize - 1; iin = 0; }
      else { iin = Xsize - 1; iout = 0; }
    }
  }
  /// case 7, purely inclined
  if (abs(k1) >= zero && abs(k2) >= zero && abs(k3) >= zero) {
    /// six crossing point
    float x1, x2, x3, x4, x5, x6;
    float y1, y2, y3, y4, y5, y6;
    float z1, z2, z3, z4, z5, z6;
    r = (-center_x - xin) / k1; x1 = -center_x; y1 = yin + k2*r; z1 = zin + k3*r; //x=0
    r = (center_x - xin) / k1; x2 = center_x; y2 = yin + k2*r; z2 = zin + k3*r; //x=max
    r = (-center_y - yin) / k2; x3 = xin + k1*r; y3 = -center_y; z3 = zin + k3*r; //y=0;
    r = (center_y - yin) / k2; x4 = xin + k1*r; y4 = center_y; z4 = zin + k3*r; //y=max
    r = (-center_z - zin) / k3; x5 = xin + k1*r; y5 = yin + k2*r; z5 = -center_z; //z=0;
    r = (center_z - zin) / k3; x6 = xin + k1*r; y6 = yin + k2*r; z6 = center_z; //z=max
    bool flag = 0;
    if (y1 <= center_y && y1 >= -center_y && z1 <= center_z && z1 >= -center_z) {
      if (flag == 0) { iin = 0; jin = int(y1 + center_y + 0.5); kin = int(z1 + center_z + 0.5); }
      if (flag == 1) { iout = 0; jout = int(y1 + center_y + 0.5); kout = int(z1 + center_z + 0.5); }
      flag = 1;
    }
    if (y2 <= center_y && y2 >= -center_y && z2 <= center_z && z2 >= -center_z) {
      if (flag == 0) { iin = Xsize - 1; jin = int(y2 + center_y + 0.5); kin = int(z2 + center_z + 0.5); }
      if (flag == 1) { iout = Xsize - 1; jout = int(y2 + center_y + 0.5); kout = int(z2 + center_z + 0.5); }
      flag = 1;
    }
    if (x3 <= center_x && x3 >= -center_x && z3 <= center_z && z3 >= -center_z) {
      if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = 0; kin = int(z3 + center_z + 0.5); }
      if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = 0; kout = int(z3 + center_z + 0.5); }
      flag = 1;
    }
    if (x4 <= center_x && x4 >= -center_x && z4 <= center_z && z4 >= -center_z) {
      if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = Ysize - 1; kin = int(z4 + center_z + 0.5); }
      if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = Ysize - 1; kout = int(z4 + center_z + 0.5); }
      flag = 1;
    }
    if (x5 <= center_x && x5 >= -center_x && y5 <= center_y && y5 >= -center_y) {
      if (flag == 0) { iin = int(x5 + center_x + 0.5); jin = int(y5 + center_y + 0.5); kin = 0; }
      if (flag == 1) { iout = int(x5 + center_x + 0.5); jout = int(y5 + center_y + 0.5); kout = 0; }
      flag = 1;
    }
    if (x6 <= center_x && x6 >= -center_x && y6 <= center_y && y6 >= -center_y) {
      if (flag == 0) { iin = int(x6 + center_x + 0.5); jin = int(y6 + center_y + 0.5); kin = Zsize - 1; }
      if (flag == 1) { iout = int(x6 + center_x + 0.5); jout = int(y6 + center_y + 0.5); kout = Zsize - 1; }
      flag = 1;
    }
    //sorting intersection point by in, out order
    if ((iout - iin)*k1 + (jout - jin)*k2 + (kout - kin)*k3<0) {
      int temp;
      temp = iin; iin = iout; iout = temp;
      temp = jin; jin = jout; jout = temp;
      temp = kin; kin = kout; kout = temp;
    }
  }
  //////////////////////////////END OF CALCULATING IN AND OUT POINT ON REAL BOUNDARY////////////////////////////////
  if ((iin - center_x - xin)*(iin - center_x - xout) + (jin - center_y - yin)*(jin - center_y - yout) + (kin - center_z - zin)*(kin - center_z - zout)<0 && (iin + jin + kin + iout + jout + kout) != 0 && !(iin == iout && jin == jout && kin == kout)) {
    long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3;
    ilast = iin; jlast = jin; klast = kin;
    do {
      if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; }
      if (ilast == iout) { inext1 = ilast - 1e6; jnext1 = jlast; knext1 = klast; }
      if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; }
      if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; }
      if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 1e6; knext2 = klast; }
      if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; }
      if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; }
      if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1e6; }
      if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; }
      ///determine which one is closer to integration path
      r = k1*inext1 - iin*k1 + k2*jnext1 - k2*jin + k3*knext1 - k3*kin;
      x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
      d1 = sqrt((x - inext1)*(x - inext1) + (y - jnext1)*(y - jnext1) + (z - knext1)*(z - knext1));
      r = k1*inext2 - iin*k1 + k2*jnext2 - k2*jin + k3*knext2 - k3*kin;
      x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
      d2 = sqrt((x - inext2)*(x - inext2) + (y - jnext2)*(y - jnext2) + (z - knext2)*(z - knext2));
      r = k1*inext3 - iin*k1 + k2*jnext3 - k2*jin + k3*knext3 - k3*kin;
      x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
      d3 = sqrt((x - inext3)*(x - inext3) + (y - jnext3)*(y - jnext3) + (z - knext3)*(z - knext3));
      //////End of calculation distance///////////////
      int nin, nout;
      nin = index[iin + jin*Xsize + kin*Xsize*Ysize];
      nout = index[iout + jout*Xsize + kout*Xsize*Ysize];
      if (d1 <= d2 && d1 <= d3) {
        pint[nin + nout*n] += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]);
        ilast = inext1;
      }
      if (d2<d1 && d2 <= d3) {
        pint[nin + nout*n] += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize +
knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); jlast = jnext2; } if (d3<d1&&d3<d2) { pint[nin + nout*n] += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); klast = knext3; } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5); } } } } __global__ void BCiteration(long Xsize, long Ysize, long Zsize, float* pint, float *pcount, float *p, float* pn, int itrNo) { long n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; int iin, jin, kin, iout, jout, kout, indexin, indexout; long nout = blockDim.x*blockIdx.x + threadIdx.x; for (int iteration = 0; iteration<itrNo; iteration++) { nout = blockDim.x*blockIdx.x + threadIdx.x; while (nout<n) { if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } pn[iout + jout*Xsize + kout*Xsize*Ysize] = 0; pcount[nout] = 0; for (int nin = 0; nin<n; nin++) { if (nin <= Xsize*Ysize - 1) { kin = 0; jin = nin / Xsize; iin = nin - Xsize*jin; } if (nin>Xsize*Ysize - 1 && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iin = Xsize - 1; jin = (nin - Xsize*Ysize) / (Zsize - 1); kin = nin - Xsize*Ysize - jin*(Zsize - 1) + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kin = Zsize - 1; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - jin*(Xsize - 1); iin = Xsize - 2 - iin; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jin = 0; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 
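// BCiteration: Jacobi-style relaxation of the boundary pressure. Each sweep
// recomputes boundary node nout as the average of p(nin) + pint[nin + nout*n]
// over all nodes nin that recorded a path integral, writes the result into
// pn, and then copies pn back into p. Note that __syncthreads() only
// synchronizes within a block, so sweep-to-sweep consistency presumably
// relies on the kernel's launch configuration.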
1)*Ysize) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kin*(Xsize - 1); iin = Xsize - 2 - iin; kin = kin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iin = 0; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jin*(Zsize - 2); kin = Zsize - 2 - kin; jin = jin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jin = Ysize - 1; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kin*(Xsize - 2); kin = Zsize - 2 - kin; iin = iin + 1; } /////////// if (pint[nin + nout*n] != 0) { pn[iout + jout*Xsize + kout*Xsize*Ysize] += p[iin + jin*Xsize + kin*Xsize*Ysize] + pint[nin + nout*n]; pcount[nout]++; } } pn[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize] / pcount[nout]; //p[iout+jout*Xsize+kout*Xsize*Ysize]=pn[iout+jout*Xsize+kout*Xsize*Ysize]; //pn[iout+jout*Xsize+kout*Xsize*Ysize]=0; nout = nout + blockDim.x*gridDim.x; //nin=nin+blockDim.y*gridDim.y; } nout = blockDim.x*blockIdx.x + threadIdx.x; while (nout<n) { if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } p[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + 
kout*Xsize*Ysize]; nout = nout + blockDim.x*gridDim.x; } __syncthreads(); } } __global__ void BCiterationFixedBC(long Xsize, long Ysize, long Zsize, float* pint, float *pcount, float *p, float* pn, int itrNo) { long n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; int iin, jin, kin, iout, jout, kout, indexin, indexout; long nout = blockDim.x*blockIdx.x + threadIdx.x; for (int iteration = 0; iteration<itrNo; iteration++) { nout = blockDim.x*blockIdx.x + threadIdx.x; while (nout<n) { if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } pn[iout + jout*Xsize + kout*Xsize*Ysize] = 0; pcount[nout] = 0; for (int nin = 0; nin<n; nin++) { if (nin <= Xsize*Ysize - 1) { kin = 0; jin = nin / Xsize; iin = nin - Xsize*jin; } if (nin>Xsize*Ysize - 1 && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iin = Xsize - 1; jin = (nin - Xsize*Ysize) / (Zsize - 1); kin = nin - Xsize*Ysize - jin*(Zsize - 1) + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kin = Zsize - 1; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - jin*(Xsize - 1); iin = Xsize - 2 - iin; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jin = 0; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kin*(Xsize - 1); iin = Xsize - 2 - iin; kin = kin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + 
(Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iin = 0; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jin*(Zsize - 2); kin = Zsize - 2 - kin; jin = jin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jin = Ysize - 1; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kin*(Xsize - 2); kin = Zsize - 2 - kin; iin = iin + 1; } /////////// /// boundary pressure fixed on the top surface.... if (pint[nin + nout*n] != 0&&jout!=Ysize-1) { pn[iout + jout*Xsize + kout*Xsize*Ysize] += p[iin + jin*Xsize + kin*Xsize*Ysize] + pint[nin + nout*n]; pcount[nout]++; } } pn[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize] / pcount[nout]; //p[iout+jout*Xsize+kout*Xsize*Ysize]=pn[iout+jout*Xsize+kout*Xsize*Ysize]; //pn[iout+jout*Xsize+kout*Xsize*Ysize]=0; nout = nout + blockDim.x*gridDim.x; //nin=nin+blockDim.y*gridDim.y; } nout = blockDim.x*blockIdx.x + threadIdx.x; while (nout<n) { if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } p[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize]; nout = nout + blockDim.x*gridDim.x; } __syncthreads(); } } __global__ void BCiterationWeighted(long Xsize, long Ysize, long Zsize, float* pint, float *pweight, float *p, float* pn, int itrNo) { long n = Xsize*Ysize 
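/*
 * This kernel (BCiterationWeighted) differs from BCiteration only in the
 * averaging: each contribution is scaled by pweight[nin + nout*n] and the sum
 * is normalized by the accumulated weight instead of a flat count. The inner
 * accumulation reduces to (same names as the kernel body):
 *
 *   if (pint[nin + nout*n] != 0) {
 *       pn[iout + jout*Xsize + kout*Xsize*Ysize] +=
 *           (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint[nin + nout*n]) * pweight[nin + nout*n];
 *       pcounttmp += pweight[nin + nout*n];
 *   }
 *   if (pcounttmp != 0)
 *       pn[iout + jout*Xsize + kout*Xsize*Ysize] =
 *           pn[iout + jout*Xsize + kout*Xsize*Ysize] / pcounttmp;   // guarded, unlike the unweighted kernels
 */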
* 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; int iin, jin, kin, iout, jout, kout, indexin, indexout; long nout = blockDim.x*blockIdx.x + threadIdx.x; for (int iteration = 0; iteration<itrNo; iteration++) { nout = blockDim.x*blockIdx.x + threadIdx.x; while (nout<n) { if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } pn[iout + jout*Xsize + kout*Xsize*Ysize] = 0; float pcounttmp = 0; for (int nin = 0; nin<n; nin++) { if (nin <= Xsize*Ysize - 1) { kin = 0; jin = nin / Xsize; iin = nin - Xsize*jin; } if (nin>Xsize*Ysize - 1 && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iin = Xsize - 1; jin = (nin - Xsize*Ysize) / (Zsize - 1); kin = nin - Xsize*Ysize - jin*(Zsize - 1) + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kin = Zsize - 1; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - jin*(Xsize - 1); iin = Xsize - 2 - iin; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jin = 0; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kin*(Xsize - 1); iin = Xsize - 2 - iin; kin = kin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iin = 0; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kin = nin - Xsize*Ysize - Ysize*(Zsize - 
1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jin*(Zsize - 2); kin = Zsize - 2 - kin; jin = jin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jin = Ysize - 1; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kin*(Xsize - 2); kin = Zsize - 2 - kin; iin = iin + 1; } /////////// if (pint[nin + nout*n] != 0) { pn[iout + jout*Xsize + kout*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint[nin + nout*n])*pweight[nin + nout*n]; pcounttmp += pweight[nin + nout*n]; } } if (pcounttmp != 0) { pn[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize] / pcounttmp; } //p[iout+jout*Xsize+kout*Xsize*Ysize]=pn[iout+jout*Xsize+kout*Xsize*Ysize]; //pn[iout+jout*Xsize+kout*Xsize*Ysize]=0; nout = nout + blockDim.x*gridDim.x; //nin=nin+blockDim.y*gridDim.y; } nout = blockDim.x*blockIdx.x + threadIdx.x; while (nout<n) { if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } p[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize]; pn[iout + jout*Xsize + kout*Xsize*Ysize] = 0; nout = nout + blockDim.x*gridDim.x; } __syncthreads(); } } __global__ void BCiterationWeightedFixedBC(long Xsize, long Ysize, long Zsize, float* pint, float *pweight, float *p, float* pn, int itrNo) { long n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; int iin, jin, kin, iout, jout, kout, indexin, indexout; long nout = blockDim.x*blockIdx.x + threadIdx.x; for (int 
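/*
 * All BC kernels use a grid-stride loop so one launch of any size covers all
 * n boundary nodes: each thread starts at blockDim.x*blockIdx.x + threadIdx.x
 * and advances by blockDim.x*gridDim.x. The skeleton, isolated:
 *
 *   __global__ void forEachBoundaryNode(long n)
 *   {
 *       for (long nout = blockDim.x*blockIdx.x + threadIdx.x; nout < n;
 *            nout += (long)blockDim.x*gridDim.x) {
 *           // process boundary node nout
 *       }
 *   }
 *
 * Note that the trailing __syncthreads() in these kernels only synchronizes
 * within a block; iterations are not ordered across blocks, so sweeps from
 * different blocks may interleave. Strict grid-wide ordering would need one
 * kernel launch per iteration (or cooperative groups).
 */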
iteration = 0; iteration<itrNo; iteration++) { nout = blockDim.x*blockIdx.x + threadIdx.x; while (nout<n) { if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } pn[iout + jout*Xsize + kout*Xsize*Ysize] = 0; float pcounttmp = 0; for (int nin = 0; nin<n; nin++) { if (nin <= Xsize*Ysize - 1) { kin = 0; jin = nin / Xsize; iin = nin - Xsize*jin; } if (nin>Xsize*Ysize - 1 && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iin = Xsize - 1; jin = (nin - Xsize*Ysize) / (Zsize - 1); kin = nin - Xsize*Ysize - jin*(Zsize - 1) + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kin = Zsize - 1; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - jin*(Xsize - 1); iin = Xsize - 2 - iin; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jin = 0; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kin*(Xsize - 1); iin = Xsize - 2 - iin; kin = kin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iin = 0; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jin*(Zsize - 2); kin = Zsize - 2 - kin; jin = jin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + 
(Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jin = Ysize - 1; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kin*(Xsize - 2); kin = Zsize - 2 - kin; iin = iin + 1; } /////////// if (pint[nin + nout*n] != 0&&jout!=0) { pn[iout + jout*Xsize + kout*Xsize*Ysize] += (p[iin + jin*Xsize + kin*Xsize*Ysize] + pint[nin + nout*n])*pweight[nin + nout*n]; pcounttmp += pweight[nin + nout*n]; } } if (pcounttmp != 0) { pn[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize] / pcounttmp; } //p[iout+jout*Xsize+kout*Xsize*Ysize]=pn[iout+jout*Xsize+kout*Xsize*Ysize]; //pn[iout+jout*Xsize+kout*Xsize*Ysize]=0; nout = nout + blockDim.x*gridDim.x; //nin=nin+blockDim.y*gridDim.y; } nout = blockDim.x*blockIdx.x + threadIdx.x; while (nout<n) { if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } p[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize]; pn[iout + jout*Xsize + kout*Xsize*Ysize] = 0; nout = nout + blockDim.x*gridDim.x; } __syncthreads(); } } __global__ void omni3dparallellinesESInner(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float*p, float*pn, float*pcountinner) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; long angle = 
threadIdx.y + blockDim.y*blockIdx.y; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.732 / linespacing; int point = threadIdx.x + blockDim.x*blockIdx.x; //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid*NoGrid&&angle<NoAngles) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = 0; if (__sinf(theta) != 0) { phi = asinf(k2 / __sinf(theta)); if (k1 / __sinf(theta)<0) { phi = -phi + PI; } } else { phi = 0; } float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi); float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi); float z = -xprime*__sinf(theta); //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { bodyIntegralInner(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, p, pn, pcountinner); } } point += blockDim.x*gridDim.x; angle += blockDim.y*gridDim.y; } } __global__ void omni3dparallellinesESInnerStepCount(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float*p, float*pn, float*pcountinner, long* IntegrationSteps) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; long angle = threadIdx.y + blockDim.y*blockIdx.y; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.732 / linespacing; int point = threadIdx.x + blockDim.x*blockIdx.x; //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid*NoGrid&&angle<NoAngles) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = 0; if (__sinf(theta) != 0) { phi = asinf(k2 / __sinf(theta)); if (k1 / __sinf(theta)<0) { phi = -phi + PI; } } else { phi = 0; } float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi); float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi); float z = -xprime*__sinf(theta); //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + 
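/*
 * Each thread owns one integration line: a direction (k1,k2,k3) from the
 * precomputed direction table and one lattice point (xprime,yprime) in the
 * plane normal to it; the factor 1.732 is sqrt(3), sizing the lattice to
 * cover the box diagonal. The code recovers phi from k with asinf plus a
 * quadrant fix; a sketch of the same rotation using atan2f, which handles all
 * quadrants directly (assumes k is unit length):
 *
 *   float theta = acosf(k3);
 *   float phi   = atan2f(k2, k1);
 *   float x = xprime*cosf(theta)*cosf(phi) - yprime*sinf(phi);
 *   float y = xprime*cosf(theta)*sinf(phi) + yprime*cosf(phi);
 *   float z = -xprime*sinf(theta);   // a point on the line; its direction stays (k1,k2,k3)
 */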
kout*Xsize*Ysize]; if (nin != nout) { bodyIntegralInnerStepCount(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, p, pn, pcountinner, IntegrationSteps); } } point += blockDim.x*gridDim.x; } } __global__ void omni3dparallellinesESInnerWeighted(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* curl, float*p, float*pn, float*pcountinner) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; long angle = threadIdx.y + blockDim.y*blockIdx.y; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.732 / linespacing; int point = threadIdx.x + blockDim.x*blockIdx.x; //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid*NoGrid&&angle<NoAngles) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = 0; if (__sinf(theta) != 0) { phi = asinf(k2 / __sinf(theta)); if (k1 / __sinf(theta)<0) { phi = -phi + PI; } } else { phi = 0; } float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi); float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi); float z = -xprime*__sinf(theta); //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { bodyIntegralInnerWeighted(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, curl, p, pn, pcountinner); } } point += blockDim.x*gridDim.x; angle += blockDim.y*gridDim.y; } } __global__ void omni3dparallellinesESInnerWeightedMiniCurl(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* curl, float*p, float*pn, float*pcountinner) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; long angle = threadIdx.y + blockDim.y*blockIdx.y; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.732 / linespacing; int point = threadIdx.x + blockDim.x*blockIdx.x; //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid*NoGrid&&angle<NoAngles) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = 
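/*
 * These omni3dparallellines* kernels expect a 2-D launch: line-lattice points
 * stride along x, direction indices along y. A hypothetical host-side launch
 * (block/grid sizes here are illustrative only):
 *
 *   dim3 block(128, 2);
 *   dim3 grid(64, (NoAngles + block.y - 1) / block.y);
 *   omni3dparallellinesESInner<<<grid, block>>>(Xsize, Ysize, Zsize, NoAngles,
 *       linespacing, k1_d, k2_d, k3_d, index_d, deltx, delty, deltz, density,
 *       DuDt_d, DvDt_d, DwDt_d, p_d, pn_d, pcountinner_d);
 *
 * The StepCount variant above strides only point (angle is never advanced),
 * so its launch must supply gridDim.y*blockDim.y >= NoAngles.
 */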
acosf(k3);
    float phi = 0;
    if (__sinf(theta) != 0) {
      phi = asinf(k2 / __sinf(theta));
      if (k1 / __sinf(theta)<0) { phi = -phi + PI; }
    }
    else { phi = 0; }
    float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi);
    float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi);
    float z = -xprime*__sinf(theta);
    //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta);
    int iin, jin, kin, iout, jout, kout;
    cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout);
    int nin, nout;
    if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize) {
      nin = index[iin + jin*Xsize + kin*Xsize*Ysize];
      nout = index[iout + jout*Xsize + kout*Xsize*Ysize];
      if (nin != nout) {
        bodyIntegralInnerMiniCurl(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, curl, p, pn, pcountinner);
      }
    }
    point += blockDim.x*gridDim.x;
    angle += blockDim.y*gridDim.y;
  }
}

__global__ void omni3dparallellinesESInnerSelect(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* curl, float*p, float*pn, float*pcountinner, float threshold)
{
  int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2;
  float center_x = (Xsize - 1) / 2.0;
  float center_y = (Ysize - 1) / 2.0;
  float center_z = (Zsize - 1) / 2.0;
  long angle = threadIdx.y + blockDim.y*blockIdx.y;
  int NoGrid = Xsize;
  if (NoGrid<Ysize) { NoGrid = Ysize; }
  if (NoGrid<Zsize) { NoGrid = Zsize; }
  NoGrid = NoGrid*1.732 / linespacing;
  int point = threadIdx.x + blockDim.x*blockIdx.x;
  //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid;
  while (point<NoGrid*NoGrid&&angle<NoAngles) {
    float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing;
    float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing;
    float k1, k2, k3;
    k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle];
    float theta = acosf(k3);
    float phi = 0;
    if (__sinf(theta) != 0) {
      phi = asinf(k2 / __sinf(theta));
      if (k1 / __sinf(theta)<0) { phi = -phi + PI; }
    }
    else { phi = 0; }
    float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi);
    float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi);
    float z = -xprime*__sinf(theta);
    int iin, jin, kin, iout, jout, kout;
    cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout);
    int nin, nout;
    //jin >= 0 was missing from this bounds check (every sibling kernel has it); without it a negative jin indexes out of bounds
    if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout < Ysize && kout >= 0 && kout<Zsize) {
      nin = index[iin + jin*Xsize + kin*Xsize*Ysize];
      nout = index[iout + jout*Xsize + kout*Xsize*Ysize];
      if (nin != nout) {
        bodyIntegralInnerSelect(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, p, pn, curl, pcountinner, threshold);
      }
    }
    point += blockDim.x*gridDim.x;
    angle += blockDim.y*gridDim.y;
  }
}

__global__ void omni3dparallellinesESInnerSelectFixedBC(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* curl,
float*p, float*pn, float*pcountinner, float threshold) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; long angle = threadIdx.y + blockDim.y*blockIdx.y; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.732 / linespacing; int point = threadIdx.x + blockDim.x*blockIdx.x; //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid*NoGrid&&angle<NoAngles) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = 0; if (__sinf(theta) != 0) { phi = asinf(k2 / __sinf(theta)); if (k1 / __sinf(theta)<0) { phi = -phi + PI; } } else { phi = 0; } float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi); float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi); float z = -xprime*__sinf(theta); //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && iin<Xsize&&jin>=0&&jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize && kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { bodyIntegralInnerSelectFixedBC(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, p, pn, curl, pcountinner, threshold); } } point += blockDim.x*gridDim.x; angle += blockDim.y*gridDim.y; } } __global__ void omni3dparallellinesESInnerSelect2(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float*p, float*pn, float*pcountinner) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; long angle = threadIdx.y + blockDim.y*blockIdx.y; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.732 / linespacing; int point = threadIdx.x + blockDim.x*blockIdx.x; //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid*NoGrid&&angle<NoAngles) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = 0; if (__sinf(theta) != 0) { phi = asinf(k2 / __sinf(theta)); if (k1 / __sinf(theta)<0) { phi = -phi + PI; } } else { phi = 0; } float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi); float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi); float z = -xprime*__sinf(theta); //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, 
&kout);
    int nin, nout;
    if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize && (phi<PI / 4 || phi>3 * PI / 4)) {
      nin = index[iin + jin*Xsize + kin*Xsize*Ysize];
      nout = index[iout + jout*Xsize + kout*Xsize*Ysize];
      if (nin != nout) {
        bodyIntegralInner(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, p, pn, pcountinner);
      }
    }
    point += blockDim.x*gridDim.x;
    angle += blockDim.y*gridDim.y;
  }
}

__global__ void omni2dparallellinesOnFaceInner(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float*p, float*pn, float*pcountinner)
{
  int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2;
  float center_x = (Xsize - 1) / 2.0;
  float center_y = (Ysize - 1) / 2.0;
  float center_z = (Zsize - 1) / 2.0;
  int NoGrid = Xsize;
  if (NoGrid<Ysize) { NoGrid = Ysize; }
  if (NoGrid<Zsize) { NoGrid = Zsize; }
  NoGrid = NoGrid*1.414 / linespacing;
  long angle = threadIdx.y + blockDim.y*blockIdx.y;
  int point = threadIdx.x + blockDim.x*blockIdx.x;
  //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid;
  while (point<NoGrid&&angle<NoAngles) {
    //float(angle) is required: "angle / NoAngles" is integer division and always gave theta = 0
    float theta = float(angle) / NoAngles * 2 * PI;
    ///on XY face
    float k1 = __cosf(theta);
    float k2 = __sinf(theta);
    float k3 = 0;
    float x = __sinf(theta)*(point - NoGrid / 2)*linespacing;
    float y = __cosf(theta)*(point - NoGrid / 2)*linespacing;
    float z = -Zsize / 2.0;
    int iin, jin, kin, iout, jout, kout;
    cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout);
    int nin, nout;
    if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize) {
      nin = index[iin + jin*Xsize + kin*Xsize*Ysize];
      nout = index[iout + jout*Xsize + kout*Xsize*Ysize];
      if (nin != nout) { //the duplicated "if (nin != nout)" has been collapsed to one test
        bodyIntegralInner(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, p, pn, pcountinner);
      }
    }
    ///on XY face 2
    z = Zsize / 2.0;
    cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout);
    if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize) {
      nin = index[iin + jin*Xsize + kin*Xsize*Ysize];
      nout = index[iout + jout*Xsize + kout*Xsize*Ysize];
      if (nin != nout) {
        bodyIntegralInner(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, p, pn, pcountinner);
      }
    }
    point += blockDim.x*gridDim.x;
  }
}

__global__ void omni3dparallellinesESInner2(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float*p, float*pn, float*pcountinner)
{
  int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2;
  float center_x = (Xsize - 1) / 2.0;
  float center_y = (Ysize - 1) / 2.0;
  float center_z = (Zsize - 1) / 2.0;
  long angle = threadIdx.y + blockDim.y*blockIdx.y;
  int NoGrid = Xsize;
  if (NoGrid<Ysize) { NoGrid = Ysize; }
  if (NoGrid<Zsize) { NoGrid = Zsize; }
  NoGrid = NoGrid*1.732 / linespacing;
  int point = threadIdx.x + blockDim.x*blockIdx.x;
  //float
spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; while (point<NoGrid*NoGrid&&angle<NoAngles) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = 0; if (__sinf(theta) != 0) { phi = asinf(k2 / __sinf(theta)); if (k1 / __sinf(theta)<0) { phi = -phi + PI; } } else { phi = 0; } float x = xprime*__cosf(theta)*__cosf(phi) - yprime*__sinf(phi); float y = xprime*__cosf(theta)*__sinf(phi) + yprime*__cosf(phi); float z = -xprime*__sinf(theta); //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { bodyIntegralInner2(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, p, pn, pcountinner); } } point += blockDim.x*gridDim.x; angle += blockDim.y*gridDim.y; } } __global__ void devidecountInner(long Xsize, long Ysize, long Zsize, float* p, float* pn, float* pcountinner) { long tid = threadIdx.x + blockDim.x*blockIdx.x; while (tid<Xsize*Ysize*Zsize) { if (pcountinner[tid]>0) { pn[tid] = pn[tid] / pcountinner[tid]; p[tid] = pn[tid]; pn[tid] = 0; } tid += blockDim.x*gridDim.x; } } __global__ void BCiterationvirtualgrid(long Xsize, long Ysize, long Zsize, int NoTheta, int NoBeta, long* index, long* ninvir, long *noutvir, float* pintvir, float*p, float *pn, int Noitr) { float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; //virtual boundary an ellipsoid int a = Xsize - 1; int b = Ysize - 1; int c = Zsize - 1; float delttheta = PI / NoTheta; float deltbeta = 2 * PI / NoBeta; float xin, yin, zin, xout, yout, zout, k1, k2, k3, x, y, z; int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; int iin, jin, kin, iout, jout, kout, indexin, indexout; for (int iteration = 0; iteration<Noitr; iteration++) { indexin = blockDim.x*blockIdx.x + threadIdx.x; indexout = blockDim.y*blockIdx.y + threadIdx.y; while (indexin<int(PI / delttheta)*int(PI / deltbeta) * 2 && indexout<int(PI / delttheta)*int(PI / deltbeta) * 2) { int nin, nout; nin = ninvir[indexin + indexout*NoTheta*NoBeta]; nout = noutvir[indexin + indexout*NoTheta*NoBeta]; long iout, jout, kout, iin, jin, kin; if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = 
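/*
 * devidecountInner above finishes one interior sweep: pcountinner holds the
 * number of line contributions accumulated into each cell of pn, so the sum
 * becomes a mean, is copied into p, and pn is reset where it was touched. A
 * hypothetical host loop tying the pieces together (pcountinner is assumed
 * cleared between passes, e.g. with cudaMemset, since the kernel only zeroes
 * pn):
 *
 *   for (int it = 0; it < itrNo; it++) {
 *       cudaMemset(pcountinner_d, 0, Xsize*Ysize*Zsize*sizeof(float));
 *       omni3dparallellinesESInner<<<grid, block>>>(Xsize, Ysize, Zsize,
 *           NoAngles, linespacing, k1_d, k2_d, k3_d, index_d, deltx, delty,
 *           deltz, density, DuDt_d, DvDt_d, DwDt_d, p_d, pn_d, pcountinner_d);
 *       devidecountInner<<<256, 256>>>(Xsize, Ysize, Zsize, p_d, pn_d, pcountinner_d);
 *   }
 */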
(nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } if (nin <= Xsize*Ysize - 1) { kin = 0; jin = nin / Xsize; iin = nin - Xsize*jin; } if (nin>Xsize*Ysize - 1 && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iin = Xsize - 1; jin = (nin - Xsize*Ysize) / (Zsize - 1); kin = nin - Xsize*Ysize - jin*(Zsize - 1) + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kin = Zsize - 1; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - jin*(Xsize - 1); iin = Xsize - 2 - iin; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jin = 0; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kin*(Xsize - 1); iin = Xsize - 2 - iin; kin = kin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iin = 0; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jin*(Zsize - 2); kin = Zsize - 2 - kin; jin = jin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jin = Ysize - 1; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kin*(Xsize - 2); kin = Zsize - 2 - kin; iin = iin + 1; } int beta = 0; if (pintvir[indexin + indexout*NoTheta*NoBeta] != 0) { pn[iout + jout*Xsize + kout*Xsize*Ysize] = (pn[iout + jout*Xsize + kout*Xsize*Ysize] + p[iin + jin*Xsize + kin*Xsize*Ysize] + pintvir[indexin + indexout*NoTheta*NoBeta])*0.5; } indexin = indexin + blockDim.x*gridDim.x; indexout = indexout + blockDim.y*gridDim.y; } } } __global__ void calCurlofMaterialAcc(long Xsize, long Ysize, long Zsize, float deltx, float delty, float deltz, float* DuDt, float * DvDt, float * DwDt, float * curl) { int i = blockDim.x*blockIdx.x + threadIdx.x; int j = 
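/*
 * calCurlofMaterialAcc: for pressure integration, grad(p) = -rho*DU/Dt, and a
 * gradient field is curl-free, so |curl(DU/Dt)| measures how far the measured
 * material acceleration is from being integrable; the Weighted/Select kernels
 * use it to down-weight or reject integration paths. The stencil is central
 * differences, falling back to one-sided at the walls via the clamped
 * i0/ie, j0/je, k0/ke indices (the (ie - i0) style divisors keep the grid
 * spacing right in both cases):
 *
 *   curl_x = d(DwDt)/dy - d(DvDt)/dz
 *   curl_y = d(DuDt)/dz - d(DwDt)/dx
 *   curl_z = d(DvDt)/dx - d(DuDt)/dy
 *   curl   = sqrt(curl_x*curl_x + curl_y*curl_y + curl_z*curl_z)
 */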
blockDim.y*blockIdx.y + threadIdx.y; int k = blockDim.z*blockIdx.z + threadIdx.z; while (i<Xsize&&j<Ysize&&k<Zsize) { int i0 = i - 1 >= 0 ? i - 1 : i; int j0 = j - 1 >= 0 ? j - 1 : j; int k0 = k - 1 >= 0 ? k - 1 : k; int ie = i + 1 <= Xsize - 1 ? i + 1 : i; int je = j + 1 <= Ysize - 1 ? j + 1 : j; int ke = k + 1 <= Zsize - 1 ? k + 1 : k; float curlx = (DwDt[i + je*Xsize + k*Xsize*Ysize] - DwDt[i + j0*Xsize + k*Xsize*Ysize]) / (je - j0) / delty; curlx += -(DvDt[i + j*Xsize + ke*Xsize*Ysize] - DvDt[i + j*Xsize + k0*Xsize*Ysize]) / (ke - k0) / deltz; float curly = -(DwDt[ie + j*Xsize + k*Xsize*Ysize] - DwDt[i0 + j*Xsize + k*Xsize*Ysize]) / (ie - i0) / deltx; curly += (DuDt[i + j*Xsize + ke*Xsize*Ysize] - DuDt[i + j*Xsize + k0*Xsize*Ysize]) / (ke - k0) / deltz; float curlz = (DvDt[ie + j*Xsize + k*Xsize*Ysize] - DvDt[i0 + j*Xsize + k*Xsize*Ysize]) / (ie - i0) / deltx; curlz += -(DuDt[i + je*Xsize + k*Xsize*Ysize] - DuDt[i + j0*Xsize + k*Xsize*Ysize]) / (je - j0) / delty; curl[i + j*Xsize + k*Xsize*Ysize] = sqrt(curlx*curlx + curly*curly + curlz*curlz); i += blockDim.x*gridDim.x; j += blockDim.y*gridDim.y; k += blockDim.z*gridDim.z; } } void omni3virtualcpu(long Xsize, long Ysize, long Zsize, long *index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pint, long *pcount) { float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; //virtual boundary an ellipsoid int a = Xsize - 1; int b = Ysize - 1; int c = Zsize - 1; float delttheta = PI / 16; float deltbeta = PI / 16; float xin, yin, zin, xout, yout, zout, k1, k2, k3, x, y, z; int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; int iin, jin, kin, iout, jout, kout, indexin, indexout; CStdioFile log; log.Open(_T("log.dat"), CFile::modeCreate | CFile::modeWrite); for (float thetaout = 0; thetaout<PI; thetaout += delttheta) { for (float betaout = 0; betaout<2 * PI; betaout += deltbeta) { for (float thetain = 0; thetain<PI; thetain += delttheta) { for (float betain = 0; betain<PI; betain += deltbeta) { xin = a*sin(thetain)*cos(betain); yin = b*sin(thetain)*sin(betain); zin = c*cos(thetain); xout = a*sin(thetaout)*cos(betaout); yout = b*sin(thetaout)*sin(betaout); zout = c*cos(thetaout); k1 = xout - xin; k2 = yout - yin; k3 = zout - zin; iin = 0; iout = 0; jin = 0; jout = 0; kin = 0; kout = 0; if (!(k1 == 0 && k2 == 0 && k3 == 0)) { /////case 1, vertical to x-axis if (k1 == 0 && k2 != 0 && k3 != 0) { if (xin >= -center_x&&xin <= center_x) { ////four crossing point;y=0;y=max;z=0;z=max; float r = (-center_y - yin) / k2; float y1 = -center_y; float z1 = zin + k3*r; r = (center_y - yin) / k2; float y2 = center_y; float z2 = zin + k3*r; r = (-center_z - zin) / k3; float z3 = -center_z; float y3 = yin + k2*r; r = (center_z - zin) / k3; float z4 = center_z; float y4 = yin + k2*r; bool flag = 0; if (z1 <= center_z&&z1 >= -center_z)//cross y=0; { if (flag == 0) { iin = int(xin + center_x + 0.5); jin = 0; kin = int(z1 + center_z + 0.5); } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = 0; kout = int(z1 + center_z + 0.5); } flag = 1; } if (z2 <= center_z&&z2 >= -center_z)//y=max; { if (flag == 0) { iin = int(xin + center_x + 0.5); jin = Ysize - 1; kin = int(z2 + center_z + 0.5); } if (flag == 1) { iout = int(xin + center_x + 0.5); jout = Ysize - 1; kout = int(z2 + center_z + 0.5); } flag = 1; } if (y3 <= center_y&&y3 >= -center_y)//z=0; { if (flag == 0) { iin = int(xin + center_x + 0.5); jin = int(y3 + center_y + 0.5); kin 
= 0; }
          if (flag == 1) { iout = int(xin + center_x + 0.5); jout = int(y3 + center_y + 0.5); kout = 0; }
          flag = 1;
        }
        if (y4 <= center_y&&y4 >= -center_y) {
          if (flag == 0) { iin = int(xin + center_x + 0.5); jin = int(y4 + center_y + 0.5); kin = Zsize - 1; }
          if (flag == 1) { iout = int(xin + center_x + 0.5); jout = int(y4 + center_y + 0.5); kout = Zsize - 1; }
        }
        //sorting intersection point by in, out order
        if (flag != 0) {
          if ((jout - jin)*k2 + (kout - kin)*k3<0) {
            int temp;
            temp = jin; jin = jout; jout = temp;
            temp = kin; kin = kout; kout = temp;
          }
        }
      }
    }
    ///case 2, vertical to y-axis
    if (k1 != 0 && k2 == 0 && k3 != 0) {
      if (yin >= -center_y&&yin <= center_y) {
        ////four crossing point
        float r = (-center_x - xin) / k1; float x1 = -center_x; float z1 = zin + k3*r; //x=0;
        r = (center_x - xin) / k1; float x2 = center_x; float z2 = zin + k3*r; //x=max
        r = (-center_z - zin) / k3; float z3 = -center_z; float x3 = xin + k1*r; //z=0;
        r = (center_z - zin) / k3; float z4 = center_z; float x4 = xin + k1*r; //z=max;
        bool flag = 0;
        if (z1 <= center_z&&z1 >= -center_z) {
          if (flag == 0) { iin = 0; jin = int(yin + center_y + 0.5); kin = int(z1 + center_z + 0.5); }
          if (flag == 1) { iout = 0; jout = int(yin + center_y + 0.5); kout = int(z1 + center_z + 0.5); }
          flag = 1;
        }
        if (z2 <= center_z&&z2 >= -center_z) {
          if (flag == 0) { iin = Xsize - 1; jin = int(yin + center_y + 0.5); kin = int(z2 + center_z + 0.5); }
          if (flag == 1) { iout = Xsize - 1; jout = int(yin + center_y + 0.5); kout = int(z2 + center_z + 0.5); }
          flag = 1;
        }
        if (x3 <= center_x&&x3 >= -center_x) {
          if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = int(yin + center_y + 0.5); kin = 0; }
          if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = int(yin + center_y + 0.5); kout = 0; }
          flag = 1;
        }
        if (x4 <= center_x&&x4 >= -center_x) {
          if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = int(yin + center_y + 0.5); kin = Zsize - 1; }
          if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = int(yin + center_y + 0.5); kout = Zsize - 1; }
          flag = 1;
        }
        //sorting intersection point by in, out order
        if (flag != 0) {
          if ((iout - iin)*k1 + (kout - kin)*k3<0) {
            int temp;
            temp = iin; iin = iout; iout = temp;
            temp = kin; kin = kout; kout = temp;
          }
        }
      }
    }
    ///case 3, vertical to z-axis
    if (k1 != 0 && k2 != 0 && k3 == 0) {
      if (zin >= -center_z&&zin <= center_z) {
        ////four crossing point
        float r = (-center_x - xin) / k1; float x1 = -center_x; float y1 = yin + k2*r; //x=0;
        r = (center_x - xin) / k1; float x2 = center_x; float y2 = yin + k2*r; //x=max;
        r = (-center_y - yin) / k2; float y3 = -center_y; float x3 = xin + k1*r; //y=0; (was "-center_y - zin": r must come from yin on this plane)
        r = (center_y - yin) / k2; float y4 = center_y; float x4 = xin + k1*r; //y=max; (same copy-paste slip fixed)
        bool flag = 0;
        if (y1 <= center_y&&y1 >= -center_y) {
          if (flag == 0) { iin = 0; jin = int(y1 + center_y + 0.5); kin = int(zin + center_z + 0.5); }
          if (flag == 1) { iout = 0; jout = int(y1 + center_y + 0.5); kout = int(zin + center_z + 0.5); }
          flag = 1;
        }
        if (y2 <= center_y&&y2 >= -center_y) {
          if (flag == 0) { iin = Xsize - 1; jin = int(y2 + center_y + 0.5); kin = int(zin + center_z + 0.5); }
          if (flag == 1) { iout = Xsize - 1; jout = int(y2 + center_y + 0.5); kout = int(zin + center_z + 0.5); }
          flag = 1;
        }
        if (x3 <= center_x&&x3 >= -center_x) {
          if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = 0; kin = int(zin + center_z + 0.5); }
          if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = 0; kout = int(zin + center_z + 0.5); }
          flag = 1;
        }
        if (x4 <= center_x&&x4 >= -center_x) {
          if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = Ysize - 1; kin = int(zin + center_z +
0.5); } if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = Ysize - 1; kout = int(zin + center_z + 0.5); } flag = 1; } //sorting intersection point by in, out order if (flag != 0) { if ((iout - iin)*k1 + (jout - jin)*k2<0) { int temp; temp = iin; iin = iout; iout = temp; temp = jin; jin = jout; jout = temp; } } } } ///case 4, vertical to plane IJ if (abs(k1)<zero&&abs(k2)<zero&&abs(k3) >= zero) { if (xin <= center_x&&xin >= -center_x&&yin <= center_y&&yin >= -center_y) { iin = int(xin + center_x + 0.5); iout = iin; jin = int(yin + center_y + 0.5); jout = jin; if (k3>0) { kin = 0; kout = Zsize - 1; } else{ kin = Zsize - 1; kout = 0; } } } ///case 5, vertical to IK plane if (abs(k1)<zero&&abs(k2) >= zero&&abs(k3)<zero) { if (xin >= -center_x&&xin <= center_x&&zin >= -center_z&&zin <= center_z) { iin = int(xin + center_x + 0.5); iout = iin; kin = int(zin + center_z + 0.5); kout = kin; if (k2>0) { jout = Ysize - 1; jin = 0; } else { jin = Ysize - 1; jout = 0; } } } ///case 6, vertical to JK plane if (abs(k1) >= zero&&abs(k2)<zero&&abs(k3)<zero) { if (yin >= -center_y&&yin<center_y&&zin >= -center_z&&zin <= center_z) { jin = int(yin + center_y + 0.5); jout = jin; kin = int(zin + center_z + 0.5); kout = kin; if (k1>0) { iout = Xsize - 1; iin = 0; } else { iin = Xsize - 1; iout = 0; } } } /// case 7, purely inclined if (abs(k1) >= zero&&abs(k2) >= zero&&abs(k3) >= zero) { /// six crossing point float r; float x1, x2, x3, x4, x5, x6; float y1, y2, y3, y4, y5, y6; float z1, z2, z3, z4, z5, z6; r = (-center_x - xin) / k1; x1 = -center_x; y1 = yin + k2*r; z1 = zin + k3*r;//x=0 r = (center_x - xin) / k1; x2 = center_x; y2 = yin + k2*r; z2 = zin + k3*r;//x=max r = (-center_y - yin) / k2; x3 = xin + k1*r; y3 = -center_y; z3 = zin + k3*r;//y=0; r = (center_y - yin) / k2; x4 = xin + k1*r; y4 = center_y; z4 = zin + k3*r;//y=max r = (-center_z - zin) / k3; x5 = xin + k1*r; y5 = yin + k2*r; z5 = -center_z;//z=0; r = (center_z - zin) / k3; x6 = xin + k1*r; y6 = yin + k2*r; z6 = center_z;//z=max bool flag = 0; if (y1 <= center_y&&y1 >= -center_y&&z1 <= center_z&&z1 >= -center_z) { if (flag == 0) { iin = 0; jin = int(y1 + center_y + 0.5); kin = int(z1 + center_z + 0.5); } if (flag == 1) { iout = 0; jout = int(y1 + center_y + 0.5); kout = int(z1 + center_z + 0.5); } flag = 1; } if (y2 <= center_y&&y2 >= -center_y&&z2 <= center_z&&z2 >= -center_z) { if (flag == 0) { iin = Xsize - 1; jin = int(y2 + center_y + 0.5); kin = int(z2 + center_z + 0.5); } if (flag == 1) { iout = Xsize - 1; jout = int(y2 + center_y + 0.5); kout = int(z2 + center_z + 0.5); } flag = 1; } if (x3 <= center_x&&x3 >= -center_x&&z3 <= center_z&&z3 >= -center_z) { if (flag == 0) { iin = int(x3 + center_x + 0.5); jin = 0; kin = int(z3 + center_z + 0.5); } if (flag == 1) { iout = int(x3 + center_x + 0.5); jout = 0; kout = int(z3 + center_z + 0.5); } flag = 1; } if (x4 <= center_x&&x4 >= -center_x&&z4 <= center_z&&z4 >= -center_z) { if (flag == 0) { iin = int(x4 + center_x + 0.5); jin = Ysize - 1; kin = int(z4 + center_z + 0.5); } if (flag == 1) { iout = int(x4 + center_x + 0.5); jout = Ysize - 1; kout = int(z4 + center_z + 0.5); } flag = 1; } if (x5 <= center_x&&x5 >= -center_x&&y5 <= center_y&&y5 >= -center_y) { if (flag == 0) { iin = int(x5 + center_x + 0.5); jin = int(y5 + center_y + 0.5); kin = 0; } if (flag == 1) { iout = int(x5 + center_x + 0.5); jout = int(y5 + center_y + 0.5); kout = 0; } flag = 1; } if (x6 <= center_x&&x6 >= -center_x&&y6 <= center_y&&y6 >= -center_y) { if (flag == 0) { iin = int(x6 + center_x + 0.5); jin = int(y6 + 
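/*
 * The seven-case enumeration in this function (lines parallel to an axis, to
 * a coordinate plane, or fully inclined) can be replaced by the standard slab
 * method for ray/box intersection; a sketch of that alternative, assuming the
 * box [-cx,cx] x [-cy,cy] x [-cz,cz] and a nonzero direction (k1,k2,k3)
 * (lineBox is a hypothetical name, returns false if the line misses):
 *
 *   bool lineBox(float ox, float oy, float oz, float k1, float k2, float k3,
 *                float cx, float cy, float cz, float &tin, float &tout)
 *   {
 *       float t0 = -1e30f, t1 = 1e30f;
 *       float o[3] = { ox, oy, oz }, d[3] = { k1, k2, k3 }, c[3] = { cx, cy, cz };
 *       for (int a = 0; a < 3; a++) {
 *           if (fabsf(d[a]) < 1e-12f) {              // line parallel to this slab
 *               if (o[a] < -c[a] || o[a] > c[a]) return false;
 *           } else {
 *               float ta = (-c[a] - o[a]) / d[a];
 *               float tb = ( c[a] - o[a]) / d[a];
 *               if (ta > tb) { float tswap = ta; ta = tb; tb = tswap; }
 *               if (ta > t0) t0 = ta;
 *               if (tb < t1) t1 = tb;
 *           }
 *       }
 *       if (t0 > t1) return false;
 *       tin = t0; tout = t1;                         // entry/exit parameters along the line
 *       return true;
 *   }
 */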
center_y + 0.5); kin = Zsize - 1; }
          if (flag == 1) { iout = int(x6 + center_x + 0.5); jout = int(y6 + center_y + 0.5); kout = Zsize - 1; }
          flag = 1;
        }
        //sorting intersection point by in, out order
        if (flag != 0) {
          if ((iout - iin)*k1 + (jout - jin)*k2 + (kout - kin)*k3<0) {
            int temp;
            temp = iin; iin = iout; iout = temp; //was "iin = temp; iout = temp;", which overwrote both ends with iin instead of swapping
            temp = jin; jin = jout; jout = temp;
            temp = kin; kin = kout; kout = temp;
          }
        }
      }
      //////////////////////////////END OF CALCULATING IN AND OUT POINT ON REAL BOUNDARY////////////////////////////////
      if ((iin - center_x - xin)*(iin - center_x - xout) + (jin - center_y - yin)*(jin - center_y - yout) + (kin - center_z - zin)*(kin - center_z - zout)<0
        && (iin + jin + kin + iout + jout + kout) != 0 && !(iin == iout&&jin == jout&&kin == kout))
      {
        int nin, nout;
        long ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3;
        ilast = iin; jlast = jin; klast = kin;
        do {
          if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; }
          if (ilast == iout) { inext1 = ilast - 1e6; jnext1 = jlast; knext1 = klast; }
          if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; }
          if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; }
          if (jlast == jout) { inext2 = ilast; jnext2 = jlast - 1e6; knext2 = klast; }
          if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; }
          if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; }
          if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1e6; }
          if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; }
          ///determine which one is closer to integration path
          float r, d1, d2, d3;
          r = k1*inext1 - iin*k1 + k2*jnext1 - k2*jin + k3*knext1 - k3*kin;
          x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
          d1 = sqrt((x - inext1)*(x - inext1) + (y - jnext1)*(y - jnext1) + (z - knext1)*(z - knext1));
          r = k1*inext2 - iin*k1 + k2*jnext2 - k2*jin + k3*knext2 - k3*kin;
          x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
          d2 = sqrt((x - inext2)*(x - inext2) + (y - jnext2)*(y - jnext2) + (z - knext2)*(z - knext2));
          r = k1*inext3 - iin*k1 + k2*jnext3 - k2*jin + k3*knext3 - k3*kin;
          x = iin + k1*r; y = jin + k2*r; z = kin + k3*r;
          d3 = sqrt((x - inext3)*(x - inext3) + (y - jnext3)*(y - jnext3) + (z - knext3)*(z - knext3));
          //////End of calculation distance///////////////
          nin = index[iin + jin*Xsize + kin*Xsize*Ysize];
          nout = index[iout + jout*Xsize + kout*Xsize*Ysize];
          /*if(kin==0)
          {
          nin=iin+jin*Xsize;
          }
          if(iin==Xsize-1&&kin!=0)
          {
          nin=Xsize*Ysize-1+kin+(Ysize-1-jin)*(Zsize-1);
          }
          if(kin==Zsize-1&&iin!=Xsize-1)
          {
          nin=Xsize*Ysize-1+(Zsize-1)*Ysize+Xsize-1-iin+jin*(Xsize-1);
          }
          if(jin==0&&iin!=Xsize-1&&kin!=0&&kin!=Zsize-1)
          {
          nin=Xsize*Ysize-1+(Zsize-1)*Ysize+Ysize*(Xsize-1)+Xsize-1-iin+(kin-1)*(Xsize-1);//????
} if(iin==0&&jin!=0&&kin!=0&&kin!=Zsize-1) { nin=Xsize*Ysize-1+(Zsize-1)*Ysize+Ysize*(Xsize-1)+(Xsize-1)*(Zsize-2)+Zsize-1-kin+(jin-1)*(Zsize-2); } if(jin==Ysize-1&&iin!=0&&iin!=Xsize-1&&kin!=0&&kin!=Zsize-1) { nin=Xsize*Ysize-1+(Zsize-1)*Ysize+Ysize*(Xsize-1)+(Xsize-1)*(Zsize-2)+(Ysize-1)*(Zsize-2)+iin+(kin-1)*(Xsize-2); } if(kout==0) { nout=iout+jout*Xsize; } if(iout==Xsize-1&&kout!=0) { nout=Xsize*Ysize-1+kout+(Ysize-1-jout)*(Zsize-1); } if(kout==Zsize-1&&iout!=Xsize-1) { nout=Xsize*Ysize-1+(Zsize-1)*Ysize+Xsize-1-iout+jout*(Xsize-1); } if(jout==0&&iout!=Xsize-1&&kout!=0&&kout!=Zsize-1) { nout=Xsize*Ysize-1+(Zsize-1)*Ysize+Ysize*(Xsize-1)+Xsize-1-iout+(kout-1)*(Xsize-1); } if(iout==0&&jout!=0&&kout!=0&&kout!=Zsize-1) { nout=Xsize*Ysize-1+(Zsize-1)*Ysize+Ysize*(Xsize-1)+(Xsize-1)*(Zsize-2)+Zsize-1-kout+(jout-1)*(Zsize-2); } if(jout==Ysize-1&&iout!=0&&iout!=Xsize-1&&kout!=0&&kout!=Zsize-1) { nout=Xsize*Ysize-1+(Zsize-1)*Ysize+Ysize*(Xsize-1)+(Xsize-1)*(Zsize-2)+(Ysize-1)*(Zsize-2)+iout+(kout-1)*(Xsize-2); }*/ if (d1 <= d2&&d1 <= d3) { pint[nin + nout*n] += -density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); ilast = inext1; } if (d2<d1&&d2 <= d3) { pint[nin + nout*n] += -density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); jlast = jnext2; } if (d3<d1&&d3<d2) { pint[nin + nout*n] += -density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); klast = knext3; } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-5); pcount[nin + nout*n]++; CString str; str.Format(_T("%04d--%04d (%02d,%02d,%02d) (%02d,%02d,%02d) %10.8f %02d\n"), nin, nout, iin, jin, kin, iout, jout, kout, pint[nin + nout*n], pcount[nin + nout*n]); cout << str; log.WriteString(str); } } } } } } int no = 0; for (int k = 0; k<n*n; k++) { if (pcount[k]>0) { pint[k] = pint[k] / pcount[k]; no++; } } cout << no << endl; log.Close(); } float BCIterationCPU(long Xsize, long Ysize, long Zsize, float* pint, float *p, float* pn, float eps, int Noitr) { long n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float pdiffold = 0; float pdiffnew = 0; float pdiffrela = 100; float meanp = 0; long iteration = 0; while (iteration<Noitr&&pdiffrela>eps) { meanp = 0; for (long nout = n - 1; nout >= 0; nout--) { long iout, jout, kout; if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= 
Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } long beta = 0; for (long nin = 0; nin<n; nin++) { long iin, jin, kin; if (nin <= Xsize*Ysize - 1) { kin = 0; jin = nin / Xsize; iin = nin - Xsize*jin; } if (nin>Xsize*Ysize - 1 && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iin = Xsize - 1; jin = (nin - Xsize*Ysize) / (Zsize - 1); kin = nin - Xsize*Ysize - jin*(Zsize - 1) + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kin = Zsize - 1; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - jin*(Xsize - 1); iin = Xsize - 2 - iin; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jin = 0; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kin*(Xsize - 1); iin = Xsize - 2 - iin; kin = kin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iin = 0; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jin*(Zsize - 2); kin = Zsize - 2 - kin; jin = jin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jin = Ysize - 1; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kin*(Xsize - 2); kin = Zsize - 2 - kin; iin = iin + 1; } /////////// if (pint[nin + nout*n] != 0) { pn[iout + jout*Xsize + kout*Xsize*Ysize] += p[iin + jin*Xsize + kin*Xsize*Ysize] + pint[nin + nout*n]; beta++; } } pn[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize] / (beta + 1); p[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize]; //cout<<pn[iout+jout*Xsize+kout*Xsize*Ysize]<<endl; } iteration++; for (long nout = 0; nout<n; nout++) { long iout, jout, kout; if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= 
Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } meanp += pn[iout + jout*Xsize + kout*Xsize*Ysize]; pdiffnew += abs(p[iout + jout*Xsize + kout*Xsize*Ysize] - pn[iout + jout*Xsize + kout*Xsize*Ysize]); p[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize]; //pn[iout+jout*Xsize+kout*Xsize*Ysize]=0; } meanp = meanp / n; pdiffnew = pdiffnew / n; pdiffrela = abs(pdiffnew - pdiffold); pdiffold = pdiffnew; pdiffnew = 0; } return meanp; } float BCIterationCPUFixBC(long Xsize, long Ysize, long Zsize, float* pint, float *p, float* pn, float eps, int Noitr) { long n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float pdiffold = 0; float pdiffnew = 0; float pdiffrela = 100; float meanp = 0; long iteration = 0; while (iteration<Noitr&&pdiffrela>eps) { meanp = 0; for (long nout = n - 1; nout >= 0; nout--) { long iout, jout, kout; if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { 
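/* Each branch here, like its siblings above and below, inverts the face-by-face
boundary numbering -- nodes run over the k=0 plane first (Xsize*Ysize of them),
then the i=Xsize-1, k=Zsize-1, j=0, i=0 and j=Ysize-1 faces -- by subtracting
the node counts of the faces that precede it; this branch recovers
(iout,jout,kout) on the i=0 face. */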
iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } long beta = 0; for (long nin = 0; nin<n; nin++) { long iin, jin, kin; if (nin <= Xsize*Ysize - 1) { kin = 0; jin = nin / Xsize; iin = nin - Xsize*jin; } if (nin>Xsize*Ysize - 1 && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iin = Xsize - 1; jin = (nin - Xsize*Ysize) / (Zsize - 1); kin = nin - Xsize*Ysize - jin*(Zsize - 1) + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kin = Zsize - 1; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - jin*(Xsize - 1); iin = Xsize - 2 - iin; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jin = 0; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kin*(Xsize - 1); iin = Xsize - 2 - iin; kin = kin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iin = 0; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jin*(Zsize - 2); kin = Zsize - 2 - kin; jin = jin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jin = Ysize - 1; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kin*(Xsize - 2); kin = Zsize - 2 - kin; iin = iin + 1; } /////////// if (pint[nin + nout*n] != 0 && jout != Ysize - 1) { pn[iout + jout*Xsize + kout*Xsize*Ysize] += p[iin + jin*Xsize + kin*Xsize*Ysize] + pint[nin + nout*n]; beta++; } } pn[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize] / (beta + 1); p[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize]; //cout<<pn[iout+jout*Xsize+kout*Xsize*Ysize]<<endl; } iteration++; for (long nout = 0; nout<n; nout++) { long iout, jout, kout; if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - 
Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } meanp += pn[iout + jout*Xsize + kout*Xsize*Ysize]; pdiffnew += abs(p[iout + jout*Xsize + kout*Xsize*Ysize] - pn[iout + jout*Xsize + kout*Xsize*Ysize]); p[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize]; //pn[iout+jout*Xsize+kout*Xsize*Ysize]=0; } meanp = meanp / n; pdiffnew = pdiffnew / n; pdiffrela = abs(pdiffnew - pdiffold); pdiffold = pdiffnew; pdiffnew = 0; } return meanp; } float BCIterationCPUFixPoint(long Xsize, long Ysize, long Zsize, float* pint, float *p, float* pn, float eps, int Noitr) { long n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float pdiffold = 0; float pdiffnew = 0; float pdiffrela = 100; float meanp = 0; long iteration = 0; while (iteration<Noitr&&pdiffrela>eps) { meanp = 0; for (long nout = n - 1; nout >= 0; nout--) { long iout, jout, kout; if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 
1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } long beta = 0; for (long nin = 0; nin<n; nin++) { long iin, jin, kin; if (nin <= Xsize*Ysize - 1) { kin = 0; jin = nin / Xsize; iin = nin - Xsize*jin; } if (nin>Xsize*Ysize - 1 && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iin = Xsize - 1; jin = (nin - Xsize*Ysize) / (Zsize - 1); kin = nin - Xsize*Ysize - jin*(Zsize - 1) + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kin = Zsize - 1; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - jin*(Xsize - 1); iin = Xsize - 2 - iin; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jin = 0; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kin*(Xsize - 1); iin = Xsize - 2 - iin; kin = kin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iin = 0; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jin*(Zsize - 2); kin = Zsize - 2 - kin; jin = jin + 1; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jin = Ysize - 1; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kin*(Xsize - 2); kin = Zsize - 2 - kin; iin = iin + 1; } /////////// if (pint[nin + nout*n] != 0 && !(jout == Ysize - 1 && iout == 0 && kout == 0)) { pn[iout + jout*Xsize + kout*Xsize*Ysize] += p[iin + jin*Xsize + kin*Xsize*Ysize] + pint[nin + nout*n]; beta++; } } pn[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize] / (beta + 1); p[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize]; //cout<<pn[iout+jout*Xsize+kout*Xsize*Ysize]<<endl; } iteration++; for (long nout = 0; nout<n; nout++) { long iout, jout, kout; if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - 
Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } meanp += pn[iout + jout*Xsize + kout*Xsize*Ysize]; pdiffnew += abs(p[iout + jout*Xsize + kout*Xsize*Ysize] - pn[iout + jout*Xsize + kout*Xsize*Ysize]); p[iout + jout*Xsize + kout*Xsize*Ysize] = pn[iout + jout*Xsize + kout*Xsize*Ysize]; //pn[iout+jout*Xsize+kout*Xsize*Ysize]=0; } meanp = meanp / n; pdiffnew = pdiffnew / n; pdiffrela = abs(pdiffnew - pdiffold); pdiffold = pdiffnew; pdiffnew = 0; } return meanp; } void omni3Dinner(long Xsize, long Ysize, long Zsize, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, long *pcount, float *p, float* pn, int itrNo) { int iteration = 0; float rms = 0; long n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; while (iteration<itrNo) { for (int nin = 0; nin<n; nin = nin + 1) { for (int nout = 0; nout<n; nout = nout + 1) { int iout, jout, kout; int facein, faceout; if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; faceout = 1; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; faceout = 2; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; faceout = 3; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; faceout = 4; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) 
- (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; faceout = 5; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; faceout = 6; } int iin, jin, kin; if (nin <= Xsize*Ysize - 1) { kin = 0; jin = nin / Xsize; iin = nin - Xsize*jin; facein = 1; } if (nin>Xsize*Ysize - 1 && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iin = Xsize - 1; jin = (nin - Xsize*Ysize) / (Zsize - 1); kin = nin - Xsize*Ysize - jin*(Zsize - 1) + 1; facein = 2; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kin = Zsize - 1; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - jin*(Xsize - 1); iin = Xsize - 2 - iin; facein = 3; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jin = 0; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kin*(Xsize - 1); iin = Xsize - 2 - iin; kin = kin + 1; facein = 4; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nin <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iin = 0; jin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jin*(Zsize - 2); kin = Zsize - 2 - kin; jin = jin + 1; facein = 5; } if (nin>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jin = Ysize - 1; kin = (nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iin = nin - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kin*(Xsize - 2); kin = Zsize - 2 - kin; iin = iin + 1; facein = 6; } int ilast, jlast, klast, inext1, inext2, inext3, jnext1, jnext2, jnext3, knext1, knext2, knext3; ilast = iin; jlast = jin; klast = kin; if (nin != nout&&nin >= 0 && nin<n&&nout >= 0 && nout<n) { float k1 = iout - iin; float k2 = jout - jin; float k3 = kout - kin; float l = sqrt(k1*k1 + k2*k2 + k3*k3); k1 = k1 / l; k2 = k2 / l; k3 = k3 / l; //cout<<"indexin: "<<nin<<" indexout:"<<nout<<endl; //cout<<'('<<iin<<','<<jin<<','<<kin<<") "<<'('<<iout<<','<<jout<<','<<kout<<") "<<endl; //log<<"indexin: "<<nin<<" indexout:"<<nout<<endl; //log<<'('<<iin<<','<<jin<<','<<kin<<") "<<'('<<iout<<','<<jout<<','<<kout<<") "<<endl; do { if (ilast<iout) { inext1 = ilast + 1; jnext1 = jlast; knext1 = klast; } if (ilast == iout) { inext1 = ilast - 1e6; jnext1 = jlast; knext1 = klast; } if (ilast>iout) { inext1 = ilast - 1; jnext1 = jlast; knext1 = klast; } if (jlast<jout) { inext2 = ilast; jnext2 = jlast + 1; knext2 = klast; } if (jlast == 
jout) { inext2 = ilast; jnext2 = jlast - 1e6; knext2 = klast; } if (jlast>jout) { inext2 = ilast; jnext2 = jlast - 1; knext2 = klast; } if (klast<kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast + 1; } if (klast == kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1e6; } if (klast>kout) { inext3 = ilast; jnext3 = jlast; knext3 = klast - 1; } ///determine which one is closer to integration path float r, d1, d2, d3, x, y, z; r = k1*inext1 - iin*k1 + k2*jnext1 - k2*jin + k3*knext1 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r; d1 = sqrt((x - inext1)*(x - inext1) + (y - jnext1)*(y - jnext1) + (z - knext1)*(z - knext1)); r = k1*inext2 - iin*k1 + k2*jnext2 - k2*jin + k3*knext2 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r; d2 = sqrt((x - inext2)*(x - inext2) + (y - jnext2)*(y - jnext2) + (z - knext2)*(z - knext2)); r = k1*inext3 - iin*k1 + k2*jnext3 - k2*jin + k3*knext3 - k3*kin; x = iin + k1*r; y = jin + k2*r; z = kin + k3*r; d3 = sqrt((x - inext3)*(x - inext3) + (y - jnext3)*(y - jnext3) + (z - knext3)*(z - knext3)); //////End of calculation distance/////////////// //path 1 if (d1 <= d2&&d1 <= d3) { pn[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] += p[ilast + jlast*Xsize + klast*Xsize*Ysize] - density*(inext1 - ilast)*deltx*0.5*(DuDt[inext1 + jnext1*Xsize + knext1*Xsize*Ysize] + DuDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); pcount[inext1 + jnext1*Xsize + knext1*Xsize*Ysize]++; //pint[nin+nout*n]+=-density*(inext1-ilast)*deltx*0.5*(DuDt[inext1+jnext1*Xsize+knext1*Xsize*Ysize]+DuDt[ilast+jlast*Xsize+klast*Xsize*Ysize]); ilast = inext1; } if (d2<d1&&d2 <= d3) { pn[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] += p[ilast + jlast*Xsize + klast*Xsize*Ysize] - density*(jnext2 - jlast)*delty*0.5*(DvDt[inext2 + jnext2*Xsize + knext2*Xsize*Ysize] + DvDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); pcount[inext2 + jnext2*Xsize + knext2*Xsize*Ysize]++; //pint[nin+nout*n]+=-density*(jnext2-jlast)*delty*0.5*(DvDt[inext2+jnext2*Xsize+knext2*Xsize*Ysize]+DvDt[ilast+jlast*Xsize+klast*Xsize*Ysize]); jlast = jnext2; } if (d3<d1&&d3<d2) { pn[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] += p[ilast + jlast*Xsize + klast*Xsize*Ysize] - density*(knext3 - klast)*deltz*0.5*(DwDt[inext3 + jnext3*Xsize + knext3*Xsize*Ysize] + DwDt[ilast + jlast*Xsize + klast*Xsize*Ysize]); pcount[inext3 + jnext3*Xsize + knext3*Xsize*Ysize]++; //pint[nin+nout*n]+=-density*(knext3-klast)*deltz*0.5*(DwDt[inext3+jnext3*Xsize+knext3*Xsize*Ysize]+DwDt[ilast+jlast*Xsize+klast*Xsize*Ysize]); klast = knext3; } } while (abs(ilast - iout) + abs(jlast - jout) + abs(klast - kout)>1e-3); } //cout<<thetain<<' '<<betain<<endl; //cout<<thetaout<<' '<<betaout<<endl; //cout<<"k1="<<k1<<" k2="<<k2<<" k3="<<k3<<endl; //cout<<indexin<<" "<<indexout<<endl; } } rms = 0; for (int k = 0; k<Xsize*Ysize*Zsize; k++) { pn[k] = pn[k] / pcount[k]; pcount[k] = 0; rms += (p[k] - pn[k])*(p[k] - pn[k]); } rms = sqrt(rms / Xsize / Ysize / Zsize); cout << "Iteration: " << iteration << " rms: " << rms << endl; memcpy(p, pn, sizeof(float)*Xsize*Ysize*Zsize); memset(pn, 0, sizeof(float)*Xsize*Ysize*Zsize); iteration++; } } void calIndex(long*index, long Xsize, long Ysize, long Zsize) { long n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; for (long nout = n - 1; nout >= 0; nout--) { long iout, jout, kout; if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; } if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize 
- 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; } if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; } index[iout + jout*Xsize + kout*Xsize*Ysize] = nout; } } void omni3dparallellinesEqualSpacingCPU(long Xsize, long Ysize, long Zsize, int NoAngles, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float* pint, float* pcount, float* pcountinner) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; int NoGrid = 16; float spacing = 1; //CStdioFile log; //log.Open("log.dat",CFile::modeCreate|CFile::modeWrite); //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; for (int angle = 0; angle<NoAngles; angle++) { for (int point = 0; point<NoGrid*NoGrid; point++) { float xprime = (float(point / NoGrid) - 0.5*NoGrid)*spacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*NoGrid)*spacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = asinf(k2 / sinf(theta)); if (k1 / sinf(theta)<0) { phi = -phi + PI; } float x = xprime*cosf(theta)*cosf(phi) - yprime*sinf(phi); float y = xprime*cosf(theta)*sinf(phi) + yprime*cosf(phi); float z = -xprime*sinf(theta); //float k1=sinf(theta)*cosf(phi); //float k2=sinf(theta)*sinf(phi); //float k3=cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { pint[nin + nout*n] += bodyIntegral(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, 
k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, pcountinner); pcount[nin + nout*n]++; } //CString str; //str.Format(_T("%6.4f %6.4f %6.4f %6.4f %6.4f %6.4f %6.4f %6.4f %02d %02d %02d %02d %02d %02d\n"),theta,phi,k1,k2,k3,x,y,z,iin,jin,kin,iout,jout,kout); //if(angle==10000/2-1) //{ // log.WriteString(str); //} } } } //log.Close(); } void omni3dparallellinesESInnerCPU(long Xsize, long Ysize, long Zsize, int NoAngles, float linespacing, float* k1_d, float* k2_d, float* k3_d, long*index, float deltx, float delty, float deltz, float density, float* DuDt, float *DvDt, float *DwDt, float*p, float*pn, float*pcountinner) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; float center_x = (Xsize - 1) / 2.0; float center_y = (Ysize - 1) / 2.0; float center_z = (Zsize - 1) / 2.0; int NoGrid = Xsize; if (NoGrid<Ysize) { NoGrid = Ysize; } if (NoGrid<Zsize) { NoGrid = Zsize; } NoGrid = NoGrid*1.732; //float spacing=sqrt(float(Xsize*Xsize+Ysize*Ysize+Zsize*Zsize))/NoGrid; for (int angle = 0; angle<NoAngles; angle++) { for (int point = 0; point<NoGrid*NoGrid; point++) { float xprime = (float(point / NoGrid) - 0.5*(NoGrid - 1))*linespacing; float yprime = (float(point - point / NoGrid*NoGrid) - 0.5*(NoGrid - 1))*linespacing; float k1, k2, k3; k1 = k1_d[angle]; k2 = k2_d[angle]; k3 = k3_d[angle]; float theta = acosf(k3); float phi = asinf(k2 / sinf(theta)); if (k1 / sinf(theta)<0) { phi = -phi + PI; } float x = xprime*cosf(theta)*cosf(phi) - yprime*sinf(phi); float y = xprime*cosf(theta)*sinf(phi) + yprime*cosf(phi); float z = -xprime*sinf(theta); //float k1=__sinf(theta)*__cosf(phi); //float k2=__sinf(theta)*__sinf(phi); //float k3=__cosf(theta); int iin, jin, kin, iout, jout, kout; cross2point(Xsize, Ysize, Zsize, &iin, &jin, &kin, x, y, z, k1, k2, k3, &iout, &jout, &kout); int nin, nout; if (iin >= 0 && iin<Xsize&&jin >= 0 && jin<Ysize&&kin >= 0 && kin<Zsize&&iout >= 0 && iout<Xsize&&jout >= 0 && jout<Ysize&&kout >= 0 && kout<Zsize) { nin = index[iin + jin*Xsize + kin*Xsize*Ysize]; nout = index[iout + jout*Xsize + kout*Xsize*Ysize]; if (nin != nout) { bodyIntegralInner(Xsize, Ysize, Zsize, iin, jin, kin, iout, jout, kout, x, y, z, k1, k2, k3, deltx, delty, deltz, density, DuDt, DvDt, DwDt, p, pn, pcountinner); } } } } } void devidecountCPU(long Xsize, long Ysize, long Zsize, float* pint, float* pcount) { int n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2; for (int tid = 0; tid<n*n; tid++) { if (pcount[tid]>1) { pint[tid] /= pcount[tid]; } } } void devidecountInnerCPU(long Xsize, long Ysize, long Zsize, float* p, float* pn, float* pcountinner) { for (int tid = 0; tid<Xsize*Ysize*Zsize; tid++) { if (pcountinner[tid]>1) { p[tid] = pn[tid] / pcountinner[tid]; pn[tid] = 0; } } } void calCurlofMaterialAccCPU(long Xsize, long Ysize, long Zsize, float deltx, float delty, float deltz, float* DuDt, float * DvDt, float * DwDt, float * curl,float* mask) { for (int k = 0; k<Zsize; k++) { for (int j = 0; j<Ysize; j++) { for (int i = 0; i<Xsize; i++) { int i0 = i - 1 >= 0 ? i - 1 : i; int j0 = j - 1 >= 0 ? j - 1 : j; int k0 = k - 1 >= 0 ? k - 1 : k; int ie = i + 1 <= Xsize - 1 ? i + 1 : i; int je = j + 1 <= Ysize - 1 ? j + 1 : j; int ke = k + 1 <= Zsize - 1 ? 
k + 1 : k;
float curlx = (DwDt[i + je*Xsize + k*Xsize*Ysize] - DwDt[i + j0*Xsize + k*Xsize*Ysize]) / (je - j0) / delty;
curlx += -(DvDt[i + j*Xsize + ke*Xsize*Ysize] - DvDt[i + j*Xsize + k0*Xsize*Ysize]) / (ke - k0) / deltz;
float curly = -(DwDt[ie + j*Xsize + k*Xsize*Ysize] - DwDt[i0 + j*Xsize + k*Xsize*Ysize]) / (ie - i0) / deltx;
curly += (DuDt[i + j*Xsize + ke*Xsize*Ysize] - DuDt[i + j*Xsize + k0*Xsize*Ysize]) / (ke - k0) / deltz;
float curlz = (DvDt[ie + j*Xsize + k*Xsize*Ysize] - DvDt[i0 + j*Xsize + k*Xsize*Ysize]) / (ie - i0) / deltx;
curlz += -(DuDt[i + je*Xsize + k*Xsize*Ysize] - DuDt[i + j0*Xsize + k*Xsize*Ysize]) / (je - j0) / delty;
curl[i + j*Xsize + k*Xsize*Ysize] = sqrt(curlx*curlx + curly*curly + curlz*curlz)*mask[i + j*Xsize + k*Xsize*Ysize];
} } } }
void thredholdHistMaterialAccCPU(int Imax, int Jmax, int Kmax, float* curl, float percentage, float* threshold)
{
//get min max values for curl;
float minv = 1e4; float maxv = -1e4;
for (int i = 0; i<Imax*Jmax*Kmax; i++) { if (minv>curl[i]) { minv = curl[i]; } if (maxv<curl[i]) { maxv = curl[i]; } }
//generate 10000 bins;
int * hist; int N = 10000; hist = new int[N]; memset(hist, 0, sizeof(int)*N);
for (int i = 0; i<Imax*Jmax*Kmax; i++) { int ind = (int)((curl[i] - minv) / (maxv - minv)*N); if (ind >= N) { ind = N - 1; } hist[ind]++; }
float totnum = 0;
for (int j = N - 1; j >= 0; j--) { totnum += hist[j]; if (totnum >= Imax*Jmax*Kmax*percentage) { threshold[0] = float(j) / N*(maxv - minv) + minv; delete[] hist; return; } }
threshold[0] = maxv;
delete[] hist;
}
__global__ void calIndexGPU(long*index, long Xsize, long Ysize, long Zsize)
{
long nout = threadIdx.x + blockIdx.x*blockDim.x;
long n = Xsize*Ysize * 2 + (Zsize - 2)*Ysize * 2 + (Xsize - 2)*(Zsize - 2) * 2;
while (nout<n)
{
long iout, jout, kout;
if (nout <= Xsize*Ysize - 1) { kout = 0; jout = nout / Xsize; iout = nout - Xsize*jout; }
if (nout>Xsize*Ysize - 1 && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1)) { iout = Xsize - 1; jout = (nout - Xsize*Ysize) / (Zsize - 1); kout = nout - Xsize*Ysize - jout*(Zsize - 1) + 1; }
if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize) { kout = Zsize - 1; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1)) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - jout*(Xsize - 1); iout = Xsize - 2 - iout; }
if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize&&nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2)) { jout = 0; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize) / (Xsize - 1); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - kout*(Xsize - 1); iout = Xsize - 2 - iout; kout = kout + 1; }
if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) && nout <= Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { iout = 0; jout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2)) / (Zsize - 2); kout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - jout*(Zsize - 2); kout = Zsize - 2 - kout; jout = jout + 1; }
if (nout>Xsize*Ysize - 1 + Ysize*(Zsize - 1) + (Xsize - 1)*Ysize + (Xsize - 1)*(Zsize - 2) + (Ysize - 1)*(Zsize - 2)) { jout = Ysize - 1; kout = (nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2)) / (Xsize - 2); iout = nout - Xsize*Ysize - Ysize*(Zsize - 1) - (Xsize - 1)*Ysize - (Xsize - 1)*(Zsize - 2) - (Ysize - 1)*(Zsize - 2) - kout*(Xsize - 2); kout = Zsize - 2 - kout; iout = iout + 1; }
index[iout + jout*Xsize + kout*Xsize*Ysize] = nout;
nout += blockDim.x*gridDim.x;
}
}
/////////////////////////////For Experimental Data Version///////////////////////////////
int main()
{
cudaDeviceProp prop;
long DeviceNo=0;
cudaSetDevice(DeviceNo);
cudaGetDeviceProperties(&prop, DeviceNo);
printf( " ----------- General Information for device %d --------\n", DeviceNo );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap) printf( "Enabled\n" ); else printf( "Disabled\n" );
printf( "Kernel execution timeout : " );
if (prop.kernelExecTimeoutEnabled) printf( "Enabled\n" ); else printf( "Disabled\n" );
printf( " ------------ Memory Information for device %d ---------\n", DeviceNo );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", DeviceNo );
printf( "Multiprocessor count: %d\n", prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] );
printf( "\n" );
ofstream log;
log.open("log.dat");
long ImaxOrg,JmaxOrg,KmaxOrg,Imax,Jmax,Kmax,n,PlaneSt,Planedt,PlaneEnd,FileNumSt,FileNumDelt,FileNumEnd;
float rho,scale,linespacing;
float density=1;
float eps=1e-10;
float meanpcal=0;
float meanpdns=0;
float* x,*y,*z,*u,*v,*w,*dudt,*dvdt,*dwdt,*pint,*p,*pn,*pdns,*RHS,*curl,*mask;
float* k1,*k2,*k3;
float* dudt_d,*dvdt_d,*dwdt_d,*pint_d,*p_d,*pn_d,*curl_d;
float* k1_d,*k2_d,*k3_d;
long *index,*index_d;
float *pcountinner,*pcountinner_d;
float* pcount_d,*pcountitr_d,*pcount;
float* pweight_d;
float threshold;
float pref=0;
int NoAngles=10000;
int NoItr=100;
int cutzs,cutze; int cutxs,cutxe; int cutys,cutye;
Imax=64; Jmax=64; Kmax=64;
float deltx=0.006135923151543; float delty=0.006135923151543; float deltz=0.006135923151543;
CString pathpressure,pathacc,fileacc,basefile;
CString filegrid;
///////////////////Reading parameters//////////////////////////////
CStdioFile par;
CString str;
if (!par.Open(_T("Parameter_Omni3D.dat"), CFile::modeRead))
{
cout << "Parameter input file: \"Parameter_Omni3D.dat\" open error" << endl;
//MessageBox(NULL, _T("Parameter input file: \"Parameter_Omni3D.dat\" open error"), _T("Omni3D Message"), MB_OK);
cin >> Imax;
return 1;
}
par.ReadString(str);ImaxOrg=_wtoi(str); par.ReadString(str);JmaxOrg=_wtoi(str); par.ReadString(str);KmaxOrg=_wtoi(str); par.ReadString(str);deltx=_wtof(str); par.ReadString(str);delty=_wtof(str); par.ReadString(str);deltz=_wtof(str); par.ReadString(str);density=_wtof(str); par.ReadString(str);scale=_wtoi(str); par.ReadString(str);linespacing=_wtof(str); par.ReadString(str);NoAngles=_wtoi(str); par.ReadString(filegrid); par.ReadString(pathacc); par.ReadString(pathpressure); par.ReadString(str);NoItr=_wtoi(str); par.ReadString(str); threshold = _wtof(str);
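/* Parameter_Omni3D.dat is read one value per line, in the order of the
ReadString calls: ImaxOrg, JmaxOrg, KmaxOrg, deltx, delty, deltz, density,
scale, linespacing, NoAngles, grid-point file, acceleration folder, pressure
folder, NoItr, threshold, then the optional pref and crop bounds below
(cutxs/cutxe, cutys/cutye, cutzs/cutze), which fall back to defaults when
their line is blank. */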
par.ReadString(str); pref = _wtof(str); if (str == ""){ pref = 0; } par.ReadString(str); cutxs = _wtoi(str); if (str == ""){ cutxs = 0; } par.ReadString(str); cutxe = _wtoi(str); if (str == ""){ cutxe = ImaxOrg-1; } par.ReadString(str); cutys = _wtoi(str); if (str == ""){ cutys = 0; } par.ReadString(str); cutye = _wtoi(str); if (str == ""){ cutye = JmaxOrg-1; } par.ReadString(str); cutzs = _wtoi(str); if (str == ""){ cutzs = 0; } par.ReadString(str); cutze = _wtoi(str); if (str == ""){ cutze = KmaxOrg-1; } par.Close(); ////////////////////////////////Reading parameter completed//////////////////////// Imax=cutxe-cutxs+1; Jmax=cutye-cutys+1; Kmax=cutze-cutzs+1; x=new float[Imax*Jmax*Kmax]; y=new float[Imax*Jmax*Kmax]; z=new float[Imax*Jmax*Kmax]; u=new float[Imax*Jmax*Kmax]; v=new float[Imax*Jmax*Kmax]; w=new float[Imax*Jmax*Kmax]; dudt=new float[Imax*Jmax*Kmax]; dvdt=new float[Imax*Jmax*Kmax]; dwdt=new float[Imax*Jmax*Kmax]; n=Imax*Jmax*2+(Jmax-2)*Kmax*2+(Imax-2)*(Kmax-2)*2; p=new float[Imax*Jmax*Kmax]; pn=new float[Imax*Jmax*Kmax]; pdns=new float[Imax*Jmax*Kmax]; RHS=new float[Imax*Jmax*Kmax]; curl = new float[Imax*Jmax*Kmax]; mask = new float[Imax*Jmax*Kmax]; pint=new float[n*n]; pcountinner=new float[Imax*Jmax*Kmax]; pcount=new float[n*n]; index=new long[Imax*Jmax*Kmax]; k1=new float[NoAngles]; k2=new float[NoAngles]; k3=new float[NoAngles]; memset(p,0,sizeof(float)*Imax*Jmax*Kmax); memset(pn,0,sizeof(float)*Imax*Jmax*Kmax); memset(RHS,0,sizeof(float)*Imax*Jmax*Kmax); memset(curl, 0, sizeof(float)*Imax*Jmax*Kmax); memset(mask, 0, sizeof(float)*Imax*Jmax*Kmax); memset(pint,0,sizeof(float)*n*n); memset(pcountinner,0,sizeof(float)*Imax*Jmax*Kmax); memset(pcount,0,sizeof(float)*n*n); //calIndex(index,Imax,Jmax,Kmax); CStdioFile fin; ////Read Random Numbers; cout<<"Reading virtual grid points on the sphere.........."<<endl; if (!fin.Open(filegrid, CFile::modeRead)) { cout << "Virtual grid points:" << CT2A(filegrid)<<" open error" << endl; //MessageBox(NULL, _T("Parameter input file: \"Parameter_Omni3D.dat\" open error"), _T("Omni3D Message"), MB_OK); cin >> Imax; return; } for(int j=0;j<NoAngles;j++) { long pos; fin.ReadString(str); pos=str.ReverseFind(' '); k3[j]=_wtof(str.Right(str.GetLength()-pos-1)); for(long m=0;m<1;m++) { str=str.Left(pos); pos=str.ReverseFind(' '); } k2[j]=_wtof(str.Right(str.GetLength()-pos-1)); for(long m=0;m<1;m++) { str=str.Left(pos); pos=str.ReverseFind(' '); } k1[j]=_wtof(str.Right(str.GetLength()-pos-1)); for(long m=0;m<1;m++) { str=str.Left(pos); pos=str.ReverseFind(' '); } } fin.Close(); cout << "done" << endl; /////////////////////////////////////////////// cudaMalloc((void **)&dudt_d,sizeof(float)*Imax*Jmax*Kmax); cudaMalloc((void **)&dvdt_d,sizeof(float)*Imax*Jmax*Kmax); cudaMalloc((void **)&dwdt_d,sizeof(float)*Imax*Jmax*Kmax); cudaMalloc((void **)&curl_d,sizeof(float)*Imax*Jmax*Kmax); cudaMalloc((void **)&pint_d,sizeof(float)*n*n); cudaMalloc((void **)&pcount_d,sizeof(float)*n*n); cudaMalloc((void **)&pweight_d,sizeof(float)*n*n); cudaMalloc((void **)&p_d,sizeof(float)*Imax*Jmax*Kmax); cudaMalloc((void **)&pn_d,sizeof(float)*Imax*Jmax*Kmax); cudaMalloc((void **)&index_d,sizeof(long)*Imax*Jmax*Kmax); //cudaMalloc((void**)&pcountitr_d,sizeof(int)*n); cudaMalloc((void**)&pcountinner_d,sizeof(float)*Imax*Jmax*Kmax); cudaMalloc((void**)&k1_d,sizeof(float)*NoAngles); cudaMalloc((void**)&k2_d,sizeof(float)*NoAngles); cudaMalloc((void**)&k3_d,sizeof(float)*NoAngles); //////////////////////End of allocate memory on GPU////////////// cudaMemcpy(k1_d, k1, 
sizeof(float)*NoAngles, cudaMemcpyHostToDevice); cudaMemcpy(k2_d, k2, sizeof(float)*NoAngles, cudaMemcpyHostToDevice); cudaMemcpy(k3_d, k3, sizeof(float)*NoAngles, cudaMemcpyHostToDevice); ///Read all the files inside a folder//////////////////////////// //Step.1 judge whether folder exsists/////////////////////////// if (!folderExists(pathacc)) { cout << "Acceleration folder does not exist: " << CT2A(pathacc)<< endl; //MessageBox(NULL, _T("Parameter input file: \"Parameter_Omni3D.dat\" open error"), _T("Omni3D Message"), MB_OK); cin >> Imax; //MessageBox(NULL, _T("Acceleration folder does not exist"), _T("Omni3D Message"), MB_OK); return; } if (!folderExists(pathpressure)) { createFolder(pathpressure); } // Step.2 Read all the acceleration filenames//// vector<string> filesacc; vector<string> filesvel; string s = CT2A(pathacc); getFiles(s, filesacc); ///////////////////////////////////////////////////////////////// int size = filesacc.size(); cout << "Processing Starts" << endl; log << "Processing Starts" << endl; for(int FileNum=0;FileNum<size;FileNum++) { fileacc=filesacc[FileNum].c_str(); int pos = fileacc.ReverseFind('\\'); cout << CT2A(fileacc.Right(fileacc.GetLength() - pos - 1)) <<endl; log << CT2A(fileacc.Right(fileacc.GetLength() - pos - 1)); if (!fin.Open(fileacc, CFile::modeRead)){ log << " failed to open" << endl; continue; } fin.ReadString(str);fin.ReadString(str);fin.ReadString(str); //fin.ReadString(str);fin.ReadString(str);fin.ReadString(str); //With mask files... for(long k=0;k<KmaxOrg;k++) { for(long j=0;j<JmaxOrg;j++) { for(long i=0;i<ImaxOrg;i++) { long pos; int ind=i-cutxs+(j-cutys)*(cutxe-cutxs+1)+(k-cutzs)*(cutxe-cutxs+1)*(cutye-cutys+1); fin.ReadString(str); str = str.TrimRight(); if(i>=cutxs&&i<=cutxe&&j>=cutys&&j<=cutye&&k>=cutzs&&k<=cutze) { pos = str.ReverseFind(' '); mask[ind] = _wtof(str.Right(str.GetLength() - pos - 1)); for (long m = 0; m<1; m++) { str = str.Left(pos); str = str.TrimRight(); pos = str.ReverseFind(' '); } pos=str.ReverseFind(' '); dwdt[ind]=_wtof(str.Right(str.GetLength()-pos-1)); for(long m=0;m<1;m++) { str=str.Left(pos); str = str.TrimRight(); pos=str.ReverseFind(' '); } dvdt[ind]=_wtof(str.Right(str.GetLength()-pos-1)); for(long m=0;m<1;m++) { str=str.Left(pos); str = str.TrimRight(); pos=str.ReverseFind(' '); } dudt[ind]=_wtof(str.Right(str.GetLength()-pos-1)); for(long m=0;m<1;m++) { str=str.Left(pos); str = str.TrimRight(); pos=str.ReverseFind(' '); } w[ind]=_wtof(str.Right(str.GetLength()-pos-1)); str=str.Left(pos); pos=str.ReverseFind(' '); v[ind]=_wtof(str.Right(str.GetLength()-pos-1)); str=str.Left(pos); pos=str.ReverseFind(' '); u[ind]=_wtof(str.Right(str.GetLength()-pos-1)); str=str.Left(pos); pos=str.ReverseFind(' '); z[ind]=_wtof(str.Right(str.GetLength()-pos-1)); for(long m=0;m<1;m++) { str=str.Left(pos); str = str.TrimRight(); pos=str.ReverseFind(' '); } y[ind]=_wtof(str.Right(str.GetLength()-pos-1)); for(long m=0;m<1;m++) { str=str.Left(pos); str = str.TrimRight(); pos=str.ReverseFind(' '); } x[ind]=_wtof(str.Right(str.GetLength()-pos-1)); } } } } fin.Close(); cudaMemset(p_d,0,sizeof(float)*Imax*Jmax*Kmax); cudaMemset(pn_d,0,sizeof(float)*Imax*Jmax*Kmax); cudaMemset(curl_d, 0, sizeof(float)*Imax*Jmax*Kmax); cudaMemset(pint_d,0,sizeof(float)*n*n); cudaMemset(pcount_d,0,sizeof(float)*n*n); cudaMemset(pcountinner_d,0,sizeof(float)*Imax*Jmax*Kmax); cudaMemset(pweight_d,0,sizeof(float)*n*n); //cudaMemset(pcountitr_d,0,sizeof(int)*n); cudaMemcpy(dudt_d,dudt,sizeof(float)*Imax*Jmax*Kmax,cudaMemcpyHostToDevice); 
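// Upload the per-frame material-acceleration fields (DuDt above, DvDt and
// DwDt below) to the device; the line-integration kernels read them as inputs.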
cudaMemcpy(dvdt_d,dvdt,sizeof(float)*Imax*Jmax*Kmax,cudaMemcpyHostToDevice); cudaMemcpy(dwdt_d,dwdt,sizeof(float)*Imax*Jmax*Kmax,cudaMemcpyHostToDevice); //set boundary pressure as from Bernoulli equation for (int i = 0; i < Imax; i++){ for (int k = 0; k < Kmax; k++){ int ind = i + 0*Imax + k*Imax*Jmax; p[ind] = pref-0.5*density*(u[ind] * u[ind] + v[ind] * v[ind] + w[ind] * w[ind]); } } cudaMemcpy(p_d,p,sizeof(float)*Imax*Jmax*Kmax,cudaMemcpyHostToDevice); cudaMemcpy(pn_d, p, sizeof(float)*Imax*Jmax*Kmax, cudaMemcpyHostToDevice); //////////////////////End of allocate memory on GPU////////////// dim3 threadPerBlock(8,8); dim3 blockPerGrid(512,512); dim3 threadPerBlock1(8,8,8); dim3 blockPerGrid1(256, 256,256); //calCurlofMaterialAcc <<<blockPerGrid1, threadPerBlock1>>>(Imax, Jmax, Kmax, deltx, delty, deltz, dudt_d, dvdt_d, dwdt_d, curl_d); calCurlofMaterialAccCPU(Imax, Jmax, Kmax, deltx, delty, deltz, dudt, dvdt, dwdt, curl,mask); cudaMemcpy(curl_d, curl, sizeof(float)*Imax*Jmax*Kmax, cudaMemcpyHostToDevice); // thredholdHistMaterialAccCPU(Imax,Jmax,Kmax,curl1,percentage,&threshold); ////////////////////Start Kernels on GPU//////////////////////////////////////////// calIndexGPU <<<n/512, 512 >>>(index_d, Imax, Jmax, Kmax); //omni3dparallellinesEqualSpacing <<<blockPerGrid, threadPerBlock >>>(Imax, Jmax, Kmax, NoAngles, linespacing, k1_d, k2_d, k3_d, index_d, deltx, delty, deltz, density, dudt_d, dvdt_d, dwdt_d, pint_d, pcount_d, pcountinner_d); //omni3dparallellinesEqualSpacingWeighted<<<blockPerGrid,threadPerBlock>>>(Imax,Jmax,Kmax,NoAngles,linespacing,k1_d,k2_d,k3_d,index_d,deltx,delty,deltz,density,dudt_d,dvdt_d,dwdt_d,pint_d,pweight_d,pcount_d,pcountinner_d,curl); //omni3dparallellinesEqualSpacingSelect<<<blockPerGrid,threadPerBlock>>>(Imax,Jmax,Kmax,NoAngles,linespacing,k1_d,k2_d,k3_d,index_d,deltx,delty,deltz,density,dudt_d,dvdt_d,dwdt_d,pint_d,pcount_d,pcountinner_d); // devidecount <<<n/ 512, 512 >>>(Imax, Jmax, Kmax, pint_d, pcount_d); //BCiteration <<<n / 512, 512 >>>(Imax, Jmax, Kmax, pint_d, pcount_d, p_d, pn_d, NoItr); /////-------------couting time----------------//////////// //omni3dparallellinesESInner<<<blockPerGrid,threadPerBlock>>>(Imax,Jmax,Kmax,NoAngles,linespacing,k1_d,k2_d,k3_d,index_d,deltx,delty,deltz,density,dudt_d,dvdt_d,dwdt_d,p_d,pn_d,pcountinner_d); //omni3dparallellinesESInnerSelect<<<blockPerGrid,threadPerBlock>>>(Imax,Jmax,Kmax,NoAngles,linespacing,k1_d,k2_d,k3_d,index_d,deltx,delty,deltz,density,dudt_d,dvdt_d,dwdt_d,p_d,pn_d,pcountinner_d); for (int i = 0; i<NoItr; i++) { //omni3dparallellinesESInnerWeighted<<<blockPerGrid,threadPerBlock>>>(Imax,Jmax,Kmax,NoAngles,linespacing,k1_d,k2_d,k3_d,index_d,deltx,delty,deltz,density,dudt_d,dvdt_d,dwdt_d,curl_d,p_d,pn_d,pcountinner_d); //omni3dparallellinesESInnerStepCount<<<blockPerGrid,threadPerBlock>>>(Imax,Jmax,Kmax,NoAngles,linespacing,k1_d,k2_d,k3_d,index_d,deltx,delty,deltz,density,dudt_d,dvdt_d,dwdt_d,p_d,pn_d,pcountinner_d,IntegrationSteps_d); //omni3dparallellinesESInnerWeightedMiniCurl<<<blockPerGrid,threadPerBlock>>>(Imax,Jmax,Kmax,NoAngles,linespacing,k1_d,k2_d,k3_d,index_d,deltx,delty,deltz,density,dudt_d,dvdt_d,dwdt_d,curl,p_d,pn_d,pcountinner_d); omni3dparallellinesESInnerSelect <<<blockPerGrid, threadPerBlock >>>(Imax, Jmax, Kmax, NoAngles, linespacing, k1_d, k2_d, k3_d, index_d, deltx, delty, deltz, density, dudt_d, dvdt_d, dwdt_d, curl_d, p_d, pn_d, pcountinner_d, threshold); //omni3dparallellinesESInnerSelectFixedBC << <blockPerGrid, threadPerBlock >> >(Imax, Jmax, Kmax, NoAngles, linespacing, 
// k1_d, k2_d, k3_d, index_d, deltx, delty, deltz, density, dudt_d, dvdt_d, dwdt_d, curl_d, p_d, pn_d, pcountinner_d, threshold);
//omni2dparallellinesOnFaceInner<<<blockPerGrid,threadPerBlock>>>(Imax,Jmax,Kmax,10000,linespacing,index_d,deltx,delty,deltz,density,dudt_d,dvdt_d,dwdt_d,p_d,pn_d,pcountinner_d);
//omni3dparallellinesInner<<<blockPerGrid,threadPerBlock>>>(Imax,Jmax,Kmax,NoAngles,k1_d,k2_d,k3_d,index_d,deltx,delty,deltz,density,dudt_d,dvdt_d,dwdt_d,p_d,pn_d);
devidecountInner <<<n/512, 512 >>>(Imax, Jmax, Kmax, p_d, pn_d, pcountinner_d);
if (i == NoItr - 1) { cudaMemcpy(pcountinner, pcountinner_d, sizeof(float)*Imax*Jmax*Kmax, cudaMemcpyDeviceToHost); }
//cudaMemset(pcountinner_d,1.0,sizeof(float)*Imax*Jmax*Kmax);
cudaMemset(pcountinner_d, 0, sizeof(float)*Imax*Jmax*Kmax);
}
//devidecountInner<<<n/512,512>>>(Imax,Jmax,Kmax,p_d,pn_d,pcountinner_d);
cudaMemcpy(p, p_d, sizeof(float)*Imax*Jmax*Kmax, cudaMemcpyDeviceToHost);
//cudaMemcpy(pcountinner, pcountinner_d, sizeof(float)*Imax*Jmax*Kmax, cudaMemcpyDeviceToHost);
//cudaMemcpy(curl1,curl,sizeof(float)*Imax*Jmax*Kmax,cudaMemcpyDeviceToHost);
// check for error
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error));
}
CStdioFile fout;
CString outfile=pathpressure;
outfile.AppendFormat(_T("PressureOmni3D_%05d.dat"),FileNum);
fout.Open(outfile,CFile::modeWrite|CFile::modeCreate);
////////////////////Write Data to file/////////////////////
/* meanpcal=0; int count = 0; for(long k=0;k<Kmax;k++) { for(long j=0;j<Jmax;j++) { for(long i=0;i<Imax;i++) { int ind=i+j*Imax+k*Imax*Jmax; if (curl[ind] != 0) { meanpcal += p[ind]; count++; } } } } //meanpcal=meanpcal/n; meanpcal=meanpcal/count; for(long k=0;k<Kmax;k++) { for(long j=0;j<Jmax;j++) { for(long i=0;i<Imax;i++) { int ind=i+j*Imax+k*Imax*Jmax; if (curl[ind] != 0) { p[ind]=p[ind]-meanpcal; } } } } */
fout.WriteString(_T("TITLE = \"Pressure Integrated From GPU Based Omni 3D Method\"\n"));
fout.WriteString(_T("VARIABLES = \"X\",\"Y\",\"Z\",\"P\",\"Count\"\n"));
str.Format(_T("ZONE I=%i, J=%i, K=%i,F=POINT\n"),Imax,Jmax,Kmax);
fout.WriteString(str);
//pmax=1;meanpdns=0;
for(long k=0;k<Kmax;k++) { for(long j=0;j<Jmax;j++) { for(long i=0;i<Imax;i++) { int ind=i+j*Imax+k*Imax*Jmax; str.Format(_T("%15.9f %15.9f %15.9f %15.9f %15.9f\n"), x[ind], y[ind], z[ind], p[ind], pcountinner[ind]); fout.WriteString(str); } } }
fout.Close();
/////////////Iteration completed/////////////////////////////////////////////
if (FileNum == 0)
{
fout.Open(_T("CurlofMaterialAcc_Sample.dat"), CFile::modeWrite | CFile::modeCreate);
fout.WriteString(_T("TITLE = \"Curl of Material Acceleration Multiplied by the Mask\"\n"));
fout.WriteString(_T("VARIABLES = \"X\",\"Y\",\"Z\",\"Curl of Acceleration\"\n"));
str.Format(_T("ZONE I=%i, J=%i, K=%i,F=POINT\n"), Imax, Jmax, Kmax);
fout.WriteString(str);
//pmax=1;meanpdns=0;
for (long k = 0; k<Kmax; k++) { for (long j = 0; j<Jmax; j++) { for (long i = 0; i<Imax; i++) { int ind = i + j*Imax + k*Imax*Jmax; str.Format(_T("%15.9f %15.9f %15.9f %15.9f\n"), x[ind], y[ind], z[ind], curl[ind]); fout.WriteString(str); } } }
fout.Close();
}
}
delete[] x; delete[] y; delete[] z; delete[] u; delete[] v; delete[] w;
delete[] dudt; delete[] dvdt; delete[] dwdt;
delete[] pint; delete[] p; delete[] pn; delete[] pdns; delete[] RHS;
delete[] pcount; delete[] pcountinner; delete[] index;
delete[] k1; delete[] k2; delete[] k3;
delete[] curl; delete[] mask;
log.close();
cudaFree(dudt_d); cudaFree(dvdt_d); cudaFree(dwdt_d);
cudaFree(pint_d); cudaFree(pcount_d);
cudaFree(p_d); cudaFree(pn_d);
cudaFree(pcountinner_d);
cudaFree(k1_d); cudaFree(k2_d); cudaFree(k3_d);
cudaFree(pweight_d); cudaFree(curl_d); cudaFree(index_d);
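// cudaDeviceReset() below destroys the device context, so any device
// allocations still outstanding are released with it before the process exits.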
cudaDeviceReset(); return EXIT_SUCCESS; }
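The teardown just above releases its host arrays with a single comma-separated statement, "delete []x,y,z,u,...", which the comma operator parses as "(delete[] x), y, z, ..." and therefore frees only the first buffer. A minimal stand-alone sketch of the per-array form; the buffer names below are illustrative, not the solver's full set:

#include <cstdlib>

int main() {
    float* x = new float[64];
    float* y = new float[64];
    float* z = new float[64];

    // ... use the buffers ...

    delete[] x;   // one delete[] per new[]; a comma list would leak y and z
    delete[] y;
    delete[] z;
    return EXIT_SUCCESS;
}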
dfb7b8ba03d69298af00ba560c20bf9c5feae49a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2013-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "config.h" #include <optix.h> #include "per_ray_data.h" #include "light_definition.h" #include "shader_common.h" #include "system_data.h" #include "transform.h" extern "C" __constant__ SystemData sysData; // Not actually a light. Never appears inside the sysLightDefinitions. extern "C" __global__ void __miss__env_null() { // Get the current rtPayload pointer from the unsigned int payload registers p0 and p1. PerRayData* thePrd = mergePointer(optixGetPayload_0(), optixGetPayload_1()); thePrd->radiance = make_float3(0.0f); thePrd->flags |= FLAG_TERMINATE; } extern "C" __global__ void __miss__env_constant() { // Get the current rtPayload pointer from the unsigned int payload registers p0 and p1. PerRayData* thePrd = mergePointer(optixGetPayload_0(), optixGetPayload_1()); // The environment light is always in the first element. float3 emission = sysData.lightDefinitions[0].emission; // Constant emission. if (sysData.directLighting) { // If the last surface intersection was a diffuse which was directly lit with multiple importance sampling, // then calculate light emission with multiple importance sampling as well. const float weightMIS = (thePrd->flags & FLAG_DIFFUSE) ? balanceHeuristic(thePrd->pdf, 0.25f * M_1_PIf) : 1.0f; emission *= weightMIS; } thePrd->radiance = emission; thePrd->flags |= FLAG_TERMINATE; } extern "C" __global__ void __miss__env_sphere() { // The environment light is always in the first element. const LightDefinition& light = sysData.lightDefinitions[0]; // Get the current rtPayload pointer from the unsigned int payload registers p0 and p1. PerRayData* thePrd = mergePointer(optixGetPayload_0(), optixGetPayload_1()); const float3 R = transformVector(light.oriInv, thePrd->wi); // Transform the ray.direction from world space to light object space. // All lights shine down the positive z-axis in this renderer. 
const float u = (atan2f(-R.x, R.z) + M_PIf) * 0.5f * M_1_PIf; const float v = acosf(-R.y) * M_1_PIf; // Texture is with origin at lower left, v == 0.0f is south pole. float3 emission = make_float3(tex2D<float4>(light.textureEmission, u, v)); if (sysData.directLighting) { // If the last surface intersection was a diffuse event which was directly lit with multiple importance sampling, // then calculate light emission with multiple importance sampling for this implicit light hit as well. if (thePrd->flags & FLAG_DIFFUSE) { // For simplicity we pretend that we perfectly importance-sampled the actual texture-filtered environment map // and not the Gaussian smoothed one used to actually generate the CDFs. const float pdfLight = intensity(emission) / light.integral; emission *= balanceHeuristic(thePrd->pdf, pdfLight); } } thePrd->radiance = emission * light.emission; thePrd->flags |= FLAG_TERMINATE; }
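__miss__env_sphere above converts the light-space ray direction into equirectangular texture coordinates, with v == 0 at the south pole. A small host-side check of the same mapping; kPi and dirToUV are illustrative helpers, not part of the shader headers:

#include <cmath>
#include <cstdio>

static const float kPi = 3.14159265358979f;

// Same formulas as the miss shader: longitude wrapped into [0,1),
// latitude measured from the south pole (-y).
static void dirToUV(float x, float y, float z, float* u, float* v) {
    *u = (atan2f(-x, z) + kPi) * 0.5f / kPi;
    *v = acosf(-y) / kPi;
}

int main() {
    float u, v;
    dirToUV(0.f, 0.f, 1.f, &u, &v);   // looking down +z, the light axis
    printf("u=%.3f v=%.3f\n", u, v);  // expected: u=0.500 v=0.500
    return 0;
}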
dfb7b8ba03d69298af00ba560c20bf9c5feae49a.cu
/* * Copyright (c) 2013-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "config.h" #include <optix.h> #include "per_ray_data.h" #include "light_definition.h" #include "shader_common.h" #include "system_data.h" #include "transform.h" extern "C" __constant__ SystemData sysData; // Not actually a light. Never appears inside the sysLightDefinitions. extern "C" __global__ void __miss__env_null() { // Get the current rtPayload pointer from the unsigned int payload registers p0 and p1. PerRayData* thePrd = mergePointer(optixGetPayload_0(), optixGetPayload_1()); thePrd->radiance = make_float3(0.0f); thePrd->flags |= FLAG_TERMINATE; } extern "C" __global__ void __miss__env_constant() { // Get the current rtPayload pointer from the unsigned int payload registers p0 and p1. PerRayData* thePrd = mergePointer(optixGetPayload_0(), optixGetPayload_1()); // The environment light is always in the first element. float3 emission = sysData.lightDefinitions[0].emission; // Constant emission. if (sysData.directLighting) { // If the last surface intersection was a diffuse which was directly lit with multiple importance sampling, // then calculate light emission with multiple importance sampling as well. const float weightMIS = (thePrd->flags & FLAG_DIFFUSE) ? balanceHeuristic(thePrd->pdf, 0.25f * M_1_PIf) : 1.0f; emission *= weightMIS; } thePrd->radiance = emission; thePrd->flags |= FLAG_TERMINATE; } extern "C" __global__ void __miss__env_sphere() { // The environment light is always in the first element. const LightDefinition& light = sysData.lightDefinitions[0]; // Get the current rtPayload pointer from the unsigned int payload registers p0 and p1. PerRayData* thePrd = mergePointer(optixGetPayload_0(), optixGetPayload_1()); const float3 R = transformVector(light.oriInv, thePrd->wi); // Transform the ray.direction from world space to light object space. // All lights shine down the positive z-axis in this renderer. 
const float u = (atan2f(-R.x, R.z) + M_PIf) * 0.5f * M_1_PIf; const float v = acosf(-R.y) * M_1_PIf; // Texture is with origin at lower left, v == 0.0f is south pole. float3 emission = make_float3(tex2D<float4>(light.textureEmission, u, v)); if (sysData.directLighting) { // If the last surface intersection was a diffuse event which was directly lit with multiple importance sampling, // then calculate light emission with multiple importance sampling for this implicit light hit as well. if (thePrd->flags & FLAG_DIFFUSE) { // For simplicity we pretend that we perfectly importance-sampled the actual texture-filtered environment map // and not the Gaussian smoothed one used to actually generate the CDFs. const float pdfLight = intensity(emission) / light.integral; emission *= balanceHeuristic(thePrd->pdf, pdfLight); } } thePrd->radiance = emission * light.emission; thePrd->flags |= FLAG_TERMINATE; }
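Both miss shaders weight implicit light hits with balanceHeuristic, whose actual definition lives in shader_common.h and is not shown in this file. Assuming the usual single-sample balance heuristic (Veach), a sketch of what such a weight computes:

#include <cstdio>

// Weight for a sample drawn from strategy A when strategy B could also
// have generated it; assumed form, the real helper may differ.
static float balanceHeuristic(float pdfA, float pdfB) {
    return pdfA / (pdfA + pdfB);
}

int main() {
    // As in __miss__env_constant: BSDF pdf vs. the uniform-sphere light
    // pdf 1/(4*pi), written there as 0.25f * M_1_PIf.
    const float pdfBsdf  = 0.5f;
    const float pdfLight = 0.25f * 0.3183098862f;
    printf("MIS weight = %f\n", balanceHeuristic(pdfBsdf, pdfLight));
    return 0;
}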
3602e425a4b94edf6428d96e168a5e1b78f5111a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* *Adapted From "Fast Hough Trasform on GPU's" * */ #include"hough.hpp" #include"cuda_error_check.hpp" bool debug_hough = false; #define THREADS_X_HOUGH 32 #define THREADS_Y_HOUGH 4 #define PIXELS_PER_THREAD 16 __device__ static int g_counter; __device__ static int g_counter_lines; extern __shared__ int shmem[]; void print_array(float *arr, int size) { for(int i =0;i<size;i++) { cout<<*(arr + i)<<"\t"; } cout<<endl; } void print_image(unsigned char *image, int height, int width) { for(int i =0;i<height;i++) { for(int j =0;j<width;j++) { cout<<(int)*(image + i*width + j)<<"\t"; } cout<<endl; } } __global__ void getNonzeroEdgepoints(unsigned char const* const image, unsigned int* const list) { __shared__ unsigned int s_queues[THREADS_Y_HOUGH][THREADS_X_HOUGH * PIXELS_PER_THREAD]; __shared__ int s_qsize[THREADS_Y_HOUGH]; __shared__ int s_globStart[THREADS_Y_HOUGH]; const int x = blockIdx.x * blockDim.x * PIXELS_PER_THREAD + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if(threadIdx.x == 0) s_qsize[threadIdx.y] = 0; __syncthreads(); if(y < 224) { const unsigned char* srcRow = image + y*IMG_WIDTH; for(int i = 0,xx = x; i<PIXELS_PER_THREAD && xx < 192;++i,xx += blockDim.x) { if(srcRow[xx]) { const unsigned int val = (y<<16)|xx; const int qidx = atomicAdd(&s_qsize[threadIdx.y],1); s_queues[threadIdx.y][qidx] = val; } } } __syncthreads(); if(threadIdx.x == 0 && threadIdx.y == 0 ) { int totalSize = 0; for(int i =0;i<blockDim.y;++i) { s_globStart[i] = totalSize; totalSize += s_qsize[i]; } const int global_Offset = atomicAdd(&g_counter, totalSize); for(int i =0 ;i<blockDim.y;++i) s_globStart[i] += global_Offset; } __syncthreads(); const int qsize = s_qsize[threadIdx.y]; int gidx = s_globStart[threadIdx.y] + threadIdx.x; for(int i = threadIdx.x; i<qsize; i+=blockDim.x, gidx +=blockDim.x) { list[gidx] = s_queues[threadIdx.y][i]; } } __global__ void fillHoughSpace(unsigned int* const list, const int count, int* hough_space,const float irho, const float theta, const int numrho) { int* smem = (int*)shmem; for(int i =threadIdx.x; i< numrho + 1;i+=blockDim.x) smem[i] = 0; __syncthreads(); const int n = blockIdx.x; const float ang = n*theta; //printf("The angle value of n is %d \n", blockIdx.x); //printf("Angle Values : %f \n", ang); //printf("Inside Kernel"); float sinVal; float cosVal; sincosf(ang, &sinVal, &cosVal); sinVal *= irho; cosVal *= irho; const int shift = (numrho -1)/2; for(int i = threadIdx.x; i<count; i+= blockDim.x) { const unsigned int val = list[i]; const int x = (val & 0xFFFF); const int y = (val >> 16) & 0xFFFF; int r = __float2int_rn(x*cosVal + y*sinVal); //printf("The value of x %d and the value of y %d : the value of r %d \n",x,y,r); r += shift; atomicAdd(&smem[r+1],1); } __syncthreads(); int* hrow = hough_space + (n+1)*(numrho + 2); for(int i = threadIdx.x ;i< numrho + 1; i+=blockDim.x) { //printf("value of shared_memory at %d is %d \n",i,smem[i]); hrow[i] = smem[i]; } } __global__ void getLines(const int * hough_space, float2* lines, int* votes, const int maxLines, const float rho, const float theta, const int threshold, const int numrho, const int rhspace) { const int r = blockIdx.x*blockDim.x + threadIdx.x; const int n = blockIdx.y*blockDim.y + threadIdx.y; if(r >=numrho || n >=rhspace -2) { return; } const int curVotes = *(hough_space + (n+1)*(numrho + 2)+ (r+1)); if(curVotes > *(hough_space + n*(numrho+2) + (r-1)) && curVotes > *(hough_space + n*(numrho + 2) + r) && 
curVotes > *(hough_space + n*(numrho + 2)+(r+1)) && curVotes > *(hough_space + n*(numrho + 2) + (r+2)) && curVotes > *(hough_space + n*(numrho+2) + (r+3)) && curVotes > *(hough_space + (n+1)*(numrho +2)+ r-1) && curVotes > *(hough_space + (n+1)*(numrho + 2) + r) && curVotes > *(hough_space +(n+1)*(numrho +2) + (r+2)) && curVotes > *(hough_space +(n+1)*(numrho +2) + (r+3)) && curVotes > *(hough_space +(n+2)*(numrho +2) + (r-1)) && curVotes > *(hough_space + (n+2)*(numrho +2) + r) && curVotes > *(hough_space + (n+2)*(numrho +2) + (r+1)) && curVotes > *(hough_space + (n+2)*(numrho +2) + (r+2)) && curVotes > *(hough_space + (n+2)*(numrho +2) + (r+3)) && curVotes > threshold) { const float radius = (r - (numrho -1)*0.5f)*rho; const float angle = n*theta; const int index = atomicAdd(&g_counter_lines,1); if(index < maxLines) { //printf("index Value - %d \n", index); //printf("Current Votes - %d \n", curVotes); //printf("radius %f and angle %f \n", radius, angle); //*(lines + index) = make_float2(radius, angle); (lines + index)->x = radius; (lines + index)->y = angle; //printf("value of radius - %f and value of angle - %f and curVotes - %d \n ", (lines +index)->x,(lines + index)->y, curVotes); *(votes + index) = curVotes; } } } lines_w_non_zero* houghTransform(unsigned char const* const edges,const int numangle, const int numrho,float thetaStep, float rStep) { /* if(debug_hough) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); } */ /*Replace by maximum function using cuda*/ const int threshold = 35; unsigned char* gimage; unsigned int* glist; void* counterPtr; hipGetSymbolAddress(&counterPtr, g_counter); hipMemset(counterPtr,0,sizeof(int)); CudaCheckError(); hipFuncSetCacheConfig(getNonzeroEdgepoints, hipFuncCachePreferShared); hipMalloc((void**)&gimage, IMG_SIZE*sizeof(unsigned char)); CudaCheckError(); hipMalloc((void**) &glist, IMG_SIZE*sizeof(unsigned int)); CudaCheckError(); /*Copy Image to GPU */ hipMemcpy(gimage, edges, IMG_SIZE*sizeof(unsigned char),hipMemcpyHostToDevice); CudaCheckError(); dim3 dimBlock1(THREADS_X_HOUGH, THREADS_Y_HOUGH); dim3 dimGrid1(1, 56); hipLaunchKernelGGL(( getNonzeroEdgepoints), dim3(dimGrid1),dim3(dimBlock1), 0, 0, gimage, glist); CudaCheckError(); hipDeviceSynchronize(); int totalCount ; hipMemcpy(&totalCount, counterPtr, sizeof(int),hipMemcpyDeviceToHost); //cout<<"Total Count :"<<totalCount<<endl; unsigned int* clist = (unsigned int*)malloc(totalCount*sizeof(unsigned int)); hipMemcpy(clist, glist, totalCount*sizeof(unsigned int),hipMemcpyDeviceToHost); CudaCheckError(); if(debug_hough) { unsigned int* clist = (unsigned int*)malloc(totalCount*sizeof(unsigned int)); hipMemcpy(clist, glist, totalCount*sizeof(unsigned int),hipMemcpyDeviceToHost); CudaCheckError(); for(int i = 0; i< totalCount; i++) { unsigned int const q_value = clist[i]; cout<<"q_value : "<<q_value<<endl; const int x = (q_value & 0xFFFF); const int y = (q_value >> 16 ) & 0xFFFF; cout<<"coordinate ("<<x<<","<<y<<")"<<endl; cout<<"Value at coordinate :"<<(int)*(edges + y*IMG_WIDTH + x)<<endl; } } //Initialize hough_space int hough_size = (numangle + 2)*(numrho + 2); int rhspace = numangle + 2; int colhspace = numrho + 2; //cout<<"rows : "<<rhspace<<endl; const dim3 block(1024); const dim3 grid(rhspace -2); //smemSize should be less than 49152 bytes size_t smemSize = (colhspace - 1)*sizeof(int); cout<<smemSize<<endl; thetaStep = thetaStep*(CV_PI/180); /*Allocate houghSpace on Gpu*/ int *d_hough_space; 
hipMalloc((void**)&d_hough_space,hough_size*sizeof(int)); CudaCheckError(); hipMemset(d_hough_space, 0, hough_size*sizeof(int)); CudaCheckError(); hipLaunchKernelGGL(( fillHoughSpace), dim3(grid),dim3(block), smemSize, 0, glist, totalCount,d_hough_space, 1.0f/rStep, thetaStep, colhspace -2); CudaCheckError(); hipDeviceSynchronize(); if(debug_hough) { int* hough_space = (int*)malloc(hough_size*sizeof(int)); hipMemcpy(hough_space, d_hough_space, hough_size*sizeof(int),hipMemcpyDeviceToHost); CudaCheckError(); for(int i =0;i<rhspace;i++) { for(int j =0;j<colhspace;j++) { cout<<*(hough_space + i*colhspace +j)<<"\t"; } cout<<endl; } } int maxLines = 10; float2* d_lines; int* d_votes; hipMalloc((void**)&d_lines,maxLines*sizeof(float2)); CudaCheckError(); hipMalloc((void**)&d_votes, maxLines*sizeof(int)); CudaCheckError(); void *counterPtr_lines; hipGetSymbolAddress(&counterPtr_lines, g_counter_lines); hipMemset(counterPtr_lines, 0, sizeof(int)); CudaCheckError(); const dim3 block_1(32,8); const int blocks_x = ((colhspace - 2 + block_1.x - 1)/(block_1.x)); const int blocks_y = ((rhspace - 2 + block_1.y -1 )/(block_1.y)); const dim3 grid_1(blocks_x, blocks_y); hipFuncSetCacheConfig(getLines, hipFuncCachePreferL1); hipLaunchKernelGGL(( getLines), dim3(grid_1), dim3(block_1), 0, 0, d_hough_space, d_lines, d_votes, maxLines,rStep, thetaStep, threshold, colhspace -2, rhspace); CudaCheckError(); hipDeviceSynchronize(); int countlines; hipMemcpy(&countlines, counterPtr_lines, sizeof(int),hipMemcpyDeviceToHost); CudaCheckError(); cout<<"totalCount of lines"<<countlines<<endl; countlines = min(countlines, maxLines); float2* lines = (float2*)malloc(countlines*sizeof(float2)); int* votes = (int*)malloc(countlines*sizeof(int)); hipMemcpy(lines, d_lines, countlines*sizeof(float2),hipMemcpyDeviceToHost); CudaCheckError(); hipMemcpy(votes, d_votes, countlines*sizeof(int),hipMemcpyDeviceToHost); CudaCheckError(); if(debug_hough) { Mat gray_image = imread("/home/nvidia/Lane_Detection/Test_Images/IPM_test_image_4.png",0); for(int i =0;i<countlines;i++) { float theta_line = (lines + i)->y; float rho = (lines + i)->x; cout<<"Rho - "<<rho<<"theta- "<<theta_line<<endl; cv::Point pt1, pt2; double a = cos(theta_line); double b = sin(theta_line); double x0 = a*rho; double y0 = b*rho; pt1.x = (int)(x0 + 400*(-b)); pt1.y = (int)(y0 + 400*(a)); pt2.x = (int)(x0 - 400*(-b)); pt2.y = (int)(x0 - 400*(a)); line(gray_image, pt1,pt2, (255,0,0),1); } imshow("IMage", gray_image); waitKey(0); } lines_w_non_zero* values = (lines_w_non_zero*)malloc(sizeof(lines_w_non_zero)); lin_votes* mem_hough_lines = (lin_votes*)malloc(sizeof(lin_votes)); values->hough_lines = mem_hough_lines; values->hough_lines->lines = lines; values->hough_lines->countlines = countlines; values->clist = clist; values->count = totalCount; /* lin_votes* hough_lines = (lin_votes*)malloc(sizeof(lin_votes)); hough_lines->lines = lines; hough_lines->countlines = countlines; */ /* if(debug_hough) { hipEventRecord(stop,0); hipEventSynchronize(stop); float elapsed = 0; hipEventElapsedTime(&elapsed, start, stop); cout<<"Elapsed Time"<<elapsed; } */ return values; }
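In the debug branch of houghTransform above, pt2.y is computed from x0 rather than y0, and the color argument (255,0,0) goes through the comma operator and collapses to plain 0 (black). A sketch of the rho/theta-to-segment conversion with both issues corrected; drawPolarLine is an illustrative helper, not part of hough.hpp:

#include <opencv2/imgproc.hpp>
#include <cmath>

void drawPolarLine(cv::Mat& img, float rho, float theta) {
    const double a = std::cos(theta), b = std::sin(theta);
    const double x0 = a * rho, y0 = b * rho;
    cv::Point pt1((int)(x0 + 400 * (-b)), (int)(y0 + 400 * a));
    cv::Point pt2((int)(x0 - 400 * (-b)), (int)(y0 - 400 * a)); // y from y0, not x0
    cv::line(img, pt1, pt2, cv::Scalar(255, 0, 0), 1);          // explicit cv::Scalar
}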
3602e425a4b94edf6428d96e168a5e1b78f5111a.cu
/* *Adapted From "Fast Hough Trasform on GPU's" * */ #include"hough.hpp" #include"cuda_error_check.hpp" bool debug_hough = false; #define THREADS_X_HOUGH 32 #define THREADS_Y_HOUGH 4 #define PIXELS_PER_THREAD 16 __device__ static int g_counter; __device__ static int g_counter_lines; extern __shared__ int shmem[]; void print_array(float *arr, int size) { for(int i =0;i<size;i++) { cout<<*(arr + i)<<"\t"; } cout<<endl; } void print_image(unsigned char *image, int height, int width) { for(int i =0;i<height;i++) { for(int j =0;j<width;j++) { cout<<(int)*(image + i*width + j)<<"\t"; } cout<<endl; } } __global__ void getNonzeroEdgepoints(unsigned char const* const image, unsigned int* const list) { __shared__ unsigned int s_queues[THREADS_Y_HOUGH][THREADS_X_HOUGH * PIXELS_PER_THREAD]; __shared__ int s_qsize[THREADS_Y_HOUGH]; __shared__ int s_globStart[THREADS_Y_HOUGH]; const int x = blockIdx.x * blockDim.x * PIXELS_PER_THREAD + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if(threadIdx.x == 0) s_qsize[threadIdx.y] = 0; __syncthreads(); if(y < 224) { const unsigned char* srcRow = image + y*IMG_WIDTH; for(int i = 0,xx = x; i<PIXELS_PER_THREAD && xx < 192;++i,xx += blockDim.x) { if(srcRow[xx]) { const unsigned int val = (y<<16)|xx; const int qidx = atomicAdd(&s_qsize[threadIdx.y],1); s_queues[threadIdx.y][qidx] = val; } } } __syncthreads(); if(threadIdx.x == 0 && threadIdx.y == 0 ) { int totalSize = 0; for(int i =0;i<blockDim.y;++i) { s_globStart[i] = totalSize; totalSize += s_qsize[i]; } const int global_Offset = atomicAdd(&g_counter, totalSize); for(int i =0 ;i<blockDim.y;++i) s_globStart[i] += global_Offset; } __syncthreads(); const int qsize = s_qsize[threadIdx.y]; int gidx = s_globStart[threadIdx.y] + threadIdx.x; for(int i = threadIdx.x; i<qsize; i+=blockDim.x, gidx +=blockDim.x) { list[gidx] = s_queues[threadIdx.y][i]; } } __global__ void fillHoughSpace(unsigned int* const list, const int count, int* hough_space,const float irho, const float theta, const int numrho) { int* smem = (int*)shmem; for(int i =threadIdx.x; i< numrho + 1;i+=blockDim.x) smem[i] = 0; __syncthreads(); const int n = blockIdx.x; const float ang = n*theta; //printf("The angle value of n is %d \n", blockIdx.x); //printf("Angle Values : %f \n", ang); //printf("Inside Kernel"); float sinVal; float cosVal; sincosf(ang, &sinVal, &cosVal); sinVal *= irho; cosVal *= irho; const int shift = (numrho -1)/2; for(int i = threadIdx.x; i<count; i+= blockDim.x) { const unsigned int val = list[i]; const int x = (val & 0xFFFF); const int y = (val >> 16) & 0xFFFF; int r = __float2int_rn(x*cosVal + y*sinVal); //printf("The value of x %d and the value of y %d : the value of r %d \n",x,y,r); r += shift; atomicAdd(&smem[r+1],1); } __syncthreads(); int* hrow = hough_space + (n+1)*(numrho + 2); for(int i = threadIdx.x ;i< numrho + 1; i+=blockDim.x) { //printf("value of shared_memory at %d is %d \n",i,smem[i]); hrow[i] = smem[i]; } } __global__ void getLines(const int * hough_space, float2* lines, int* votes, const int maxLines, const float rho, const float theta, const int threshold, const int numrho, const int rhspace) { const int r = blockIdx.x*blockDim.x + threadIdx.x; const int n = blockIdx.y*blockDim.y + threadIdx.y; if(r >=numrho || n >=rhspace -2) { return; } const int curVotes = *(hough_space + (n+1)*(numrho + 2)+ (r+1)); if(curVotes > *(hough_space + n*(numrho+2) + (r-1)) && curVotes > *(hough_space + n*(numrho + 2) + r) && curVotes > *(hough_space + n*(numrho + 2)+(r+1)) && curVotes > *(hough_space + n*(numrho + 2) 
+ (r+2)) && curVotes > *(hough_space + n*(numrho+2) + (r+3)) && curVotes > *(hough_space + (n+1)*(numrho +2)+ r-1) && curVotes > *(hough_space + (n+1)*(numrho + 2) + r) && curVotes > *(hough_space +(n+1)*(numrho +2) + (r+2)) && curVotes > *(hough_space +(n+1)*(numrho +2) + (r+3)) && curVotes > *(hough_space +(n+2)*(numrho +2) + (r-1)) && curVotes > *(hough_space + (n+2)*(numrho +2) + r) && curVotes > *(hough_space + (n+2)*(numrho +2) + (r+1)) && curVotes > *(hough_space + (n+2)*(numrho +2) + (r+2)) && curVotes > *(hough_space + (n+2)*(numrho +2) + (r+3)) && curVotes > threshold) { const float radius = (r - (numrho -1)*0.5f)*rho; const float angle = n*theta; const int index = atomicAdd(&g_counter_lines,1); if(index < maxLines) { //printf("index Value - %d \n", index); //printf("Current Votes - %d \n", curVotes); //printf("radius %f and angle %f \n", radius, angle); //*(lines + index) = make_float2(radius, angle); (lines + index)->x = radius; (lines + index)->y = angle; //printf("value of radius - %f and value of angle - %f and curVotes - %d \n ", (lines +index)->x,(lines + index)->y, curVotes); *(votes + index) = curVotes; } } } lines_w_non_zero* houghTransform(unsigned char const* const edges,const int numangle, const int numrho,float thetaStep, float rStep) { /* if(debug_hough) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); } */ /*Replace by maximum function using cuda*/ const int threshold = 35; unsigned char* gimage; unsigned int* glist; void* counterPtr; cudaGetSymbolAddress(&counterPtr, g_counter); cudaMemset(counterPtr,0,sizeof(int)); CudaCheckError(); cudaFuncSetCacheConfig(getNonzeroEdgepoints, cudaFuncCachePreferShared); cudaMalloc((void**)&gimage, IMG_SIZE*sizeof(unsigned char)); CudaCheckError(); cudaMalloc((void**) &glist, IMG_SIZE*sizeof(unsigned int)); CudaCheckError(); /*Copy Image to GPU */ cudaMemcpy(gimage, edges, IMG_SIZE*sizeof(unsigned char),cudaMemcpyHostToDevice); CudaCheckError(); dim3 dimBlock1(THREADS_X_HOUGH, THREADS_Y_HOUGH); dim3 dimGrid1(1, 56); getNonzeroEdgepoints<<<dimGrid1,dimBlock1>>>(gimage, glist); CudaCheckError(); cudaDeviceSynchronize(); int totalCount ; cudaMemcpy(&totalCount, counterPtr, sizeof(int),cudaMemcpyDeviceToHost); //cout<<"Total Count :"<<totalCount<<endl; unsigned int* clist = (unsigned int*)malloc(totalCount*sizeof(unsigned int)); cudaMemcpy(clist, glist, totalCount*sizeof(unsigned int),cudaMemcpyDeviceToHost); CudaCheckError(); if(debug_hough) { unsigned int* clist = (unsigned int*)malloc(totalCount*sizeof(unsigned int)); cudaMemcpy(clist, glist, totalCount*sizeof(unsigned int),cudaMemcpyDeviceToHost); CudaCheckError(); for(int i = 0; i< totalCount; i++) { unsigned int const q_value = clist[i]; cout<<"q_value : "<<q_value<<endl; const int x = (q_value & 0xFFFF); const int y = (q_value >> 16 ) & 0xFFFF; cout<<"coordinate ("<<x<<","<<y<<")"<<endl; cout<<"Value at coordinate :"<<(int)*(edges + y*IMG_WIDTH + x)<<endl; } } //Initialize hough_space int hough_size = (numangle + 2)*(numrho + 2); int rhspace = numangle + 2; int colhspace = numrho + 2; //cout<<"rows : "<<rhspace<<endl; const dim3 block(1024); const dim3 grid(rhspace -2); //smemSize should be less than 49152 bytes size_t smemSize = (colhspace - 1)*sizeof(int); cout<<smemSize<<endl; thetaStep = thetaStep*(CV_PI/180); /*Allocate houghSpace on Gpu*/ int *d_hough_space; cudaMalloc((void**)&d_hough_space,hough_size*sizeof(int)); CudaCheckError(); cudaMemset(d_hough_space, 0, hough_size*sizeof(int)); CudaCheckError(); 
fillHoughSpace<<<grid,block, smemSize>>>(glist, totalCount,d_hough_space, 1.0f/rStep, thetaStep, colhspace -2); CudaCheckError(); cudaDeviceSynchronize(); if(debug_hough) { int* hough_space = (int*)malloc(hough_size*sizeof(int)); cudaMemcpy(hough_space, d_hough_space, hough_size*sizeof(int),cudaMemcpyDeviceToHost); CudaCheckError(); for(int i =0;i<rhspace;i++) { for(int j =0;j<colhspace;j++) { cout<<*(hough_space + i*colhspace +j)<<"\t"; } cout<<endl; } } int maxLines = 10; float2* d_lines; int* d_votes; cudaMalloc((void**)&d_lines,maxLines*sizeof(float2)); CudaCheckError(); cudaMalloc((void**)&d_votes, maxLines*sizeof(int)); CudaCheckError(); void *counterPtr_lines; cudaGetSymbolAddress(&counterPtr_lines, g_counter_lines); cudaMemset(counterPtr_lines, 0, sizeof(int)); CudaCheckError(); const dim3 block_1(32,8); const int blocks_x = ((colhspace - 2 + block_1.x - 1)/(block_1.x)); const int blocks_y = ((rhspace - 2 + block_1.y -1 )/(block_1.y)); const dim3 grid_1(blocks_x, blocks_y); cudaFuncSetCacheConfig(getLines, cudaFuncCachePreferL1); getLines<<<grid_1, block_1>>>(d_hough_space, d_lines, d_votes, maxLines,rStep, thetaStep, threshold, colhspace -2, rhspace); CudaCheckError(); cudaDeviceSynchronize(); int countlines; cudaMemcpy(&countlines, counterPtr_lines, sizeof(int),cudaMemcpyDeviceToHost); CudaCheckError(); cout<<"totalCount of lines"<<countlines<<endl; countlines = min(countlines, maxLines); float2* lines = (float2*)malloc(countlines*sizeof(float2)); int* votes = (int*)malloc(countlines*sizeof(int)); cudaMemcpy(lines, d_lines, countlines*sizeof(float2),cudaMemcpyDeviceToHost); CudaCheckError(); cudaMemcpy(votes, d_votes, countlines*sizeof(int),cudaMemcpyDeviceToHost); CudaCheckError(); if(debug_hough) { Mat gray_image = imread("/home/nvidia/Lane_Detection/Test_Images/IPM_test_image_4.png",0); for(int i =0;i<countlines;i++) { float theta_line = (lines + i)->y; float rho = (lines + i)->x; cout<<"Rho - "<<rho<<"theta- "<<theta_line<<endl; cv::Point pt1, pt2; double a = cos(theta_line); double b = sin(theta_line); double x0 = a*rho; double y0 = b*rho; pt1.x = (int)(x0 + 400*(-b)); pt1.y = (int)(y0 + 400*(a)); pt2.x = (int)(x0 - 400*(-b)); pt2.y = (int)(x0 - 400*(a)); line(gray_image, pt1,pt2, (255,0,0),1); } imshow("IMage", gray_image); waitKey(0); } lines_w_non_zero* values = (lines_w_non_zero*)malloc(sizeof(lines_w_non_zero)); lin_votes* mem_hough_lines = (lin_votes*)malloc(sizeof(lin_votes)); values->hough_lines = mem_hough_lines; values->hough_lines->lines = lines; values->hough_lines->countlines = countlines; values->clist = clist; values->count = totalCount; /* lin_votes* hough_lines = (lin_votes*)malloc(sizeof(lin_votes)); hough_lines->lines = lines; hough_lines->countlines = countlines; */ /* if(debug_hough) { cudaEventRecord(stop,0); cudaEventSynchronize(stop); float elapsed = 0; cudaEventElapsedTime(&elapsed, start, stop); cout<<"Elapsed Time"<<elapsed; } */ return values; }
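getNonzeroEdgepoints packs each edge pixel into one unsigned int as (y << 16) | x, and fillHoughSpace recovers the coordinates with masks and shifts. A host-side round-trip check of that encoding, valid while both coordinates fit in 16 bits:

#include <cassert>
#include <cstdio>

int main() {
    const unsigned int x = 131, y = 200;           // within the 192x224 image
    const unsigned int packed = (y << 16) | x;     // as stored in the queue
    assert((packed & 0xFFFF) == x);                // low 16 bits: x
    assert(((packed >> 16) & 0xFFFF) == y);        // high 16 bits: y
    printf("0x%08X -> x=%u y=%u\n", packed, packed & 0xFFFF, (packed >> 16) & 0xFFFF);
    return 0;
}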
4a7fed924df84b051e241600a3c2adf17a56df95.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "nn.h" void Neural_network::use_GPU(){ block = batch_size; block2 = max(layers_size[1],layers_size[0])+1; grid = max(layers_size[1],layers_size[0])+1; hipMalloc((double**)&training_X_GPU, training_size*input*sizeof(double)); hipMalloc((double**)&training_Y_GPU, training_size*sizeof(double)); hipMalloc((double**)&test_X_GPU, test_size*input*sizeof(double)); hipMalloc((double**)&test_Y_GPU, test_size*sizeof(double)); w_GPU = new double* [layers_number-1]; for( int i=0;i<layers_number-1;i++){ hipMalloc((double**)&(w_GPU[i]), (layers_size[i]+1)*layers_size[i+1]*sizeof(double)); hipMalloc((double**)&(w_gradient[i]), (layers_size[i]+1)*layers_size[i+1]*sizeof(double)); hipMalloc((double**)&(w_gradient_old[i]), (layers_size[i]+1)*layers_size[i+1]*sizeof(double)); hipMalloc((double**)&(w_gradient_old2[i]), (layers_size[i]+1)*layers_size[i+1]*sizeof(double)); hipMemcpy(w_GPU[i], w[i], (layers_size[i]+1)*layers_size[i+1]*sizeof(double), hipMemcpyHostToDevice); hipLaunchKernelGGL(( set_GPU), dim3(grid), dim3(block2) , 0, 0, w_gradient[i], (layers_size[i]+1), layers_size[i+1], 0); hipLaunchKernelGGL(( set_GPU), dim3(grid), dim3(block2) , 0, 0, w_gradient_old[i], (layers_size[i]+1), layers_size[i+1], 0); hipLaunchKernelGGL(( set_GPU), dim3(grid), dim3(block2) , 0, 0, w_gradient_old2[i], (layers_size[i]+1), layers_size[i+1], 0); } for(int i=0; i<layers_number-1; i++){ hipMalloc((double**)&(l[i]), layers_size[i+1]*batch_size*sizeof(double)); hipMalloc((double**)&(d_l[i]), layers_size[i+1]*batch_size*sizeof(double)); hipMalloc((double**)&(delta[i]), layers_size[i+1]*batch_size*sizeof(double)); } for(int i=1; i<layers_number-1; i++){ hipMalloc((double**)&(a_l[i]), (layers_size[i]+1)*batch_size*sizeof(double)); hipLaunchKernelGGL(( set_GPU), dim3(grid), dim3(block) , 0, 0, a_l[i], layers_size[i]+1, batch_size, 1); } hipMalloc((double**)&(a_l[layers_number-1]), output*batch_size*sizeof(double)); hipMalloc((double**)&error_GPU, batch_size*sizeof(double)); hipMalloc((double**)&loss_GPU, batch_size*sizeof(double)); error_CPU=new double [batch_size]; loss_CPU=new double [batch_size]; } void Neural_network::train_with_GPU(int epoch_number){ GPU_bool=true; hipMemcpy(training_X_GPU, training_X, training_size*input*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(training_Y_GPU, training_Y, training_size*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(test_X_GPU, test_X, test_size*input*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(test_Y_GPU, test_Y, test_size*sizeof(double), hipMemcpyHostToDevice); for(int epoch=0; epoch<epoch_number; epoch++){ error=loss=0; for(int batch=0; batch<training_size; batch+=batch_size){ feed_forward_GPU( &training_X_GPU[batch*input],&training_Y_GPU[batch], &error, &loss); for(int i=layers_number-2; i>0; i--){ hipLaunchKernelGGL(( error_calculate_GPU), dim3(grid), dim3(block) , 0, 0, delta[i], delta[i-1], w_GPU[i], layers_size[i+1], layers_size[i], batch_size); } for( int i=0;i<layers_number-1;i++){ hipLaunchKernelGGL(( set_GPU), dim3(grid), dim3(block2) , 0, 0, w_gradient[i], layers_size[i]+1, layers_size[i+1], 0); hipLaunchKernelGGL(( gradient_calculate_GPU), dim3(grid), dim3(block2) , 0, 0, a_l[i], w_gradient[i], delta[i], d_l[i], layers_size[i]+1, layers_size[i+1], batch_size); } update_GPU(); } std::cout<<"Epoch "<<epoch<<" Training loss = "<<loss/training_size*batch_size<<" error = "<<1-(error/training_size*batch_size); error=loss=0; for(int batch=0; batch<test_size; 
batch+=batch_size){ feed_forward_GPU( &test_X_GPU[batch*input],&test_Y_GPU[batch], &error, &loss); } std::cout<<" Validation loss "<<loss/test_size*batch_size<<" error = "<<1-(error/test_size*batch_size)<<std::endl; } for( int i=0;i<layers_number-1;i++){ hipMemcpy(w[i], w_GPU[i], (layers_size[i]+1)*layers_size[i+1]*sizeof(double), hipMemcpyDeviceToHost); } } void Neural_network::feed_forward_GPU(double* X, double* Y, double* error, double* loss){ a_l[0] = X; for(int i=0; i<layers_number-2; i++){ hipLaunchKernelGGL(( matrix_multiplication_GPU), dim3(grid), dim3(block) , 0, 0, l[i], a_l[i], w_GPU[i], layers_size[i+1], layers_size[i]+1, batch_size); hipLaunchKernelGGL(( matrix_activation_GPU), dim3(grid), dim3(block) , 0, 0, l[i], a_l[i+1], d_l[i], layers_size[i+1], batch_size); } hipLaunchKernelGGL(( matrix_multiplication_GPU), dim3(grid), dim3(block) , 0, 0, l[layers_number-2], a_l[layers_number-2], w_GPU[layers_number-2], layers_size[layers_number-1], layers_size[layers_number-2]+1, batch_size); hipLaunchKernelGGL(( softmax_GPU), dim3(grid), dim3(block) , 0, 0, l[layers_number-2], a_l[layers_number-1], output, batch_size); hipLaunchKernelGGL(( error_check_GPU), dim3(grid), dim3(block) , 0, 0, Y, a_l[layers_number-1], delta[layers_number-2], d_l[layers_number-2], error_GPU, loss_GPU, output, batch_size); hipMemcpy(error_CPU, error_GPU, batch_size*sizeof(double), hipMemcpyDeviceToHost); hipMemcpy(loss_CPU, loss_GPU, batch_size*sizeof(double), hipMemcpyDeviceToHost); for(int i=1;i<batch_size;i++) loss_CPU[0]+=loss_CPU[i]; for(int i=1;i<batch_size;i++) error_CPU[0]+=error_CPU[i]; (*error)+=error_CPU[0]/batch_size; (*loss)+=loss_CPU[0]/batch_size; } void Neural_network::update_GPU(){ switch (gradient){ case 1: for(int i=0; i<layers_number-1; i++) hipLaunchKernelGGL(( normal_gradient_update_GPU), dim3(grid), dim3(block2) , 0, 0, w_GPU[i], w_gradient[i], layers_size[i]+1, layers_size[i+1], learning_rate); break; case 2: for(int i=0; i<layers_number-1; i++) hipLaunchKernelGGL(( momentum_update_GPU), dim3(grid), dim3(block2) , 0, 0, w_GPU[i], w_gradient[i], w_gradient_old[i], layers_size[i]+1, layers_size[i+1], learning_rate); break; case 3: for(int i=0; i<layers_number-1; i++) hipLaunchKernelGGL(( adagrad_update_GPU), dim3(grid), dim3(block2) , 0, 0, w_GPU[i], w_gradient[i], w_gradient_old[i], layers_size[i]+1, layers_size[i+1], learning_rate); break; case 4: for(int i=0; i<layers_number-1; i++) hipLaunchKernelGGL(( RMSprop_update_GPU), dim3(grid), dim3(block2) , 0, 0, w_GPU[i], w_gradient[i], w_gradient_old[i], layers_size[i]+1, layers_size[i+1], learning_rate); break; case 5: for(int i=0; i<layers_number-1; i++) hipLaunchKernelGGL(( adam_update_GPU), dim3(grid), dim3(block2) , 0, 0, w_GPU[i], w_gradient[i], w_gradient_old[i], w_gradient_old2[i], layers_size[i]+1, layers_size[i+1], learning_rate); break; } } __global__ void matrix_multiplication_GPU(double *l2, double *l1, double *w, int l2_size, int l1_size, int batch_size){ unsigned int j = threadIdx.x; unsigned int i = blockIdx.x; if(i<l2_size){ l2[j*l2_size+i]=0; for(int k=0;k<l1_size;k++) l2[j*l2_size+i]+=l1[j*l1_size+k]*w[k*l2_size+i]; } } __global__ void matrix_activation_GPU( double *l, double *a_l, double *d_l, int l_size, int batch_size){ unsigned int j = threadIdx.x; unsigned int i = blockIdx.x; if(i<l_size){ if(l[j*l_size+i]>0){ a_l[j*(l_size+1)+i]=l[j*l_size+i]; d_l[j*l_size+i]=1; } else{ a_l[j*(l_size+1)+i]=0.01*l[j*l_size+i]; d_l[j*l_size+i]=0.01; } } } __global__ void softmax_GPU( double *l, double *a_l, int l_size, int 
batch_size){ unsigned int j = threadIdx.x + blockIdx.x*blockDim.x; if(j<batch_size){ double sume=0; for(int i=0;i<l_size;i++){ a_l[j*l_size+i]=exp(l[j*l_size+i]); sume+=a_l[j*l_size+i]; } for(int i=0;i<l_size;i++) a_l[j*l_size+i]/=sume; } } __global__ void error_check_GPU(double *Y, double *a_l, double *delta, double *d_l, double *error, double *loss, int output, int batch_size){ unsigned int j = threadIdx.x + blockIdx.x*blockDim.x; if(j<batch_size){ int y; loss[j]=0; error[j]=0; for(int i=0;i<output;i++){ d_l[j*output+i]=1.; y=(Y[j]-i)*(Y[j]-i); delta[j*output+i]=a_l[j*output+i]-y; loss[j]-=y*log(a_l[j*output+i]+0.00001); } //int wynik; if(a_l[j*output]>a_l[j*output+1]) error[j]+=1; error[j]+=Y[j]; if(error[j]==1) error[j]=0; else error[j]=1; } } __global__ void error_calculate_GPU(double *l2, double *l1, double *w, int l2_size, int l1_size, int batch_size){ unsigned int j = threadIdx.x; unsigned int i = blockIdx.x; if(i<l1_size){ l1[j*l1_size+i]=0; for(int k=0;k<l2_size;k++) l1[j*l1_size+i]+=l2[j*l2_size+k]*w[i*l2_size+k]; } } __global__ void set_GPU(double *w, int l1_size, int l2_size, double d){ unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; int n=l1_size*l2_size; for(int i=0; i<n; i+=gridDim.x*blockDim.x){ if(i+index<n){ w[i+index]=d; } } } __global__ void gradient_calculate_GPU(double *a_l1, double *w, double *delta, double *d_l2, int l1_size, int l2_size, int batch_size){ unsigned int i = threadIdx.x; unsigned int k = blockIdx.x; if(i<l1_size){ if(k<l2_size){ w[i*l2_size+k]=0; for(int j=0;j<batch_size;j++){ double update=a_l1[j*l1_size+i]*delta[j*l2_size+k]*d_l2[j*l2_size+k]; if(isnan(update)==0) w[i*l2_size+k]+=update; //w[i*l2_size+k]+=a_l1[j*l1_size+i]*delta[j*l2_size+k]*d_l2[j*l2_size+k]; } w[i*l2_size+k]/=batch_size; } } } __global__ void normal_gradient_update_GPU(double *w, double *w_g, int l1_size, int l2_size, double learning_rate){ unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; int n=l1_size*l2_size; for(int i=0; i<n; i+=gridDim.x*blockDim.x){ if(i+index<n){ w[i+index]-=w_g[i+index]*learning_rate; //if(w[i+index]>2) w[i+index]=2.; //if(w[i+index]<-2) w[i+index]=-2.; } } } __global__ void momentum_update_GPU(double *w, double *w_g, double *w_g_old, int l1_size, int l2_size, double learning_rate){ unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; int n=l1_size*l2_size; for(int i=0; i<n; i+=gridDim.x*blockDim.x){ if(i+index<n){ double v=0; v=0.8*w_g_old[i+index]+learning_rate*w_g[i+index]; //if(v>1) v=1; //if(v<-1) v=-1; w[i+index]-=v; //if(w[i+index]>2) w[i+index]=2.; //if(w[i+index]<-2) w[i+index]=-2.; w_g_old[i+index]=v; } } } __global__ void adagrad_update_GPU(double *w, double *w_g, double *w_g_old, int l1_size, int l2_size, double learning_rate){ unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; int n=l1_size*l2_size; for(int i=0; i<n; i+=gridDim.x*blockDim.x){ if(i+index<n){ w_g_old[i+index]+=w_g[i+index]*w_g[i+index]; double v=0; v=learning_rate*w_g[i+index]/(sqrt(w_g_old[i+index]+0.000001)); //if(v>1) v=1; //if(v<-1) v=-1; w[i+index]-=v; //if(w[i+index]>2) w[i+index]=2.; //if(w[i+index]<-2) w[i+index]=-2.; } } } __global__ void RMSprop_update_GPU(double *w, double *w_g, double *w_g_old, int l1_size, int l2_size, double learning_rate){ unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; int n=l1_size*l2_size; for(int i=0; i<n; i+=gridDim.x*blockDim.x){ if(i+index<n){ w_g_old[i+index]=0.1*(w_g[i+index]*w_g[i+index])+0.9*w_g_old[i+index]; w[i+index]-=w_g[i+index]*learning_rate/(sqrt(w_g_old[i+index]+0.000001)); 
if(w[i+index]>2) w[i+index]=2.; if(w[i+index]<-2) w[i+index]=-2.; } } } __global__ void adam_update_GPU(double *w, double *w_g, double *w_g_old, double *w_g_old2, int l1_size, int l2_size, double learning_rate){ double B1=0.9; double B2=0.999; double m; double v; unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; int n=l1_size*l2_size; for(int i=0; i<n; i+=gridDim.x*blockDim.x){ if(i+index<n){ w_g_old[i+index]=B1*w_g_old[i+index]+(1-B1)*w_g[i+index]; w_g_old2[i+index]=B2*w_g_old2[i+index]+(1-B2)*w_g[i+index]*w_g[i+index]; m=w_g_old[i+index]/(1-B1); v=w_g_old2[i+index]/(1-B2); w[i+index]-=m*learning_rate/(sqrt(v+0.000001)); //if(w[i+index]>2) w[i+index]=2.; //if(w[i+index]<-2) w[i+index]=-2.; } } }
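softmax_GPU above exponentiates the raw logits directly, so a large activation overflows the double-precision exp. A max-shifted variant yields the same normalized output without that risk; this is a drop-in sketch, not a kernel declared in nn.h:

__global__ void softmax_stable_GPU(double *l, double *a_l, int l_size, int batch_size){
    unsigned int j = threadIdx.x + blockIdx.x*blockDim.x;
    if(j<batch_size){
        double m = l[j*l_size];                      // row maximum
        for(int i=1;i<l_size;i++)
            if(l[j*l_size+i]>m) m = l[j*l_size+i];
        double sume = 0;
        for(int i=0;i<l_size;i++){
            a_l[j*l_size+i] = exp(l[j*l_size+i]-m);  // exp(x - max) <= 1, no overflow
            sume += a_l[j*l_size+i];
        }
        for(int i=0;i<l_size;i++) a_l[j*l_size+i] /= sume; // normalization is unchanged
    }
}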
4a7fed924df84b051e241600a3c2adf17a56df95.cu
#include "nn.h" void Neural_network::use_GPU(){ block = batch_size; block2 = max(layers_size[1],layers_size[0])+1; grid = max(layers_size[1],layers_size[0])+1; cudaMalloc((double**)&training_X_GPU, training_size*input*sizeof(double)); cudaMalloc((double**)&training_Y_GPU, training_size*sizeof(double)); cudaMalloc((double**)&test_X_GPU, test_size*input*sizeof(double)); cudaMalloc((double**)&test_Y_GPU, test_size*sizeof(double)); w_GPU = new double* [layers_number-1]; for( int i=0;i<layers_number-1;i++){ cudaMalloc((double**)&(w_GPU[i]), (layers_size[i]+1)*layers_size[i+1]*sizeof(double)); cudaMalloc((double**)&(w_gradient[i]), (layers_size[i]+1)*layers_size[i+1]*sizeof(double)); cudaMalloc((double**)&(w_gradient_old[i]), (layers_size[i]+1)*layers_size[i+1]*sizeof(double)); cudaMalloc((double**)&(w_gradient_old2[i]), (layers_size[i]+1)*layers_size[i+1]*sizeof(double)); cudaMemcpy(w_GPU[i], w[i], (layers_size[i]+1)*layers_size[i+1]*sizeof(double), cudaMemcpyHostToDevice); set_GPU<<< grid, block2 >>>(w_gradient[i], (layers_size[i]+1), layers_size[i+1], 0); set_GPU<<< grid, block2 >>>(w_gradient_old[i], (layers_size[i]+1), layers_size[i+1], 0); set_GPU<<< grid, block2 >>>(w_gradient_old2[i], (layers_size[i]+1), layers_size[i+1], 0); } for(int i=0; i<layers_number-1; i++){ cudaMalloc((double**)&(l[i]), layers_size[i+1]*batch_size*sizeof(double)); cudaMalloc((double**)&(d_l[i]), layers_size[i+1]*batch_size*sizeof(double)); cudaMalloc((double**)&(delta[i]), layers_size[i+1]*batch_size*sizeof(double)); } for(int i=1; i<layers_number-1; i++){ cudaMalloc((double**)&(a_l[i]), (layers_size[i]+1)*batch_size*sizeof(double)); set_GPU<<< grid, block >>>(a_l[i], layers_size[i]+1, batch_size, 1); } cudaMalloc((double**)&(a_l[layers_number-1]), output*batch_size*sizeof(double)); cudaMalloc((double**)&error_GPU, batch_size*sizeof(double)); cudaMalloc((double**)&loss_GPU, batch_size*sizeof(double)); error_CPU=new double [batch_size]; loss_CPU=new double [batch_size]; } void Neural_network::train_with_GPU(int epoch_number){ GPU_bool=true; cudaMemcpy(training_X_GPU, training_X, training_size*input*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(training_Y_GPU, training_Y, training_size*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(test_X_GPU, test_X, test_size*input*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(test_Y_GPU, test_Y, test_size*sizeof(double), cudaMemcpyHostToDevice); for(int epoch=0; epoch<epoch_number; epoch++){ error=loss=0; for(int batch=0; batch<training_size; batch+=batch_size){ feed_forward_GPU( &training_X_GPU[batch*input],&training_Y_GPU[batch], &error, &loss); for(int i=layers_number-2; i>0; i--){ error_calculate_GPU<<< grid, block >>>(delta[i], delta[i-1], w_GPU[i], layers_size[i+1], layers_size[i], batch_size); } for( int i=0;i<layers_number-1;i++){ set_GPU<<< grid, block2 >>>(w_gradient[i], layers_size[i]+1, layers_size[i+1], 0); gradient_calculate_GPU<<< grid, block2 >>>(a_l[i], w_gradient[i], delta[i], d_l[i], layers_size[i]+1, layers_size[i+1], batch_size); } update_GPU(); } std::cout<<"Epoch "<<epoch<<" Training loss = "<<loss/training_size*batch_size<<" error = "<<1-(error/training_size*batch_size); error=loss=0; for(int batch=0; batch<test_size; batch+=batch_size){ feed_forward_GPU( &test_X_GPU[batch*input],&test_Y_GPU[batch], &error, &loss); } std::cout<<" Validation loss "<<loss/test_size*batch_size<<" error = "<<1-(error/test_size*batch_size)<<std::endl; } for( int i=0;i<layers_number-1;i++){ cudaMemcpy(w[i], w_GPU[i], 
(layers_size[i]+1)*layers_size[i+1]*sizeof(double), cudaMemcpyDeviceToHost); } } void Neural_network::feed_forward_GPU(double* X, double* Y, double* error, double* loss){ a_l[0] = X; for(int i=0; i<layers_number-2; i++){ matrix_multiplication_GPU<<< grid, block >>>(l[i], a_l[i], w_GPU[i], layers_size[i+1], layers_size[i]+1, batch_size); matrix_activation_GPU<<< grid, block >>>(l[i], a_l[i+1], d_l[i], layers_size[i+1], batch_size); } matrix_multiplication_GPU<<< grid, block >>>(l[layers_number-2], a_l[layers_number-2], w_GPU[layers_number-2], layers_size[layers_number-1], layers_size[layers_number-2]+1, batch_size); softmax_GPU<<< grid, block >>>( l[layers_number-2], a_l[layers_number-1], output, batch_size); error_check_GPU<<< grid, block >>>(Y, a_l[layers_number-1], delta[layers_number-2], d_l[layers_number-2], error_GPU, loss_GPU, output, batch_size); cudaMemcpy(error_CPU, error_GPU, batch_size*sizeof(double), cudaMemcpyDeviceToHost); cudaMemcpy(loss_CPU, loss_GPU, batch_size*sizeof(double), cudaMemcpyDeviceToHost); for(int i=1;i<batch_size;i++) loss_CPU[0]+=loss_CPU[i]; for(int i=1;i<batch_size;i++) error_CPU[0]+=error_CPU[i]; (*error)+=error_CPU[0]/batch_size; (*loss)+=loss_CPU[0]/batch_size; } void Neural_network::update_GPU(){ switch (gradient){ case 1: for(int i=0; i<layers_number-1; i++) normal_gradient_update_GPU<<< grid, block2 >>>(w_GPU[i], w_gradient[i], layers_size[i]+1, layers_size[i+1], learning_rate); break; case 2: for(int i=0; i<layers_number-1; i++) momentum_update_GPU<<< grid, block2 >>>(w_GPU[i], w_gradient[i], w_gradient_old[i], layers_size[i]+1, layers_size[i+1], learning_rate); break; case 3: for(int i=0; i<layers_number-1; i++) adagrad_update_GPU<<< grid, block2 >>>(w_GPU[i], w_gradient[i], w_gradient_old[i], layers_size[i]+1, layers_size[i+1], learning_rate); break; case 4: for(int i=0; i<layers_number-1; i++) RMSprop_update_GPU<<< grid, block2 >>>(w_GPU[i], w_gradient[i], w_gradient_old[i], layers_size[i]+1, layers_size[i+1], learning_rate); break; case 5: for(int i=0; i<layers_number-1; i++) adam_update_GPU<<< grid, block2 >>>(w_GPU[i], w_gradient[i], w_gradient_old[i], w_gradient_old2[i], layers_size[i]+1, layers_size[i+1], learning_rate); break; } } __global__ void matrix_multiplication_GPU(double *l2, double *l1, double *w, int l2_size, int l1_size, int batch_size){ unsigned int j = threadIdx.x; unsigned int i = blockIdx.x; if(i<l2_size){ l2[j*l2_size+i]=0; for(int k=0;k<l1_size;k++) l2[j*l2_size+i]+=l1[j*l1_size+k]*w[k*l2_size+i]; } } __global__ void matrix_activation_GPU( double *l, double *a_l, double *d_l, int l_size, int batch_size){ unsigned int j = threadIdx.x; unsigned int i = blockIdx.x; if(i<l_size){ if(l[j*l_size+i]>0){ a_l[j*(l_size+1)+i]=l[j*l_size+i]; d_l[j*l_size+i]=1; } else{ a_l[j*(l_size+1)+i]=0.01*l[j*l_size+i]; d_l[j*l_size+i]=0.01; } } } __global__ void softmax_GPU( double *l, double *a_l, int l_size, int batch_size){ unsigned int j = threadIdx.x + blockIdx.x*blockDim.x; if(j<batch_size){ double sume=0; for(int i=0;i<l_size;i++){ a_l[j*l_size+i]=exp(l[j*l_size+i]); sume+=a_l[j*l_size+i]; } for(int i=0;i<l_size;i++) a_l[j*l_size+i]/=sume; } } __global__ void error_check_GPU(double *Y, double *a_l, double *delta, double *d_l, double *error, double *loss, int output, int batch_size){ unsigned int j = threadIdx.x + blockIdx.x*blockDim.x; if(j<batch_size){ int y; loss[j]=0; error[j]=0; for(int i=0;i<output;i++){ d_l[j*output+i]=1.; y=(Y[j]-i)*(Y[j]-i); delta[j*output+i]=a_l[j*output+i]-y; loss[j]-=y*log(a_l[j*output+i]+0.00001); } //int wynik; 
if(a_l[j*output]>a_l[j*output+1]) error[j]+=1; error[j]+=Y[j]; if(error[j]==1) error[j]=0; else error[j]=1; } } __global__ void error_calculate_GPU(double *l2, double *l1, double *w, int l2_size, int l1_size, int batch_size){ unsigned int j = threadIdx.x; unsigned int i = blockIdx.x; if(i<l1_size){ l1[j*l1_size+i]=0; for(int k=0;k<l2_size;k++) l1[j*l1_size+i]+=l2[j*l2_size+k]*w[i*l2_size+k]; } } __global__ void set_GPU(double *w, int l1_size, int l2_size, double d){ unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; int n=l1_size*l2_size; for(int i=0; i<n; i+=gridDim.x*blockDim.x){ if(i+index<n){ w[i+index]=d; } } } __global__ void gradient_calculate_GPU(double *a_l1, double *w, double *delta, double *d_l2, int l1_size, int l2_size, int batch_size){ unsigned int i = threadIdx.x; unsigned int k = blockIdx.x; if(i<l1_size){ if(k<l2_size){ w[i*l2_size+k]=0; for(int j=0;j<batch_size;j++){ double update=a_l1[j*l1_size+i]*delta[j*l2_size+k]*d_l2[j*l2_size+k]; if(isnan(update)==0) w[i*l2_size+k]+=update; //w[i*l2_size+k]+=a_l1[j*l1_size+i]*delta[j*l2_size+k]*d_l2[j*l2_size+k]; } w[i*l2_size+k]/=batch_size; } } } __global__ void normal_gradient_update_GPU(double *w, double *w_g, int l1_size, int l2_size, double learning_rate){ unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; int n=l1_size*l2_size; for(int i=0; i<n; i+=gridDim.x*blockDim.x){ if(i+index<n){ w[i+index]-=w_g[i+index]*learning_rate; //if(w[i+index]>2) w[i+index]=2.; //if(w[i+index]<-2) w[i+index]=-2.; } } } __global__ void momentum_update_GPU(double *w, double *w_g, double *w_g_old, int l1_size, int l2_size, double learning_rate){ unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; int n=l1_size*l2_size; for(int i=0; i<n; i+=gridDim.x*blockDim.x){ if(i+index<n){ double v=0; v=0.8*w_g_old[i+index]+learning_rate*w_g[i+index]; //if(v>1) v=1; //if(v<-1) v=-1; w[i+index]-=v; //if(w[i+index]>2) w[i+index]=2.; //if(w[i+index]<-2) w[i+index]=-2.; w_g_old[i+index]=v; } } } __global__ void adagrad_update_GPU(double *w, double *w_g, double *w_g_old, int l1_size, int l2_size, double learning_rate){ unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; int n=l1_size*l2_size; for(int i=0; i<n; i+=gridDim.x*blockDim.x){ if(i+index<n){ w_g_old[i+index]+=w_g[i+index]*w_g[i+index]; double v=0; v=learning_rate*w_g[i+index]/(sqrt(w_g_old[i+index]+0.000001)); //if(v>1) v=1; //if(v<-1) v=-1; w[i+index]-=v; //if(w[i+index]>2) w[i+index]=2.; //if(w[i+index]<-2) w[i+index]=-2.; } } } __global__ void RMSprop_update_GPU(double *w, double *w_g, double *w_g_old, int l1_size, int l2_size, double learning_rate){ unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; int n=l1_size*l2_size; for(int i=0; i<n; i+=gridDim.x*blockDim.x){ if(i+index<n){ w_g_old[i+index]=0.1*(w_g[i+index]*w_g[i+index])+0.9*w_g_old[i+index]; w[i+index]-=w_g[i+index]*learning_rate/(sqrt(w_g_old[i+index]+0.000001)); if(w[i+index]>2) w[i+index]=2.; if(w[i+index]<-2) w[i+index]=-2.; } } } __global__ void adam_update_GPU(double *w, double *w_g, double *w_g_old, double *w_g_old2, int l1_size, int l2_size, double learning_rate){ double B1=0.9; double B2=0.999; double m; double v; unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; int n=l1_size*l2_size; for(int i=0; i<n; i+=gridDim.x*blockDim.x){ if(i+index<n){ w_g_old[i+index]=B1*w_g_old[i+index]+(1-B1)*w_g[i+index]; w_g_old2[i+index]=B2*w_g_old2[i+index]+(1-B2)*w_g[i+index]*w_g[i+index]; m=w_g_old[i+index]/(1-B1); v=w_g_old2[i+index]/(1-B2); w[i+index]-=m*learning_rate/(sqrt(v+0.000001)); //if(w[i+index]>2) 
w[i+index]=2.; //if(w[i+index]<-2) w[i+index]=-2.; } } }
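adam_update_GPU above divides the moment estimates by the constants (1-B1) and (1-B2); textbook Adam bias correction instead uses the step count t, dividing by (1 - beta^t) so early steps are corrected more strongly. A sketch of the time-dependent form; the kernel name and the extra t parameter are assumptions for illustration:

__global__ void adam_update_biascorr_GPU(double *w, double *w_g, double *w_g_old, double *w_g_old2, int l1_size, int l2_size, double learning_rate, int t){
    const double B1=0.9, B2=0.999;
    unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
    int n=l1_size*l2_size;
    for(int i=0; i<n; i+=gridDim.x*blockDim.x){
        if(i+index<n){
            w_g_old[i+index]=B1*w_g_old[i+index]+(1-B1)*w_g[i+index];
            w_g_old2[i+index]=B2*w_g_old2[i+index]+(1-B2)*w_g[i+index]*w_g[i+index];
            double m=w_g_old[i+index]/(1-pow(B1,(double)t));   // bias-corrected 1st moment
            double v=w_g_old2[i+index]/(1-pow(B2,(double)t));  // bias-corrected 2nd moment
            w[i+index]-=m*learning_rate/(sqrt(v)+0.000001);
        }
    }
}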
c8da72f07e21673706c5665bd86ac58b802f6e83.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <iomanip> #include <vector> #include <sstream> #include <cmath> #include <mpi.h> //activate mpi #include "dg/algorithm.h" #include "dg/backend/timer.cuh" #include "dg/backend/xspacelib.cuh" #include "dg/backend/interpolation.cuh" #include "netcdf_par.h" //exclude if par netcdf=OFF #include "file/nc_utilities.h" #include "asela.cuh" /* - the only difference to the asela_hpc.cu file is that this program uses the MPI backend and the parallel netcdf output - pay attention that both the grid dimensions as well as the output dimensions must be divisible by the mpi process numbers */ int main( int argc, char* argv[]) { ////////////////////////////////setup MPI/////////////////////////////// int provided; MPI_Init_thread( &argc, &argv, MPI_THREAD_FUNNELED, &provided); if( provided != MPI_THREAD_FUNNELED) { std::cerr << "wrong mpi-thread environment provided!\n"; return -1; } int periods[3] = {false, false, true}; //non-, non-, periodic int rank, size; MPI_Comm_rank( MPI_COMM_WORLD, &rank); MPI_Comm_size( MPI_COMM_WORLD, &size); #if THRUST_DEVICE_SYSTEM==THRUST_DEVICE_SYSTEM_CUDA int num_devices=0; hipGetDeviceCount(&num_devices); if(num_devices==0){std::cerr << "No CUDA capable devices found"<<std::endl; return -1;} int device = rank % num_devices; //assume # of gpus/node is fixed hipSetDevice( device); #endif//cuda int np[3]; if(rank==0) { std::cin>> np[0] >> np[1] >>np[2]; std::cout << "Computing with "<<np[0]<<" x "<<np[1]<<" x "<<np[2] << " = "<<size<<std::endl; assert( size == np[0]*np[1]*np[2]); } MPI_Bcast( np, 3, MPI_INT, 0, MPI_COMM_WORLD); MPI_Comm comm; MPI_Cart_create( MPI_COMM_WORLD, 3, np, periods, true, &comm); ////////////////////////Parameter initialisation////////////////////////// Json::Reader reader; Json::Value js, gs; if( argc != 4) { if(rank==0)std::cerr << "ERROR: Wrong number of arguments!\nUsage: "<< argv[0]<<" [inputfile] [geomfile] [outputfile]\n"; return -1; } else { std::ifstream is(argv[1]); std::ifstream ks(argv[2]); reader.parse(is,js,false); reader.parse(ks,gs,false); } const asela::Parameters p( js); const dg::geo::solovev::Parameters gp(gs); if(rank==0)p.display( std::cout); if(rank==0)gp.display( std::cout); std::string input = js.toStyledString(), geom = gs.toStyledString(); ////////////////////////////////set up computations/////////////////////////// double Rmin=gp.R_0-p.boxscaleRm*gp.a; double Zmin=-p.boxscaleZm*gp.a*gp.elongation; double Rmax=gp.R_0+p.boxscaleRp*gp.a; double Zmax=p.boxscaleZp*gp.a*gp.elongation; //Make grids dg::CylindricalMPIGrid3d grid( Rmin,Rmax, Zmin,Zmax, 0, 2.*M_PI, p.n, p.Nx, p.Ny, p.Nz, p.bc, p.bc, dg::PER, comm); dg::CylindricalMPIGrid3d grid_out( Rmin,Rmax, Zmin,Zmax, 0, 2.*M_PI, p.n_out, p.Nx_out, p.Ny_out, p.Nz_out, p.bc, p.bc, dg::PER, comm); //create RHS if(rank==0)std::cout << "Constructing Asela...\n"; asela::Asela<dg::CylindricalMPIGrid3d, dg::MIDMatrix, dg::MDMatrix, dg::MDVec> asela( grid, p, gp); //initialize before rolkar! 
if(rank==0)std::cout << "Constructing Implicit...\n"; asela::Implicit< dg::CylindricalMPIGrid3d, dg::MIDMatrix, dg::MDMatrix, dg::MDVec > rolkar( grid, p, gp, asela.ds(), asela.dsDIR()); if(rank==0)std::cout << "Done!\n"; /////////////////////The initial field///////////////////////////////////////// //background profile dg::geo::Nprofile prof(p.bgprofamp, p.nprofileamp, gp, dg::geo::solovev::Psip(gp)); //initial background profile std::vector<dg::MDVec> y0(4, dg::evaluate( prof, grid)), y1(y0); //perturbation dg::GaussianZ gaussianZ( 0., p.sigma_z*M_PI, 1); //modulation along fieldline if( p.mode == 0 || p.mode == 1) { dg::Gaussian init0( gp.R_0+p.posX*gp.a, p.posY*gp.a, p.sigma, p.sigma, p.amp); if( p.mode == 0) y1[1] = asela.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 3); //rounds =3 ->2*3-1 if( p.mode == 1) y1[1] = asela.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 1); //rounds =1 ->2*1-1 } if( p.mode == 2) { dg::BathRZ init0(16,16,p.Nz,Rmin,Zmin, 30.,5.,p.amp); y1[1] = asela.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 1); } if( p.mode == 3) { dg::geo::ZonalFlow init0(p.amp, p.k_psi, gp, dg::geo::solovev::Psip(gp)); y1[1] = asela.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 1); } dg::blas1::axpby( 1., y1[1], 1., y0[1]); //sum up background and perturbation dg::blas1::plus(y0[1], -1); //initialize ni-1 if( p.mode == 2 || p.mode == 3) { dg::MDVec damping = dg::evaluate( dg::geo::GaussianProfXDamping(dg::geo::solovev::Psip(gp), gp), grid); dg::blas1::pointwiseDot(damping, y0[1], y0[1]); //damp with gaussprofdamp } std::cout << "intiialize ne" << std::endl; if( p.initcond == 0) asela.initializene( y0[1], y0[0]); if( p.initcond == 1) dg::blas1::axpby( 1., y0[1], 0.,y0[0], y0[0]); //set n_e = N_i std::cout << "Done!\n"; dg::blas1::axpby( 0., y0[2], 0., y0[2]); //set Ue = 0 dg::blas1::axpby( 0., y0[3], 0., y0[3]); //set Ui = 0 dg::Karniadakis< std::vector<dg::MDVec> > karniadakis( y0, y0[0].size(), p.eps_time); karniadakis.init( asela, rolkar, y0, p.dt); /////////////////////////////set up netcdf///////////////////////////////// file::NC_Error_Handle err; int ncid; MPI_Info info = MPI_INFO_NULL; err = nc_create_par( argv[3], NC_NETCDF4|NC_MPIIO|NC_CLOBBER, comm, info, &ncid); //MPI ON // err = nc_create( argv[3],NC_NETCDF4|NC_CLOBBER, &ncid);//MPI OFF err = nc_put_att_text( ncid, NC_GLOBAL, "inputfile", input.size(), input.data()); err = nc_put_att_text( ncid, NC_GLOBAL, "geomfile", geom.size(), geom.data()); int dimids[4], tvarID; { err = file::define_dimensions( ncid, dimids, &tvarID, grid_out.global()); dg::geo::TokamakMagneticField c = dg::geo::createSolovevField(gp); dg::geo::FieldR fieldR(c); dg::geo::FieldZ fieldZ(c); dg::geo::FieldP fieldP(c); dg::HVec vecR = dg::evaluate( fieldR, grid_out.global()); dg::HVec vecZ = dg::evaluate( fieldZ, grid_out.global()); dg::HVec vecP = dg::evaluate( fieldP, grid_out.global()); int vecID[3]; err = nc_def_var( ncid, "BR", NC_DOUBLE, 3, &dimids[1], &vecID[0]); err = nc_def_var( ncid, "BZ", NC_DOUBLE, 3, &dimids[1], &vecID[1]); err = nc_def_var( ncid, "BP", NC_DOUBLE, 3, &dimids[1], &vecID[2]); err = nc_enddef( ncid); err = nc_put_var_double( ncid, vecID[0], vecR.data()); err = nc_put_var_double( ncid, vecID[1], vecZ.data()); err = nc_put_var_double( ncid, vecID[2], vecP.data()); err = nc_redef(ncid); } //field IDs std::string names[6] = {"electrons", "ions", "Ue", "Ui", "potential","Aparallel"}; int dataIDs[6]; //VARIABLE IDS for( unsigned i=0; i<6; i++) err = 
nc_def_var( ncid, names[i].data(), NC_DOUBLE, 4, dimids, &dataIDs[i]); //energy IDs int EtimeID, EtimevarID; err = file::define_time( ncid, "energy_time", &EtimeID, &EtimevarID); int energyID, massID, energyIDs[6], dissID, alignedID, dEdtID, accuracyID; err = nc_def_var( ncid, "energy", NC_DOUBLE, 1, &EtimeID, &energyID); err = nc_def_var( ncid, "mass", NC_DOUBLE, 1, &EtimeID, &massID); std::string energies[6] = {"Se", "Si", "Uperp", "Upare", "Upari","Uapar"}; for( unsigned i=0; i<6; i++) err = nc_def_var( ncid, energies[i].data(), NC_DOUBLE, 1, &EtimeID, &energyIDs[i]); err = nc_def_var( ncid, "dissipation", NC_DOUBLE, 1, &EtimeID, &dissID); err = nc_def_var( ncid, "alignment", NC_DOUBLE, 1, &EtimeID, &alignedID); err = nc_def_var( ncid, "dEdt", NC_DOUBLE, 1, &EtimeID, &dEdtID); err = nc_def_var( ncid, "accuracy", NC_DOUBLE, 1, &EtimeID, &accuracyID); //probe vars definition int NepID,phipID; err = nc_def_var( ncid, "Ne_p", NC_DOUBLE, 1, &EtimeID, &NepID); err = nc_def_var( ncid, "phi_p", NC_DOUBLE, 1, &EtimeID, &phipID); for(unsigned i=0; i<6; i++) { err = nc_var_par_access( ncid, energyIDs[i], NC_COLLECTIVE); err = nc_var_par_access( ncid, dataIDs[i], NC_COLLECTIVE); } err = nc_var_par_access( ncid, tvarID, NC_COLLECTIVE); err = nc_var_par_access( ncid, EtimevarID, NC_COLLECTIVE); err = nc_var_par_access( ncid, energyID, NC_COLLECTIVE); err = nc_var_par_access( ncid, massID, NC_COLLECTIVE); err = nc_var_par_access( ncid, dissID, NC_COLLECTIVE); err = nc_var_par_access( ncid, alignedID, NC_COLLECTIVE); err = nc_var_par_access( ncid, dEdtID, NC_COLLECTIVE); err = nc_var_par_access( ncid, accuracyID, NC_COLLECTIVE); err = nc_var_par_access( ncid, NepID, NC_COLLECTIVE); err = nc_var_par_access( ncid, phipID, NC_COLLECTIVE); err = nc_enddef(ncid); ///////////////////////////////////PROBE////////////////////////////// const dg::HVec Xprobe(1,gp.R_0+p.boxscaleRp*gp.a); const dg::HVec Zprobe(1,0.); const dg::HVec Phiprobe(1,M_PI); dg::IDMatrix probeinterp; int probeRANK = grid.pidOf( Xprobe[0], Zprobe[0], Phiprobe[0]); if(rank==probeRANK) probeinterp=dg::create::interpolation( Xprobe,Zprobe,Phiprobe,grid.local(), dg::NEU); dg::DVec probevalue(1,0.); ///////////////////////////first output///////////////////////////////// if(rank==0)std::cout << "First output ... 
\n"; int dims[3], coords[3]; MPI_Cart_get( comm, 3, dims, periods, coords); size_t count[4] = {1, grid_out.local().Nz(), grid_out.n()*(grid_out.local().Ny()), grid_out.n()*(grid_out.local().Nx())}; size_t start[4] = {0, coords[2]*count[1], coords[1]*count[2], coords[0]*count[3]}; dg::MDVec transfer( dg::evaluate(dg::zero, grid)); dg::DVec transferD( dg::evaluate(dg::zero, grid_out.local())); dg::HVec transferH( dg::evaluate(dg::zero, grid_out.local())); dg::IDMatrix interpolate = dg::create::interpolation( grid_out.local(), grid.local()); //create local interpolation matrix for( unsigned i=0; i<2; i++) { dg::blas2::gemv( interpolate, y0[i].data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[i], start, count, transferH.data() ); } transfer = asela.uparallel()[0]; dg::blas2::gemv( interpolate, transfer.data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[2], start, count, transferH.data() ); transfer = asela.uparallel()[1]; dg::blas2::gemv( interpolate, transfer.data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[3], start, count, transferH.data() ); transfer = asela.potential()[0]; dg::blas2::gemv( interpolate, transfer.data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[4], start, count, transferH.data() ); transfer = asela.aparallel(); dg::blas2::gemv( interpolate, transfer.data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[5], start, count, transferH.data() ); double time = 0; err = nc_put_vara_double( ncid, tvarID, start, count, &time); err = nc_put_vara_double( ncid, EtimevarID, start, count, &time); size_t Estart[] = {0}; size_t Ecount[] = {1}; double energy0 = asela.energy(), mass0 = asela.mass(), E0 = energy0, mass = mass0, E1 = 0.0, dEdt = 0., diss = 0., aligned=0, accuracy=0.; std::vector<double> evec = asela.energy_vector(); err = nc_put_vara_double( ncid, energyID, Estart, Ecount, &energy0); err = nc_put_vara_double( ncid, massID, Estart, Ecount, &mass0); for( unsigned i=0; i<6; i++) err = nc_put_vara_double( ncid, energyIDs[i], Estart, Ecount, &evec[i]); err = nc_put_vara_double( ncid, dissID, Estart, Ecount,&diss); err = nc_put_vara_double( ncid, alignedID, Estart, Ecount,&aligned); err = nc_put_vara_double( ncid, dEdtID, Estart, Ecount,&dEdt); err = nc_put_vara_double( ncid, accuracyID, Estart, Ecount,&accuracy); //probe double Nep=0, phip=0; if(rank==probeRANK) { dg::blas2::gemv(probeinterp,y0[0].data(),probevalue); Nep=probevalue[0] ; dg::blas2::gemv(probeinterp,asela.potential()[0].data(),probevalue); phip=probevalue[0] ; } MPI_Bcast( &Nep,1 , MPI_DOUBLE, probeRANK, grid.communicator()); MPI_Bcast( &phip,1 , MPI_DOUBLE, probeRANK, grid.communicator()); err = nc_put_vara_double( ncid, NepID, Estart, Ecount,&Nep); err = nc_put_vara_double( ncid, phipID, Estart, Ecount,&phip); if(rank==0)std::cout << "First write successful!\n"; ///////////////////////////////////////Timeloop///////////////////////////////// dg::Timer t; t.tic(); #ifdef DG_BENCHMARK unsigned step = 0; #endif //DG_BENCHMARK for( unsigned i=1; i<=p.maxout; i++) { #ifdef DG_BENCHMARK dg::Timer ti; ti.tic(); #endif//DG_BENCHMARK for( unsigned j=0; j<p.itstp; j++) { try{ karniadakis( asela, rolkar, y0);} catch( dg::Fail& fail) { if(rank==0)std::cerr << "CG failed to converge to "<<fail.epsilon()<<"\n"; if(rank==0)std::cerr << "Does Simulation respect CFL 
condition?"<<std::endl; err = nc_close(ncid); MPI_Finalize(); return -1; } step++; time+=p.dt; Estart[0] = step; E1 = asela.energy(), mass = asela.mass(), diss = asela.energy_diffusion(); dEdt = (E1 - E0)/p.dt; E0 = E1; accuracy = 2.*fabs( (dEdt-diss)/(dEdt + diss)); evec = asela.energy_vector(); err = nc_put_vara_double( ncid, EtimevarID, Estart, Ecount, &time); err = nc_put_vara_double( ncid, energyID, Estart, Ecount, &E1); err = nc_put_vara_double( ncid, massID, Estart, Ecount, &mass); for( unsigned i=0; i<6; i++) err = nc_put_vara_double( ncid, energyIDs[i], Estart, Ecount, &evec[i]); err = nc_put_vara_double( ncid, dissID, Estart, Ecount,&diss); err = nc_put_vara_double( ncid, alignedID, Estart, Ecount,&aligned); err = nc_put_vara_double( ncid, dEdtID, Estart, Ecount,&dEdt); err = nc_put_vara_double( ncid, accuracyID, Estart, Ecount,&accuracy); if(rank==probeRANK) { dg::blas2::gemv(probeinterp,y0[0].data(),probevalue); Nep= probevalue[0] ; dg::blas2::gemv(probeinterp,asela.potential()[0].data(),probevalue); phip=probevalue[0] ; } MPI_Bcast( &Nep, 1 ,MPI_DOUBLE, probeRANK, grid.communicator()); MPI_Bcast( &phip,1 ,MPI_DOUBLE, probeRANK, grid.communicator()); err = nc_put_vara_double( ncid, NepID, Estart, Ecount,&Nep); err = nc_put_vara_double( ncid, phipID, Estart, Ecount,&phip); if(rank==0)std::cout << "(m_tot-m_0)/m_0: "<< (asela.mass()-mass0)/mass0<<"\t"; if(rank==0)std::cout << "(E_tot-E_0)/E_0: "<< (E1-energy0)/energy0<<"\t"; if(rank==0)std::cout <<" d E/dt = " << dEdt <<" Lambda = " << diss << " -> Accuracy: "<< accuracy << "\n"; } #ifdef DG_BENCHMARK ti.toc(); if(rank==0)std::cout << "\n\t Step "<<step <<" of "<<p.itstp*p.maxout <<" at time "<<time; if(rank==0)std::cout << "\n\t Average time for one step: "<<ti.diff()/(double)p.itstp<<"s"; ti.tic(); #endif//DG_BENCHMARK //err = nc_open_par( argv[3], NC_WRITE|NC_MPIIO, comm, info, &ncid); //dont do it //////////////////////////write fields//////////////////////// start[0] = i; for( unsigned j=0; j<2; j++) { dg::blas2::gemv( interpolate, y0[j].data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[j], start, count, transferH.data()); } transfer = asela.uparallel()[0]; dg::blas2::gemv( interpolate, transfer.data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[2], start, count, transferH.data() ); transfer = asela.uparallel()[1]; dg::blas2::gemv( interpolate, transfer.data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[3], start, count, transferH.data() ); transfer = asela.potential()[0]; dg::blas2::gemv( interpolate, transfer.data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[4], start, count, transferH.data() ); transfer = asela.aparallel(); dg::blas2::gemv( interpolate, transfer.data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[5], start, count, transferH.data() ); err = nc_put_vara_double( ncid, tvarID, start, count, &time); //err = nc_close(ncid); DONT DO IT! 
#ifdef DG_BENCHMARK ti.toc(); if(rank==0)std::cout << "\n\t Time for output: "<<ti.diff()<<"s\n\n"<<std::flush; #endif//DG_BENCHMARK } t.toc(); unsigned hour = (unsigned)floor(t.diff()/3600); unsigned minute = (unsigned)floor( (t.diff() - hour*3600)/60); double second = t.diff() - hour*3600 - minute*60; if(rank==0)std::cout << std::fixed << std::setprecision(2) <<std::setfill('0'); if(rank==0)std::cout <<"Computation Time \t"<<hour<<":"<<std::setw(2)<<minute<<":"<<second<<"\n"; if(rank==0)std::cout <<"which is \t"<<t.diff()/p.itstp/p.maxout<<"s/step\n"; err = nc_close(ncid); MPI_Finalize(); return 0; }
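
The count/start hyperslab arithmetic in the first-output section above is what makes the collective netCDF writes collision-free: each rank writes exactly its local (time, z, y, x) block of the global output array, which is also why the file insists that the output dimensions be divisible by the process counts. A minimal standalone sketch of that arithmetic, with hypothetical sizes (polynomial order n=3, a 24 x 24 x 8 global grid on a 2 x 2 x 2 Cartesian process grid) and no MPI or netCDF calls:

#include <cstdio>

int main()
{
    // hypothetical global output grid and process decomposition
    const int n = 3, Nx = 24, Ny = 24, Nz = 8; // cells per dimension
    const int np[3] = {2, 2, 2};               // process grid (x, y, z)
    for( int cz = 0; cz < np[2]; cz++)
    for( int cy = 0; cy < np[1]; cy++)
    for( int cx = 0; cx < np[0]; cx++)
    {
        // local extent per rank, ordered (time, z, y, x) as in the code above
        size_t count[4] = {1, (size_t)(Nz/np[2]), (size_t)(n*Ny/np[1]), (size_t)(n*Nx/np[0])};
        size_t start[4] = {0, cz*count[1], cy*count[2], cx*count[3]};
        printf( "coords (%d,%d,%d): start = (%zu,%zu,%zu,%zu), count = (%zu,%zu,%zu,%zu)\n",
                cx, cy, cz, start[0], start[1], start[2], start[3],
                count[0], count[1], count[2], count[3]);
    }
    return 0;
}

Since start is always the Cartesian coordinate times the local count, the blocks tile the global array without overlap; if a dimension were not divisible by its process count, the last blocks would either overlap or leave gaps.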
c8da72f07e21673706c5665bd86ac58b802f6e83.cu
#include <iostream> #include <iomanip> #include <vector> #include <sstream> #include <cmath> #include <mpi.h> //activate mpi #include "dg/algorithm.h" #include "dg/backend/timer.cuh" #include "dg/backend/xspacelib.cuh" #include "dg/backend/interpolation.cuh" #include "netcdf_par.h" //exclude if par netcdf=OFF #include "file/nc_utilities.h" #include "asela.cuh" /* - the only difference to the asela_hpc.cu file is that this program uses the MPI backend and the parallel netcdf output - pay attention that both the grid dimensions as well as the output dimensions must be divisible by the mpi process numbers */ int main( int argc, char* argv[]) { ////////////////////////////////setup MPI/////////////////////////////// int provided; MPI_Init_thread( &argc, &argv, MPI_THREAD_FUNNELED, &provided); if( provided != MPI_THREAD_FUNNELED) { std::cerr << "wrong mpi-thread environment provided!\n"; return -1; } int periods[3] = {false, false, true}; //non-, non-, periodic int rank, size; MPI_Comm_rank( MPI_COMM_WORLD, &rank); MPI_Comm_size( MPI_COMM_WORLD, &size); #if THRUST_DEVICE_SYSTEM==THRUST_DEVICE_SYSTEM_CUDA int num_devices=0; cudaGetDeviceCount(&num_devices); if(num_devices==0){std::cerr << "No CUDA capable devices found"<<std::endl; return -1;} int device = rank % num_devices; //assume # of gpus/node is fixed cudaSetDevice( device); #endif//cuda int np[3]; if(rank==0) { std::cin>> np[0] >> np[1] >>np[2]; std::cout << "Computing with "<<np[0]<<" x "<<np[1]<<" x "<<np[2] << " = "<<size<<std::endl; assert( size == np[0]*np[1]*np[2]); } MPI_Bcast( np, 3, MPI_INT, 0, MPI_COMM_WORLD); MPI_Comm comm; MPI_Cart_create( MPI_COMM_WORLD, 3, np, periods, true, &comm); ////////////////////////Parameter initialisation////////////////////////// Json::Reader reader; Json::Value js, gs; if( argc != 4) { if(rank==0)std::cerr << "ERROR: Wrong number of arguments!\nUsage: "<< argv[0]<<" [inputfile] [geomfile] [outputfile]\n"; return -1; } else { std::ifstream is(argv[1]); std::ifstream ks(argv[2]); reader.parse(is,js,false); reader.parse(ks,gs,false); } const asela::Parameters p( js); const dg::geo::solovev::Parameters gp(gs); if(rank==0)p.display( std::cout); if(rank==0)gp.display( std::cout); std::string input = js.toStyledString(), geom = gs.toStyledString(); ////////////////////////////////set up computations/////////////////////////// double Rmin=gp.R_0-p.boxscaleRm*gp.a; double Zmin=-p.boxscaleZm*gp.a*gp.elongation; double Rmax=gp.R_0+p.boxscaleRp*gp.a; double Zmax=p.boxscaleZp*gp.a*gp.elongation; //Make grids dg::CylindricalMPIGrid3d grid( Rmin,Rmax, Zmin,Zmax, 0, 2.*M_PI, p.n, p.Nx, p.Ny, p.Nz, p.bc, p.bc, dg::PER, comm); dg::CylindricalMPIGrid3d grid_out( Rmin,Rmax, Zmin,Zmax, 0, 2.*M_PI, p.n_out, p.Nx_out, p.Ny_out, p.Nz_out, p.bc, p.bc, dg::PER, comm); //create RHS if(rank==0)std::cout << "Constructing Asela...\n"; asela::Asela<dg::CylindricalMPIGrid3d, dg::MIDMatrix, dg::MDMatrix, dg::MDVec> asela( grid, p, gp); //initialize before rolkar! 
if(rank==0)std::cout << "Constructing Implicit...\n"; asela::Implicit< dg::CylindricalMPIGrid3d, dg::MIDMatrix, dg::MDMatrix, dg::MDVec > rolkar( grid, p, gp, asela.ds(), asela.dsDIR()); if(rank==0)std::cout << "Done!\n"; /////////////////////The initial field///////////////////////////////////////// //background profile dg::geo::Nprofile prof(p.bgprofamp, p.nprofileamp, gp, dg::geo::solovev::Psip(gp)); //initial background profile std::vector<dg::MDVec> y0(4, dg::evaluate( prof, grid)), y1(y0); //perturbation dg::GaussianZ gaussianZ( 0., p.sigma_z*M_PI, 1); //modulation along fieldline if( p.mode == 0 || p.mode == 1) { dg::Gaussian init0( gp.R_0+p.posX*gp.a, p.posY*gp.a, p.sigma, p.sigma, p.amp); if( p.mode == 0) y1[1] = asela.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 3); //rounds =3 ->2*3-1 if( p.mode == 1) y1[1] = asela.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 1); //rounds =1 ->2*1-1 } if( p.mode == 2) { dg::BathRZ init0(16,16,p.Nz,Rmin,Zmin, 30.,5.,p.amp); y1[1] = asela.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 1); } if( p.mode == 3) { dg::geo::ZonalFlow init0(p.amp, p.k_psi, gp, dg::geo::solovev::Psip(gp)); y1[1] = asela.ds().fieldaligned().evaluate( init0, gaussianZ, (unsigned)p.Nz/2, 1); } dg::blas1::axpby( 1., y1[1], 1., y0[1]); //sum up background and perturbation dg::blas1::plus(y0[1], -1); //initialize ni-1 if( p.mode == 2 || p.mode == 3) { dg::MDVec damping = dg::evaluate( dg::geo::GaussianProfXDamping(dg::geo::solovev::Psip(gp), gp), grid); dg::blas1::pointwiseDot(damping, y0[1], y0[1]); //damp with gaussprofdamp } std::cout << "intiialize ne" << std::endl; if( p.initcond == 0) asela.initializene( y0[1], y0[0]); if( p.initcond == 1) dg::blas1::axpby( 1., y0[1], 0.,y0[0], y0[0]); //set n_e = N_i std::cout << "Done!\n"; dg::blas1::axpby( 0., y0[2], 0., y0[2]); //set Ue = 0 dg::blas1::axpby( 0., y0[3], 0., y0[3]); //set Ui = 0 dg::Karniadakis< std::vector<dg::MDVec> > karniadakis( y0, y0[0].size(), p.eps_time); karniadakis.init( asela, rolkar, y0, p.dt); /////////////////////////////set up netcdf///////////////////////////////// file::NC_Error_Handle err; int ncid; MPI_Info info = MPI_INFO_NULL; err = nc_create_par( argv[3], NC_NETCDF4|NC_MPIIO|NC_CLOBBER, comm, info, &ncid); //MPI ON // err = nc_create( argv[3],NC_NETCDF4|NC_CLOBBER, &ncid);//MPI OFF err = nc_put_att_text( ncid, NC_GLOBAL, "inputfile", input.size(), input.data()); err = nc_put_att_text( ncid, NC_GLOBAL, "geomfile", geom.size(), geom.data()); int dimids[4], tvarID; { err = file::define_dimensions( ncid, dimids, &tvarID, grid_out.global()); dg::geo::TokamakMagneticField c = dg::geo::createSolovevField(gp); dg::geo::FieldR fieldR(c); dg::geo::FieldZ fieldZ(c); dg::geo::FieldP fieldP(c); dg::HVec vecR = dg::evaluate( fieldR, grid_out.global()); dg::HVec vecZ = dg::evaluate( fieldZ, grid_out.global()); dg::HVec vecP = dg::evaluate( fieldP, grid_out.global()); int vecID[3]; err = nc_def_var( ncid, "BR", NC_DOUBLE, 3, &dimids[1], &vecID[0]); err = nc_def_var( ncid, "BZ", NC_DOUBLE, 3, &dimids[1], &vecID[1]); err = nc_def_var( ncid, "BP", NC_DOUBLE, 3, &dimids[1], &vecID[2]); err = nc_enddef( ncid); err = nc_put_var_double( ncid, vecID[0], vecR.data()); err = nc_put_var_double( ncid, vecID[1], vecZ.data()); err = nc_put_var_double( ncid, vecID[2], vecP.data()); err = nc_redef(ncid); } //field IDs std::string names[6] = {"electrons", "ions", "Ue", "Ui", "potential","Aparallel"}; int dataIDs[6]; //VARIABLE IDS for( unsigned i=0; i<6; i++) err = 
nc_def_var( ncid, names[i].data(), NC_DOUBLE, 4, dimids, &dataIDs[i]); //energy IDs int EtimeID, EtimevarID; err = file::define_time( ncid, "energy_time", &EtimeID, &EtimevarID); int energyID, massID, energyIDs[6], dissID, alignedID, dEdtID, accuracyID; err = nc_def_var( ncid, "energy", NC_DOUBLE, 1, &EtimeID, &energyID); err = nc_def_var( ncid, "mass", NC_DOUBLE, 1, &EtimeID, &massID); std::string energies[6] = {"Se", "Si", "Uperp", "Upare", "Upari","Uapar"}; for( unsigned i=0; i<6; i++) err = nc_def_var( ncid, energies[i].data(), NC_DOUBLE, 1, &EtimeID, &energyIDs[i]); err = nc_def_var( ncid, "dissipation", NC_DOUBLE, 1, &EtimeID, &dissID); err = nc_def_var( ncid, "alignment", NC_DOUBLE, 1, &EtimeID, &alignedID); err = nc_def_var( ncid, "dEdt", NC_DOUBLE, 1, &EtimeID, &dEdtID); err = nc_def_var( ncid, "accuracy", NC_DOUBLE, 1, &EtimeID, &accuracyID); //probe vars definition int NepID,phipID; err = nc_def_var( ncid, "Ne_p", NC_DOUBLE, 1, &EtimeID, &NepID); err = nc_def_var( ncid, "phi_p", NC_DOUBLE, 1, &EtimeID, &phipID); for(unsigned i=0; i<6; i++) { err = nc_var_par_access( ncid, energyIDs[i], NC_COLLECTIVE); err = nc_var_par_access( ncid, dataIDs[i], NC_COLLECTIVE); } err = nc_var_par_access( ncid, tvarID, NC_COLLECTIVE); err = nc_var_par_access( ncid, EtimevarID, NC_COLLECTIVE); err = nc_var_par_access( ncid, energyID, NC_COLLECTIVE); err = nc_var_par_access( ncid, massID, NC_COLLECTIVE); err = nc_var_par_access( ncid, dissID, NC_COLLECTIVE); err = nc_var_par_access( ncid, alignedID, NC_COLLECTIVE); err = nc_var_par_access( ncid, dEdtID, NC_COLLECTIVE); err = nc_var_par_access( ncid, accuracyID, NC_COLLECTIVE); err = nc_var_par_access( ncid, NepID, NC_COLLECTIVE); err = nc_var_par_access( ncid, phipID, NC_COLLECTIVE); err = nc_enddef(ncid); ///////////////////////////////////PROBE////////////////////////////// const dg::HVec Xprobe(1,gp.R_0+p.boxscaleRp*gp.a); const dg::HVec Zprobe(1,0.); const dg::HVec Phiprobe(1,M_PI); dg::IDMatrix probeinterp; int probeRANK = grid.pidOf( Xprobe[0], Zprobe[0], Phiprobe[0]); if(rank==probeRANK) probeinterp=dg::create::interpolation( Xprobe,Zprobe,Phiprobe,grid.local(), dg::NEU); dg::DVec probevalue(1,0.); ///////////////////////////first output///////////////////////////////// if(rank==0)std::cout << "First output ... 
\n"; int dims[3], coords[3]; MPI_Cart_get( comm, 3, dims, periods, coords); size_t count[4] = {1, grid_out.local().Nz(), grid_out.n()*(grid_out.local().Ny()), grid_out.n()*(grid_out.local().Nx())}; size_t start[4] = {0, coords[2]*count[1], coords[1]*count[2], coords[0]*count[3]}; dg::MDVec transfer( dg::evaluate(dg::zero, grid)); dg::DVec transferD( dg::evaluate(dg::zero, grid_out.local())); dg::HVec transferH( dg::evaluate(dg::zero, grid_out.local())); dg::IDMatrix interpolate = dg::create::interpolation( grid_out.local(), grid.local()); //create local interpolation matrix for( unsigned i=0; i<2; i++) { dg::blas2::gemv( interpolate, y0[i].data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[i], start, count, transferH.data() ); } transfer = asela.uparallel()[0]; dg::blas2::gemv( interpolate, transfer.data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[2], start, count, transferH.data() ); transfer = asela.uparallel()[1]; dg::blas2::gemv( interpolate, transfer.data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[3], start, count, transferH.data() ); transfer = asela.potential()[0]; dg::blas2::gemv( interpolate, transfer.data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[4], start, count, transferH.data() ); transfer = asela.aparallel(); dg::blas2::gemv( interpolate, transfer.data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[5], start, count, transferH.data() ); double time = 0; err = nc_put_vara_double( ncid, tvarID, start, count, &time); err = nc_put_vara_double( ncid, EtimevarID, start, count, &time); size_t Estart[] = {0}; size_t Ecount[] = {1}; double energy0 = asela.energy(), mass0 = asela.mass(), E0 = energy0, mass = mass0, E1 = 0.0, dEdt = 0., diss = 0., aligned=0, accuracy=0.; std::vector<double> evec = asela.energy_vector(); err = nc_put_vara_double( ncid, energyID, Estart, Ecount, &energy0); err = nc_put_vara_double( ncid, massID, Estart, Ecount, &mass0); for( unsigned i=0; i<6; i++) err = nc_put_vara_double( ncid, energyIDs[i], Estart, Ecount, &evec[i]); err = nc_put_vara_double( ncid, dissID, Estart, Ecount,&diss); err = nc_put_vara_double( ncid, alignedID, Estart, Ecount,&aligned); err = nc_put_vara_double( ncid, dEdtID, Estart, Ecount,&dEdt); err = nc_put_vara_double( ncid, accuracyID, Estart, Ecount,&accuracy); //probe double Nep=0, phip=0; if(rank==probeRANK) { dg::blas2::gemv(probeinterp,y0[0].data(),probevalue); Nep=probevalue[0] ; dg::blas2::gemv(probeinterp,asela.potential()[0].data(),probevalue); phip=probevalue[0] ; } MPI_Bcast( &Nep,1 , MPI_DOUBLE, probeRANK, grid.communicator()); MPI_Bcast( &phip,1 , MPI_DOUBLE, probeRANK, grid.communicator()); err = nc_put_vara_double( ncid, NepID, Estart, Ecount,&Nep); err = nc_put_vara_double( ncid, phipID, Estart, Ecount,&phip); if(rank==0)std::cout << "First write successful!\n"; ///////////////////////////////////////Timeloop///////////////////////////////// dg::Timer t; t.tic(); #ifdef DG_BENCHMARK unsigned step = 0; #endif //DG_BENCHMARK for( unsigned i=1; i<=p.maxout; i++) { #ifdef DG_BENCHMARK dg::Timer ti; ti.tic(); #endif//DG_BENCHMARK for( unsigned j=0; j<p.itstp; j++) { try{ karniadakis( asela, rolkar, y0);} catch( dg::Fail& fail) { if(rank==0)std::cerr << "CG failed to converge to "<<fail.epsilon()<<"\n"; if(rank==0)std::cerr << "Does Simulation respect CFL 
condition?"<<std::endl; err = nc_close(ncid); MPI_Finalize(); return -1; } step++; time+=p.dt; Estart[0] = step; E1 = asela.energy(), mass = asela.mass(), diss = asela.energy_diffusion(); dEdt = (E1 - E0)/p.dt; E0 = E1; accuracy = 2.*fabs( (dEdt-diss)/(dEdt + diss)); evec = asela.energy_vector(); err = nc_put_vara_double( ncid, EtimevarID, Estart, Ecount, &time); err = nc_put_vara_double( ncid, energyID, Estart, Ecount, &E1); err = nc_put_vara_double( ncid, massID, Estart, Ecount, &mass); for( unsigned i=0; i<6; i++) err = nc_put_vara_double( ncid, energyIDs[i], Estart, Ecount, &evec[i]); err = nc_put_vara_double( ncid, dissID, Estart, Ecount,&diss); err = nc_put_vara_double( ncid, alignedID, Estart, Ecount,&aligned); err = nc_put_vara_double( ncid, dEdtID, Estart, Ecount,&dEdt); err = nc_put_vara_double( ncid, accuracyID, Estart, Ecount,&accuracy); if(rank==probeRANK) { dg::blas2::gemv(probeinterp,y0[0].data(),probevalue); Nep= probevalue[0] ; dg::blas2::gemv(probeinterp,asela.potential()[0].data(),probevalue); phip=probevalue[0] ; } MPI_Bcast( &Nep, 1 ,MPI_DOUBLE, probeRANK, grid.communicator()); MPI_Bcast( &phip,1 ,MPI_DOUBLE, probeRANK, grid.communicator()); err = nc_put_vara_double( ncid, NepID, Estart, Ecount,&Nep); err = nc_put_vara_double( ncid, phipID, Estart, Ecount,&phip); if(rank==0)std::cout << "(m_tot-m_0)/m_0: "<< (asela.mass()-mass0)/mass0<<"\t"; if(rank==0)std::cout << "(E_tot-E_0)/E_0: "<< (E1-energy0)/energy0<<"\t"; if(rank==0)std::cout <<" d E/dt = " << dEdt <<" Lambda = " << diss << " -> Accuracy: "<< accuracy << "\n"; } #ifdef DG_BENCHMARK ti.toc(); if(rank==0)std::cout << "\n\t Step "<<step <<" of "<<p.itstp*p.maxout <<" at time "<<time; if(rank==0)std::cout << "\n\t Average time for one step: "<<ti.diff()/(double)p.itstp<<"s"; ti.tic(); #endif//DG_BENCHMARK //err = nc_open_par( argv[3], NC_WRITE|NC_MPIIO, comm, info, &ncid); //dont do it //////////////////////////write fields//////////////////////// start[0] = i; for( unsigned j=0; j<2; j++) { dg::blas2::gemv( interpolate, y0[j].data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[j], start, count, transferH.data()); } transfer = asela.uparallel()[0]; dg::blas2::gemv( interpolate, transfer.data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[2], start, count, transferH.data() ); transfer = asela.uparallel()[1]; dg::blas2::gemv( interpolate, transfer.data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[3], start, count, transferH.data() ); transfer = asela.potential()[0]; dg::blas2::gemv( interpolate, transfer.data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[4], start, count, transferH.data() ); transfer = asela.aparallel(); dg::blas2::gemv( interpolate, transfer.data(), transferD); dg::blas1::transfer( transferD, transferH); err = nc_put_vara_double( ncid, dataIDs[5], start, count, transferH.data() ); err = nc_put_vara_double( ncid, tvarID, start, count, &time); //err = nc_close(ncid); DONT DO IT! 
#ifdef DG_BENCHMARK ti.toc(); if(rank==0)std::cout << "\n\t Time for output: "<<ti.diff()<<"s\n\n"<<std::flush; #endif//DG_BENCHMARK } t.toc(); unsigned hour = (unsigned)floor(t.diff()/3600); unsigned minute = (unsigned)floor( (t.diff() - hour*3600)/60); double second = t.diff() - hour*3600 - minute*60; if(rank==0)std::cout << std::fixed << std::setprecision(2) <<std::setfill('0'); if(rank==0)std::cout <<"Computation Time \t"<<hour<<":"<<std::setw(2)<<minute<<":"<<second<<"\n"; if(rank==0)std::cout <<"which is \t"<<t.diff()/p.itstp/p.maxout<<"s/step\n"; err = nc_close(ncid); MPI_Finalize(); return 0; }
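
The device selection at the top of both copies (device = rank % num_devices) assumes that MPI ranks are packed consecutively onto each node, as the comment in the code states. A self-contained sketch of that logic, with a plain integer standing in for MPI_Comm_rank:

#include <cuda_runtime.h>
#include <cstdio>

// Returns the device chosen for this rank, or -1 if no GPU is visible.
int select_device( int rank)
{
    int num_devices = 0;
    cudaGetDeviceCount( &num_devices);
    if( num_devices == 0) return -1;
    int device = rank % num_devices; // round-robin: rank k -> GPU (k mod #GPUs)
    cudaSetDevice( device);
    return device;
}

int main()
{
    // rank 3 on a node with 2 visible GPUs lands on device 1
    printf( "rank 3 -> device %d\n", select_device(3));
    return 0;
}

If the launcher scatters ranks round-robin across nodes instead, the global rank is the wrong key; a node-local rank obtained from MPI_Comm_split_type with MPI_COMM_TYPE_SHARED is the robust alternative.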
0f12fe09857df1d157dc6e6d0ab1d2ee982107a5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "test_utils.h" #include <common/grid_sync.cuh> #include <gtest/gtest.h> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> namespace MLCommon { __global__ void gridSyncTestKernel(void* workspace, int* out, SyncType type) { GridSync gs(workspace, type, true); bool master; int updatePosition; if (type == ACROSS_ALL) { master = threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 && blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0; updatePosition = 0; } else { master = threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 && blockIdx.x == 0; updatePosition = blockIdx.y + blockIdx.z * gridDim.y; } if (master) { out[updatePosition] = 1; __threadfence(); } gs.sync(); int val = out[updatePosition]; // make sure everybody has read the updated value! gs.sync(); raft::myAtomicAdd(out + updatePosition, val); } struct GridSyncInputs { dim3 gridDim, blockDim; bool checkWorkspaceReuse; SyncType type; }; void gridSyncTest(int* out, int* out1, const GridSyncInputs& params, hipStream_t stream) { size_t workspaceSize = GridSync::computeWorkspaceSize(params.gridDim, params.type, true); rmm::device_uvector<char> workspace(workspaceSize, stream); RAFT_CUDA_TRY(hipMemset(workspace.data(), 0, workspace.size())); hipLaunchKernelGGL(( gridSyncTestKernel), dim3(params.gridDim), dim3(params.blockDim), 0, 0, workspace.data(), out, params.type); RAFT_CUDA_TRY(hipPeekAtLastError()); if (params.checkWorkspaceReuse) { RAFT_CUDA_TRY(hipDeviceSynchronize()); hipLaunchKernelGGL(( gridSyncTestKernel), dim3(params.gridDim), dim3(params.blockDim), 0, 0, workspace.data(), out1, params.type); RAFT_CUDA_TRY(hipPeekAtLastError()); } } ::std::ostream& operator<<(::std::ostream& os, const GridSyncInputs& dims) { return os; } class GridSyncTest : public ::testing::TestWithParam<GridSyncInputs> { protected: GridSyncTest() : out(0, stream), out1(0, stream) {} void SetUp() override { params = ::testing::TestWithParam<GridSyncInputs>::GetParam(); size_t len = computeOutLen(); RAFT_CUDA_TRY(hipStreamCreate(&stream)); out.resize(len, stream); out1.resize(len, stream); gridSyncTest(out.data(), out1.data(), params, stream); } size_t computeOutLen() const { size_t len; if (params.type == ACROSS_ALL) { len = 1; } else { len = params.gridDim.y * params.gridDim.z; } return len; } protected: hipStream_t stream = 0; GridSyncInputs params; rmm::device_uvector<int> out, out1; }; const std::vector<GridSyncInputs> inputs = { {{2, 1, 1}, {32, 1, 1}, false, ACROSS_ALL}, {{2, 1, 1}, {32, 2, 1}, false, ACROSS_ALL}, {{2, 1, 1}, {32, 2, 4}, false, ACROSS_ALL}, {{2, 1, 1}, {32, 1, 1}, true, ACROSS_ALL}, {{2, 1, 1}, {32, 2, 1}, true, ACROSS_ALL}, {{2, 1, 1}, {32, 2, 4}, true, ACROSS_ALL}, {{2, 1, 1}, {32, 1, 1}, false, ACROSS_X}, {{2, 2, 1}, {32, 1, 1}, false, ACROSS_X}, {{2, 2, 2}, {32, 1, 1}, false, ACROSS_X}, {{2, 1, 1}, {32, 2, 1}, false, 
ACROSS_X}, {{2, 2, 1}, {32, 2, 1}, false, ACROSS_X}, {{2, 2, 2}, {32, 2, 1}, false, ACROSS_X}, {{2, 1, 1}, {32, 2, 4}, false, ACROSS_X}, {{2, 2, 1}, {32, 2, 4}, false, ACROSS_X}, {{2, 2, 2}, {32, 2, 4}, false, ACROSS_X}, {{32, 256, 1}, {1, 1, 1}, false, ACROSS_X}, {{2, 1, 1}, {32, 1, 1}, true, ACROSS_X}, {{2, 2, 1}, {32, 1, 1}, true, ACROSS_X}, {{2, 2, 2}, {32, 1, 1}, true, ACROSS_X}, {{2, 1, 1}, {32, 2, 1}, true, ACROSS_X}, {{2, 2, 1}, {32, 2, 1}, true, ACROSS_X}, {{2, 2, 2}, {32, 2, 1}, true, ACROSS_X}, {{2, 1, 1}, {32, 2, 4}, true, ACROSS_X}, {{2, 2, 1}, {32, 2, 4}, true, ACROSS_X}, {{2, 2, 2}, {32, 2, 4}, true, ACROSS_X}, {{32, 256, 1}, {1, 1, 1}, true, ACROSS_X}}; TEST_P(GridSyncTest, Result) { size_t len = computeOutLen(); // number of blocks raft::myAtomicAdd'ing the same location int nblks = params.type == ACROSS_X ? params.gridDim.x : params.gridDim.x * params.gridDim.y * params.gridDim.z; int nthreads = params.blockDim.x * params.blockDim.y * params.blockDim.z; int expected = (nblks * nthreads) + 1; ASSERT_TRUE(MLCommon::devArrMatch(expected, out.data(), len, MLCommon::Compare<int>())); if (params.checkWorkspaceReuse) { ASSERT_TRUE(MLCommon::devArrMatch(expected, out1.data(), len, MLCommon::Compare<int>())); } } INSTANTIATE_TEST_CASE_P(GridSyncTests, GridSyncTest, ::testing::ValuesIn(inputs)); } // end namespace MLCommon
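
The expected value in the TEST_P body above follows directly from the kernel: the master thread stores 1, the first gs.sync() makes that store visible, every thread of every participating block reads it as val, and after the second gs.sync() each thread atomically adds val back, so every output slot ends at nblks*nthreads + 1. A small host-only check of that arithmetic against one of the parameter sets in inputs:

#include <cassert>

struct Dim3i { int x, y, z; }; // host-side stand-in for dim3

int expected( Dim3i grid, Dim3i block, bool across_x)
{
    int nblks = across_x ? grid.x                    // one barrier per (y,z) slice
                         : grid.x * grid.y * grid.z; // ACROSS_ALL: the whole grid
    int nthreads = block.x * block.y * block.z;
    return nblks * nthreads + 1; // master's initial 1 + one atomicAdd per thread
}

int main()
{
    // {{2, 2, 2}, {32, 2, 1}, ..., ACROSS_X}: 2 blocks along x, 64 threads each
    assert( expected( {2, 2, 2}, {32, 2, 1}, true) == 129);
    return 0;
}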
0f12fe09857df1d157dc6e6d0ab1d2ee982107a5.cu
/* * Copyright (c) 2019-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "test_utils.h" #include <common/grid_sync.cuh> #include <gtest/gtest.h> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> namespace MLCommon { __global__ void gridSyncTestKernel(void* workspace, int* out, SyncType type) { GridSync gs(workspace, type, true); bool master; int updatePosition; if (type == ACROSS_ALL) { master = threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 && blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0; updatePosition = 0; } else { master = threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 && blockIdx.x == 0; updatePosition = blockIdx.y + blockIdx.z * gridDim.y; } if (master) { out[updatePosition] = 1; __threadfence(); } gs.sync(); int val = out[updatePosition]; // make sure everybody has read the updated value! gs.sync(); raft::myAtomicAdd(out + updatePosition, val); } struct GridSyncInputs { dim3 gridDim, blockDim; bool checkWorkspaceReuse; SyncType type; }; void gridSyncTest(int* out, int* out1, const GridSyncInputs& params, cudaStream_t stream) { size_t workspaceSize = GridSync::computeWorkspaceSize(params.gridDim, params.type, true); rmm::device_uvector<char> workspace(workspaceSize, stream); RAFT_CUDA_TRY(cudaMemset(workspace.data(), 0, workspace.size())); gridSyncTestKernel<<<params.gridDim, params.blockDim>>>(workspace.data(), out, params.type); RAFT_CUDA_TRY(cudaPeekAtLastError()); if (params.checkWorkspaceReuse) { RAFT_CUDA_TRY(cudaDeviceSynchronize()); gridSyncTestKernel<<<params.gridDim, params.blockDim>>>(workspace.data(), out1, params.type); RAFT_CUDA_TRY(cudaPeekAtLastError()); } } ::std::ostream& operator<<(::std::ostream& os, const GridSyncInputs& dims) { return os; } class GridSyncTest : public ::testing::TestWithParam<GridSyncInputs> { protected: GridSyncTest() : out(0, stream), out1(0, stream) {} void SetUp() override { params = ::testing::TestWithParam<GridSyncInputs>::GetParam(); size_t len = computeOutLen(); RAFT_CUDA_TRY(cudaStreamCreate(&stream)); out.resize(len, stream); out1.resize(len, stream); gridSyncTest(out.data(), out1.data(), params, stream); } size_t computeOutLen() const { size_t len; if (params.type == ACROSS_ALL) { len = 1; } else { len = params.gridDim.y * params.gridDim.z; } return len; } protected: cudaStream_t stream = 0; GridSyncInputs params; rmm::device_uvector<int> out, out1; }; const std::vector<GridSyncInputs> inputs = { {{2, 1, 1}, {32, 1, 1}, false, ACROSS_ALL}, {{2, 1, 1}, {32, 2, 1}, false, ACROSS_ALL}, {{2, 1, 1}, {32, 2, 4}, false, ACROSS_ALL}, {{2, 1, 1}, {32, 1, 1}, true, ACROSS_ALL}, {{2, 1, 1}, {32, 2, 1}, true, ACROSS_ALL}, {{2, 1, 1}, {32, 2, 4}, true, ACROSS_ALL}, {{2, 1, 1}, {32, 1, 1}, false, ACROSS_X}, {{2, 2, 1}, {32, 1, 1}, false, ACROSS_X}, {{2, 2, 2}, {32, 1, 1}, false, ACROSS_X}, {{2, 1, 1}, {32, 2, 1}, false, ACROSS_X}, {{2, 2, 1}, {32, 2, 1}, false, ACROSS_X}, {{2, 2, 2}, {32, 2, 1}, false, ACROSS_X}, {{2, 1, 1}, {32, 2, 4}, false, ACROSS_X}, {{2, 2, 1}, {32, 2, 4}, 
false, ACROSS_X}, {{2, 2, 2}, {32, 2, 4}, false, ACROSS_X}, {{32, 256, 1}, {1, 1, 1}, false, ACROSS_X}, {{2, 1, 1}, {32, 1, 1}, true, ACROSS_X}, {{2, 2, 1}, {32, 1, 1}, true, ACROSS_X}, {{2, 2, 2}, {32, 1, 1}, true, ACROSS_X}, {{2, 1, 1}, {32, 2, 1}, true, ACROSS_X}, {{2, 2, 1}, {32, 2, 1}, true, ACROSS_X}, {{2, 2, 2}, {32, 2, 1}, true, ACROSS_X}, {{2, 1, 1}, {32, 2, 4}, true, ACROSS_X}, {{2, 2, 1}, {32, 2, 4}, true, ACROSS_X}, {{2, 2, 2}, {32, 2, 4}, true, ACROSS_X}, {{32, 256, 1}, {1, 1, 1}, true, ACROSS_X}}; TEST_P(GridSyncTest, Result) { size_t len = computeOutLen(); // number of blocks raft::myAtomicAdd'ing the same location int nblks = params.type == ACROSS_X ? params.gridDim.x : params.gridDim.x * params.gridDim.y * params.gridDim.z; int nthreads = params.blockDim.x * params.blockDim.y * params.blockDim.z; int expected = (nblks * nthreads) + 1; ASSERT_TRUE(MLCommon::devArrMatch(expected, out.data(), len, MLCommon::Compare<int>())); if (params.checkWorkspaceReuse) { ASSERT_TRUE(MLCommon::devArrMatch(expected, out1.data(), len, MLCommon::Compare<int>())); } } INSTANTIATE_TEST_CASE_P(GridSyncTests, GridSyncTest, ::testing::ValuesIn(inputs)); } // end namespace MLCommon
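
GridSync predates the cooperative-groups API; on devices that support cooperative launch, a device-wide barrier is available without a hand-rolled workspace. The following is a sketch of that alternative, not of GridSync's internals, and it carries the usual caveat that the whole grid must be co-resident or cudaLaunchCooperativeKernel returns an error:

#include <cooperative_groups.h>
#include <cuda_runtime.h>
namespace cg = cooperative_groups;

__global__ void coopSyncKernel( int* out)
{
    cg::grid_group grid = cg::this_grid();
    if( grid.thread_rank() == 0) { *out = 1; __threadfence(); }
    grid.sync();        // device-wide barrier, playing the role of gs.sync()
    int val = *out;     // every thread now sees the master's store
    grid.sync();
    atomicAdd( out, val);
}

// cooperative kernels must be launched through the cooperative-launch API
cudaError_t launch( int* d_out, dim3 gridDim, dim3 blockDim)
{
    void* args[] = { &d_out };
    return cudaLaunchCooperativeKernel( (void*)coopSyncKernel, gridDim, blockDim, args, 0, 0);
}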
b61e97831c920756ab61ec9924d9e639667892ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /********************************** When updating a kernel or adding a new one, please compile the ptx file and commit it: nvcc -ptx -arch=sm_30 SystemML.cu ***********************************/ /** * Performs a slice operation where the input matrix is sparse and the output matrix is dense. * This function avoids unnecessary sparse to dense conversion of the input matrix. * Parallelization: rows of output matrix. * * @params inVal input val pointer * @params inRowPtr input row pointer * @params colInd input col index pointer * @params ret dense output pointer * @param rl row lower * @param ru row upper * @param cl column lower * @param cu column upper * @param retClen number of columns of output matrix */ extern "C" /** * Performs a slice operation where the input matrix is sparse and the output matrix is dense. * This function avoids unnecessary sparse to dense conversion of the input matrix. * Parallelization: subset of number of non-zeroes of input matrix. * * @params inVal input val pointer * @params inRowPtr input row pointer * @params colInd input col index pointer * @params ret dense output pointer * @param rl row lower * @param ru row upper * @param cl column lower * @param cu column upper * @param retClen number of columns of output matrix */ extern "C" /** * Performs a slice operation where the input matrix is dense and the output matrix is dense. * * @params in dense input pointer * @params ret dense output pointer * @param rl row lower * @param ru row upper * @param cl column lower * @param cu column upper * @param inClen number of columns of input matrix * @param retRlen number of rows of output matrix * @param retClen number of columns of output matrix */ extern "C" /** * Does a copy of upper to lower triangle of the given matrix * @param ret the input and output array allocated on the GPU * @param dim the number of rows of the square matrix ret * @param N total number of elements of the matrix */ extern "C" extern "C" __global__ void bias_multiply(double* input, double* bias, double* ret, int rlen, int clen, int PQ) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / clen; int iy = tid % clen; if(ix < rlen && iy < clen) { int biasIndex = iy / PQ; ret[tid] = input[tid] * bias[biasIndex]; } }
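
A minimal host-side driver for the bias_multiply kernel above, launching one thread per output element; the sizes are hypothetical (2 rows, clen = 6 split into 2 channels of PQ = 3 spatial positions) and the sketch assumes it is compiled in the same translation unit as the kernel:

#include <hip/hip_runtime.h>
#include <cstdio>

int main()
{
    const int rlen = 2, clen = 6, PQ = 3; // 2 channels * 3 positions per row
    const int N = rlen * clen;
    double h_in[N], h_bias[2] = {2.0, 3.0}, h_ret[N];
    for( int i = 0; i < N; i++) h_in[i] = 1.0;
    double *d_in, *d_bias, *d_ret;
    hipMalloc( &d_in, N * sizeof(double));
    hipMalloc( &d_bias, 2 * sizeof(double));
    hipMalloc( &d_ret, N * sizeof(double));
    hipMemcpy( d_in, h_in, N * sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy( d_bias, h_bias, 2 * sizeof(double), hipMemcpyHostToDevice);
    const int threads = 128, blocks = (N + threads - 1) / threads; // ceil-divide
    hipLaunchKernelGGL(( bias_multiply), dim3(blocks), dim3(threads), 0, 0, d_in, d_bias, d_ret, rlen, clen, PQ);
    hipMemcpy( h_ret, d_ret, N * sizeof(double), hipMemcpyDeviceToHost);
    for( int i = 0; i < N; i++) printf( "%g ", h_ret[i]); // 2 2 2 3 3 3 2 2 2 3 3 3
    printf( "\n");
    hipFree( d_in); hipFree( d_bias); hipFree( d_ret);
    return 0;
}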
b61e97831c920756ab61ec9924d9e639667892ee.cu
#include "includes.h" /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /********************************** When updating a kernel or adding a new one, please compile the ptx file and commit it: nvcc -ptx -arch=sm_30 SystemML.cu ***********************************/ /** * Performs a slice operation where the input matrix is sparse and the output matrix is dense. * This function avoids unnecessary sparse to dense conversion of the input matrix. * Parallelization: rows of output matrix. * * @params inVal input val pointer * @params inRowPtr input row pointer * @params colInd input col index pointer * @params ret dense output pointer * @param rl row lower * @param ru row upper * @param cl column lower * @param cu column upper * @param retClen number of columns of output matrix */ extern "C" /** * Performs a slice operation where the input matrix is sparse and the output matrix is dense. * This function avoids unnecessary sparse to dense conversion of the input matrix. * Parallelization: subset of number of non-zeroes of input matrix. * * @params inVal input val pointer * @params inRowPtr input row pointer * @params colInd input col index pointer * @params ret dense output pointer * @param rl row lower * @param ru row upper * @param cl column lower * @param cu column upper * @param retClen number of columns of output matrix */ extern "C" /** * Performs a slice operation where the input matrix is dense and the output matrix is dense. * * @params in dense input pointer * @params ret dense output pointer * @param rl row lower * @param ru row upper * @param cl column lower * @param cu column upper * @param inClen number of columns of input matrix * @param retRlen number of rows of output matrix * @param retClen number of columns of output matrix */ extern "C" /** * Does a copy of upper to lower triangle of the given matrix * @param ret the input and output array allocated on the GPU * @param dim the number of rows of the square matrix ret * @param N total number of elements of the matrix */ extern "C" extern "C" __global__ void bias_multiply(double* input, double* bias, double* ret, int rlen, int clen, int PQ) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int ix = tid / clen; int iy = tid % clen; if(ix < rlen && iy < clen) { int biasIndex = iy / PQ; ret[tid] = input[tid] * bias[biasIndex]; } }
40fd529e47686f0e99f28611178e58be53918704.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" //#include "caffe/layers/unpooling_layer.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> __global__ void MaxUnpoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int unpooled_height, const int unpooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data, const Dtype* bottom_mask) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % width; int ph = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int uph = max(0, min(ph * stride_h - pad_h, unpooled_height - 1)); int upw = max(0, min(pw * stride_w - pad_w, unpooled_width - 1)); int unpooled_index = uph * unpooled_width + upw; top_data += (n * channels + c) * unpooled_height * unpooled_width; if (bottom_mask) { const int mask_index = bottom_mask[index]; top_data[mask_index] = bottom_data[index]; } else { top_data[unpooled_index] = bottom_data[index]; } } } template <typename Dtype> __global__ void AveUnpoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int unpooled_height, const int unpooled_width, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % unpooled_width + pad_w; int h = (index / unpooled_width) % unpooled_height + pad_h; int c = (index / unpooled_width / unpooled_height) % channels; int n = index / unpooled_width / unpooled_height / channels; int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; int phend = min(h / stride_h + 1, height); int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; int pwend = min(w / stride_w + 1, width); Dtype distval = 0; bottom_data += (n * channels + c) * height * width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, unpooled_height + pad_h); int wend = min(wstart + kernel_w, unpooled_width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); distval += bottom_data[ph * width + pw] / pool_size; } } top_data[index] = distval; } } template <typename Dtype> __global__ void TileUnpoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int unpooled_height, const int unpooled_width, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % unpooled_width + pad_w; int h = (index / unpooled_width) % unpooled_height + pad_h; int c = (index / unpooled_width / unpooled_height) % channels; int n = index / unpooled_width / unpooled_height / channels; int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; int phend = min(h / stride_h + 1, height); int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; int pwend = min(w / stride_w + 1, width); Dtype distval = 0; bottom_data += (n * channels + c) * height * width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, unpooled_height + pad_h); int wend = min(wstart + kernel_w, unpooled_width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); distval += bottom_data[ph * width + pw]; } } top_data[index] = distval; } } template <typename Dtype> void UnpoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); int count = bottom[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); caffe_gpu_set(top[0]->count(), Dtype(0.), top_data); // We'll get the mask from bottom[1] if it's of size >1. const bool use_bottom_mask = bottom.size() > 1; const Dtype* bottom_mask = NULL; switch (this->layer_param_.unpooling_param().unpool()) { case UnpoolingParameter_UnpoolMethod_MAX: if (use_bottom_mask) { bottom_mask = bottom[1]->gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxUnpoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, unpooled_height_, unpooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, bottom_mask); break; case UnpoolingParameter_UnpoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( AveUnpoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, top[0]->count(), bottom_data, bottom[0]->num(), channels_, unpooled_height_, unpooled_width_, height_, width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); break; case UnpoolingParameter_UnpoolMethod_TILE: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( TileUnpoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, top[0]->count(), bottom_data, bottom[0]->num(), channels_, unpooled_height_, unpooled_width_, height_, width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); break; default: LOG(FATAL) << "Unknown unpooling method."; } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void MaxUnpoolBackward(const int nthreads, const Dtype* top_diff, const Dtype* bottom_mask, const int num, const int channels, const int height, const int width, const int unpooled_height, const int unpooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int pw = index % width; int ph = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int uph = max(0, min(ph * stride_h - pad_h, unpooled_height - 1)); int upw = max(0, min(pw * stride_w - pad_w, unpooled_width - 1)); int unpooled_index = uph * unpooled_width + upw; top_diff += (n * channels + c) * unpooled_height * unpooled_width; if (bottom_mask) { const int mask_index = bottom_mask[index]; bottom_diff[index] = top_diff[mask_index]; } else { bottom_diff[index] = top_diff[unpooled_index]; } } } template <typename Dtype> __global__ void AveUnpoolBackward(const int nthreads, const Dtype* top_diff, 
const int num, const int channels, const int unpooled_height, const int unpooled_width, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % width; int ph = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, unpooled_height + pad_h); int wend = min(wstart + kernel_w, unpooled_width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, unpooled_height); wend = min(wend, unpooled_width); Dtype gradient = 0; top_diff += (n * channels + c) * unpooled_height * unpooled_width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { gradient += top_diff[h * unpooled_width + w]; } } bottom_diff[index] = gradient / pool_size; } } template <typename Dtype> __global__ void TileUnpoolBackward(const int nthreads, const Dtype* top_diff, const int num, const int channels, const int unpooled_height, const int unpooled_width, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % width; int ph = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, unpooled_height + pad_h); int wend = min(wstart + kernel_w, unpooled_width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, unpooled_height); wend = min(wend, unpooled_width); Dtype gradient = 0; top_diff += (n * channels + c) * unpooled_height * unpooled_width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { gradient += top_diff[h * unpooled_width + w]; } } bottom_diff[index] = gradient / pool_size; } } template <typename Dtype> void UnpoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); // We'll get the mask from bottom[1] if it's of size >1. 
const bool use_bottom_mask = bottom.size() > 1; const Dtype* bottom_mask = NULL; switch (this->layer_param_.unpooling_param().unpool()) { case UnpoolingParameter_UnpoolMethod_MAX: if (use_bottom_mask) { bottom_mask = bottom[1]->gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxUnpoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, bottom_mask, top[0]->num(), channels_, height_, width_, unpooled_height_, unpooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case UnpoolingParameter_UnpoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( AveUnpoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, bottom[0]->count(), top_diff, top[0]->num(), channels_, unpooled_height_, unpooled_width_, height_, width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case UnpoolingParameter_UnpoolMethod_TILE: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( TileUnpoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, bottom[0]->count(), top_diff, top[0]->num(), channels_, unpooled_height_, unpooled_width_, height_, width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; default: LOG(FATAL) << "Unknown unpooling method."; } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(UnpoolingLayer); } // namespace caffe
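
The mask path of MaxUnpoolForward/MaxUnpoolBackward above is a plain scatter/gather: the pooling layer recorded, per pooled element, the flat offset of its argmax inside the per-channel unpooled map (stored as Dtype and cast back to int), and unpooling writes each value to that offset. A single-channel CPU sketch with hypothetical names:

#include <vector>

// pooled:   height*width values coming out of max pooling
// mask:     same shape; mask[i] is the flat argmax offset in the unpooled map
// unpooled: unpooled_h*unpooled_w, zero-initialised so that non-argmax
//           positions stay 0, exactly like the caffe_gpu_set above
void max_unpool_ref( const std::vector<float>& pooled,
                     const std::vector<int>& mask,
                     std::vector<float>& unpooled)
{
    for( size_t i = 0; i < pooled.size(); i++)
        unpooled[ mask[i]] = pooled[i]; // forward: scatter
    // the backward pass is the mirror gather:
    // bottom_diff[i] = top_diff[ mask[i]];
}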
40fd529e47686f0e99f28611178e58be53918704.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" //#include "caffe/layers/unpooling_layer.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> __global__ void MaxUnpoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int height, const int width, const int unpooled_height, const int unpooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data, const Dtype* bottom_mask) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % width; int ph = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int uph = max(0, min(ph * stride_h - pad_h, unpooled_height - 1)); int upw = max(0, min(pw * stride_w - pad_w, unpooled_width - 1)); int unpooled_index = uph * unpooled_width + upw; top_data += (n * channels + c) * unpooled_height * unpooled_width; if (bottom_mask) { const int mask_index = bottom_mask[index]; top_data[mask_index] = bottom_data[index]; } else { top_data[unpooled_index] = bottom_data[index]; } } } template <typename Dtype> __global__ void AveUnpoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int unpooled_height, const int unpooled_width, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % unpooled_width + pad_w; int h = (index / unpooled_width) % unpooled_height + pad_h; int c = (index / unpooled_width / unpooled_height) % channels; int n = index / unpooled_width / unpooled_height / channels; int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; int phend = min(h / stride_h + 1, height); int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; int pwend = min(w / stride_w + 1, width); Dtype distval = 0; bottom_data += (n * channels + c) * height * width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, unpooled_height + pad_h); int wend = min(wstart + kernel_w, unpooled_width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); distval += bottom_data[ph * width + pw] / pool_size; } } top_data[index] = distval; } } template <typename Dtype> __global__ void TileUnpoolForward(const int nthreads, const Dtype* bottom_data, const int num, const int channels, const int unpooled_height, const int unpooled_width, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int w = index % unpooled_width + pad_w; int h = (index / unpooled_width) % unpooled_height + pad_h; int c = (index / unpooled_width / unpooled_height) % channels; int n = index / unpooled_width / unpooled_height / channels; int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; int phend = min(h / stride_h + 1, height); int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; int pwend = min(w / stride_w + 1, width); Dtype distval = 0; bottom_data += (n * channels + c) * height * width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, unpooled_height + pad_h); int wend = min(wstart + kernel_w, unpooled_width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); distval += bottom_data[ph * width + pw]; } } top_data[index] = distval; } } template <typename Dtype> void UnpoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); int count = bottom[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); caffe_gpu_set(top[0]->count(), Dtype(0.), top_data); // We'll get the mask from bottom[1] if it's of size >1. const bool use_bottom_mask = bottom.size() > 1; const Dtype* bottom_mask = NULL; switch (this->layer_param_.unpooling_param().unpool()) { case UnpoolingParameter_UnpoolMethod_MAX: if (use_bottom_mask) { bottom_mask = bottom[1]->gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) MaxUnpoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, unpooled_height_, unpooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, bottom_mask); break; case UnpoolingParameter_UnpoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) AveUnpoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( top[0]->count(), bottom_data, bottom[0]->num(), channels_, unpooled_height_, unpooled_width_, height_, width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); break; case UnpoolingParameter_UnpoolMethod_TILE: // NOLINT_NEXT_LINE(whitespace/operators) TileUnpoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( top[0]->count(), bottom_data, bottom[0]->num(), channels_, unpooled_height_, unpooled_width_, height_, width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); break; default: LOG(FATAL) << "Unknown unpooling method."; } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void MaxUnpoolBackward(const int nthreads, const Dtype* top_diff, const Dtype* bottom_mask, const int num, const int channels, const int height, const int width, const int unpooled_height, const int unpooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset int pw = index % width; int ph = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int uph = max(0, min(ph * stride_h - pad_h, unpooled_height - 1)); int upw = max(0, min(pw * stride_w - pad_w, unpooled_width - 1)); int unpooled_index = uph * unpooled_width + upw; top_diff += (n * channels + c) * unpooled_height * unpooled_width; if (bottom_mask) { const int mask_index = bottom_mask[index]; bottom_diff[index] = top_diff[mask_index]; } else { bottom_diff[index] = top_diff[unpooled_index]; } } } template <typename Dtype> __global__ void AveUnpoolBackward(const int nthreads, const Dtype* top_diff, const int num, const int channels, const int unpooled_height, const int unpooled_width, const int height, const 
int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % width; int ph = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, unpooled_height + pad_h); int wend = min(wstart + kernel_w, unpooled_width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, unpooled_height); wend = min(wend, unpooled_width); Dtype gradient = 0; top_diff += (n * channels + c) * unpooled_height * unpooled_width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { gradient += top_diff[h * unpooled_width + w]; } } bottom_diff[index] = gradient / pool_size; } } template <typename Dtype> __global__ void TileUnpoolBackward(const int nthreads, const Dtype* top_diff, const int num, const int channels, const int unpooled_height, const int unpooled_width, const int height, const int width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { int pw = index % width; int ph = (index / width) % height; int c = (index / width / height) % channels; int n = index / width / height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, unpooled_height + pad_h); int wend = min(wstart + kernel_w, unpooled_width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, unpooled_height); wend = min(wend, unpooled_width); Dtype gradient = 0; top_diff += (n * channels + c) * unpooled_height * unpooled_width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { gradient += top_diff[h * unpooled_width + w]; } } bottom_diff[index] = gradient / pool_size; } } template <typename Dtype> void UnpoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); // We'll get the mask from bottom[1] if it's of size >1. 
const bool use_bottom_mask = bottom.size() > 1; const Dtype* bottom_mask = NULL; switch (this->layer_param_.unpooling_param().unpool()) { case UnpoolingParameter_UnpoolMethod_MAX: if (use_bottom_mask) { bottom_mask = bottom[1]->gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) MaxUnpoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, bottom_mask, top[0]->num(), channels_, height_, width_, unpooled_height_, unpooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case UnpoolingParameter_UnpoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) AveUnpoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( bottom[0]->count(), top_diff, top[0]->num(), channels_, unpooled_height_, unpooled_width_, height_, width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; case UnpoolingParameter_UnpoolMethod_TILE: // NOLINT_NEXT_LINE(whitespace/operators) TileUnpoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( bottom[0]->count(), top_diff, top[0]->num(), channels_, unpooled_height_, unpooled_width_, height_, width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff); break; default: LOG(FATAL) << "Unknown unpooling method."; } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(UnpoolingLayer); } // namespace caffe
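The MAX branch above scatters each pooled value to the output position recorded in a mask produced by the paired pooling layer. A minimal standalone sketch of that mask-driven scatter, independent of Caffe's Blob and CUDA_KERNEL_LOOP machinery and using hypothetical names and sizes:

// Minimal sketch of mask-driven max unpooling (hypothetical names; not the
// Caffe kernel above). Each pooled element is scattered to the output
// location stored in its mask entry; untouched outputs stay zero.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void max_unpool_sketch(const float* pooled, const float* mask,
                                  float* unpooled, int pooled_count) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < pooled_count) {
    // mask holds the flat index chosen by max pooling (stored as float,
    // matching the Caffe convention of Dtype masks)
    int target = static_cast<int>(mask[i]);
    unpooled[target] = pooled[i];
  }
}

int main() {
  const int pooled_count = 4, unpooled_count = 16;  // e.g. 2x2 pooled from 4x4
  float h_pooled[pooled_count] = {5.f, 7.f, 3.f, 9.f};
  float h_mask[pooled_count] = {0.f, 6.f, 9.f, 15.f};  // argmax positions
  float *d_pooled, *d_mask, *d_unpooled;
  cudaMalloc(&d_pooled, pooled_count * sizeof(float));
  cudaMalloc(&d_mask, pooled_count * sizeof(float));
  cudaMalloc(&d_unpooled, unpooled_count * sizeof(float));
  cudaMemset(d_unpooled, 0, unpooled_count * sizeof(float));  // like caffe_gpu_set(0)
  cudaMemcpy(d_pooled, h_pooled, sizeof(h_pooled), cudaMemcpyHostToDevice);
  cudaMemcpy(d_mask, h_mask, sizeof(h_mask), cudaMemcpyHostToDevice);
  max_unpool_sketch<<<1, 32>>>(d_pooled, d_mask, d_unpooled, pooled_count);
  float h_out[unpooled_count];
  cudaMemcpy(h_out, d_unpooled, sizeof(h_out), cudaMemcpyDeviceToHost);
  for (int i = 0; i < unpooled_count; ++i) printf("%g ", h_out[i]);
  printf("\n");
  cudaFree(d_pooled); cudaFree(d_mask); cudaFree(d_unpooled);
  return 0;
}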
dd4e45946b249c74c86eabcddc6c9d5e3b18cda4.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2011-2016 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "lerror_layer_tester_cuda.h" #include <hip/hip_runtime.h> #include "../lerror_layer.h" namespace nnforge { namespace cuda { extern __shared__ float arr_sh[]; template<int n_type> __global__ void lerror_kernel( float * __restrict output, const float * __restrict input0, const float * __restrict input1, const float * __restrict scale_mask, int input_feature_map_count, int elem_count_per_feature_map, float n_value, float scale, int entry_count) { int feature_map_id = threadIdx.x; int neuron_id = blockIdx.x; int entry_id = blockIdx.y; int threadblock_size = blockDim.x; float err = 0.0F; int output_offset = entry_id * elem_count_per_feature_map + neuron_id; float mask = 1.0F; if (scale_mask) mask = scale_mask[output_offset]; int thread_id = threadIdx.x; if (mask != 0.0F) { int input_offset = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id; while (feature_map_id < input_feature_map_count) { float local_err = input0[input_offset] - input1[input_offset]; if (n_type == 1) err += fabsf(local_err); else if (n_type == 2) err += local_err * local_err; else err += __powf(fabsf(local_err), n_value); feature_map_id += threadblock_size; input_offset += threadblock_size * elem_count_per_feature_map; } int lane_id = thread_id & 31; #pragma unroll for(int tx = 16; tx > 0; tx >>= 1) err += __shfl_down(err, tx); int warp_count = threadblock_size >> 5; if (warp_count > 1) { if (lane_id == 0) arr_sh[thread_id >> 5] = err; __syncthreads(); if (thread_id < 32) { err = 0.0F; if (thread_id < warp_count) err = arr_sh[thread_id]; #pragma unroll for(int tx = 4; tx > 0; tx >>= 1) err += __shfl_down(err, tx); } } } if (thread_id == 0) output[output_offset] = err * (mask * scale); } void lerror_layer_tester_cuda::enqueue_forward_propagation( hipStream_t stream_id, cuda_linear_buffer_device::ptr output_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, unsigned int entry_count) { int threadblock_size = get_threadblock_size(input_configuration_specific_list[0].feature_map_count); const float * scale_mask = 0; if (input_buffers.size() > 2) scale_mask = *input_buffers[2]; int smem_size = ((threadblock_size + 32 - 1) / 32) * sizeof(float); if (n_value == 1.0F) hipLaunchKernelGGL(( lerror_kernel<1>), dim3(dim3(input_elem_count_per_feature_map_list[0], entry_count)), dim3(threadblock_size), smem_size, stream_id, *output_buffer, *input_buffers[0], *input_buffers[1], scale_mask, 
input_configuration_specific_list[0].feature_map_count, input_elem_count_per_feature_map_list[0], n_value, scale, entry_count); else if (n_value == 2.0F) hipLaunchKernelGGL(( lerror_kernel<2>), dim3(dim3(input_elem_count_per_feature_map_list[0], entry_count)), dim3(threadblock_size), smem_size, stream_id, *output_buffer, *input_buffers[0], *input_buffers[1], scale_mask, input_configuration_specific_list[0].feature_map_count, input_elem_count_per_feature_map_list[0], n_value, scale, entry_count); else hipLaunchKernelGGL(( lerror_kernel<-1>), dim3(dim3(input_elem_count_per_feature_map_list[0], entry_count)), dim3(threadblock_size), smem_size, stream_id, *output_buffer, *input_buffers[0], *input_buffers[1], scale_mask, input_configuration_specific_list[0].feature_map_count, input_elem_count_per_feature_map_list[0], n_value, scale, entry_count); } void lerror_layer_tester_cuda::tester_configured() { std::shared_ptr<const lerror_layer> layer_derived = std::dynamic_pointer_cast<const lerror_layer>(layer_schema); scale = layer_derived->scale; n_value = layer_derived->n; } int lerror_layer_tester_cuda::get_threadblock_size(int input_feature_map_count) { int threadblock_size; if (input_feature_map_count < 256) { threadblock_size = (input_feature_map_count + 32 - 1) / 32 * 32; } else { int threadblock_count = (input_feature_map_count + 256 - 1) / 256; threadblock_size = (input_feature_map_count + threadblock_count - 1) / threadblock_count; threadblock_size = (threadblock_size + 32 - 1) / 32 * 32; } return threadblock_size; } } }
dd4e45946b249c74c86eabcddc6c9d5e3b18cda4.cu
/* * Copyright 2011-2016 Maxim Milakov * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "lerror_layer_tester_cuda.h" #include <cuda_runtime.h> #include "../lerror_layer.h" namespace nnforge { namespace cuda { extern __shared__ float arr_sh[]; template<int n_type> __global__ void lerror_kernel( float * __restrict output, const float * __restrict input0, const float * __restrict input1, const float * __restrict scale_mask, int input_feature_map_count, int elem_count_per_feature_map, float n_value, float scale, int entry_count) { int feature_map_id = threadIdx.x; int neuron_id = blockIdx.x; int entry_id = blockIdx.y; int threadblock_size = blockDim.x; float err = 0.0F; int output_offset = entry_id * elem_count_per_feature_map + neuron_id; float mask = 1.0F; if (scale_mask) mask = scale_mask[output_offset]; int thread_id = threadIdx.x; if (mask != 0.0F) { int input_offset = (entry_id * input_feature_map_count + feature_map_id) * elem_count_per_feature_map + neuron_id; while (feature_map_id < input_feature_map_count) { float local_err = input0[input_offset] - input1[input_offset]; if (n_type == 1) err += fabsf(local_err); else if (n_type == 2) err += local_err * local_err; else err += __powf(fabsf(local_err), n_value); feature_map_id += threadblock_size; input_offset += threadblock_size * elem_count_per_feature_map; } int lane_id = thread_id & 31; #pragma unroll for(int tx = 16; tx > 0; tx >>= 1) err += __shfl_down(err, tx); int warp_count = threadblock_size >> 5; if (warp_count > 1) { if (lane_id == 0) arr_sh[thread_id >> 5] = err; __syncthreads(); if (thread_id < 32) { err = 0.0F; if (thread_id < warp_count) err = arr_sh[thread_id]; #pragma unroll for(int tx = 4; tx > 0; tx >>= 1) err += __shfl_down(err, tx); } } } if (thread_id == 0) output[output_offset] = err * (mask * scale); } void lerror_layer_tester_cuda::enqueue_forward_propagation( cudaStream_t stream_id, cuda_linear_buffer_device::ptr output_buffer, const std::vector<cuda_linear_buffer_device::const_ptr>& schema_data, const std::vector<cuda_linear_buffer_device::const_ptr>& data, const std::vector<cuda_linear_buffer_device::const_ptr>& data_custom, const std::vector<cuda_linear_buffer_device::const_ptr>& input_buffers, const std::vector<cuda_linear_buffer_device::const_ptr>& persistent_working_data, cuda_linear_buffer_device::ptr temporary_working_fixed_buffer, cuda_linear_buffer_device::ptr temporary_working_per_entry_buffer, unsigned int entry_count) { int threadblock_size = get_threadblock_size(input_configuration_specific_list[0].feature_map_count); const float * scale_mask = 0; if (input_buffers.size() > 2) scale_mask = *input_buffers[2]; int smem_size = ((threadblock_size + 32 - 1) / 32) * sizeof(float); if (n_value == 1.0F) lerror_kernel<1><<<dim3(input_elem_count_per_feature_map_list[0], entry_count), threadblock_size, smem_size, stream_id>>>( *output_buffer, *input_buffers[0], *input_buffers[1], scale_mask, input_configuration_specific_list[0].feature_map_count, input_elem_count_per_feature_map_list[0], n_value, scale, 
entry_count); else if (n_value == 2.0F) lerror_kernel<2><<<dim3(input_elem_count_per_feature_map_list[0], entry_count), threadblock_size, smem_size, stream_id>>>( *output_buffer, *input_buffers[0], *input_buffers[1], scale_mask, input_configuration_specific_list[0].feature_map_count, input_elem_count_per_feature_map_list[0], n_value, scale, entry_count); else lerror_kernel<-1><<<dim3(input_elem_count_per_feature_map_list[0], entry_count), threadblock_size, smem_size, stream_id>>>( *output_buffer, *input_buffers[0], *input_buffers[1], scale_mask, input_configuration_specific_list[0].feature_map_count, input_elem_count_per_feature_map_list[0], n_value, scale, entry_count); } void lerror_layer_tester_cuda::tester_configured() { std::shared_ptr<const lerror_layer> layer_derived = std::dynamic_pointer_cast<const lerror_layer>(layer_schema); scale = layer_derived->scale; n_value = layer_derived->n; } int lerror_layer_tester_cuda::get_threadblock_size(int input_feature_map_count) { int threadblock_size; if (input_feature_map_count < 256) { threadblock_size = (input_feature_map_count + 32 - 1) / 32 * 32; } else { int threadblock_count = (input_feature_map_count + 256 - 1) / 256; threadblock_size = (input_feature_map_count + threadblock_count - 1) / threadblock_count; threadblock_size = (threadblock_size + 32 - 1) / 32 * 32; } return threadblock_size; } } }
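The lerror kernel above reduces per-thread partial errors with the pre-CUDA-9 __shfl_down intrinsic, first within each warp and then across warps through shared memory. A minimal sketch of the same two-stage block reduction, written with the synchronized __shfl_down_sync variant required by current toolkits (hypothetical names, single block):

// Two-stage block sum: intra-warp shuffle reduction, then the first warp
// reduces the per-warp partials. Mirrors the pattern in lerror_kernel above.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void block_sum(const float* in, float* out, int n) {
  __shared__ float warp_sums[32];  // one slot per warp
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  float v = (tid < n) ? in[tid] : 0.0f;
  // stage 1: reduce within each warp
  for (int offset = 16; offset > 0; offset >>= 1)
    v += __shfl_down_sync(0xffffffffu, v, offset);
  int lane = threadIdx.x & 31, warp = threadIdx.x >> 5;
  if (lane == 0) warp_sums[warp] = v;
  __syncthreads();
  // stage 2: first warp reduces the per-warp partials
  if (warp == 0) {
    int warp_count = (blockDim.x + 31) >> 5;
    v = (lane < warp_count) ? warp_sums[lane] : 0.0f;
    for (int offset = 16; offset > 0; offset >>= 1)
      v += __shfl_down_sync(0xffffffffu, v, offset);
    if (lane == 0) out[blockIdx.x] = v;
  }
}

int main() {
  const int n = 256;
  float h_in[n];
  for (int i = 0; i < n; ++i) h_in[i] = 1.0f;  // expected block sum: 256
  float *d_in, *d_out;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, sizeof(float));
  cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
  block_sum<<<1, n>>>(d_in, d_out, n);
  float h_out;
  cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
  printf("block sum = %g\n", h_out);
  cudaFree(d_in); cudaFree(d_out);
  return 0;
}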
236e06f5e95818d54c572071088e287f34b8163f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void orcu_kernel32303(const int nrows, const int ndiags, int sbdiag, int ndofs, int* offsets, double* A, double* x, double* y) {
  const int tid=blockIdx.x*blockDim.x+threadIdx.x;
  const int gsize=gridDim.x*blockDim.x;
  double ysum;
  int j, k, col, row;
  for (int i=tid; i<=nrows-1; i+=gsize) {
    {
      ysum=0.0;
      for (j=0; j<=ndiags-1; j++ ) {
        row=i+j*sbdiag;
        col=(floor((float)i/ndofs)+offsets[j])*ndofs;
        if (col>=0&&col<nrows)
          for (k=0; k<=ndofs-1; k++ )
            ysum=ysum+A[row+k*nrows]*x[col+k];
      }
      y[i]=ysum;
    }
  }
}

void MatMult_SeqDIA(double* A, double* x, double* y, int M, int N, int P, int NOS, int DOF) {
  register int i,j,k;
  int col,row;
  double ysum;
  /*@ begin PerfTuning (
    def performance_params {
      param TC[] = range(32,1025,32);
      param BC[] = range(14,105,14);
      param PL[] = [16,32,48];
    }
    def input_params {
      param M[] = [64];
      param N[] = [64];
      param P[] = [64];
      param NOS = 7;
      param DOF[] = range(1,17);
      constraint c1 = (M==N);
      constraint c2 = (N==P);
    }
    def input_vars {
      decl dynamic double A[M*N*P*DOF*DOF*NOS] = random;
      decl dynamic double x[M*N*P*DOF] = random;
      decl dynamic double y[M*N*P*DOF] = 0;
      decl static int offsets[NOS] = {-M*N*DOF,-M*DOF,-DOF,0,DOF,M*DOF,M*N*DOF};
    }
  ) @*/
  /**-- (Generated by Orio)
  Best performance cost:
    [18.611499999999999, 18.5871, 18.591100000000001, 18.608799999999999, 18.550799999999999]
  Tuned for specific problem sizes:
    DOF = 16
    M = 64
    N = 64
    NOS = 7
    P = 64
  Best performance parameters:
    BC = 28
    PL = 32
    TC = 896
  --**/
  int nrows=M*N*P*DOF;
  int ndiags=NOS;
  int ndofs=DOF;
  int sbdiag=M*N*P*DOF*DOF;
  /*@ begin Loop(transform CUDA(threadCount=TC, blockCount=BC, preferL1Size=PL)
  for(i=0; i<=nrows-1; i++){
    ysum = 0.0;
    for(j=0; j<=ndiags-1; j++){
      row = i+j*sbdiag;
      col = (floor((float)i/ndofs)+offsets[j])*ndofs;
      if(col>=0&&col<nrows)
        for(k=0; k<=ndofs-1; k++)
          ysum += A[row+k*nrows] * x[col+k];
    }
    y[i] = ysum;
  }
  ) @*/
  {
    hipDeviceSynchronize();
    /*declare variables*/
    double *dev_A, *dev_x, *dev_y;
    int *dev_offsets;
    int nthreads=896;
    /*timing events and results (missing from the generated snippet; declared here so the block compiles)*/
    hipEvent_t tstart, tstop, start, stop;
    hipEventCreate(&tstart);
    hipEventCreate(&tstop);
    hipEventCreate(&start);
    hipEventCreate(&stop);
    float orcu_transfer=0.0f, orcu_elapsed=0.0f;
    /*calculate device dimensions*/
    dim3 dimGrid, dimBlock;
    dimBlock.x=nthreads;
    dimGrid.x=28;
    /*allocate device memory*/
    hipMalloc(&dev_A,M *N *P *DOF *DOF *NOS*sizeof(double));
    hipMalloc(&dev_x,M *N *P *DOF*sizeof(double));
    hipMalloc(&dev_y,M *N *P *DOF*sizeof(double));
    hipMalloc(&dev_offsets,NOS*sizeof(int));
    hipDeviceSetCacheConfig(hipFuncCachePreferEqual);
    /*copy data from host to device*/
    hipEventRecord(tstart,0);
    hipMemcpy(dev_A,A,M *N *P *DOF *DOF *NOS*sizeof(double),hipMemcpyHostToDevice);
    hipMemcpy(dev_x,x,M *N *P *DOF*sizeof(double),hipMemcpyHostToDevice);
    hipMemcpy(dev_offsets,offsets,NOS*sizeof(int),hipMemcpyHostToDevice);
    hipEventRecord(tstop,0);
    hipEventSynchronize(tstop);
    hipEventElapsedTime(&orcu_transfer,tstart,tstop);
    hipEventRecord(start,0);
    /*invoke device kernel*/
    hipLaunchKernelGGL(( orcu_kernel32303), dim3(dimGrid),dim3(dimBlock), 0, 0, nrows,ndiags,sbdiag,ndofs,dev_offsets,dev_A,dev_x,dev_y);
    hipEventRecord(stop,0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&orcu_elapsed,start,stop);
    /*copy data from device to host*/
    hipMemcpy(y,dev_y,M *N *P *DOF*sizeof(double),hipMemcpyDeviceToHost);
    hipDeviceSetCacheConfig(hipFuncCachePreferNone);
    /*free allocated memory*/
    hipFree(dev_A);
    hipFree(dev_x);
    hipFree(dev_y);
    hipFree(dev_offsets);
    hipEventDestroy(tstart);
    hipEventDestroy(tstop);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipError_t err=hipGetLastError();
    if (hipSuccess!=err)
      printf("CUDA runtime error: %s@",hipGetErrorString(err));
  }
  /*@ end @*/
  /*@ end @*/
}
236e06f5e95818d54c572071088e287f34b8163f.cu
#include <stdio.h>

__global__ void orcu_kernel32303(const int nrows, const int ndiags, int sbdiag, int ndofs, int* offsets, double* A, double* x, double* y) {
  const int tid=blockIdx.x*blockDim.x+threadIdx.x;
  const int gsize=gridDim.x*blockDim.x;
  double ysum;
  int j, k, col, row;
  for (int i=tid; i<=nrows-1; i+=gsize) {
    {
      ysum=0.0;
      for (j=0; j<=ndiags-1; j++ ) {
        row=i+j*sbdiag;
        col=(floor((float)i/ndofs)+offsets[j])*ndofs;
        if (col>=0&&col<nrows)
          for (k=0; k<=ndofs-1; k++ )
            ysum=ysum+A[row+k*nrows]*x[col+k];
      }
      y[i]=ysum;
    }
  }
}

void MatMult_SeqDIA(double* A, double* x, double* y, int M, int N, int P, int NOS, int DOF) {
  register int i,j,k;
  int col,row;
  double ysum;
  /*@ begin PerfTuning (
    def performance_params {
      param TC[] = range(32,1025,32);
      param BC[] = range(14,105,14);
      param PL[] = [16,32,48];
    }
    def input_params {
      param M[] = [64];
      param N[] = [64];
      param P[] = [64];
      param NOS = 7;
      param DOF[] = range(1,17);
      constraint c1 = (M==N);
      constraint c2 = (N==P);
    }
    def input_vars {
      decl dynamic double A[M*N*P*DOF*DOF*NOS] = random;
      decl dynamic double x[M*N*P*DOF] = random;
      decl dynamic double y[M*N*P*DOF] = 0;
      decl static int offsets[NOS] = {-M*N*DOF,-M*DOF,-DOF,0,DOF,M*DOF,M*N*DOF};
    }
  ) @*/
  /**-- (Generated by Orio)
  Best performance cost:
    [18.611499999999999, 18.5871, 18.591100000000001, 18.608799999999999, 18.550799999999999]
  Tuned for specific problem sizes:
    DOF = 16
    M = 64
    N = 64
    NOS = 7
    P = 64
  Best performance parameters:
    BC = 28
    PL = 32
    TC = 896
  --**/
  int nrows=M*N*P*DOF;
  int ndiags=NOS;
  int ndofs=DOF;
  int sbdiag=M*N*P*DOF*DOF;
  /*@ begin Loop(transform CUDA(threadCount=TC, blockCount=BC, preferL1Size=PL)
  for(i=0; i<=nrows-1; i++){
    ysum = 0.0;
    for(j=0; j<=ndiags-1; j++){
      row = i+j*sbdiag;
      col = (floor((float)i/ndofs)+offsets[j])*ndofs;
      if(col>=0&&col<nrows)
        for(k=0; k<=ndofs-1; k++)
          ysum += A[row+k*nrows] * x[col+k];
    }
    y[i] = ysum;
  }
  ) @*/
  {
    cudaDeviceSynchronize();
    /*declare variables*/
    double *dev_A, *dev_x, *dev_y;
    int *dev_offsets;
    int nthreads=896;
    /*timing events and results (missing from the generated snippet; declared here so the block compiles)*/
    cudaEvent_t tstart, tstop, start, stop;
    cudaEventCreate(&tstart);
    cudaEventCreate(&tstop);
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float orcu_transfer=0.0f, orcu_elapsed=0.0f;
    /*calculate device dimensions*/
    dim3 dimGrid, dimBlock;
    dimBlock.x=nthreads;
    dimGrid.x=28;
    /*allocate device memory*/
    cudaMalloc(&dev_A,M *N *P *DOF *DOF *NOS*sizeof(double));
    cudaMalloc(&dev_x,M *N *P *DOF*sizeof(double));
    cudaMalloc(&dev_y,M *N *P *DOF*sizeof(double));
    cudaMalloc(&dev_offsets,NOS*sizeof(int));
    cudaDeviceSetCacheConfig(cudaFuncCachePreferEqual);
    /*copy data from host to device*/
    cudaEventRecord(tstart,0);
    cudaMemcpy(dev_A,A,M *N *P *DOF *DOF *NOS*sizeof(double),cudaMemcpyHostToDevice);
    cudaMemcpy(dev_x,x,M *N *P *DOF*sizeof(double),cudaMemcpyHostToDevice);
    cudaMemcpy(dev_offsets,offsets,NOS*sizeof(int),cudaMemcpyHostToDevice);
    cudaEventRecord(tstop,0);
    cudaEventSynchronize(tstop);
    cudaEventElapsedTime(&orcu_transfer,tstart,tstop);
    cudaEventRecord(start,0);
    /*invoke device kernel*/
    orcu_kernel32303<<<dimGrid,dimBlock>>>(nrows,ndiags,sbdiag,ndofs,dev_offsets,dev_A,dev_x,dev_y);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&orcu_elapsed,start,stop);
    /*copy data from device to host*/
    cudaMemcpy(y,dev_y,M *N *P *DOF*sizeof(double),cudaMemcpyDeviceToHost);
    cudaDeviceSetCacheConfig(cudaFuncCachePreferNone);
    /*free allocated memory*/
    cudaFree(dev_A);
    cudaFree(dev_x);
    cudaFree(dev_y);
    cudaFree(dev_offsets);
    cudaEventDestroy(tstart);
    cudaEventDestroy(tstop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaError_t err=cudaGetLastError();
    if (cudaSuccess!=err)
      printf("CUDA runtime error: %s@",cudaGetErrorString(err));
  }
  /*@ end @*/
  /*@ end @*/
}
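The Orio-generated host block times the transfers and the kernel with CUDA events. A minimal self-contained sketch of that create/record/synchronize/elapsed/destroy lifecycle, with a hypothetical kernel standing in for the generated one:

// CUDA event timing pattern: create events, bracket the work, synchronize on
// the stop event, then read the elapsed time in milliseconds.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void busy(float* x, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] = x[i] * 2.0f + 1.0f;
}

int main() {
  const int n = 1 << 20;
  float* d_x;
  cudaMalloc(&d_x, n * sizeof(float));
  cudaMemset(d_x, 0, n * sizeof(float));

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start, 0);               // enqueue start marker
  busy<<<(n + 255) / 256, 256>>>(d_x, n);  // work being timed
  cudaEventRecord(stop, 0);                // enqueue stop marker
  cudaEventSynchronize(stop);              // wait until stop has completed

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);  // GPU-side elapsed milliseconds
  printf("kernel time: %f ms\n", ms);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d_x);
  return 0;
}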
85d63da8f7222c09f3443ef3a1c5a2286481305c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/elementwise/elementwise_add_op.h" #include "paddle/fluid/operators/elementwise/elementwise_op_broadcast.cu.h" #include "paddle/fluid/operators/reduce_ops/reduce_functor_op.h" #include "paddle/fluid/operators/reduce_ops/reduce_op.cu.h" #include "paddle/fluid/platform/complex.h" #include "paddle/fluid/platform/float16.h" namespace ops = paddle::operators; namespace plat = paddle::platform; namespace paddle { namespace operators { template <typename T> class ElementwiseAddKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { std::vector<const framework::Tensor*> ins; std::vector<framework::Tensor*> outs; const auto& cuda_ctx = ctx.template device_context<platform::CUDADeviceContext>(); int axis = PackTensorsIntoVector<T>(ctx, &ins, &outs); LaunchElementwiseCudaKernel<ElementwiseType::kBinary, T, T>( cuda_ctx, ins, &outs, axis, AddFunctor<T>()); } }; template <typename T> static __global__ void SimpleElemwiseAddGradCUDAKernel( const T* __restrict__ dout, int size, int vec_size, T* dx, T* dy) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; int loop = size / vec_size; int remainder = size % vec_size; const float4* dout_vec = reinterpret_cast<const float4*>(dout); float4* dx_vec = reinterpret_cast<float4*>(dx); float4* dy_vec = reinterpret_cast<float4*>(dy); float4 tmp_loop; for (int i = tid; i < loop; i += stride) { tmp_loop = dout_vec[i]; dx_vec[i] = tmp_loop; dy_vec[i] = tmp_loop; } if (tid == loop && remainder != 0) { T tmp_rem; while (remainder) { int idx = size - remainder; remainder--; tmp_rem = dout[idx]; dx[idx] = tmp_rem; dy[idx] = tmp_rem; } } } template <typename DeviceContext, typename T> typename std::enable_if< std::is_same<DeviceContext, platform::CUDADeviceContext>::value>::type default_elementwise_add_grad(const framework::ExecutionContext& ctx, const framework::Tensor* x, const framework::Tensor* y, const framework::Tensor* out, const framework::Tensor* dout, framework::Tensor* dx, framework::Tensor* dy) { int axis = ctx.Attr<int>("axis"); auto* dout_data = dout->data<T>(); // dx if (dx != nullptr) { auto* dx_data = dx->mutable_data<T>(ctx.GetPlace()); if (dx->dims() == dout->dims()) { if (dx_data != dout_data) { framework::TensorCopy( *dout, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), dx); } } else { // For inplace strategy, dx will be stored in addr of dout, which makes // the result of dy wrong. 
if (dx->IsSharedBufferWith(*dout)) { dx->clear(); dx->mutable_data<T>(x->dims(), ctx.GetPlace()); } std::vector<int> reduce_dims = GetReduceDim(x->dims(), out->dims(), axis); gpuStream_t stream = ctx.cuda_device_context().stream(); TensorReduceFunctorImpl<T, T, CustomSum>(*dout, dx, reduce_dims, stream); } } // dy if (dy != nullptr) { auto* dy_data = dy->mutable_data<T>(ctx.GetPlace()); if (dy->dims() == dout->dims()) { if (dy_data != dout_data) { framework::TensorCopy( *dout, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), dy); } } else { std::vector<int> reduce_dims = GetReduceDim(y->dims(), out->dims(), axis); gpuStream_t stream = ctx.cuda_device_context().stream(); TensorReduceFunctorImpl<T, T, CustomSum>(*dout, dy, reduce_dims, stream); } } } template <typename DeviceContext, typename T> typename std::enable_if< std::is_same<DeviceContext, plat::CUDADeviceContext>::value>::type elementwise_add_grad(const framework::ExecutionContext& ctx, const framework::Tensor* x, const framework::Tensor* y, const framework::Tensor* out, const framework::Tensor* dout, framework::Tensor* dx, framework::Tensor* dy) { auto* dx_data = dx->mutable_data<T>(ctx.GetPlace()); auto* dy_data = dy->mutable_data<T>(ctx.GetPlace()); auto* dout_data = dout->data<T>(); if (dx_data == dout_data && dy_data != dout_data) { VLOG(4) << "Special case when dx_data is the same as dout_data, " "only need copy dout to dy"; framework::TensorCopy( *dout, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), dy); } else if (dx_data != dout_data && dy_data == dout_data) { VLOG(4) << "Special case when dy_data is the same as dout_data, " "only need copy dout to dx"; framework::TensorCopy( *dout, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), dx); } else if (dx_data != dout_data && dy_data != dout_data) { auto size = x->numel(); int vec_size = max(static_cast<int>(sizeof(float4) / sizeof(T)), 1); dim3 block_size = dim3(ELEMENTWISE_BLOCK_SIZE, 1); dim3 grid_size = dim3(((size + vec_size - 1) / vec_size + ELEMENTWISE_BLOCK_SIZE - 1) / ELEMENTWISE_BLOCK_SIZE, 1); hipLaunchKernelGGL(( SimpleElemwiseAddGradCUDAKernel< T>), dim3(grid_size), dim3(block_size), 0, ctx.template device_context<plat::CUDADeviceContext>().stream(), dout->data<T>(), size, vec_size, dx->mutable_data<T>(ctx.GetPlace()), dy->mutable_data<T>(ctx.GetPlace())); } else { VLOG(4) << "Special case when dy_data is the same as dout_data, " "and dx_data is the same as dout_data, do not need " "any operator"; } } } // namespace operators } // namespace paddle REGISTER_OP_CUDA_KERNEL( elementwise_add, ops::ElementwiseAddKernel<plat::CUDADeviceContext, float>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, double>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, int>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, int64_t>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::float16>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::complex<float>>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::complex<double>>); REGISTER_OP_CUDA_KERNEL( elementwise_add_grad, ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, float>, ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, double>, ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, int>, ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, int64_t>, ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, plat::float16>, ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, plat::complex<float>>, 
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, plat::complex<double>>); REGISTER_OP_CUDA_KERNEL( elementwise_add_grad_grad, ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, float>, ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, double>, ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, int>, ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, int64_t>, ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, plat::float16>, ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, plat::complex<float>>, ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, plat::complex<double>>); REGISTER_OP_CUDA_KERNEL( grad_add, ops::ElementwiseAddKernel<plat::CUDADeviceContext, float>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, double>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, int>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, int64_t>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::float16>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::complex<float>>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::complex<double>>);
85d63da8f7222c09f3443ef3a1c5a2286481305c.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/elementwise/elementwise_add_op.h" #include "paddle/fluid/operators/elementwise/elementwise_op_broadcast.cu.h" #include "paddle/fluid/operators/reduce_ops/reduce_functor_op.h" #include "paddle/fluid/operators/reduce_ops/reduce_op.cu.h" #include "paddle/fluid/platform/complex.h" #include "paddle/fluid/platform/float16.h" namespace ops = paddle::operators; namespace plat = paddle::platform; namespace paddle { namespace operators { template <typename T> class ElementwiseAddKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { std::vector<const framework::Tensor*> ins; std::vector<framework::Tensor*> outs; const auto& cuda_ctx = ctx.template device_context<platform::CUDADeviceContext>(); int axis = PackTensorsIntoVector<T>(ctx, &ins, &outs); LaunchElementwiseCudaKernel<ElementwiseType::kBinary, T, T>( cuda_ctx, ins, &outs, axis, AddFunctor<T>()); } }; template <typename T> static __global__ void SimpleElemwiseAddGradCUDAKernel( const T* __restrict__ dout, int size, int vec_size, T* dx, T* dy) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; int loop = size / vec_size; int remainder = size % vec_size; const float4* dout_vec = reinterpret_cast<const float4*>(dout); float4* dx_vec = reinterpret_cast<float4*>(dx); float4* dy_vec = reinterpret_cast<float4*>(dy); float4 tmp_loop; for (int i = tid; i < loop; i += stride) { tmp_loop = dout_vec[i]; dx_vec[i] = tmp_loop; dy_vec[i] = tmp_loop; } if (tid == loop && remainder != 0) { T tmp_rem; while (remainder) { int idx = size - remainder; remainder--; tmp_rem = dout[idx]; dx[idx] = tmp_rem; dy[idx] = tmp_rem; } } } template <typename DeviceContext, typename T> typename std::enable_if< std::is_same<DeviceContext, platform::CUDADeviceContext>::value>::type default_elementwise_add_grad(const framework::ExecutionContext& ctx, const framework::Tensor* x, const framework::Tensor* y, const framework::Tensor* out, const framework::Tensor* dout, framework::Tensor* dx, framework::Tensor* dy) { int axis = ctx.Attr<int>("axis"); auto* dout_data = dout->data<T>(); // dx if (dx != nullptr) { auto* dx_data = dx->mutable_data<T>(ctx.GetPlace()); if (dx->dims() == dout->dims()) { if (dx_data != dout_data) { framework::TensorCopy( *dout, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), dx); } } else { // For inplace strategy, dx will be stored in addr of dout, which makes // the result of dy wrong. 
if (dx->IsSharedBufferWith(*dout)) { dx->clear(); dx->mutable_data<T>(x->dims(), ctx.GetPlace()); } std::vector<int> reduce_dims = GetReduceDim(x->dims(), out->dims(), axis); gpuStream_t stream = ctx.cuda_device_context().stream(); TensorReduceFunctorImpl<T, T, CustomSum>(*dout, dx, reduce_dims, stream); } } // dy if (dy != nullptr) { auto* dy_data = dy->mutable_data<T>(ctx.GetPlace()); if (dy->dims() == dout->dims()) { if (dy_data != dout_data) { framework::TensorCopy( *dout, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), dy); } } else { std::vector<int> reduce_dims = GetReduceDim(y->dims(), out->dims(), axis); gpuStream_t stream = ctx.cuda_device_context().stream(); TensorReduceFunctorImpl<T, T, CustomSum>(*dout, dy, reduce_dims, stream); } } } template <typename DeviceContext, typename T> typename std::enable_if< std::is_same<DeviceContext, plat::CUDADeviceContext>::value>::type elementwise_add_grad(const framework::ExecutionContext& ctx, const framework::Tensor* x, const framework::Tensor* y, const framework::Tensor* out, const framework::Tensor* dout, framework::Tensor* dx, framework::Tensor* dy) { auto* dx_data = dx->mutable_data<T>(ctx.GetPlace()); auto* dy_data = dy->mutable_data<T>(ctx.GetPlace()); auto* dout_data = dout->data<T>(); if (dx_data == dout_data && dy_data != dout_data) { VLOG(4) << "Special case when dx_data is the same as dout_data, " "only need copy dout to dy"; framework::TensorCopy( *dout, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), dy); } else if (dx_data != dout_data && dy_data == dout_data) { VLOG(4) << "Special case when dy_data is the same as dout_data, " "only need copy dout to dx"; framework::TensorCopy( *dout, ctx.GetPlace(), ctx.template device_context<platform::DeviceContext>(), dx); } else if (dx_data != dout_data && dy_data != dout_data) { auto size = x->numel(); int vec_size = max(static_cast<int>(sizeof(float4) / sizeof(T)), 1); dim3 block_size = dim3(ELEMENTWISE_BLOCK_SIZE, 1); dim3 grid_size = dim3(((size + vec_size - 1) / vec_size + ELEMENTWISE_BLOCK_SIZE - 1) / ELEMENTWISE_BLOCK_SIZE, 1); SimpleElemwiseAddGradCUDAKernel< T><<<grid_size, block_size, 0, ctx.template device_context<plat::CUDADeviceContext>().stream()>>>( dout->data<T>(), size, vec_size, dx->mutable_data<T>(ctx.GetPlace()), dy->mutable_data<T>(ctx.GetPlace())); } else { VLOG(4) << "Special case when dy_data is the same as dout_data, " "and dx_data is the same as dout_data, do not need " "any operator"; } } } // namespace operators } // namespace paddle REGISTER_OP_CUDA_KERNEL( elementwise_add, ops::ElementwiseAddKernel<plat::CUDADeviceContext, float>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, double>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, int>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, int64_t>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::float16>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::complex<float>>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::complex<double>>); REGISTER_OP_CUDA_KERNEL( elementwise_add_grad, ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, float>, ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, double>, ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, int>, ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, int64_t>, ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, plat::float16>, ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, plat::complex<float>>, 
ops::ElementwiseAddGradKernel<plat::CUDADeviceContext, plat::complex<double>>); REGISTER_OP_CUDA_KERNEL( elementwise_add_grad_grad, ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, float>, ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, double>, ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, int>, ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, int64_t>, ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, plat::float16>, ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, plat::complex<float>>, ops::ElementwiseAddDoubleGradKernel<plat::CUDADeviceContext, plat::complex<double>>); REGISTER_OP_CUDA_KERNEL( grad_add, ops::ElementwiseAddKernel<plat::CUDADeviceContext, float>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, double>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, int>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, int64_t>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::float16>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::complex<float>>, ops::ElementwiseAddKernel<plat::CUDADeviceContext, plat::complex<double>>);
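SimpleElemwiseAddGradCUDAKernel above duplicates dout into dx and dy by moving the bulk of the buffer as float4 packets and letting a single thread handle the size % vec_size tail. A standalone sketch of that vectorized-copy-with-scalar-tail pattern (hypothetical names; like the original, it assumes the grid has more threads than float4 packets):

// Vectorized copy: the bulk moves as 16-byte float4 loads/stores, the final
// size % 4 elements are copied one by one by the thread with tid == loop.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void copy_vec4(const float* __restrict__ src, float* dst, int size) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = gridDim.x * blockDim.x;
  int loop = size / 4;  // number of full float4 packets
  int remainder = size % 4;
  const float4* src4 = reinterpret_cast<const float4*>(src);
  float4* dst4 = reinterpret_cast<float4*>(dst);
  for (int i = tid; i < loop; i += stride)
    dst4[i] = src4[i];  // one 16-byte transaction per packet
  if (tid == loop) {    // a single thread mops up the tail
    for (int r = 0; r < remainder; ++r)
      dst[size - remainder + r] = src[size - remainder + r];
  }
}

int main() {
  const int size = 1027;  // deliberately not a multiple of 4
  float h_src[size], h_dst[size];
  for (int i = 0; i < size; ++i) h_src[i] = static_cast<float>(i);
  float *d_src, *d_dst;
  cudaMalloc(&d_src, size * sizeof(float));
  cudaMalloc(&d_dst, size * sizeof(float));
  cudaMemcpy(d_src, h_src, sizeof(h_src), cudaMemcpyHostToDevice);
  copy_vec4<<<4, 256>>>(d_src, d_dst, size);
  cudaMemcpy(h_dst, d_dst, sizeof(h_dst), cudaMemcpyDeviceToHost);
  bool ok = true;
  for (int i = 0; i < size; ++i) ok = ok && (h_dst[i] == h_src[i]);
  printf("copy %s\n", ok ? "OK" : "FAILED");
  cudaFree(d_src); cudaFree(d_dst);
  return 0;
}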
dda4cc88782a7d882cca67b4dae08d24feee6d40.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <assert.h>

__constant__ int inc;
__device__ int sum;

// Note: this __global__ function shares its name with the built-in __device__
// atomicAdd; the call inside the kernel still resolves to the built-in
// because the argument lists differ.
__global__ void atomicAdd() {
  int s = atomicAdd(&sum, inc);
  assert((s - 1) % inc == 0);
  if (threadIdx.x == 0) {
    printf("blockIdx.x = %d, sum = %d\n", blockIdx.x, s);
  }
}

int main(int argc, char *argv[]) {
  // Initialize inc and sum.
  int h_inc = 3;
  int h_sum = 1;
  // Copy inc and sum from host memory to device memory synchronously.
  hipMemcpyToSymbol(inc, &h_inc, sizeof(int));
  hipMemcpyToSymbol(sum, &h_sum, sizeof(int));
  // Invoke the kernel on device asynchronously.
  hipLaunchKernelGGL(( atomicAdd), dim3(2), dim3(2), 0, 0, );
  // Copy sum from device memory to host memory synchronously.
  hipMemcpyFromSymbol(&h_sum, sum, sizeof(int));
  // Print the result.
  printf("sum = %d\n", h_sum);
  // Cleanup.
  hipDeviceReset();
}
dda4cc88782a7d882cca67b4dae08d24feee6d40.cu
#include <stdio.h>
#include <assert.h>

__constant__ int inc;
__device__ int sum;

// Note: this __global__ function shares its name with the built-in __device__
// atomicAdd; the call inside the kernel still resolves to the built-in
// because the argument lists differ.
__global__ void atomicAdd() {
  int s = atomicAdd(&sum, inc);
  assert((s - 1) % inc == 0);
  if (threadIdx.x == 0) {
    printf("blockIdx.x = %d, sum = %d\n", blockIdx.x, s);
  }
}

int main(int argc, char *argv[]) {
  // Initialize inc and sum.
  int h_inc = 3;
  int h_sum = 1;
  // Copy inc and sum from host memory to device memory synchronously.
  cudaMemcpyToSymbol(inc, &h_inc, sizeof(int));
  cudaMemcpyToSymbol(sum, &h_sum, sizeof(int));
  // Invoke the kernel on device asynchronously.
  atomicAdd<<<2, 2>>>();
  // Copy sum from device memory to host memory synchronously.
  cudaMemcpyFromSymbol(&h_sum, sum, sizeof(int));
  // Print the result.
  printf("sum = %d\n", h_sum);
  // Cleanup.
  cudaDeviceReset();
}
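Although the order in which the four threads increment sum is nondeterministic (the per-thread assert only checks that each observed value is initial + k*inc for some k), the final value is exactly initial + inc * total threads. A small hypothetical sketch that derives and checks that expected result:

// Deterministic end state of concurrent atomicAdd: 1 + 3 * (2 blocks * 2
// threads) = 13, regardless of scheduling order.
#include <cassert>
#include <cstdio>
#include <cuda_runtime.h>

__device__ int d_sum;

__global__ void add_inc(int inc) { atomicAdd(&d_sum, inc); }

int main() {
  const int h_inc = 3, h_init = 1, blocks = 2, threads = 2;
  int h_sum = h_init;
  cudaMemcpyToSymbol(d_sum, &h_sum, sizeof(int));
  add_inc<<<blocks, threads>>>(h_inc);
  cudaMemcpyFromSymbol(&h_sum, d_sum, sizeof(int));
  const int expected = h_init + h_inc * blocks * threads;  // 13
  printf("sum = %d (expected %d)\n", h_sum, expected);
  assert(h_sum == expected);
  return 0;
}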
6e5854e27152cb75ea7c242fafde930864550c41.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // This example demonstrates a block-wise inclusive // parallel prefix sum (scan) algorithm. #include <stdlib.h> #include <stdio.h> #include <vector> #include <iostream> // This kernel computes, per-block, a block-sized scan // of the input. It assumes that the block size evenly // divides the input size __global__ void inclusive_scan(const unsigned int *input, unsigned int *result) { extern __shared__ unsigned int sdata[]; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // load input into __shared__ memory unsigned int sum = input[i]; sdata[threadIdx.x] = sum; __syncthreads(); for(int offset = 1; offset < blockDim.x; offset <<= 1) { if(threadIdx.x >= offset) { sum += sdata[threadIdx.x - offset]; } // wait until every thread has updated its partial sum __syncthreads(); // write my partial sum sdata[threadIdx.x] = sum; // wait until every thread has written its partial sum __syncthreads(); } // we're done! each thread writes out its result result[i] = sdata[threadIdx.x]; } int main(void) { // use small input sizes for illustrative purposes const int num_blocks = 4; const int block_size = 16; const int num_elements = num_blocks * block_size; // generate random input in [0,5] on the host std::vector<unsigned int> h_input(num_elements); for(unsigned int i = 0; i < num_elements; ++i) { h_input[i] = rand() % 6; } // copy input to device memory unsigned int *d_input = 0; hipMalloc((void**)&d_input, sizeof(unsigned int) * num_elements); hipMemcpy(d_input, &h_input[0], sizeof(unsigned int) * num_elements, hipMemcpyHostToDevice); // allocate space for the result unsigned int *d_result = 0; hipMalloc((void**)&d_result, sizeof(unsigned int) * num_elements); hipLaunchKernelGGL(( inclusive_scan), dim3(num_blocks), dim3(block_size), block_size * sizeof(unsigned int), 0, d_input, d_result); // copy result to host memory std::vector<unsigned int> h_result(num_elements); hipMemcpy(&h_result[0], d_result, sizeof(unsigned int) * num_elements, hipMemcpyDeviceToHost); // print out the results for(int b = 0; b < num_blocks; ++b) { std::cout << "Block " << b << std::endl << std::endl; std::cout << "Input: " << std::endl; for(int i = 0; i < block_size; ++i) { printf("%2d ", h_input[b * block_size + i]); } std::cout << std::endl; std::cout << "Result: " << std::endl; for(int i = 0; i < block_size; ++i) { printf("%2d ", h_result[b * block_size + i]); } std::cout << std::endl << std::endl << std::endl; } return 0; }
6e5854e27152cb75ea7c242fafde930864550c41.cu
// This example demonstrates a block-wise inclusive // parallel prefix sum (scan) algorithm. #include <stdlib.h> #include <stdio.h> #include <vector> #include <iostream> // This kernel computes, per-block, a block-sized scan // of the input. It assumes that the block size evenly // divides the input size __global__ void inclusive_scan(const unsigned int *input, unsigned int *result) { extern __shared__ unsigned int sdata[]; unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; // load input into __shared__ memory unsigned int sum = input[i]; sdata[threadIdx.x] = sum; __syncthreads(); for(int offset = 1; offset < blockDim.x; offset <<= 1) { if(threadIdx.x >= offset) { sum += sdata[threadIdx.x - offset]; } // wait until every thread has updated its partial sum __syncthreads(); // write my partial sum sdata[threadIdx.x] = sum; // wait until every thread has written its partial sum __syncthreads(); } // we're done! each thread writes out its result result[i] = sdata[threadIdx.x]; } int main(void) { // use small input sizes for illustrative purposes const int num_blocks = 4; const int block_size = 16; const int num_elements = num_blocks * block_size; // generate random input in [0,5] on the host std::vector<unsigned int> h_input(num_elements); for(unsigned int i = 0; i < num_elements; ++i) { h_input[i] = rand() % 6; } // copy input to device memory unsigned int *d_input = 0; cudaMalloc((void**)&d_input, sizeof(unsigned int) * num_elements); cudaMemcpy(d_input, &h_input[0], sizeof(unsigned int) * num_elements, cudaMemcpyHostToDevice); // allocate space for the result unsigned int *d_result = 0; cudaMalloc((void**)&d_result, sizeof(unsigned int) * num_elements); inclusive_scan<<<num_blocks, block_size, block_size * sizeof(unsigned int)>>>(d_input, d_result); // copy result to host memory std::vector<unsigned int> h_result(num_elements); cudaMemcpy(&h_result[0], d_result, sizeof(unsigned int) * num_elements, cudaMemcpyDeviceToHost); // print out the results for(int b = 0; b < num_blocks; ++b) { std::cout << "Block " << b << std::endl << std::endl; std::cout << "Input: " << std::endl; for(int i = 0; i < block_size; ++i) { printf("%2d ", h_input[b * block_size + i]); } std::cout << std::endl; std::cout << "Result: " << std::endl; for(int i = 0; i < block_size; ++i) { printf("%2d ", h_result[b * block_size + i]); } std::cout << std::endl << std::endl << std::endl; } return 0; }
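The kernel above is a Hillis-Steele inclusive scan that restarts at every block boundary, so each block's output is independent of its neighbors. A short host-side reference that reproduces the per-block result and can be compared element-for-element against the GPU output (hypothetical standalone sketch, shown here with all-ones input so each block scans to 1, 2, ..., block_size):

// CPU reference for a per-block inclusive scan: accumulate within a block,
// reset the running sum at each block boundary.
#include <cstdio>
#include <vector>

int main() {
  const int num_blocks = 4, block_size = 16;
  std::vector<unsigned int> input(num_blocks * block_size, 1u);  // all ones
  std::vector<unsigned int> expected(input.size());
  for (int b = 0; b < num_blocks; ++b) {
    unsigned int running = 0;  // restarts at each block, like the kernel
    for (int i = 0; i < block_size; ++i) {
      running += input[b * block_size + i];    // inclusive: add, then store
      expected[b * block_size + i] = running;
    }
  }
  for (int b = 0; b < num_blocks; ++b) {
    for (int i = 0; i < block_size; ++i)
      printf("%2u ", expected[b * block_size + i]);
    printf("\n");
  }
  return 0;
}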
3a5c79e8f93bd8eb0db65a3331cc1670854a6801.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <scalar.h>

//scalar and current element
__device__ float op(float d1,float d2,float *params) {
  // reverse division: scalar (d1) divided by the current element (d2);
  // the original body computed d2 - d1, a subtraction inconsistent with
  // this kernel's rdiv name
  return d1 / d2;
}

extern "C"
__global__ void rdiv_scalar_float(int n, int idx,float dx,float *dy,int incy,float *params,float *result) {
  transform(n,idx,dx,dy,incy,params,result);
}
3a5c79e8f93bd8eb0db65a3331cc1670854a6801.cu
#include <scalar.h>

//scalar and current element
__device__ float op(float d1,float d2,float *params) {
  // reverse division: scalar (d1) divided by the current element (d2);
  // the original body computed d2 - d1, a subtraction inconsistent with
  // this kernel's rdiv name
  return d1 / d2;
}

extern "C"
__global__ void rdiv_scalar_float(int n, int idx,float dx,float *dy,int incy,float *params,float *result) {
  transform(n,idx,dx,dy,incy,params,result);
}
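The transform helper lives in scalar.h, which is not shown here. Under the assumption suggested by the "scalar and current element" comment, that it applies op(dx, dy[i]) across a strided array, a minimal standalone version of the same reverse-scalar pattern looks like this (hypothetical names, grid-stride loop written out explicitly):

// Reverse-scalar transform sketch: result[i] = scalar / y[i * incy].
#include <cstdio>
#include <cuda_runtime.h>

// reverse op: the scalar is the left operand, the array element the right
__device__ float rop(float scalar, float elem) { return scalar / elem; }

__global__ void rscalar_transform(int n, float scalar, const float* y,
                                  int incy, float* result) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += gridDim.x * blockDim.x)
    result[i] = rop(scalar, y[i * incy]);
}

int main() {
  const int n = 4;
  float h_y[n] = {1.f, 2.f, 4.f, 8.f}, h_r[n];
  float *d_y, *d_r;
  cudaMalloc(&d_y, n * sizeof(float));
  cudaMalloc(&d_r, n * sizeof(float));
  cudaMemcpy(d_y, h_y, sizeof(h_y), cudaMemcpyHostToDevice);
  rscalar_transform<<<1, 128>>>(n, 8.0f, d_y, 1, d_r);
  cudaMemcpy(h_r, d_r, sizeof(h_r), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) printf("%g ", h_r[i]);  // 8 4 2 1
  printf("\n");
  cudaFree(d_y); cudaFree(d_r);
  return 0;
}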
64693b5773eb38b00681cb156d724fa0f221c4ab.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuml/manifold/umapparams.h> #include <cuml/manifold/common.hpp> #include <cuml/manifold/umap.hpp> #include "runner.cuh" #include <raft/cuda_utils.cuh> #include <iostream> namespace ML { namespace UMAP { static const int TPB_X = 256; /** * \brief Dense transform * * @param X: pointer to input array * @param n: n_samples of input array * @param d: n_features of input array * @param orig_X: self.X_m.ptr * @param orig_n: self.n_rows * @param embedding: pointer to stored embedding * @param embedding_n: n_samples in embedding, equals to orig_n * @param transformed: output array with shape n * n_components */ void transform(const raft::handle_t &handle, float *X, int n, int d, knn_indices_dense_t *knn_indices, float *knn_dists, float *orig_X, int orig_n, float *embedding, int embedding_n, UMAPParams *params, float *transformed) { if (knn_indices != nullptr && knn_dists != nullptr) { manifold_precomputed_knn_inputs_t<knn_indices_dense_t, float> inputs( knn_indices, knn_dists, X, nullptr, n, d, params->n_neighbors); UMAPAlgo::_transform< knn_indices_dense_t, float, manifold_precomputed_knn_inputs_t<knn_indices_dense_t, float>, TPB_X>( handle, inputs, inputs, embedding, embedding_n, params, transformed); } else { manifold_dense_inputs_t<float> inputs(X, nullptr, n, d); manifold_dense_inputs_t<float> orig_inputs(orig_X, nullptr, orig_n, d); UMAPAlgo::_transform<knn_indices_dense_t, float, manifold_dense_inputs_t<float>, TPB_X>( handle, inputs, orig_inputs, embedding, embedding_n, params, transformed); } } // Sparse transform void transform_sparse(const raft::handle_t &handle, int *indptr, int *indices, float *data, size_t nnz, int n, int d, int *orig_x_indptr, int *orig_x_indices, float *orig_x_data, size_t orig_nnz, int orig_n, float *embedding, int embedding_n, UMAPParams *params, float *transformed) { manifold_sparse_inputs_t<knn_indices_sparse_t, float> inputs( indptr, indices, data, nullptr, nnz, n, d); manifold_sparse_inputs_t<knn_indices_sparse_t, float> orig_x_inputs( orig_x_indptr, orig_x_indices, orig_x_data, nullptr, orig_nnz, orig_n, d); UMAPAlgo::_transform<knn_indices_sparse_t, float, manifold_sparse_inputs_t<int, float>, TPB_X>( handle, inputs, orig_x_inputs, embedding, embedding_n, params, transformed); } // Dense fit void fit(const raft::handle_t &handle, float *X, // input matrix float *y, // labels int n, int d, knn_indices_dense_t *knn_indices, float *knn_dists, UMAPParams *params, float *embeddings) { if (knn_indices != nullptr && knn_dists != nullptr) { CUML_LOG_DEBUG("Calling UMAP::fit() with precomputed KNN"); manifold_precomputed_knn_inputs_t<knn_indices_dense_t, float> inputs( knn_indices, knn_dists, X, y, n, d, params->n_neighbors); if (y != nullptr) { UMAPAlgo::_fit_supervised< knn_indices_dense_t, float, manifold_precomputed_knn_inputs_t<knn_indices_dense_t, float>, TPB_X>( handle, inputs, params, embeddings); } else { 
UMAPAlgo::_fit< knn_indices_dense_t, float, manifold_precomputed_knn_inputs_t<knn_indices_dense_t, float>, TPB_X>( handle, inputs, params, embeddings); } } else { manifold_dense_inputs_t<float> inputs(X, y, n, d); if (y != nullptr) { UMAPAlgo::_fit_supervised<knn_indices_dense_t, float, manifold_dense_inputs_t<float>, TPB_X>( handle, inputs, params, embeddings); } else { UMAPAlgo::_fit<knn_indices_dense_t, float, manifold_dense_inputs_t<float>, TPB_X>(handle, inputs, params, embeddings); } } } // Sparse fit void fit_sparse(const raft::handle_t &handle, int *indptr, // input matrix int *indices, float *data, size_t nnz, float *y, int n, // rows int d, // cols UMAPParams *params, float *embeddings) { manifold_sparse_inputs_t<int, float> inputs(indptr, indices, data, y, nnz, n, d); if (y != nullptr) { UMAPAlgo::_fit_supervised<knn_indices_sparse_t, float, manifold_sparse_inputs_t<int, float>, TPB_X>( handle, inputs, params, embeddings); } else { UMAPAlgo::_fit<knn_indices_sparse_t, float, manifold_sparse_inputs_t<int, float>, TPB_X>( handle, inputs, params, embeddings); } } void find_ab(const raft::handle_t &handle, UMAPParams *params) { hipStream_t stream = handle.get_stream(); auto d_alloc = handle.get_device_allocator(); UMAPAlgo::find_ab(params, d_alloc, stream); } } // namespace UMAP } // namespace ML
64693b5773eb38b00681cb156d724fa0f221c4ab.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuml/manifold/umapparams.h> #include <cuml/manifold/common.hpp> #include <cuml/manifold/umap.hpp> #include "runner.cuh" #include <raft/cuda_utils.cuh> #include <iostream> namespace ML { namespace UMAP { static const int TPB_X = 256; /** * \brief Dense transform * * @param X: pointer to input array * @param n: n_samples of input array * @param d: n_features of input array * @param orig_X: self.X_m.ptr * @param orig_n: self.n_rows * @param embedding: pointer to stored embedding * @param embedding_n: n_samples in embedding, equals to orig_n * @param transformed: output array with shape n * n_components */ void transform(const raft::handle_t &handle, float *X, int n, int d, knn_indices_dense_t *knn_indices, float *knn_dists, float *orig_X, int orig_n, float *embedding, int embedding_n, UMAPParams *params, float *transformed) { if (knn_indices != nullptr && knn_dists != nullptr) { manifold_precomputed_knn_inputs_t<knn_indices_dense_t, float> inputs( knn_indices, knn_dists, X, nullptr, n, d, params->n_neighbors); UMAPAlgo::_transform< knn_indices_dense_t, float, manifold_precomputed_knn_inputs_t<knn_indices_dense_t, float>, TPB_X>( handle, inputs, inputs, embedding, embedding_n, params, transformed); } else { manifold_dense_inputs_t<float> inputs(X, nullptr, n, d); manifold_dense_inputs_t<float> orig_inputs(orig_X, nullptr, orig_n, d); UMAPAlgo::_transform<knn_indices_dense_t, float, manifold_dense_inputs_t<float>, TPB_X>( handle, inputs, orig_inputs, embedding, embedding_n, params, transformed); } } // Sparse transform void transform_sparse(const raft::handle_t &handle, int *indptr, int *indices, float *data, size_t nnz, int n, int d, int *orig_x_indptr, int *orig_x_indices, float *orig_x_data, size_t orig_nnz, int orig_n, float *embedding, int embedding_n, UMAPParams *params, float *transformed) { manifold_sparse_inputs_t<knn_indices_sparse_t, float> inputs( indptr, indices, data, nullptr, nnz, n, d); manifold_sparse_inputs_t<knn_indices_sparse_t, float> orig_x_inputs( orig_x_indptr, orig_x_indices, orig_x_data, nullptr, orig_nnz, orig_n, d); UMAPAlgo::_transform<knn_indices_sparse_t, float, manifold_sparse_inputs_t<int, float>, TPB_X>( handle, inputs, orig_x_inputs, embedding, embedding_n, params, transformed); } // Dense fit void fit(const raft::handle_t &handle, float *X, // input matrix float *y, // labels int n, int d, knn_indices_dense_t *knn_indices, float *knn_dists, UMAPParams *params, float *embeddings) { if (knn_indices != nullptr && knn_dists != nullptr) { CUML_LOG_DEBUG("Calling UMAP::fit() with precomputed KNN"); manifold_precomputed_knn_inputs_t<knn_indices_dense_t, float> inputs( knn_indices, knn_dists, X, y, n, d, params->n_neighbors); if (y != nullptr) { UMAPAlgo::_fit_supervised< knn_indices_dense_t, float, manifold_precomputed_knn_inputs_t<knn_indices_dense_t, float>, TPB_X>( handle, inputs, params, embeddings); } else { UMAPAlgo::_fit< knn_indices_dense_t, float, 
manifold_precomputed_knn_inputs_t<knn_indices_dense_t, float>, TPB_X>( handle, inputs, params, embeddings); } } else { manifold_dense_inputs_t<float> inputs(X, y, n, d); if (y != nullptr) { UMAPAlgo::_fit_supervised<knn_indices_dense_t, float, manifold_dense_inputs_t<float>, TPB_X>( handle, inputs, params, embeddings); } else { UMAPAlgo::_fit<knn_indices_dense_t, float, manifold_dense_inputs_t<float>, TPB_X>(handle, inputs, params, embeddings); } } } // Sparse fit void fit_sparse(const raft::handle_t &handle, int *indptr, // input matrix int *indices, float *data, size_t nnz, float *y, int n, // rows int d, // cols UMAPParams *params, float *embeddings) { manifold_sparse_inputs_t<int, float> inputs(indptr, indices, data, y, nnz, n, d); if (y != nullptr) { UMAPAlgo::_fit_supervised<knn_indices_sparse_t, float, manifold_sparse_inputs_t<int, float>, TPB_X>( handle, inputs, params, embeddings); } else { UMAPAlgo::_fit<knn_indices_sparse_t, float, manifold_sparse_inputs_t<int, float>, TPB_X>( handle, inputs, params, embeddings); } } void find_ab(const raft::handle_t &handle, UMAPParams *params) { cudaStream_t stream = handle.get_stream(); auto d_alloc = handle.get_device_allocator(); UMAPAlgo::find_ab(params, d_alloc, stream); } } // namespace UMAP } // namespace ML
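Based only on the signatures shown above, a hedged usage sketch of the dense fit path: knn_indices and knn_dists are passed as nullptr so fit computes the KNN graph itself, and embeddings must be sized n rows by params.n_components. This assumes a cuML build of roughly this vintage; handle construction and the remaining UMAPParams defaults are left as constructed.

// Hypothetical caller for ML::UMAP::fit / find_ab as declared in this file.
#include <cuml/manifold/umapparams.h>
#include <cuml/manifold/umap.hpp>
#include <raft/handle.hpp>

void run_umap_fit(const raft::handle_t& handle, float* d_X, int n, int d,
                  float* d_embeddings /* n * params.n_components floats */) {
  ML::UMAPParams params;
  params.n_neighbors = 15;             // field read by the wrappers above
  ML::UMAP::find_ab(handle, &params);  // fit the a/b curve parameters first
  ML::UMAP::fit(handle, d_X, /*y=*/nullptr, n, d,
                /*knn_indices=*/nullptr, /*knn_dists=*/nullptr,
                &params, d_embeddings);
}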
8245a62b08971bd44a3af96f32ad039d49710e2a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Transformer function helper functions.
Written by tomztyang, 2021/08/23
*/

#include <math.h>
#include <stdio.h>

#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG


template <unsigned int d>
__global__ void rpe_q_forward_v2(
        int b, int total_query_num, int local_size, int nhead, int hdim, int l,
        const int *query_batch_cnt, const float *relpos,
        const float *lookup_table, const float *query_features,
        float *output) {
    // dim3 blocks(total_query_num, nhead); dim3 threads(local_size);
    // params query_batch_cnt: [b]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params query_features: [total_query_num, nhead, hdim]
    // params output: [total_query_num, local_size, nhead]
    int query_idx = blockIdx.x;
    int head_idx = blockIdx.y;
    int local_key_idx = threadIdx.x;

    if (query_idx >= total_query_num || head_idx >= nhead ||
        local_key_idx >= local_size) return;

    // get query features for attention computation.
    __shared__ float shared_query_features[d];
    for (int i = local_key_idx; i < hdim; i += blockDim.x) {
        shared_query_features[i] = query_features[
            query_idx * nhead * hdim + head_idx * hdim + i];
    }
    __syncthreads();

    // 1. obtain the quantized relative position.
    relpos += query_idx * local_size + local_key_idx;
    int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
    lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim;
    output += query_idx * local_size * nhead + local_key_idx * nhead + head_idx;

    float attn_weight = 0;
    for (int i = 0; i < hdim; i++) {
        attn_weight += shared_query_features[i] * lookup_table[i];
    }
    output[0] = attn_weight;
}


void rpe_q_launcher_v2(
        int b, int total_query_num, int local_size, int nhead, int hdim, int l,
        const int *query_batch_cnt, const float *relpos,
        const float *lookup_table, const float *query_features,
        float *output) {
    // params query_batch_cnt: [b]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params query_features: [total_query_num, nhead, hdim]
    // params output: [total_query_num, local_size, nhead]
    if (hdim > 100) {
        throw "hdim should be <= 100.";
    }
    dim3 blocks(total_query_num, nhead);
    dim3 threads(local_size);
    switch (hdim) {
        case 4:
            hipLaunchKernelGGL((rpe_q_forward_v2<4>), dim3(blocks), dim3(threads), 0, 0,
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt, relpos, lookup_table, query_features, output);
            break;
        case 8:
            hipLaunchKernelGGL((rpe_q_forward_v2<8>), dim3(blocks), dim3(threads), 0, 0,
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt, relpos, lookup_table, query_features, output);
            break;
        case 16:
            hipLaunchKernelGGL((rpe_q_forward_v2<16>), dim3(blocks), dim3(threads), 0, 0,
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt, relpos, lookup_table, query_features, output);
            break;
        case 32:
            hipLaunchKernelGGL((rpe_q_forward_v2<32>), dim3(blocks), dim3(threads), 0, 0,
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt, relpos, lookup_table, query_features, output);
            break;
        default:
            hipLaunchKernelGGL((rpe_q_forward_v2<100>), dim3(blocks), dim3(threads), 0, 0,
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt, relpos, lookup_table, query_features, output);
            break;
    }
}


template <unsigned int d>
__global__ void rpe_q_backward_v2(
        int b, int total_query_num, int local_size, int nhead, int hdim, int l,
        const int *query_batch_cnt, const float *relpos,
        const float *lookup_table, const float *query_features,
        float *grad_out, float *grad_lookup_table, float *grad_query_features) {
    // dim3 blocks(total_query_num, nhead); dim3 threads(local_size);
    // params query_batch_cnt: [b]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params query_features: [total_query_num, nhead, hdim]
    // params grad_out: [total_query_num, local_size, nhead]
    // params grad_lookup_table: [l, nhead, hdim]
    // params grad_query_features: [total_query_num, nhead, hdim]
    int query_idx = blockIdx.x;
    int head_idx = blockIdx.y;
    int local_key_idx = threadIdx.x;

    // out-of-range check.
    if (query_idx >= total_query_num || head_idx >= nhead ||
        local_key_idx >= local_size) return;

    // get shared query features and shared grad query features.
    __shared__ float shared_query_features[d], shared_grad_query_features[d];
    for (int i = local_key_idx; i < hdim; i += blockDim.x) {
        shared_query_features[i] = query_features[
            query_idx * nhead * hdim + head_idx * hdim + i];
        shared_grad_query_features[i] = 0;
    }
    __syncthreads();

    // 2. obtain the quantized relative position.
    relpos += query_idx * local_size + local_key_idx;
    int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
    lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim;
    grad_lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim;
    grad_query_features += query_idx * nhead * hdim + head_idx * hdim;

    float gradient = grad_out[
        query_idx * local_size * nhead + local_key_idx * nhead + head_idx];
    for (int i = 0; i < hdim; i++) {
        atomicAdd(grad_lookup_table + i, gradient * shared_query_features[i]);
        atomicAdd(shared_grad_query_features + i, gradient * lookup_table[i]);
    }
    __syncthreads();

    for (int i = local_key_idx; i < hdim; i += blockDim.x) {
        grad_query_features[i] = shared_grad_query_features[i];
    }
}


void rpe_q_grad_launcher_v2(
        int b, int total_query_num, int local_size, int nhead, int hdim, int l,
        const int *query_batch_cnt, const float *relpos,
        const float *lookup_table, const float *query_features,
        float *grad_out, float *grad_lookup_table, float *grad_query_features) {
    // params query_batch_cnt: [b]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params query_features: [total_query_num, nhead, hdim]
    // params grad_out: [total_query_num, local_size, nhead]
    // params grad_lookup_table: [l, nhead, hdim]
    // params grad_query_features: [total_query_num, nhead, hdim]
    if (hdim > 100) {
        throw "hdim should be <= 100.";
    }
    dim3 blocks(total_query_num, nhead);
    dim3 threads(local_size);
    switch (hdim) {
        case 4:
            hipLaunchKernelGGL((rpe_q_backward_v2<4>), dim3(blocks), dim3(threads), 0, 0,
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt, relpos, lookup_table, query_features,
                grad_out, grad_lookup_table, grad_query_features);
            break;
        case 8:
            hipLaunchKernelGGL((rpe_q_backward_v2<8>), dim3(blocks), dim3(threads), 0, 0,
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt, relpos, lookup_table, query_features,
                grad_out, grad_lookup_table, grad_query_features);
            break;
        case 16:
            hipLaunchKernelGGL((rpe_q_backward_v2<16>), dim3(blocks), dim3(threads), 0, 0,
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt, relpos, lookup_table, query_features,
                grad_out, grad_lookup_table, grad_query_features);
            break;
        case 32:
            hipLaunchKernelGGL((rpe_q_backward_v2<32>), dim3(blocks), dim3(threads), 0, 0,
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt, relpos, lookup_table, query_features,
                grad_out, grad_lookup_table, grad_query_features);
            break;
        default:
            hipLaunchKernelGGL((rpe_q_backward_v2<100>), dim3(blocks), dim3(threads), 0, 0,
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt, relpos, lookup_table, query_features,
                grad_out, grad_lookup_table, grad_query_features);
            break;
    }
}
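// ---------------------------------------------------------------------------
// A minimal CPU reference of what rpe_q_forward_v2 computes, added as a hedged
// sketch for clarity: the helper name rpe_q_forward_cpu_ref is hypothetical
// (not part of the original source). It assumes the array layouts documented
// in the kernel comments above (relpos: [total_query_num, local_size],
// lookup_table: [l, nhead, hdim], query_features: [total_query_num, nhead, hdim],
// output: [total_query_num, local_size, nhead]).
#include <algorithm>
#include <cmath>

static void rpe_q_forward_cpu_ref(
        int total_query_num, int local_size, int nhead, int hdim, int l,
        const float *relpos, const float *lookup_table,
        const float *query_features, float *output) {
    for (int q = 0; q < total_query_num; ++q) {
        for (int k = 0; k < local_size; ++k) {
            // quantize the relative position to a table row, clamped to [0, l-1]
            int r = std::min(std::max((int)std::floor(relpos[q * local_size + k]), 0), l - 1);
            for (int h = 0; h < nhead; ++h) {
                const float *tab  = lookup_table   + (r * nhead + h) * hdim;
                const float *feat = query_features + (q * nhead + h) * hdim;
                float acc = 0.f;
                for (int i = 0; i < hdim; ++i)
                    acc += feat[i] * tab[i];   // dot(query, table row), as in the kernel
                output[(q * local_size + k) * nhead + h] = acc;
            }
        }
    }
}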
8245a62b08971bd44a3af96f32ad039d49710e2a.cu
/*
Transformer function helper functions.
Written by tomztyang, 2021/08/23
*/

#include <math.h>
#include <stdio.h>

#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG


template <unsigned int d>
__global__ void rpe_q_forward_v2(
        int b, int total_query_num, int local_size, int nhead, int hdim, int l,
        const int *query_batch_cnt, const float *relpos,
        const float *lookup_table, const float *query_features,
        float *output) {
    // dim3 blocks(total_query_num, nhead); dim3 threads(local_size);
    // params query_batch_cnt: [b]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params query_features: [total_query_num, nhead, hdim]
    // params output: [total_query_num, local_size, nhead]
    int query_idx = blockIdx.x;
    int head_idx = blockIdx.y;
    int local_key_idx = threadIdx.x;

    if (query_idx >= total_query_num || head_idx >= nhead ||
        local_key_idx >= local_size) return;

    // get query features for attention computation.
    __shared__ float shared_query_features[d];
    for (int i = local_key_idx; i < hdim; i += blockDim.x) {
        shared_query_features[i] = query_features[
            query_idx * nhead * hdim + head_idx * hdim + i];
    }
    __syncthreads();

    // 1. obtain the quantized relative position.
    relpos += query_idx * local_size + local_key_idx;
    int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
    lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim;
    output += query_idx * local_size * nhead + local_key_idx * nhead + head_idx;

    float attn_weight = 0;
    for (int i = 0; i < hdim; i++) {
        attn_weight += shared_query_features[i] * lookup_table[i];
    }
    output[0] = attn_weight;
}


void rpe_q_launcher_v2(
        int b, int total_query_num, int local_size, int nhead, int hdim, int l,
        const int *query_batch_cnt, const float *relpos,
        const float *lookup_table, const float *query_features,
        float *output) {
    // params query_batch_cnt: [b]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params query_features: [total_query_num, nhead, hdim]
    // params output: [total_query_num, local_size, nhead]
    if (hdim > 100) {
        throw "hdim should be <= 100.";
    }
    dim3 blocks(total_query_num, nhead);
    dim3 threads(local_size);
    switch (hdim) {
        case 4:
            rpe_q_forward_v2<4><<<blocks, threads>>>(
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt, relpos, lookup_table, query_features, output);
            break;
        case 8:
            rpe_q_forward_v2<8><<<blocks, threads>>>(
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt, relpos, lookup_table, query_features, output);
            break;
        case 16:
            rpe_q_forward_v2<16><<<blocks, threads>>>(
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt, relpos, lookup_table, query_features, output);
            break;
        case 32:
            rpe_q_forward_v2<32><<<blocks, threads>>>(
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt, relpos, lookup_table, query_features, output);
            break;
        default:
            rpe_q_forward_v2<100><<<blocks, threads>>>(
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt, relpos, lookup_table, query_features, output);
            break;
    }
}


template <unsigned int d>
__global__ void rpe_q_backward_v2(
        int b, int total_query_num, int local_size, int nhead, int hdim, int l,
        const int *query_batch_cnt, const float *relpos,
        const float *lookup_table, const float *query_features,
        float *grad_out, float *grad_lookup_table, float *grad_query_features) {
    // dim3 blocks(total_query_num, nhead); dim3 threads(local_size);
    // params query_batch_cnt: [b]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params query_features: [total_query_num, nhead, hdim]
    // params grad_out: [total_query_num, local_size, nhead]
    // params grad_lookup_table: [l, nhead, hdim]
    // params grad_query_features: [total_query_num, nhead, hdim]
    int query_idx = blockIdx.x;
    int head_idx = blockIdx.y;
    int local_key_idx = threadIdx.x;

    // out-of-range check.
    if (query_idx >= total_query_num || head_idx >= nhead ||
        local_key_idx >= local_size) return;

    // get shared query features and shared grad query features.
    __shared__ float shared_query_features[d], shared_grad_query_features[d];
    for (int i = local_key_idx; i < hdim; i += blockDim.x) {
        shared_query_features[i] = query_features[
            query_idx * nhead * hdim + head_idx * hdim + i];
        shared_grad_query_features[i] = 0;
    }
    __syncthreads();

    // 2. obtain the quantized relative position.
    relpos += query_idx * local_size + local_key_idx;
    int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
    lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim;
    grad_lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim;
    grad_query_features += query_idx * nhead * hdim + head_idx * hdim;

    float gradient = grad_out[
        query_idx * local_size * nhead + local_key_idx * nhead + head_idx];
    for (int i = 0; i < hdim; i++) {
        atomicAdd(grad_lookup_table + i, gradient * shared_query_features[i]);
        atomicAdd(shared_grad_query_features + i, gradient * lookup_table[i]);
    }
    __syncthreads();

    for (int i = local_key_idx; i < hdim; i += blockDim.x) {
        grad_query_features[i] = shared_grad_query_features[i];
    }
}


void rpe_q_grad_launcher_v2(
        int b, int total_query_num, int local_size, int nhead, int hdim, int l,
        const int *query_batch_cnt, const float *relpos,
        const float *lookup_table, const float *query_features,
        float *grad_out, float *grad_lookup_table, float *grad_query_features) {
    // params query_batch_cnt: [b]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params query_features: [total_query_num, nhead, hdim]
    // params grad_out: [total_query_num, local_size, nhead]
    // params grad_lookup_table: [l, nhead, hdim]
    // params grad_query_features: [total_query_num, nhead, hdim]
    if (hdim > 100) {
        throw "hdim should be <= 100.";
    }
    dim3 blocks(total_query_num, nhead);
    dim3 threads(local_size);
    switch (hdim) {
        case 4:
            rpe_q_backward_v2<4><<<blocks, threads>>>(
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt, relpos, lookup_table, query_features,
                grad_out, grad_lookup_table, grad_query_features);
            break;
        case 8:
            rpe_q_backward_v2<8><<<blocks, threads>>>(
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt, relpos, lookup_table, query_features,
                grad_out, grad_lookup_table, grad_query_features);
            break;
        case 16:
            rpe_q_backward_v2<16><<<blocks, threads>>>(
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt, relpos, lookup_table, query_features,
                grad_out, grad_lookup_table, grad_query_features);
            break;
        case 32:
            rpe_q_backward_v2<32><<<blocks, threads>>>(
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt, relpos, lookup_table, query_features,
                grad_out, grad_lookup_table, grad_query_features);
            break;
        default:
            rpe_q_backward_v2<100><<<blocks, threads>>>(
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt, relpos, lookup_table, query_features,
                grad_out, grad_lookup_table, grad_query_features);
            break;
    }
}
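// ---------------------------------------------------------------------------
// A hedged host-side usage sketch for rpe_q_launcher_v2 (defined above). The
// sizes and the helper name example_rpe_q_usage are hypothetical, chosen only
// to illustrate the expected buffer layouts; input filling is elided.
#include <cuda_runtime.h>

static void example_rpe_q_usage() {
    const int b = 1, total_query_num = 128, local_size = 32,
              nhead = 8, hdim = 16, l = 64;
    int   *d_cnt;
    float *d_relpos, *d_table, *d_query, *d_out;
    cudaMalloc(&d_cnt,    b * sizeof(int));
    cudaMalloc(&d_relpos, total_query_num * local_size * sizeof(float));
    cudaMalloc(&d_table,  l * nhead * hdim * sizeof(float));
    cudaMalloc(&d_query,  total_query_num * nhead * hdim * sizeof(float));
    cudaMalloc(&d_out,    total_query_num * local_size * nhead * sizeof(float));
    // ... copy real inputs into d_relpos / d_table / d_query here ...
    rpe_q_launcher_v2(b, total_query_num, local_size, nhead, hdim, l,
                      d_cnt, d_relpos, d_table, d_query, d_out);
    cudaDeviceSynchronize();   // hdim == 16 dispatches the <16> instantiation
    cudaFree(d_cnt); cudaFree(d_relpos); cudaFree(d_table);
    cudaFree(d_query); cudaFree(d_out);
}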
72eae68ba1d04492bc130a60b37255e64cabbc8b.hip
// !!! This is a file automatically generated by hipify!!! /* * nvbio * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define NVBIO_CUDA_DEBUG #include <hipcub/hipcub.hpp> #include <stdio.h> #include <stdlib.h> #include <vector> #include <algorithm> #include <crc/crc.h> #ifdef _OPENMP #include <omp.h> #endif #include <nvbio/sufsort/sufsort.h> #include <nvbio/sufsort/sufsort_utils.h> #include <nvbio/basic/exceptions.h> #include <nvbio/basic/timer.h> #include <nvbio/strings/string_set.h> #include <nvbio/basic/cuda/arch.h> #include <nvbio/basic/cuda/ldg.h> #include <nvbio/io/fmindex/fmindex.h> #include <nvbio/io/sequence/sequence.h> #include <nvbio/basic/dna.h> #include <nvbio/fmindex/bwt.h> #include <thrust/device_vector.h> namespace nvbio { namespace sufsort { template <uint32 SYMBOL_SIZE, typename offset_type> void make_test_string_set( const uint64 N_strings, const uint32 N, thrust::host_vector<uint32>& h_string, thrust::host_vector<offset_type>& h_offsets) { for (uint64 i = 0; i < N_strings; ++i) h_offsets[i] = offset_type( uint64(N)*i ); LCG_random rand; for (uint64 i = 0; i < h_string.size(); ++i) h_string[i] = rand.next(); h_offsets[N_strings] = N*N_strings; } struct SuffixHandler { void process( const uint32 n_suffixes, const uint32* suffix_array, const uint32* string_ids, const uint32* cum_lengths) { output.resize( n_suffixes ); thrust::copy( thrust::device_ptr<const uint32>( suffix_array ), thrust::device_ptr<const uint32>( suffix_array ) + n_suffixes, output.begin() ); } thrust::device_vector<uint32> output; }; } // namespace sufsort int sufsort_test(int argc, char* argv[]) { enum Test { kGPU_SA = 1u, kGPU_BWT = 2u, kCPU_BWT = 4u, kGPU_BWT_FUNCTIONAL = 8u, kGPU_BWT_GENOME = 16u, kGPU_BWT_SET = 32u, kCPU_BWT_SET = 64u, kGPU_SA_SET = 128u, }; uint32 TEST_MASK = 0xFFFFFFFFu; uint32 gpu_bwt_size = 50u; uint32 cpu_bwt_size = 100u; #ifdef _OPENMP uint32 threads = omp_get_num_procs(); #else uint32 threads = 1; #endif bool store_output = true; const char* index_name = "data/human.NCBI36/Homo_sapiens.NCBI36.53.dna.toplevel.fa"; BWTParams params; 
for (int i = 0; i < argc; ++i) { if (strcmp( argv[i], "-v" ) == 0 || strcmp( argv[i], "-verbosity" ) == 0 || strcmp( argv[i], "--verbosity" ) == 0) { set_verbosity( Verbosity( atoi( argv[++i] ) ) ); } else if (strcmp( argv[i], "-cpu-mem" ) == 0) { params.host_memory = atoi( argv[++i] ) * uint64(1024u*1024u); } else if (strcmp( argv[i], "-gpu-mem" ) == 0) { params.device_memory = atoi( argv[++i] ) * uint64(1024u*1024u); } else if (strcmp( argv[i], "-cpu-bwt-size" ) == 0) { cpu_bwt_size = atoi( argv[++i] ); } else if (strcmp( argv[i], "-gpu-bwt-size" ) == 0) { gpu_bwt_size = atoi( argv[++i] ); } else if (strcmp( argv[i], "-threads" ) == 0) { threads = atoi( argv[++i] ); } else if (strcmp( argv[i], "-no-output" ) == 0) { store_output = false; } else if ((strcmp( argv[i], "-genome" ) == 0) || (strcmp( argv[i], "-index" ) == 0)) { index_name = argv[++i]; } else if (strcmp( argv[i], "-tests" ) == 0) { const std::string tests_string( argv[++i] ); char temp[256]; const char* begin = tests_string.c_str(); const char* end = begin; TEST_MASK = 0u; while (1) { while (*end != ':' && *end != '\0') { temp[end - begin] = *end; end++; } temp[end - begin] = '\0'; if (strcmp( temp, "gpu-sa" ) == 0) TEST_MASK |= kGPU_SA; else if (strcmp( temp, "gpu-bwt" ) == 0) TEST_MASK |= kGPU_BWT; else if (strcmp( temp, "gpu-bwt-func" ) == 0) TEST_MASK |= kGPU_BWT_FUNCTIONAL; else if (strcmp( temp, "gpu-bwt-genome" ) == 0) TEST_MASK |= kGPU_BWT_GENOME; else if (strcmp( temp, "cpu-bwt" ) == 0) TEST_MASK |= kCPU_BWT; else if (strcmp( temp, "gpu-set-bwt" ) == 0) TEST_MASK |= kGPU_BWT_SET; else if (strcmp( temp, "cpu-set-bwt" ) == 0) TEST_MASK |= kCPU_BWT_SET; if (*end == '\0') break; ++end; begin = end; } } } #ifdef _OPENMP // Now set the number of threads omp_set_num_threads( threads ); #endif log_info(stderr, "nvbio/sufsort test... started (%u threads)\n", threads); #pragma omp parallel { log_info(stderr, " running on multiple threads\n"); } const uint32 N = 100; const uint32 SYMBOL_SIZE = 2; const uint32 SYMBOLS_PER_WORD = (8u*sizeof(uint32)) / SYMBOL_SIZE; if (TEST_MASK & kGPU_SA) { typedef uint32 index_type; typedef PackedStream<uint32*,uint8,SYMBOL_SIZE,true,index_type> packed_stream_type; const index_type N_symbols = 8u*1024u*1024u; const index_type N_words = (N_symbols + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD; log_info(stderr, " gpu sa test\n"); log_info(stderr, " %5.1f M symbols\n", (1.0e-6f*float(N_symbols))); log_info(stderr, " %5.2f GB\n", (float(N_words)*sizeof(uint32))/float(1024*1024*1024)); thrust::host_vector<uint32> h_string( N_words ); LCG_random rand; for (index_type i = 0; i < N_words; ++i) h_string[i] = rand.next(); for (uint32 lcp = 100; lcp <= 100000; lcp *= 10) { // insert some long common prefixes for (uint32 i = 50; i < 50 + lcp; ++i) h_string[i] = 0; thrust::device_vector<uint32> d_string( h_string ); thrust::device_vector<uint32> d_sa( N_symbols+1 ); hipDeviceSynchronize(); packed_stream_type d_packed_string( nvbio::plain_view( d_string ) ); log_info(stderr, "\n sa... started (LCP: %u)\n", lcp*16u); Timer timer; timer.start(); cuda::suffix_sort( N_symbols, d_packed_string, d_sa.begin(), &params ); hipDeviceSynchronize(); timer.stop(); log_info(stderr, " sa... done: %.2fs (%.1fM suffixes/s)\n", timer.seconds(), 1.0e-6f*float(N_symbols)/float(timer.seconds())); if (1) { log_info(stderr, " sa-is... started\n"); timer.start(); std::vector<int32> sa_ref( N_symbols+1 ); gen_sa( N_symbols, packed_stream_type( nvbio::plain_view( h_string ) ), &sa_ref[0] ); timer.stop(); log_info(stderr, " sa-is... 
done: %.2fs (%.1fM suffixes/s)\n", timer.seconds(), 1.0e-6f*float(N_symbols)/float(timer.seconds())); thrust::host_vector<uint32> h_sa( d_sa ); for (uint32 i = 0; i < N_symbols; ++i) { const uint32 s = h_sa[i]; const uint32 r = sa_ref[i]; if (s != r) { log_error(stderr, " mismatch at %u: expected %u, got %u\n", i, r, s); return 0u; } } } } FILE* file = fopen("./data/howto", "r" ); if (file == NULL) log_warning(stderr, " unable to open \"howto\" file\n"); else { log_info(stderr, "\n loading \"howto\" text benchmark\n"); fseek( file, 0, SEEK_END ); const uint32 N_symbols = uint32( ftell( file ) ); thrust::host_vector<uint8> h_text( N_symbols ); rewind( file ); fread( &h_text[0], 1, N_symbols, file ); fclose( file ); thrust::device_vector<uint8> d_text( h_text ); thrust::device_vector<uint32> d_sa( N_symbols+1 ); hipDeviceSynchronize(); log_info(stderr, " sa... started (%u bytes)\n", N_symbols); Timer timer; timer.start(); cuda::suffix_sort( N_symbols, d_text.begin(), d_sa.begin(), &params ); hipDeviceSynchronize(); timer.stop(); log_info(stderr, " sa... done: %.2fs (%.1fM suffixes/s)\n", timer.seconds(), 1.0e-6f*float(N_symbols)/float(timer.seconds())); if (1) { log_info(stderr, " sa-is... started\n"); timer.start(); std::vector<int32> sa_ref( N_symbols+1 ); sa_ref[0] = N_symbols; saisxx( nvbio::plain_view( h_text ), &sa_ref[0] + 1, int32(N_symbols), 256 ); timer.stop(); log_info(stderr, " sa-is... done: %.2fs (%.1fM suffixes/s)\n", timer.seconds(), 1.0e-6f*float(N_symbols)/float(timer.seconds())); thrust::host_vector<uint32> h_sa( d_sa ); for (uint32 i = 0; i < N_symbols; ++i) { const uint32 s = h_sa[i]; const uint32 r = sa_ref[i]; if (s != r) { log_error(stderr, " mismatch at %u: expected %u, got %u\n", i, r, s); return 0u; } } } } } if (TEST_MASK & kGPU_SA_SET) { typedef PackedStream<uint32*,uint8,SYMBOL_SIZE,false> packed_stream_type; typedef ConcatenatedStringSet<packed_stream_type,uint32*> string_set; const uint32 N_strings = 1024*1024; const uint32 N_tests = 10; const uint32 N_words = uint32((uint64(N_strings)*N + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD); thrust::host_vector<uint32> h_string( N_words ); thrust::host_vector<uint32> h_offsets( N_strings+1 ); sufsort::make_test_string_set<SYMBOL_SIZE>( N_strings, N, h_string, h_offsets ); thrust::device_vector<uint32> d_string( h_string ); thrust::device_vector<uint32> d_offsets( h_offsets ); packed_stream_type d_packed_string( nvbio::plain_view( d_string ) ); string_set d_string_set( N_strings, d_packed_string, nvbio::plain_view( d_offsets ) ); hipDeviceSynchronize(); log_info(stderr, " gpu SA test\n"); log_info(stderr, " %5.1f M strings\n", (1.0e-6f*float(N_strings))); log_info(stderr, " %5.1f M suffixes\n", (1.0e-6f*float(N_strings*(N+1)))); log_info(stderr, " %5.1f G symbols\n", (1.0e-9f*float(uint64(N_strings)*(N+1)*(N+1)/2))); log_info(stderr, " %5.2f GB\n", (float(N_words)*sizeof(uint32))/float(1024*1024*1024)); // copy a sparse string set into a packed concatenated one { sufsort::SuffixHandler suffix_hander; Timer timer; timer.start(); // sort the suffixes for (uint32 i = 0; i < N_tests; ++i) cuda::suffix_sort( d_string_set, suffix_hander, &params ); hipDeviceSynchronize(); timer.stop(); log_info(stderr, " sorting time: %.2fs\n", timer.seconds()/float(N_tests)); log_info(stderr, " %5.1f M strings/s\n", (1.0e-6f*float(N_strings)) * (float(N_tests)/timer.seconds())); log_info(stderr, " %5.1f M suffixes/s\n", (1.0e-6f*float(N_strings*(N+1))) * (float(N_tests)/timer.seconds())); log_info(stderr, " %5.1f G symbols/s\n", 
(1.0e-9f*float(uint64(N_strings)*(N+1)*(N+1)/2)) * (float(N_tests)/timer.seconds())); } } if (TEST_MASK & kGPU_BWT_FUNCTIONAL) { typedef PackedStream<uint32*,uint8,SYMBOL_SIZE,true,uint32> packed_stream_type; const uint32 N_words = 8; const uint32 N_symbols = N_words * SYMBOLS_PER_WORD - 13u; char char_string[N_symbols+1]; log_info(stderr, " gpu bwt test\n"); thrust::host_vector<uint32> h_string( N_words ); thrust::host_vector<uint32> h_bwt( N_words+1 ); thrust::host_vector<uint32> h_bwt_ref( N_words+1 ); uint32 primary_ref; LCG_random rand; for (uint32 i = 0; i < N_words; ++i) h_string[i] = rand.next(); dna_to_string( packed_stream_type( nvbio::plain_view( h_string ) ), N_symbols, char_string ); log_info(stderr, " str : %s\n", char_string ); { // generate the SA using SA-IS int32 sa[N_symbols+1]; gen_sa( N_symbols, packed_stream_type( nvbio::plain_view( h_string ) ), &sa[0] ); // generate the BWT from the SA primary_ref = gen_bwt_from_sa( N_symbols, packed_stream_type( nvbio::plain_view( h_string ) ), sa, packed_stream_type( nvbio::plain_view( h_bwt_ref ) ) ); dna_to_string( packed_stream_type( nvbio::plain_view( h_bwt_ref ) ), N_symbols, char_string ); log_info(stderr, " primary : %u\n", primary_ref ); log_info(stderr, " bwt : %s\n", char_string ); } thrust::device_vector<uint32> d_string( h_string ); thrust::device_vector<uint32> d_bwt( N_words+1 ); hipDeviceSynchronize(); packed_stream_type d_packed_string( nvbio::plain_view( d_string ) ); packed_stream_type d_packed_bwt( nvbio::plain_view( d_bwt ) ); log_info(stderr, " bwt... started\n"); Timer timer; timer.start(); const uint32 primary = cuda::bwt( N_symbols, d_packed_string, d_packed_bwt, &params ); timer.stop(); log_info(stderr, " bwt... done: %.2fs\n", timer.seconds()); h_bwt = d_bwt; { // check whether the results match our expectations packed_stream_type h_packed_bwt_ref( nvbio::plain_view( h_bwt_ref ) ); packed_stream_type h_packed_bwt( nvbio::plain_view( h_bwt ) ); bool check = (primary_ref == primary); for (uint32 i = 0; i < N_symbols; ++i) { if (h_packed_bwt[i] != h_packed_bwt_ref[i]) check = false; } if (check == false) { dna_to_string( packed_stream_type( nvbio::plain_view( h_bwt ) ), N_symbols, char_string ); log_error(stderr, "mismatching results!\n" ); log_error(stderr, " primary : %u\n", primary ); log_error(stderr, " bwt : %s\n", char_string ); return 0u; } } } if (TEST_MASK & kGPU_BWT_GENOME) { // load a genome io::SequenceDataHost h_ref; io::FMIndexDataHost h_fmi; if (io::load_sequence_file( DNA, &h_ref, index_name ) == false) return 0; if (h_fmi.load( index_name, io::FMIndexData::FORWARD ) == false) return 0; // copy it to the gpu io::SequenceDataDevice d_ref( h_ref ); io::FMIndexDataDevice d_fmi( h_fmi, 0u ); typedef io::SequenceDataAccess<DNA,io::ConstSequenceDataView> const_reference_access_type; typedef io::SequenceDataEdit<DNA,io::SequenceDataView> reference_access_type; typedef const_reference_access_type::sequence_stream_type const_packed_stream_type; typedef reference_access_type::sequence_stream_type packed_stream_type; const uint32 N_symbols = d_ref.bps(); const uint32 N_words = d_ref.words(); log_info(stderr, " gpu bwt test\n"); log_info(stderr, " %5.1f G symbols\n", (1.0e-6f*float(N_symbols))); log_info(stderr, " %5.2f GB\n", (float(N_words)*sizeof(uint32))/float(1024*1024*1024)); thrust::device_vector<uint32> d_bwt_storage( N_words+1 ); const const_reference_access_type d_ref_access( d_ref ); const_packed_stream_type d_packed_string( d_ref_access.sequence_stream() ); packed_stream_type d_packed_bwt( 
nvbio::plain_view( d_bwt_storage ) ); const uint32 primary_ref = cuda::find_primary( N_symbols, d_packed_string ); log_info(stderr, " primary: %u\n", primary_ref); { const const_reference_access_type h_ref_access( h_ref ); const_packed_stream_type h_packed_string( h_ref_access.sequence_stream() ); const uint32 crc = crcCalc( h_packed_string, N_symbols ); log_info(stderr, " crc : %u\n", crc); } log_info(stderr, " bwt... started\n"); Timer timer; timer.start(); const uint32 primary = cuda::bwt( N_symbols, d_packed_string, d_packed_bwt, &params ); timer.stop(); log_info(stderr, " bwt... done: %.2fs\n", timer.seconds()); bool check = primary == primary_ref; if (check == false) { log_error(stderr, "mismatching results!\n" ); log_error(stderr, " primary : %u\n", primary ); return 0u; } log_info(stderr, " testing correctness... started\n"); thrust::host_vector<uint32> h_bwt_storage( d_bwt_storage ); const const_packed_stream_type h_packed_bwt( nvbio::plain_view( h_bwt_storage ) ); const io::FMIndexData::bwt_stream_type h_ref_bwt( h_fmi.bwt_iterator() ); for (uint32 i = 0; i < N_symbols; ++i) { const uint8 c0 = h_ref_bwt[i]; const uint8 c1 = h_packed_bwt[i]; if (c0 != c1) { log_error(stderr, "mismatching results!\n" ); log_error(stderr, " at %u, expected %c, got %c\n", i, dna_to_char(c0), dna_to_char(c1) ); return 0u; } } log_info(stderr, " testing correctness... done\n"); { const uint32 crc = crcCalc( h_packed_bwt, N_symbols ); log_info(stderr, " crc: %u\n", crc); } } if (TEST_MASK & kGPU_BWT) { typedef PackedStream<uint32*,uint8,SYMBOL_SIZE,true,uint64> packed_stream_type; const uint64 N_symbols = 4llu*1024u*1024u*1024u - 1u; const uint64 N_words = (N_symbols + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD; log_info(stderr, " gpu bwt test\n"); log_info(stderr, " %5.1f G symbols\n", (1.0e-9f*float(N_symbols))); log_info(stderr, " %5.2f GB\n", (float(N_words)*sizeof(uint32))/float(1024*1024*1024)); thrust::host_vector<uint32> h_string( N_words ); LCG_random rand; for (uint64 i = 0; i < N_words; ++i) h_string[i] = rand.next(); // insert some long common prefixes for (uint32 i = 50; i < 100; ++i) h_string[i] = 0; thrust::device_vector<uint32> d_string( h_string ); thrust::device_vector<uint32> d_bwt( N_words ); hipDeviceSynchronize(); packed_stream_type d_packed_string( nvbio::plain_view( d_string ) ); packed_stream_type d_packed_bwt( nvbio::plain_view( d_bwt ) ); log_info(stderr, " bwt... started\n"); Timer timer; timer.start(); cuda::bwt( N_symbols, d_packed_string, d_packed_bwt, &params ); timer.stop(); log_info(stderr, " bwt... 
done: %.2fs\n", timer.seconds()); } if (TEST_MASK & kGPU_BWT_SET) { typedef uint32 word_type; typedef cuda::load_pointer<word_type,cuda::LOAD_DEFAULT> storage_type; typedef PackedStream<word_type*,uint8,SYMBOL_SIZE,true,uint64> packed_stream_type; typedef PackedStream<storage_type,uint8,SYMBOL_SIZE,true,uint64> mod_packed_stream_type; typedef ConcatenatedStringSet<mod_packed_stream_type,uint64*> string_set; const uint32 N_strings = gpu_bwt_size*1000*1000; const uint64 N_words = util::divide_ri( uint64(N_strings)*(N+0), SYMBOLS_PER_WORD ); const uint64 N_bwt_words = util::divide_ri( uint64(N_strings)*(N+1), SYMBOLS_PER_WORD ); log_info(stderr, " gpu set-bwt test\n"); log_info(stderr, " %5.1f M strings\n", (1.0e-6f*float(N_strings))); log_info(stderr, " %5.1f G suffixes\n", (1.0e-9f*float(uint64(N_strings)*uint64(N+1)))); log_info(stderr, " %5.2f GB\n", (float(N_words)*sizeof(uint32))/float(1024*1024*1024)); thrust::host_vector<uint32> h_string( N_words ); thrust::host_vector<uint64> h_offsets( N_strings+1 ); sufsort::make_test_string_set<SYMBOL_SIZE>( N_strings, N, h_string, h_offsets ); thrust::device_vector<uint32> d_string( h_string ); thrust::device_vector<uint64> d_offsets( h_offsets ); hipDeviceSynchronize(); mod_packed_stream_type d_packed_string( storage_type( (word_type*)nvbio::plain_view( d_string ) ) ); string_set d_string_set( N_strings, d_packed_string, nvbio::plain_view( d_offsets ) ); log_info(stderr, " bwt... started\n"); Timer timer; if (store_output) { thrust::device_vector<uint32> d_bwt( N_bwt_words ); packed_stream_type d_packed_bwt( (word_type*)nvbio::plain_view( d_bwt ) ); DeviceBWTHandler<packed_stream_type> output_handler( d_packed_bwt ); timer.start(); cuda::bwt<SYMBOL_SIZE,true>( d_string_set, output_handler, &params ); timer.stop(); } else { DiscardBWTHandler output_handler; timer.start(); cuda::bwt<SYMBOL_SIZE,true>( d_string_set, output_handler, &params ); timer.stop(); } log_info(stderr, " bwt... done: %.2fs\n", timer.seconds()); } if (TEST_MASK & kCPU_BWT_SET) { typedef uint32 word_type; typedef PackedStream<word_type*,uint8,SYMBOL_SIZE,true,uint64> packed_stream_type; typedef ConcatenatedStringSet<packed_stream_type,uint64*> string_set; const uint32 N_strings = cpu_bwt_size*1000*1000; const uint64 N_words = util::divide_ri( uint64(N_strings)*(N+0), SYMBOLS_PER_WORD ); const uint64 N_bwt_words = util::divide_ri( uint64(N_strings)*(N+1), SYMBOLS_PER_WORD ); log_info(stderr, " cpu set-bwt test\n"); log_info(stderr, " %5.1f M strings\n", (1.0e-6f*float(N_strings))); log_info(stderr, " %5.1f G suffixes\n", (1.0e-9f*float(uint64(N_strings)*uint64(N+1)))); log_info(stderr, " %5.2f GB\n", (float(N_words)*sizeof(uint32))/float(1024*1024*1024)); thrust::host_vector<uint32> h_string( N_words ); thrust::host_vector<uint64> h_offsets( N_strings+1 ); sufsort::make_test_string_set<SYMBOL_SIZE>( N_strings, N, h_string, h_offsets ); packed_stream_type h_packed_string( (word_type*)nvbio::plain_view( h_string ) ); string_set h_string_set( N_strings, h_packed_string, nvbio::plain_view( h_offsets ) ); log_info(stderr, " bwt... 
started\n"); Timer timer; if (store_output) { thrust::host_vector<uint32> h_bwt( N_bwt_words ); packed_stream_type h_packed_bwt( (word_type*)nvbio::plain_view( h_bwt ) ); HostBWTHandler<packed_stream_type> output_handler( h_packed_bwt ); timer.start(); large_bwt<SYMBOL_SIZE,true>( h_string_set, output_handler, &params ); timer.stop(); } else { DiscardBWTHandler output_handler; timer.start(); large_bwt<SYMBOL_SIZE,true>( h_string_set, output_handler, &params ); timer.stop(); } log_info(stderr, " bwt... done: %.2fs\n", timer.seconds()); } log_info(stderr, "nvbio/sufsort test... done\n"); return 0; } } // namespace nvbio using namespace nvbio; int main(int argc, char* argv[]) { crcInit(); int cuda_device = -1; int device_count; hipGetDeviceCount(&device_count); log_verbose(stderr, " cuda devices : %d\n", device_count); int arg = 1; if (argc > 1) { if (strcmp( argv[arg], "-device" ) == 0) { cuda_device = atoi(argv[++arg]); ++arg; } } // inspect and select cuda devices if (device_count) { if (cuda_device == -1) { int best_device = 0; hipDeviceProp_t best_device_prop; hipGetDeviceProperties( &best_device_prop, best_device ); for (int device = 0; device < device_count; ++device) { hipDeviceProp_t device_prop; hipGetDeviceProperties( &device_prop, device ); log_verbose(stderr, " device %d has compute capability %d.%d\n", device, device_prop.major, device_prop.minor); log_verbose(stderr, " SM count : %u\n", device_prop.multiProcessorCount); log_verbose(stderr, " SM clock rate : %u Mhz\n", device_prop.clockRate / 1000); log_verbose(stderr, " memory clock rate : %.1f Ghz\n", float(device_prop.memoryClockRate) * 1.0e-6f); if (device_prop.major >= best_device_prop.major && device_prop.minor >= best_device_prop.minor) { best_device_prop = device_prop; best_device = device; } } cuda_device = best_device; } log_verbose(stderr, " chosen device %d\n", cuda_device); { hipDeviceProp_t device_prop; hipGetDeviceProperties( &device_prop, cuda_device ); log_verbose(stderr, " device name : %s\n", device_prop.name); log_verbose(stderr, " compute capability : %d.%d\n", device_prop.major, device_prop.minor); } hipSetDevice( cuda_device ); } // allocate some heap hipDeviceSetLimit( hipLimitMallocHeapSize, 128*1024*1024 ); argc = argc >= arg ? argc-arg : 0; try { nvbio::sufsort_test( argc, argv+arg ); } catch (nvbio::cuda_error e) { log_error(stderr, "caught a nvbio::cuda_error exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (nvbio::bad_alloc e) { log_error(stderr, "caught a nvbio::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (nvbio::logic_error e) { log_error(stderr, "caught a nvbio::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (nvbio::runtime_error e) { log_error(stderr, "caught a nvbio::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (std::bad_alloc e) { log_error(stderr, "caught a std::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (std::logic_error e) { log_error(stderr, "caught a std::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (std::runtime_error e) { log_error(stderr, "caught a std::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (...) { log_error(stderr,"unknown exception caught!\n"); exit(1); } hipDeviceReset(); return 0; }
72eae68ba1d04492bc130a60b37255e64cabbc8b.cu
/* * nvbio * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define NVBIO_CUDA_DEBUG #include <cub/cub.cuh> #include <stdio.h> #include <stdlib.h> #include <vector> #include <algorithm> #include <crc/crc.h> #ifdef _OPENMP #include <omp.h> #endif #include <nvbio/sufsort/sufsort.h> #include <nvbio/sufsort/sufsort_utils.h> #include <nvbio/basic/exceptions.h> #include <nvbio/basic/timer.h> #include <nvbio/strings/string_set.h> #include <nvbio/basic/cuda/arch.h> #include <nvbio/basic/cuda/ldg.h> #include <nvbio/io/fmindex/fmindex.h> #include <nvbio/io/sequence/sequence.h> #include <nvbio/basic/dna.h> #include <nvbio/fmindex/bwt.h> #include <thrust/device_vector.h> namespace nvbio { namespace sufsort { template <uint32 SYMBOL_SIZE, typename offset_type> void make_test_string_set( const uint64 N_strings, const uint32 N, thrust::host_vector<uint32>& h_string, thrust::host_vector<offset_type>& h_offsets) { for (uint64 i = 0; i < N_strings; ++i) h_offsets[i] = offset_type( uint64(N)*i ); LCG_random rand; for (uint64 i = 0; i < h_string.size(); ++i) h_string[i] = rand.next(); h_offsets[N_strings] = N*N_strings; } struct SuffixHandler { void process( const uint32 n_suffixes, const uint32* suffix_array, const uint32* string_ids, const uint32* cum_lengths) { output.resize( n_suffixes ); thrust::copy( thrust::device_ptr<const uint32>( suffix_array ), thrust::device_ptr<const uint32>( suffix_array ) + n_suffixes, output.begin() ); } thrust::device_vector<uint32> output; }; } // namespace sufsort int sufsort_test(int argc, char* argv[]) { enum Test { kGPU_SA = 1u, kGPU_BWT = 2u, kCPU_BWT = 4u, kGPU_BWT_FUNCTIONAL = 8u, kGPU_BWT_GENOME = 16u, kGPU_BWT_SET = 32u, kCPU_BWT_SET = 64u, kGPU_SA_SET = 128u, }; uint32 TEST_MASK = 0xFFFFFFFFu; uint32 gpu_bwt_size = 50u; uint32 cpu_bwt_size = 100u; #ifdef _OPENMP uint32 threads = omp_get_num_procs(); #else uint32 threads = 1; #endif bool store_output = true; const char* index_name = "data/human.NCBI36/Homo_sapiens.NCBI36.53.dna.toplevel.fa"; BWTParams params; for (int i = 0; i < argc; ++i) { if (strcmp( argv[i], "-v" ) == 0 
|| strcmp( argv[i], "-verbosity" ) == 0 || strcmp( argv[i], "--verbosity" ) == 0) { set_verbosity( Verbosity( atoi( argv[++i] ) ) ); } else if (strcmp( argv[i], "-cpu-mem" ) == 0) { params.host_memory = atoi( argv[++i] ) * uint64(1024u*1024u); } else if (strcmp( argv[i], "-gpu-mem" ) == 0) { params.device_memory = atoi( argv[++i] ) * uint64(1024u*1024u); } else if (strcmp( argv[i], "-cpu-bwt-size" ) == 0) { cpu_bwt_size = atoi( argv[++i] ); } else if (strcmp( argv[i], "-gpu-bwt-size" ) == 0) { gpu_bwt_size = atoi( argv[++i] ); } else if (strcmp( argv[i], "-threads" ) == 0) { threads = atoi( argv[++i] ); } else if (strcmp( argv[i], "-no-output" ) == 0) { store_output = false; } else if ((strcmp( argv[i], "-genome" ) == 0) || (strcmp( argv[i], "-index" ) == 0)) { index_name = argv[++i]; } else if (strcmp( argv[i], "-tests" ) == 0) { const std::string tests_string( argv[++i] ); char temp[256]; const char* begin = tests_string.c_str(); const char* end = begin; TEST_MASK = 0u; while (1) { while (*end != ':' && *end != '\0') { temp[end - begin] = *end; end++; } temp[end - begin] = '\0'; if (strcmp( temp, "gpu-sa" ) == 0) TEST_MASK |= kGPU_SA; else if (strcmp( temp, "gpu-bwt" ) == 0) TEST_MASK |= kGPU_BWT; else if (strcmp( temp, "gpu-bwt-func" ) == 0) TEST_MASK |= kGPU_BWT_FUNCTIONAL; else if (strcmp( temp, "gpu-bwt-genome" ) == 0) TEST_MASK |= kGPU_BWT_GENOME; else if (strcmp( temp, "cpu-bwt" ) == 0) TEST_MASK |= kCPU_BWT; else if (strcmp( temp, "gpu-set-bwt" ) == 0) TEST_MASK |= kGPU_BWT_SET; else if (strcmp( temp, "cpu-set-bwt" ) == 0) TEST_MASK |= kCPU_BWT_SET; if (*end == '\0') break; ++end; begin = end; } } } #ifdef _OPENMP // Now set the number of threads omp_set_num_threads( threads ); #endif log_info(stderr, "nvbio/sufsort test... started (%u threads)\n", threads); #pragma omp parallel { log_info(stderr, " running on multiple threads\n"); } const uint32 N = 100; const uint32 SYMBOL_SIZE = 2; const uint32 SYMBOLS_PER_WORD = (8u*sizeof(uint32)) / SYMBOL_SIZE; if (TEST_MASK & kGPU_SA) { typedef uint32 index_type; typedef PackedStream<uint32*,uint8,SYMBOL_SIZE,true,index_type> packed_stream_type; const index_type N_symbols = 8u*1024u*1024u; const index_type N_words = (N_symbols + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD; log_info(stderr, " gpu sa test\n"); log_info(stderr, " %5.1f M symbols\n", (1.0e-6f*float(N_symbols))); log_info(stderr, " %5.2f GB\n", (float(N_words)*sizeof(uint32))/float(1024*1024*1024)); thrust::host_vector<uint32> h_string( N_words ); LCG_random rand; for (index_type i = 0; i < N_words; ++i) h_string[i] = rand.next(); for (uint32 lcp = 100; lcp <= 100000; lcp *= 10) { // insert some long common prefixes for (uint32 i = 50; i < 50 + lcp; ++i) h_string[i] = 0; thrust::device_vector<uint32> d_string( h_string ); thrust::device_vector<uint32> d_sa( N_symbols+1 ); cudaDeviceSynchronize(); packed_stream_type d_packed_string( nvbio::plain_view( d_string ) ); log_info(stderr, "\n sa... started (LCP: %u)\n", lcp*16u); Timer timer; timer.start(); cuda::suffix_sort( N_symbols, d_packed_string, d_sa.begin(), &params ); cudaDeviceSynchronize(); timer.stop(); log_info(stderr, " sa... done: %.2fs (%.1fM suffixes/s)\n", timer.seconds(), 1.0e-6f*float(N_symbols)/float(timer.seconds())); if (1) { log_info(stderr, " sa-is... started\n"); timer.start(); std::vector<int32> sa_ref( N_symbols+1 ); gen_sa( N_symbols, packed_stream_type( nvbio::plain_view( h_string ) ), &sa_ref[0] ); timer.stop(); log_info(stderr, " sa-is... 
done: %.2fs (%.1fM suffixes/s)\n", timer.seconds(), 1.0e-6f*float(N_symbols)/float(timer.seconds())); thrust::host_vector<uint32> h_sa( d_sa ); for (uint32 i = 0; i < N_symbols; ++i) { const uint32 s = h_sa[i]; const uint32 r = sa_ref[i]; if (s != r) { log_error(stderr, " mismatch at %u: expected %u, got %u\n", i, r, s); return 0u; } } } } FILE* file = fopen("./data/howto", "r" ); if (file == NULL) log_warning(stderr, " unable to open \"howto\" file\n"); else { log_info(stderr, "\n loading \"howto\" text benchmark\n"); fseek( file, 0, SEEK_END ); const uint32 N_symbols = uint32( ftell( file ) ); thrust::host_vector<uint8> h_text( N_symbols ); rewind( file ); fread( &h_text[0], 1, N_symbols, file ); fclose( file ); thrust::device_vector<uint8> d_text( h_text ); thrust::device_vector<uint32> d_sa( N_symbols+1 ); cudaDeviceSynchronize(); log_info(stderr, " sa... started (%u bytes)\n", N_symbols); Timer timer; timer.start(); cuda::suffix_sort( N_symbols, d_text.begin(), d_sa.begin(), &params ); cudaDeviceSynchronize(); timer.stop(); log_info(stderr, " sa... done: %.2fs (%.1fM suffixes/s)\n", timer.seconds(), 1.0e-6f*float(N_symbols)/float(timer.seconds())); if (1) { log_info(stderr, " sa-is... started\n"); timer.start(); std::vector<int32> sa_ref( N_symbols+1 ); sa_ref[0] = N_symbols; saisxx( nvbio::plain_view( h_text ), &sa_ref[0] + 1, int32(N_symbols), 256 ); timer.stop(); log_info(stderr, " sa-is... done: %.2fs (%.1fM suffixes/s)\n", timer.seconds(), 1.0e-6f*float(N_symbols)/float(timer.seconds())); thrust::host_vector<uint32> h_sa( d_sa ); for (uint32 i = 0; i < N_symbols; ++i) { const uint32 s = h_sa[i]; const uint32 r = sa_ref[i]; if (s != r) { log_error(stderr, " mismatch at %u: expected %u, got %u\n", i, r, s); return 0u; } } } } } if (TEST_MASK & kGPU_SA_SET) { typedef PackedStream<uint32*,uint8,SYMBOL_SIZE,false> packed_stream_type; typedef ConcatenatedStringSet<packed_stream_type,uint32*> string_set; const uint32 N_strings = 1024*1024; const uint32 N_tests = 10; const uint32 N_words = uint32((uint64(N_strings)*N + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD); thrust::host_vector<uint32> h_string( N_words ); thrust::host_vector<uint32> h_offsets( N_strings+1 ); sufsort::make_test_string_set<SYMBOL_SIZE>( N_strings, N, h_string, h_offsets ); thrust::device_vector<uint32> d_string( h_string ); thrust::device_vector<uint32> d_offsets( h_offsets ); packed_stream_type d_packed_string( nvbio::plain_view( d_string ) ); string_set d_string_set( N_strings, d_packed_string, nvbio::plain_view( d_offsets ) ); cudaDeviceSynchronize(); log_info(stderr, " gpu SA test\n"); log_info(stderr, " %5.1f M strings\n", (1.0e-6f*float(N_strings))); log_info(stderr, " %5.1f M suffixes\n", (1.0e-6f*float(N_strings*(N+1)))); log_info(stderr, " %5.1f G symbols\n", (1.0e-9f*float(uint64(N_strings)*(N+1)*(N+1)/2))); log_info(stderr, " %5.2f GB\n", (float(N_words)*sizeof(uint32))/float(1024*1024*1024)); // copy a sparse string set into a packed concatenated one { sufsort::SuffixHandler suffix_hander; Timer timer; timer.start(); // sort the suffixes for (uint32 i = 0; i < N_tests; ++i) cuda::suffix_sort( d_string_set, suffix_hander, &params ); cudaDeviceSynchronize(); timer.stop(); log_info(stderr, " sorting time: %.2fs\n", timer.seconds()/float(N_tests)); log_info(stderr, " %5.1f M strings/s\n", (1.0e-6f*float(N_strings)) * (float(N_tests)/timer.seconds())); log_info(stderr, " %5.1f M suffixes/s\n", (1.0e-6f*float(N_strings*(N+1))) * (float(N_tests)/timer.seconds())); log_info(stderr, " %5.1f G symbols/s\n", 
(1.0e-9f*float(uint64(N_strings)*(N+1)*(N+1)/2)) * (float(N_tests)/timer.seconds())); } } if (TEST_MASK & kGPU_BWT_FUNCTIONAL) { typedef PackedStream<uint32*,uint8,SYMBOL_SIZE,true,uint32> packed_stream_type; const uint32 N_words = 8; const uint32 N_symbols = N_words * SYMBOLS_PER_WORD - 13u; char char_string[N_symbols+1]; log_info(stderr, " gpu bwt test\n"); thrust::host_vector<uint32> h_string( N_words ); thrust::host_vector<uint32> h_bwt( N_words+1 ); thrust::host_vector<uint32> h_bwt_ref( N_words+1 ); uint32 primary_ref; LCG_random rand; for (uint32 i = 0; i < N_words; ++i) h_string[i] = rand.next(); dna_to_string( packed_stream_type( nvbio::plain_view( h_string ) ), N_symbols, char_string ); log_info(stderr, " str : %s\n", char_string ); { // generate the SA using SA-IS int32 sa[N_symbols+1]; gen_sa( N_symbols, packed_stream_type( nvbio::plain_view( h_string ) ), &sa[0] ); // generate the BWT from the SA primary_ref = gen_bwt_from_sa( N_symbols, packed_stream_type( nvbio::plain_view( h_string ) ), sa, packed_stream_type( nvbio::plain_view( h_bwt_ref ) ) ); dna_to_string( packed_stream_type( nvbio::plain_view( h_bwt_ref ) ), N_symbols, char_string ); log_info(stderr, " primary : %u\n", primary_ref ); log_info(stderr, " bwt : %s\n", char_string ); } thrust::device_vector<uint32> d_string( h_string ); thrust::device_vector<uint32> d_bwt( N_words+1 ); cudaDeviceSynchronize(); packed_stream_type d_packed_string( nvbio::plain_view( d_string ) ); packed_stream_type d_packed_bwt( nvbio::plain_view( d_bwt ) ); log_info(stderr, " bwt... started\n"); Timer timer; timer.start(); const uint32 primary = cuda::bwt( N_symbols, d_packed_string, d_packed_bwt, &params ); timer.stop(); log_info(stderr, " bwt... done: %.2fs\n", timer.seconds()); h_bwt = d_bwt; { // check whether the results match our expectations packed_stream_type h_packed_bwt_ref( nvbio::plain_view( h_bwt_ref ) ); packed_stream_type h_packed_bwt( nvbio::plain_view( h_bwt ) ); bool check = (primary_ref == primary); for (uint32 i = 0; i < N_symbols; ++i) { if (h_packed_bwt[i] != h_packed_bwt_ref[i]) check = false; } if (check == false) { dna_to_string( packed_stream_type( nvbio::plain_view( h_bwt ) ), N_symbols, char_string ); log_error(stderr, "mismatching results!\n" ); log_error(stderr, " primary : %u\n", primary ); log_error(stderr, " bwt : %s\n", char_string ); return 0u; } } } if (TEST_MASK & kGPU_BWT_GENOME) { // load a genome io::SequenceDataHost h_ref; io::FMIndexDataHost h_fmi; if (io::load_sequence_file( DNA, &h_ref, index_name ) == false) return 0; if (h_fmi.load( index_name, io::FMIndexData::FORWARD ) == false) return 0; // copy it to the gpu io::SequenceDataDevice d_ref( h_ref ); io::FMIndexDataDevice d_fmi( h_fmi, 0u ); typedef io::SequenceDataAccess<DNA,io::ConstSequenceDataView> const_reference_access_type; typedef io::SequenceDataEdit<DNA,io::SequenceDataView> reference_access_type; typedef const_reference_access_type::sequence_stream_type const_packed_stream_type; typedef reference_access_type::sequence_stream_type packed_stream_type; const uint32 N_symbols = d_ref.bps(); const uint32 N_words = d_ref.words(); log_info(stderr, " gpu bwt test\n"); log_info(stderr, " %5.1f G symbols\n", (1.0e-6f*float(N_symbols))); log_info(stderr, " %5.2f GB\n", (float(N_words)*sizeof(uint32))/float(1024*1024*1024)); thrust::device_vector<uint32> d_bwt_storage( N_words+1 ); const const_reference_access_type d_ref_access( d_ref ); const_packed_stream_type d_packed_string( d_ref_access.sequence_stream() ); packed_stream_type d_packed_bwt( 
nvbio::plain_view( d_bwt_storage ) ); const uint32 primary_ref = cuda::find_primary( N_symbols, d_packed_string ); log_info(stderr, " primary: %u\n", primary_ref); { const const_reference_access_type h_ref_access( h_ref ); const_packed_stream_type h_packed_string( h_ref_access.sequence_stream() ); const uint32 crc = crcCalc( h_packed_string, N_symbols ); log_info(stderr, " crc : %u\n", crc); } log_info(stderr, " bwt... started\n"); Timer timer; timer.start(); const uint32 primary = cuda::bwt( N_symbols, d_packed_string, d_packed_bwt, &params ); timer.stop(); log_info(stderr, " bwt... done: %.2fs\n", timer.seconds()); bool check = primary == primary_ref; if (check == false) { log_error(stderr, "mismatching results!\n" ); log_error(stderr, " primary : %u\n", primary ); return 0u; } log_info(stderr, " testing correctness... started\n"); thrust::host_vector<uint32> h_bwt_storage( d_bwt_storage ); const const_packed_stream_type h_packed_bwt( nvbio::plain_view( h_bwt_storage ) ); const io::FMIndexData::bwt_stream_type h_ref_bwt( h_fmi.bwt_iterator() ); for (uint32 i = 0; i < N_symbols; ++i) { const uint8 c0 = h_ref_bwt[i]; const uint8 c1 = h_packed_bwt[i]; if (c0 != c1) { log_error(stderr, "mismatching results!\n" ); log_error(stderr, " at %u, expected %c, got %c\n", i, dna_to_char(c0), dna_to_char(c1) ); return 0u; } } log_info(stderr, " testing correctness... done\n"); { const uint32 crc = crcCalc( h_packed_bwt, N_symbols ); log_info(stderr, " crc: %u\n", crc); } } if (TEST_MASK & kGPU_BWT) { typedef PackedStream<uint32*,uint8,SYMBOL_SIZE,true,uint64> packed_stream_type; const uint64 N_symbols = 4llu*1024u*1024u*1024u - 1u; const uint64 N_words = (N_symbols + SYMBOLS_PER_WORD-1) / SYMBOLS_PER_WORD; log_info(stderr, " gpu bwt test\n"); log_info(stderr, " %5.1f G symbols\n", (1.0e-9f*float(N_symbols))); log_info(stderr, " %5.2f GB\n", (float(N_words)*sizeof(uint32))/float(1024*1024*1024)); thrust::host_vector<uint32> h_string( N_words ); LCG_random rand; for (uint64 i = 0; i < N_words; ++i) h_string[i] = rand.next(); // insert some long common prefixes for (uint32 i = 50; i < 100; ++i) h_string[i] = 0; thrust::device_vector<uint32> d_string( h_string ); thrust::device_vector<uint32> d_bwt( N_words ); cudaDeviceSynchronize(); packed_stream_type d_packed_string( nvbio::plain_view( d_string ) ); packed_stream_type d_packed_bwt( nvbio::plain_view( d_bwt ) ); log_info(stderr, " bwt... started\n"); Timer timer; timer.start(); cuda::bwt( N_symbols, d_packed_string, d_packed_bwt, &params ); timer.stop(); log_info(stderr, " bwt... 
done: %.2fs\n", timer.seconds()); } if (TEST_MASK & kGPU_BWT_SET) { typedef uint32 word_type; typedef cuda::load_pointer<word_type,cuda::LOAD_DEFAULT> storage_type; typedef PackedStream<word_type*,uint8,SYMBOL_SIZE,true,uint64> packed_stream_type; typedef PackedStream<storage_type,uint8,SYMBOL_SIZE,true,uint64> mod_packed_stream_type; typedef ConcatenatedStringSet<mod_packed_stream_type,uint64*> string_set; const uint32 N_strings = gpu_bwt_size*1000*1000; const uint64 N_words = util::divide_ri( uint64(N_strings)*(N+0), SYMBOLS_PER_WORD ); const uint64 N_bwt_words = util::divide_ri( uint64(N_strings)*(N+1), SYMBOLS_PER_WORD ); log_info(stderr, " gpu set-bwt test\n"); log_info(stderr, " %5.1f M strings\n", (1.0e-6f*float(N_strings))); log_info(stderr, " %5.1f G suffixes\n", (1.0e-9f*float(uint64(N_strings)*uint64(N+1)))); log_info(stderr, " %5.2f GB\n", (float(N_words)*sizeof(uint32))/float(1024*1024*1024)); thrust::host_vector<uint32> h_string( N_words ); thrust::host_vector<uint64> h_offsets( N_strings+1 ); sufsort::make_test_string_set<SYMBOL_SIZE>( N_strings, N, h_string, h_offsets ); thrust::device_vector<uint32> d_string( h_string ); thrust::device_vector<uint64> d_offsets( h_offsets ); cudaDeviceSynchronize(); mod_packed_stream_type d_packed_string( storage_type( (word_type*)nvbio::plain_view( d_string ) ) ); string_set d_string_set( N_strings, d_packed_string, nvbio::plain_view( d_offsets ) ); log_info(stderr, " bwt... started\n"); Timer timer; if (store_output) { thrust::device_vector<uint32> d_bwt( N_bwt_words ); packed_stream_type d_packed_bwt( (word_type*)nvbio::plain_view( d_bwt ) ); DeviceBWTHandler<packed_stream_type> output_handler( d_packed_bwt ); timer.start(); cuda::bwt<SYMBOL_SIZE,true>( d_string_set, output_handler, &params ); timer.stop(); } else { DiscardBWTHandler output_handler; timer.start(); cuda::bwt<SYMBOL_SIZE,true>( d_string_set, output_handler, &params ); timer.stop(); } log_info(stderr, " bwt... done: %.2fs\n", timer.seconds()); } if (TEST_MASK & kCPU_BWT_SET) { typedef uint32 word_type; typedef PackedStream<word_type*,uint8,SYMBOL_SIZE,true,uint64> packed_stream_type; typedef ConcatenatedStringSet<packed_stream_type,uint64*> string_set; const uint32 N_strings = cpu_bwt_size*1000*1000; const uint64 N_words = util::divide_ri( uint64(N_strings)*(N+0), SYMBOLS_PER_WORD ); const uint64 N_bwt_words = util::divide_ri( uint64(N_strings)*(N+1), SYMBOLS_PER_WORD ); log_info(stderr, " cpu set-bwt test\n"); log_info(stderr, " %5.1f M strings\n", (1.0e-6f*float(N_strings))); log_info(stderr, " %5.1f G suffixes\n", (1.0e-9f*float(uint64(N_strings)*uint64(N+1)))); log_info(stderr, " %5.2f GB\n", (float(N_words)*sizeof(uint32))/float(1024*1024*1024)); thrust::host_vector<uint32> h_string( N_words ); thrust::host_vector<uint64> h_offsets( N_strings+1 ); sufsort::make_test_string_set<SYMBOL_SIZE>( N_strings, N, h_string, h_offsets ); packed_stream_type h_packed_string( (word_type*)nvbio::plain_view( h_string ) ); string_set h_string_set( N_strings, h_packed_string, nvbio::plain_view( h_offsets ) ); log_info(stderr, " bwt... 
started\n"); Timer timer; if (store_output) { thrust::host_vector<uint32> h_bwt( N_bwt_words ); packed_stream_type h_packed_bwt( (word_type*)nvbio::plain_view( h_bwt ) ); HostBWTHandler<packed_stream_type> output_handler( h_packed_bwt ); timer.start(); large_bwt<SYMBOL_SIZE,true>( h_string_set, output_handler, &params ); timer.stop(); } else { DiscardBWTHandler output_handler; timer.start(); large_bwt<SYMBOL_SIZE,true>( h_string_set, output_handler, &params ); timer.stop(); } log_info(stderr, " bwt... done: %.2fs\n", timer.seconds()); } log_info(stderr, "nvbio/sufsort test... done\n"); return 0; } } // namespace nvbio using namespace nvbio; int main(int argc, char* argv[]) { crcInit(); int cuda_device = -1; int device_count; cudaGetDeviceCount(&device_count); log_verbose(stderr, " cuda devices : %d\n", device_count); int arg = 1; if (argc > 1) { if (strcmp( argv[arg], "-device" ) == 0) { cuda_device = atoi(argv[++arg]); ++arg; } } // inspect and select cuda devices if (device_count) { if (cuda_device == -1) { int best_device = 0; cudaDeviceProp best_device_prop; cudaGetDeviceProperties( &best_device_prop, best_device ); for (int device = 0; device < device_count; ++device) { cudaDeviceProp device_prop; cudaGetDeviceProperties( &device_prop, device ); log_verbose(stderr, " device %d has compute capability %d.%d\n", device, device_prop.major, device_prop.minor); log_verbose(stderr, " SM count : %u\n", device_prop.multiProcessorCount); log_verbose(stderr, " SM clock rate : %u Mhz\n", device_prop.clockRate / 1000); log_verbose(stderr, " memory clock rate : %.1f Ghz\n", float(device_prop.memoryClockRate) * 1.0e-6f); if (device_prop.major >= best_device_prop.major && device_prop.minor >= best_device_prop.minor) { best_device_prop = device_prop; best_device = device; } } cuda_device = best_device; } log_verbose(stderr, " chosen device %d\n", cuda_device); { cudaDeviceProp device_prop; cudaGetDeviceProperties( &device_prop, cuda_device ); log_verbose(stderr, " device name : %s\n", device_prop.name); log_verbose(stderr, " compute capability : %d.%d\n", device_prop.major, device_prop.minor); } cudaSetDevice( cuda_device ); } // allocate some heap cudaDeviceSetLimit( cudaLimitMallocHeapSize, 128*1024*1024 ); argc = argc >= arg ? argc-arg : 0; try { nvbio::sufsort_test( argc, argv+arg ); } catch (nvbio::cuda_error e) { log_error(stderr, "caught a nvbio::cuda_error exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (nvbio::bad_alloc e) { log_error(stderr, "caught a nvbio::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (nvbio::logic_error e) { log_error(stderr, "caught a nvbio::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (nvbio::runtime_error e) { log_error(stderr, "caught a nvbio::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (std::bad_alloc e) { log_error(stderr, "caught a std::bad_alloc exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (std::logic_error e) { log_error(stderr, "caught a std::logic_error exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (std::runtime_error e) { log_error(stderr, "caught a std::runtime_error exception:\n"); log_error(stderr, " %s\n", e.what()); } catch (...) { log_error(stderr,"unknown exception caught!\n"); exit(1); } cudaDeviceReset(); return 0; }
c476c6af72c2fc8c6dd12fdd713f3edc8c204c6a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdlib.h> #include <stdio.h> #include <time.h> __global__ void initialize(double *a, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i<n) a[i] = (double)i/n; } int main() { //Serial Code const int n = 10000000; double *a; double start, end; a = (double*)malloc(n * sizeof(double)); int i; start = clock(); for (i = 0; i < n; i++) a[i] = (double)i / n; end = clock(); for (i = 0; i < 5; i++) printf("a[%d]: %.7f\n", i, a[i]); printf(" ...\n"); for (i = n-5; i < n; i++) printf("a[%d]: %.7f\n", i, a[i]); double total = (end - start) / CLOCKS_PER_SEC; printf("time: %f\n\n", total); //Cuda double* ac; double* d_a; ac = (double*)malloc(n * sizeof(double)); printf("Cuda\n"); hipMalloc(&d_a, sizeof(double)*n); double t = clock(); // clock() counts host CPU time; usable here only because of the synchronize below. hipLaunchKernelGGL(( initialize), dim3(10000),dim3(1000), 0, 0, d_a, n); hipDeviceSynchronize(); t = (clock() - t) / CLOCKS_PER_SEC; hipMemcpy(ac, d_a, n*sizeof(double), hipMemcpyDeviceToHost); for (i = 0; i < 5; i++) printf("a[%d]: %.7f\n", i, ac[i]); printf(" ...\n"); for (i = n - 5; i < n; i++) printf("a[%d]: %.7f\n", i, ac[i]); printf("time: %f\n", t); double timesfaster = total / t; printf("Using cuda, the code executed %f times faster\n", timesfaster); // release host and device buffers free(a); free(ac); hipFree(d_a); return 0; }
c476c6af72c2fc8c6dd12fdd713f3edc8c204c6a.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdlib.h> #include <stdio.h> #include <time.h> __global__ void initialize(double *a, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if(i<n) a[i] =(double)i/n; } int main() { //Serial Code const int n = 10000000; double *a; double start, end; a = (double*)malloc(n * sizeof(double)); int i; start = clock(); for (i = 0; i < n; i++) a[i] = (double)i / n; end = clock(); for (i = 0; i < 5; i++) printf("a[%d]: %.7f\n",i, a[i]); printf(" ...\n"); for (i = n-5; i < n; i++) printf("a[%d]: %.7f\n", i, a[i]); double total = (end - start) / CLOCKS_PER_SEC; printf("time: %f\n\n",total); //Cuda double* ac; double* d_a; ac = (double*)malloc(n * sizeof(double)); printf("Cuda\n"); cudaMalloc(&d_a, sizeof(double)*n); double t = clock(); initialize<<<10000,1000>>>(d_a, n); cudaDeviceSynchronize(); t = (clock() - t) / CLOCKS_PER_SEC; cudaMemcpy(ac, d_a, n*sizeof(double), cudaMemcpyDeviceToHost); for (i = 0; i < 5; i++) printf("a[%d]: %.7f\n", i, ac[i]); printf(" ...\n"); for (i = n - 5; i < n; i++) printf("a[%d]: %.7f\n", i, ac[i]); printf("time:%f\n", t); double timesfaster = total / t; printf("Using cuda, the code executed %f times faster\n", timesfaster); }
cbb079e704b0ce060c6e97d21240ffbeb85e6034.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cugl_ros/apps/dynamic_fusion/cuda_internal.h> #include <cugl_ros/apps/dynamic_fusion/cuda_impl/utils.cuh> namespace dynfu { namespace gpu { /* * Image Processing Functions built on CUDA */ __global__ void depthToColorKernel(float2 thresholds, float depth_scale, int width, int height, const DeviceArrayHandle2D<ushort> src_depth, DeviceArrayHandle2D<uchar4> dst_color) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < width && y < height) { ushort depth_raw = src_depth.at(y, x); float depth_in_meters = (float)(depth_raw) * depth_scale; uchar4 rgba = make_uchar4(20, 5, 0, 1); if(depth_in_meters > thresholds.x && depth_in_meters < thresholds.y) { uchar f = (uchar)(255.0f * (depth_in_meters - thresholds.x) / (thresholds.y - thresholds.x)); rgba.x = 255 - f; rgba.y = 0; rgba.z = f; } dst_color.at(y, x) = rgba; } } void depthToColor(float2 thresholds, float depth_scale, int width, int height, const DeviceArray2D<ushort> &src_depth, DeviceArray2D<uchar4> &dst_color) { dim3 block(32, 16); dim3 grid(divUp(width, block.x), divUp(height, block.y)); hipLaunchKernelGGL(( depthToColorKernel), dim3(grid), dim3(block), 0, 0, thresholds, depth_scale, width, height, src_depth.getHandle(), dst_color.getHandle()); checkCudaErrors(hipGetLastError()); } ////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////// __global__ void convertColorKernel(int width, int height, bool draw_mask, int2 mask_start, int2 mask_end, const DeviceArrayHandle2D<uchar> src_color, DeviceArrayHandle2D<uchar4> dst_color) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < width && y < height) { uchar color_r = src_color.at(y, 3*x + 0); uchar color_g = src_color.at(y, 3*x + 1); uchar color_b = src_color.at(y, 3*x + 2); bool in_mask = (draw_mask && x > mask_start.x && y > mask_start.y && x < mask_end.x && y < mask_end.y); uchar4 rgba = (in_mask) ? 
make_uchar4(255, 0, 0, 1) : make_uchar4(color_r, color_g, color_b, 1); dst_color.at(y, x) = rgba; } } void convertColor(int width, int height, bool draw_mask, int2 mask_start, int2 mask_end, const DeviceArray2D<uchar> &src_color, DeviceArray2D<uchar4> &dst_color) { dim3 block(32, 16); dim3 grid(divUp(width, block.x), divUp(height, block.y)); hipLaunchKernelGGL(( convertColorKernel), dim3(grid), dim3(block), 0, 0, width, height, draw_mask, mask_start, mask_end, src_color.getHandle(), dst_color.getHandle()); checkCudaErrors(hipGetLastError()); } ////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////// __global__ void truncateDepthKernel(float2 depth_thresholds, float4 color_thresholds, bool draw_mask, int2 mask_start, int2 mask_end, const Intrinsics intrin, const DeviceArrayHandle2D<ushort> src_depth, const DeviceArrayHandle2D<uchar> src_color, DeviceArrayHandle2D<ushort> dst_depth_16u, DeviceArrayHandle2D<float> dst_depth_32f) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < intrin.width && y < intrin.height) { // truncation via depth ushort depth_raw = src_depth.at(y, x); float depth_in_meters = (float)(depth_raw) * intrin.depth_scale; // truncation via color (HSV space) float color_r = __uint2float_rd(src_color.at(y, 3*x + 0)); float color_g = __uint2float_rd(src_color.at(y, 3*x + 1)); float color_b = __uint2float_rd(src_color.at(y, 3*x + 2)); const float max = fmaxf(color_r, fmaxf(color_g, color_b)); const float min = fminf(color_r, fminf(color_g, color_b)); float color_v = max / 255.f; float color_h, color_s; if (max == 0.f) { color_h = 0.f; color_s = 0.f; } const float diff = max - min; color_s = diff / max; if (min == max) { color_h = 0.f; } if (max == color_r) color_h = 60.f * ((color_g - color_b) / diff); else if (max == color_g) color_h = 60.f * (2.f + (color_b - color_r) / diff); else color_h = 60.f * (4.f + (color_r - color_g) / diff); if (color_h < 0.f) color_h += 360.f; bool in_mask = (draw_mask && x > mask_start.x && y > mask_start.y && x < mask_end.x && y < mask_end.y); bool valid = (depth_in_meters > depth_thresholds.x && depth_in_meters < depth_thresholds.y && color_h > color_thresholds.x && color_h < color_thresholds.y && color_s > color_thresholds.z && color_v > color_thresholds.w && !in_mask); if(!valid) { depth_raw = 0; depth_in_meters = 0; } float xl = (x - intrin.cx) / intrin.fx; float yl = (y - intrin.cy) / intrin.fy; float lambda = sqrtf(xl * xl + yl * yl + 1); dst_depth_16u.at(y, x) = depth_raw; dst_depth_32f.at(y, x) = depth_in_meters * lambda; } } void truncateDepth(float2 depth_thresholds, float4 color_thresholds, bool draw_mask, int2 mask_start, int2 mask_end, const Intrinsics &intrin, const DeviceArray2D<ushort> &src_depth, const DeviceArray2D<uchar> &src_color, DeviceArray2D<ushort> &dst_depth_16u, DeviceArray2D<float> &dst_depth_32f) { dim3 block(32, 16); dim3 grid(divUp(intrin.width, block.x), divUp(intrin.height, block.y)); hipLaunchKernelGGL(( truncateDepthKernel), dim3(grid), dim3(block), 0, 0, depth_thresholds, color_thresholds, draw_mask, mask_start, mask_end, intrin, src_depth.getHandle(), src_color.getHandle(), dst_depth_16u.getHandle(), dst_depth_32f.getHandle()); checkCudaErrors(hipGetLastError()); } ////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////// 
__global__ void bilateralFilterKernel(int cols, int rows, float sigma_space2_inv_half, float sigma_color2_inv_half, const DeviceArrayHandle2D<ushort> src_depth, DeviceArrayHandle2D<ushort> dst_depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= cols || y >= rows) return; const int R = 6; const int D = R * 2 + 1; int value = src_depth.at(y, x); int tx = min(x - D / 2 + D, cols - 1); int ty = min(y - D / 2 + D, rows - 1); float sum1 = 0; float sum2 = 0; for (int cy = max(y - D / 2, 0); cy < ty; ++cy) { for (int cx = max(x - D / 2, 0); cx < tx; ++cx) { int tmp = src_depth.at(cy, cx); float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy); float color2 = (value - tmp) * (value - tmp); float weight = __expf (-(space2 * sigma_space2_inv_half + color2 * sigma_color2_inv_half)); sum1 += tmp * weight; sum2 += weight; } } int res = __float2int_rn (sum1 / sum2); dst_depth.at(y, x) = max(0, min(res, SHRT_MAX)); } void bilateralFilter(int width, int height, const DeviceArray2D<ushort> &src_depth, DeviceArray2D<ushort> &dst_depth) { float sigma_color = 30; float sigma_space = 4.5; dim3 block(32, 16); dim3 grid(divUp(width, block.x), divUp(height, block.y)); // hipFuncSetCacheConfig(bilateralFilter, hipFuncCachePreferL1); hipLaunchKernelGGL(( bilateralFilterKernel), dim3(grid), dim3(block), 0, 0, width, height, 0.5f / (sigma_space * sigma_space), 0.5f / (sigma_color * sigma_color), src_depth.getHandle(), dst_depth.getHandle()); checkCudaErrors(hipGetLastError()); } ////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////// __global__ void createVertexImageKernel(const Intrinsics intrin, const DeviceArrayHandle2D<ushort> src_depth, DeviceArrayHandle2D<float4> dst_vertex) { int u = threadIdx.x + blockIdx.x * blockDim.x; int v = threadIdx.y + blockIdx.y * blockDim.y; if(u < intrin.width && v < intrin.height) { float z = intrin.depth_scale * (float)(src_depth.at(v, u)); float vx = z * (u - intrin.cx) / intrin.fx; float vy = z * (v - intrin.cy) / intrin.fy; float vz = z; float4 vert = (z != 0) ? 
make_float4(vx, vy, vz, 1.f) : make_float4(quiet_nanf(), quiet_nanf(), quiet_nanf(), 1.f); dst_vertex.at(v, u) = vert; } } void createVertexImage(const Intrinsics &intrin, const DeviceArray2D<ushort> &src_depth, DeviceArray2D<float4> &dst_vertex) { dim3 block(32, 16); dim3 grid(divUp(intrin.width, block.x), divUp(intrin.height, block.y)); hipLaunchKernelGGL(( createVertexImageKernel), dim3(grid), dim3(block), 0, 0, intrin, src_depth.getHandle(), dst_vertex.getHandle()); checkCudaErrors(hipGetLastError()); } ////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////// __device__ __forceinline__ float3 computeDominantEigenVector(float cov[6]) { // use power method to find dominant eigenvector float3 v = make_float3(1.f, 1.f, 1.f); // 8 iterations seems to be more than enough for (int i = 0; i < 8; i++) { float x = v.x * cov[0] + v.y * cov[1] + v.z * cov[2]; float y = v.x * cov[1] + v.y * cov[3] + v.z * cov[4]; float z = v.x * cov[2] + v.y * cov[4] + v.z * cov[5]; float m = max(max(x, y), z); float iv = 1.f / m; v = make_float3(x * iv, y * iv, z * iv); } return v; } __global__ void createNormalImageKernel(int width, int height, const DeviceArrayHandle2D<float4> src_vertex, DeviceArrayHandle2D<float4> dst_normal) { int u = threadIdx.x + blockIdx.x * blockDim.x; int v = threadIdx.y + blockIdx.y * blockDim.y; if (u >= width || v >= height) return; dst_normal.at(v, u) = make_float4(quiet_nanf(), quiet_nanf(), quiet_nanf(), 0.f); if (isnan(src_vertex.at(v, u).x)) return; const int kx = 7; const int ky = 7; const int kstep = 1; int ty = min(v - ky / 2 + ky, height - 1); int tx = min(u - kx / 2 + kx, width - 1); float3 centroid = make_float3(0.f); int counter = 0; for (int cy = max(v - ky / 2, 0); cy < ty; cy += kstep) { for (int cx = max(u - kx / 2, 0); cx < tx; cx += kstep) { float3 vertex = make_float3(src_vertex.at(cy, cx)); if (!isnan(vertex.x)) { centroid += vertex; ++counter; } } } if (counter < kx * ky / 2) return; float counter_inv = 1.f / counter; centroid *= counter_inv; // store cov as an upper triangular mat in row-major order float cov[] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; for (int cy = max(v - ky / 2, 0); cy < ty; cy += kstep) { for (int cx = max(u - kx / 2, 0); cx < tx; cx += kstep) { float3 vertex = make_float3(src_vertex.at(cy, cx)); if (!isnan(vertex.x)) { float3 cent_to_vert = vertex - centroid; cov[0] += cent_to_vert.x * cent_to_vert.x; cov[1] += cent_to_vert.x * cent_to_vert.y; cov[2] += cent_to_vert.x * cent_to_vert.z; cov[3] += cent_to_vert.y * cent_to_vert.y; cov[4] += cent_to_vert.y * cent_to_vert.z; cov[5] += cent_to_vert.z * cent_to_vert.z; } } } // approximate the dominant eigenvector of the covariance matrix // float3 n = computeDominantEigenVector(cov); typedef Eigen33::Mat33 Mat33; Eigen33 eigen33 (cov); Mat33 tmp; Mat33 vec_tmp; Mat33 evecs; float3 evals; eigen33.compute(tmp, vec_tmp, evecs, evals); dst_normal.at(v, u) = make_float4(normalize(evecs[0]), 0.f); } void createNormalImage(int width, int height, const DeviceArray2D<float4> &src_vertex, DeviceArray2D<float4> &dst_normal) { dim3 block(32, 16); dim3 grid(divUp(width, block.x), divUp(height, block.y)); hipLaunchKernelGGL(( createNormalImageKernel), dim3(grid), dim3(block), 0, 0, width, height, src_vertex.getHandle(), dst_normal.getHandle()); checkCudaErrors(hipGetLastError()); } } // namespace gpu } // namespace dynfu
cbb079e704b0ce060c6e97d21240ffbeb85e6034.cu
#include <cugl_ros/apps/dynamic_fusion/cuda_internal.h> #include <cugl_ros/apps/dynamic_fusion/cuda_impl/utils.cuh> namespace dynfu { namespace gpu { /* * Image Processing Functions built on CUDA */ __global__ void depthToColorKernel(float2 thresholds, float depth_scale, int width, int height, const DeviceArrayHandle2D<ushort> src_depth, DeviceArrayHandle2D<uchar4> dst_color) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < width && y < height) { ushort depth_raw = src_depth.at(y, x); float depth_in_meters = (float)(depth_raw) * depth_scale; uchar4 rgba = make_uchar4(20, 5, 0, 1); if(depth_in_meters > thresholds.x && depth_in_meters < thresholds.y) { uchar f = (uchar)(255.0f * (depth_in_meters - thresholds.x) / (thresholds.y - thresholds.x)); rgba.x = 255 - f; rgba.y = 0; rgba.z = f; } dst_color.at(y, x) = rgba; } } void depthToColor(float2 thresholds, float depth_scale, int width, int height, const DeviceArray2D<ushort> &src_depth, DeviceArray2D<uchar4> &dst_color) { dim3 block(32, 16); dim3 grid(divUp(width, block.x), divUp(height, block.y)); depthToColorKernel<<<grid, block>>>(thresholds, depth_scale, width, height, src_depth.getHandle(), dst_color.getHandle()); checkCudaErrors(cudaGetLastError()); } ////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////// __global__ void convertColorKernel(int width, int height, bool draw_mask, int2 mask_start, int2 mask_end, const DeviceArrayHandle2D<uchar> src_color, DeviceArrayHandle2D<uchar4> dst_color) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < width && y < height) { uchar color_r = src_color.at(y, 3*x + 0); uchar color_g = src_color.at(y, 3*x + 1); uchar color_b = src_color.at(y, 3*x + 2); bool in_mask = (draw_mask && x > mask_start.x && y > mask_start.y && x < mask_end.x && y < mask_end.y); uchar4 rgba = (in_mask) ? 
make_uchar4(255, 0, 0, 1) : make_uchar4(color_r, color_g, color_b, 1); dst_color.at(y, x) = rgba; } } void convertColor(int width, int height, bool draw_mask, int2 mask_start, int2 mask_end, const DeviceArray2D<uchar> &src_color, DeviceArray2D<uchar4> &dst_color) { dim3 block(32, 16); dim3 grid(divUp(width, block.x), divUp(height, block.y)); convertColorKernel<<<grid, block>>>(width, height, draw_mask, mask_start, mask_end, src_color.getHandle(), dst_color.getHandle()); checkCudaErrors(cudaGetLastError()); } ////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////// __global__ void truncateDepthKernel(float2 depth_thresholds, float4 color_thresholds, bool draw_mask, int2 mask_start, int2 mask_end, const Intrinsics intrin, const DeviceArrayHandle2D<ushort> src_depth, const DeviceArrayHandle2D<uchar> src_color, DeviceArrayHandle2D<ushort> dst_depth_16u, DeviceArrayHandle2D<float> dst_depth_32f) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if(x < intrin.width && y < intrin.height) { // truncation via depth ushort depth_raw = src_depth.at(y, x); float depth_in_meters = (float)(depth_raw) * intrin.depth_scale; // truncation via color (HSV space) float color_r = __uint2float_rd(src_color.at(y, 3*x + 0)); float color_g = __uint2float_rd(src_color.at(y, 3*x + 1)); float color_b = __uint2float_rd(src_color.at(y, 3*x + 2)); const float max = fmaxf(color_r, fmaxf(color_g, color_b)); const float min = fminf(color_r, fminf(color_g, color_b)); float color_v = max / 255.f; float color_h, color_s; if (max == 0.f) { color_h = 0.f; color_s = 0.f; } const float diff = max - min; color_s = diff / max; if (min == max) { color_h = 0.f; } if (max == color_r) color_h = 60.f * ((color_g - color_b) / diff); else if (max == color_g) color_h = 60.f * (2.f + (color_b - color_r) / diff); else color_h = 60.f * (4.f + (color_r - color_g) / diff); if (color_h < 0.f) color_h += 360.f; bool in_mask = (draw_mask && x > mask_start.x && y > mask_start.y && x < mask_end.x && y < mask_end.y); bool valid = (depth_in_meters > depth_thresholds.x && depth_in_meters < depth_thresholds.y && color_h > color_thresholds.x && color_h < color_thresholds.y && color_s > color_thresholds.z && color_v > color_thresholds.w && !in_mask); if(!valid) { depth_raw = 0; depth_in_meters = 0; } float xl = (x - intrin.cx) / intrin.fx; float yl = (y - intrin.cy) / intrin.fy; float lambda = sqrtf(xl * xl + yl * yl + 1); dst_depth_16u.at(y, x) = depth_raw; dst_depth_32f.at(y, x) = depth_in_meters * lambda; } } void truncateDepth(float2 depth_thresholds, float4 color_thresholds, bool draw_mask, int2 mask_start, int2 mask_end, const Intrinsics &intrin, const DeviceArray2D<ushort> &src_depth, const DeviceArray2D<uchar> &src_color, DeviceArray2D<ushort> &dst_depth_16u, DeviceArray2D<float> &dst_depth_32f) { dim3 block(32, 16); dim3 grid(divUp(intrin.width, block.x), divUp(intrin.height, block.y)); truncateDepthKernel<<<grid, block>>>(depth_thresholds, color_thresholds, draw_mask, mask_start, mask_end, intrin, src_depth.getHandle(), src_color.getHandle(), dst_depth_16u.getHandle(), dst_depth_32f.getHandle()); checkCudaErrors(cudaGetLastError()); } ////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////// __global__ void bilateralFilterKernel(int cols, int rows, float 
sigma_space2_inv_half, float sigma_color2_inv_half, const DeviceArrayHandle2D<ushort> src_depth, DeviceArrayHandle2D<ushort> dst_depth) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= cols || y >= rows) return; const int R = 6; const int D = R * 2 + 1; int value = src_depth.at(y, x); int tx = min(x - D / 2 + D, cols - 1); int ty = min(y - D / 2 + D, rows - 1); float sum1 = 0; float sum2 = 0; for (int cy = max(y - D / 2, 0); cy < ty; ++cy) { for (int cx = max(x - D / 2, 0); cx < tx; ++cx) { int tmp = src_depth.at(cy, cx); float space2 = (x - cx) * (x - cx) + (y - cy) * (y - cy); float color2 = (value - tmp) * (value - tmp); float weight = __expf (-(space2 * sigma_space2_inv_half + color2 * sigma_color2_inv_half)); sum1 += tmp * weight; sum2 += weight; } } int res = __float2int_rn (sum1 / sum2); dst_depth.at(y, x) = max(0, min(res, SHRT_MAX)); } void bilateralFilter(int width, int height, const DeviceArray2D<ushort> &src_depth, DeviceArray2D<ushort> &dst_depth) { float sigma_color = 30; float sigma_space = 4.5; dim3 block(32, 16); dim3 grid(divUp(width, block.x), divUp(height, block.y)); // cudaFuncSetCacheConfig(bilateralFilter, cudaFuncCachePreferL1); bilateralFilterKernel<<<grid, block>>>(width, height, 0.5f / (sigma_space * sigma_space), 0.5f / (sigma_color * sigma_color), src_depth.getHandle(), dst_depth.getHandle()); checkCudaErrors(cudaGetLastError()); } ////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////// __global__ void createVertexImageKernel(const Intrinsics intrin, const DeviceArrayHandle2D<ushort> src_depth, DeviceArrayHandle2D<float4> dst_vertex) { int u = threadIdx.x + blockIdx.x * blockDim.x; int v = threadIdx.y + blockIdx.y * blockDim.y; if(u < intrin.width && v < intrin.height) { float z = intrin.depth_scale * (float)(src_depth.at(v, u)); float vx = z * (u - intrin.cx) / intrin.fx; float vy = z * (v - intrin.cy) / intrin.fy; float vz = z; float4 vert = (z != 0) ? 
make_float4(vx, vy, vz, 1.f) : make_float4(quiet_nanf(), quiet_nanf(), quiet_nanf(), 1.f); dst_vertex.at(v, u) = vert; } } void createVertexImage(const Intrinsics &intrin, const DeviceArray2D<ushort> &src_depth, DeviceArray2D<float4> &dst_vertex) { dim3 block(32, 16); dim3 grid(divUp(intrin.width, block.x), divUp(intrin.height, block.y)); createVertexImageKernel<<<grid, block>>>(intrin, src_depth.getHandle(), dst_vertex.getHandle()); checkCudaErrors(cudaGetLastError()); } ////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////// __device__ __forceinline__ float3 computeDominantEigenVector(float cov[6]) { // use power method to find dominant eigenvector float3 v = make_float3(1.f, 1.f, 1.f); // 8 iterations seems to be more than enough for (int i = 0; i < 8; i++) { float x = v.x * cov[0] + v.y * cov[1] + v.z * cov[2]; float y = v.x * cov[1] + v.y * cov[3] + v.z * cov[4]; float z = v.x * cov[2] + v.y * cov[4] + v.z * cov[5]; float m = max(max(x, y), z); float iv = 1.f / m; v = make_float3(x * iv, y * iv, z * iv); } return v; } __global__ void createNormalImageKernel(int width, int height, const DeviceArrayHandle2D<float4> src_vertex, DeviceArrayHandle2D<float4> dst_normal) { int u = threadIdx.x + blockIdx.x * blockDim.x; int v = threadIdx.y + blockIdx.y * blockDim.y; if (u >= width || v >= height) return; dst_normal.at(v, u) = make_float4(quiet_nanf(), quiet_nanf(), quiet_nanf(), 0.f); if (isnan(src_vertex.at(v, u).x)) return; const int kx = 7; const int ky = 7; const int kstep = 1; int ty = min(v - ky / 2 + ky, height - 1); int tx = min(u - kx / 2 + kx, width - 1); float3 centroid = make_float3(0.f); int counter = 0; for (int cy = max(v - ky / 2, 0); cy < ty; cy += kstep) { for (int cx = max(u - kx / 2, 0); cx < tx; cx += kstep) { float3 vertex = make_float3(src_vertex.at(cy, cx)); if (!isnan(vertex.x)) { centroid += vertex; ++counter; } } } if (counter < kx * ky / 2) return; float counter_inv = 1.f / counter; centroid *= counter_inv; // store cov as an upper triangular mat in row-major order float cov[] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; for (int cy = max(v - ky / 2, 0); cy < ty; cy += kstep) { for (int cx = max(u - kx / 2, 0); cx < tx; cx += kstep) { float3 vertex = make_float3(src_vertex.at(cy, cx)); if (!isnan(vertex.x)) { float3 cent_to_vert = vertex - centroid; cov[0] += cent_to_vert.x * cent_to_vert.x; cov[1] += cent_to_vert.x * cent_to_vert.y; cov[2] += cent_to_vert.x * cent_to_vert.z; cov[3] += cent_to_vert.y * cent_to_vert.y; cov[4] += cent_to_vert.y * cent_to_vert.z; cov[5] += cent_to_vert.z * cent_to_vert.z; } } } // approximate the dominant eigenvector of the covariance matrix // float3 n = computeDominantEigenVector(cov); typedef Eigen33::Mat33 Mat33; Eigen33 eigen33 (cov); Mat33 tmp; Mat33 vec_tmp; Mat33 evecs; float3 evals; eigen33.compute(tmp, vec_tmp, evecs, evals); dst_normal.at(v, u) = make_float4(normalize(evecs[0]), 0.f); } void createNormalImage(int width, int height, const DeviceArray2D<float4> &src_vertex, DeviceArray2D<float4> &dst_normal) { dim3 block(32, 16); dim3 grid(divUp(width, block.x), divUp(height, block.y)); createNormalImageKernel<<<grid, block>>>(width, height, src_vertex.getHandle(), dst_normal.getHandle()); checkCudaErrors(cudaGetLastError()); } } // namespace gpu } // namespace dynfu
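One caveat in truncateDepthKernel above: the max == 0.f and min == max branches assign color_h/color_s but then fall through into the diff/max divisions, so black and grey pixels produce NaNs (the later threshold comparisons happen to reject them, but only by accident). A minimal host-side sketch of the same conversion with early returns; rgb_to_hsv is an illustrative name, not part of the file above:

#include <math.h>

// RGB (0..255) -> HSV with h in [0,360), s and v in [0,1]; the early
// returns guard the divisions that the kernel above falls through.
static void rgb_to_hsv(float r, float g, float b, float *h, float *s, float *v)
{
    const float mx = fmaxf(r, fmaxf(g, b));
    const float mn = fminf(r, fminf(g, b));
    *v = mx / 255.f;
    if (mx == 0.f) { *h = 0.f; *s = 0.f; return; }  // black: hue/saturation undefined
    const float diff = mx - mn;
    *s = diff / mx;
    if (diff == 0.f) { *h = 0.f; return; }          // grey: hue undefined, pick 0
    if      (mx == r) *h = 60.f * ((g - b) / diff);
    else if (mx == g) *h = 60.f * (2.f + (b - r) / diff);
    else              *h = 60.f * (4.f + (r - g) / diff);
    if (*h < 0.f) *h += 360.f;
}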
2d780c079ecaac729ff0ac0ab555fcafd02f07df.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2015-2016 NVIDIA Corporation. All rights reserved. * * Sample to demonstrate use of NVlink CUPTI APIs * * This version is significantly changed to use PAPI and the CUDA component to * handle access and reporting. As of 10/05/2018, I have deleted all CUPTI_ONLY * references, for clarity. The file nvlink_bandwidth_cupti_only.cu contains * the cupti-only code. I also deleted the #if PAPI; there is no option * without PAPI. Also, before my changes, the makefile did not even have a * build option that set CUPTI_ONLY for this file. * * -TonyC. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <hip/hip_runtime.h> #include <cupti.h> #include "papi.h" // THIS MACRO EXITS if the papi call does not return PAPI_OK. Do not use for routines that // return anything else; e.g. PAPI_num_components, PAPI_get_component_info, PAPI_library_init. #define CALL_PAPI_OK(papi_routine) \ do { \ int _papiret = papi_routine; \ if (_papiret != PAPI_OK) { \ fprintf(stderr, "%s:%d macro: PAPI Error: function " #papi_routine " failed with ret=%d [%s].\n", \ __FILE__, __LINE__, _papiret, PAPI_strerror(_papiret)); \ exit(-1); \ } \ } while (0); #define CUPTI_CALL(call) \ do { \ CUptiResult _status = call; \ if (_status != CUPTI_SUCCESS) { \ const char *errstr; \ cuptiGetResultString(_status, &errstr); \ fprintf(stderr, "%s:%d: error: function %s failed with message '%s'.\n", \ __FILE__, __LINE__, #call, errstr); \ exit(-1); \ } \ } while (0); #define DRIVER_API_CALL(apiFuncCall) \ do { \ hipError_t _status = apiFuncCall; \ if (_status != hipSuccess) { \ const char *errName=NULL, *errStr=NULL; \ hipError_t _e1 = hipGetErrorName(_status, &errName); \ hipError_t _e2 = hipGetErrorString(_status, &errStr); \ fprintf(stderr, "%s:%d: error: function %s failed with error [%s]='%s'.\n", \ __FILE__, __LINE__, #apiFuncCall, errName, errStr); \ exit(-1); \ } \ } while (0); #define RUNTIME_API_CALL(apiFuncCall) \ do { \ hipError_t _status = apiFuncCall; \ if (_status != hipSuccess) { \ fprintf(stderr, "%s:%d: error: function %s failed with message'%s'.\n", \ __FILE__, __LINE__, #apiFuncCall, hipGetErrorString(_status)); \ exit(-1); \ } \ } while (0); #define MEMORY_ALLOCATION_CALL(var) \ do { \ if (var == NULL) { \ fprintf(stderr, "%s:%d: Error: Memory Allocation Failed \n",\ __FILE__, __LINE__); \ exit(-1); \ } \ } while (0); #define MAX_DEVICES (32) #define BLOCK_SIZE (1024) #define GRID_SIZE (512) #define BUF_SIZE (32 * 1024) #define ALIGN_SIZE (8) #define SUCCESS (0) #define NUM_METRIC (18) #define NUM_EVENTS (2) #define MAX_SIZE (64*1024*1024) // 64 MB typedef union { long long ll; unsigned long long ull; double d; void *vp; unsigned char ch[8]; } convert_64_t; typedef struct { char name[128]; long long value; } eventStore_t; int eventsFoundCount = 0; // occupants of the array. int eventsFoundMax; // Size of the array. int eventsFoundAdd = 32; // Blocksize for increasing the array. eventStore_t *eventsFound = NULL; // The array. int Streams; // Gets asyncEngineCount (number of physical copy engines). int cpuToGpu = 0; int gpuToGpu = 0; size_t bufferSize = 0; int *deviceEvents = NULL; hipDeviceptr_t *pDevBuffer0 = NULL; hipDeviceptr_t *pDevBuffer1 = NULL; float **pHostBuffer = NULL; hipStream_t *cudaStreams = NULL; //----------------------------------------------------------------------------- // This is the GPU routine to move a block from 'source' (on one GPU) to 'dest' // on another GPU. 
//----------------------------------------------------------------------------- extern "C" __global__ void test_nvlink_bandwidth(float *source, float *dest) { int idx = blockIdx.x * blockDim.x + threadIdx.x; dest[idx] = source[idx] * 2.0f; } // end routine #define DIM(x) (sizeof(x)/sizeof(*(x))) /* compute elements in an array */ //----------------------------------------------------------------------------- // FreeGlobals: Frees globally allocated memories. //----------------------------------------------------------------------------- void FreeGlobals(void) { int i; free(deviceEvents); for(i=0; i<Streams; i++) { RUNTIME_API_CALL(hipSetDevice(0)); // device 0 for pDevBuffer0. RUNTIME_API_CALL(hipFree((void *) pDevBuffer0[i])); // Free allocated space (pass the device pointer, not the address of its host slot). free(pHostBuffer[i]); // Just locally allocated. } free(pDevBuffer0); // all contents freed by above. free(pHostBuffer); // Free the pointers. free(pDevBuffer1); // contents freed by the way the tests work. for (i=0; i<Streams; i++) { // Destroy all streams. if (cudaStreams[i] != NULL) { RUNTIME_API_CALL(hipStreamDestroy(cudaStreams[i])); } } free(cudaStreams); // Free the memory for pointers. } // end routine. //----------------------------------------------------------------------------- // Return a text version with B, KB, MB, GB or TB. //----------------------------------------------------------------------------- void calculateSize(char *result, uint64_t size) { int i; const char *sizes[] = { "TB", "GB", "MB", "KB", "B" }; uint64_t exbibytes = 1024ULL * 1024ULL * 1024ULL * 1024ULL; uint64_t multiplier = exbibytes; for(i = 0; (unsigned) i < DIM(sizes); i++, multiplier /= (uint64_t) 1024) { if(size < multiplier) continue; sprintf(result, "%.1f %s", (float) size / multiplier, sizes[i]); return; } strcpy(result, "0"); return; } // end routine //----------------------------------------------------------------------------- // Copy buffers from host to device, vice versa, both simultaneously. //----------------------------------------------------------------------------- void testCpuToGpu(hipDeviceptr_t * pDevBuffer, float **pHostBuffer, size_t bufferSize, hipStream_t * cudaStreams) { int i; // Unidirectional copy H2D (Host to Device). for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMemcpyAsync((void *) pDevBuffer[i], pHostBuffer[i], bufferSize, hipMemcpyHostToDevice, cudaStreams[i])); } RUNTIME_API_CALL(hipDeviceSynchronize()); // Unidirectional copy D2H (Device to Host). for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMemcpyAsync(pHostBuffer[i], (void *) pDevBuffer[i], bufferSize, hipMemcpyDeviceToHost, cudaStreams[i])); } RUNTIME_API_CALL(hipDeviceSynchronize()); // Bidirectional copy for(i = 0; i < Streams; i += 2) { RUNTIME_API_CALL(hipMemcpyAsync((void *) pDevBuffer[i], pHostBuffer[i], bufferSize, hipMemcpyHostToDevice, cudaStreams[i])); RUNTIME_API_CALL(hipMemcpyAsync(pHostBuffer[i + 1], (void *) pDevBuffer[i + 1], bufferSize, hipMemcpyDeviceToHost, cudaStreams[i + 1])); } RUNTIME_API_CALL(hipDeviceSynchronize()); } // end routine. //----------------------------------------------------------------------------- // Copy buffers from the host to each device, in preparation for a transfer // between devices.
//----------------------------------------------------------------------------- void testGpuToGpu_part1(hipDeviceptr_t * pDevBuffer0, hipDeviceptr_t * pDevBuffer1, float **pHostBuffer, size_t bufferSize, hipStream_t * cudaStreams) { int i; // Unidirectional copy H2D for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMemcpyAsync((void *) pDevBuffer0[i], pHostBuffer[i], bufferSize, hipMemcpyHostToDevice, cudaStreams[i])); } RUNTIME_API_CALL(hipDeviceSynchronize()); for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMemcpyAsync((void *) pDevBuffer1[i], pHostBuffer[i], bufferSize, hipMemcpyHostToDevice, cudaStreams[i])); } RUNTIME_API_CALL(hipDeviceSynchronize()); } // end routine. //----------------------------------------------------------------------------- // Copy from device zero to device 1, then from device 1 to device 0. //----------------------------------------------------------------------------- void testGpuToGpu_part2(hipDeviceptr_t * pDevBuffer0, hipDeviceptr_t * pDevBuffer1, float **pHostBuffer, size_t bufferSize, hipStream_t * cudaStreams) { int i; for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMemcpyAsync((void *) pDevBuffer0[i], (void *) pDevBuffer1[i], bufferSize, hipMemcpyDeviceToDevice, cudaStreams[i])); //printf("Copy %zu stream %d to devBuffer0 from devBuffer1 \n", bufferSize, i); } RUNTIME_API_CALL(hipDeviceSynchronize()); for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMemcpyAsync((void *) pDevBuffer1[i], (void *) pDevBuffer0[i], bufferSize, hipMemcpyDeviceToDevice, cudaStreams[i])); // printf("Copy %zu stream %d to devBuffer0 from devBuffer1 \n", bufferSize, i); } RUNTIME_API_CALL(hipDeviceSynchronize()); for(i = 0; i < Streams; i++) { hipLaunchKernelGGL(( test_nvlink_bandwidth) , dim3(GRID_SIZE), dim3(BLOCK_SIZE) , 0, 0, (float *) pDevBuffer1[i], (float *) pDevBuffer0[i]); // printf("test_nvlink_bandwidth stream %d \n", i); } } // end routine. //----------------------------------------------------------------------------- // conducts test CpuToGpu. This is mostly a shortcut for readability, // decisions must be made about the device buffers. //----------------------------------------------------------------------------- void conductCpuToGpu(int EventSet, int device, long long *values) { int i; if (device == 0) { CALL_PAPI_OK(PAPI_start(EventSet)); // Start event counters. testCpuToGpu(pDevBuffer0, pHostBuffer, bufferSize, cudaStreams); } else { RUNTIME_API_CALL(hipSetDevice(device)); for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMalloc((void **) &pDevBuffer1[i], bufferSize)); } CALL_PAPI_OK(PAPI_start(EventSet)); // Start event counters. testCpuToGpu(pDevBuffer1, pHostBuffer, bufferSize, cudaStreams); for (i=0; i<Streams; i++) { RUNTIME_API_CALL(hipFree((void **) pDevBuffer1[i])); } } // end testing device other than 0. CALL_PAPI_OK(PAPI_stop(EventSet, values)); // Stop and read any values. } // end routine. //----------------------------------------------------------------------------- // conducts test GpuToGpu. This is mostly a shortcut for readability, // decisions must be made about the device buffers. //----------------------------------------------------------------------------- void conductGpuToGpu(int EventSet, int device, long long *values) { int i; // Need to target another GPU. I already have pDevBuffer0 on device 0. int partner=device; // Presume event is not on zero. if (device == 0) partner=1; // If it is on zero, make partner 1. 
RUNTIME_API_CALL(hipSetDevice(0)); // Device 0 must RUNTIME_API_CALL(hipDeviceEnablePeerAccess(partner, 0)); // access partner. RUNTIME_API_CALL(hipSetDevice(partner)); // The partner device must access 0. RUNTIME_API_CALL(hipDeviceEnablePeerAccess(0, 0)); // Let non-zero device access 0. for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMalloc((void **) &pDevBuffer1[i], bufferSize)); } // Prepare the copy, load up buffers on each device from the host. testGpuToGpu_part1(pDevBuffer0, pDevBuffer1, pHostBuffer, bufferSize, cudaStreams); // What we want to time: Copy from device 0->1, then device 1->0. CALL_PAPI_OK(PAPI_start(EventSet)); // Start event counters. testGpuToGpu_part2(pDevBuffer0, pDevBuffer1, pHostBuffer, bufferSize, cudaStreams); CALL_PAPI_OK(PAPI_stop(EventSet, values)); // Stop and read value. // Disable peer access. RUNTIME_API_CALL(hipSetDevice(0)); RUNTIME_API_CALL(hipDeviceDisablePeerAccess(partner)); // Kill connection to device i. RUNTIME_API_CALL(hipSetDevice(partner)); RUNTIME_API_CALL(hipDeviceDisablePeerAccess(0)); // Kill access to device 0. // Now free the pointers on device 'partner' (never 0). for (i=0; i<Streams; i++) { RUNTIME_API_CALL(hipFree((void **) pDevBuffer1[i])); } RUNTIME_API_CALL(hipSetDevice(0)); // return to default pointer. } // end routine. //----------------------------------------------------------------------------- // Show help. //----------------------------------------------------------------------------- static void printUsage() { printf("usage: Demonstrate use of NVlink CUPTI APIs\n"); printf(" -help : display help message\n"); printf(" --cpu-to-gpu : Show results for data transfer between CPU and GPU \n"); printf(" --gpu-to-gpu : Show results for data transfer between two GPUs \n"); } // end routine. //----------------------------------------------------------------------------- // Interpret command line flags. //----------------------------------------------------------------------------- void parseCommandLineArgs(int argc, char *argv[]) { if(argc != 2) { printf("Invalid number of options\n"); exit(0); } if(strcmp(argv[1], "--cpu-to-gpu") == 0) { cpuToGpu = 1; } else if(strcmp(argv[1], "--gpu-to-gpu") == 0) { gpuToGpu = 1; } else if((strcmp(argv[1], "--help") == 0) || (strcmp(argv[1], "-help") == 0) || (strcmp(argv[1], "-h") == 0)) { printUsage(); exit(0); } else { cpuToGpu = 1; } } // end routine. //----------------------------------------------------------------------------- // Add an entry to the eventsFound array. On entry we always have room. //----------------------------------------------------------------------------- void addEventsFound(char *eventName, long long value) { strncpy(eventsFound[eventsFoundCount].name, eventName, 127); // Copy up to 127 chars. eventsFound[eventsFoundCount].value = value; // Copy the value. if (++eventsFoundCount >= eventsFoundMax) { // bump count, if too much, make room. eventsFoundMax += eventsFoundAdd; // Add. eventsFound = (eventStore_t*) realloc(eventsFound, eventsFoundMax*sizeof(eventStore_t)); // Make new room. memset(eventsFound+(eventsFoundMax-eventsFoundAdd), 0, eventsFoundAdd*sizeof(eventStore_t)); // zero it. } } // end routine. //----------------------------------------------------------------------------- // Main program. //----------------------------------------------------------------------------- int main(int argc, char *argv[]) { int device, deviceCount = 0, i = 0; size_t freeMemory = 0, totalMemory = 0; char str[64]; eventsFoundMax = eventsFoundAdd; // space allocated. 
eventsFound = (eventStore_t*) calloc(eventsFoundMax, sizeof(eventStore_t)); // make some space. hipDeviceProp_t prop[MAX_DEVICES]; // Parse command line arguments parseCommandLineArgs(argc, argv); DRIVER_API_CALL(hipInit(0)); RUNTIME_API_CALL(hipGetDeviceCount(&deviceCount)); printf("There are %d devices.\n", deviceCount); if(deviceCount == 0) { printf("There is no device supporting CUDA.\n"); exit(-1); } Streams = 1; // Always use at least ONE stream. for(device = 0; device < deviceCount; device++) { RUNTIME_API_CALL(hipGetDeviceProperties(&prop[device], device)); printf("CUDA Device %d Name: %s", device, prop[device].name); printf(", AsyncEngineCount=%i", prop[device].asyncEngineCount); printf(", MultiProcessors=%i", prop[device].multiProcessorCount); printf(", MaxThreadsPerMP=%i", prop[device].maxThreadsPerMultiProcessor); printf("\n"); if (prop[device].asyncEngineCount > Streams) { // If a new high, Streams = prop[device].asyncEngineCount; // Always use the maximum. } } printf("Streams to use: %i (= max Copy Engines).\n", Streams); // allocate space deviceEvents = (int*) calloc(deviceCount, sizeof(int)); pDevBuffer0 = (hipDeviceptr_t*) calloc(Streams, sizeof(hipDeviceptr_t)); pDevBuffer1 = (hipDeviceptr_t*) calloc(Streams, sizeof(hipDeviceptr_t)); pHostBuffer = (float **) calloc(Streams, sizeof(float*)); cudaStreams = (hipStream_t*) calloc(Streams, sizeof(hipStream_t)); // Set memcpy size based on available device memory RUNTIME_API_CALL(hipMemGetInfo(&freeMemory, &totalMemory)); printf("Total Device Memory available : "); calculateSize(str, (uint64_t) totalMemory); printf("%s\n", str); bufferSize = MAX_SIZE < (freeMemory / 4) ? MAX_SIZE : (freeMemory / 4); bufferSize = bufferSize/2; printf("Memcpy size is set to %llu B (%llu MB)\n", (unsigned long long) bufferSize, (unsigned long long) bufferSize / (1024 * 1024)); for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipStreamCreate(&cudaStreams[i])); } RUNTIME_API_CALL(hipDeviceSynchronize()); // Nvlink-topology Records are generated even before hipMemcpy API is called. CUPTI_CALL(cuptiActivityFlushAll(0)); // fprintf(stderr, "Setup PAPI counters internally (PAPI)\n"); int EventSet = PAPI_NULL; int eventCount; int retval; int k, m, cid=-1; /* PAPI Initialization */ retval = PAPI_library_init(PAPI_VER_CURRENT); if(retval != PAPI_VER_CURRENT) { fprintf(stderr, "PAPI_library_init failed, ret=%i [%s]\n", retval, PAPI_strerror(retval)); FreeGlobals(); exit(-1); } printf("PAPI version: %d.%d.%d\n", PAPI_VERSION_MAJOR(PAPI_VERSION), PAPI_VERSION_MINOR(PAPI_VERSION), PAPI_VERSION_REVISION(PAPI_VERSION)); // Find cuda component index. k = PAPI_num_components(); // get number of components. for (i=0; i<k && cid<0; i++) { // while not found, PAPI_component_info_t *aComponent = (PAPI_component_info_t*) PAPI_get_component_info(i); // get the component info. if (aComponent == NULL) { // if we failed, fprintf(stderr, "PAPI_get_component_info(%i) failed, " "returned NULL. %i components reported.\n", i,k); FreeGlobals(); exit(-1); } if (strcmp("cuda", aComponent->name) == 0) cid=i; // If we found our match, record it. } // end search components. if (cid < 0) { // if no cuda component found, fprintf(stderr, "Failed to find cuda component among %i " "reported components.\n", k); FreeGlobals(); PAPI_shutdown(); exit(-1); } printf("Found CUDA Component at id %d\n", cid); // Add events at a GPU specific level ...
eg cuda:::metric:nvlink_total_data_transmitted:device=0 // Just profile devices to match the CUPTI example eventCount = 0; int eventsRead=0; for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMalloc((void **) &pDevBuffer0[i], bufferSize)); pHostBuffer[i] = (float *) malloc(bufferSize); MEMORY_ALLOCATION_CALL(pHostBuffer[i]); } // Begin enumeration of all events. if (cpuToGpu) printf("Experiment timing memory copy from host to GPU.\n"); if (gpuToGpu) printf("Experiment timing memory copy between GPU 0 and each other GPU.\n"); printf("Events with numeric values were read; if they are zero, they may not \n" "be operational, or the exercises performed by this code do not affect \n" "them. We report all 'nvlink' events presented by the cuda component. \n" "\n" "---------------------------Event Name---------------------------:---Value---\n"); PAPI_event_info_t info; // To get event enumeration info. m=PAPI_NATIVE_MASK; // Get the PAPI NATIVE mask. CALL_PAPI_OK(PAPI_enum_cmp_event(&m,PAPI_ENUM_FIRST,cid)); // Begin enumeration of ALL papi counters. do { // Enumerate all events. memset(&info,0,sizeof(PAPI_event_info_t)); // Clear event info. k=m; // Make a copy of current code. // enumerate sub-events, with masks. For this test, we do not // have any! But we do this to test our enumeration works as // expected. First time through is guaranteed, of course. do { // enumerate masked events. CALL_PAPI_OK(PAPI_get_event_info(k,&info)); // get name of k symbol. if (strstr(info.symbol, "nvlink") == NULL) continue; // skip if not an nvlink event. char *devstr = strstr(info.symbol, "device="); // look for device enumerator. if (devstr == NULL) continue; // Skip if no device present. device=atoi(devstr+7); // Get the device id, for info. // fprintf(stderr, "Found nvlink symbol '%s', device=%i.\n", info.symbol , device); if (device < 0 || device >= deviceCount) continue; // skip any not in range. deviceEvents[device]++; // Add to count of events on this device. CALL_PAPI_OK(PAPI_create_eventset(&EventSet)); CALL_PAPI_OK(PAPI_assign_eventset_component(EventSet, cid)); retval = PAPI_add_named_event(EventSet, info.symbol); // Don't want to fail program if name not found... if(retval == PAPI_OK) { eventCount++; // Bump number of events we could test. } else { CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // Delete all events in set. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // destroy the event set. continue; } long long value=-1; // The only value we read. // ===== Allocate Memory ===================================== if(cpuToGpu) { conductCpuToGpu(EventSet, device, &value); // Just one value for now. } else if(gpuToGpu) { conductGpuToGpu(EventSet, device, &value); // Just one value for now. } addEventsFound(info.symbol, value); // Add to events we were able to read. CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // Delete all events in set. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // destroy the event set. // report each event counted. if (value >= 0) { // If not still -1, eventsRead++; // .. count and report. calculateSize(str, value); if (value == 0) { printf("%-64s: %9s (not exercised by current test code.)\n", info.symbol, str); } else { printf("%-64s: %9s\n", info.symbol, str); } } else { printf("%-64s: Failed to read.\n", info.symbol); } } while(PAPI_enum_cmp_event(&k,PAPI_NTV_ENUM_UMASKS,cid)==PAPI_OK); // Get next umask entry (bits different) (should return PAPI_NOEVNT). } while(PAPI_enum_cmp_event(&m,PAPI_ENUM_EVENTS,cid)==PAPI_OK); // Get next event code. 
if (eventCount < 1) { // If we failed on all of them, fprintf(stderr, "Unable to add any NVLINK events; they are not present in the component.\n"); fprintf(stderr, "Unable to proceed with this test.\n"); FreeGlobals(); PAPI_shutdown(); // Returns no value. exit(-1); // exit no matter what. } if (eventsRead < 1) { // If failed to read any, printf("\nFailed to read any nvlink events.\n"); // report a failure. fprintf(stderr, "Unable to proceed with this test.\n"); FreeGlobals(); PAPI_shutdown(); // Returns no value. exit(-1); // exit no matter what. } printf("\nTotal nvlink events identified: %i.\n\n", eventsFoundCount); if (eventsFoundCount < 2) { // If failed to get counts on any, printf("Insufficient events are exercised by the current test code to perform pair testing.\n"); // report a failure. FreeGlobals(); PAPI_shutdown(); // Returns no value. exit(0); // exit no matter what. } for (i=0; i<deviceCount; i++) { printf("Device %i has %i events. %i potential pairings per device.\n", i, deviceEvents[i], deviceEvents[i]*(deviceEvents[i]-1)/2); } // Begin pair testing. We consider every possible pairing of events // that, tested alone, returned a value greater than zero. int mainEvent, pairEvent, mainDevice, pairDevice; long long saveValues[2]; long long readValues[2]; int goodOnSame=0, failOnDiff=0, badSameCombo=0, pairProblems=0; // Some counters. int type; // 0 succeed on same device, 1 = fail across devices. for (type=0; type<2; type++) { if (type == 0) { printf("List of Pairings on SAME device:\n"); printf("* means value changed by more than 10%% when paired (vs measured singly, above).\n"); printf("^ means a pair was rejected as an invalid combo.\n"); } else { printf("List of Failed Pairings on DIFFERENT devices:\n"); } for (mainEvent = 0; mainEvent<eventsFoundCount-1; mainEvent++) { // Through all but one events. char *devstr = strstr(eventsFound[mainEvent].name, "device="); // look for device enumerator. mainDevice=atoi(devstr+7); // Get the device id. for (pairEvent = mainEvent+1; pairEvent<eventsFoundCount; pairEvent++) { // Through all possible pairs, devstr = strstr(eventsFound[pairEvent].name, "device="); // look for device enumerator. pairDevice=atoi(devstr+7); // Get the device id. if (type == 0 && mainDevice != pairDevice) continue; // Skip if we need same device. if (type == 1 && mainDevice == pairDevice) continue; // Skip if we need different devices. CALL_PAPI_OK(PAPI_create_eventset(&EventSet)); CALL_PAPI_OK(PAPI_assign_eventset_component(EventSet, cid)); CALL_PAPI_OK(PAPI_add_named_event(EventSet, eventsFound[mainEvent].name)); // Here we must examine the return code. int ret = PAPI_add_named_event(EventSet, eventsFound[pairEvent].name); if (type == 0 && ret == PAPI_ECOMBO) { // A bad combination when looking for valid combos. printf("%c %64s + %-64s [Invalid Combo]\n", '^', // report it. eventsFound[mainEvent].name, eventsFound[pairEvent].name); badSameCombo++; // .. count an explicit rejection. CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // .. done with event set. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // .. continue; // .. try the next combo. } if (type == 1 && ret == PAPI_ECOMBO) { // A bad combination when we are looking for that. printf("%64s + %-64s BAD COMBINATION ACROSS DEVICES.\n", eventsFound[mainEvent].name, eventsFound[pairEvent].name); // report it. failOnDiff++; // count the bad combos. CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // .. don't need to go further. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // .. continue; // .. 
try the next combo. } if (ret != PAPI_OK) { // If it failed for some other reason, fprintf(stderr, "%s:%d Attempt to add event '%s' to set " "with event '%s' produced an unexpected error: " "[%s]. Ignoring this pair.\n", __FILE__, __LINE__, eventsFound[pairEvent].name, eventsFound[mainEvent].name, PAPI_strerror(ret)); CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // .. didn't work. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // .. continue; // .. try the next combo. } // We were able to add the pair. In type 1, we just skip it, // because we presume a single event on a device isn't changed // by any event on another device. if (type == 1) { CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // .. worked fine; don't measure it. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // .. continue; // .. try the next combo. } // We were able to add the pair, in type 0, get a measurement. readValues[0] = -1; readValues[1] = -1; if(cpuToGpu) { conductCpuToGpu(EventSet, mainDevice, readValues); // conduct for main. saveValues[0] = readValues[0]; saveValues[1] = readValues[1]; } else if(gpuToGpu) { conductGpuToGpu(EventSet, mainDevice, readValues); // conduct for main. saveValues[0] = readValues[0]; saveValues[1] = readValues[1]; } goodOnSame++; // Was accepted by cuda as a valid pairing. // For the checks, we add 2 (so -1 becomes +1) to avoid any // divide by zeros. It won't make a significant difference // in the ratios. (none if readings are the same). double mainSingle = (2.0 + eventsFound[mainEvent].value); // Get value when read alone. double pairSingle = (2.0 + eventsFound[pairEvent].value); // .. double mainCheck = mainSingle/(2.0 + saveValues[0]); // Get ratio when paired. double pairCheck = pairSingle/(2.0 + saveValues[1]); // .. char flag=' ', flag1=' ', flag2=' '; // Presume all okay. if (mainCheck < 0.90 || mainCheck > 1.10) flag1='*'; // Flag as significantly different for main. if (pairCheck < 0.90 || pairCheck > 1.10) flag2='*'; // Flag as significantly different for pair. if (flag1 == '*' || flag2 == '*') { pairProblems++; // Remember number of problems. flag = '*'; // set global flag. } printf("%c %64s + %-64s [", flag, eventsFound[mainEvent].name, eventsFound[pairEvent].name); calculateSize(str, saveValues[0]); // Do some pretty formatting, printf("%c%9s,", flag1, str); calculateSize(str, saveValues[1]); printf("%c%9s]\n", flag2, str); CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // Delete all events in set. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // destroy the event set. } } // end loop on all events. if (type == 0) { // For good pairings on same devices, if (goodOnSame == 0) { printf("NO valid pairings of above events if both on the SAME device.\n"); } else { printf("%i valid pairings of above events if both on the SAME device.\n", goodOnSame); } printf("%i unique pairings on SAME device were rejected as bad combinations.\n", badSameCombo); if (pairProblems > 0) { printf("%i pairings resulted in a change of one or both event values > 10%%.\n", pairProblems); } else { printf("No significant change in event values read for any pairings.\n"); } } else { // Must be reporting bad pairings across devices. if (failOnDiff == 0) printf("NO failed pairings of above events if each on a DIFFERENT device.\n"); else printf("%i failed pairings of above events with each on a DIFFERENT device.\n", failOnDiff); } } // end loop on type. PAPI_shutdown(); // Returns no value. return(0); // exit OK. } // end MAIN.
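conductGpuToGpu above enables peer access unconditionally, so RUNTIME_API_CALL aborts the run on systems where the two GPUs have no P2P path (no NVLink and no common PCIe root). A minimal sketch of a capability check, assuming the same partner variable as above; CUDA runtime spelling shown:

// Check P2P capability in both directions before enabling peer access.
int can01 = 0, can10 = 0;
cudaDeviceCanAccessPeer(&can01, 0, partner);
cudaDeviceCanAccessPeer(&can10, partner, 0);
if (!can01 || !can10) {
    fprintf(stderr, "Devices 0 and %d are not peer-capable; "
                    "device-to-device copies would be staged through the host.\n", partner);
} else {
    cudaSetDevice(0);
    cudaDeviceEnablePeerAccess(partner, 0);   // flags argument must be 0
    cudaSetDevice(partner);
    cudaDeviceEnablePeerAccess(0, 0);
}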
2d780c079ecaac729ff0ac0ab555fcafd02f07df.cu
/* * Copyright 2015-2016 NVIDIA Corporation. All rights reserved. * * Sample to demonstrate use of NVlink CUPTI APIs * * This version is significantly changed to use PAPI and the CUDA component to * handle access and reporting. As of 10/05/2018, I have deleted all CUPTI_ONLY * references, for clarity. The file nvlink_bandwidth_cupti_only.cu contains * the cupti-only code. I also deleted the #if PAPI; there is no option * without PAPI. Also, before my changes, the makefile did not even have a * build option that set CUPTI_ONLY for this file. * * -TonyC. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <cuda.h> #include <cupti.h> #include "papi.h" // THIS MACRO EXITS if the papi call does not return PAPI_OK. Do not use for routines that // return anything else; e.g. PAPI_num_components, PAPI_get_component_info, PAPI_library_init. #define CALL_PAPI_OK(papi_routine) \ do { \ int _papiret = papi_routine; \ if (_papiret != PAPI_OK) { \ fprintf(stderr, "%s:%d macro: PAPI Error: function " #papi_routine " failed with ret=%d [%s].\n", \ __FILE__, __LINE__, _papiret, PAPI_strerror(_papiret)); \ exit(-1); \ } \ } while (0); #define CUPTI_CALL(call) \ do { \ CUptiResult _status = call; \ if (_status != CUPTI_SUCCESS) { \ const char *errstr; \ cuptiGetResultString(_status, &errstr); \ fprintf(stderr, "%s:%d: error: function %s failed with message '%s'.\n", \ __FILE__, __LINE__, #call, errstr); \ exit(-1); \ } \ } while (0); #define DRIVER_API_CALL(apiFuncCall) \ do { \ CUresult _status = apiFuncCall; \ if (_status != CUDA_SUCCESS) { \ const char *errName=NULL, *errStr=NULL; \ CUresult _e1 = cuGetErrorName(_status, &errName); \ CUresult _e2 = cuGetErrorString(_status, &errStr); \ fprintf(stderr, "%s:%d: error: function %s failed with error [%s]='%s'.\n", \ __FILE__, __LINE__, #apiFuncCall, errName, errStr); \ exit(-1); \ } \ } while (0); #define RUNTIME_API_CALL(apiFuncCall) \ do { \ cudaError_t _status = apiFuncCall; \ if (_status != cudaSuccess) { \ fprintf(stderr, "%s:%d: error: function %s failed with message'%s'.\n", \ __FILE__, __LINE__, #apiFuncCall, cudaGetErrorString(_status)); \ exit(-1); \ } \ } while (0); #define MEMORY_ALLOCATION_CALL(var) \ do { \ if (var == NULL) { \ fprintf(stderr, "%s:%d: Error: Memory Allocation Failed \n",\ __FILE__, __LINE__); \ exit(-1); \ } \ } while (0); #define MAX_DEVICES (32) #define BLOCK_SIZE (1024) #define GRID_SIZE (512) #define BUF_SIZE (32 * 1024) #define ALIGN_SIZE (8) #define SUCCESS (0) #define NUM_METRIC (18) #define NUM_EVENTS (2) #define MAX_SIZE (64*1024*1024) // 64 MB typedef union { long long ll; unsigned long long ull; double d; void *vp; unsigned char ch[8]; } convert_64_t; typedef struct { char name[128]; long long value; } eventStore_t; int eventsFoundCount = 0; // occupants of the array. int eventsFoundMax; // Size of the array. int eventsFoundAdd = 32; // Blocksize for increasing the array. eventStore_t *eventsFound = NULL; // The array. int Streams; // Gets asyncEngineCount (number of physical copy engines). int cpuToGpu = 0; int gpuToGpu = 0; size_t bufferSize = 0; int *deviceEvents = NULL; CUdeviceptr *pDevBuffer0 = NULL; CUdeviceptr *pDevBuffer1 = NULL; float **pHostBuffer = NULL; cudaStream_t *cudaStreams = NULL; //----------------------------------------------------------------------------- // This is the GPU routine to move a block from 'source' (on one GPU) to 'dest' // on another GPU. 
//----------------------------------------------------------------------------- extern "C" __global__ void test_nvlink_bandwidth(float *source, float *dest) { int idx = blockIdx.x * blockDim.x + threadIdx.x; dest[idx] = source[idx] * 2.0f; } // end routine #define DIM(x) (sizeof(x)/sizeof(*(x))) /* compute elements in an array */ //----------------------------------------------------------------------------- // FreeGlobals: Frees globally allocated memories. //----------------------------------------------------------------------------- void FreeGlobals(void) { int i; free(deviceEvents); for(i=0; i<Streams; i++) { RUNTIME_API_CALL(cudaSetDevice(0)); // device 0 for pDevBuffer0. RUNTIME_API_CALL(cudaFree((void *) pDevBuffer0[i])); // Free allocated space (pass the device pointer, not the address of its host slot). free(pHostBuffer[i]); // Just locally allocated. } free(pDevBuffer0); // all contents freed by above. free(pHostBuffer); // Free the pointers. free(pDevBuffer1); // contents freed by the way the tests work. for (i=0; i<Streams; i++) { // Destroy all streams. if (cudaStreams[i] != NULL) { RUNTIME_API_CALL(cudaStreamDestroy(cudaStreams[i])); } } free(cudaStreams); // Free the memory for pointers. } // end routine. //----------------------------------------------------------------------------- // Return a text version with B, KB, MB, GB or TB. //----------------------------------------------------------------------------- void calculateSize(char *result, uint64_t size) { int i; const char *sizes[] = { "TB", "GB", "MB", "KB", "B" }; uint64_t exbibytes = 1024ULL * 1024ULL * 1024ULL * 1024ULL; uint64_t multiplier = exbibytes; for(i = 0; (unsigned) i < DIM(sizes); i++, multiplier /= (uint64_t) 1024) { if(size < multiplier) continue; sprintf(result, "%.1f %s", (float) size / multiplier, sizes[i]); return; } strcpy(result, "0"); return; } // end routine //----------------------------------------------------------------------------- // Copy buffers from host to device, vice versa, both simultaneously. //----------------------------------------------------------------------------- void testCpuToGpu(CUdeviceptr * pDevBuffer, float **pHostBuffer, size_t bufferSize, cudaStream_t * cudaStreams) { int i; // Unidirectional copy H2D (Host to Device). for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(cudaMemcpyAsync((void *) pDevBuffer[i], pHostBuffer[i], bufferSize, cudaMemcpyHostToDevice, cudaStreams[i])); } RUNTIME_API_CALL(cudaDeviceSynchronize()); // Unidirectional copy D2H (Device to Host). for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(cudaMemcpyAsync(pHostBuffer[i], (void *) pDevBuffer[i], bufferSize, cudaMemcpyDeviceToHost, cudaStreams[i])); } RUNTIME_API_CALL(cudaDeviceSynchronize()); // Bidirectional copy for(i = 0; i < Streams; i += 2) { RUNTIME_API_CALL(cudaMemcpyAsync((void *) pDevBuffer[i], pHostBuffer[i], bufferSize, cudaMemcpyHostToDevice, cudaStreams[i])); RUNTIME_API_CALL(cudaMemcpyAsync(pHostBuffer[i + 1], (void *) pDevBuffer[i + 1], bufferSize, cudaMemcpyDeviceToHost, cudaStreams[i + 1])); } RUNTIME_API_CALL(cudaDeviceSynchronize()); } // end routine. //----------------------------------------------------------------------------- // Copy buffers from the host to each device, in preparation for a transfer // between devices.
//-----------------------------------------------------------------------------
void testGpuToGpu_part1(CUdeviceptr * pDevBuffer0, CUdeviceptr * pDevBuffer1,
                        float **pHostBuffer, size_t bufferSize, cudaStream_t * cudaStreams)
{
    int i;

    // Unidirectional copy H2D
    for(i = 0; i < Streams; i++) {
        RUNTIME_API_CALL(cudaMemcpyAsync((void *) pDevBuffer0[i], pHostBuffer[i], bufferSize, cudaMemcpyHostToDevice, cudaStreams[i]));
    }
    RUNTIME_API_CALL(cudaDeviceSynchronize());

    for(i = 0; i < Streams; i++) {
        RUNTIME_API_CALL(cudaMemcpyAsync((void *) pDevBuffer1[i], pHostBuffer[i], bufferSize, cudaMemcpyHostToDevice, cudaStreams[i]));
    }
    RUNTIME_API_CALL(cudaDeviceSynchronize());
} // end routine.

//-----------------------------------------------------------------------------
// Copy from device zero to device 1, then from device 1 to device 0.
//-----------------------------------------------------------------------------
void testGpuToGpu_part2(CUdeviceptr * pDevBuffer0, CUdeviceptr * pDevBuffer1,
                        float **pHostBuffer, size_t bufferSize, cudaStream_t * cudaStreams)
{
    int i;

    for(i = 0; i < Streams; i++) {
        RUNTIME_API_CALL(cudaMemcpyAsync((void *) pDevBuffer0[i], (void *) pDevBuffer1[i], bufferSize, cudaMemcpyDeviceToDevice, cudaStreams[i]));
        //printf("Copy %zu stream %d to devBuffer0 from devBuffer1 \n", bufferSize, i);
    }
    RUNTIME_API_CALL(cudaDeviceSynchronize());

    for(i = 0; i < Streams; i++) {
        RUNTIME_API_CALL(cudaMemcpyAsync((void *) pDevBuffer1[i], (void *) pDevBuffer0[i], bufferSize, cudaMemcpyDeviceToDevice, cudaStreams[i]));
        // printf("Copy %zu stream %d to devBuffer0 from devBuffer1 \n", bufferSize, i);
    }
    RUNTIME_API_CALL(cudaDeviceSynchronize());

    for(i = 0; i < Streams; i++) {
        test_nvlink_bandwidth <<< GRID_SIZE, BLOCK_SIZE >>> ((float *) pDevBuffer1[i], (float *) pDevBuffer0[i]);
        // printf("test_nvlink_bandwidth stream %d \n", i);
    }
} // end routine.

//-----------------------------------------------------------------------------
// conducts test CpuToGpu. This is mostly a shortcut for readability;
// decisions must be made about the device buffers.
//-----------------------------------------------------------------------------
void conductCpuToGpu(int EventSet, int device, long long *values)
{
    int i;

    if (device == 0) {
        CALL_PAPI_OK(PAPI_start(EventSet));                     // Start event counters.
        testCpuToGpu(pDevBuffer0, pHostBuffer, bufferSize, cudaStreams);
    } else {
        RUNTIME_API_CALL(cudaSetDevice(device));
        for(i = 0; i < Streams; i++) {
            RUNTIME_API_CALL(cudaMalloc((void **) &pDevBuffer1[i], bufferSize));
        }

        CALL_PAPI_OK(PAPI_start(EventSet));                     // Start event counters.
        testCpuToGpu(pDevBuffer1, pHostBuffer, bufferSize, cudaStreams);

        for (i=0; i<Streams; i++) {
            RUNTIME_API_CALL(cudaFree((void *) pDevBuffer1[i]));
        }
    } // end testing device other than 0.

    CALL_PAPI_OK(PAPI_stop(EventSet, values));                  // Stop and read any values.
} // end routine.

//-----------------------------------------------------------------------------
// conducts test GpuToGpu. This is mostly a shortcut for readability;
// decisions must be made about the device buffers.
//-----------------------------------------------------------------------------
void conductGpuToGpu(int EventSet, int device, long long *values)
{
    int i;

    // Need to target another GPU. I already have pDevBuffer0 on device 0.
    int partner=device;                                         // Presume event is not on zero.
    if (device == 0) partner=1;                                 // If it is on zero, make partner 1.

    RUNTIME_API_CALL(cudaSetDevice(0));                         // Device 0 must
    RUNTIME_API_CALL(cudaDeviceEnablePeerAccess(partner, 0));   // access partner.
    RUNTIME_API_CALL(cudaSetDevice(partner));                   // The partner device must access 0.
    RUNTIME_API_CALL(cudaDeviceEnablePeerAccess(0, 0));         // Let non-zero device access 0.

    for(i = 0; i < Streams; i++) {
        RUNTIME_API_CALL(cudaMalloc((void **) &pDevBuffer1[i], bufferSize));
    }

    // Prepare the copy, load up buffers on each device from the host.
    testGpuToGpu_part1(pDevBuffer0, pDevBuffer1, pHostBuffer, bufferSize, cudaStreams);

    // What we want to time: Copy from device 0->1, then device 1->0.
    CALL_PAPI_OK(PAPI_start(EventSet));                         // Start event counters.
    testGpuToGpu_part2(pDevBuffer0, pDevBuffer1, pHostBuffer, bufferSize, cudaStreams);
    CALL_PAPI_OK(PAPI_stop(EventSet, values));                  // Stop and read value.

    // Disable peer access.
    RUNTIME_API_CALL(cudaSetDevice(0));
    RUNTIME_API_CALL(cudaDeviceDisablePeerAccess(partner));     // Kill connection to device partner.
    RUNTIME_API_CALL(cudaSetDevice(partner));
    RUNTIME_API_CALL(cudaDeviceDisablePeerAccess(0));           // Kill access to device 0.

    // Now free the pointers on device 'partner' (never 0).
    for (i=0; i<Streams; i++) {
        RUNTIME_API_CALL(cudaFree((void *) pDevBuffer1[i]));
    }

    RUNTIME_API_CALL(cudaSetDevice(0));                         // return to the default device.
} // end routine.

//-----------------------------------------------------------------------------
// Show help.
//-----------------------------------------------------------------------------
static void printUsage()
{
    printf("usage: Demonstrate use of NVlink CUPTI APIs\n");
    printf("       -help           : display help message\n");
    printf("       --cpu-to-gpu    : Show results for data transfer between CPU and GPU \n");
    printf("       --gpu-to-gpu    : Show results for data transfer between two GPUs \n");
} // end routine.

//-----------------------------------------------------------------------------
// Interpret command line flags.
//-----------------------------------------------------------------------------
void parseCommandLineArgs(int argc, char *argv[])
{
    if(argc != 2) {
        printf("Invalid number of options\n");
        exit(0);
    }

    if(strcmp(argv[1], "--cpu-to-gpu") == 0) {
        cpuToGpu = 1;
    } else if(strcmp(argv[1], "--gpu-to-gpu") == 0) {
        gpuToGpu = 1;
    } else if((strcmp(argv[1], "--help") == 0) ||
              (strcmp(argv[1], "-help") == 0)  ||
              (strcmp(argv[1], "-h") == 0)) {
        printUsage();
        exit(0);
    } else {
        printf("Unrecognized option '%s'; defaulting to --cpu-to-gpu.\n", argv[1]);
        cpuToGpu = 1;
    }
} // end routine.

//-----------------------------------------------------------------------------
// Add an entry to the eventsFound array. On entry we always have room.
//-----------------------------------------------------------------------------
void addEventsFound(char *eventName, long long value)
{
    strncpy(eventsFound[eventsFoundCount].name, eventName, 127);    // Copy up to 127 chars; name[127] stays NUL from calloc/memset.
    eventsFound[eventsFoundCount].value = value;                    // Copy the value.

    if (++eventsFoundCount >= eventsFoundMax) {                     // bump count; if too much, make room.
        eventsFoundMax += eventsFoundAdd;                           // Add.
        eventsFound = (eventStore_t*) realloc(eventsFound, eventsFoundMax*sizeof(eventStore_t));     // Make new room.
        memset(eventsFound+(eventsFoundMax-eventsFoundAdd), 0, eventsFoundAdd*sizeof(eventStore_t)); // zero it.
    }
} // end routine.

//-----------------------------------------------------------------------------
// Main program.
//-----------------------------------------------------------------------------
int main(int argc, char *argv[])
{
    int device, deviceCount = 0, i = 0;
    size_t freeMemory = 0, totalMemory = 0;
    char str[64];

    eventsFoundMax = eventsFoundAdd;                                            // space allocated.
    eventsFound = (eventStore_t*) calloc(eventsFoundMax, sizeof(eventStore_t)); // make some space.
    cudaDeviceProp prop[MAX_DEVICES];

    // Parse command line arguments
    parseCommandLineArgs(argc, argv);

    DRIVER_API_CALL(cuInit(0));
    RUNTIME_API_CALL(cudaGetDeviceCount(&deviceCount));
    printf("There are %d devices.\n", deviceCount);

    if(deviceCount == 0) {
        printf("There is no device supporting CUDA.\n");
        exit(-1);
    }

    Streams = 1;                                                    // Always use at least ONE stream.
    for(device = 0; device < deviceCount; device++) {
        RUNTIME_API_CALL(cudaGetDeviceProperties(&prop[device], device));
        printf("CUDA Device %d Name: %s", device, prop[device].name);
        printf(", AsyncEngineCount=%i", prop[device].asyncEngineCount);
        printf(", MultiProcessors=%i", prop[device].multiProcessorCount);
        printf(", MaxThreadsPerMP=%i", prop[device].maxThreadsPerMultiProcessor);
        printf("\n");
        if (prop[device].asyncEngineCount > Streams) {              // If a new high,
            Streams = prop[device].asyncEngineCount;                // Always use the maximum.
        }
    }

    printf("Streams to use: %i (= max Copy Engines).\n", Streams);

    // allocate space
    deviceEvents= (int*) calloc(deviceCount, sizeof(int));
    pDevBuffer0 = (CUdeviceptr*)  calloc(Streams, sizeof(CUdeviceptr));
    pDevBuffer1 = (CUdeviceptr*)  calloc(Streams, sizeof(CUdeviceptr));
    pHostBuffer = (float **)      calloc(Streams, sizeof(float*));
    cudaStreams = (cudaStream_t*) calloc(Streams, sizeof(cudaStream_t));

    // Set memcpy size based on available device memory
    RUNTIME_API_CALL(cudaMemGetInfo(&freeMemory, &totalMemory));
    printf("Total Device Memory available : ");
    calculateSize(str, (uint64_t) totalMemory);
    printf("%s\n", str);

    bufferSize = MAX_SIZE < (freeMemory / 4) ? MAX_SIZE : (freeMemory / 4);
    bufferSize = bufferSize/2;
    printf("Memcpy size is set to %llu B (%llu MB)\n",
           (unsigned long long) bufferSize, (unsigned long long) bufferSize / (1024 * 1024));

    for(i = 0; i < Streams; i++) {
        RUNTIME_API_CALL(cudaStreamCreate(&cudaStreams[i]));
    }
    RUNTIME_API_CALL(cudaDeviceSynchronize());

    // Nvlink-topology Records are generated even before cudaMemcpy API is called.
    CUPTI_CALL(cuptiActivityFlushAll(0));

    // fprintf(stderr, "Setup PAPI counters internally (PAPI)\n");
    int EventSet = PAPI_NULL;
    int eventCount;
    int retval;
    int k, m, cid=-1;

    /* PAPI Initialization */
    retval = PAPI_library_init(PAPI_VER_CURRENT);
    if(retval != PAPI_VER_CURRENT) {
        fprintf(stderr, "PAPI_library_init failed, ret=%i [%s]\n",
                retval, PAPI_strerror(retval));
        FreeGlobals();
        exit(-1);
    }

    printf("PAPI version: %d.%d.%d\n",
           PAPI_VERSION_MAJOR(PAPI_VERSION),
           PAPI_VERSION_MINOR(PAPI_VERSION),
           PAPI_VERSION_REVISION(PAPI_VERSION));

    // Find cuda component index.
    k = PAPI_num_components();                                      // get number of components.
    for (i=0; i<k && cid<0; i++) {                                  // while not found,
        PAPI_component_info_t *aComponent =
            (PAPI_component_info_t*) PAPI_get_component_info(i);    // get the component info.
        if (aComponent == NULL) {                                   // if we failed,
            fprintf(stderr, "PAPI_get_component_info(%i) failed, "
                    "returned NULL. %i components reported.\n", i,k);
            FreeGlobals();
            exit(-1);
        }

        if (strcmp("cuda", aComponent->name) == 0) cid=i;           // If we found our match, record it.
    } // end search components.

    if (cid < 0) {                                                  // if no cuda component found,
        fprintf(stderr, "Failed to find cuda component among %i "
                "reported components.\n", k);
        FreeGlobals();
        PAPI_shutdown();
        exit(-1);
    }

    printf("Found CUDA Component at id %d\n", cid);

    // Add events at a GPU specific level ...
eg cuda:::metric:nvlink_total_data_transmitted:device=0 // Just profile devices to match the CUPTI example eventCount = 0; int eventsRead=0; for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(cudaMalloc((void **) &pDevBuffer0[i], bufferSize)); pHostBuffer[i] = (float *) malloc(bufferSize); MEMORY_ALLOCATION_CALL(pHostBuffer[i]); } // Begin enumeration of all events. if (cpuToGpu) printf("Experiment timing memory copy from host to GPU.\n"); if (gpuToGpu) printf("Experiment timing memory copy between GPU 0 and each other GPU.\n"); printf("Events with numeric values were read; if they are zero, they may not \n" "be operational, or the exercises performed by this code do not affect \n" "them. We report all 'nvlink' events presented by the cuda component. \n" "\n" "---------------------------Event Name---------------------------:---Value---\n"); PAPI_event_info_t info; // To get event enumeration info. m=PAPI_NATIVE_MASK; // Get the PAPI NATIVE mask. CALL_PAPI_OK(PAPI_enum_cmp_event(&m,PAPI_ENUM_FIRST,cid)); // Begin enumeration of ALL papi counters. do { // Enumerate all events. memset(&info,0,sizeof(PAPI_event_info_t)); // Clear event info. k=m; // Make a copy of current code. // enumerate sub-events, with masks. For this test, we do not // have any! But we do this to test our enumeration works as // expected. First time through is guaranteed, of course. do { // enumerate masked events. CALL_PAPI_OK(PAPI_get_event_info(k,&info)); // get name of k symbol. if (strstr(info.symbol, "nvlink") == NULL) continue; // skip if not an nvlink event. char *devstr = strstr(info.symbol, "device="); // look for device enumerator. if (devstr == NULL) continue; // Skip if no device present. device=atoi(devstr+7); // Get the device id, for info. // fprintf(stderr, "Found nvlink symbol '%s', device=%i.\n", info.symbol , device); if (device < 0 || device >= deviceCount) continue; // skip any not in range. deviceEvents[device]++; // Add to count of events on this device. CALL_PAPI_OK(PAPI_create_eventset(&EventSet)); CALL_PAPI_OK(PAPI_assign_eventset_component(EventSet, cid)); retval = PAPI_add_named_event(EventSet, info.symbol); // Don't want to fail program if name not found... if(retval == PAPI_OK) { eventCount++; // Bump number of events we could test. } else { CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // Delete all events in set. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // destroy the event set. continue; } long long value=-1; // The only value we read. // ===== Allocate Memory ===================================== if(cpuToGpu) { conductCpuToGpu(EventSet, device, &value); // Just one value for now. } else if(gpuToGpu) { conductGpuToGpu(EventSet, device, &value); // Just one value for now. } addEventsFound(info.symbol, value); // Add to events we were able to read. CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // Delete all events in set. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // destroy the event set. // report each event counted. if (value >= 0) { // If not still -1, eventsRead++; // .. count and report. calculateSize(str, value); if (value == 0) { printf("%-64s: %9s (not exercised by current test code.)\n", info.symbol, str); } else { printf("%-64s: %9s\n", info.symbol, str); } } else { printf("%-64s: Failed to read.\n", info.symbol); } } while(PAPI_enum_cmp_event(&k,PAPI_NTV_ENUM_UMASKS,cid)==PAPI_OK); // Get next umask entry (bits different) (should return PAPI_NOEVNT). } while(PAPI_enum_cmp_event(&m,PAPI_ENUM_EVENTS,cid)==PAPI_OK); // Get next event code. 
if (eventCount < 1) { // If we failed on all of them, fprintf(stderr, "Unable to add any NVLINK events; they are not present in the component.\n"); fprintf(stderr, "Unable to proceed with this test.\n"); FreeGlobals(); PAPI_shutdown(); // Returns no value. exit(-1); // exit no matter what. } if (eventsRead < 1) { // If failed to read any, printf("\nFailed to read any nvlink events.\n"); // report a failure. fprintf(stderr, "Unable to proceed with this test.\n"); FreeGlobals(); PAPI_shutdown(); // Returns no value. exit(-1); // exit no matter what. } printf("\nTotal nvlink events identified: %i.\n\n", eventsFoundCount); if (eventsFoundCount < 2) { // If failed to get counts on any, printf("Insufficient events are exercised by the current test code to perform pair testing.\n"); // report a failure. FreeGlobals(); PAPI_shutdown(); // Returns no value. exit(0); // exit no matter what. } for (i=0; i<deviceCount; i++) { printf("Device %i has %i events. %i potential pairings per device.\n", i, deviceEvents[i], deviceEvents[i]*(deviceEvents[i]-1)/2); } // Begin pair testing. We consider every possible pairing of events // that, tested alone, returned a value greater than zero. int mainEvent, pairEvent, mainDevice, pairDevice; long long saveValues[2]; long long readValues[2]; int goodOnSame=0, failOnDiff=0, badSameCombo=0, pairProblems=0; // Some counters. int type; // 0 succeed on same device, 1 = fail across devices. for (type=0; type<2; type++) { if (type == 0) { printf("List of Pairings on SAME device:\n"); printf("* means value changed by more than 10%% when paired (vs measured singly, above).\n"); printf("^ means a pair was rejected as an invalid combo.\n"); } else { printf("List of Failed Pairings on DIFFERENT devices:\n"); } for (mainEvent = 0; mainEvent<eventsFoundCount-1; mainEvent++) { // Through all but one events. char *devstr = strstr(eventsFound[mainEvent].name, "device="); // look for device enumerator. mainDevice=atoi(devstr+7); // Get the device id. for (pairEvent = mainEvent+1; pairEvent<eventsFoundCount; pairEvent++) { // Through all possible pairs, devstr = strstr(eventsFound[pairEvent].name, "device="); // look for device enumerator. pairDevice=atoi(devstr+7); // Get the device id. if (type == 0 && mainDevice != pairDevice) continue; // Skip if we need same device. if (type == 1 && mainDevice == pairDevice) continue; // Skip if we need different devices. CALL_PAPI_OK(PAPI_create_eventset(&EventSet)); CALL_PAPI_OK(PAPI_assign_eventset_component(EventSet, cid)); CALL_PAPI_OK(PAPI_add_named_event(EventSet, eventsFound[mainEvent].name)); // Here we must examine the return code. int ret = PAPI_add_named_event(EventSet, eventsFound[pairEvent].name); if (type == 0 && ret == PAPI_ECOMBO) { // A bad combination when looking for valid combos. printf("%c %64s + %-64s [Invalid Combo]\n", '^', // report it. eventsFound[mainEvent].name, eventsFound[pairEvent].name); badSameCombo++; // .. count an explicit rejection. CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // .. done with event set. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // .. continue; // .. try the next combo. } if (type == 1 && ret == PAPI_ECOMBO) { // A bad combination when we are looking for that. printf("%64s + %-64s BAD COMBINATION ACROSS DEVICES.\n", eventsFound[mainEvent].name, eventsFound[pairEvent].name); // report it. failOnDiff++; // count the bad combos. CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // .. don't need to go further. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // .. continue; // .. 
try the next combo.
                }

                if (ret != PAPI_OK) {                                           // If it failed for some other reason,
                    fprintf(stderr, "%s:%d Attempt to add event '%s' to set "
                            "with event '%s' produced an unexpected error: "
                            "[%s]. Ignoring this pair.\n", __FILE__, __LINE__,
                            eventsFound[pairEvent].name, eventsFound[mainEvent].name,
                            PAPI_strerror(ret));                                // %s needs the .name member, not the struct.
                    CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet));              // .. didn't work.
                    CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet));             // ..
                    continue;                                                   // .. try the next combo.
                }

                // We were able to add the pair. In type 1, we just skip it,
                // because we presume a single event on a device isn't changed
                // by any event on another device.
                if (type == 1) {
                    CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet));              // .. worked fine; don't measure it.
                    CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet));             // ..
                    continue;                                                   // .. try the next combo.
                }

                // We were able to add the pair, in type 0, get a measurement.
                readValues[0]= -1; readValues[1] = -1;

                if(cpuToGpu) {
                    conductCpuToGpu(EventSet, mainDevice, readValues);          // conduct for main.
                    saveValues[0] = readValues[0];
                    saveValues[1] = readValues[1];
                } else if(gpuToGpu) {
                    conductGpuToGpu(EventSet, mainDevice, readValues);          // conduct for main.
                    saveValues[0] = readValues[0];
                    saveValues[1] = readValues[1];
                }

                goodOnSame++;                                                   // Was accepted by cuda as a valid pairing.

                // For the checks, we add 2 (so -1 becomes +1) to avoid any
                // divide by zeros. It won't make a significant difference
                // in the ratios. (none if readings are the same).
                double mainSingle = (2.0 + eventsFound[mainEvent].value);       // Get value when read alone.
                double pairSingle = (2.0 + eventsFound[pairEvent].value);       // ..
                double mainCheck  = mainSingle/(2.0 + saveValues[0]);           // Get ratio when paired.
                double pairCheck  = pairSingle/(2.0 + saveValues[1]);           // ..

                char flag=' ', flag1=' ', flag2=' ';                            // Presume all okay.
                if (mainCheck < 0.90 || mainCheck > 1.10) flag1='*';            // Flag as significantly different for main.
                if (pairCheck < 0.90 || pairCheck > 1.10) flag2='*';            // Flag as significantly different for pair.
                if (flag1 == '*' || flag2 == '*') {
                    pairProblems++;                                             // Remember number of problems.
                    flag = '*';                                                 // set global flag.
                }

                printf("%c %64s + %-64s [", flag, eventsFound[mainEvent].name, eventsFound[pairEvent].name);
                calculateSize(str, saveValues[0]);                              // Do some pretty formatting,
                printf("%c%9s,", flag1, str);
                calculateSize(str, saveValues[1]);
                printf("%c%9s]\n", flag2, str);

                CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet));                  // Delete all events in set.
                CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet));                 // destroy the event set.
            }
        } // end loop on all events.

        if (type == 0) {                                                        // For good pairings on same devices,
            if (goodOnSame == 0) {
                printf("NO valid pairings of above events if both on the SAME device.\n");
            } else {
                printf("%i valid pairings of above events if both on the SAME device.\n", goodOnSame);
            }

            printf("%i unique pairings on SAME device were rejected as bad combinations.\n", badSameCombo);

            if (pairProblems > 0) {
                printf("%i pairings resulted in a change of one or both event values > 10%%.\n", pairProblems);
            } else {
                printf("No significant change in event values read for any pairings.\n");
            }
        } else {                                                                // Must be reporting bad pairings across devices.
            if (failOnDiff == 0) printf("NO failed pairings of above events if each on a DIFFERENT device.\n");
            else printf("%i failed pairings of above events with each on a DIFFERENT device.\n", failOnDiff);
        }
    } // end loop on type.

    PAPI_shutdown();                                                            // Returns no value.
    return(0);                                                                  // exit OK.
} // end MAIN.
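
//-----------------------------------------------------------------------------
// A minimal sketch of the measurement pattern used throughout this program
// (create an event set, bind it to the cuda component, add one named event,
// start, run a transfer, stop). It is illustrative only: the function name
// measureOneEvent is hypothetical, the event name is supplied by the caller
// (real nvlink names come from the enumeration loop in main), and it reuses
// the globals and CALL_PAPI_OK macro defined above.
//-----------------------------------------------------------------------------
static long long measureOneEvent(int cid, const char *eventName)
{
    int EventSet = PAPI_NULL;
    long long value = -1;

    CALL_PAPI_OK(PAPI_create_eventset(&EventSet));
    CALL_PAPI_OK(PAPI_assign_eventset_component(EventSet, cid));
    CALL_PAPI_OK(PAPI_add_named_event(EventSet, eventName));

    CALL_PAPI_OK(PAPI_start(EventSet));                     // Counters on.
    testCpuToGpu(pDevBuffer0, pHostBuffer, bufferSize,      // The work being measured.
                 cudaStreams);
    CALL_PAPI_OK(PAPI_stop(EventSet, &value));              // Counters off; read the value.

    CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet));
    CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet));
    return value;
} // end routine.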
c5bf1adbae8acf8f7d9d7d1d0c41c3236e9c5c78.hip
// !!! This is a file automatically generated by hipify!!! #include <iomanip> #include <sstream> #include <chrono> #include <hip/hip_runtime.h> #include <mpi.h> #include "tensor.h" #include "nccl_helper.h" int main(int argc, char *argv[]) { int size, rank; int numRepeats = 1000; if (argc > 1) numRepeats = atoi(argv[1]); //Initialize MPI MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Barrier(MPI_COMM_WORLD); // Set cuda devices if (hipSetDevice(rank) != hipSuccess) { std::stringstream ss; ss << "Failed to set cuda device. Rank: " << rank; throw std::runtime_error(ss.str()); } //NCCL communicator ncclComm_t comm; ncclUniqueId commId; // NCCL init and set up communicator clique CHECK_NCCL_ERROR(ncclGetUniqueId(&commId), rank); MPI_Bcast(&commId, NCCL_UNIQUE_ID_BYTES, MPI_CHAR, 0, MPI_COMM_WORLD); MPI_Barrier(MPI_COMM_WORLD); CHECK_NCCL_ERROR(ncclCommInitRank(&comm, size, commId, rank), rank); // CUDA stream creation hipStream_t stream; hipStreamCreateWithFlags(&stream, hipStreamNonBlocking); std::vector<int> sizes = {100000, 3097600, 4194304, 6553600, 16777217}; if (rank == 0) { std::cout << " NCCL MPI AllReduce " << std::endl; std::cout << " Num Ranks: " << size << std::endl; std::cout << std::setfill('-') << std::setw(100) << "-" << std::endl; std::cout << std::setfill(' '); std::cout << " # of floats bytes transferred Avg Time (msec) Max Time (msec)" << std::endl; std::cout << std::setfill('-') << std::setw(100) << "-" << std::endl; std::cout << std::setfill(' '); } for (auto &t_size: sizes) { auto data = fill({t_size*size}, rank); hipStreamSynchronize(stream); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < numRepeats; i++) CHECK_NCCL_ERROR(ncclAllReduce((void *) data.begin(), (void *) (data.begin() + t_size), t_size, ncclFloat, ncclSum, comm, stream), rank); hipStreamSynchronize(stream); auto end = std::chrono::steady_clock::now(); float time = static_cast<float>(std::chrono::duration<double, std::milli>(end - start).count() / numRepeats); float max_time, avg_time; MPI_Reduce(&time, &max_time, 1, MPI_FLOAT, MPI_MAX, 0, MPI_COMM_WORLD); MPI_Reduce(&time, &avg_time, 1, MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD); MPI_Barrier(MPI_COMM_WORLD); if (rank == 0) { avg_time = avg_time/size; std::cout << std::setw(15) << t_size << std::setw(15) << t_size * 4 << std::setw(20) << avg_time << std::setw(20) << max_time << std::endl; } } ncclCommDestroy(comm); MPI_Finalize(); }
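
// The table above reports only times. A common post-processing step, sketched
// below under the usual nccl-tests conventions, converts a measured time into
// algorithmic and bus bandwidth using the ring-allreduce factor 2*(n-1)/n.
// The helper name allreduceBandwidthGB is an illustrative addition, not part
// of the original benchmark.
static inline void allreduceBandwidthGB(long long t_size, int size, double msec,
                                        double *algBW, double *busBW)
{
    double bytes = (double) t_size * sizeof(float);   // payload reduced per rank
    double sec   = msec / 1e3;
    *algBW = bytes / sec / 1e9;                       // GB/s: data size / time
    *busBW = *algBW * 2.0 * (size - 1) / size;        // ring-allreduce wire traffic
}
// Example: allreduceBandwidthGB(4194304, size, avg_time, &alg, &bus) after the
// MPI_Reduce of the timings on rank 0.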
c5bf1adbae8acf8f7d9d7d1d0c41c3236e9c5c78.cu
#include <iomanip> #include <sstream> #include <chrono> #include <cuda.h> #include <mpi.h> #include "tensor.h" #include "nccl_helper.h" int main(int argc, char *argv[]) { int size, rank; int numRepeats = 1000; if (argc > 1) numRepeats = atoi(argv[1]); //Initialize MPI MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Barrier(MPI_COMM_WORLD); // Set cuda devices if (cudaSetDevice(rank) != cudaSuccess) { std::stringstream ss; ss << "Failed to set cuda device. Rank: " << rank; throw std::runtime_error(ss.str()); } //NCCL communicator ncclComm_t comm; ncclUniqueId commId; // NCCL init and set up communicator clique CHECK_NCCL_ERROR(ncclGetUniqueId(&commId), rank); MPI_Bcast(&commId, NCCL_UNIQUE_ID_BYTES, MPI_CHAR, 0, MPI_COMM_WORLD); MPI_Barrier(MPI_COMM_WORLD); CHECK_NCCL_ERROR(ncclCommInitRank(&comm, size, commId, rank), rank); // CUDA stream creation cudaStream_t stream; cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking); std::vector<int> sizes = {100000, 3097600, 4194304, 6553600, 16777217}; if (rank == 0) { std::cout << " NCCL MPI AllReduce " << std::endl; std::cout << " Num Ranks: " << size << std::endl; std::cout << std::setfill('-') << std::setw(100) << "-" << std::endl; std::cout << std::setfill(' '); std::cout << " # of floats bytes transferred Avg Time (msec) Max Time (msec)" << std::endl; std::cout << std::setfill('-') << std::setw(100) << "-" << std::endl; std::cout << std::setfill(' '); } for (auto &t_size: sizes) { auto data = fill({t_size*size}, rank); cudaStreamSynchronize(stream); auto start = std::chrono::steady_clock::now(); for (int i = 0; i < numRepeats; i++) CHECK_NCCL_ERROR(ncclAllReduce((void *) data.begin(), (void *) (data.begin() + t_size), t_size, ncclFloat, ncclSum, comm, stream), rank); cudaStreamSynchronize(stream); auto end = std::chrono::steady_clock::now(); float time = static_cast<float>(std::chrono::duration<double, std::milli>(end - start).count() / numRepeats); float max_time, avg_time; MPI_Reduce(&time, &max_time, 1, MPI_FLOAT, MPI_MAX, 0, MPI_COMM_WORLD); MPI_Reduce(&time, &avg_time, 1, MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD); MPI_Barrier(MPI_COMM_WORLD); if (rank == 0) { avg_time = avg_time/size; std::cout << std::setw(15) << t_size << std::setw(15) << t_size * 4 << std::setw(20) << avg_time << std::setw(20) << max_time << std::endl; } } ncclCommDestroy(comm); MPI_Finalize(); }
28be5aa85ee582b0833927f896e4c9ed8ecd16fc.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #define N 1024 __global__ void gpu_sort(int *d_a, int *d_b){ __shared__ int tmp[512]; int tid = threadIdx.x; int ttid = threadIdx.x + blockIdx.x * blockDim.x; int val = d_a[ttid]; int count =0; for(int i=tid;i<N;i+=512){ tmp[tid] = d_a[i]; __syncthreads(); for(int j=0;j<512;j++){ if(val>tmp[j]){ count++; } } __syncthreads(); } d_b[count] = val; } int main(){ int sizeByte = sizeof(int)*N; int *h_a = (int*) malloc(sizeByte); int *h_b = (int*) malloc(sizeByte); int *h_a_cpu = (int*) malloc(sizeByte); int *h_b_cpu = (int*) malloc(sizeByte); int *d_a, *d_b; hipMalloc(&d_a, sizeByte); hipMalloc(&d_b, sizeByte); for(int i=0;i<N;i++){ h_a[i] = rand(); h_a_cpu[i] = h_a[i]; } hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); hipMemcpy(d_a, h_a, sizeByte, hipMemcpyHostToDevice); hipLaunchKernelGGL(( gpu_sort), dim3(2), dim3(512), 0, 0, d_a,d_b); hipMemcpy(h_b, d_b, sizeByte, hipMemcpyDeviceToHost); hipEventRecord(stop,0); hipEventSynchronize(stop); float time = 0; hipEventElapsedTime(&time, start, stop); printf("Time consumption on GPU: %lf\n", time); for(int i=0;i<N-1;i++){ if(h_b[i]>h_b[i+1]){ printf("Error at index %d\n GPU[%d] = %d\n", i,i,h_b[i]); break; } } hipEvent_t start_cpu,stop_cpu; hipEventCreate(&start_cpu); hipEventCreate(&stop_cpu); hipEventRecord(start_cpu,0); //sort on cpu for(int i=N;i>0;i--){ for(int j=0;j<i-1;j++){ if(h_a_cpu[j]>h_a_cpu[j+1]){ int tmp = h_a_cpu[j]; h_a_cpu[j] = h_a_cpu[j + 1]; h_a_cpu[j+1] = tmp; } } } hipEventRecord(stop_cpu,0); hipEventSynchronize(stop_cpu); float time_cpu = 0; hipEventElapsedTime(&time_cpu, start_cpu, stop_cpu); printf("Time consumption on CPU: %lf\n", time_cpu); return 0; }
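
// The rank sort above is only correct when all keys are distinct: rand() can
// repeat values, in which case several threads compute the same rank and write
// the same d_b slot, leaving other slots unwritten. A standard fix, sketched
// below, breaks ties by original index so every rank is unique. The kernel
// name gpu_sort_stable is an illustrative addition; launch it in place of
// gpu_sort, e.g. hipLaunchKernelGGL(gpu_sort_stable, dim3(2), dim3(512), 0, 0, d_a, d_b).
__global__ void gpu_sort_stable(int *d_a, int *d_b){
    __shared__ int tmp[512];
    int tid  = threadIdx.x;
    int ttid = threadIdx.x + blockIdx.x * blockDim.x;
    int val  = d_a[ttid];
    int count = 0;
    for (int i = tid; i < N; i += 512) {
        tmp[tid] = d_a[i];                 // each block tiles through all of d_a
        __syncthreads();
        int base = i - tid;                // global index of tmp[0] in this tile
        for (int j = 0; j < 512; j++) {
            int v = tmp[j];
            // Rank = (# smaller keys) + (# equal keys at a lower index).
            if (v < val || (v == val && base + j < ttid)) count++;
        }
        __syncthreads();
    }
    d_b[count] = val;                      // ranks are now unique, no races
}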
28be5aa85ee582b0833927f896e4c9ed8ecd16fc.cu
#include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #include <stdlib.h> #define N 1024 __global__ void gpu_sort(int *d_a, int *d_b){ __shared__ int tmp[512]; int tid = threadIdx.x; int ttid = threadIdx.x + blockIdx.x * blockDim.x; int val = d_a[ttid]; int count =0; for(int i=tid;i<N;i+=512){ tmp[tid] = d_a[i]; __syncthreads(); for(int j=0;j<512;j++){ if(val>tmp[j]){ count++; } } __syncthreads(); } d_b[count] = val; } int main(){ int sizeByte = sizeof(int)*N; int *h_a = (int*) malloc(sizeByte); int *h_b = (int*) malloc(sizeByte); int *h_a_cpu = (int*) malloc(sizeByte); int *h_b_cpu = (int*) malloc(sizeByte); int *d_a, *d_b; cudaMalloc(&d_a, sizeByte); cudaMalloc(&d_b, sizeByte); for(int i=0;i<N;i++){ h_a[i] = rand(); h_a_cpu[i] = h_a[i]; } cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); cudaMemcpy(d_a, h_a, sizeByte, cudaMemcpyHostToDevice); gpu_sort<<<2, 512>>>(d_a,d_b); cudaMemcpy(h_b, d_b, sizeByte, cudaMemcpyDeviceToHost); cudaEventRecord(stop,0); cudaEventSynchronize(stop); float time = 0; cudaEventElapsedTime(&time, start, stop); printf("Time consumption on GPU: %lf\n", time); for(int i=0;i<N-1;i++){ if(h_b[i]>h_b[i+1]){ printf("Error at index %d\n GPU[%d] = %d\n", i,i,h_b[i]); break; } } cudaEvent_t start_cpu,stop_cpu; cudaEventCreate(&start_cpu); cudaEventCreate(&stop_cpu); cudaEventRecord(start_cpu,0); //sort on cpu for(int i=N;i>0;i--){ for(int j=0;j<i-1;j++){ if(h_a_cpu[j]>h_a_cpu[j+1]){ int tmp = h_a_cpu[j]; h_a_cpu[j] = h_a_cpu[j + 1]; h_a_cpu[j+1] = tmp; } } } cudaEventRecord(stop_cpu,0); cudaEventSynchronize(stop_cpu); float time_cpu = 0; cudaEventElapsedTime(&time_cpu, start_cpu, stop_cpu); printf("Time consumption on CPU: %lf\n", time_cpu); return 0; }
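
// The check above only verifies that h_b is non-decreasing; it cannot detect
// slots left unwritten when duplicate keys collide (see the stable-rank sketch
// after the HIP version of this file). A stronger check, sketched here as an
// illustrative addition, compares element-wise against a trusted host sort;
// it assumes compilation as C++ (nvcc), so <algorithm> is available.
#include <algorithm>
static bool verify_against_host(const int *h_in, const int *h_gpu, int n)
{
    int *ref = (int *) malloc(n * sizeof(int));
    for (int i = 0; i < n; i++) ref[i] = h_in[i];
    std::sort(ref, ref + n);                       // host reference result
    bool ok = true;
    for (int i = 0; i < n && ok; i++) ok = (ref[i] == h_gpu[i]);
    free(ref);
    return ok;
}
// Example: verify_against_host(h_a_cpu, h_b, N) before the CPU bubble sort
// modifies h_a_cpu.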
b765f35588f9acb9eece030a6be9752fb8ada8e6.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <cassert> #include <hip/hip_runtime.h> #include "transpose_device.cuh" /* * TODO for all kernels (including naive): * Leave a comment above all non-coalesced memory accesses and bank conflicts. * Make it clear if the suboptimal access is a read or write. If an access is * non-coalesced, specify how many cache lines it touches, and if an access * causes bank conflicts, say if its a 2-way bank conflict, 4-way bank * conflict, etc. * * Comment all of your kernels. */ /* * Each block of the naive transpose handles a 64x64 block of the input matrix, * with each thread of the block handling a 1x4 section and each warp handling * a 32x4 section. * * If we split the 64x64 matrix into 32 blocks of shape (32, 4), then we have * a block matrix of shape (2 blocks, 16 blocks). * Warp 0 handles block (0, 0), warp 1 handles (1, 0), warp 2 handles (0, 1), * warp n handles (n % 2, n / 2). * * This kernel is launched with block shape (64, 16) and grid shape * (n / 64, n / 64) where n is the size of the square matrix. * * You may notice that we suggested in lecture that threads should be able to * handle an arbitrary number of elements and that this kernel handles exactly * 4 elements per thread. This is OK here because to overwhelm this kernel * it would take a 4194304 x 4194304 matrix, which would take ~17.6TB of * memory (well beyond what I expect GPUs to have in the next few years). */ __global__ void naiveTransposeKernel(const float *input, float *output, int n) { // TODO: do not modify code, just comment on suboptimal accesses const int i = threadIdx.x + 64 * blockIdx.x; int j = 4 * threadIdx.y + 64 * blockIdx.y; const int end_j = j + 4; for (; j < end_j; j++) /* The above code can be decomposed into two instructions. * * For the first instruction, we load the input data from memory and this * access is coalesced because for threads in the same warp, they share * the same threadIdx.x, therefore the access is a continuous 128 bytes. * * For the second instruction, we store the data to the location specified by * output. Depending on the value of n, this instruction is generally not coalesced. * If n >= 32, it touches 32 cache lines. Otherwise it touches n cache lines. */ output[j + n * i] = input[i + n * j]; } __global__ void shmemTransposeKernel(const float *input, float *output, int n) { // TODO: Modify transpose kernel to use shared memory. All global memory // reads and writes should be coalesced. Minimize the number of shared // memory bank conflicts (0 bank conflicts should be possible using // padding). Again, comment on all sub-optimal accesses. // __shared__ float data[???]; __shared__ float data[65 * 64]; int i = threadIdx.x + 64 * blockIdx.x; int j = 4 * threadIdx.y + 64 * blockIdx.y; const int shared_i = threadIdx.x; int shared_j = 4 * threadIdx.y; int end_j = j + 4; for (; j < end_j; j++, shared_j++) { data[shared_j + 65 * shared_i] = input[i + n * j]; } __syncthreads(); i = threadIdx.x + 64 * blockIdx.y; j = 4 * threadIdx.y + 64 * blockIdx.x; shared_j = 4 * threadIdx.y; end_j = j + 4; for (; j < end_j; j++, shared_j++) { output[i + n * j] = data[shared_i + 65 * shared_j]; } } __global__ void optimalTransposeKernel(const float *input, float *output, int n) { // TODO: This should be based off of your shmemTransposeKernel. // Use any optimization tricks discussed so far to improve performance. // Consider ILP and loop unrolling. 
__shared__ float data[65 * 64]; int i = threadIdx.x + 64 * blockIdx.x; int j = 4 * threadIdx.y + 64 * blockIdx.y; const int shared_i = threadIdx.x; int shared_j = 4 * threadIdx.y; data[shared_j + 65 * shared_i] = input[i + n * j]; data[shared_j + 65 * shared_i + 1] = input[i + n * j + n]; data[shared_j + 65 * shared_i + 2] = input[i + n * j + n + n]; data[shared_j + 65 * shared_i + 3] = input[i + n * j + n + n + n]; __syncthreads(); i = threadIdx.x + 64 * blockIdx.y; j = 4 * threadIdx.y + 64 * blockIdx.x; shared_j = 4 * threadIdx.y; output[i + n * j] = data[shared_i + 65 * shared_j]; output[i + n * j + n] = data[shared_i + 65 * shared_j + 65]; output[i + n * j + n + n] = data[shared_i + 65 * shared_j + 65 + 65]; output[i + n * j + n + n + n] = data[shared_i + 65 * shared_j + 65 + 65 + 65]; } void cudaTranspose( const float *d_input, float *d_output, int n, TransposeImplementation type) { if (type == NAIVE) { dim3 blockSize(64, 16); dim3 gridSize(n / 64, n / 64); hipLaunchKernelGGL(( naiveTransposeKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_input, d_output, n); } else if (type == SHMEM) { dim3 blockSize(64, 16); dim3 gridSize(n / 64, n / 64); hipLaunchKernelGGL(( shmemTransposeKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_input, d_output, n); } else if (type == OPTIMAL) { dim3 blockSize(64, 16); dim3 gridSize(n / 64, n / 64); hipLaunchKernelGGL(( optimalTransposeKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_input, d_output, n); } // Unknown type else assert(false); }
b765f35588f9acb9eece030a6be9752fb8ada8e6.cu
#include <cstdio> #include <cassert> #include <cuda_runtime.h> #include "transpose_device.cuh" /* * TODO for all kernels (including naive): * Leave a comment above all non-coalesced memory accesses and bank conflicts. * Make it clear if the suboptimal access is a read or write. If an access is * non-coalesced, specify how many cache lines it touches, and if an access * causes bank conflicts, say if its a 2-way bank conflict, 4-way bank * conflict, etc. * * Comment all of your kernels. */ /* * Each block of the naive transpose handles a 64x64 block of the input matrix, * with each thread of the block handling a 1x4 section and each warp handling * a 32x4 section. * * If we split the 64x64 matrix into 32 blocks of shape (32, 4), then we have * a block matrix of shape (2 blocks, 16 blocks). * Warp 0 handles block (0, 0), warp 1 handles (1, 0), warp 2 handles (0, 1), * warp n handles (n % 2, n / 2). * * This kernel is launched with block shape (64, 16) and grid shape * (n / 64, n / 64) where n is the size of the square matrix. * * You may notice that we suggested in lecture that threads should be able to * handle an arbitrary number of elements and that this kernel handles exactly * 4 elements per thread. This is OK here because to overwhelm this kernel * it would take a 4194304 x 4194304 matrix, which would take ~17.6TB of * memory (well beyond what I expect GPUs to have in the next few years). */ __global__ void naiveTransposeKernel(const float *input, float *output, int n) { // TODO: do not modify code, just comment on suboptimal accesses const int i = threadIdx.x + 64 * blockIdx.x; int j = 4 * threadIdx.y + 64 * blockIdx.y; const int end_j = j + 4; for (; j < end_j; j++) /* The above code can be decomposed into two instructions. * * For the first instruction, we load the input data from memory and this * access is coalesced because for threads in the same warp, they share * the same threadIdx.x, therefore the access is a continuous 128 bytes. * * For the second instruction, we store the data to the location specified by * output. Depending on the value of n, this instruction is generally not coalesced. * If n >= 32, it touches 32 cache lines. Otherwise it touches n cache lines. */ output[j + n * i] = input[i + n * j]; } __global__ void shmemTransposeKernel(const float *input, float *output, int n) { // TODO: Modify transpose kernel to use shared memory. All global memory // reads and writes should be coalesced. Minimize the number of shared // memory bank conflicts (0 bank conflicts should be possible using // padding). Again, comment on all sub-optimal accesses. // __shared__ float data[???]; __shared__ float data[65 * 64]; int i = threadIdx.x + 64 * blockIdx.x; int j = 4 * threadIdx.y + 64 * blockIdx.y; const int shared_i = threadIdx.x; int shared_j = 4 * threadIdx.y; int end_j = j + 4; for (; j < end_j; j++, shared_j++) { data[shared_j + 65 * shared_i] = input[i + n * j]; } __syncthreads(); i = threadIdx.x + 64 * blockIdx.y; j = 4 * threadIdx.y + 64 * blockIdx.x; shared_j = 4 * threadIdx.y; end_j = j + 4; for (; j < end_j; j++, shared_j++) { output[i + n * j] = data[shared_i + 65 * shared_j]; } } __global__ void optimalTransposeKernel(const float *input, float *output, int n) { // TODO: This should be based off of your shmemTransposeKernel. // Use any optimization tricks discussed so far to improve performance. // Consider ILP and loop unrolling. 
__shared__ float data[65 * 64]; int i = threadIdx.x + 64 * blockIdx.x; int j = 4 * threadIdx.y + 64 * blockIdx.y; const int shared_i = threadIdx.x; int shared_j = 4 * threadIdx.y; data[shared_j + 65 * shared_i] = input[i + n * j]; data[shared_j + 65 * shared_i + 1] = input[i + n * j + n]; data[shared_j + 65 * shared_i + 2] = input[i + n * j + n + n]; data[shared_j + 65 * shared_i + 3] = input[i + n * j + n + n + n]; __syncthreads(); i = threadIdx.x + 64 * blockIdx.y; j = 4 * threadIdx.y + 64 * blockIdx.x; shared_j = 4 * threadIdx.y; output[i + n * j] = data[shared_i + 65 * shared_j]; output[i + n * j + n] = data[shared_i + 65 * shared_j + 65]; output[i + n * j + n + n] = data[shared_i + 65 * shared_j + 65 + 65]; output[i + n * j + n + n + n] = data[shared_i + 65 * shared_j + 65 + 65 + 65]; } void cudaTranspose( const float *d_input, float *d_output, int n, TransposeImplementation type) { if (type == NAIVE) { dim3 blockSize(64, 16); dim3 gridSize(n / 64, n / 64); naiveTransposeKernel<<<gridSize, blockSize>>>(d_input, d_output, n); } else if (type == SHMEM) { dim3 blockSize(64, 16); dim3 gridSize(n / 64, n / 64); shmemTransposeKernel<<<gridSize, blockSize>>>(d_input, d_output, n); } else if (type == OPTIMAL) { dim3 blockSize(64, 16); dim3 gridSize(n / 64, n / 64); optimalTransposeKernel<<<gridSize, blockSize>>>(d_input, d_output, n); } // Unknown type else assert(false); }
aa2e92220226325f23b366861e866f22d499d6ee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <utility> #include <typeinfo> #include <quda_internal.h> #include <gauge_field.h> #include <ks_improved_force.h> #include <quda_matrix.h> #include <tune_quda.h> #include <index_helper.cuh> #include <gauge_field_order.h> #include <instantiate.h> #ifdef GPU_HISQ_FORCE namespace quda { namespace fermion_force { enum { XUP = 0, YUP = 1, ZUP = 2, TUP = 3, TDOWN = 4, ZDOWN = 5, YDOWN = 6, XDOWN = 7 }; enum HisqForceType { FORCE_ALL_LINK, FORCE_MIDDLE_LINK, FORCE_LEPAGE_MIDDLE_LINK, FORCE_SIDE_LINK, FORCE_SIDE_LINK_SHORT, FORCE_LONG_LINK, FORCE_COMPLETE, FORCE_ONE_LINK, FORCE_INVALID }; constexpr int opp_dir(int dir) { return 7-dir; } constexpr int goes_forward(int dir) { return dir<=3; } constexpr int goes_backward(int dir) { return dir>3; } constexpr int CoeffSign(int pos_dir, int odd_lattice) { return 2*((pos_dir + odd_lattice + 1) & 1) - 1; } constexpr int Sign(int parity) { return parity ? -1 : 1; } constexpr int posDir(int dir) { return (dir >= 4) ? 7-dir : dir; } template <int dir, typename Arg> constexpr void updateCoords(int x[], int shift, const Arg &arg) { x[dir] = (x[dir] + shift + arg.E[dir]) % arg.E[dir]; } template <typename Arg> constexpr void updateCoords(int x[], int dir, int shift, const Arg &arg) { switch (dir) { case 0: updateCoords<0>(x, shift, arg); break; case 1: updateCoords<1>(x, shift, arg); break; case 2: updateCoords<2>(x, shift, arg); break; case 3: updateCoords<3>(x, shift, arg); break; } } //struct for holding the fattening path coefficients template <typename real> struct PathCoefficients { const real one; const real three; const real five; const real seven; const real naik; const real lepage; PathCoefficients(const double *path_coeff_array) : one(path_coeff_array[0]), naik(path_coeff_array[1]), three(path_coeff_array[2]), five(path_coeff_array[3]), seven(path_coeff_array[4]), lepage(path_coeff_array[5]) { } }; template <typename real_, int nColor_, QudaReconstructType reconstruct=QUDA_RECONSTRUCT_NO> struct BaseForceArg { using real = real_; static constexpr int nColor = nColor_; typedef typename gauge_mapper<real,reconstruct>::type G; const G link; int threads; int X[4]; // regular grid dims int D[4]; // working set grid dims int E[4]; // extended grid dims int commDim[4]; int border[4]; int base_idx[4]; // the offset into the extended field int oddness_change; int mu; int sig; /** @param[in] link Gauge field @param[in] overlap Radius of additional redundant computation to do */ BaseForceArg(const GaugeField &link, int overlap) : link(link), threads(1), commDim{ comm_dim_partitioned(0), comm_dim_partitioned(1), comm_dim_partitioned(2), comm_dim_partitioned(3) } { for (int d=0; d<4; d++) { E[d] = link.X()[d]; border[d] = link.R()[d]; X[d] = E[d] - 2*border[d]; D[d] = comm_dim_partitioned(d) ? X[d]+overlap*2 : X[d]; base_idx[d] = comm_dim_partitioned(d) ? 
border[d]-overlap : 0; threads *= D[d]; } threads /= 2; oddness_change = (base_idx[0] + base_idx[1] + base_idx[2] + base_idx[3])&1; } }; template <typename real, int nColor, QudaReconstructType reconstruct=QUDA_RECONSTRUCT_NO> struct FatLinkArg : public BaseForceArg<real, nColor, reconstruct> { using BaseForceArg = BaseForceArg<real, nColor, reconstruct>; typedef typename gauge_mapper<real,QUDA_RECONSTRUCT_NO>::type F; F outA; F outB; F pMu; F p3; F qMu; const F oProd; const F qProd; const F qPrev; const real coeff; const real accumu_coeff; const bool p_mu; const bool q_mu; const bool q_prev; FatLinkArg(GaugeField &force, const GaugeField &oProd, const GaugeField &link, real coeff, HisqForceType type) : BaseForceArg(link, 0), outA(force), outB(force), pMu(oProd), p3(oProd), qMu(oProd), oProd(oProd), qProd(oProd), qPrev(oProd), coeff(coeff), accumu_coeff(0), p_mu(false), q_mu(false), q_prev(false) { if (type != FORCE_ONE_LINK) errorQuda("This constructor is for FORCE_ONE_LINK"); } FatLinkArg(GaugeField &newOprod, GaugeField &pMu, GaugeField &P3, GaugeField &qMu, const GaugeField &oProd, const GaugeField &qPrev, const GaugeField &link, real coeff, int overlap, HisqForceType type) : BaseForceArg(link, overlap), outA(newOprod), outB(newOprod), pMu(pMu), p3(P3), qMu(qMu), oProd(oProd), qProd(oProd), qPrev(qPrev), coeff(coeff), accumu_coeff(0), p_mu(true), q_mu(true), q_prev(true) { if (type != FORCE_MIDDLE_LINK) errorQuda("This constructor is for FORCE_MIDDLE_LINK"); } FatLinkArg(GaugeField &newOprod, GaugeField &pMu, GaugeField &P3, GaugeField &qMu, const GaugeField &oProd, const GaugeField &link, real coeff, int overlap, HisqForceType type) : BaseForceArg(link, overlap), outA(newOprod), outB(newOprod), pMu(pMu), p3(P3), qMu(qMu), oProd(oProd), qProd(oProd), qPrev(qMu), coeff(coeff), accumu_coeff(0), p_mu(true), q_mu(true), q_prev(false) { if (type != FORCE_MIDDLE_LINK) errorQuda("This constructor is for FORCE_MIDDLE_LINK"); } FatLinkArg(GaugeField &newOprod, GaugeField &P3, const GaugeField &oProd, const GaugeField &qPrev, const GaugeField &link, real coeff, int overlap, HisqForceType type) : BaseForceArg(link, overlap), outA(newOprod), outB(newOprod), pMu(P3), p3(P3), qMu(qPrev), oProd(oProd), qProd(oProd), qPrev(qPrev), coeff(coeff), accumu_coeff(0), p_mu(false), q_mu(false), q_prev(true) { if (type != FORCE_LEPAGE_MIDDLE_LINK) errorQuda("This constructor is for FORCE_MIDDLE_LINK"); } FatLinkArg(GaugeField &newOprod, GaugeField &shortP, const GaugeField &P3, const GaugeField &qProd, const GaugeField &link, real coeff, real accumu_coeff, int overlap, HisqForceType type) : BaseForceArg(link, overlap), outA(newOprod), outB(shortP), pMu(P3), p3(P3), qMu(qProd), oProd(qProd), qProd(qProd), qPrev(qProd), coeff(coeff), accumu_coeff(accumu_coeff), p_mu(false), q_mu(false), q_prev(false) { if (type != FORCE_SIDE_LINK) errorQuda("This constructor is for FORCE_SIDE_LINK or FORCE_ALL_LINK"); } FatLinkArg(GaugeField &newOprod, GaugeField &P3, const GaugeField &link, real coeff, int overlap, HisqForceType type) : BaseForceArg(link, overlap), outA(newOprod), outB(newOprod), pMu(P3), p3(P3), qMu(P3), oProd(P3), qProd(P3), qPrev(P3), coeff(coeff), accumu_coeff(0.0), p_mu(false), q_mu(false), q_prev(false) { if (type != FORCE_SIDE_LINK_SHORT) errorQuda("This constructor is for FORCE_SIDE_LINK_SHORT"); } FatLinkArg(GaugeField &newOprod, GaugeField &shortP, const GaugeField &oProd, const GaugeField &qPrev, const GaugeField &link, real coeff, real accumu_coeff, int overlap, HisqForceType type, bool dummy) : 
BaseForceArg(link, overlap), outA(newOprod), outB(shortP), oProd(oProd), qPrev(qPrev), pMu(shortP), p3(shortP), qMu(qPrev), qProd(qPrev), // dummy coeff(coeff), accumu_coeff(accumu_coeff), p_mu(false), q_mu(false), q_prev(false) { if (type != FORCE_ALL_LINK) errorQuda("This constructor is for FORCE_ALL_LINK"); } }; template <typename Arg> __global__ void oneLinkTermKernel(Arg arg) { typedef Matrix<complex<typename Arg::real>, Arg::nColor> Link; int x_cb = blockIdx.x * blockDim.x + threadIdx.x; if (x_cb >= arg.threads) return; int parity = blockIdx.y * blockDim.y + threadIdx.y; int sig = blockIdx.z * blockDim.z + threadIdx.z; if (sig >= 4) return; int x[4]; getCoords(x, x_cb, arg.X, parity); #pragma unroll for (int d=0; d<4; d++) x[d] += arg.border[d]; int e_cb = linkIndex(x,arg.E); Link w = arg.oProd(sig, e_cb, parity); Link force = arg.outA(sig, e_cb, parity); force += arg.coeff * w; arg.outA(sig, e_cb, parity) = force; } /********************************allLinkKernel********************************************* * * In this function we need * READ * 3 LINKS: ad_link, ab_link, bc_link * 5 COLOR MATRIX: Qprev_at_D, oprod_at_C, newOprod_at_A(sig), newOprod_at_D/newOprod_at_A(mu), shortP_at_D * WRITE: * 3 COLOR MATRIX: newOprod_at_A(sig), newOprod_at_D/newOprod_at_A(mu), shortP_at_D, * * If sig is negative, then we don't need to read/write the color matrix newOprod_at_A(sig) * * Therefore the data traffic, in two-number pair (num_of_link, num_of_color_matrix) * * if (sig is positive): (3, 8) * else : (3, 6) * * This function is called 384 times, half positive sig, half negative sig * * Flop count, in two-number pair (matrix_multi, matrix_add) * if(sig is positive) (6,3) * else (4,2) * ************************************************************************************************/ template <int sig_positive, int mu_positive, typename Arg> __global__ void allLinkKernel(Arg arg) { typedef Matrix<complex<typename Arg::real>, Arg::nColor> Link; int x_cb = blockIdx.x * blockDim.x + threadIdx.x; if (x_cb >= arg.threads) return; int parity = blockIdx.y * blockDim.y + threadIdx.y; int x[4]; getCoords(x, x_cb, arg.D, parity); for (int d=0; d<4; d++) x[d] += arg.base_idx[d]; int e_cb = linkIndex(x,arg.E); parity = parity^arg.oddness_change; auto mycoeff = CoeffSign(sig_positive,parity)*arg.coeff; int y[4] = {x[0], x[1], x[2], x[3]}; int mysig = posDir(arg.sig); updateCoords(y, mysig, (sig_positive ? 1 : -1), arg); int point_b = linkIndex(y,arg.E); int ab_link_nbr_idx = (sig_positive) ? e_cb : point_b; for (int d=0; d<4; d++) y[d] = x[d]; /* sig * A________B * mu | | * D | |C * * A is the current point (sid) * */ int mu = mu_positive ? arg.mu : opp_dir(arg.mu); int dir = mu_positive ? -1 : 1; updateCoords(y, mu, dir, arg); int point_d = linkIndex(y,arg.E); updateCoords(y, mysig, (sig_positive ? 1 : -1), arg); int point_c = linkIndex(y,arg.E); Link Uab = arg.link(posDir(arg.sig), ab_link_nbr_idx, sig_positive^(1-parity)); Link Uad = arg.link(mu, mu_positive ? point_d : e_cb, mu_positive ? 1-parity : parity); Link Ubc = arg.link(mu, mu_positive ? point_c : point_b, mu_positive ? parity : 1-parity); Link Ox = arg.qPrev(0, point_d, 1-parity); Link Oy = arg.oProd(0, point_c, parity); Link Oz = mu_positive ? conj(Ubc)*Oy : Ubc*Oy; if (sig_positive) { Link force = arg.outA(arg.sig, e_cb, parity); force += Sign(parity)*mycoeff*Oz*Ox* (mu_positive ? Uad : conj(Uad)); arg.outA(arg.sig, e_cb, parity) = force; Oy = Uab*Oz; } else { Oy = conj(Uab)*Oz; } Link force = arg.outA(mu, mu_positive ? 
point_d : e_cb, mu_positive ? 1-parity : parity);
      force += Sign(mu_positive ? 1-parity : parity)*mycoeff* (mu_positive ? Oy*Ox : conj(Ox)*conj(Oy));
      arg.outA(mu, mu_positive ? point_d : e_cb, mu_positive ? 1-parity : parity) = force;

      Link shortP = arg.outB(0, point_d, 1-parity);
      shortP += arg.accumu_coeff* (mu_positive ? Uad : conj(Uad)) *Oy;
      arg.outB(0, point_d, 1-parity) = shortP;
    }

    /**************************middleLinkKernel*****************************
     *
     *
     * Generally we need
     * READ
     *    3 LINKS:         ab_link, bc_link, ad_link
     *    3 COLOR MATRIX:  newOprod_at_A, oprod_at_C, Qprod_at_D
     * WRITE
     *    4 COLOR MATRIX:  newOprod_at_A, P3_at_A, Pmu_at_B, Qmu_at_A
     *
     * Three call variations:
     *   1. when Qprev == NULL: Qprod_at_D does not exist and is not read in
     *   2. full read/write
     *   3. when Pmu/Qmu == NULL, Pmu_at_B and Qmu_at_A are not written out
     *
     *   In all three cases above, if the direction sig is negative, newOprod_at_A is
     *   not read in or written out.
     *
     * Therefore the data traffic, in two-number pair (num_of_link, num_of_color_matrix)
     *   Call 1: (called 48 times, half positive sig, half negative sig)
     *             if (sig is positive):    (3, 6)
     *             else                :    (3, 4)
     *   Call 2: (called 192 times, half positive sig, half negative sig)
     *             if (sig is positive):    (3, 7)
     *             else                :    (3, 5)
     *   Call 3: (called 48 times, half positive sig, half negative sig)
     *             if (sig is positive):    (3, 5)
     *             else                :    (3, 2) no need to load Qprod_at_D in this case
     *
     * note: oprod_at_C could actually be read in from D when it is the fresh outer product
     *       and we call it oprod_at_C to simplify naming. This does not affect our data traffic analysis
     *
     * Flop count, in two-number pair (matrix_multi, matrix_add)
     *   call 1:     if (sig is positive)  (3, 1)
     *               else                  (2, 0)
     *   call 2:     if (sig is positive)  (4, 1)
     *               else                  (3, 0)
     *   call 3:     if (sig is positive)  (4, 1)
     *   (Lepage)    else                  (2, 0)
     *
     ****************************************************************************/
    template <int sig_positive, int mu_positive, bool pMu, bool qMu, bool qPrev, typename Arg>
    __global__ void middleLinkKernel(Arg arg)
    {
      typedef Matrix<complex<typename Arg::real>, Arg::nColor> Link;

      int x_cb = blockIdx.x * blockDim.x + threadIdx.x;
      if (x_cb >= arg.threads) return;
      int parity = blockIdx.y * blockDim.y + threadIdx.y;

      int x[4];
      getCoords(x, x_cb, arg.D, parity);

      /*        A________B
       *   mu   |        |
       *       D|        |C
       *
       *   A is the current point (sid)
       *
       */

      for (int d=0; d<4; d++) x[d] += arg.base_idx[d];
      int e_cb = linkIndex(x,arg.E);
      parity = parity ^ arg.oddness_change;
      int y[4] = {x[0], x[1], x[2], x[3]};

      int mymu = posDir(arg.mu);
      updateCoords(y, mymu, (mu_positive ? -1 : 1), arg);

      int point_d = linkIndex(y, arg.E);
      int ad_link_nbr_idx = mu_positive ? point_d : e_cb;

      int mysig = posDir(arg.sig);
      updateCoords(y, mysig, (sig_positive ? 1 : -1), arg);
      int point_c = linkIndex(y, arg.E);

      for (int d=0; d<4; d++) y[d] = x[d];
      updateCoords(y, mysig, (sig_positive ? 1 : -1), arg);
      int point_b = linkIndex(y, arg.E);

      int bc_link_nbr_idx = mu_positive ? point_c : point_b;
      int ab_link_nbr_idx = sig_positive ? e_cb : point_b;

      // load the link variable connecting a and b
      Link Uab = arg.link(mysig, ab_link_nbr_idx, sig_positive^(1-parity));

      // load the link variable connecting b and c
      Link Ubc = arg.link(mymu, bc_link_nbr_idx, mu_positive^(1-parity));

      Link Oy;
      if (!qPrev) {
        Oy = arg.oProd(posDir(arg.sig), sig_positive ? point_d : point_c, sig_positive^parity);
        if (!sig_positive) Oy = conj(Oy);
      } else { // QprevOdd != NULL
        Oy = arg.oProd(0, point_c, parity);
      }

      Link Ow = !mu_positive ?
Ubc*Oy : conj(Ubc)*Oy;

      if (pMu) arg.pMu(0, point_b, 1-parity) = Ow;

      arg.p3(0, e_cb, parity) = sig_positive ? Uab*Ow : conj(Uab)*Ow;

      Link Uad = arg.link(mymu, ad_link_nbr_idx, mu_positive^parity);
      if (!mu_positive)  Uad = conj(Uad);

      if (!qPrev) {
        if (sig_positive) Oy = Ow*Uad;
        if ( qMu ) arg.qMu(0, e_cb, parity) = Uad;
      } else {
        Link Ox;
        if ( qMu || sig_positive ) {
          Oy = arg.qPrev(0, point_d, 1-parity);
          Ox = Oy*Uad;
        }
        if ( qMu ) arg.qMu(0, e_cb, parity) = Ox;
        if (sig_positive) Oy = Ow*Ox;
      }

      if (sig_positive) {
        Link oprod = arg.outA(arg.sig, e_cb, parity);
        oprod += arg.coeff*Oy;
        arg.outA(arg.sig, e_cb, parity) = oprod;
      }

    }

    /***********************************sideLinkKernel***************************
     *
     * In general we need
     * READ
     *    1 LINK:          ad_link
     *    4 COLOR MATRIX:  shortP_at_D, newOprod, P3_at_A, Qprod_at_D,
     * WRITE
     *    2 COLOR MATRIX:  shortP_at_D, newOprod,
     *
     * Two call variations:
     *   1. full read/write
     *   2. when shortP == NULL && Qprod == NULL:
     *          no need to read ad_link/shortP_at_D or write shortP_at_D
     *          Qprod_at_D does not exist and is not read in
     *
     *
     * Therefore the data traffic, in two-number pair (num_of_links, num_of_color_matrix)
     *   Call 1: (called 192 times)
     *                           (1, 6)
     *
     *   Call 2: (called 48 times)
     *                           (0, 3)
     *
     * note: newOprod can be at point D or A, depending on if mu is positive or negative
     *
     * Flop count, in two-number pair (matrix_multi, matrix_add)
     *   call 1:     (2, 2)
     *   call 2:     (0, 1)
     *
     *********************************************************************************/
    template <int mu_positive, typename Arg>
    __global__ void sideLinkKernel(Arg arg)
    {
      typedef Matrix<complex<typename Arg::real>, Arg::nColor> Link;

      int x_cb = blockIdx.x * blockDim.x + threadIdx.x;
      if (x_cb >= arg.threads) return;
      int parity = blockIdx.y * blockDim.y + threadIdx.y;

      int x[4];
      getCoords(x, x_cb, arg.D, parity);
      for (int d=0; d<4; d++) x[d] = x[d] + arg.base_idx[d];
      int e_cb = linkIndex(x,arg.E);
      parity = parity ^ arg.oddness_change;

      /*      compute the side link contribution to the momentum
       *
       *             sig
       *          A________B
       *           |       |   mu
       *         D |       |C
       *
       *      A is the current point (x_cb)
       *
       */

      int mymu = posDir(arg.mu);
      int y[4] = {x[0], x[1], x[2], x[3]};
      updateCoords(y, mymu, (mu_positive ? -1 : 1), arg);
      int point_d = linkIndex(y,arg.E);

      Link Oy = arg.p3(0, e_cb, parity);

      {
        int ad_link_nbr_idx = mu_positive ? point_d : e_cb;

        Link Uad = arg.link(mymu, ad_link_nbr_idx, mu_positive^parity);
        Link Ow = mu_positive ? Uad*Oy : conj(Uad)*Oy;

        Link shortP = arg.outB(0, point_d, 1-parity);
        shortP += arg.accumu_coeff * Ow;
        arg.outB(0, point_d, 1-parity) = shortP;
      }

      {
        Link Ox = arg.qProd(0, point_d, 1-parity);
        Link Ow = mu_positive ? Oy*Ox : conj(Ox)*conj(Oy);

        auto mycoeff = CoeffSign(goes_forward(arg.sig), parity)*CoeffSign(goes_forward(arg.mu),parity)*arg.coeff;

        Link oprod = arg.outA(mu_positive ? arg.mu : opp_dir(arg.mu), mu_positive ? point_d : e_cb, mu_positive ?
1-parity : parity) = oprod;
      }
    }

    // Flop count, in two-number pair (matrix_mult, matrix_add)
    // (0,1)
    template <int mu_positive, typename Arg>
    __global__ void sideLinkShortKernel(Arg arg)
    {
      typedef Matrix<complex<typename Arg::real>, Arg::nColor> Link;

      int x_cb = blockIdx.x * blockDim.x + threadIdx.x;
      if (x_cb >= arg.threads) return;
      int parity = blockIdx.y * blockDim.y + threadIdx.y;

      int x[4];
      getCoords(x, x_cb, arg.D, parity);
      for (int d=0; d<4; d++) x[d] = x[d] + arg.base_idx[d];
      int e_cb = linkIndex(x,arg.E);
      parity = parity ^ arg.oddness_change;

      /*      compute the side link contribution to the momentum
       *
       *             sig
       *          A________B
       *           |       |   mu
       *         D |       |C
       *
       *      A is the current point (x_cb)
       *
       */

      int mymu = posDir(arg.mu);
      int y[4] = {x[0], x[1], x[2], x[3]};
      updateCoords(y, mymu, (mu_positive ? -1 : 1), arg);
      int point_d = mu_positive ? linkIndex(y,arg.E) : e_cb;

      int parity_ = mu_positive ? 1-parity : parity;
      auto mycoeff = CoeffSign(goes_forward(arg.sig),parity)*CoeffSign(goes_forward(arg.mu),parity)*arg.coeff;

      Link Oy = arg.p3(0, e_cb, parity);
      Link oprod = arg.outA(posDir(arg.mu), point_d, parity_);
      oprod += mu_positive ? mycoeff * Oy : mycoeff * conj(Oy);
      arg.outA(posDir(arg.mu), point_d, parity_) = oprod;
    }

    template <typename Arg>
    class FatLinkForce : public TunableVectorYZ {

      Arg &arg;
      const GaugeField &meta;
      const HisqForceType type;

      unsigned int minThreads() const { return arg.threads; }
      bool tuneGridDim() const { return false; }

    public:
      FatLinkForce(Arg &arg, const GaugeField &meta, int sig, int mu, HisqForceType type)
        : TunableVectorYZ(2,type == FORCE_ONE_LINK ? 4 : 1), arg(arg), meta(meta), type(type)
      {
        arg.sig = sig;
        arg.mu = mu;
      }

      TuneKey tuneKey() const {
        std::stringstream aux;
        aux << meta.AuxString() << comm_dim_partitioned_string() << ",threads=" << arg.threads;
        if (type == FORCE_MIDDLE_LINK || type == FORCE_LEPAGE_MIDDLE_LINK)
          aux << ",sig=" << arg.sig << ",mu=" << arg.mu << ",pMu=" << arg.p_mu << ",q_mu=" << arg.q_mu << ",q_prev=" << arg.q_prev;
        else if (type != FORCE_ONE_LINK)
          aux << ",mu=" << arg.mu; // no sig dependence needed for side link

        switch (type) {
        case FORCE_ONE_LINK:           aux << ",ONE_LINK";           break;
        case FORCE_ALL_LINK:           aux << ",ALL_LINK";           break;
        case FORCE_MIDDLE_LINK:        aux << ",MIDDLE_LINK";        break;
        case FORCE_LEPAGE_MIDDLE_LINK: aux << ",LEPAGE_MIDDLE_LINK"; break;
        case FORCE_SIDE_LINK:          aux << ",SIDE_LINK";          break;
        case FORCE_SIDE_LINK_SHORT:    aux << ",SIDE_LINK_SHORT";    break;
        default: errorQuda("Undefined force type %d", type);
        }
        return TuneKey(meta.VolString(), typeid(*this).name(), aux.str().c_str());
      }

      void apply(const hipStream_t &stream)
      {
        TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
        switch (type) {
        case FORCE_ONE_LINK:
          hipLaunchKernelGGL(( oneLinkTermKernel<Arg>) , dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
          break;
        case FORCE_ALL_LINK:
          if (goes_forward(arg.sig) && goes_forward(arg.mu))
            hipLaunchKernelGGL(( allLinkKernel<1,1,Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
          else if (goes_forward(arg.sig) && goes_backward(arg.mu))
            hipLaunchKernelGGL(( allLinkKernel<1,0,Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
          else if (goes_backward(arg.sig) && goes_forward(arg.mu))
            hipLaunchKernelGGL(( allLinkKernel<0,1,Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
          else
            hipLaunchKernelGGL(( allLinkKernel<0,0,Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg);
          break;
        case FORCE_MIDDLE_LINK:
          if (!arg.p_mu || !arg.q_mu) errorQuda("Expect p_mu=%d and q_mu=%d to both be true", arg.p_mu,
arg.q_mu); if (arg.q_prev) { if (goes_forward(arg.sig) && goes_forward(arg.mu)) hipLaunchKernelGGL(( middleLinkKernel<1,1,true,true,true,Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); else if (goes_forward(arg.sig) && goes_backward(arg.mu)) hipLaunchKernelGGL(( middleLinkKernel<1,0,true,true,true,Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); else if (goes_backward(arg.sig) && goes_forward(arg.mu)) hipLaunchKernelGGL(( middleLinkKernel<0,1,true,true,true,Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); else hipLaunchKernelGGL(( middleLinkKernel<0,0,true,true,true,Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); } else { if (goes_forward(arg.sig) && goes_forward(arg.mu)) hipLaunchKernelGGL(( middleLinkKernel<1,1,true,true,false,Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); else if (goes_forward(arg.sig) && goes_backward(arg.mu)) hipLaunchKernelGGL(( middleLinkKernel<1,0,true,true,false,Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); else if (goes_backward(arg.sig) && goes_forward(arg.mu)) hipLaunchKernelGGL(( middleLinkKernel<0,1,true,true,false,Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); else hipLaunchKernelGGL(( middleLinkKernel<0,0,true,true,false,Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); } break; case FORCE_LEPAGE_MIDDLE_LINK: if (arg.p_mu || arg.q_mu || !arg.q_prev) errorQuda("Expect p_mu=%d and q_mu=%d to both be false and q_prev=%d true", arg.p_mu, arg.q_mu, arg.q_prev); if (goes_forward(arg.sig) && goes_forward(arg.mu)) hipLaunchKernelGGL(( middleLinkKernel<1,1,false,false,true,Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); else if (goes_forward(arg.sig) && goes_backward(arg.mu)) hipLaunchKernelGGL(( middleLinkKernel<1,0,false,false,true,Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); else if (goes_backward(arg.sig) && goes_forward(arg.mu)) hipLaunchKernelGGL(( middleLinkKernel<0,1,false,false,true,Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); else hipLaunchKernelGGL(( middleLinkKernel<0,0,false,false,true,Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break; case FORCE_SIDE_LINK: if (goes_forward(arg.mu))hipLaunchKernelGGL(( sideLinkKernel<1,Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); else hipLaunchKernelGGL(( sideLinkKernel<0,Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break; case FORCE_SIDE_LINK_SHORT: if (goes_forward(arg.mu))hipLaunchKernelGGL(( sideLinkShortKernel<1,Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); else hipLaunchKernelGGL(( sideLinkShortKernel<0,Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break; default: errorQuda("Undefined force type %d", type); } } void preTune() { switch (type) { case FORCE_ONE_LINK: arg.outA.save(); break; case FORCE_ALL_LINK: arg.outA.save(); arg.outB.save(); break; case FORCE_MIDDLE_LINK: arg.pMu.save(); arg.qMu.save(); case FORCE_LEPAGE_MIDDLE_LINK: arg.outA.save(); arg.p3.save(); break; case FORCE_SIDE_LINK: arg.outB.save(); case FORCE_SIDE_LINK_SHORT: arg.outA.save(); break; default: errorQuda("Undefined force type %d", type); } } void postTune() { switch (type) { case FORCE_ONE_LINK: arg.outA.load(); break; case FORCE_ALL_LINK: arg.outA.load(); arg.outB.load(); break; case FORCE_MIDDLE_LINK: arg.pMu.load(); arg.qMu.load(); case FORCE_LEPAGE_MIDDLE_LINK: arg.outA.load(); arg.p3.load(); break; case FORCE_SIDE_LINK: 
arg.outB.load(); case FORCE_SIDE_LINK_SHORT: arg.outA.load(); break; default: errorQuda("Undefined force type %d", type); } } long long flops() const { switch (type) { case FORCE_ONE_LINK: return 2*4*arg.threads*36ll; case FORCE_ALL_LINK: return 2*arg.threads*(goes_forward(arg.sig) ? 1242ll : 828ll); case FORCE_MIDDLE_LINK: case FORCE_LEPAGE_MIDDLE_LINK: return 2*arg.threads*(2 * 198 + (!arg.q_prev && goes_forward(arg.sig) ? 198 : 0) + (arg.q_prev && (arg.q_mu || goes_forward(arg.sig) ) ? 198 : 0) + ((arg.q_prev && goes_forward(arg.sig) ) ? 198 : 0) + ( goes_forward(arg.sig) ? 216 : 0) ); case FORCE_SIDE_LINK: return 2*arg.threads*2*234; case FORCE_SIDE_LINK_SHORT: return 2*arg.threads*36; default: errorQuda("Undefined force type %d", type); } return 0; } long long bytes() const { switch (type) { case FORCE_ONE_LINK: return 2*4*arg.threads*( arg.oProd.Bytes() + 2*arg.outA.Bytes() ); case FORCE_ALL_LINK: return 2*arg.threads*( (goes_forward(arg.sig) ? 4 : 2)*arg.outA.Bytes() + 3*arg.link.Bytes() + arg.oProd.Bytes() + arg.qPrev.Bytes() + 2*arg.outB.Bytes()); case FORCE_MIDDLE_LINK: case FORCE_LEPAGE_MIDDLE_LINK: return 2*arg.threads*( ( goes_forward(arg.sig) ? 2*arg.outA.Bytes() : 0 ) + (arg.p_mu ? arg.pMu.Bytes() : 0) + (arg.q_mu ? arg.qMu.Bytes() : 0) + ( ( goes_forward(arg.sig) || arg.q_mu ) ? arg.qPrev.Bytes() : 0) + arg.p3.Bytes() + 3*arg.link.Bytes() + arg.oProd.Bytes() ); case FORCE_SIDE_LINK: return 2*arg.threads*( 2*arg.outA.Bytes() + 2*arg.outB.Bytes() + arg.p3.Bytes() + arg.link.Bytes() + arg.qProd.Bytes() ); case FORCE_SIDE_LINK_SHORT: return 2*arg.threads*( 2*arg.outA.Bytes() + arg.p3.Bytes() ); default: errorQuda("Undefined force type %d", type); } return 0; } }; template <typename real, int nColor, QudaReconstructType recon> struct HisqStaplesForce { HisqStaplesForce(GaugeField &Pmu, GaugeField &P3, GaugeField &P5, GaugeField &Pnumu, GaugeField &Qmu, GaugeField &Qnumu, GaugeField &newOprod, const GaugeField &oprod, const GaugeField &link, const double *path_coeff_array) { PathCoefficients<real> act_path_coeff(path_coeff_array); real OneLink = act_path_coeff.one; real ThreeSt = act_path_coeff.three; real mThreeSt = -ThreeSt; real FiveSt = act_path_coeff.five; real mFiveSt = -FiveSt; real SevenSt = act_path_coeff.seven; real Lepage = act_path_coeff.lepage; real mLepage = -Lepage; FatLinkArg<real, nColor> arg(newOprod, oprod, link, OneLink, FORCE_ONE_LINK); FatLinkForce<decltype(arg)> oneLink(arg, link, 0, 0, FORCE_ONE_LINK); oneLink.apply(0); for (int sig=0; sig<8; sig++) { for (int mu=0; mu<8; mu++) { if ( (mu == sig) || (mu == opp_dir(sig))) continue; //3-link //Kernel A: middle link FatLinkArg<real, nColor> middleLinkArg( newOprod, Pmu, P3, Qmu, oprod, link, mThreeSt, 2, FORCE_MIDDLE_LINK); FatLinkForce<decltype(arg)> middleLink(middleLinkArg, link, sig, mu, FORCE_MIDDLE_LINK); middleLink.apply(0); for (int nu=0; nu < 8; nu++) { if (nu == sig || nu == opp_dir(sig) || nu == mu || nu == opp_dir(mu)) continue; //5-link: middle link //Kernel B FatLinkArg<real, nColor> middleLinkArg( newOprod, Pnumu, P5, Qnumu, Pmu, Qmu, link, FiveSt, 1, FORCE_MIDDLE_LINK); FatLinkForce<decltype(arg)> middleLink(middleLinkArg, link, sig, nu, FORCE_MIDDLE_LINK); middleLink.apply(0); for (int rho = 0; rho < 8; rho++) { if (rho == sig || rho == opp_dir(sig) || rho == mu || rho == opp_dir(mu) || rho == nu || rho == opp_dir(nu)) continue; //7-link: middle link and side link FatLinkArg<real, nColor> arg(newOprod, P5, Pnumu, Qnumu, link, SevenSt, FiveSt != 0 ? 
SevenSt/FiveSt : 0, 1, FORCE_ALL_LINK, true); FatLinkForce<decltype(arg)> all(arg, link, sig, rho, FORCE_ALL_LINK); all.apply(0); }//rho //5-link: side link FatLinkArg<real, nColor> arg(newOprod, P3, P5, Qmu, link, mFiveSt, (ThreeSt != 0 ? FiveSt/ThreeSt : 0), 1, FORCE_SIDE_LINK); FatLinkForce<decltype(arg)> side(arg, link, sig, nu, FORCE_SIDE_LINK); side.apply(0); } //nu //lepage if (Lepage != 0.) { FatLinkArg<real, nColor> middleLinkArg( newOprod, P5, Pmu, Qmu, link, Lepage, 2, FORCE_LEPAGE_MIDDLE_LINK); FatLinkForce<decltype(arg)> middleLink(middleLinkArg, link, sig, mu, FORCE_LEPAGE_MIDDLE_LINK); middleLink.apply(0); FatLinkArg<real, nColor> arg(newOprod, P3, P5, Qmu, link, mLepage, (ThreeSt != 0 ? Lepage/ThreeSt : 0), 2, FORCE_SIDE_LINK); FatLinkForce<decltype(arg)> side(arg, link, sig, mu, FORCE_SIDE_LINK); side.apply(0); } // Lepage != 0.0 // 3-link side link FatLinkArg<real, nColor> arg(newOprod, P3, link, ThreeSt, 1, FORCE_SIDE_LINK_SHORT); FatLinkForce<decltype(arg)> side(arg, P3, sig, mu, FORCE_SIDE_LINK_SHORT); side.apply(0); }//mu }//sig } }; void hisqStaplesForce(GaugeField &newOprod, const GaugeField &oprod, const GaugeField &link, const double path_coeff_array[6]) { if (!link.isNative()) errorQuda("Unsupported gauge order %d", link.Order()); if (!oprod.isNative()) errorQuda("Unsupported gauge order %d", oprod.Order()); if (!newOprod.isNative()) errorQuda("Unsupported gauge order %d", newOprod.Order()); if (checkLocation(newOprod,oprod,link) == QUDA_CPU_FIELD_LOCATION) errorQuda("CPU not implemented"); // create color matrix fields with zero padding GaugeFieldParam gauge_param(link); gauge_param.reconstruct = QUDA_RECONSTRUCT_NO; gauge_param.order = QUDA_FLOAT2_GAUGE_ORDER; gauge_param.geometry = QUDA_SCALAR_GEOMETRY; cudaGaugeField Pmu(gauge_param); cudaGaugeField P3(gauge_param); cudaGaugeField P5(gauge_param); cudaGaugeField Pnumu(gauge_param); cudaGaugeField Qmu(gauge_param); cudaGaugeField Qnumu(gauge_param); QudaPrecision precision = checkPrecision(oprod, link, newOprod); instantiate<HisqStaplesForce, ReconstructNone>(Pmu, P3, P5, Pnumu, Qmu, Qnumu, newOprod, oprod, link, path_coeff_array); hipDeviceSynchronize(); checkCudaError(); } template <typename real, int nColor, QudaReconstructType reconstruct=QUDA_RECONSTRUCT_NO> struct CompleteForceArg : public BaseForceArg<real, nColor, reconstruct> { typedef typename gauge_mapper<real,QUDA_RECONSTRUCT_NO>::type F; F outA; // force output accessor const F oProd; // force input accessor const real coeff; CompleteForceArg(GaugeField &force, const GaugeField &link) : BaseForceArg<real, nColor, reconstruct>(link, 0), outA(force), oProd(force), coeff(0.0) { } }; // Flops count: 4 matrix multiplications per lattice site = 792 Flops per site template <typename Arg> __global__ void completeForceKernel(Arg arg) { typedef Matrix<complex<typename Arg::real>, Arg::nColor> Link; int x_cb = blockIdx.x * blockDim.x + threadIdx.x; if (x_cb >= arg.threads) return; int parity = blockIdx.y * blockDim.y + threadIdx.y; int x[4]; getCoords(x, x_cb, arg.X, parity); for (int d=0; d<4; d++) x[d] += arg.border[d]; int e_cb = linkIndex(x,arg.E); #pragma unroll for (int sig=0; sig<4; ++sig) { Link Uw = arg.link(sig, e_cb, parity); Link Ox = arg.oProd(sig, e_cb, parity); Link Ow = Uw*Ox; makeAntiHerm(Ow); typename Arg::real coeff = (parity==1) ? 
-1.0 : 1.0; arg.outA(sig, e_cb, parity) = coeff*Ow; } } template <typename real, int nColor, QudaReconstructType reconstruct=QUDA_RECONSTRUCT_NO> struct LongLinkArg : public BaseForceArg<real, nColor, reconstruct> { typedef typename gauge::FloatNOrder<real,18,2,11> M; typedef typename gauge_mapper<real,QUDA_RECONSTRUCT_NO>::type F; F outA; const F oProd; const real coeff; LongLinkArg(GaugeField &newOprod, const GaugeField &link, const GaugeField &oprod, real coeff) : BaseForceArg<real, nColor, reconstruct>(link,0), outA(newOprod), oProd(oprod), coeff(coeff) { } }; // Flops count, in two-number pair (matrix_mult, matrix_add) // (24, 12) // 4968 Flops per site in total template <typename Arg> __global__ void longLinkKernel(Arg arg) { typedef Matrix<complex<typename Arg::real>, Arg::nColor> Link; int x_cb = blockIdx.x * blockDim.x + threadIdx.x; if (x_cb >= arg.threads) return; int parity = blockIdx.y * blockDim.y + threadIdx.y; int x[4]; int dx[4] = {0,0,0,0}; getCoords(x, x_cb, arg.X, parity); for (int i=0; i<4; i++) x[i] += arg.border[i]; int e_cb = linkIndex(x,arg.E); /* * * A B C D E * ---- ---- ---- ---- * * ---> sig direction * * C is the current point (sid) * */ // compute the force for forward long links #pragma unroll for (int sig=0; sig<4; sig++) { int point_c = e_cb; dx[sig]++; int point_d = linkIndexShift(x,dx,arg.E); dx[sig]++; int point_e = linkIndexShift(x,dx,arg.E); dx[sig] = -1; int point_b = linkIndexShift(x,dx,arg.E); dx[sig]--; int point_a = linkIndexShift(x,dx,arg.E); dx[sig] = 0; Link Uab = arg.link(sig, point_a, parity); Link Ubc = arg.link(sig, point_b, 1-parity); Link Ude = arg.link(sig, point_d, 1-parity); Link Uef = arg.link(sig, point_e, parity); Link Oz = arg.oProd(sig, point_c, parity); Link Oy = arg.oProd(sig, point_b, 1-parity); Link Ox = arg.oProd(sig, point_a, parity); Link temp = Ude*Uef*Oz - Ude*Oy*Ubc + Ox*Uab*Ubc; Link force = arg.outA(sig, e_cb, parity); arg.outA(sig, e_cb, parity) = force + arg.coeff*temp; } // loop over sig } template <typename Arg> class HisqForce : public TunableVectorY { Arg &arg; const GaugeField &meta; const HisqForceType type; unsigned int minThreads() const { return arg.threads; } bool tuneGridDim() const { return false; } public: HisqForce(Arg &arg, const GaugeField &meta, int sig, int mu, HisqForceType type) : TunableVectorY(2), arg(arg), meta(meta), type(type) { arg.sig = sig; arg.mu = mu; } void apply(const hipStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); switch (type) { case FORCE_LONG_LINK: hipLaunchKernelGGL(( longLinkKernel<Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break; case FORCE_COMPLETE: hipLaunchKernelGGL(( completeForceKernel<Arg>), dim3(tp.grid),dim3(tp.block),tp.shared_bytes,stream, arg); break; default: errorQuda("Undefined force type %d", type); } } TuneKey tuneKey() const { std::stringstream aux; aux << meta.AuxString() << comm_dim_partitioned_string() << ",threads=" << arg.threads; switch (type) { case FORCE_LONG_LINK: aux << ",LONG_LINK"; break; case FORCE_COMPLETE: aux << ",COMPLETE"; break; default: errorQuda("Undefined force type %d", type); } return TuneKey(meta.VolString(), typeid(*this).name(), aux.str().c_str()); } void preTune() { switch (type) { case FORCE_LONG_LINK: case FORCE_COMPLETE: arg.outA.save(); break; default: errorQuda("Undefined force type %d", type); } } void postTune() { switch (type) { case FORCE_LONG_LINK: case FORCE_COMPLETE: arg.outA.load(); break; default: errorQuda("Undefined force type %d", type); } } long long 
flops() const { switch (type) { case FORCE_LONG_LINK: return 2*arg.threads*4968ll; case FORCE_COMPLETE: return 2*arg.threads*792ll; default: errorQuda("Undefined force type %d", type); } return 0; } long long bytes() const { switch (type) { case FORCE_LONG_LINK: return 4*2*arg.threads*(2*arg.outA.Bytes() + 4*arg.link.Bytes() + 3*arg.oProd.Bytes()); case FORCE_COMPLETE: return 4*2*arg.threads*(arg.outA.Bytes() + arg.link.Bytes() + arg.oProd.Bytes()); default: errorQuda("Undefined force type %d", type); } return 0; } }; template <typename real, int nColor, QudaReconstructType recon> struct HisqLongLinkForce { HisqLongLinkForce(GaugeField &newOprod, const GaugeField &oldOprod, const GaugeField &link, double coeff) { LongLinkArg<real, nColor, recon> arg(newOprod, link, oldOprod, coeff); HisqForce<decltype(arg)> longLink(arg, link, 0, 0, FORCE_LONG_LINK); longLink.apply(0); hipDeviceSynchronize(); checkCudaError(); } }; void hisqLongLinkForce(GaugeField &newOprod, const GaugeField &oldOprod, const GaugeField &link, double coeff) { if (!link.isNative()) errorQuda("Unsupported gauge order %d", link.Order()); if (!oldOprod.isNative()) errorQuda("Unsupported gauge order %d", oldOprod.Order()); if (!newOprod.isNative()) errorQuda("Unsupported gauge order %d", newOprod.Order()); if (checkLocation(newOprod,oldOprod,link) == QUDA_CPU_FIELD_LOCATION) errorQuda("CPU not implemented"); checkPrecision(newOprod, link, oldOprod); instantiate<HisqLongLinkForce, ReconstructNone>(newOprod, oldOprod, link, coeff); } template <typename real, int nColor, QudaReconstructType recon> struct HisqCompleteForce { HisqCompleteForce(GaugeField &force, const GaugeField &link) { CompleteForceArg<real, nColor, recon> arg(force, link); HisqForce<decltype(arg)> completeForce(arg, link, 0, 0, FORCE_COMPLETE); completeForce.apply(0); hipDeviceSynchronize(); checkCudaError(); } }; void hisqCompleteForce(GaugeField &force, const GaugeField &link) { if (!link.isNative()) errorQuda("Unsupported gauge order %d", link.Order()); if (!force.isNative()) errorQuda("Unsupported gauge order %d", force.Order()); if (checkLocation(force,link) == QUDA_CPU_FIELD_LOCATION) errorQuda("CPU not implemented"); checkPrecision(link, force); instantiate<HisqCompleteForce, ReconstructNone>(force, link); } } // namespace fermion_force } // namespace quda #endif // GPU_HISQ_FORCE
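/*
 * A small, self-contained host-side sketch (not QUDA code) that re-states the
 * constexpr direction/parity helpers used by the kernels above, to make the
 * conventions concrete: directions 0..3 go forward (XUP..TUP), 4..7 go
 * backward (TDOWN..XDOWN), opp_dir(d) = 7-d, and CoeffSign/Sign encode the
 * parity-dependent sign flips. The sk_ prefix avoids clashing with the file's
 * own names; the grid sizes in main() are illustrative only.
 */
#include <cassert>

constexpr int sk_opp_dir(int dir) { return 7 - dir; }
constexpr int sk_goes_forward(int dir) { return dir <= 3; }
constexpr int sk_goes_backward(int dir) { return dir > 3; }
constexpr int sk_posDir(int dir) { return (dir >= 4) ? 7 - dir : dir; }
constexpr int sk_CoeffSign(int pos_dir, int odd_lattice) { return 2 * ((pos_dir + odd_lattice + 1) & 1) - 1; }
constexpr int sk_Sign(int parity) { return parity ? -1 : 1; }

// periodic coordinate shift on an extended grid, matching updateCoords above
inline void sk_updateCoord(int x[4], int dir, int shift, const int E[4]) {
  x[dir] = (x[dir] + shift + E[dir]) % E[dir];
}

int main() {
  static_assert(sk_opp_dir(0) == 7 && sk_opp_dir(3) == 4, "XUP<->XDOWN, TUP<->TDOWN");
  static_assert(sk_goes_forward(2) && sk_goes_backward(5), "0..3 forward, 4..7 backward");
  static_assert(sk_posDir(6) == 1 && sk_posDir(1) == 1, "posDir folds backward dirs onto 0..3");
  static_assert(sk_CoeffSign(1, 0) == -1 && sk_CoeffSign(0, 0) == 1, "sign flips with direction+parity");
  static_assert(sk_Sign(0) == 1 && sk_Sign(1) == -1, "even parity -> +1, odd -> -1");

  int E[4] = {8, 8, 8, 16};    // extended grid dims (example values)
  int x[4] = {0, 0, 0, 0};
  sk_updateCoord(x, 0, -1, E); // a -1 shift wraps around periodically
  assert(x[0] == 7);
  return 0;
}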
aa2e92220226325f23b366861e866f22d499d6ee.cu
#include <utility> #include <typeinfo> #include <quda_internal.h> #include <gauge_field.h> #include <ks_improved_force.h> #include <quda_matrix.h> #include <tune_quda.h> #include <index_helper.cuh> #include <gauge_field_order.h> #include <instantiate.h> #ifdef GPU_HISQ_FORCE namespace quda { namespace fermion_force { enum { XUP = 0, YUP = 1, ZUP = 2, TUP = 3, TDOWN = 4, ZDOWN = 5, YDOWN = 6, XDOWN = 7 }; enum HisqForceType { FORCE_ALL_LINK, FORCE_MIDDLE_LINK, FORCE_LEPAGE_MIDDLE_LINK, FORCE_SIDE_LINK, FORCE_SIDE_LINK_SHORT, FORCE_LONG_LINK, FORCE_COMPLETE, FORCE_ONE_LINK, FORCE_INVALID }; constexpr int opp_dir(int dir) { return 7-dir; } constexpr int goes_forward(int dir) { return dir<=3; } constexpr int goes_backward(int dir) { return dir>3; } constexpr int CoeffSign(int pos_dir, int odd_lattice) { return 2*((pos_dir + odd_lattice + 1) & 1) - 1; } constexpr int Sign(int parity) { return parity ? -1 : 1; } constexpr int posDir(int dir) { return (dir >= 4) ? 7-dir : dir; } template <int dir, typename Arg> constexpr void updateCoords(int x[], int shift, const Arg &arg) { x[dir] = (x[dir] + shift + arg.E[dir]) % arg.E[dir]; } template <typename Arg> constexpr void updateCoords(int x[], int dir, int shift, const Arg &arg) { switch (dir) { case 0: updateCoords<0>(x, shift, arg); break; case 1: updateCoords<1>(x, shift, arg); break; case 2: updateCoords<2>(x, shift, arg); break; case 3: updateCoords<3>(x, shift, arg); break; } } //struct for holding the fattening path coefficients template <typename real> struct PathCoefficients { const real one; const real three; const real five; const real seven; const real naik; const real lepage; PathCoefficients(const double *path_coeff_array) : one(path_coeff_array[0]), naik(path_coeff_array[1]), three(path_coeff_array[2]), five(path_coeff_array[3]), seven(path_coeff_array[4]), lepage(path_coeff_array[5]) { } }; template <typename real_, int nColor_, QudaReconstructType reconstruct=QUDA_RECONSTRUCT_NO> struct BaseForceArg { using real = real_; static constexpr int nColor = nColor_; typedef typename gauge_mapper<real,reconstruct>::type G; const G link; int threads; int X[4]; // regular grid dims int D[4]; // working set grid dims int E[4]; // extended grid dims int commDim[4]; int border[4]; int base_idx[4]; // the offset into the extended field int oddness_change; int mu; int sig; /** @param[in] link Gauge field @param[in] overlap Radius of additional redundant computation to do */ BaseForceArg(const GaugeField &link, int overlap) : link(link), threads(1), commDim{ comm_dim_partitioned(0), comm_dim_partitioned(1), comm_dim_partitioned(2), comm_dim_partitioned(3) } { for (int d=0; d<4; d++) { E[d] = link.X()[d]; border[d] = link.R()[d]; X[d] = E[d] - 2*border[d]; D[d] = comm_dim_partitioned(d) ? X[d]+overlap*2 : X[d]; base_idx[d] = comm_dim_partitioned(d) ? 
border[d]-overlap : 0; threads *= D[d]; } threads /= 2; oddness_change = (base_idx[0] + base_idx[1] + base_idx[2] + base_idx[3])&1; } }; template <typename real, int nColor, QudaReconstructType reconstruct=QUDA_RECONSTRUCT_NO> struct FatLinkArg : public BaseForceArg<real, nColor, reconstruct> { using BaseForceArg = BaseForceArg<real, nColor, reconstruct>; typedef typename gauge_mapper<real,QUDA_RECONSTRUCT_NO>::type F; F outA; F outB; F pMu; F p3; F qMu; const F oProd; const F qProd; const F qPrev; const real coeff; const real accumu_coeff; const bool p_mu; const bool q_mu; const bool q_prev; FatLinkArg(GaugeField &force, const GaugeField &oProd, const GaugeField &link, real coeff, HisqForceType type) : BaseForceArg(link, 0), outA(force), outB(force), pMu(oProd), p3(oProd), qMu(oProd), oProd(oProd), qProd(oProd), qPrev(oProd), coeff(coeff), accumu_coeff(0), p_mu(false), q_mu(false), q_prev(false) { if (type != FORCE_ONE_LINK) errorQuda("This constructor is for FORCE_ONE_LINK"); } FatLinkArg(GaugeField &newOprod, GaugeField &pMu, GaugeField &P3, GaugeField &qMu, const GaugeField &oProd, const GaugeField &qPrev, const GaugeField &link, real coeff, int overlap, HisqForceType type) : BaseForceArg(link, overlap), outA(newOprod), outB(newOprod), pMu(pMu), p3(P3), qMu(qMu), oProd(oProd), qProd(oProd), qPrev(qPrev), coeff(coeff), accumu_coeff(0), p_mu(true), q_mu(true), q_prev(true) { if (type != FORCE_MIDDLE_LINK) errorQuda("This constructor is for FORCE_MIDDLE_LINK"); } FatLinkArg(GaugeField &newOprod, GaugeField &pMu, GaugeField &P3, GaugeField &qMu, const GaugeField &oProd, const GaugeField &link, real coeff, int overlap, HisqForceType type) : BaseForceArg(link, overlap), outA(newOprod), outB(newOprod), pMu(pMu), p3(P3), qMu(qMu), oProd(oProd), qProd(oProd), qPrev(qMu), coeff(coeff), accumu_coeff(0), p_mu(true), q_mu(true), q_prev(false) { if (type != FORCE_MIDDLE_LINK) errorQuda("This constructor is for FORCE_MIDDLE_LINK"); } FatLinkArg(GaugeField &newOprod, GaugeField &P3, const GaugeField &oProd, const GaugeField &qPrev, const GaugeField &link, real coeff, int overlap, HisqForceType type) : BaseForceArg(link, overlap), outA(newOprod), outB(newOprod), pMu(P3), p3(P3), qMu(qPrev), oProd(oProd), qProd(oProd), qPrev(qPrev), coeff(coeff), accumu_coeff(0), p_mu(false), q_mu(false), q_prev(true) { if (type != FORCE_LEPAGE_MIDDLE_LINK) errorQuda("This constructor is for FORCE_MIDDLE_LINK"); } FatLinkArg(GaugeField &newOprod, GaugeField &shortP, const GaugeField &P3, const GaugeField &qProd, const GaugeField &link, real coeff, real accumu_coeff, int overlap, HisqForceType type) : BaseForceArg(link, overlap), outA(newOprod), outB(shortP), pMu(P3), p3(P3), qMu(qProd), oProd(qProd), qProd(qProd), qPrev(qProd), coeff(coeff), accumu_coeff(accumu_coeff), p_mu(false), q_mu(false), q_prev(false) { if (type != FORCE_SIDE_LINK) errorQuda("This constructor is for FORCE_SIDE_LINK or FORCE_ALL_LINK"); } FatLinkArg(GaugeField &newOprod, GaugeField &P3, const GaugeField &link, real coeff, int overlap, HisqForceType type) : BaseForceArg(link, overlap), outA(newOprod), outB(newOprod), pMu(P3), p3(P3), qMu(P3), oProd(P3), qProd(P3), qPrev(P3), coeff(coeff), accumu_coeff(0.0), p_mu(false), q_mu(false), q_prev(false) { if (type != FORCE_SIDE_LINK_SHORT) errorQuda("This constructor is for FORCE_SIDE_LINK_SHORT"); } FatLinkArg(GaugeField &newOprod, GaugeField &shortP, const GaugeField &oProd, const GaugeField &qPrev, const GaugeField &link, real coeff, real accumu_coeff, int overlap, HisqForceType type, bool dummy) : 
BaseForceArg(link, overlap), outA(newOprod), outB(shortP), oProd(oProd), qPrev(qPrev), pMu(shortP), p3(shortP), qMu(qPrev), qProd(qPrev), // dummy coeff(coeff), accumu_coeff(accumu_coeff), p_mu(false), q_mu(false), q_prev(false) { if (type != FORCE_ALL_LINK) errorQuda("This constructor is for FORCE_ALL_LINK"); } }; template <typename Arg> __global__ void oneLinkTermKernel(Arg arg) { typedef Matrix<complex<typename Arg::real>, Arg::nColor> Link; int x_cb = blockIdx.x * blockDim.x + threadIdx.x; if (x_cb >= arg.threads) return; int parity = blockIdx.y * blockDim.y + threadIdx.y; int sig = blockIdx.z * blockDim.z + threadIdx.z; if (sig >= 4) return; int x[4]; getCoords(x, x_cb, arg.X, parity); #pragma unroll for (int d=0; d<4; d++) x[d] += arg.border[d]; int e_cb = linkIndex(x,arg.E); Link w = arg.oProd(sig, e_cb, parity); Link force = arg.outA(sig, e_cb, parity); force += arg.coeff * w; arg.outA(sig, e_cb, parity) = force; } /********************************allLinkKernel********************************************* * * In this function we need * READ * 3 LINKS: ad_link, ab_link, bc_link * 5 COLOR MATRIX: Qprev_at_D, oprod_at_C, newOprod_at_A(sig), newOprod_at_D/newOprod_at_A(mu), shortP_at_D * WRITE: * 3 COLOR MATRIX: newOprod_at_A(sig), newOprod_at_D/newOprod_at_A(mu), shortP_at_D, * * If sig is negative, then we don't need to read/write the color matrix newOprod_at_A(sig) * * Therefore the data traffic, in two-number pair (num_of_link, num_of_color_matrix) * * if (sig is positive): (3, 8) * else : (3, 6) * * This function is called 384 times, half positive sig, half negative sig * * Flop count, in two-number pair (matrix_multi, matrix_add) * if(sig is positive) (6,3) * else (4,2) * ************************************************************************************************/ template <int sig_positive, int mu_positive, typename Arg> __global__ void allLinkKernel(Arg arg) { typedef Matrix<complex<typename Arg::real>, Arg::nColor> Link; int x_cb = blockIdx.x * blockDim.x + threadIdx.x; if (x_cb >= arg.threads) return; int parity = blockIdx.y * blockDim.y + threadIdx.y; int x[4]; getCoords(x, x_cb, arg.D, parity); for (int d=0; d<4; d++) x[d] += arg.base_idx[d]; int e_cb = linkIndex(x,arg.E); parity = parity^arg.oddness_change; auto mycoeff = CoeffSign(sig_positive,parity)*arg.coeff; int y[4] = {x[0], x[1], x[2], x[3]}; int mysig = posDir(arg.sig); updateCoords(y, mysig, (sig_positive ? 1 : -1), arg); int point_b = linkIndex(y,arg.E); int ab_link_nbr_idx = (sig_positive) ? e_cb : point_b; for (int d=0; d<4; d++) y[d] = x[d]; /* sig * A________B * mu | | * D | |C * * A is the current point (sid) * */ int mu = mu_positive ? arg.mu : opp_dir(arg.mu); int dir = mu_positive ? -1 : 1; updateCoords(y, mu, dir, arg); int point_d = linkIndex(y,arg.E); updateCoords(y, mysig, (sig_positive ? 1 : -1), arg); int point_c = linkIndex(y,arg.E); Link Uab = arg.link(posDir(arg.sig), ab_link_nbr_idx, sig_positive^(1-parity)); Link Uad = arg.link(mu, mu_positive ? point_d : e_cb, mu_positive ? 1-parity : parity); Link Ubc = arg.link(mu, mu_positive ? point_c : point_b, mu_positive ? parity : 1-parity); Link Ox = arg.qPrev(0, point_d, 1-parity); Link Oy = arg.oProd(0, point_c, parity); Link Oz = mu_positive ? conj(Ubc)*Oy : Ubc*Oy; if (sig_positive) { Link force = arg.outA(arg.sig, e_cb, parity); force += Sign(parity)*mycoeff*Oz*Ox* (mu_positive ? Uad : conj(Uad)); arg.outA(arg.sig, e_cb, parity) = force; Oy = Uab*Oz; } else { Oy = conj(Uab)*Oz; } Link force = arg.outA(mu, mu_positive ? 
point_d : e_cb, mu_positive ? 1-parity : parity); force += Sign(mu_positive ? 1-parity : parity)*mycoeff* (mu_positive ? Oy*Ox : conj(Ox)*conj(Oy)); arg.outA(mu, mu_positive ? point_d : e_cb, mu_positive ? 1-parity : parity) = force; Link shortP = arg.outB(0, point_d, 1-parity); shortP += arg.accumu_coeff* (mu_positive ? Uad : conj(Uad)) *Oy; arg.outB(0, point_d, 1-parity) = shortP; } /**************************middleLinkKernel***************************** * * * Generally we need * READ * 3 LINKS: ab_link, bc_link, ad_link * 3 COLOR MATRIX: newOprod_at_A, oprod_at_C, Qprod_at_D * WRITE * 4 COLOR MATRIX: newOprod_at_A, P3_at_A, Pmu_at_B, Qmu_at_A * * Three call variations: * 1. when Qprev == NULL: Qprod_at_D does not exist and is not read in * 2. full read/write * 3. when Pmu/Qmu == NULL, Pmu_at_B and Qmu_at_A are not written out * * In all three above case, if the direction sig is negative, newOprod_at_A is * not read in or written out. * * Therefore the data traffic, in two-number pair (num_of_link, num_of_color_matrix) * Call 1: (called 48 times, half positive sig, half negative sig) * if (sig is positive): (3, 6) * else : (3, 4) * Call 2: (called 192 time, half positive sig, half negative sig) * if (sig is positive): (3, 7) * else : (3, 5) * Call 3: (called 48 times, half positive sig, half negative sig) * if (sig is positive): (3, 5) * else : (3, 2) no need to loadQprod_at_D in this case * * note: oprod_at_C could actually be read in from D when it is the fresh outer product * and we call it oprod_at_C to simply naming. This does not affect our data traffic analysis * * Flop count, in two-number pair (matrix_multi, matrix_add) * call 1: if (sig is positive) (3, 1) * else (2, 0) * call 2: if (sig is positive) (4, 1) * else (3, 0) * call 3: if (sig is positive) (4, 1) * (Lepage) else (2, 0) * ****************************************************************************/ template <int sig_positive, int mu_positive, bool pMu, bool qMu, bool qPrev, typename Arg> __global__ void middleLinkKernel(Arg arg) { typedef Matrix<complex<typename Arg::real>, Arg::nColor> Link; int x_cb = blockIdx.x * blockDim.x + threadIdx.x; if (x_cb >= arg.threads) return; int parity = blockIdx.y * blockDim.y + threadIdx.y; int x[4]; getCoords(x, x_cb, arg.D, parity); /* A________B * mu | | * D| |C * * A is the current point (sid) * */ for (int d=0; d<4; d++) x[d] += arg.base_idx[d]; int e_cb = linkIndex(x,arg.E); parity = parity ^ arg.oddness_change; int y[4] = {x[0], x[1], x[2], x[3]}; int mymu = posDir(arg.mu); updateCoords(y, mymu, (mu_positive ? -1 : 1), arg); int point_d = linkIndex(y, arg.E); int ad_link_nbr_idx = mu_positive ? point_d : e_cb; int mysig = posDir(arg.sig); updateCoords(y, mysig, (sig_positive ? 1 : -1), arg); int point_c = linkIndex(y, arg.E); for (int d=0; d<4; d++) y[d] = x[d]; updateCoords(y, mysig, (sig_positive ? 1 : -1), arg); int point_b = linkIndex(y, arg.E); int bc_link_nbr_idx = mu_positive ? point_c : point_b; int ab_link_nbr_idx = sig_positive ? e_cb : point_b; // load the link variable connecting a and b Link Uab = arg.link(mysig, ab_link_nbr_idx, sig_positive^(1-parity)); // load the link variable connecting b and c Link Ubc = arg.link(mymu, bc_link_nbr_idx, mu_positive^(1-parity)); Link Oy; if (!qPrev) { Oy = arg.oProd(posDir(arg.sig), sig_positive ? point_d : point_c, sig_positive^parity); if (!sig_positive) Oy = conj(Oy); } else { // QprevOdd != NULL Oy = arg.oProd(0, point_c, parity); } Link Ow = !mu_positive ? 
Ubc*Oy : conj(Ubc)*Oy; if (pMu) arg.pMu(0, point_b, 1-parity) = Ow; arg.p3(0, e_cb, parity) = sig_positive ? Uab*Ow : conj(Uab)*Ow; Link Uad = arg.link(mymu, ad_link_nbr_idx, mu_positive^parity); if (!mu_positive) Uad = conj(Uad); if (!qPrev) { if (sig_positive) Oy = Ow*Uad; if ( qMu ) arg.qMu(0, e_cb, parity) = Uad; } else { Link Ox; if ( qMu || sig_positive ) { Oy = arg.qPrev(0, point_d, 1-parity); Ox = Oy*Uad; } if ( qMu ) arg.qMu(0, e_cb, parity) = Ox; if (sig_positive) Oy = Ow*Ox; } if (sig_positive) { Link oprod = arg.outA(arg.sig, e_cb, parity); oprod += arg.coeff*Oy; arg.outA(arg.sig, e_cb, parity) = oprod; } } /***********************************sideLinkKernel*************************** * * In general we need * READ * 1 LINK: ad_link * 4 COLOR MATRIX: shortP_at_D, newOprod, P3_at_A, Qprod_at_D, * WRITE * 2 COLOR MATRIX: shortP_at_D, newOprod, * * Two call variations: * 1. full read/write * 2. when shortP == NULL && Qprod == NULL: * no need to read ad_link/shortP_at_D or write shortP_at_D * Qprod_at_D does not exit and is not read in * * * Therefore the data traffic, in two-number pair (num_of_links, num_of_color_matrix) * Call 1: (called 192 times) * (1, 6) * * Call 2: (called 48 times) * (0, 3) * * note: newOprod can be at point D or A, depending on if mu is postive or negative * * Flop count, in two-number pair (matrix_multi, matrix_add) * call 1: (2, 2) * call 2: (0, 1) * *********************************************************************************/ template <int mu_positive, typename Arg> __global__ void sideLinkKernel(Arg arg) { typedef Matrix<complex<typename Arg::real>, Arg::nColor> Link; int x_cb = blockIdx.x * blockDim.x + threadIdx.x; if (x_cb >= arg.threads) return; int parity = blockIdx.y * blockDim.y + threadIdx.y; int x[4]; getCoords(x, x_cb ,arg.D, parity); for (int d=0; d<4; d++) x[d] = x[d] + arg.base_idx[d]; int e_cb = linkIndex(x,arg.E); parity = parity ^ arg.oddness_change; /* compute the side link contribution to the momentum * * sig * A________B * | | mu * D | |C * * A is the current point (x_cb) * */ int mymu = posDir(arg.mu); int y[4] = {x[0], x[1], x[2], x[3]}; updateCoords(y, mymu, (mu_positive ? -1 : 1), arg); int point_d = linkIndex(y,arg.E); Link Oy = arg.p3(0, e_cb, parity); { int ad_link_nbr_idx = mu_positive ? point_d : e_cb; Link Uad = arg.link(mymu, ad_link_nbr_idx, mu_positive^parity); Link Ow = mu_positive ? Uad*Oy : conj(Uad)*Oy; Link shortP = arg.outB(0, point_d, 1-parity); shortP += arg.accumu_coeff * Ow; arg.outB(0, point_d, 1-parity) = shortP; } { Link Ox = arg.qProd(0, point_d, 1-parity); Link Ow = mu_positive ? Oy*Ox : conj(Ox)*conj(Oy); auto mycoeff = CoeffSign(goes_forward(arg.sig), parity)*CoeffSign(goes_forward(arg.mu),parity)*arg.coeff; Link oprod = arg.outA(mu_positive ? arg.mu : opp_dir(arg.mu), mu_positive ? point_d : e_cb, mu_positive ? 1-parity : parity); oprod += mycoeff * Ow; arg.outA(mu_positive ? arg.mu : opp_dir(arg.mu), mu_positive ? point_d : e_cb, mu_positive ? 
1-parity : parity) = oprod; } } // Flop count, in two-number pair (matrix_mult, matrix_add) // (0,1) template <int mu_positive, typename Arg> __global__ void sideLinkShortKernel(Arg arg) { typedef Matrix<complex<typename Arg::real>, Arg::nColor> Link; int x_cb = blockIdx.x * blockDim.x + threadIdx.x; if (x_cb >= arg.threads) return; int parity = blockIdx.y * blockDim.y + threadIdx.y; int x[4]; getCoords(x, x_cb, arg.D, parity); for (int d=0; d<4; d++) x[d] = x[d] + arg.base_idx[d]; int e_cb = linkIndex(x,arg.E); parity = parity ^ arg.oddness_change; /* compute the side link contribution to the momentum * * sig * A________B * | | mu * D | |C * * A is the current point (x_cb) * */ int mymu = posDir(arg.mu); int y[4] = {x[0], x[1], x[2], x[3]}; updateCoords(y, mymu, (mu_positive ? -1 : 1), arg); int point_d = mu_positive ? linkIndex(y,arg.E) : e_cb; int parity_ = mu_positive ? 1-parity : parity; auto mycoeff = CoeffSign(goes_forward(arg.sig),parity)*CoeffSign(goes_forward(arg.mu),parity)*arg.coeff; Link Oy = arg.p3(0, e_cb, parity); Link oprod = arg.outA(posDir(arg.mu), point_d, parity_); oprod += mu_positive ? mycoeff * Oy : mycoeff * conj(Oy); arg.outA(posDir(arg.mu), point_d, parity_) = oprod; } template <typename Arg> class FatLinkForce : public TunableVectorYZ { Arg &arg; const GaugeField &meta; const HisqForceType type; unsigned int minThreads() const { return arg.threads; } bool tuneGridDim() const { return false; } public: FatLinkForce(Arg &arg, const GaugeField &meta, int sig, int mu, HisqForceType type) : TunableVectorYZ(2,type == FORCE_ONE_LINK ? 4 : 1), arg(arg), meta(meta), type(type) { arg.sig = sig; arg.mu = mu; } TuneKey tuneKey() const { std::stringstream aux; aux << meta.AuxString() << comm_dim_partitioned_string() << ",threads=" << arg.threads; if (type == FORCE_MIDDLE_LINK || type == FORCE_LEPAGE_MIDDLE_LINK) aux << ",sig=" << arg.sig << ",mu=" << arg.mu << ",pMu=" << arg.p_mu << ",q_muu=" << arg.q_mu << ",q_prev=" << arg.q_prev; else if (type != FORCE_ONE_LINK) aux << ",mu=" << arg.mu; // no sig dependence needed for side link switch (type) { case FORCE_ONE_LINK: aux << ",ONE_LINK"; break; case FORCE_ALL_LINK: aux << ",ALL_LINK"; break; case FORCE_MIDDLE_LINK: aux << ",MIDDLE_LINK"; break; case FORCE_LEPAGE_MIDDLE_LINK: aux << ",LEPAGE_MIDDLE_LINK"; break; case FORCE_SIDE_LINK: aux << ",SIDE_LINK"; break; case FORCE_SIDE_LINK_SHORT: aux << ",SIDE_LINK_SHORT"; break; default: errorQuda("Undefined force type %d", type); } return TuneKey(meta.VolString(), typeid(*this).name(), aux.str().c_str()); } void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); switch (type) { case FORCE_ONE_LINK: oneLinkTermKernel<Arg> <<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break; case FORCE_ALL_LINK: if (goes_forward(arg.sig) && goes_forward(arg.mu)) allLinkKernel<1,1,Arg><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); else if (goes_forward(arg.sig) && goes_backward(arg.mu)) allLinkKernel<1,0,Arg><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); else if (goes_backward(arg.sig) && goes_forward(arg.mu)) allLinkKernel<0,1,Arg><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); else allLinkKernel<0,0,Arg><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break; case FORCE_MIDDLE_LINK: if (!arg.p_mu || !arg.q_mu) errorQuda("Expect p_mu=%d and q_mu=%d to both be true", arg.p_mu, arg.q_mu); if (arg.q_prev) { if (goes_forward(arg.sig) && goes_forward(arg.mu)) 
middleLinkKernel<1,1,true,true,true,Arg><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); else if (goes_forward(arg.sig) && goes_backward(arg.mu)) middleLinkKernel<1,0,true,true,true,Arg><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); else if (goes_backward(arg.sig) && goes_forward(arg.mu)) middleLinkKernel<0,1,true,true,true,Arg><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); else middleLinkKernel<0,0,true,true,true,Arg><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); } else { if (goes_forward(arg.sig) && goes_forward(arg.mu)) middleLinkKernel<1,1,true,true,false,Arg><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); else if (goes_forward(arg.sig) && goes_backward(arg.mu)) middleLinkKernel<1,0,true,true,false,Arg><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); else if (goes_backward(arg.sig) && goes_forward(arg.mu)) middleLinkKernel<0,1,true,true,false,Arg><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); else middleLinkKernel<0,0,true,true,false,Arg><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); } break; case FORCE_LEPAGE_MIDDLE_LINK: if (arg.p_mu || arg.q_mu || !arg.q_prev) errorQuda("Expect p_mu=%d and q_mu=%d to both be false and q_prev=%d true", arg.p_mu, arg.q_mu, arg.q_prev); if (goes_forward(arg.sig) && goes_forward(arg.mu)) middleLinkKernel<1,1,false,false,true,Arg><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); else if (goes_forward(arg.sig) && goes_backward(arg.mu)) middleLinkKernel<1,0,false,false,true,Arg><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); else if (goes_backward(arg.sig) && goes_forward(arg.mu)) middleLinkKernel<0,1,false,false,true,Arg><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); else middleLinkKernel<0,0,false,false,true,Arg><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break; case FORCE_SIDE_LINK: if (goes_forward(arg.mu)) sideLinkKernel<1,Arg><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); else sideLinkKernel<0,Arg><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break; case FORCE_SIDE_LINK_SHORT: if (goes_forward(arg.mu)) sideLinkShortKernel<1,Arg><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); else sideLinkShortKernel<0,Arg><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break; default: errorQuda("Undefined force type %d", type); } } void preTune() { switch (type) { case FORCE_ONE_LINK: arg.outA.save(); break; case FORCE_ALL_LINK: arg.outA.save(); arg.outB.save(); break; case FORCE_MIDDLE_LINK: arg.pMu.save(); arg.qMu.save(); case FORCE_LEPAGE_MIDDLE_LINK: arg.outA.save(); arg.p3.save(); break; case FORCE_SIDE_LINK: arg.outB.save(); case FORCE_SIDE_LINK_SHORT: arg.outA.save(); break; default: errorQuda("Undefined force type %d", type); } } void postTune() { switch (type) { case FORCE_ONE_LINK: arg.outA.load(); break; case FORCE_ALL_LINK: arg.outA.load(); arg.outB.load(); break; case FORCE_MIDDLE_LINK: arg.pMu.load(); arg.qMu.load(); case FORCE_LEPAGE_MIDDLE_LINK: arg.outA.load(); arg.p3.load(); break; case FORCE_SIDE_LINK: arg.outB.load(); case FORCE_SIDE_LINK_SHORT: arg.outA.load(); break; default: errorQuda("Undefined force type %d", type); } } long long flops() const { switch (type) { case FORCE_ONE_LINK: return 2*4*arg.threads*36ll; case FORCE_ALL_LINK: return 2*arg.threads*(goes_forward(arg.sig) ? 1242ll : 828ll); case FORCE_MIDDLE_LINK: case FORCE_LEPAGE_MIDDLE_LINK: return 2*arg.threads*(2 * 198 + (!arg.q_prev && goes_forward(arg.sig) ? 198 : 0) + (arg.q_prev && (arg.q_mu || goes_forward(arg.sig) ) ? 198 : 0) + ((arg.q_prev && goes_forward(arg.sig) ) ? 198 : 0) + ( goes_forward(arg.sig) ? 
216 : 0) ); case FORCE_SIDE_LINK: return 2*arg.threads*2*234; case FORCE_SIDE_LINK_SHORT: return 2*arg.threads*36; default: errorQuda("Undefined force type %d", type); } return 0; } long long bytes() const { switch (type) { case FORCE_ONE_LINK: return 2*4*arg.threads*( arg.oProd.Bytes() + 2*arg.outA.Bytes() ); case FORCE_ALL_LINK: return 2*arg.threads*( (goes_forward(arg.sig) ? 4 : 2)*arg.outA.Bytes() + 3*arg.link.Bytes() + arg.oProd.Bytes() + arg.qPrev.Bytes() + 2*arg.outB.Bytes()); case FORCE_MIDDLE_LINK: case FORCE_LEPAGE_MIDDLE_LINK: return 2*arg.threads*( ( goes_forward(arg.sig) ? 2*arg.outA.Bytes() : 0 ) + (arg.p_mu ? arg.pMu.Bytes() : 0) + (arg.q_mu ? arg.qMu.Bytes() : 0) + ( ( goes_forward(arg.sig) || arg.q_mu ) ? arg.qPrev.Bytes() : 0) + arg.p3.Bytes() + 3*arg.link.Bytes() + arg.oProd.Bytes() ); case FORCE_SIDE_LINK: return 2*arg.threads*( 2*arg.outA.Bytes() + 2*arg.outB.Bytes() + arg.p3.Bytes() + arg.link.Bytes() + arg.qProd.Bytes() ); case FORCE_SIDE_LINK_SHORT: return 2*arg.threads*( 2*arg.outA.Bytes() + arg.p3.Bytes() ); default: errorQuda("Undefined force type %d", type); } return 0; } }; template <typename real, int nColor, QudaReconstructType recon> struct HisqStaplesForce { HisqStaplesForce(GaugeField &Pmu, GaugeField &P3, GaugeField &P5, GaugeField &Pnumu, GaugeField &Qmu, GaugeField &Qnumu, GaugeField &newOprod, const GaugeField &oprod, const GaugeField &link, const double *path_coeff_array) { PathCoefficients<real> act_path_coeff(path_coeff_array); real OneLink = act_path_coeff.one; real ThreeSt = act_path_coeff.three; real mThreeSt = -ThreeSt; real FiveSt = act_path_coeff.five; real mFiveSt = -FiveSt; real SevenSt = act_path_coeff.seven; real Lepage = act_path_coeff.lepage; real mLepage = -Lepage; FatLinkArg<real, nColor> arg(newOprod, oprod, link, OneLink, FORCE_ONE_LINK); FatLinkForce<decltype(arg)> oneLink(arg, link, 0, 0, FORCE_ONE_LINK); oneLink.apply(0); for (int sig=0; sig<8; sig++) { for (int mu=0; mu<8; mu++) { if ( (mu == sig) || (mu == opp_dir(sig))) continue; //3-link //Kernel A: middle link FatLinkArg<real, nColor> middleLinkArg( newOprod, Pmu, P3, Qmu, oprod, link, mThreeSt, 2, FORCE_MIDDLE_LINK); FatLinkForce<decltype(arg)> middleLink(middleLinkArg, link, sig, mu, FORCE_MIDDLE_LINK); middleLink.apply(0); for (int nu=0; nu < 8; nu++) { if (nu == sig || nu == opp_dir(sig) || nu == mu || nu == opp_dir(mu)) continue; //5-link: middle link //Kernel B FatLinkArg<real, nColor> middleLinkArg( newOprod, Pnumu, P5, Qnumu, Pmu, Qmu, link, FiveSt, 1, FORCE_MIDDLE_LINK); FatLinkForce<decltype(arg)> middleLink(middleLinkArg, link, sig, nu, FORCE_MIDDLE_LINK); middleLink.apply(0); for (int rho = 0; rho < 8; rho++) { if (rho == sig || rho == opp_dir(sig) || rho == mu || rho == opp_dir(mu) || rho == nu || rho == opp_dir(nu)) continue; //7-link: middle link and side link FatLinkArg<real, nColor> arg(newOprod, P5, Pnumu, Qnumu, link, SevenSt, FiveSt != 0 ? SevenSt/FiveSt : 0, 1, FORCE_ALL_LINK, true); FatLinkForce<decltype(arg)> all(arg, link, sig, rho, FORCE_ALL_LINK); all.apply(0); }//rho //5-link: side link FatLinkArg<real, nColor> arg(newOprod, P3, P5, Qmu, link, mFiveSt, (ThreeSt != 0 ? FiveSt/ThreeSt : 0), 1, FORCE_SIDE_LINK); FatLinkForce<decltype(arg)> side(arg, link, sig, nu, FORCE_SIDE_LINK); side.apply(0); } //nu //lepage if (Lepage != 0.) 
{ FatLinkArg<real, nColor> middleLinkArg( newOprod, P5, Pmu, Qmu, link, Lepage, 2, FORCE_LEPAGE_MIDDLE_LINK); FatLinkForce<decltype(arg)> middleLink(middleLinkArg, link, sig, mu, FORCE_LEPAGE_MIDDLE_LINK); middleLink.apply(0); FatLinkArg<real, nColor> arg(newOprod, P3, P5, Qmu, link, mLepage, (ThreeSt != 0 ? Lepage/ThreeSt : 0), 2, FORCE_SIDE_LINK); FatLinkForce<decltype(arg)> side(arg, link, sig, mu, FORCE_SIDE_LINK); side.apply(0); } // Lepage != 0.0 // 3-link side link FatLinkArg<real, nColor> arg(newOprod, P3, link, ThreeSt, 1, FORCE_SIDE_LINK_SHORT); FatLinkForce<decltype(arg)> side(arg, P3, sig, mu, FORCE_SIDE_LINK_SHORT); side.apply(0); }//mu }//sig } }; void hisqStaplesForce(GaugeField &newOprod, const GaugeField &oprod, const GaugeField &link, const double path_coeff_array[6]) { if (!link.isNative()) errorQuda("Unsupported gauge order %d", link.Order()); if (!oprod.isNative()) errorQuda("Unsupported gauge order %d", oprod.Order()); if (!newOprod.isNative()) errorQuda("Unsupported gauge order %d", newOprod.Order()); if (checkLocation(newOprod,oprod,link) == QUDA_CPU_FIELD_LOCATION) errorQuda("CPU not implemented"); // create color matrix fields with zero padding GaugeFieldParam gauge_param(link); gauge_param.reconstruct = QUDA_RECONSTRUCT_NO; gauge_param.order = QUDA_FLOAT2_GAUGE_ORDER; gauge_param.geometry = QUDA_SCALAR_GEOMETRY; cudaGaugeField Pmu(gauge_param); cudaGaugeField P3(gauge_param); cudaGaugeField P5(gauge_param); cudaGaugeField Pnumu(gauge_param); cudaGaugeField Qmu(gauge_param); cudaGaugeField Qnumu(gauge_param); QudaPrecision precision = checkPrecision(oprod, link, newOprod); instantiate<HisqStaplesForce, ReconstructNone>(Pmu, P3, P5, Pnumu, Qmu, Qnumu, newOprod, oprod, link, path_coeff_array); cudaDeviceSynchronize(); checkCudaError(); } template <typename real, int nColor, QudaReconstructType reconstruct=QUDA_RECONSTRUCT_NO> struct CompleteForceArg : public BaseForceArg<real, nColor, reconstruct> { typedef typename gauge_mapper<real,QUDA_RECONSTRUCT_NO>::type F; F outA; // force output accessor const F oProd; // force input accessor const real coeff; CompleteForceArg(GaugeField &force, const GaugeField &link) : BaseForceArg<real, nColor, reconstruct>(link, 0), outA(force), oProd(force), coeff(0.0) { } }; // Flops count: 4 matrix multiplications per lattice site = 792 Flops per site template <typename Arg> __global__ void completeForceKernel(Arg arg) { typedef Matrix<complex<typename Arg::real>, Arg::nColor> Link; int x_cb = blockIdx.x * blockDim.x + threadIdx.x; if (x_cb >= arg.threads) return; int parity = blockIdx.y * blockDim.y + threadIdx.y; int x[4]; getCoords(x, x_cb, arg.X, parity); for (int d=0; d<4; d++) x[d] += arg.border[d]; int e_cb = linkIndex(x,arg.E); #pragma unroll for (int sig=0; sig<4; ++sig) { Link Uw = arg.link(sig, e_cb, parity); Link Ox = arg.oProd(sig, e_cb, parity); Link Ow = Uw*Ox; makeAntiHerm(Ow); typename Arg::real coeff = (parity==1) ? 
-1.0 : 1.0; arg.outA(sig, e_cb, parity) = coeff*Ow; } } template <typename real, int nColor, QudaReconstructType reconstruct=QUDA_RECONSTRUCT_NO> struct LongLinkArg : public BaseForceArg<real, nColor, reconstruct> { typedef typename gauge::FloatNOrder<real,18,2,11> M; typedef typename gauge_mapper<real,QUDA_RECONSTRUCT_NO>::type F; F outA; const F oProd; const real coeff; LongLinkArg(GaugeField &newOprod, const GaugeField &link, const GaugeField &oprod, real coeff) : BaseForceArg<real, nColor, reconstruct>(link,0), outA(newOprod), oProd(oprod), coeff(coeff) { } }; // Flops count, in two-number pair (matrix_mult, matrix_add) // (24, 12) // 4968 Flops per site in total template <typename Arg> __global__ void longLinkKernel(Arg arg) { typedef Matrix<complex<typename Arg::real>, Arg::nColor> Link; int x_cb = blockIdx.x * blockDim.x + threadIdx.x; if (x_cb >= arg.threads) return; int parity = blockIdx.y * blockDim.y + threadIdx.y; int x[4]; int dx[4] = {0,0,0,0}; getCoords(x, x_cb, arg.X, parity); for (int i=0; i<4; i++) x[i] += arg.border[i]; int e_cb = linkIndex(x,arg.E); /* * * A B C D E * ---- ---- ---- ---- * * ---> sig direction * * C is the current point (sid) * */ // compute the force for forward long links #pragma unroll for (int sig=0; sig<4; sig++) { int point_c = e_cb; dx[sig]++; int point_d = linkIndexShift(x,dx,arg.E); dx[sig]++; int point_e = linkIndexShift(x,dx,arg.E); dx[sig] = -1; int point_b = linkIndexShift(x,dx,arg.E); dx[sig]--; int point_a = linkIndexShift(x,dx,arg.E); dx[sig] = 0; Link Uab = arg.link(sig, point_a, parity); Link Ubc = arg.link(sig, point_b, 1-parity); Link Ude = arg.link(sig, point_d, 1-parity); Link Uef = arg.link(sig, point_e, parity); Link Oz = arg.oProd(sig, point_c, parity); Link Oy = arg.oProd(sig, point_b, 1-parity); Link Ox = arg.oProd(sig, point_a, parity); Link temp = Ude*Uef*Oz - Ude*Oy*Ubc + Ox*Uab*Ubc; Link force = arg.outA(sig, e_cb, parity); arg.outA(sig, e_cb, parity) = force + arg.coeff*temp; } // loop over sig } template <typename Arg> class HisqForce : public TunableVectorY { Arg &arg; const GaugeField &meta; const HisqForceType type; unsigned int minThreads() const { return arg.threads; } bool tuneGridDim() const { return false; } public: HisqForce(Arg &arg, const GaugeField &meta, int sig, int mu, HisqForceType type) : TunableVectorY(2), arg(arg), meta(meta), type(type) { arg.sig = sig; arg.mu = mu; } void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); switch (type) { case FORCE_LONG_LINK: longLinkKernel<Arg><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break; case FORCE_COMPLETE: completeForceKernel<Arg><<<tp.grid,tp.block,tp.shared_bytes,stream>>>(arg); break; default: errorQuda("Undefined force type %d", type); } } TuneKey tuneKey() const { std::stringstream aux; aux << meta.AuxString() << comm_dim_partitioned_string() << ",threads=" << arg.threads; switch (type) { case FORCE_LONG_LINK: aux << ",LONG_LINK"; break; case FORCE_COMPLETE: aux << ",COMPLETE"; break; default: errorQuda("Undefined force type %d", type); } return TuneKey(meta.VolString(), typeid(*this).name(), aux.str().c_str()); } void preTune() { switch (type) { case FORCE_LONG_LINK: case FORCE_COMPLETE: arg.outA.save(); break; default: errorQuda("Undefined force type %d", type); } } void postTune() { switch (type) { case FORCE_LONG_LINK: case FORCE_COMPLETE: arg.outA.load(); break; default: errorQuda("Undefined force type %d", type); } } long long flops() const { switch (type) { case FORCE_LONG_LINK: return 
2*arg.threads*4968ll; case FORCE_COMPLETE: return 2*arg.threads*792ll; default: errorQuda("Undefined force type %d", type); } return 0; } long long bytes() const { switch (type) { case FORCE_LONG_LINK: return 4*2*arg.threads*(2*arg.outA.Bytes() + 4*arg.link.Bytes() + 3*arg.oProd.Bytes()); case FORCE_COMPLETE: return 4*2*arg.threads*(arg.outA.Bytes() + arg.link.Bytes() + arg.oProd.Bytes()); default: errorQuda("Undefined force type %d", type); } return 0; } }; template <typename real, int nColor, QudaReconstructType recon> struct HisqLongLinkForce { HisqLongLinkForce(GaugeField &newOprod, const GaugeField &oldOprod, const GaugeField &link, double coeff) { LongLinkArg<real, nColor, recon> arg(newOprod, link, oldOprod, coeff); HisqForce<decltype(arg)> longLink(arg, link, 0, 0, FORCE_LONG_LINK); longLink.apply(0); cudaDeviceSynchronize(); checkCudaError(); } }; void hisqLongLinkForce(GaugeField &newOprod, const GaugeField &oldOprod, const GaugeField &link, double coeff) { if (!link.isNative()) errorQuda("Unsupported gauge order %d", link.Order()); if (!oldOprod.isNative()) errorQuda("Unsupported gauge order %d", oldOprod.Order()); if (!newOprod.isNative()) errorQuda("Unsupported gauge order %d", newOprod.Order()); if (checkLocation(newOprod,oldOprod,link) == QUDA_CPU_FIELD_LOCATION) errorQuda("CPU not implemented"); checkPrecision(newOprod, link, oldOprod); instantiate<HisqLongLinkForce, ReconstructNone>(newOprod, oldOprod, link, coeff); } template <typename real, int nColor, QudaReconstructType recon> struct HisqCompleteForce { HisqCompleteForce(GaugeField &force, const GaugeField &link) { CompleteForceArg<real, nColor, recon> arg(force, link); HisqForce<decltype(arg)> completeForce(arg, link, 0, 0, FORCE_COMPLETE); completeForce.apply(0); cudaDeviceSynchronize(); checkCudaError(); } }; void hisqCompleteForce(GaugeField &force, const GaugeField &link) { if (!link.isNative()) errorQuda("Unsupported gauge order %d", link.Order()); if (!force.isNative()) errorQuda("Unsupported gauge order %d", force.Order()); if (checkLocation(force,link) == QUDA_CPU_FIELD_LOCATION) errorQuda("CPU not implemented"); checkPrecision(link, force); instantiate<HisqCompleteForce, ReconstructNone>(force, link); } } // namespace fermion_force } // namespace quda #endif // GPU_HISQ_FORCE
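/*
 * Hedged aside (not part of the file): the flop totals quoted in flops() above
 * follow from per-matrix costs for 3x3 complex matrices. One multiply costs
 * 9 entries * (3 complex mults + 2 complex adds) = 9*(3*6 + 2*2) = 198 real
 * flops, and one add costs 9 complex adds = 18 real flops. The checks below
 * reproduce the 792-flop complete-force and 4968-flop long-link counts stated
 * in the kernel comments.
 */
constexpr long long cmatmul_flops = 9 * (3 * 6 + 2 * 2); // 198 flops per 3x3 complex multiply
constexpr long long cmatadd_flops = 9 * 2;               // 18 flops per 3x3 complex add

static_assert(4 * cmatmul_flops == 792,
              "completeForceKernel: 4 multiplies per site");
static_assert(24 * cmatmul_flops + 12 * cmatadd_flops == 4968,
              "longLinkKernel: (24, 12) multiply/add pairs per site");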
f267fa84d833d79883d66a69cf41cca034ec77ef.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void wipe(int *buffer, int length) {
    length >>= 5;
    int tid = threadIdx.x;
    for (int i = 0; i < length; i++) {
        buffer[(i << 5) + tid] = -1;
    }
}
f267fa84d833d79883d66a69cf41cca034ec77ef.cu
#include "includes.h" __global__ void wipe(int *buffer, int length) { length >>= 5; int tid = threadIdx.x; for(int i = 0; i < length; i++) { buffer[(i << 5) + tid] = -1; } }
6a537ebd29c34e5846af0709153f1461d4c00b6f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from sparse-iter/blas/zpipelinedgmres.cu, normal z -> c, Tue Aug 30 09:38:45 2016 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define COMPLEX #define BLOCK_SIZE 512 template< int n > __device__ void sum_reduce( /*int n,*/ int i, float* x ) { __syncthreads(); if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); } if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); } if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); } if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); } if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); } if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); } // probably don't need __syncthreads for < 16 threads // because of implicit warp level synchronization. if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); } if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); } if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); } if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); } if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); } } __global__ void magma_cpipelined_correction( int n, int k, magmaFloatComplex * skp, magmaFloatComplex * r, magmaFloatComplex * v ) { int i = blockIdx.x * blockDim.x + threadIdx.x; float zz= 0.0, tmp= 0.0; extern __shared__ magmaFloatComplex temp[]; temp[ i ] = ( i < k ) ? 
skp[ i ] * skp[ i ] : MAGMA_C_MAKE( 0.0, 0.0); __syncthreads(); if (i < 64) { temp[ i ] += temp[ i + 64 ]; } __syncthreads(); if ( i < 32 ) { temp[ i ] += temp[ i + 32 ]; __syncthreads(); temp[ i ] += temp[ i + 16 ]; __syncthreads(); temp[ i ] += temp[ i + 8 ]; __syncthreads(); temp[ i ] += temp[ i + 4 ]; __syncthreads(); temp[ i ] += temp[ i + 2 ]; __syncthreads(); temp[ i ] += temp[ i + 1 ]; __syncthreads(); } if ( i == 0 ) { tmp = MAGMA_C_REAL( temp[ i ] ); zz = MAGMA_C_REAL( skp[(k)] ); skp[k] = MAGMA_C_MAKE( sqrt(zz-tmp),0.0 ); } } __global__ void magma_cpipelined_copyscale( int n, int k, magmaFloatComplex * skp, magmaFloatComplex * r, magmaFloatComplex * v ) { int i = blockIdx.x * blockDim.x + threadIdx.x; magmaFloatComplex rr=skp[k]; if ( i < n ) { v[i] = r[i] * 1.0 / rr; } } //----------------------------------------------------------------------------// __global__ void magma_cpipelinedscnrm2_kernel( int m, magmaFloatComplex * da, int ldda, magmaFloatComplex * dxnorm ) { const int i = threadIdx.x; magmaFloatComplex_ptr dx = da + blockIdx.x * ldda; __shared__ float sum[ 512 ]; float re, lsum; // get norm of dx lsum = 0; for( int j = i; j < m; j += 512 ) { #ifdef REAL re = dx[j]; lsum += re*re; #else re = MAGMA_C_REAL( dx[j] ); float im = MAGMA_C_IMAG( dx[j] ); lsum += re*re + im*im; #endif } sum[i] = lsum; sum_reduce< 512 >( i, sum ); if (i==0) dxnorm[blockIdx.x] = MAGMA_C_MAKE( sqrt(sum[0]), 0.0 ); } //----------------------------------------------------------------------------// __global__ void magma_cpipelinesscale( int n, magmaFloatComplex * r, magmaFloatComplex * drnorm ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<n ) { r[i] = r[i] * 1.0 / drnorm[0]; } } /** Purpose ------- Computes the correction term of the pipelined GMRES according to P. Ghysels and scales and copies the new search direction Returns the vector v = r/ ( skp[k] - (sum_i=1^k skp[i]^2) ) . Arguments --------- @param[in] n int length of v_i @param[in] k int # skp entries v_i^T * r ( without r ) @param[in] r magmaFloatComplex_ptr vector of length n @param[in] v magmaFloatComplex_ptr vector of length n @param[in] skp magmaFloatComplex_ptr array of parameters @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_caux ********************************************************************/ extern "C" magma_int_t magma_ccopyscale( magma_int_t n, magma_int_t k, magmaFloatComplex_ptr r, magmaFloatComplex_ptr v, magmaFloatComplex_ptr skp, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( k, BLOCK_SIZE ) ); unsigned int Ms = Bs.x * sizeof( magmaFloatComplex ); dim3 Gs2( magma_ceildiv( n, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_cpipelined_correction), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , n, k, skp, r, v ); hipLaunchKernelGGL(( magma_cpipelined_copyscale), dim3(Gs2), dim3(Bs), 0, queue->cuda_stream() , n, k, skp, r, v ); return MAGMA_SUCCESS; } extern "C" magma_int_t magma_scnrm2scale( magma_int_t m, magmaFloatComplex_ptr r, magma_int_t lddr, magmaFloatComplex_ptr drnorm, magma_queue_t queue ) { dim3 blocks( 1 ); dim3 threads( 512 ); hipLaunchKernelGGL(( magma_cpipelinedscnrm2_kernel), dim3(blocks), dim3(threads), 0, queue->cuda_stream() , m, r, lddr, drnorm ); dim3 Bs( BLOCK_SIZE ); dim3 Gs2( magma_ceildiv( m, BLOCK_SIZE ) ); hipLaunchKernelGGL(( magma_cpipelinesscale), dim3(Gs2), dim3(Bs), 0, queue->cuda_stream() , m, r, drnorm ); return MAGMA_SUCCESS; }
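/*
 * Host-side reference sketch of what magma_ccopyscale computes, for intuition
 * only (assumes real-valued data; the kernels above operate on
 * magmaFloatComplex). The correction kernel replaces skp[k] by
 * sqrt(skp[k] - sum_{i<k} skp[i]^2) (note the square root applied by the
 * kernel), and the copy/scale kernel then forms v = r / skp[k].
 */
#include <cmath>

void copyscale_reference(int n, int k, float *skp, const float *r, float *v) {
  float sum = 0.0f;
  for (int i = 0; i < k; i++) sum += skp[i] * skp[i]; // sum of squared projections
  skp[k] = std::sqrt(skp[k] - sum);                   // corrected norm (pipelined GMRES, Ghysels)
  for (int i = 0; i < n; i++) v[i] = r[i] / skp[k];   // scaled copy of the residual
}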
6a537ebd29c34e5846af0709153f1461d4c00b6f.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from sparse-iter/blas/zpipelinedgmres.cu, normal z -> c, Tue Aug 30 09:38:45 2016 @author Hartwig Anzt */ #include "magmasparse_internal.h" #define COMPLEX #define BLOCK_SIZE 512 template< int n > __device__ void sum_reduce( /*int n,*/ int i, float* x ) { __syncthreads(); if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[i] += x[i+1024]; } __syncthreads(); } if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[i] += x[i+ 512]; } __syncthreads(); } if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[i] += x[i+ 256]; } __syncthreads(); } if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[i] += x[i+ 128]; } __syncthreads(); } if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[i] += x[i+ 64]; } __syncthreads(); } if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[i] += x[i+ 32]; } __syncthreads(); } // probably don't need __syncthreads for < 16 threads // because of implicit warp level synchronization. if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[i] += x[i+ 16]; } __syncthreads(); } if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[i] += x[i+ 8]; } __syncthreads(); } if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[i] += x[i+ 4]; } __syncthreads(); } if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[i] += x[i+ 2]; } __syncthreads(); } if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[i] += x[i+ 1]; } __syncthreads(); } } __global__ void magma_cpipelined_correction( int n, int k, magmaFloatComplex * skp, magmaFloatComplex * r, magmaFloatComplex * v ) { int i = blockIdx.x * blockDim.x + threadIdx.x; float zz= 0.0, tmp= 0.0; extern __shared__ magmaFloatComplex temp[]; temp[ i ] = ( i < k ) ? skp[ i ] * skp[ i ] : MAGMA_C_MAKE( 0.0, 0.0); __syncthreads(); if (i < 64) { temp[ i ] += temp[ i + 64 ]; } __syncthreads(); if ( i < 32 ) { temp[ i ] += temp[ i + 32 ]; __syncthreads(); temp[ i ] += temp[ i + 16 ]; __syncthreads(); temp[ i ] += temp[ i + 8 ]; __syncthreads(); temp[ i ] += temp[ i + 4 ]; __syncthreads(); temp[ i ] += temp[ i + 2 ]; __syncthreads(); temp[ i ] += temp[ i + 1 ]; __syncthreads(); } if ( i == 0 ) { tmp = MAGMA_C_REAL( temp[ i ] ); zz = MAGMA_C_REAL( skp[(k)] ); skp[k] = MAGMA_C_MAKE( sqrt(zz-tmp),0.0 ); } } __global__ void magma_cpipelined_copyscale( int n, int k, magmaFloatComplex * skp, magmaFloatComplex * r, magmaFloatComplex * v ) { int i = blockIdx.x * blockDim.x + threadIdx.x; magmaFloatComplex rr=skp[k]; if ( i < n ) { v[i] = r[i] * 1.0 / rr; } } //----------------------------------------------------------------------------// __global__ void magma_cpipelinedscnrm2_kernel( int m, magmaFloatComplex * da, int ldda, magmaFloatComplex * dxnorm ) { const int i = threadIdx.x; magmaFloatComplex_ptr dx = da + blockIdx.x * ldda; __shared__ float sum[ 512 ]; float re, lsum; // get norm of dx lsum = 0; for( int j = i; j < m; j += 512 ) { #ifdef REAL re = dx[j]; lsum += re*re; #else re = MAGMA_C_REAL( dx[j] ); float im = MAGMA_C_IMAG( dx[j] ); lsum += re*re + im*im; #endif } sum[i] = lsum; sum_reduce< 512 >( i, sum ); if (i==0) dxnorm[blockIdx.x] = MAGMA_C_MAKE( sqrt(sum[0]), 0.0 ); } //----------------------------------------------------------------------------// __global__ void magma_cpipelinesscale( int n, magmaFloatComplex * r, magmaFloatComplex * drnorm ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i<n ) { r[i] = r[i] * 1.0 / drnorm[0]; } } /** Purpose ------- Computes the correction term of the pipelined GMRES according to P. 
Ghysels, and scales and copies the new search direction. Returns the vector v = r / sqrt( skp[k] - (sum_i=1^k skp[i]^2) ) . Arguments --------- @param[in] n int length of v_i @param[in] k int # skp entries v_i^T * r ( without r ) @param[in] r magmaFloatComplex_ptr vector of length n @param[in] v magmaFloatComplex_ptr vector of length n @param[in] skp magmaFloatComplex_ptr array of parameters @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_caux ********************************************************************/ extern "C" magma_int_t magma_ccopyscale( magma_int_t n, magma_int_t k, magmaFloatComplex_ptr r, magmaFloatComplex_ptr v, magmaFloatComplex_ptr skp, magma_queue_t queue ) { dim3 Bs( BLOCK_SIZE ); dim3 Gs( magma_ceildiv( k, BLOCK_SIZE ) ); unsigned int Ms = Bs.x * sizeof( magmaFloatComplex ); dim3 Gs2( magma_ceildiv( n, BLOCK_SIZE ) ); magma_cpipelined_correction<<< Gs, Bs, Ms, queue->cuda_stream() >>> ( n, k, skp, r, v ); magma_cpipelined_copyscale<<< Gs2, Bs, 0, queue->cuda_stream() >>> ( n, k, skp, r, v ); return MAGMA_SUCCESS; } extern "C" magma_int_t magma_scnrm2scale( magma_int_t m, magmaFloatComplex_ptr r, magma_int_t lddr, magmaFloatComplex_ptr drnorm, magma_queue_t queue ) { dim3 blocks( 1 ); dim3 threads( 512 ); magma_cpipelinedscnrm2_kernel<<< blocks, threads, 0, queue->cuda_stream() >>> ( m, r, lddr, drnorm ); dim3 Bs( BLOCK_SIZE ); dim3 Gs2( magma_ceildiv( m, BLOCK_SIZE ) ); magma_cpipelinesscale<<< Gs2, Bs, 0, queue->cuda_stream() >>>( m, r, drnorm ); return MAGMA_SUCCESS; }
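// ---------------------------------------------------------------------------
// Editorial sketch (not part of the MAGMA sources): magma_cpipelined_correction
// above calls __syncthreads() inside the divergent `if ( i < 32 )` branch, and
// sum_reduce leans on "implicit warp level synchronization" -- both assumptions
// break under the independent thread scheduling of Volta-class and newer GPUs.
// A sketch of a block reduction whose final warp uses explicit warp-level
// primitives instead (CUDA 9+ intrinsics; the function name is illustrative,
// and blockDim.x is assumed to be a power of two of at least 32):
__device__ void block_sum_reduce(int i, float *x)   // x has blockDim.x entries
{
    __syncthreads();
    // pairwise tree reduction until one warp's worth of partial sums remains
    for (int s = blockDim.x / 2; s >= 32; s >>= 1) {
        if (i < s) x[i] += x[i + s];
        __syncthreads();
    }
    // final warp: shuffles avoid shared-memory read/write hazards entirely
    if (i < 32) {
        float v = x[i];
        for (int offset = 16; offset > 0; offset >>= 1)
            v += __shfl_down_sync(0xffffffffu, v, offset);
        if (i == 0) x[0] = v;   // callers must __syncthreads() before reading
    }
}
// ---------------------------------------------------------------------------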
96aeb9c6af41a3c92a9f297a8d014084e57a317b.hip
// !!! This is a file automatically generated by hipify!!! // ---------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------- /** * @file * test_bfs.cu * * @brief Simple test driver program for breadth-first search. */ #include <stdio.h> #include <string> #include <deque> #include <vector> #include <algorithm> #include <iostream> #include <fstream> // Utilities and correctness-checking #include <gunrock/util/test_utils.cuh> #include <gunrock/util/track_utils.cuh> // BFS includes #include <gunrock/app/bfs/bfs_enactor.cuh> #include <gunrock/app/bfs/bfs_problem.cuh> #include <gunrock/app/bfs/bfs_functor.cuh> // Operator includes #include <gunrock/oprtr/advance/kernel.cuh> #include <gunrock/oprtr/filter/kernel.cuh> #include <moderngpu.cuh> using namespace gunrock; using namespace gunrock::app; using namespace gunrock::util; using namespace gunrock::oprtr; using namespace gunrock::app::bfs; /****************************************************************************** * Housekeeping Routines ******************************************************************************/ void Usage() { printf( "test <graph-type> [graph-type-arguments]\n" "Graph type and graph type arguments:\n" " market <matrix-market-file-name>\n" " Reads a Matrix-Market coordinate-formatted graph of\n" " directed/undirected edges from STDIN (or from the\n" " optionally-specified file).\n" " rmat (default: rmat_scale = 10, a = 0.57, b = c = 0.19)\n" " Generate R-MAT graph as input\n" " --rmat_scale=<vertex-scale>\n" " --rmat_nodes=<number-nodes>\n" " --rmat_edgefactor=<edge-factor>\n" " --rmat_edges=<number-edges>\n" " --rmat_a=<factor> --rmat_b=<factor> --rmat_c=<factor>\n" " --rmat_seed=<seed>\n" " rgg (default: rgg_scale = 10, rgg_thfactor = 0.55)\n" " Generate Random Geometry Graph as input\n" " --rgg_scale=<vertex-scale>\n" " --rgg_nodes=<number-nodes>\n" " --rgg_thfactor=<threshold-factor>\n" " --rgg_threshold=<threshold>\n" " --rgg_vmultipiler=<vmultipiler>\n" " --rgg_seed=<seed>\n\n" "Optional arguments:\n" "[--device=<device_index>] Set GPU(s) for testing (Default: 0).\n" "[--undirected] Treat the graph as undirected (symmetric).\n" "[--idempotence] Whether or not to enable idempotent operation.\n" "[--instrumented] Keep kernels statics [Default: Disable].\n" " total_queued, search_depth and barrier duty.\n" " (a relative indicator of load imbalance.)\n" "[--src=<Vertex-ID|largestdegree|randomize|randomize2|list>]\n" " Begins traversal from the source (Default: 0).\n" " If largestdegree: from largest degree vertex.\n" " If randomize: from a random source vertex.\n" " If randomize2: from a different random source vertex for each iteration.\n" " If list: need to provide a source list through --source_list=n0,n1,...,nk\n" "[--quick] Skip the CPU reference validation process.\n" "[--mark-pred] Keep both label info and predecessor info.\n" "[--disable-size-check] Disable frontier queue size check.\n" "[--grid-size=<grid size>] Maximum allowed grid size setting.\n" "[--queue-sizing=<factor>] Allocates a frontier queue sized at: \n" " (graph-edges * <factor>). (Default: 1.0)\n" "[--in-sizing=<in/out_queue_scale_factor>]\n" " Allocates a frontier queue sized at: \n" " (graph-edges * <factor>). 
(Default: 1.0)\n" "[--v] Print verbose per iteration debug info.\n" "[--iteration-num=<num>] Number of runs to perform the test.\n" "[--traversal-mode=<0|1>] Set traversal strategy, 0 for Load-Balanced\n" " 1 for Dynamic-Cooperative (Default: dynamic\n" " determine based on average degree).\n" "[--partition-method=<random|biasrandom|clustered|metis>]\n" " Choose partitioner (Default use random).\n" "[--quiet] No output (unless --json is specified).\n" "[--json] Output JSON-format statistics to STDOUT.\n" "[--jsonfile=<name>] Output JSON-format statistics to file <name>\n" "[--jsondir=<dir>] Output JSON-format statistics to <dir>/name,\n" " where name is auto-generated.\n" ); } /** * @brief Displays the BFS result (i.e., distance from source) * * @tparam VertexId * @tparam SizeT * @tparam MARK_PREDECESSORS * @tparam ENABLE_IDEMPOTENCE * * @param[in] labels Search depth from the source for each node. * @param[in] preds Predecessor node id for each node. * @param[in] num_nodes Number of nodes in the graph. * @param[in] quiet Don't print out anything to stdout */ template < typename VertexId, typename SizeT, bool MARK_PREDECESSORS, bool ENABLE_IDEMPOTENCE > void DisplaySolution( VertexId *labels, VertexId *preds, SizeT num_nodes, bool quiet = false) { if (quiet) { return; } // careful: if later code in this // function changes something, this // return is the wrong thing to do if (num_nodes > 40) { num_nodes = 40; } printf("\nFirst %lld labels of the GPU result:\n", (long long)num_nodes); printf("["); for (VertexId i = 0; i < num_nodes; ++i) { PrintValue(i); printf(":"); PrintValue(labels[i]); if (MARK_PREDECESSORS) //&& !ENABLE_IDEMPOTENCE) { printf(","); PrintValue(preds[i]); } printf(" "); } printf("]\n"); } /****************************************************************************** * BFS Testing Routines *****************************************************************************/ /** * @brief A simple CPU-based reference BFS ranking implementation. * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam MARK_PREDECESSORS * @tparam ENABLE_IDEMPOTENCE * * @param[in] graph Reference to the CSR graph we process on * @param[in] source_path Host-side vector to store CPU computed labels for each node * @param[in] predecessor Host-side vector to store CPU computed predecessor for each node * @param[in] src Source node where BFS starts * @param[in] quiet Don't print out anything to stdout */ template < typename VertexId, typename SizeT, typename Value, bool MARK_PREDECESSORS, bool ENABLE_IDEMPOTENCE > void ReferenceBFS( const Csr<VertexId, SizeT, Value> *graph, VertexId *source_path, VertexId *predecessor, VertexId src, bool quiet = false) { // Initialize labels for (VertexId i = 0; i < graph->nodes; ++i) { source_path[i] = /*ENABLE_IDEMPOTENCE ? 
-1 :*/ util::MaxValue<VertexId>(); if (MARK_PREDECESSORS) { predecessor[i] = util::InvalidValue<VertexId>(); } } source_path[src] = 0; VertexId search_depth = 0; // Initialize queue for managing previously-discovered nodes std::deque<VertexId> frontier; frontier.push_back(src); // Perform BFS CpuTimer cpu_timer; cpu_timer.Start(); while (!frontier.empty()) { // Dequeue node from frontier VertexId dequeued_node = frontier.front(); frontier.pop_front(); VertexId neighbor_dist = source_path[dequeued_node] + 1; // Locate adjacency list SizeT edges_begin = graph->row_offsets[dequeued_node]; SizeT edges_end = graph->row_offsets[dequeued_node + 1]; for (SizeT edge = edges_begin; edge < edges_end; ++edge) { //Lookup neighbor and enqueue if undiscovered VertexId neighbor = graph->column_indices[edge]; if (source_path[neighbor] > neighbor_dist) //|| source_path[neighbor] == -1) { source_path[neighbor] = neighbor_dist; if (MARK_PREDECESSORS) { predecessor[neighbor] = dequeued_node; } if (search_depth < neighbor_dist) { search_depth = neighbor_dist; } frontier.push_back(neighbor); } } } if (MARK_PREDECESSORS) { predecessor[src] = util::InvalidValue<VertexId>(); } cpu_timer.Stop(); float elapsed = cpu_timer.ElapsedMillis(); search_depth++; if (!quiet) { printf("CPU BFS finished in %lf msec. cpu_search_depth: %lld\n", elapsed, (long long)search_depth); } } /** * @brief Run BFS tests * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam MARK_PREDECESSORS * @tparam ENABLE_IDEMPOTENCE * * @param[in] info Pointer to info contains parameters and statistics. * * \return hipError_t object which indicates the success of * all CUDA function calls. */ template < typename VertexId, typename SizeT, typename Value, bool MARK_PREDECESSORS, bool ENABLE_IDEMPOTENCE > hipError_t RunTests(Info<VertexId, SizeT, Value> *info) { typedef BFSProblem < VertexId, SizeT, Value, MARK_PREDECESSORS, ENABLE_IDEMPOTENCE> //(MARK_PREDECESSORS && ENABLE_IDEMPOTENCE) > Problem; // does not use double buffer typedef BFSEnactor < Problem> //INSTRUMENT, //DEBUG, //SIZE_CHECK > Enactor; // parse configurations from mObject info Csr<VertexId, SizeT, Value> *graph = info->csr_ptr; Csr<VertexId, SizeT, Value> *inv_graph = info->csc_ptr; VertexId src = info->info["source_vertex" ].get_int64(); int max_grid_size = info->info["max_grid_size" ].get_int (); int num_gpus = info->info["num_gpus" ].get_int (); double max_queue_sizing = info->info["max_queue_sizing" ].get_real (); double max_queue_sizing1 = info->info["max_queue_sizing1" ].get_real (); double max_in_sizing = info->info["max_in_sizing" ].get_real (); std::string partition_method = info->info["partition_method" ].get_str (); double partition_factor = info->info["partition_factor" ].get_real (); int partition_seed = info->info["partition_seed" ].get_int (); bool quiet_mode = info->info["quiet_mode" ].get_bool (); bool quick_mode = info->info["quick_mode" ].get_bool (); bool stream_from_host = info->info["stream_from_host" ].get_bool (); std::string traversal_mode = info->info["traversal_mode" ].get_str (); bool instrument = info->info["instrument" ].get_bool (); bool debug = info->info["debug_mode" ].get_bool (); bool size_check = info->info["size_check" ].get_bool (); int iterations = info->info["num_iteration" ].get_int (); std::string src_type = info->info["source_type" ].get_str (); int src_seed = info->info["source_seed" ].get_int (); int communicate_latency = info->info["communicate_latency"].get_int (); float communicate_multipy = 
info->info["communicate_multipy"].get_real(); int expand_latency = info->info["expand_latency" ].get_int (); int subqueue_latency = info->info["subqueue_latency" ].get_int (); int fullqueue_latency = info->info["fullqueue_latency" ].get_int (); int makeout_latency = info->info["makeout_latency" ].get_int (); bool direction_optimized = info->info["direction_optimized"].get_bool(); float do_a = info->info["do_a" ].get_real(); float do_b = info->info["do_b" ].get_real(); bool undirected = info->info["undirected" ].get_bool(); if (max_queue_sizing < 0) max_queue_sizing = 6.5; if (max_in_sizing < 0) max_in_sizing = 4; if (communicate_multipy > 1) max_in_sizing *= communicate_multipy; CpuTimer cpu_timer; hipError_t retval = hipSuccess; cpu_timer.Start(); json_spirit::mArray device_list = info->info["device_list"].get_array(); int* gpu_idx = new int[num_gpus]; for (int i = 0; i < num_gpus; i++) gpu_idx[i] = device_list[i].get_int(); // TODO: remove after merge mgpu-cq ContextPtr *context = (ContextPtr*) info->context; hipStream_t *streams = (hipStream_t*)info->streams; // allocate host-side label array (for both reference and GPU results) VertexId *reference_labels = new VertexId[graph->nodes]; VertexId *reference_preds = new VertexId[graph->nodes]; VertexId *h_labels = new VertexId[graph->nodes]; VertexId *reference_check_label = (quick_mode) ? NULL : reference_labels; VertexId *reference_check_preds = NULL; VertexId *h_preds = NULL; if (MARK_PREDECESSORS) { h_preds = new VertexId[graph->nodes]; if (!quick_mode) { reference_check_preds = reference_preds; } } size_t *org_size = new size_t[num_gpus]; for (int gpu = 0; gpu < num_gpus; gpu++) { size_t dummy; if (retval = util::SetDevice(gpu_idx[gpu])) return retval; if (retval = util::GRError( hipMemGetInfo(&(org_size[gpu]), &dummy), "hipMemGetInfo failed", __FILE__, __LINE__)) return retval; } Problem* problem = new Problem(direction_optimized, undirected); // allocate problem on GPU if (retval = util::GRError(problem->Init( stream_from_host, graph, inv_graph, num_gpus, gpu_idx, partition_method, streams, max_queue_sizing, max_in_sizing, partition_factor, partition_seed), "BFS Problem Init failed", __FILE__, __LINE__)) return retval; Enactor* enactor = new Enactor( num_gpus, gpu_idx, instrument, debug, size_check, direction_optimized); // enactor map if (retval = util::GRError(enactor->Init( context, problem, max_grid_size, traversal_mode), "BFS Enactor Init failed", __FILE__, __LINE__)) return retval; enactor -> communicate_latency = communicate_latency; enactor -> communicate_multipy = communicate_multipy; enactor -> expand_latency = expand_latency; enactor -> subqueue_latency = subqueue_latency; enactor -> fullqueue_latency = fullqueue_latency; enactor -> makeout_latency = makeout_latency; enactor -> do_a = do_a; enactor -> do_b = do_b; if (retval = util::SetDevice(gpu_idx[0])) return retval; if (retval = util::latency::Test( streams[0], problem -> data_slices[0] -> latency_data, communicate_latency, communicate_multipy, expand_latency, subqueue_latency, fullqueue_latency, makeout_latency)) return retval; cpu_timer.Stop(); info -> info["preprocess_time"] = cpu_timer.ElapsedMillis(); // perform BFS double total_elapsed = 0.0; double single_elapsed = 0.0; double max_elapsed = 0.0; double min_elapsed = 1e10; json_spirit::mArray process_times; if (src_type == "random2") { if (src_seed == -1) src_seed = time(NULL); if (!quiet_mode) printf("src_seed = %d\n", src_seed); srand(src_seed); } if (!quiet_mode) printf("Using traversal-mode %s\n", 
traversal_mode.c_str()); json_spirit::mArray source_list; if (src_type == "list") source_list = info->info["source_list"].get_array(); for (int iter = 0; iter < iterations; ++iter) { if (src_type == "random2") { bool src_valid = false; while (!src_valid) { src = rand() % graph -> nodes; if (graph -> row_offsets[src] != graph -> row_offsets[src+1]) src_valid = true; } } else if (src_type == "list") { if (source_list.size() == 0) { if (!quiet_mode) printf("No source list found. Use 0 as source.\n"); src = 0; } else { src = source_list[iter].get_int(); } } if (retval = util::GRError(problem->Reset( src, enactor->GetFrontierType(), max_queue_sizing, max_queue_sizing1), "BFS Problem Reset failed", __FILE__, __LINE__)) return retval; if (retval = util::GRError(enactor->Reset(), "BFS Enactor Reset failed", __FILE__, __LINE__)) return retval; for (int gpu = 0; gpu < num_gpus; gpu++) { if (retval = util::SetDevice(gpu_idx[gpu])) return retval; if (retval = util::GRError(hipDeviceSynchronize(), "hipDeviceSynchronize failed", __FILE__, __LINE__)) return retval; } if (!quiet_mode) { printf("__________________________\n"); fflush(stdout); } cpu_timer.Start(); if (retval = util::GRError(enactor->Enact(src, traversal_mode), "BFS Enact failed", __FILE__, __LINE__)) return retval; cpu_timer.Stop(); single_elapsed = cpu_timer.ElapsedMillis(); total_elapsed += single_elapsed; process_times.push_back(single_elapsed); if (single_elapsed > max_elapsed) max_elapsed = single_elapsed; if (single_elapsed < min_elapsed) min_elapsed = single_elapsed; if (!quiet_mode) { printf("--------------------------\n" "iteration %d elapsed: %lf ms, src = %lld, #iteration = %lld\n", iter, single_elapsed, (long long)src, (long long)enactor -> enactor_stats -> iteration); fflush(stdout); } } total_elapsed /= iterations; info -> info["process_times"] = process_times; info -> info["min_process_time"] = min_elapsed; info -> info["max_process_time"] = max_elapsed; // compute reference CPU BFS solution for source-distance if (!quick_mode) { if (!quiet_mode) { printf("Computing reference value ...\n"); } ReferenceBFS<VertexId, SizeT, Value, MARK_PREDECESSORS, ENABLE_IDEMPOTENCE>( graph, reference_check_label, reference_check_preds, src, quiet_mode); if (!quiet_mode) { printf("\n"); } } cpu_timer.Start(); // copy out results if (retval = util::GRError(problem->Extract(h_labels, h_preds), "BFS Problem Extraction failed", __FILE__, __LINE__)) return retval; // verify the result if ((!quick_mode) && (!quiet_mode)) { printf("Label Validity: "); int num_errors = CompareResults( h_labels, reference_check_label, graph->nodes, true, quiet_mode); if (num_errors > 0) { printf("%d errors occurred.", num_errors); } printf("\n"); if (MARK_PREDECESSORS) { printf("Predecessor Validity: \n"); num_errors = 0; #pragma omp parallel for for (VertexId v=0; v<graph->nodes; v++) { if (h_labels[v] == /*(ENABLE_IDEMPOTENCE ? 
-1 :*/ util::MaxValue<VertexId>()) continue; // unvisited vertex if (v == src && h_preds[v] == util::InvalidValue<VertexId>()) continue; // source vertex VertexId pred = h_preds[v]; if (pred >= graph->nodes || pred < 0) { if (num_errors == 0) printf("INCORRECT: pred[%lld] : %lld out of bound\n", (long long)v, (long long)pred); #pragma omp atomic num_errors ++; continue; } if (h_labels[v] != h_labels[pred] + 1) { if (num_errors == 0) printf("INCORRECT: label[%lld] (%lld) != label[%lld] (%lld) + 1\n", (long long)v, (long long)h_labels[v], (long long)pred, (long long)h_labels[pred]); #pragma omp atomic num_errors ++; continue; } bool v_found = false; for (SizeT t = graph->row_offsets[pred]; t < graph->row_offsets[pred+1]; t++) if (v == graph->column_indices[t]) { v_found = true; break; } if (!v_found) { if (num_errors == 0) printf("INCORRECT: Vertex %lld not in Vertex %lld's neighbor list\n", (long long)v, (long long)pred); #pragma omp atomic num_errors ++; continue; } } if (num_errors > 0) { printf("%d errors occurred.", num_errors); } else printf("CORRECT"); printf("\n"); } } if (!quick_mode && TO_TRACK) { VertexId **v_ = NULL; if (num_gpus > 1) { v_ = new VertexId*[num_gpus]; for (int gpu=0; gpu<num_gpus; gpu++) { v_[gpu] = new VertexId[graph->nodes]; for (VertexId v=0; v<graph->nodes; v++) v_[gpu][v] = -1; for (VertexId v=0; v<problem->sub_graphs[gpu].nodes; v++) v_[gpu][problem->original_vertexes[gpu][v]] = v; } } util::Track_Results(graph, num_gpus, (VertexId)1, h_labels, reference_check_label, num_gpus > 1 ? problem->partition_tables[0] : NULL, v_); char file_name[512]; sprintf(file_name, "./eval/error_dump/error_%lld_%d.txt", (long long)time(NULL), gpu_idx[0]); util::Output_Errors(file_name, graph -> nodes, num_gpus, (VertexId)0, h_labels, reference_check_label, num_gpus > 1 ? problem->partition_tables[0] : NULL, v_); if (num_gpus > 1) { for (int gpu=0; gpu<num_gpus; gpu++) { delete[] v_[gpu]; v_[gpu] = NULL; } delete[] v_; v_=NULL; } } // display Solution if (!quiet_mode) { DisplaySolution<VertexId, SizeT, MARK_PREDECESSORS, ENABLE_IDEMPOTENCE> (h_labels, h_preds, graph->nodes, quiet_mode); } info->ComputeTraversalStats( // compute running statistics enactor->enactor_stats.GetPointer(), total_elapsed, h_labels); if (!quiet_mode) { printf("\n\tMemory Usage(B)\t"); for (int gpu = 0; gpu < num_gpus; gpu++) if (num_gpus > 1) { if (gpu != 0) { printf(" #keys%d,0\t #keys%d,1\t #ins%d,0\t #ins%d,1", gpu, gpu, gpu, gpu); } else { printf(" #keys%d,0\t #keys%d,1", gpu, gpu); } } else { printf(" #keys%d,0\t #keys%d,1", gpu, gpu); } if (num_gpus > 1) { printf(" #keys%d", num_gpus); } printf("\n"); double max_queue_sizing_[2] = {0, 0 }, max_in_sizing_ = 0; for (int gpu = 0; gpu < num_gpus; gpu++) { size_t gpu_free, dummy; hipSetDevice(gpu_idx[gpu]); hipMemGetInfo(&gpu_free, &dummy); printf("GPU_%d\t %ld", gpu_idx[gpu], org_size[gpu] - gpu_free); for (int i = 0; i < num_gpus; i++) { for (int j = 0; j < 2; j++) { SizeT x = problem->data_slices[gpu]->frontier_queues[i].keys[j].GetSize(); printf("\t %lld", (long long) x); double factor = 1.0 * x / (num_gpus > 1 ? 
problem->graph_slices[gpu]->in_counter[i] : problem->graph_slices[gpu]->nodes); if (factor > max_queue_sizing_[j]) { max_queue_sizing_[j] = factor; } } if (num_gpus > 1 && i != 0 ) { for (int t = 0; t < 2; t++) { SizeT x = problem->data_slices[gpu][0].keys_in[t][i].GetSize(); printf("\t %lld", (long long) x); double factor = 1.0 * x / problem->graph_slices[gpu]->in_counter[i]; if (factor > max_in_sizing_) { max_in_sizing_ = factor; } } } } if (num_gpus > 1) { printf("\t %lld", (long long)(problem->data_slices[gpu]->frontier_queues[num_gpus].keys[0].GetSize())); } printf("\n"); } printf("\t queue_sizing =\t %lf \t %lf", max_queue_sizing_[0], max_queue_sizing_[1]); if (num_gpus > 1) { printf("\t in_sizing =\t %lf", max_in_sizing_); } printf("\n"); } // Clean up if (org_size ) {delete[] org_size ; org_size = NULL;} if (enactor ) { if (retval = util::GRError(enactor -> Release(), "BFS Enactor Release failed", __FILE__, __LINE__)) return retval; delete enactor ; enactor = NULL; } if (problem ) { if (retval = util::GRError(problem -> Release(), "BFS Problem Release failed", __FILE__, __LINE__)) return retval; delete problem ; problem = NULL; } if (reference_labels) {delete[] reference_labels; reference_labels = NULL;} if (reference_preds ) {delete[] reference_preds ; reference_preds = NULL;} if (h_labels ) {delete[] h_labels ; h_labels = NULL;} cpu_timer.Stop(); info->info["postprocess_time"] = cpu_timer.ElapsedMillis(); if (h_preds ) { if (info->info["output_filename"].get_str() != "") { cpu_timer.Start(); std::ofstream fout; size_t buf_size = 1024 * 1024 * 16; char *fout_buf = new char[buf_size]; fout.rdbuf() -> pubsetbuf(fout_buf, buf_size); fout.open(info->info["output_filename"].get_str().c_str()); for (VertexId v=0; v<graph->nodes; v++) { if (v == src) fout<< v+1 << "," << v+1 << std::endl; // root node else if (h_preds[v] != -2) // valid pred fout<< v+1 << "," << h_preds[v]+1 << std::endl; } fout.close(); delete[] fout_buf; fout_buf = NULL; cpu_timer.Stop(); info->info["write_time"] = cpu_timer.ElapsedMillis(); } delete[] h_preds ; h_preds = NULL; } return retval; } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam MARK_PREDECESSORS * * @param[in] info Pointer to info contains parameters and statistics. * * \return hipError_t object which indicates the success of * all CUDA function calls. */ template < typename VertexId, typename SizeT, typename Value, bool MARK_PREDECESSORS > hipError_t RunTests_enable_idempotence(Info<VertexId, SizeT, Value> *info) { if (info->info["idempotent"].get_bool()) return RunTests <VertexId, SizeT, Value,/* INSTRUMENT, DEBUG, SIZE_CHECK,*/ MARK_PREDECESSORS, true > (info); else return RunTests <VertexId, SizeT, Value,/* INSTRUMENT, DEBUG, SIZE_CHECK,*/ MARK_PREDECESSORS, false> (info); } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] info Pointer to info contains parameters and statistics. * * \return hipError_t object which indicates the success of * all CUDA function calls. 
*/ template < typename VertexId, typename SizeT, typename Value> hipError_t RunTests_mark_predecessors(Info<VertexId, SizeT, Value> *info) { if (info->info["mark_predecessors"].get_bool()) return RunTests_enable_idempotence<VertexId, SizeT, Value, /*INSTRUMENT, DEBUG, SIZE_CHECK,*/ true> (info); else return RunTests_enable_idempotence<VertexId, SizeT, Value,/* INSTRUMENT, DEBUG, SIZE_CHECK,*/ false> (info); } /****************************************************************************** * Main ******************************************************************************/ template < typename VertexId, // use int as the vertex identifier typename SizeT , // use int as the graph size type typename Value > // use int as the value type int main_(CommandLineArgs *args) { CpuTimer cpu_timer, cpu_timer2; cpu_timer.Start(); //typedef int VertexId; // Use int as the vertex identifier //typedef int Value; // Use int as the value type //typedef long long SizeT; // Use int as the graph size type Csr<VertexId, SizeT, Value> csr(false); // CSR graph we process on Csr<VertexId, SizeT, Value> csc(false); // CSC graph we process on Info<VertexId, SizeT, Value> *info = new Info<VertexId, SizeT, Value>; // graph construction or generation related parameters info->info["undirected"] = args -> CheckCmdLineFlag("undirected"); cpu_timer2.Start(); info->Init("BFS", *args, csr, csc); // initialize Info structure cpu_timer2.Stop(); info->info["load_time"] = cpu_timer2.ElapsedMillis(); hipError_t retval = RunTests_mark_predecessors<VertexId, SizeT, Value>(info); // run test cpu_timer.Stop(); info->info["total_time"] = cpu_timer.ElapsedMillis(); if (!(info->info["quiet_mode"].get_bool())) { info->DisplayStats(); // display collected statistics } info->CollectInfo(); // collected all the info and put into JSON mObject return retval; } template < typename VertexId, // the vertex identifier type, usually int or long long typename SizeT > // the size tyep, usually int or long long int main_Value(CommandLineArgs *args) { // Value = VertexId for bfs return main_<VertexId, SizeT, VertexId>(args); // if (args -> CheckCmdLineFlag("64bit-Value")) // return main_<VertexId, SizeT, long long>(args); // else // return main_<VertexId, SizeT, int >(args); } template < typename VertexId> int main_SizeT(CommandLineArgs *args) { // can be disabled to reduce compile time if (args -> CheckCmdLineFlag("64bit-SizeT") || sizeof(VertexId) > 4) return main_Value<VertexId, long long>(args); else return main_Value<VertexId, int >(args); } int main_VertexId(CommandLineArgs *args) { // can be disabled to reduce compile time // atomicMin(long long) is only available for compute capability 3.5 or higher if (args -> CheckCmdLineFlag("64bit-VertexId")) //#if __GR_CUDA_ARCH__ <= 300 // { // printf("64bit-VertexId disabled, because atomicMin(long long) is only supported by compute capability 3.5 or higher\n"); // return 1; // } //#else return main_SizeT<long long>(args); //#endif else return main_SizeT<int >(args); } int main(int argc, char** argv) { CommandLineArgs args(argc, argv); int graph_args = argc - args.ParsedArgc() - 1; if (argc < 2 || graph_args < 1 || args.CheckCmdLineFlag("help")) { Usage(); return 1; } return main_VertexId(&args); } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
96aeb9c6af41a3c92a9f297a8d014084e57a317b.cu
// ---------------------------------------------------------------- // Gunrock -- Fast and Efficient GPU Graph Library // ---------------------------------------------------------------- // This source code is distributed under the terms of LICENSE.TXT // in the root directory of this source distribution. // ---------------------------------------------------------------- /** * @file * test_bfs.cu * * @brief Simple test driver program for breadth-first search. */ #include <stdio.h> #include <string> #include <deque> #include <vector> #include <algorithm> #include <iostream> #include <fstream> // Utilities and correctness-checking #include <gunrock/util/test_utils.cuh> #include <gunrock/util/track_utils.cuh> // BFS includes #include <gunrock/app/bfs/bfs_enactor.cuh> #include <gunrock/app/bfs/bfs_problem.cuh> #include <gunrock/app/bfs/bfs_functor.cuh> // Operator includes #include <gunrock/oprtr/advance/kernel.cuh> #include <gunrock/oprtr/filter/kernel.cuh> #include <moderngpu.cuh> using namespace gunrock; using namespace gunrock::app; using namespace gunrock::util; using namespace gunrock::oprtr; using namespace gunrock::app::bfs; /****************************************************************************** * Housekeeping Routines ******************************************************************************/ void Usage() { printf( "test <graph-type> [graph-type-arguments]\n" "Graph type and graph type arguments:\n" " market <matrix-market-file-name>\n" " Reads a Matrix-Market coordinate-formatted graph of\n" " directed/undirected edges from STDIN (or from the\n" " optionally-specified file).\n" " rmat (default: rmat_scale = 10, a = 0.57, b = c = 0.19)\n" " Generate R-MAT graph as input\n" " --rmat_scale=<vertex-scale>\n" " --rmat_nodes=<number-nodes>\n" " --rmat_edgefactor=<edge-factor>\n" " --rmat_edges=<number-edges>\n" " --rmat_a=<factor> --rmat_b=<factor> --rmat_c=<factor>\n" " --rmat_seed=<seed>\n" " rgg (default: rgg_scale = 10, rgg_thfactor = 0.55)\n" " Generate Random Geometry Graph as input\n" " --rgg_scale=<vertex-scale>\n" " --rgg_nodes=<number-nodes>\n" " --rgg_thfactor=<threshold-factor>\n" " --rgg_threshold=<threshold>\n" " --rgg_vmultipiler=<vmultipiler>\n" " --rgg_seed=<seed>\n\n" "Optional arguments:\n" "[--device=<device_index>] Set GPU(s) for testing (Default: 0).\n" "[--undirected] Treat the graph as undirected (symmetric).\n" "[--idempotence] Whether or not to enable idempotent operation.\n" "[--instrumented] Keep kernels statics [Default: Disable].\n" " total_queued, search_depth and barrier duty.\n" " (a relative indicator of load imbalance.)\n" "[--src=<Vertex-ID|largestdegree|randomize|randomize2|list>]\n" " Begins traversal from the source (Default: 0).\n" " If largestdegree: from largest degree vertex.\n" " If randomize: from a random source vertex.\n" " If randomize2: from a different random source vertex for each iteration.\n" " If list: need to provide a source list through --source_list=n0,n1,...,nk\n" "[--quick] Skip the CPU reference validation process.\n" "[--mark-pred] Keep both label info and predecessor info.\n" "[--disable-size-check] Disable frontier queue size check.\n" "[--grid-size=<grid size>] Maximum allowed grid size setting.\n" "[--queue-sizing=<factor>] Allocates a frontier queue sized at: \n" " (graph-edges * <factor>). (Default: 1.0)\n" "[--in-sizing=<in/out_queue_scale_factor>]\n" " Allocates a frontier queue sized at: \n" " (graph-edges * <factor>). 
(Default: 1.0)\n" "[--v] Print verbose per iteration debug info.\n" "[--iteration-num=<num>] Number of runs to perform the test.\n" "[--traversal-mode=<0|1>] Set traversal strategy, 0 for Load-Balanced\n" " 1 for Dynamic-Cooperative (Default: dynamic\n" " determine based on average degree).\n" "[--partition-method=<random|biasrandom|clustered|metis>]\n" " Choose partitioner (Default use random).\n" "[--quiet] No output (unless --json is specified).\n" "[--json] Output JSON-format statistics to STDOUT.\n" "[--jsonfile=<name>] Output JSON-format statistics to file <name>\n" "[--jsondir=<dir>] Output JSON-format statistics to <dir>/name,\n" " where name is auto-generated.\n" ); } /** * @brief Displays the BFS result (i.e., distance from source) * * @tparam VertexId * @tparam SizeT * @tparam MARK_PREDECESSORS * @tparam ENABLE_IDEMPOTENCE * * @param[in] labels Search depth from the source for each node. * @param[in] preds Predecessor node id for each node. * @param[in] num_nodes Number of nodes in the graph. * @param[in] quiet Don't print out anything to stdout */ template < typename VertexId, typename SizeT, bool MARK_PREDECESSORS, bool ENABLE_IDEMPOTENCE > void DisplaySolution( VertexId *labels, VertexId *preds, SizeT num_nodes, bool quiet = false) { if (quiet) { return; } // careful: if later code in this // function changes something, this // return is the wrong thing to do if (num_nodes > 40) { num_nodes = 40; } printf("\nFirst %lld labels of the GPU result:\n", (long long)num_nodes); printf("["); for (VertexId i = 0; i < num_nodes; ++i) { PrintValue(i); printf(":"); PrintValue(labels[i]); if (MARK_PREDECESSORS) //&& !ENABLE_IDEMPOTENCE) { printf(","); PrintValue(preds[i]); } printf(" "); } printf("]\n"); } /****************************************************************************** * BFS Testing Routines *****************************************************************************/ /** * @brief A simple CPU-based reference BFS ranking implementation. * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam MARK_PREDECESSORS * @tparam ENABLE_IDEMPOTENCE * * @param[in] graph Reference to the CSR graph we process on * @param[in] source_path Host-side vector to store CPU computed labels for each node * @param[in] predecessor Host-side vector to store CPU computed predecessor for each node * @param[in] src Source node where BFS starts * @param[in] quiet Don't print out anything to stdout */ template < typename VertexId, typename SizeT, typename Value, bool MARK_PREDECESSORS, bool ENABLE_IDEMPOTENCE > void ReferenceBFS( const Csr<VertexId, SizeT, Value> *graph, VertexId *source_path, VertexId *predecessor, VertexId src, bool quiet = false) { // Initialize labels for (VertexId i = 0; i < graph->nodes; ++i) { source_path[i] = /*ENABLE_IDEMPOTENCE ? 
-1 :*/ util::MaxValue<VertexId>(); if (MARK_PREDECESSORS) { predecessor[i] = util::InvalidValue<VertexId>(); } } source_path[src] = 0; VertexId search_depth = 0; // Initialize queue for managing previously-discovered nodes std::deque<VertexId> frontier; frontier.push_back(src); // Perform BFS CpuTimer cpu_timer; cpu_timer.Start(); while (!frontier.empty()) { // Dequeue node from frontier VertexId dequeued_node = frontier.front(); frontier.pop_front(); VertexId neighbor_dist = source_path[dequeued_node] + 1; // Locate adjacency list SizeT edges_begin = graph->row_offsets[dequeued_node]; SizeT edges_end = graph->row_offsets[dequeued_node + 1]; for (SizeT edge = edges_begin; edge < edges_end; ++edge) { //Lookup neighbor and enqueue if undiscovered VertexId neighbor = graph->column_indices[edge]; if (source_path[neighbor] > neighbor_dist) //|| source_path[neighbor] == -1) { source_path[neighbor] = neighbor_dist; if (MARK_PREDECESSORS) { predecessor[neighbor] = dequeued_node; } if (search_depth < neighbor_dist) { search_depth = neighbor_dist; } frontier.push_back(neighbor); } } } if (MARK_PREDECESSORS) { predecessor[src] = util::InvalidValue<VertexId>(); } cpu_timer.Stop(); float elapsed = cpu_timer.ElapsedMillis(); search_depth++; if (!quiet) { printf("CPU BFS finished in %lf msec. cpu_search_depth: %lld\n", elapsed, (long long)search_depth); } } /** * @brief Run BFS tests * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam MARK_PREDECESSORS * @tparam ENABLE_IDEMPOTENCE * * @param[in] info Pointer to info contains parameters and statistics. * * \return cudaError_t object which indicates the success of * all CUDA function calls. */ template < typename VertexId, typename SizeT, typename Value, bool MARK_PREDECESSORS, bool ENABLE_IDEMPOTENCE > cudaError_t RunTests(Info<VertexId, SizeT, Value> *info) { typedef BFSProblem < VertexId, SizeT, Value, MARK_PREDECESSORS, ENABLE_IDEMPOTENCE> //(MARK_PREDECESSORS && ENABLE_IDEMPOTENCE) > Problem; // does not use double buffer typedef BFSEnactor < Problem> //INSTRUMENT, //DEBUG, //SIZE_CHECK > Enactor; // parse configurations from mObject info Csr<VertexId, SizeT, Value> *graph = info->csr_ptr; Csr<VertexId, SizeT, Value> *inv_graph = info->csc_ptr; VertexId src = info->info["source_vertex" ].get_int64(); int max_grid_size = info->info["max_grid_size" ].get_int (); int num_gpus = info->info["num_gpus" ].get_int (); double max_queue_sizing = info->info["max_queue_sizing" ].get_real (); double max_queue_sizing1 = info->info["max_queue_sizing1" ].get_real (); double max_in_sizing = info->info["max_in_sizing" ].get_real (); std::string partition_method = info->info["partition_method" ].get_str (); double partition_factor = info->info["partition_factor" ].get_real (); int partition_seed = info->info["partition_seed" ].get_int (); bool quiet_mode = info->info["quiet_mode" ].get_bool (); bool quick_mode = info->info["quick_mode" ].get_bool (); bool stream_from_host = info->info["stream_from_host" ].get_bool (); std::string traversal_mode = info->info["traversal_mode" ].get_str (); bool instrument = info->info["instrument" ].get_bool (); bool debug = info->info["debug_mode" ].get_bool (); bool size_check = info->info["size_check" ].get_bool (); int iterations = info->info["num_iteration" ].get_int (); std::string src_type = info->info["source_type" ].get_str (); int src_seed = info->info["source_seed" ].get_int (); int communicate_latency = info->info["communicate_latency"].get_int (); float communicate_multipy = 
info->info["communicate_multipy"].get_real(); int expand_latency = info->info["expand_latency" ].get_int (); int subqueue_latency = info->info["subqueue_latency" ].get_int (); int fullqueue_latency = info->info["fullqueue_latency" ].get_int (); int makeout_latency = info->info["makeout_latency" ].get_int (); bool direction_optimized = info->info["direction_optimized"].get_bool(); float do_a = info->info["do_a" ].get_real(); float do_b = info->info["do_b" ].get_real(); bool undirected = info->info["undirected" ].get_bool(); if (max_queue_sizing < 0) max_queue_sizing = 6.5; if (max_in_sizing < 0) max_in_sizing = 4; if (communicate_multipy > 1) max_in_sizing *= communicate_multipy; CpuTimer cpu_timer; cudaError_t retval = cudaSuccess; cpu_timer.Start(); json_spirit::mArray device_list = info->info["device_list"].get_array(); int* gpu_idx = new int[num_gpus]; for (int i = 0; i < num_gpus; i++) gpu_idx[i] = device_list[i].get_int(); // TODO: remove after merge mgpu-cq ContextPtr *context = (ContextPtr*) info->context; cudaStream_t *streams = (cudaStream_t*)info->streams; // allocate host-side label array (for both reference and GPU results) VertexId *reference_labels = new VertexId[graph->nodes]; VertexId *reference_preds = new VertexId[graph->nodes]; VertexId *h_labels = new VertexId[graph->nodes]; VertexId *reference_check_label = (quick_mode) ? NULL : reference_labels; VertexId *reference_check_preds = NULL; VertexId *h_preds = NULL; if (MARK_PREDECESSORS) { h_preds = new VertexId[graph->nodes]; if (!quick_mode) { reference_check_preds = reference_preds; } } size_t *org_size = new size_t[num_gpus]; for (int gpu = 0; gpu < num_gpus; gpu++) { size_t dummy; if (retval = util::SetDevice(gpu_idx[gpu])) return retval; if (retval = util::GRError( cudaMemGetInfo(&(org_size[gpu]), &dummy), "cudaMemGetInfo failed", __FILE__, __LINE__)) return retval; } Problem* problem = new Problem(direction_optimized, undirected); // allocate problem on GPU if (retval = util::GRError(problem->Init( stream_from_host, graph, inv_graph, num_gpus, gpu_idx, partition_method, streams, max_queue_sizing, max_in_sizing, partition_factor, partition_seed), "BFS Problem Init failed", __FILE__, __LINE__)) return retval; Enactor* enactor = new Enactor( num_gpus, gpu_idx, instrument, debug, size_check, direction_optimized); // enactor map if (retval = util::GRError(enactor->Init( context, problem, max_grid_size, traversal_mode), "BFS Enactor Init failed", __FILE__, __LINE__)) return retval; enactor -> communicate_latency = communicate_latency; enactor -> communicate_multipy = communicate_multipy; enactor -> expand_latency = expand_latency; enactor -> subqueue_latency = subqueue_latency; enactor -> fullqueue_latency = fullqueue_latency; enactor -> makeout_latency = makeout_latency; enactor -> do_a = do_a; enactor -> do_b = do_b; if (retval = util::SetDevice(gpu_idx[0])) return retval; if (retval = util::latency::Test( streams[0], problem -> data_slices[0] -> latency_data, communicate_latency, communicate_multipy, expand_latency, subqueue_latency, fullqueue_latency, makeout_latency)) return retval; cpu_timer.Stop(); info -> info["preprocess_time"] = cpu_timer.ElapsedMillis(); // perform BFS double total_elapsed = 0.0; double single_elapsed = 0.0; double max_elapsed = 0.0; double min_elapsed = 1e10; json_spirit::mArray process_times; if (src_type == "random2") { if (src_seed == -1) src_seed = time(NULL); if (!quiet_mode) printf("src_seed = %d\n", src_seed); srand(src_seed); } if (!quiet_mode) printf("Using traversal-mode %s\n", 
traversal_mode.c_str()); json_spirit::mArray source_list; if (src_type == "list") source_list = info->info["source_list"].get_array(); for (int iter = 0; iter < iterations; ++iter) { if (src_type == "random2") { bool src_valid = false; while (!src_valid) { src = rand() % graph -> nodes; if (graph -> row_offsets[src] != graph -> row_offsets[src+1]) src_valid = true; } } else if (src_type == "list") { if (source_list.size() == 0) { if (!quiet_mode) printf("No source list found. Use 0 as source.\n"); src = 0; } else { src = source_list[iter].get_int(); } } if (retval = util::GRError(problem->Reset( src, enactor->GetFrontierType(), max_queue_sizing, max_queue_sizing1), "BFS Problem Reset failed", __FILE__, __LINE__)) return retval; if (retval = util::GRError(enactor->Reset(), "BFS Enactor Reset failed", __FILE__, __LINE__)) return retval; for (int gpu = 0; gpu < num_gpus; gpu++) { if (retval = util::SetDevice(gpu_idx[gpu])) return retval; if (retval = util::GRError(cudaDeviceSynchronize(), "cudaDeviceSynchronize failed", __FILE__, __LINE__)) return retval; } if (!quiet_mode) { printf("__________________________\n"); fflush(stdout); } cpu_timer.Start(); if (retval = util::GRError(enactor->Enact(src, traversal_mode), "BFS Enact failed", __FILE__, __LINE__)) return retval; cpu_timer.Stop(); single_elapsed = cpu_timer.ElapsedMillis(); total_elapsed += single_elapsed; process_times.push_back(single_elapsed); if (single_elapsed > max_elapsed) max_elapsed = single_elapsed; if (single_elapsed < min_elapsed) min_elapsed = single_elapsed; if (!quiet_mode) { printf("--------------------------\n" "iteration %d elapsed: %lf ms, src = %lld, #iteration = %lld\n", iter, single_elapsed, (long long)src, (long long)enactor -> enactor_stats -> iteration); fflush(stdout); } } total_elapsed /= iterations; info -> info["process_times"] = process_times; info -> info["min_process_time"] = min_elapsed; info -> info["max_process_time"] = max_elapsed; // compute reference CPU BFS solution for source-distance if (!quick_mode) { if (!quiet_mode) { printf("Computing reference value ...\n"); } ReferenceBFS<VertexId, SizeT, Value, MARK_PREDECESSORS, ENABLE_IDEMPOTENCE>( graph, reference_check_label, reference_check_preds, src, quiet_mode); if (!quiet_mode) { printf("\n"); } } cpu_timer.Start(); // copy out results if (retval = util::GRError(problem->Extract(h_labels, h_preds), "BFS Problem Extraction failed", __FILE__, __LINE__)) return retval; // verify the result if ((!quick_mode) && (!quiet_mode)) { printf("Label Validity: "); int num_errors = CompareResults( h_labels, reference_check_label, graph->nodes, true, quiet_mode); if (num_errors > 0) { printf("%d errors occurred.", num_errors); } printf("\n"); if (MARK_PREDECESSORS) { printf("Predecessor Validity: \n"); num_errors = 0; #pragma omp parallel for for (VertexId v=0; v<graph->nodes; v++) { if (h_labels[v] == /*(ENABLE_IDEMPOTENCE ? 
-1 :*/ util::MaxValue<VertexId>()) continue; // unvisited vertex if (v == src && h_preds[v] == util::InvalidValue<VertexId>()) continue; // source vertex VertexId pred = h_preds[v]; if (pred >= graph->nodes || pred < 0) { if (num_errors == 0) printf("INCORRECT: pred[%lld] : %lld out of bound\n", (long long)v, (long long)pred); #pragma omp atomic num_errors ++; continue; } if (h_labels[v] != h_labels[pred] + 1) { if (num_errors == 0) printf("INCORRECT: label[%lld] (%lld) != label[%lld] (%lld) + 1\n", (long long)v, (long long)h_labels[v], (long long)pred, (long long)h_labels[pred]); #pragma omp atomic num_errors ++; continue; } bool v_found = false; for (SizeT t = graph->row_offsets[pred]; t < graph->row_offsets[pred+1]; t++) if (v == graph->column_indices[t]) { v_found = true; break; } if (!v_found) { if (num_errors == 0) printf("INCORRECT: Vertex %lld not in Vertex %lld's neighbor list\n", (long long)v, (long long)pred); #pragma omp atomic num_errors ++; continue; } } if (num_errors > 0) { printf("%d errors occurred.", num_errors); } else printf("CORRECT"); printf("\n"); } } if (!quick_mode && TO_TRACK) { VertexId **v_ = NULL; if (num_gpus > 1) { v_ = new VertexId*[num_gpus]; for (int gpu=0; gpu<num_gpus; gpu++) { v_[gpu] = new VertexId[graph->nodes]; for (VertexId v=0; v<graph->nodes; v++) v_[gpu][v] = -1; for (VertexId v=0; v<problem->sub_graphs[gpu].nodes; v++) v_[gpu][problem->original_vertexes[gpu][v]] = v; } } util::Track_Results(graph, num_gpus, (VertexId)1, h_labels, reference_check_label, num_gpus > 1 ? problem->partition_tables[0] : NULL, v_); char file_name[512]; sprintf(file_name, "./eval/error_dump/error_%lld_%d.txt", (long long)time(NULL), gpu_idx[0]); util::Output_Errors(file_name, graph -> nodes, num_gpus, (VertexId)0, h_labels, reference_check_label, num_gpus > 1 ? problem->partition_tables[0] : NULL, v_); if (num_gpus > 1) { for (int gpu=0; gpu<num_gpus; gpu++) { delete[] v_[gpu]; v_[gpu] = NULL; } delete[] v_; v_=NULL; } } // display Solution if (!quiet_mode) { DisplaySolution<VertexId, SizeT, MARK_PREDECESSORS, ENABLE_IDEMPOTENCE> (h_labels, h_preds, graph->nodes, quiet_mode); } info->ComputeTraversalStats( // compute running statistics enactor->enactor_stats.GetPointer(), total_elapsed, h_labels); if (!quiet_mode) { printf("\n\tMemory Usage(B)\t"); for (int gpu = 0; gpu < num_gpus; gpu++) if (num_gpus > 1) { if (gpu != 0) { printf(" #keys%d,0\t #keys%d,1\t #ins%d,0\t #ins%d,1", gpu, gpu, gpu, gpu); } else { printf(" #keys%d,0\t #keys%d,1", gpu, gpu); } } else { printf(" #keys%d,0\t #keys%d,1", gpu, gpu); } if (num_gpus > 1) { printf(" #keys%d", num_gpus); } printf("\n"); double max_queue_sizing_[2] = {0, 0 }, max_in_sizing_ = 0; for (int gpu = 0; gpu < num_gpus; gpu++) { size_t gpu_free, dummy; cudaSetDevice(gpu_idx[gpu]); cudaMemGetInfo(&gpu_free, &dummy); printf("GPU_%d\t %ld", gpu_idx[gpu], org_size[gpu] - gpu_free); for (int i = 0; i < num_gpus; i++) { for (int j = 0; j < 2; j++) { SizeT x = problem->data_slices[gpu]->frontier_queues[i].keys[j].GetSize(); printf("\t %lld", (long long) x); double factor = 1.0 * x / (num_gpus > 1 ? 
problem->graph_slices[gpu]->in_counter[i] : problem->graph_slices[gpu]->nodes); if (factor > max_queue_sizing_[j]) { max_queue_sizing_[j] = factor; } } if (num_gpus > 1 && i != 0 ) { for (int t = 0; t < 2; t++) { SizeT x = problem->data_slices[gpu][0].keys_in[t][i].GetSize(); printf("\t %lld", (long long) x); double factor = 1.0 * x / problem->graph_slices[gpu]->in_counter[i]; if (factor > max_in_sizing_) { max_in_sizing_ = factor; } } } } if (num_gpus > 1) { printf("\t %lld", (long long)(problem->data_slices[gpu]->frontier_queues[num_gpus].keys[0].GetSize())); } printf("\n"); } printf("\t queue_sizing =\t %lf \t %lf", max_queue_sizing_[0], max_queue_sizing_[1]); if (num_gpus > 1) { printf("\t in_sizing =\t %lf", max_in_sizing_); } printf("\n"); } // Clean up if (org_size ) {delete[] org_size ; org_size = NULL;} if (enactor ) { if (retval = util::GRError(enactor -> Release(), "BFS Enactor Release failed", __FILE__, __LINE__)) return retval; delete enactor ; enactor = NULL; } if (problem ) { if (retval = util::GRError(problem -> Release(), "BFS Problem Release failed", __FILE__, __LINE__)) return retval; delete problem ; problem = NULL; } if (reference_labels) {delete[] reference_labels; reference_labels = NULL;} if (reference_preds ) {delete[] reference_preds ; reference_preds = NULL;} if (h_labels ) {delete[] h_labels ; h_labels = NULL;} cpu_timer.Stop(); info->info["postprocess_time"] = cpu_timer.ElapsedMillis(); if (h_preds ) { if (info->info["output_filename"].get_str() != "") { cpu_timer.Start(); std::ofstream fout; size_t buf_size = 1024 * 1024 * 16; char *fout_buf = new char[buf_size]; fout.rdbuf() -> pubsetbuf(fout_buf, buf_size); fout.open(info->info["output_filename"].get_str().c_str()); for (VertexId v=0; v<graph->nodes; v++) { if (v == src) fout<< v+1 << "," << v+1 << std::endl; // root node else if (h_preds[v] != -2) // valid pred fout<< v+1 << "," << h_preds[v]+1 << std::endl; } fout.close(); delete[] fout_buf; fout_buf = NULL; cpu_timer.Stop(); info->info["write_time"] = cpu_timer.ElapsedMillis(); } delete[] h_preds ; h_preds = NULL; } return retval; } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * @tparam MARK_PREDECESSORS * * @param[in] info Pointer to info contains parameters and statistics. * * \return cudaError_t object which indicates the success of * all CUDA function calls. */ template < typename VertexId, typename SizeT, typename Value, bool MARK_PREDECESSORS > cudaError_t RunTests_enable_idempotence(Info<VertexId, SizeT, Value> *info) { if (info->info["idempotent"].get_bool()) return RunTests <VertexId, SizeT, Value,/* INSTRUMENT, DEBUG, SIZE_CHECK,*/ MARK_PREDECESSORS, true > (info); else return RunTests <VertexId, SizeT, Value,/* INSTRUMENT, DEBUG, SIZE_CHECK,*/ MARK_PREDECESSORS, false> (info); } /** * @brief RunTests entry * * @tparam VertexId * @tparam Value * @tparam SizeT * * @param[in] info Pointer to info contains parameters and statistics. * * \return cudaError_t object which indicates the success of * all CUDA function calls. 
*/ template < typename VertexId, typename SizeT, typename Value> cudaError_t RunTests_mark_predecessors(Info<VertexId, SizeT, Value> *info) { if (info->info["mark_predecessors"].get_bool()) return RunTests_enable_idempotence<VertexId, SizeT, Value, /*INSTRUMENT, DEBUG, SIZE_CHECK,*/ true> (info); else return RunTests_enable_idempotence<VertexId, SizeT, Value,/* INSTRUMENT, DEBUG, SIZE_CHECK,*/ false> (info); } /****************************************************************************** * Main ******************************************************************************/ template < typename VertexId, // use int as the vertex identifier typename SizeT , // use int as the graph size type typename Value > // use int as the value type int main_(CommandLineArgs *args) { CpuTimer cpu_timer, cpu_timer2; cpu_timer.Start(); //typedef int VertexId; // Use int as the vertex identifier //typedef int Value; // Use int as the value type //typedef long long SizeT; // Use int as the graph size type Csr<VertexId, SizeT, Value> csr(false); // CSR graph we process on Csr<VertexId, SizeT, Value> csc(false); // CSC graph we process on Info<VertexId, SizeT, Value> *info = new Info<VertexId, SizeT, Value>; // graph construction or generation related parameters info->info["undirected"] = args -> CheckCmdLineFlag("undirected"); cpu_timer2.Start(); info->Init("BFS", *args, csr, csc); // initialize Info structure cpu_timer2.Stop(); info->info["load_time"] = cpu_timer2.ElapsedMillis(); cudaError_t retval = RunTests_mark_predecessors<VertexId, SizeT, Value>(info); // run test cpu_timer.Stop(); info->info["total_time"] = cpu_timer.ElapsedMillis(); if (!(info->info["quiet_mode"].get_bool())) { info->DisplayStats(); // display collected statistics } info->CollectInfo(); // collected all the info and put into JSON mObject return retval; } template < typename VertexId, // the vertex identifier type, usually int or long long typename SizeT > // the size tyep, usually int or long long int main_Value(CommandLineArgs *args) { // Value = VertexId for bfs return main_<VertexId, SizeT, VertexId>(args); // if (args -> CheckCmdLineFlag("64bit-Value")) // return main_<VertexId, SizeT, long long>(args); // else // return main_<VertexId, SizeT, int >(args); } template < typename VertexId> int main_SizeT(CommandLineArgs *args) { // can be disabled to reduce compile time if (args -> CheckCmdLineFlag("64bit-SizeT") || sizeof(VertexId) > 4) return main_Value<VertexId, long long>(args); else return main_Value<VertexId, int >(args); } int main_VertexId(CommandLineArgs *args) { // can be disabled to reduce compile time // atomicMin(long long) is only available for compute capability 3.5 or higher if (args -> CheckCmdLineFlag("64bit-VertexId")) //#if __GR_CUDA_ARCH__ <= 300 // { // printf("64bit-VertexId disabled, because atomicMin(long long) is only supported by compute capability 3.5 or higher\n"); // return 1; // } //#else return main_SizeT<long long>(args); //#endif else return main_SizeT<int >(args); } int main(int argc, char** argv) { CommandLineArgs args(argc, argv); int graph_args = argc - args.ParsedArgc() - 1; if (argc < 2 || graph_args < 1 || args.CheckCmdLineFlag("help")) { Usage(); return 1; } return main_VertexId(&args); } // Leave this at the end of the file // Local Variables: // mode:c++ // c-file-style: "NVIDIA" // End:
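// ---------------------------------------------------------------------------
// Editorial sketch (not part of the Gunrock sources): ReferenceBFS above
// assumes a CSR graph, where row_offsets[v] .. row_offsets[v+1] bounds vertex
// v's slice of column_indices. A minimal, self-contained host version of the
// same relaxation loop on a toy graph; all names here are illustrative.
#include <climits>
#include <cstdio>
#include <deque>
#include <vector>

static void csr_bfs(const std::vector<int> &row_offsets,
                    const std::vector<int> &column_indices,
                    int src, std::vector<int> &label)
{
    label.assign(row_offsets.size() - 1, INT_MAX);  // unvisited = "infinity"
    label[src] = 0;
    std::deque<int> frontier;
    frontier.push_back(src);
    while (!frontier.empty()) {
        int u = frontier.front();
        frontier.pop_front();
        for (int e = row_offsets[u]; e < row_offsets[u + 1]; ++e) {
            int v = column_indices[e];
            if (label[v] > label[u] + 1) {  // same relaxation as ReferenceBFS
                label[v] = label[u] + 1;
                frontier.push_back(v);
            }
        }
    }
}

int main()
{
    // undirected path graph 0 - 1 - 2 - 3 in CSR form
    std::vector<int> row_offsets    = {0, 1, 3, 5, 6};
    std::vector<int> column_indices = {1, 0, 2, 1, 3, 2};
    std::vector<int> label;
    csr_bfs(row_offsets, column_indices, 0, label);
    for (size_t v = 0; v < label.size(); ++v)
        printf("label[%zu] = %d\n", v, label[v]);   // prints 0, 1, 2, 3
    return 0;
}
// ---------------------------------------------------------------------------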
4ae7adebe60e599859db4d1e412232ee86fe11d3.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "rotateCuda.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const uint8_t *in = NULL; hipMalloc(&in, XSIZE*YSIZE); uint32_t rowSizeIn = XSIZE*YSIZE; uint8_t *out = NULL; hipMalloc(&out, XSIZE*YSIZE); uint32_t rowSizeOut = XSIZE*YSIZE; float inXStart = 1; float inYStart = 1; uint32_t width = XSIZE; uint32_t height = YSIZE; float cosAngle = 1; float sinAngle = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( rotateCuda), dim3(gridBlock),dim3(threadBlock), 0, 0, in,rowSizeIn,out,rowSizeOut,inXStart,inYStart,width,height,cosAngle,sinAngle); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( rotateCuda), dim3(gridBlock),dim3(threadBlock), 0, 0, in,rowSizeIn,out,rowSizeOut,inXStart,inYStart,width,height,cosAngle,sinAngle); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( rotateCuda), dim3(gridBlock),dim3(threadBlock), 0, 0, in,rowSizeIn,out,rowSizeOut,inXStart,inYStart,width,height,cosAngle,sinAngle); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
4ae7adebe60e599859db4d1e412232ee86fe11d3.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "rotateCuda.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            const uint8_t *in = NULL;
            cudaMalloc(&in, XSIZE*YSIZE);
            uint32_t rowSizeIn = XSIZE*YSIZE;
            uint8_t *out = NULL;
            cudaMalloc(&out, XSIZE*YSIZE);
            uint32_t rowSizeOut = XSIZE*YSIZE;
            float inXStart = 1;
            float inYStart = 1;
            uint32_t width = XSIZE;
            uint32_t height = YSIZE;
            float cosAngle = 1;
            float sinAngle = 1;
            // round the launch domain up to a multiple of the block size
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            rotateCuda<<<gridBlock,threadBlock>>>(in, rowSizeIn, out, rowSizeOut, inXStart, inYStart, width, height, cosAngle, sinAngle);
            cudaDeviceSynchronize();
            // warm-up launches
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                rotateCuda<<<gridBlock,threadBlock>>>(in, rowSizeIn, out, rowSizeOut, inXStart, inYStart, width, height, cosAngle, sinAngle);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                rotateCuda<<<gridBlock,threadBlock>>>(in, rowSizeIn, out, rowSizeOut, inXStart, inYStart, width, height, cosAngle, sinAngle);
            }
            cudaDeviceSynchronize();  // drain the stream so the timer covers kernel execution, not just launch overhead
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
        }
    }
}
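Both copies of the benchmark above pad iXSIZE/iYSIZE up to a block-size multiple with increment loops before dividing. A sketch (not part of the generated files) of the equivalent ceiling-division idiom that computes the same grid directly:

#include <cuda_runtime.h>

// Smallest number of blocks of size `block` that covers `n` elements.
static inline unsigned int divUp(unsigned int n, unsigned int block)
{
    return (n + block - 1) / block;
}

// Hypothetical usage with one of the benchmark's configurations:
// a 1016x1016 image with 16x16 thread blocks gives a 64x64 grid.
// dim3 threadBlock(16, 16);
// dim3 gridBlock(divUp(1016, 16), divUp(1016, 16));

Either way the grid may overshoot the image, so a kernel like rotateCuda is expected to bounds-check its x/y coordinates against width and height.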
6911df939ef10ed2d05bcc31311201bca8ba806b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------- // CUDPP -- CUDA Data Parallel Primitives library // ------------------------------------------------------------- // $Revision$ // $Date$ // ------------------------------------------------------------- // This source code is distributed under the terms of license.txt // in the root directory of this source distribution. // ------------------------------------------------------------- /** * @file * radixsort_app.cu * * @brief CUDPP application-level radix sorting routines */ /** @addtogroup cudpp_app * @{ */ /** @name RadixSort Functions * @{ */ #include "cudpp.h" #include "cudpp_util.h" #include "cudpp_radixsort.h" #include "cudpp_scan.h" #include "kernel/radixsort_kernel.cu" #include <cutil.h> #include <cstdlib> #include <cstdio> #include <assert.h> typedef unsigned int uint; /** @brief Perform one step of the radix sort. Sorts by nbits key bits per step, * starting at startbit. * * Uses cudppScanDispatch() for the prefix sum of radix counters. * * @param[in,out] keys Keys to be sorted. * @param[in,out] values Associated values to be sorted (through keys). * @param[in] plan Configuration information for RadixSort. * @param[in] numElements Number of elements in the sort. **/ template<uint nbits, uint startbit, bool flip, bool unflip> void radixSortStep(uint *keys, uint *values, const CUDPPRadixSortPlan *plan, uint numElements) { const uint eltsPerBlock = SORT_CTA_SIZE * 4; const uint eltsPerBlock2 = SORT_CTA_SIZE * 2; bool fullBlocks = ((numElements % eltsPerBlock) == 0); uint numBlocks = (fullBlocks) ? (numElements / eltsPerBlock) : (numElements / eltsPerBlock + 1); uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ? (numElements / eltsPerBlock2) : (numElements / eltsPerBlock2 + 1); bool loop = numBlocks > 65535; uint blocks = loop ? 65535 : numBlocks; uint blocksFind = loop ? 65535 : numBlocks2; uint blocksReorder = loop ? 65535 : numBlocks2; uint threshold = fullBlocks ? plan->m_persistentCTAThresholdFullBlocks[0] : plan->m_persistentCTAThreshold[0]; bool persist = plan->m_bUsePersistentCTAs && (numElements >= threshold); if (persist) { loop = (numElements > 262144) || (numElements >= 32768 && numElements < 65536); blocks = numBlocks; blocksFind = numBlocks2; blocksReorder = numBlocks2; // Run an empty kernel -- this seems to reset some of the CTA scheduling hardware // on GT200, resulting in better scheduling and lower run times if (startbit > 0) { hipLaunchKernelGGL(( emptyKernel), dim3(numCTAs(emptyKernel)), dim3(SORT_CTA_SIZE), 0, 0, ); } } if (fullBlocks) { if (loop) { if (persist) { blocks = flip? numCTAs(radixSortBlocks<4, 0, true, true, true>) : numCTAs(radixSortBlocks<4, 0, true, false, true>); } hipLaunchKernelGGL(( radixSortBlocks<nbits, startbit, true, flip, true>) , dim3(blocks), dim3(SORT_CTA_SIZE), 4 * SORT_CTA_SIZE * sizeof(uint), 0, (uint4*)plan->m_tempKeys, (uint4*)plan->m_tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks); } else { hipLaunchKernelGGL(( radixSortBlocks<nbits, startbit, true, flip, false>) , dim3(blocks), dim3(SORT_CTA_SIZE), 4 * SORT_CTA_SIZE * sizeof(uint), 0, (uint4*)plan->m_tempKeys, (uint4*)plan->m_tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks); } } else { if (loop) { if (persist) { blocks = flip ? 
numCTAs(radixSortBlocks<4, 0, false, true, true>) : numCTAs(radixSortBlocks<4, 0, false, false, true>); } hipLaunchKernelGGL(( radixSortBlocks<nbits, startbit, false, flip, true>) , dim3(blocks), dim3(SORT_CTA_SIZE), 4 * SORT_CTA_SIZE * sizeof(uint), 0, (uint4*)plan->m_tempKeys, (uint4*)plan->m_tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks); } else { hipLaunchKernelGGL(( radixSortBlocks<nbits, startbit, false, flip, false>) , dim3(blocks), dim3(SORT_CTA_SIZE), 4 * SORT_CTA_SIZE * sizeof(uint), 0, (uint4*)plan->m_tempKeys, (uint4*)plan->m_tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks); } } CUT_CHECK_ERROR("radixSortBlocks"); if (fullBlocks) { if (loop) { if (persist) { blocksFind = numCTAs(findRadixOffsets<0, true, true>); } hipLaunchKernelGGL(( findRadixOffsets<startbit, true, true>) , dim3(blocksFind), dim3(SORT_CTA_SIZE), 3 * SORT_CTA_SIZE * sizeof(uint), 0, (uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } else { hipLaunchKernelGGL(( findRadixOffsets<startbit, true, false>) , dim3(blocksFind), dim3(SORT_CTA_SIZE), 3 * SORT_CTA_SIZE * sizeof(uint), 0, (uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } } else { if (loop) { if (persist) { blocksFind = numCTAs(findRadixOffsets<0, false, true>); } hipLaunchKernelGGL(( findRadixOffsets<startbit, false, true>) , dim3(blocksFind), dim3(SORT_CTA_SIZE), 3 * SORT_CTA_SIZE * sizeof(uint), 0, (uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } else { hipLaunchKernelGGL(( findRadixOffsets<startbit, false, false>) , dim3(blocksFind), dim3(SORT_CTA_SIZE), 3 * SORT_CTA_SIZE * sizeof(uint), 0, (uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } } CUT_CHECK_ERROR("findRadixOffsets"); cudppScanDispatch(plan->m_countersSum, plan->m_counters, 16*numBlocks2, 1, 0, plan->m_scanPlan); if (fullBlocks) { if (plan->m_bManualCoalesce) { if (loop) { if (persist) { blocksReorder = unflip ? numCTAs(reorderData<0, true, true, true, true>) : numCTAs(reorderData<0, true, true, false, true>); } hipLaunchKernelGGL(( reorderData<startbit, true, true, unflip, true>) , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0, keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else { hipLaunchKernelGGL(( reorderData<startbit, true, true, unflip, false>) , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0, keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } } else { if (loop) { if (persist) { blocksReorder = unflip ? numCTAs(reorderData<0, true, false, true, true>) : numCTAs(reorderData<0, true, false, false, true>); } hipLaunchKernelGGL(( reorderData<startbit, true, false, unflip, true>) , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0, keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else { hipLaunchKernelGGL(( reorderData<startbit, true, false, unflip, false>) , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0, keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } } } else { if (plan->m_bManualCoalesce) { if (loop) { if (persist) { blocksReorder = unflip ? 
numCTAs(reorderData<0, false, true, true, true>) : numCTAs(reorderData<0, false, true, false, true>); } hipLaunchKernelGGL(( reorderData<startbit, false, true, unflip, true>) , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0, keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else { hipLaunchKernelGGL(( reorderData<startbit, false, true, unflip, false>) , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0, keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } } else { if (loop) { if (persist) { blocksReorder = unflip ? numCTAs(reorderData<0, false, false, true, true>) : numCTAs(reorderData<0, false, false, false, true>); } hipLaunchKernelGGL(( reorderData<startbit, false, false, unflip, true>) , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0, keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else { hipLaunchKernelGGL(( reorderData<startbit, false, false, unflip, false>) , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0, keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } } } CUT_CHECK_ERROR("radixSortStep"); } /** * @brief Single-block optimization for sorts of fewer than 4 * CTA_SIZE elements * * @param[in,out] keys Keys to be sorted. * @param[in,out] values Associated values to be sorted (through keys). * @param numElements Number of elements in the sort. **/ template <bool flip> void radixSortSingleBlock(uint *keys, uint *values, uint numElements) { bool fullBlocks = (numElements % (SORT_CTA_SIZE * 4) == 0); if (fullBlocks) { hipLaunchKernelGGL(( radixSortBlocks<32, 0, true, flip, false>) , dim3(1), dim3(SORT_CTA_SIZE), 4 * SORT_CTA_SIZE * sizeof(uint), 0, (uint4*)keys, (uint4*)values, (uint4*)keys, (uint4*)values, numElements, 0); } else { hipLaunchKernelGGL(( radixSortBlocks<32, 0, false, flip, false>) , dim3(1), dim3(SORT_CTA_SIZE), 4 * SORT_CTA_SIZE * sizeof(uint), 0, (uint4*)keys, (uint4*)values, (uint4*)keys, (uint4*)values, numElements, 0); } if (flip)hipLaunchKernelGGL(( unflipFloats), dim3(1), dim3(SORT_CTA_SIZE), 0, 0, keys, numElements); CUT_CHECK_ERROR("radixSortSingleBlock"); } /** * @brief Main radix sort function * * Main radix sort function. Sorts in place in the keys and values arrays, * but uses the other device arrays as temporary storage. All pointer * parameters are device pointers. Uses cudppScan() for the prefix sum of * radix counters. * * @param[in,out] keys Keys to be sorted. * @param[in,out] values Associated values to be sorted (through keys). * @param[in] plan Configuration information for RadixSort. * @param[in] numElements Number of elements in the sort. * @param[in] flipBits Is set true if key datatype is a float * (neg. numbers) for special float sorting operations. 
* @param[in] keyBits Number of interesting bits in the key **/ void radixSort(uint *keys, uint* values, const CUDPPRadixSortPlan *plan, size_t numElements, bool flipBits, int keyBits) { if(numElements <= WARP_SIZE) { if (flipBits) hipLaunchKernelGGL(( radixSortSingleWarp<true>), dim3(1), dim3(numElements), 0, 0, keys, values, numElements); else hipLaunchKernelGGL(( radixSortSingleWarp<false>), dim3(1), dim3(numElements), 0, 0, keys, values, numElements); CUT_CHECK_ERROR("radixSortSingleWarp"); return; } #ifdef __DEVICE_EMULATION__ printf("bits: %d\n", keyBits); #endif if(numElements <= SORT_CTA_SIZE * 4) { if (flipBits) radixSortSingleBlock<true>(keys, values, numElements); else radixSortSingleBlock<false>(keys, values, numElements); return; } // flip float bits on the first pass, unflip on the last pass if (flipBits) { radixSortStep<4, 0, true, false> (keys, values, plan, numElements); } else { radixSortStep<4, 0, false, false> (keys, values, plan, numElements); } if (keyBits > 4) { radixSortStep<4, 4, false, false> (keys, values, plan, numElements); } if (keyBits > 8) { radixSortStep<4, 8, false, false> (keys, values, plan, numElements); } if (keyBits > 12) { radixSortStep<4, 12, false, false> (keys, values, plan, numElements); } if (keyBits > 16) { radixSortStep<4, 16, false, false> (keys, values, plan, numElements); } if (keyBits > 20) { radixSortStep<4, 20, false, false> (keys, values, plan, numElements); } if (keyBits > 24) { radixSortStep<4, 24, false, false> (keys, values, plan, numElements); } if (keyBits > 28) { if (flipBits) // last pass { radixSortStep<4, 28, false, true> (keys, values, plan, numElements); } else { radixSortStep<4, 28, false, false> (keys, values, plan, numElements); } } } /** * @brief Wrapper to call main radix sort function. For float configuration. * * Calls the main radix sort function. For float configuration. * * @param[in,out] keys Keys to be sorted. * @param[in,out] values Associated values to be sorted (through keys). * @param[in] plan Configuration information for RadixSort. * @param[in] numElements Number of elements in the sort. * @param[in] negativeKeys Is set true if key datatype has neg. numbers. * @param[in] keyBits Number of interesting bits in the key **/ extern "C" void radixSortFloatKeys(float* keys, uint* values, const CUDPPRadixSortPlan *plan, size_t numElements, bool negativeKeys, int keyBits) { radixSort((uint*)keys, (uint*)values, plan, numElements, negativeKeys, keyBits); } /** @brief Perform one step of the radix sort. Sorts by nbits key bits per step, * starting at startbit. * * @param[in,out] keys Keys to be sorted. * @param[in] plan Configuration information for RadixSort. * @param[in] numElements Number of elements in the sort. **/ template<uint nbits, uint startbit, bool flip, bool unflip> void radixSortStepKeysOnly(uint *keys, const CUDPPRadixSortPlan *plan, uint numElements) { const uint eltsPerBlock = SORT_CTA_SIZE * 4; const uint eltsPerBlock2 = SORT_CTA_SIZE * 2; bool fullBlocks = ((numElements % eltsPerBlock) == 0); uint numBlocks = (fullBlocks) ? (numElements / eltsPerBlock) : (numElements / eltsPerBlock + 1); uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ? (numElements / eltsPerBlock2) : (numElements / eltsPerBlock2 + 1); bool loop = numBlocks > 65535; uint blocks = loop ? 65535 : numBlocks; uint blocksFind = loop ? 65535 : numBlocks2; uint blocksReorder = loop ? 65535 : numBlocks2; uint threshold = fullBlocks ? 
plan->m_persistentCTAThresholdFullBlocks[1] : plan->m_persistentCTAThreshold[1]; bool persist = plan->m_bUsePersistentCTAs && (numElements >= threshold); if (persist) { loop = (numElements > 262144) || (numElements >= 32768 && numElements < 65536); blocks = numBlocks; blocksFind = numBlocks2; blocksReorder = numBlocks2; } if (fullBlocks) { if (loop) { if (persist) { blocks = flip ? numCTAs(radixSortBlocksKeysOnly<4, 0, true, true, true>) : numCTAs(radixSortBlocksKeysOnly<4, 0, true, false, true>); } hipLaunchKernelGGL(( radixSortBlocksKeysOnly<nbits, startbit, true, flip, true>) , dim3(blocks), dim3(SORT_CTA_SIZE), 4 * SORT_CTA_SIZE * sizeof(uint), 0, (uint4*)plan->m_tempKeys, (uint4*)keys, numElements, numBlocks); } else hipLaunchKernelGGL(( radixSortBlocksKeysOnly<nbits, startbit, true, flip, false>) , dim3(blocks), dim3(SORT_CTA_SIZE), 4 * SORT_CTA_SIZE * sizeof(uint), 0, (uint4*)plan->m_tempKeys, (uint4*)keys, numElements, numBlocks); } else { if (loop) { if (persist) { blocks = flip ? numCTAs(radixSortBlocksKeysOnly<4, 0, false, true, true>) : numCTAs(radixSortBlocksKeysOnly<4, 0, false, false, true>); } hipLaunchKernelGGL(( radixSortBlocksKeysOnly<nbits, startbit, false, flip, true>) , dim3(blocks), dim3(SORT_CTA_SIZE), 4 * SORT_CTA_SIZE * sizeof(uint), 0, (uint4*)plan->m_tempKeys, (uint4*)keys, numElements, numBlocks); } else hipLaunchKernelGGL(( radixSortBlocksKeysOnly<nbits, startbit, false, flip, false>) , dim3(blocks), dim3(SORT_CTA_SIZE), 4 * SORT_CTA_SIZE * sizeof(uint), 0, (uint4*)plan->m_tempKeys, (uint4*)keys, numElements, numBlocks); } if (fullBlocks) { if (loop) { if (persist) { blocksFind = numCTAs(findRadixOffsets<0, true, true>); } hipLaunchKernelGGL(( findRadixOffsets<startbit, true, true>) , dim3(blocksFind), dim3(SORT_CTA_SIZE), 3 * SORT_CTA_SIZE * sizeof(uint), 0, (uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } else hipLaunchKernelGGL(( findRadixOffsets<startbit, true, false>) , dim3(blocksFind), dim3(SORT_CTA_SIZE), 3 * SORT_CTA_SIZE * sizeof(uint), 0, (uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } else { if (loop) { if (persist) { blocksFind = numCTAs(findRadixOffsets<0, false, true>); } hipLaunchKernelGGL(( findRadixOffsets<startbit, false, true>) , dim3(blocksFind), dim3(SORT_CTA_SIZE), 3 * SORT_CTA_SIZE * sizeof(uint), 0, (uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } else hipLaunchKernelGGL(( findRadixOffsets<startbit, false, false>) , dim3(blocksFind), dim3(SORT_CTA_SIZE), 3 * SORT_CTA_SIZE * sizeof(uint), 0, (uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } cudppScanDispatch(plan->m_countersSum, plan->m_counters, 16*numBlocks2, 1, 0, plan->m_scanPlan); if (fullBlocks) { if (plan->m_bManualCoalesce) { if (loop) { if (persist) { blocksReorder = unflip ? 
numCTAs(reorderDataKeysOnly<0, true, true, true, true>) : numCTAs(reorderDataKeysOnly<0, true, true, false, true>); } hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, true, true, unflip, true>) , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0, keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, true, true, unflip, false>) , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0, keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else { if (loop) { if (persist) { blocksReorder = unflip ? numCTAs(reorderDataKeysOnly<0, true, false, true, true>) : numCTAs(reorderDataKeysOnly<0, true, false, false, true>); } hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, true, false, unflip, true>) , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0, keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, true, false, unflip, false>) , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0, keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } } else { if (plan->m_bManualCoalesce) { if (loop) { if (persist) { blocksReorder = unflip ? numCTAs(reorderDataKeysOnly<0, false, true, true, true>) : numCTAs(reorderDataKeysOnly<0, false, true, false, true>); } hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, false, true, unflip, true>) , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0, keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, false, true, unflip, false>) , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0, keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else { if (loop) { if (persist) { blocksReorder = unflip ? numCTAs(reorderDataKeysOnly<0, false, false, true, true>) : numCTAs(reorderDataKeysOnly<0, false, false, false, true>); } hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, false, false, unflip, true>) , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0, keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else hipLaunchKernelGGL(( reorderDataKeysOnly<startbit, false, false, unflip, false>) , dim3(blocksReorder), dim3(SORT_CTA_SIZE), 0, 0, keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } } CUT_CHECK_ERROR("radixSortStepKeysOnly"); } /** * @brief Optimization for sorts of fewer than 4 * CTA_SIZE elements (keys only). * * @param[in,out] keys Keys to be sorted. * @param numElements Number of elements in the sort. 
**/ template <bool flip> void radixSortSingleBlockKeysOnly(uint *keys, uint numElements) { bool fullBlocks = (numElements % (SORT_CTA_SIZE * 4) == 0); if (fullBlocks) { hipLaunchKernelGGL(( radixSortBlocksKeysOnly<32, 0, true, flip, false>) , dim3(1), dim3(SORT_CTA_SIZE), 4 * SORT_CTA_SIZE * sizeof(uint), 0, (uint4*)keys, (uint4*)keys, numElements, 1 ); } else { hipLaunchKernelGGL(( radixSortBlocksKeysOnly<32, 0, false, flip, false>) , dim3(1), dim3(SORT_CTA_SIZE), 4 * SORT_CTA_SIZE * sizeof(uint), 0, (uint4*)keys, (uint4*)keys, numElements, 1 ); } if (flip) hipLaunchKernelGGL(( unflipFloats), dim3(1), dim3(SORT_CTA_SIZE), 0, 0, keys, numElements); CUT_CHECK_ERROR("radixSortSingleBlock"); } /** * @brief Main radix sort function. For keys only configuration. * * Main radix sort function. Sorts in place in the keys array, * but uses the other device arrays as temporary storage. All pointer * parameters are device pointers. Uses scan for the prefix sum of * radix counters. * * @param[in,out] keys Keys to be sorted. * @param[in] plan Configuration information for RadixSort. * @param[in] flipBits Is set true if key datatype is a float (neg. numbers) * for special float sorting operations. * @param[in] numElements Number of elements in the sort. * @param[in] keyBits Number of interesting bits in the key **/ extern "C" void radixSortKeysOnly(uint *keys, const CUDPPRadixSortPlan *plan, bool flipBits, size_t numElements, int keyBits) { if(numElements <= WARP_SIZE) { if (flipBits) hipLaunchKernelGGL(( radixSortSingleWarpKeysOnly<true>), dim3(1), dim3(numElements), 0, 0, keys, numElements); else hipLaunchKernelGGL(( radixSortSingleWarpKeysOnly<false>), dim3(1), dim3(numElements), 0, 0, keys, numElements); return; } if(numElements <= SORT_CTA_SIZE * 4) { if (flipBits) radixSortSingleBlockKeysOnly<true>(keys, numElements); else radixSortSingleBlockKeysOnly<false>(keys, numElements); return; } // flip float bits on the first pass, unflip on the last pass if (flipBits) { radixSortStepKeysOnly<4, 0, true, false>(keys, plan, numElements); } else { radixSortStepKeysOnly<4, 0, false, false>(keys, plan, numElements); } if (keyBits > 4) { radixSortStepKeysOnly<4, 4, false, false>(keys, plan, numElements); } if (keyBits > 8) { radixSortStepKeysOnly<4, 8, false, false>(keys, plan, numElements); } if (keyBits > 12) { radixSortStepKeysOnly<4, 12, false, false>(keys, plan, numElements); } if (keyBits > 16) { radixSortStepKeysOnly<4, 16, false, false>(keys, plan, numElements); } if (keyBits > 20) { radixSortStepKeysOnly<4, 20, false, false>(keys, plan, numElements); } if (keyBits > 24) { radixSortStepKeysOnly<4, 24, false, false>(keys, plan, numElements); } if (keyBits > 28) { if (flipBits) // last pass { radixSortStepKeysOnly<4, 28, false, true>(keys, plan, numElements); } else { radixSortStepKeysOnly<4, 28, false, false>(keys, plan, numElements); } } } /** * @brief Wrapper to call main radix sort function. For floats and keys only. * * Calls the radixSortKeysOnly function setting parameters for floats. * * @param[in,out] keys Keys to be sorted. * @param[in] plan Configuration information for RadixSort. * @param[in] negativeKeys Is set true if key flipBits is to be true in * radixSortKeysOnly(). * @param[in] numElements Number of elements in the sort. 
* @param[in] keyBits Number of interesting bits in the key **/ extern "C" void radixSortFloatKeysOnly(float *keys, const CUDPPRadixSortPlan *plan, bool negativeKeys, size_t numElements, int keyBits) { radixSortKeysOnly((uint*)keys, plan, negativeKeys, numElements, keyBits); } extern "C" void initDeviceParameters(CUDPPRadixSortPlan *plan) { int deviceID = -1; if (hipSuccess == hipGetDevice(&deviceID)) { hipDeviceProp_t devprop; hipGetDeviceProperties(&devprop, deviceID); int smVersion = devprop.major * 10 + devprop.minor; // sm_12 and later devices don't need help with coalesce in reorderData kernel plan->m_bManualCoalesce = (smVersion < 12); // sm_20 and later devices are better off not using persistent CTAs plan->m_bUsePersistentCTAs = (smVersion < 20); if (plan->m_bUsePersistentCTAs) { // The following is only true on pre-sm_20 devices (pre-Fermi): // Empirically we have found that for some (usually larger) sort // sizes it is better to use exactly as many "persistent" CTAs // as can fill the GPU, which loop over the "blocks" of work. For smaller // arrays it is better to use the typical CUDA approach of launching one CTA // per block of work. // 0-element of these two-element arrays is for key-value sorts // 1-element is for key-only sorts plan->m_persistentCTAThreshold[0] = plan->m_bManualCoalesce ? 16777216 : 524288; plan->m_persistentCTAThresholdFullBlocks[0] = plan->m_bManualCoalesce ? 2097152: 524288; plan->m_persistentCTAThreshold[1] = plan->m_bManualCoalesce ? 16777216 : 8388608; plan->m_persistentCTAThresholdFullBlocks[1] = plan->m_bManualCoalesce ? 2097152: 0; // create a map of function pointers to register counts for more accurate occupancy calculation // Must pass in the dynamic shared memory used by each kernel, since the runtime doesn't know it // Note we only insert the "loop" version of the kernels (the one with the last template param = true) // Because those are the only ones that require persistent CTAs that maximally fill the device. 
computeNumCTAs(radixSortBlocks<4, 0, false, false, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); computeNumCTAs(radixSortBlocks<4, 0, false, true, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); computeNumCTAs(radixSortBlocks<4, 0, true, false, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); computeNumCTAs(radixSortBlocks<4, 0, true, true, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); computeNumCTAs(radixSortBlocksKeysOnly<4, 0, false, false, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); computeNumCTAs(radixSortBlocksKeysOnly<4, 0, false, true, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); computeNumCTAs(radixSortBlocksKeysOnly<4, 0, true, false, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); computeNumCTAs(radixSortBlocksKeysOnly<4, 0, true, true, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); computeNumCTAs(findRadixOffsets<0, false, true>, 3 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); computeNumCTAs(findRadixOffsets<0, true, true>, 3 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); computeNumCTAs(reorderData<0, false, false, false, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderData<0, false, false, true, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderData<0, false, true, false, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderData<0, false, true, true, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderData<0, true, false, false, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderData<0, true, false, true, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderData<0, true, true, false, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderData<0, true, true, true, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderDataKeysOnly<0, false, false, false, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderDataKeysOnly<0, false, false, true, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderDataKeysOnly<0, false, true, false, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderDataKeysOnly<0, false, true, true, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderDataKeysOnly<0, true, false, false, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderDataKeysOnly<0, true, false, true, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderDataKeysOnly<0, true, true, false, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderDataKeysOnly<0, true, true, true, true>, 0, SORT_CTA_SIZE); computeNumCTAs(emptyKernel, 0, SORT_CTA_SIZE); } } } /** * @brief From the programmer-specified sort configuration, * creates internal memory for performing the sort. * * @param[in] plan Pointer to CUDPPRadixSortPlan object **/ extern "C" void allocRadixSortStorage(CUDPPRadixSortPlan *plan) { unsigned int numElements = plan->m_numElements; unsigned int numBlocks = ((numElements % (SORT_CTA_SIZE * 4)) == 0) ? 
(numElements / (SORT_CTA_SIZE * 4)) : (numElements / (SORT_CTA_SIZE * 4) + 1); switch(plan->m_config.datatype) { case CUDPP_UINT: CUDA_SAFE_CALL(hipMalloc((void **)&plan->m_tempKeys, numElements * sizeof(unsigned int))); if (!plan->m_bKeysOnly) CUDA_SAFE_CALL(hipMalloc((void **)&plan->m_tempValues, numElements * sizeof(unsigned int))); CUDA_SAFE_CALL(hipMalloc((void **)&plan->m_counters, WARP_SIZE * numBlocks * sizeof(unsigned int))); CUDA_SAFE_CALL(hipMalloc((void **)&plan->m_countersSum, WARP_SIZE * numBlocks * sizeof(unsigned int))); CUDA_SAFE_CALL(hipMalloc((void **)&plan->m_blockOffsets, WARP_SIZE * numBlocks * sizeof(unsigned int))); break; case CUDPP_FLOAT: CUDA_SAFE_CALL(hipMalloc((void **)&plan->m_tempKeys, numElements * sizeof(float))); if (!plan->m_bKeysOnly) CUDA_SAFE_CALL(hipMalloc((void **)&plan->m_tempValues, numElements * sizeof(float))); CUDA_SAFE_CALL(hipMalloc((void **)&plan->m_counters, WARP_SIZE * numBlocks * sizeof(float))); CUDA_SAFE_CALL(hipMalloc((void **)&plan->m_countersSum, WARP_SIZE * numBlocks * sizeof(float))); CUDA_SAFE_CALL(hipMalloc((void **)&plan->m_blockOffsets, WARP_SIZE * numBlocks * sizeof(float))); break; } initDeviceParameters(plan); } /** @brief Deallocates intermediate memory from allocRadixSortStorage. * * * @param[in] plan Pointer to CUDPPRadixSortPlan object **/ extern "C" void freeRadixSortStorage(CUDPPRadixSortPlan* plan) { CUDA_SAFE_CALL( hipFree(plan->m_tempKeys)); CUDA_SAFE_CALL( hipFree(plan->m_tempValues)); CUDA_SAFE_CALL( hipFree(plan->m_counters)); CUDA_SAFE_CALL( hipFree(plan->m_countersSum)); CUDA_SAFE_CALL( hipFree(plan->m_blockOffsets)); } /** @brief Dispatch function to perform a sort on an array with * a specified configuration. * * This is the dispatch routine which calls radixSort...() with * appropriate template parameters and arguments as specified by * the plan. * @param[in,out] keys Keys to be sorted. * @param[in,out] values Associated values to be sorted (through keys). * @param[in] numElements Number of elements in the sort. * @param[in] keyBits Number of interesting bits in the key* * @param[in] plan Configuration information for RadixSort. **/ extern "C" void cudppRadixSortDispatch(void *keys, void *values, size_t numElements, int keyBits, const CUDPPRadixSortPlan *plan) {/* if(plan->m_bKeysOnly) { switch(plan->m_config.datatype) { case CUDPP_UINT: radixSortKeysOnly((uint*)keys, plan, false, numElements, keyBits); break; case CUDPP_FLOAT: radixSortFloatKeysOnly((float*)keys, plan, true, numElements, keyBits); } } else { switch(plan->m_config.datatype) { case CUDPP_UINT: radixSort((uint*)keys, (uint*) values, plan, numElements, false, keyBits); break; case CUDPP_FLOAT: radixSortFloatKeys((float*)keys, (uint*) values, plan, numElements, true, keyBits); } }*/ } /** @} */ // end radixsort functions /** @} */ // end cudpp_app
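The flip/unflip template arguments threaded through radixSortStep above refer to the standard trick of remapping IEEE-754 float bits so that they sort correctly as unsigned integers; the actual kernels live in kernel/radixsort_kernel.cu and are not shown here, so the following is only a sketch of that transform (names are illustrative):

#include <hip/hip_runtime.h>

typedef unsigned int uint32;

// Monotonic bijection: negative floats have all bits inverted, non-negative
// floats get their sign bit set, so unsigned comparison matches float order.
__host__ __device__ inline uint32 floatFlipBits(uint32 f)
{
    uint32 mask = (f >> 31) ? 0xFFFFFFFFu : 0x80000000u;
    return f ^ mask;
}

// Inverse transform, applied on the final radix pass.
__host__ __device__ inline uint32 floatUnflipBits(uint32 f)
{
    uint32 mask = (f >> 31) ? 0x80000000u : 0xFFFFFFFFu;
    return f ^ mask;
}

Consistent with this, radixSort above applies the forward transform only on the first 4-bit pass (radixSortStep<4, 0, true, false>) and the inverse only on the last (radixSortStep<4, 28, false, true>), so the intermediate passes operate on already-flipped keys.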
6911df939ef10ed2d05bcc31311201bca8ba806b.cu
// ------------------------------------------------------------- // CUDPP -- CUDA Data Parallel Primitives library // ------------------------------------------------------------- // $Revision$ // $Date$ // ------------------------------------------------------------- // This source code is distributed under the terms of license.txt // in the root directory of this source distribution. // ------------------------------------------------------------- /** * @file * radixsort_app.cu * * @brief CUDPP application-level radix sorting routines */ /** @addtogroup cudpp_app * @{ */ /** @name RadixSort Functions * @{ */ #include "cudpp.h" #include "cudpp_util.h" #include "cudpp_radixsort.h" #include "cudpp_scan.h" #include "kernel/radixsort_kernel.cu" #include <cutil.h> #include <cstdlib> #include <cstdio> #include <assert.h> typedef unsigned int uint; /** @brief Perform one step of the radix sort. Sorts by nbits key bits per step, * starting at startbit. * * Uses cudppScanDispatch() for the prefix sum of radix counters. * * @param[in,out] keys Keys to be sorted. * @param[in,out] values Associated values to be sorted (through keys). * @param[in] plan Configuration information for RadixSort. * @param[in] numElements Number of elements in the sort. **/ template<uint nbits, uint startbit, bool flip, bool unflip> void radixSortStep(uint *keys, uint *values, const CUDPPRadixSortPlan *plan, uint numElements) { const uint eltsPerBlock = SORT_CTA_SIZE * 4; const uint eltsPerBlock2 = SORT_CTA_SIZE * 2; bool fullBlocks = ((numElements % eltsPerBlock) == 0); uint numBlocks = (fullBlocks) ? (numElements / eltsPerBlock) : (numElements / eltsPerBlock + 1); uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ? (numElements / eltsPerBlock2) : (numElements / eltsPerBlock2 + 1); bool loop = numBlocks > 65535; uint blocks = loop ? 65535 : numBlocks; uint blocksFind = loop ? 65535 : numBlocks2; uint blocksReorder = loop ? 65535 : numBlocks2; uint threshold = fullBlocks ? plan->m_persistentCTAThresholdFullBlocks[0] : plan->m_persistentCTAThreshold[0]; bool persist = plan->m_bUsePersistentCTAs && (numElements >= threshold); if (persist) { loop = (numElements > 262144) || (numElements >= 32768 && numElements < 65536); blocks = numBlocks; blocksFind = numBlocks2; blocksReorder = numBlocks2; // Run an empty kernel -- this seems to reset some of the CTA scheduling hardware // on GT200, resulting in better scheduling and lower run times if (startbit > 0) { emptyKernel<<<numCTAs(emptyKernel), SORT_CTA_SIZE>>>(); } } if (fullBlocks) { if (loop) { if (persist) { blocks = flip? numCTAs(radixSortBlocks<4, 0, true, true, true>) : numCTAs(radixSortBlocks<4, 0, true, false, true>); } radixSortBlocks<nbits, startbit, true, flip, true> <<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)plan->m_tempKeys, (uint4*)plan->m_tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks); } else { radixSortBlocks<nbits, startbit, true, flip, false> <<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)plan->m_tempKeys, (uint4*)plan->m_tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks); } } else { if (loop) { if (persist) { blocks = flip ? 
numCTAs(radixSortBlocks<4, 0, false, true, true>) : numCTAs(radixSortBlocks<4, 0, false, false, true>); } radixSortBlocks<nbits, startbit, false, flip, true> <<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)plan->m_tempKeys, (uint4*)plan->m_tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks); } else { radixSortBlocks<nbits, startbit, false, flip, false> <<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)plan->m_tempKeys, (uint4*)plan->m_tempValues, (uint4*)keys, (uint4*)values, numElements, numBlocks); } } CUT_CHECK_ERROR("radixSortBlocks"); if (fullBlocks) { if (loop) { if (persist) { blocksFind = numCTAs(findRadixOffsets<0, true, true>); } findRadixOffsets<startbit, true, true> <<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } else { findRadixOffsets<startbit, true, false> <<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } } else { if (loop) { if (persist) { blocksFind = numCTAs(findRadixOffsets<0, false, true>); } findRadixOffsets<startbit, false, true> <<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } else { findRadixOffsets<startbit, false, false> <<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } } CUT_CHECK_ERROR("findRadixOffsets"); cudppScanDispatch(plan->m_countersSum, plan->m_counters, 16*numBlocks2, 1, 0, plan->m_scanPlan); if (fullBlocks) { if (plan->m_bManualCoalesce) { if (loop) { if (persist) { blocksReorder = unflip ? numCTAs(reorderData<0, true, true, true, true>) : numCTAs(reorderData<0, true, true, false, true>); } reorderData<startbit, true, true, unflip, true> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else { reorderData<startbit, true, true, unflip, false> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } } else { if (loop) { if (persist) { blocksReorder = unflip ? numCTAs(reorderData<0, true, false, true, true>) : numCTAs(reorderData<0, true, false, false, true>); } reorderData<startbit, true, false, unflip, true> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else { reorderData<startbit, true, false, unflip, false> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } } } else { if (plan->m_bManualCoalesce) { if (loop) { if (persist) { blocksReorder = unflip ? 
numCTAs(reorderData<0, false, true, true, true>) : numCTAs(reorderData<0, false, true, false, true>); } reorderData<startbit, false, true, unflip, true> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else { reorderData<startbit, false, true, unflip, false> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } } else { if (loop) { if (persist) { blocksReorder = unflip ? numCTAs(reorderData<0, false, false, true, true>) : numCTAs(reorderData<0, false, false, false, true>); } reorderData<startbit, false, false, unflip, true> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else { reorderData<startbit, false, false, unflip, false> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, values, (uint2*)plan->m_tempKeys, (uint2*)plan->m_tempValues, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } } } CUT_CHECK_ERROR("radixSortStep"); } /** * @brief Single-block optimization for sorts of fewer than 4 * CTA_SIZE elements * * @param[in,out] keys Keys to be sorted. * @param[in,out] values Associated values to be sorted (through keys). * @param numElements Number of elements in the sort. **/ template <bool flip> void radixSortSingleBlock(uint *keys, uint *values, uint numElements) { bool fullBlocks = (numElements % (SORT_CTA_SIZE * 4) == 0); if (fullBlocks) { radixSortBlocks<32, 0, true, flip, false> <<<1, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)keys, (uint4*)values, (uint4*)keys, (uint4*)values, numElements, 0); } else { radixSortBlocks<32, 0, false, flip, false> <<<1, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)keys, (uint4*)values, (uint4*)keys, (uint4*)values, numElements, 0); } if (flip) unflipFloats<<<1, SORT_CTA_SIZE>>>(keys, numElements); CUT_CHECK_ERROR("radixSortSingleBlock"); } /** * @brief Main radix sort function * * Main radix sort function. Sorts in place in the keys and values arrays, * but uses the other device arrays as temporary storage. All pointer * parameters are device pointers. Uses cudppScan() for the prefix sum of * radix counters. * * @param[in,out] keys Keys to be sorted. * @param[in,out] values Associated values to be sorted (through keys). * @param[in] plan Configuration information for RadixSort. * @param[in] numElements Number of elements in the sort. * @param[in] flipBits Is set true if key datatype is a float * (neg. numbers) for special float sorting operations. 
* @param[in] keyBits Number of interesting bits in the key **/ void radixSort(uint *keys, uint* values, const CUDPPRadixSortPlan *plan, size_t numElements, bool flipBits, int keyBits) { if(numElements <= WARP_SIZE) { if (flipBits) radixSortSingleWarp<true><<<1, numElements>>> (keys, values, numElements); else radixSortSingleWarp<false><<<1, numElements>>> (keys, values, numElements); CUT_CHECK_ERROR("radixSortSingleWarp"); return; } #ifdef __DEVICE_EMULATION__ printf("bits: %d\n", keyBits); #endif if(numElements <= SORT_CTA_SIZE * 4) { if (flipBits) radixSortSingleBlock<true>(keys, values, numElements); else radixSortSingleBlock<false>(keys, values, numElements); return; } // flip float bits on the first pass, unflip on the last pass if (flipBits) { radixSortStep<4, 0, true, false> (keys, values, plan, numElements); } else { radixSortStep<4, 0, false, false> (keys, values, plan, numElements); } if (keyBits > 4) { radixSortStep<4, 4, false, false> (keys, values, plan, numElements); } if (keyBits > 8) { radixSortStep<4, 8, false, false> (keys, values, plan, numElements); } if (keyBits > 12) { radixSortStep<4, 12, false, false> (keys, values, plan, numElements); } if (keyBits > 16) { radixSortStep<4, 16, false, false> (keys, values, plan, numElements); } if (keyBits > 20) { radixSortStep<4, 20, false, false> (keys, values, plan, numElements); } if (keyBits > 24) { radixSortStep<4, 24, false, false> (keys, values, plan, numElements); } if (keyBits > 28) { if (flipBits) // last pass { radixSortStep<4, 28, false, true> (keys, values, plan, numElements); } else { radixSortStep<4, 28, false, false> (keys, values, plan, numElements); } } } /** * @brief Wrapper to call main radix sort function. For float configuration. * * Calls the main radix sort function. For float configuration. * * @param[in,out] keys Keys to be sorted. * @param[in,out] values Associated values to be sorted (through keys). * @param[in] plan Configuration information for RadixSort. * @param[in] numElements Number of elements in the sort. * @param[in] negativeKeys Is set true if key datatype has neg. numbers. * @param[in] keyBits Number of interesting bits in the key **/ extern "C" void radixSortFloatKeys(float* keys, uint* values, const CUDPPRadixSortPlan *plan, size_t numElements, bool negativeKeys, int keyBits) { radixSort((uint*)keys, (uint*)values, plan, numElements, negativeKeys, keyBits); } /** @brief Perform one step of the radix sort. Sorts by nbits key bits per step, * starting at startbit. * * @param[in,out] keys Keys to be sorted. * @param[in] plan Configuration information for RadixSort. * @param[in] numElements Number of elements in the sort. **/ template<uint nbits, uint startbit, bool flip, bool unflip> void radixSortStepKeysOnly(uint *keys, const CUDPPRadixSortPlan *plan, uint numElements) { const uint eltsPerBlock = SORT_CTA_SIZE * 4; const uint eltsPerBlock2 = SORT_CTA_SIZE * 2; bool fullBlocks = ((numElements % eltsPerBlock) == 0); uint numBlocks = (fullBlocks) ? (numElements / eltsPerBlock) : (numElements / eltsPerBlock + 1); uint numBlocks2 = ((numElements % eltsPerBlock2) == 0) ? (numElements / eltsPerBlock2) : (numElements / eltsPerBlock2 + 1); bool loop = numBlocks > 65535; uint blocks = loop ? 65535 : numBlocks; uint blocksFind = loop ? 65535 : numBlocks2; uint blocksReorder = loop ? 65535 : numBlocks2; uint threshold = fullBlocks ? 
plan->m_persistentCTAThresholdFullBlocks[1] : plan->m_persistentCTAThreshold[1]; bool persist = plan->m_bUsePersistentCTAs && (numElements >= threshold); if (persist) { loop = (numElements > 262144) || (numElements >= 32768 && numElements < 65536); blocks = numBlocks; blocksFind = numBlocks2; blocksReorder = numBlocks2; } if (fullBlocks) { if (loop) { if (persist) { blocks = flip ? numCTAs(radixSortBlocksKeysOnly<4, 0, true, true, true>) : numCTAs(radixSortBlocksKeysOnly<4, 0, true, false, true>); } radixSortBlocksKeysOnly<nbits, startbit, true, flip, true> <<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)plan->m_tempKeys, (uint4*)keys, numElements, numBlocks); } else radixSortBlocksKeysOnly<nbits, startbit, true, flip, false> <<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)plan->m_tempKeys, (uint4*)keys, numElements, numBlocks); } else { if (loop) { if (persist) { blocks = flip ? numCTAs(radixSortBlocksKeysOnly<4, 0, false, true, true>) : numCTAs(radixSortBlocksKeysOnly<4, 0, false, false, true>); } radixSortBlocksKeysOnly<nbits, startbit, false, flip, true> <<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)plan->m_tempKeys, (uint4*)keys, numElements, numBlocks); } else radixSortBlocksKeysOnly<nbits, startbit, false, flip, false> <<<blocks, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)plan->m_tempKeys, (uint4*)keys, numElements, numBlocks); } if (fullBlocks) { if (loop) { if (persist) { blocksFind = numCTAs(findRadixOffsets<0, true, true>); } findRadixOffsets<startbit, true, true> <<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } else findRadixOffsets<startbit, true, false> <<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } else { if (loop) { if (persist) { blocksFind = numCTAs(findRadixOffsets<0, false, true>); } findRadixOffsets<startbit, false, true> <<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } else findRadixOffsets<startbit, false, false> <<<blocksFind, SORT_CTA_SIZE, 3 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint2*)plan->m_tempKeys, plan->m_counters, plan->m_blockOffsets, numElements, numBlocks2); } cudppScanDispatch(plan->m_countersSum, plan->m_counters, 16*numBlocks2, 1, 0, plan->m_scanPlan); if (fullBlocks) { if (plan->m_bManualCoalesce) { if (loop) { if (persist) { blocksReorder = unflip ? numCTAs(reorderDataKeysOnly<0, true, true, true, true>) : numCTAs(reorderDataKeysOnly<0, true, true, false, true>); } reorderDataKeysOnly<startbit, true, true, unflip, true> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else reorderDataKeysOnly<startbit, true, true, unflip, false> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else { if (loop) { if (persist) { blocksReorder = unflip ? 
numCTAs(reorderDataKeysOnly<0, true, false, true, true>) : numCTAs(reorderDataKeysOnly<0, true, false, false, true>); } reorderDataKeysOnly<startbit, true, false, unflip, true> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else reorderDataKeysOnly<startbit, true, false, unflip, false> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } } else { if (plan->m_bManualCoalesce) { if (loop) { if (persist) { blocksReorder = unflip ? numCTAs(reorderDataKeysOnly<0, false, true, true, true>) : numCTAs(reorderDataKeysOnly<0, false, true, false, true>); } reorderDataKeysOnly<startbit, false, true, unflip, true> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else reorderDataKeysOnly<startbit, false, true, unflip, false> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else { if (loop) { if (persist) { blocksReorder = unflip ? numCTAs(reorderDataKeysOnly<0, false, false, true, true>) : numCTAs(reorderDataKeysOnly<0, false, false, false, true>); } reorderDataKeysOnly<startbit, false, false, unflip, true> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } else reorderDataKeysOnly<startbit, false, false, unflip, false> <<<blocksReorder, SORT_CTA_SIZE>>> (keys, (uint2*)plan->m_tempKeys, plan->m_blockOffsets, plan->m_countersSum, plan->m_counters, numElements, numBlocks2); } } CUT_CHECK_ERROR("radixSortStepKeysOnly"); } /** * @brief Optimization for sorts of fewer than 4 * CTA_SIZE elements (keys only). * * @param[in,out] keys Keys to be sorted. * @param numElements Number of elements in the sort. **/ template <bool flip> void radixSortSingleBlockKeysOnly(uint *keys, uint numElements) { bool fullBlocks = (numElements % (SORT_CTA_SIZE * 4) == 0); if (fullBlocks) { radixSortBlocksKeysOnly<32, 0, true, flip, false> <<<1, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)keys, (uint4*)keys, numElements, 1 ); } else { radixSortBlocksKeysOnly<32, 0, false, flip, false> <<<1, SORT_CTA_SIZE, 4 * SORT_CTA_SIZE * sizeof(uint)>>> ((uint4*)keys, (uint4*)keys, numElements, 1 ); } if (flip) unflipFloats<<<1, SORT_CTA_SIZE>>>(keys, numElements); CUT_CHECK_ERROR("radixSortSingleBlock"); } /** * @brief Main radix sort function. For keys only configuration. * * Main radix sort function. Sorts in place in the keys array, * but uses the other device arrays as temporary storage. All pointer * parameters are device pointers. Uses scan for the prefix sum of * radix counters. * * @param[in,out] keys Keys to be sorted. * @param[in] plan Configuration information for RadixSort. * @param[in] flipBits Is set true if key datatype is a float (neg. numbers) * for special float sorting operations. * @param[in] numElements Number of elements in the sort. 
* @param[in] keyBits Number of interesting bits in the key **/ extern "C" void radixSortKeysOnly(uint *keys, const CUDPPRadixSortPlan *plan, bool flipBits, size_t numElements, int keyBits) { if(numElements <= WARP_SIZE) { if (flipBits) radixSortSingleWarpKeysOnly<true><<<1, numElements>>>(keys, numElements); else radixSortSingleWarpKeysOnly<false><<<1, numElements>>>(keys, numElements); return; } if(numElements <= SORT_CTA_SIZE * 4) { if (flipBits) radixSortSingleBlockKeysOnly<true>(keys, numElements); else radixSortSingleBlockKeysOnly<false>(keys, numElements); return; } // flip float bits on the first pass, unflip on the last pass if (flipBits) { radixSortStepKeysOnly<4, 0, true, false>(keys, plan, numElements); } else { radixSortStepKeysOnly<4, 0, false, false>(keys, plan, numElements); } if (keyBits > 4) { radixSortStepKeysOnly<4, 4, false, false>(keys, plan, numElements); } if (keyBits > 8) { radixSortStepKeysOnly<4, 8, false, false>(keys, plan, numElements); } if (keyBits > 12) { radixSortStepKeysOnly<4, 12, false, false>(keys, plan, numElements); } if (keyBits > 16) { radixSortStepKeysOnly<4, 16, false, false>(keys, plan, numElements); } if (keyBits > 20) { radixSortStepKeysOnly<4, 20, false, false>(keys, plan, numElements); } if (keyBits > 24) { radixSortStepKeysOnly<4, 24, false, false>(keys, plan, numElements); } if (keyBits > 28) { if (flipBits) // last pass { radixSortStepKeysOnly<4, 28, false, true>(keys, plan, numElements); } else { radixSortStepKeysOnly<4, 28, false, false>(keys, plan, numElements); } } } /** * @brief Wrapper to call main radix sort function. For floats and keys only. * * Calls the radixSortKeysOnly function setting parameters for floats. * * @param[in,out] keys Keys to be sorted. * @param[in] plan Configuration information for RadixSort. * @param[in] negativeKeys Is set true if key flipBits is to be true in * radixSortKeysOnly(). * @param[in] numElements Number of elements in the sort. * @param[in] keyBits Number of interesting bits in the key **/ extern "C" void radixSortFloatKeysOnly(float *keys, const CUDPPRadixSortPlan *plan, bool negativeKeys, size_t numElements, int keyBits) { radixSortKeysOnly((uint*)keys, plan, negativeKeys, numElements, keyBits); } extern "C" void initDeviceParameters(CUDPPRadixSortPlan *plan) { int deviceID = -1; if (cudaSuccess == cudaGetDevice(&deviceID)) { cudaDeviceProp devprop; cudaGetDeviceProperties(&devprop, deviceID); int smVersion = devprop.major * 10 + devprop.minor; // sm_12 and later devices don't need help with coalesce in reorderData kernel plan->m_bManualCoalesce = (smVersion < 12); // sm_20 and later devices are better off not using persistent CTAs plan->m_bUsePersistentCTAs = (smVersion < 20); if (plan->m_bUsePersistentCTAs) { // The following is only true on pre-sm_20 devices (pre-Fermi): // Empirically we have found that for some (usually larger) sort // sizes it is better to use exactly as many "persistent" CTAs // as can fill the GPU, which loop over the "blocks" of work. For smaller // arrays it is better to use the typical CUDA approach of launching one CTA // per block of work. // 0-element of these two-element arrays is for key-value sorts // 1-element is for key-only sorts plan->m_persistentCTAThreshold[0] = plan->m_bManualCoalesce ? 16777216 : 524288; plan->m_persistentCTAThresholdFullBlocks[0] = plan->m_bManualCoalesce ? 2097152: 524288; plan->m_persistentCTAThreshold[1] = plan->m_bManualCoalesce ? 16777216 : 8388608; plan->m_persistentCTAThresholdFullBlocks[1] = plan->m_bManualCoalesce ? 
2097152: 0; // create a map of function pointers to register counts for more accurate occupancy calculation // Must pass in the dynamic shared memory used by each kernel, since the runtime doesn't know it // Note we only insert the "loop" version of the kernels (the one with the last template param = true) // Because those are the only ones that require persistent CTAs that maximally fill the device. computeNumCTAs(radixSortBlocks<4, 0, false, false, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); computeNumCTAs(radixSortBlocks<4, 0, false, true, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); computeNumCTAs(radixSortBlocks<4, 0, true, false, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); computeNumCTAs(radixSortBlocks<4, 0, true, true, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); computeNumCTAs(radixSortBlocksKeysOnly<4, 0, false, false, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); computeNumCTAs(radixSortBlocksKeysOnly<4, 0, false, true, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); computeNumCTAs(radixSortBlocksKeysOnly<4, 0, true, false, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); computeNumCTAs(radixSortBlocksKeysOnly<4, 0, true, true, true>, 4 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); computeNumCTAs(findRadixOffsets<0, false, true>, 3 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); computeNumCTAs(findRadixOffsets<0, true, true>, 3 * SORT_CTA_SIZE * sizeof(uint), SORT_CTA_SIZE); computeNumCTAs(reorderData<0, false, false, false, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderData<0, false, false, true, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderData<0, false, true, false, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderData<0, false, true, true, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderData<0, true, false, false, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderData<0, true, false, true, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderData<0, true, true, false, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderData<0, true, true, true, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderDataKeysOnly<0, false, false, false, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderDataKeysOnly<0, false, false, true, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderDataKeysOnly<0, false, true, false, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderDataKeysOnly<0, false, true, true, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderDataKeysOnly<0, true, false, false, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderDataKeysOnly<0, true, false, true, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderDataKeysOnly<0, true, true, false, true>, 0, SORT_CTA_SIZE); computeNumCTAs(reorderDataKeysOnly<0, true, true, true, true>, 0, SORT_CTA_SIZE); computeNumCTAs(emptyKernel, 0, SORT_CTA_SIZE); } } } /** * @brief From the programmer-specified sort configuration, * creates internal memory for performing the sort. * * @param[in] plan Pointer to CUDPPRadixSortPlan object **/ extern "C" void allocRadixSortStorage(CUDPPRadixSortPlan *plan) { unsigned int numElements = plan->m_numElements; unsigned int numBlocks = ((numElements % (SORT_CTA_SIZE * 4)) == 0) ? 
(numElements / (SORT_CTA_SIZE * 4)) : (numElements / (SORT_CTA_SIZE * 4) + 1);

    switch(plan->m_config.datatype)
    {
    case CUDPP_UINT:
        CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_tempKeys,
                                  numElements * sizeof(unsigned int)));
        if (!plan->m_bKeysOnly)
            CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_tempValues,
                                      numElements * sizeof(unsigned int)));
        CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_counters,
                                  WARP_SIZE * numBlocks * sizeof(unsigned int)));
        CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_countersSum,
                                  WARP_SIZE * numBlocks * sizeof(unsigned int)));
        CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_blockOffsets,
                                  WARP_SIZE * numBlocks * sizeof(unsigned int)));
        break;
    case CUDPP_FLOAT:
        CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_tempKeys,
                                  numElements * sizeof(float)));
        if (!plan->m_bKeysOnly)
            CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_tempValues,
                                      numElements * sizeof(float)));
        CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_counters,
                                  WARP_SIZE * numBlocks * sizeof(float)));
        CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_countersSum,
                                  WARP_SIZE * numBlocks * sizeof(float)));
        CUDA_SAFE_CALL(cudaMalloc((void **)&plan->m_blockOffsets,
                                  WARP_SIZE * numBlocks * sizeof(float)));
        break;
    }

    initDeviceParameters(plan);
}

/** @brief Deallocates intermediate memory from allocRadixSortStorage.
 *
 * @param[in] plan Pointer to CUDPPRadixSortPlan object
 **/
extern "C"
void freeRadixSortStorage(CUDPPRadixSortPlan* plan)
{
    CUDA_SAFE_CALL(cudaFree(plan->m_tempKeys));
    CUDA_SAFE_CALL(cudaFree(plan->m_tempValues));
    CUDA_SAFE_CALL(cudaFree(plan->m_counters));
    CUDA_SAFE_CALL(cudaFree(plan->m_countersSum));
    CUDA_SAFE_CALL(cudaFree(plan->m_blockOffsets));
}

/** @brief Dispatch function to perform a sort on an array with
 * a specified configuration.
 *
 * This is the dispatch routine which calls radixSort...() with
 * appropriate template parameters and arguments as specified by
 * the plan.
 * @param[in,out] keys       Keys to be sorted.
 * @param[in,out] values     Associated values to be sorted (through keys).
 * @param[in] numElements    Number of elements in the sort.
 * @param[in] keyBits        Number of interesting bits in the key
 * @param[in] plan           Configuration information for RadixSort.
 **/
extern "C"
void cudppRadixSortDispatch(void  *keys,
                            void  *values,
                            size_t numElements,
                            int    keyBits,
                            const CUDPPRadixSortPlan *plan)
{/*
    if (plan->m_bKeysOnly)
    {
        switch(plan->m_config.datatype)
        {
        case CUDPP_UINT:
            radixSortKeysOnly((uint*)keys, plan, false, numElements, keyBits);
            break;
        case CUDPP_FLOAT:
            radixSortFloatKeysOnly((float*)keys, plan, true, numElements, keyBits);
        }
    }
    else
    {
        switch(plan->m_config.datatype)
        {
        case CUDPP_UINT:
            radixSort((uint*)keys, (uint*)values, plan, numElements, false, keyBits);
            break;
        case CUDPP_FLOAT:
            radixSortFloatKeys((float*)keys, (uint*)values, plan, numElements, true, keyBits);
        }
    }*/
}

/** @} */ // end radixsort functions
/** @} */ // end cudpp_app
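The flipBits handling in radixSortKeysOnly above relies on the standard trick for sorting IEEE-754 floats as unsigned integers: flip every bit of negative keys and only the sign bit of non-negative ones, sort, then invert the transform on the last pass. A minimal sketch of that transform (the helper names are illustrative, not CUDPP's internals):

__device__ unsigned int floatFlip(unsigned int f)
{
    // Negatives: flip every bit, so they sort before positives and in
    // ascending numeric order among themselves. Positives: set the sign bit.
    unsigned int mask = (f & 0x80000000u) ? 0xFFFFFFFFu : 0x80000000u;
    return f ^ mask;
}

__device__ unsigned int floatUnflip(unsigned int f)
{
    // Inverse of floatFlip, applied on the final (keyBits > 28) pass.
    unsigned int mask = (f & 0x80000000u) ? 0x80000000u : 0xFFFFFFFFu;
    return f ^ mask;
}

Because this reinterpretation is order-preserving, radixSortFloatKeysOnly can simply forward to radixSortKeysOnly with flipBits set to true.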
489f56a81ed11adfeb3838f111c4f0c3b728a4cd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ #undef NDEBUG #include <hipcub/hipcub.hpp> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <cassert> // Has to go after all cub headers. Otherwise, this test won't catch unused // variables in cub kernels. #include "catch2_test_helper.h" template <class ScanTileStateT> __global__ void init_kernel(ScanTileStateT tile_state, int blocks_in_grid) { tile_state.InitializeStatus(blocks_in_grid); } template <class MessageT> __global__ void decoupled_look_back_kernel(cub::ScanTileState<MessageT> tile_state, MessageT *tile_data) { using scan_op_t = hipcub::Sum; using scan_tile_state_t = cub::ScanTileState<MessageT>; using tile_prefix_op = cub::TilePrefixCallbackOp<MessageT, scan_op_t, scan_tile_state_t>; using temp_storage_t = typename tile_prefix_op::TempStorage; // Allocate temp storage in shared memory __shared__ temp_storage_t temp_storage; scan_op_t scan_op{}; const unsigned int threads_in_warp = 32; const unsigned int tid = threadIdx.x; // Construct prefix op tile_prefix_op prefix(tile_state, temp_storage, scan_op); const unsigned int tile_idx = prefix.GetTileIdx(); // "Compute" tile aggregate MessageT tile_aggregate = tile_data[tile_idx]; if (tile_idx == 0) { // There are no blocks to look back to, immediately set the inclusive state if (tid == 0) { tile_state.SetInclusive(tile_idx, tile_aggregate); tile_data[tile_idx] = tile_aggregate; } } else { // Only the first warp in the block can perform the look back const unsigned int warp_id = tid / threads_in_warp; if (warp_id == 0) { // Perform the decoupled look-back // Invocation of the prefix will block until the look-back is complete. 
MessageT exclusive_prefix = prefix(tile_aggregate); if (tid == 0) { MessageT inclusive_prefix = scan_op(exclusive_prefix, tile_aggregate); tile_data[tile_idx] = inclusive_prefix; } } __syncthreads(); assert(tile_data[tile_idx] == prefix.GetInclusivePrefix()); assert(tile_aggregate == prefix.GetBlockAggregate()); } } using message_types = c2h::type_list<std::uint8_t, std::uint16_t, std::uint32_t, std::uint64_t>; template <class MessageT> thrust::host_vector<MessageT> compute_reference(const thrust::device_vector<MessageT> &tile_aggregates) { if (tile_aggregates.empty()) { return {}; } thrust::host_vector<MessageT> reference = tile_aggregates; MessageT *h_reference = thrust::raw_pointer_cast(reference.data()); MessageT aggregate = h_reference[0]; for (std::size_t i = 1; i < reference.size(); i++) { aggregate += h_reference[i]; h_reference[i] = aggregate; } return reference; } CUB_TEST("Decoupled look-back works with various message types", "[decoupled look-back][device]", message_types) { using message_t = typename c2h::get<0, TestType>; using scan_tile_state_t = cub::ScanTileState<message_t>; const int max_tiles = 1024 * 1024; const int num_tiles = GENERATE_COPY(take(10, random(1, max_tiles))); thrust::device_vector<message_t> tile_data(num_tiles); message_t *d_tile_data = thrust::raw_pointer_cast(tile_data.data()); c2h::gen(CUB_SEED(2), tile_data); thrust::host_vector<message_t> reference = compute_reference(tile_data); // Query temporary storage requirements std::size_t temp_storage_bytes{}; scan_tile_state_t::AllocationSize(num_tiles, temp_storage_bytes); // Allocate temporary storage thrust::device_vector<std::uint8_t> temp_storage(temp_storage_bytes); std::uint8_t *d_temp_storage = thrust::raw_pointer_cast(temp_storage.data()); // Initialize temporary storage scan_tile_state_t tile_status; hipError_t status = tile_status.Init(num_tiles, d_temp_storage, temp_storage_bytes); REQUIRE(status == hipSuccess); const unsigned int threads_in_init_block = 256; const unsigned int blocks_in_init_grid = cub::DivideAndRoundUp(num_tiles, threads_in_init_block); hipLaunchKernelGGL(( init_kernel), dim3(blocks_in_init_grid), dim3(threads_in_init_block), 0, 0, tile_status, num_tiles); REQUIRE(hipSuccess == hipPeekAtLastError()); REQUIRE(hipSuccess == hipDeviceSynchronize()); // Launch decoupled look-back const unsigned int threads_in_block = 256; hipLaunchKernelGGL(( decoupled_look_back_kernel), dim3(num_tiles), dim3(threads_in_block), 0, 0, tile_status, d_tile_data); REQUIRE(hipSuccess == hipPeekAtLastError()); REQUIRE(hipSuccess == hipDeviceSynchronize()); REQUIRE(reference == tile_data); }
489f56a81ed11adfeb3838f111c4f0c3b728a4cd.cu
/****************************************************************************** * Copyright (c) 2011-2023, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ #undef NDEBUG #include <cub/device/device_scan.cuh> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <cassert> // Has to go after all cub headers. Otherwise, this test won't catch unused // variables in cub kernels. #include "catch2_test_helper.h" template <class ScanTileStateT> __global__ void init_kernel(ScanTileStateT tile_state, int blocks_in_grid) { tile_state.InitializeStatus(blocks_in_grid); } template <class MessageT> __global__ void decoupled_look_back_kernel(cub::ScanTileState<MessageT> tile_state, MessageT *tile_data) { using scan_op_t = cub::Sum; using scan_tile_state_t = cub::ScanTileState<MessageT>; using tile_prefix_op = cub::TilePrefixCallbackOp<MessageT, scan_op_t, scan_tile_state_t>; using temp_storage_t = typename tile_prefix_op::TempStorage; // Allocate temp storage in shared memory __shared__ temp_storage_t temp_storage; scan_op_t scan_op{}; const unsigned int threads_in_warp = 32; const unsigned int tid = threadIdx.x; // Construct prefix op tile_prefix_op prefix(tile_state, temp_storage, scan_op); const unsigned int tile_idx = prefix.GetTileIdx(); // "Compute" tile aggregate MessageT tile_aggregate = tile_data[tile_idx]; if (tile_idx == 0) { // There are no blocks to look back to, immediately set the inclusive state if (tid == 0) { tile_state.SetInclusive(tile_idx, tile_aggregate); tile_data[tile_idx] = tile_aggregate; } } else { // Only the first warp in the block can perform the look back const unsigned int warp_id = tid / threads_in_warp; if (warp_id == 0) { // Perform the decoupled look-back // Invocation of the prefix will block until the look-back is complete. 
MessageT exclusive_prefix = prefix(tile_aggregate); if (tid == 0) { MessageT inclusive_prefix = scan_op(exclusive_prefix, tile_aggregate); tile_data[tile_idx] = inclusive_prefix; } } __syncthreads(); assert(tile_data[tile_idx] == prefix.GetInclusivePrefix()); assert(tile_aggregate == prefix.GetBlockAggregate()); } } using message_types = c2h::type_list<std::uint8_t, std::uint16_t, std::uint32_t, std::uint64_t>; template <class MessageT> thrust::host_vector<MessageT> compute_reference(const thrust::device_vector<MessageT> &tile_aggregates) { if (tile_aggregates.empty()) { return {}; } thrust::host_vector<MessageT> reference = tile_aggregates; MessageT *h_reference = thrust::raw_pointer_cast(reference.data()); MessageT aggregate = h_reference[0]; for (std::size_t i = 1; i < reference.size(); i++) { aggregate += h_reference[i]; h_reference[i] = aggregate; } return reference; } CUB_TEST("Decoupled look-back works with various message types", "[decoupled look-back][device]", message_types) { using message_t = typename c2h::get<0, TestType>; using scan_tile_state_t = cub::ScanTileState<message_t>; const int max_tiles = 1024 * 1024; const int num_tiles = GENERATE_COPY(take(10, random(1, max_tiles))); thrust::device_vector<message_t> tile_data(num_tiles); message_t *d_tile_data = thrust::raw_pointer_cast(tile_data.data()); c2h::gen(CUB_SEED(2), tile_data); thrust::host_vector<message_t> reference = compute_reference(tile_data); // Query temporary storage requirements std::size_t temp_storage_bytes{}; scan_tile_state_t::AllocationSize(num_tiles, temp_storage_bytes); // Allocate temporary storage thrust::device_vector<std::uint8_t> temp_storage(temp_storage_bytes); std::uint8_t *d_temp_storage = thrust::raw_pointer_cast(temp_storage.data()); // Initialize temporary storage scan_tile_state_t tile_status; cudaError_t status = tile_status.Init(num_tiles, d_temp_storage, temp_storage_bytes); REQUIRE(status == cudaSuccess); const unsigned int threads_in_init_block = 256; const unsigned int blocks_in_init_grid = cub::DivideAndRoundUp(num_tiles, threads_in_init_block); init_kernel<<<blocks_in_init_grid, threads_in_init_block>>>(tile_status, num_tiles); REQUIRE(cudaSuccess == cudaPeekAtLastError()); REQUIRE(cudaSuccess == cudaDeviceSynchronize()); // Launch decoupled look-back const unsigned int threads_in_block = 256; decoupled_look_back_kernel<<<num_tiles, threads_in_block>>>(tile_status, d_tile_data); REQUIRE(cudaSuccess == cudaPeekAtLastError()); REQUIRE(cudaSuccess == cudaDeviceSynchronize()); REQUIRE(reference == tile_data); }
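The two test files above exercise cub's decoupled look-back: each tile first publishes its local aggregate (status PARTIAL) and later its inclusive prefix (status INCLUSIVE), and a tile computes its exclusive prefix by walking backwards over predecessors, consuming aggregates until it reaches an INCLUSIVE entry. A much-simplified serial sketch of the protocol (illustrative only; cub's ScanTileState packs status and value into a single machine word to avoid ordering hazards, and performs the walk warp-parallel rather than with this serial loop):

enum TileStatus { TILE_INVALID = 0, TILE_PARTIAL, TILE_INCLUSIVE };

struct TileDesc
{
    int status;     // starts at TILE_INVALID for every tile
    int aggregate;  // published together with TILE_PARTIAL
    int inclusive;  // published together with TILE_INCLUSIVE
};

__device__ int look_back(volatile TileDesc *tiles, int tile)
{
    int exclusive = 0;
    for (int p = tile - 1; p >= 0; --p)
    {
        while (tiles[p].status == TILE_INVALID) { /* spin until published */ }
        if (tiles[p].status == TILE_INCLUSIVE)
        {
            exclusive += tiles[p].inclusive;  // full prefix known, stop here
            break;
        }
        exclusive += tiles[p].aggregate;      // partial info, keep walking back
    }
    return exclusive;
}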
31312104a61f2a564b4702d50ae7c73e054ebbad.hip
// !!! This is a file automatically generated by hipify!!!
// Source: http://web.mit.edu/pocky/www/cudaworkshop/MonteCarlo/Pi.cu
// Written by Barry Wilkinson, UNC-Charlotte. Pi.cu  December 22, 2010.
// Derived somewhat from code developed by Patrick Rogers, UNC-C
#include <stdlib.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <time.h>
#include <hiprand/hiprand_kernel.h>

#define TRIALS_PER_THREAD 4096
#define BLOCKS 256
#define THREADS 256
#define PI 3.1415926535  // known value of pi

__global__ void gpu_monte_carlo(float *estimate, hiprandState_t *states) {
    unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
    int points_in_circle = 0;
    float x, y;

    hiprand_init(1234, tid, 0, &states[tid]);  // Initialize HIPRAND

    for(int i = 0; i < TRIALS_PER_THREAD; i++) {
        x = hiprand_uniform(&states[tid]);
        y = hiprand_uniform(&states[tid]);
        points_in_circle += (x*x + y*y <= 1.0f); // count if x & y is in the circle.
    }
    estimate[tid] = 4.0f * points_in_circle / (float) TRIALS_PER_THREAD; // return estimate of pi
}

float host_monte_carlo(long trials) {
    float x, y;
    long points_in_circle = 0;  // accumulator must start at zero
    for(long i = 0; i < trials; i++) {
        x = rand() / (float) RAND_MAX;
        y = rand() / (float) RAND_MAX;
        points_in_circle += (x*x + y*y <= 1.0f);
    }
    return 4.0f * points_in_circle / trials;
}

int main(int argc, char *argv[]) {
    clock_t start, stop;
    float host[BLOCKS * THREADS];
    float *dev;
    hiprandState_t *devStates;

    printf("# of trials per thread = %d, # of blocks = %d, # of threads/block = %d.\n",
           TRIALS_PER_THREAD, BLOCKS, THREADS);

    start = clock();

    hipMalloc((void **) &dev, BLOCKS * THREADS * sizeof(float)); // allocate device mem. for counts
    hipMalloc((void **) &devStates, THREADS * BLOCKS * sizeof(hiprandState_t));

    hipLaunchKernelGGL(( gpu_monte_carlo), dim3(BLOCKS), dim3(THREADS), 0, 0, dev, devStates);

    hipMemcpy(host, dev, BLOCKS * THREADS * sizeof(float), hipMemcpyDeviceToHost); // return results

    float pi_gpu = 0.0f;  // accumulator must start at zero
    for(int i = 0; i < BLOCKS * THREADS; i++) {
        pi_gpu += host[i];
    }
    pi_gpu /= (BLOCKS * THREADS);

    stop = clock();
    printf("GPU pi calculated in %f s.\n", (stop-start)/(float)CLOCKS_PER_SEC);

    start = clock();
    float pi_cpu = host_monte_carlo(BLOCKS * THREADS * TRIALS_PER_THREAD);
    stop = clock();
    printf("CPU pi calculated in %f s.\n", (stop-start)/(float)CLOCKS_PER_SEC);

    printf("CUDA estimate of PI = %f [error of %f]\n", pi_gpu, pi_gpu - PI);
    printf("CPU estimate of PI = %f [error of %f]\n", pi_cpu, pi_cpu - PI);

    hipFree(dev);        // free device memory
    hipFree(devStates);

    return 0;
}
31312104a61f2a564b4702d50ae7c73e054ebbad.cu
// Source: http://web.mit.edu/pocky/www/cudaworkshop/MonteCarlo/Pi.cu
// Written by Barry Wilkinson, UNC-Charlotte. Pi.cu  December 22, 2010.
// Derived somewhat from code developed by Patrick Rogers, UNC-C
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <math.h>
#include <time.h>
#include <curand_kernel.h>

#define TRIALS_PER_THREAD 4096
#define BLOCKS 256
#define THREADS 256
#define PI 3.1415926535  // known value of pi

__global__ void gpu_monte_carlo(float *estimate, curandState *states) {
    unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
    int points_in_circle = 0;
    float x, y;

    curand_init(1234, tid, 0, &states[tid]);  // Initialize CURAND

    for(int i = 0; i < TRIALS_PER_THREAD; i++) {
        x = curand_uniform(&states[tid]);
        y = curand_uniform(&states[tid]);
        points_in_circle += (x*x + y*y <= 1.0f); // count if x & y is in the circle.
    }
    estimate[tid] = 4.0f * points_in_circle / (float) TRIALS_PER_THREAD; // return estimate of pi
}

float host_monte_carlo(long trials) {
    float x, y;
    long points_in_circle = 0;  // accumulator must start at zero
    for(long i = 0; i < trials; i++) {
        x = rand() / (float) RAND_MAX;
        y = rand() / (float) RAND_MAX;
        points_in_circle += (x*x + y*y <= 1.0f);
    }
    return 4.0f * points_in_circle / trials;
}

int main(int argc, char *argv[]) {
    clock_t start, stop;
    float host[BLOCKS * THREADS];
    float *dev;
    curandState *devStates;

    printf("# of trials per thread = %d, # of blocks = %d, # of threads/block = %d.\n",
           TRIALS_PER_THREAD, BLOCKS, THREADS);

    start = clock();

    cudaMalloc((void **) &dev, BLOCKS * THREADS * sizeof(float)); // allocate device mem. for counts
    cudaMalloc((void **) &devStates, THREADS * BLOCKS * sizeof(curandState));

    gpu_monte_carlo<<<BLOCKS, THREADS>>>(dev, devStates);

    cudaMemcpy(host, dev, BLOCKS * THREADS * sizeof(float), cudaMemcpyDeviceToHost); // return results

    float pi_gpu = 0.0f;  // accumulator must start at zero
    for(int i = 0; i < BLOCKS * THREADS; i++) {
        pi_gpu += host[i];
    }
    pi_gpu /= (BLOCKS * THREADS);

    stop = clock();
    printf("GPU pi calculated in %f s.\n", (stop-start)/(float)CLOCKS_PER_SEC);

    start = clock();
    float pi_cpu = host_monte_carlo(BLOCKS * THREADS * TRIALS_PER_THREAD);
    stop = clock();
    printf("CPU pi calculated in %f s.\n", (stop-start)/(float)CLOCKS_PER_SEC);

    printf("CUDA estimate of PI = %f [error of %f]\n", pi_gpu, pi_gpu - PI);
    printf("CPU estimate of PI = %f [error of %f]\n", pi_cpu, pi_cpu - PI);

    cudaFree(dev);        // free device memory
    cudaFree(devStates);

    return 0;
}
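On expected accuracy: each of the N = BLOCKS * THREADS * TRIALS_PER_THREAD = 2^28 samples is a Bernoulli trial with success probability p = pi/4, so the pooled estimator pi_hat = 4 * hits / N has standard error

    sigma(pi_hat) = sqrt(pi * (4 - pi) / N) ≈ 1.64 / sqrt(N) ≈ 1.0e-4  for N = 2^28.

Averaging the per-thread estimates, as main() does, is arithmetically identical to pooling all hits, since every thread draws the same number of samples.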
841ca9316f1bd937b07bb5d574e2ee67f54e350f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hipfft.h>  // declares hipfftComplex, used below
/*
 *
 * Copyright (C) 2009 Tomas Mazanec
 *
 * Author: Tomas Mazanec <mazanec at utia.cas.cz>
 * Created: Fri Aug 28 10:16:54 CEST 2009
 *
 * Application of CUDA in DSP algorithms
 *  - Cross Ambiguity Function (CAF) implementation
 *
 */

#ifndef _CAF_KERNEL_H_
#define _CAF_KERNEL_H_

// Element-wise complex multiply of two spectra: g_sigt[i] = g_sig1[i] * g_sig2[i].
// The grid is assumed to cover the signal length exactly (no bounds check).
static __global__ void cafMulKernel(hipfftComplex* g_sig1, hipfftComplex* g_sig2, hipfftComplex* g_sigt)
{
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    hipfftComplex a, b, c;

    a.x = g_sig1[idx].x;
    a.y = g_sig1[idx].y;
    b.x = g_sig2[idx].x;
    b.y = g_sig2[idx].y;

    // (a.x + i*a.y) * (b.x + i*b.y)
    c.x = a.x * b.x - a.y * b.y;
    c.y = a.x * b.y + a.y * b.x;

    g_sigt[idx] = c;
}

#endif //_CAF_KERNEL_H_
841ca9316f1bd937b07bb5d574e2ee67f54e350f.cu
/*
 *
 * Copyright (C) 2009 Tomas Mazanec
 *
 * Author: Tomas Mazanec <mazanec at utia.cas.cz>
 * Created: Fri Aug 28 10:16:54 CEST 2009
 *
 * Application of CUDA in DSP algorithms
 *  - Cross Ambiguity Function (CAF) implementation
 *
 */

#ifndef _CAF_KERNEL_H_
#define _CAF_KERNEL_H_

#include <cufft.h>  // declares cufftComplex, used below

// Element-wise complex multiply of two spectra: g_sigt[i] = g_sig1[i] * g_sig2[i].
// The grid is assumed to cover the signal length exactly (no bounds check).
static __global__ void cafMulKernel(cufftComplex* g_sig1, cufftComplex* g_sig2, cufftComplex* g_sigt)
{
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    cufftComplex a, b, c;

    a.x = g_sig1[idx].x;
    a.y = g_sig1[idx].y;
    b.x = g_sig2[idx].x;
    b.y = g_sig2[idx].y;

    // (a.x + i*a.y) * (b.x + i*b.y)
    c.x = a.x * b.x - a.y * b.y;
    c.y = a.x * b.y + a.y * b.x;

    g_sigt[idx] = c;
}

#endif //_CAF_KERNEL_H_
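In a cross-ambiguity or cross-correlation computation, a kernel like cafMulKernel typically sits between a forward and an inverse transform. Classically one multiplies FFT(s1) by the complex conjugate of FFT(s2); the kernel above multiplies the spectra directly, so any required conjugation is presumably applied elsewhere. A hypothetical host-side pipeline showing where the kernel fits (all names except cafMulKernel are assumptions; plan reuse and error handling are elided):

#include <cufft.h>

// n must be a multiple of `block`, since cafMulKernel has no bounds check.
void caf_correlate(cufftComplex *d_sig1, cufftComplex *d_sig2,
                   cufftComplex *d_out, int n)
{
    cufftHandle plan;
    cufftPlan1d(&plan, n, CUFFT_C2C, 1);

    cufftExecC2C(plan, d_sig1, d_sig1, CUFFT_FORWARD);  // S1 = FFT(s1), in place
    cufftExecC2C(plan, d_sig2, d_sig2, CUFFT_FORWARD);  // S2 = FFT(s2), in place

    const int block = 256;
    cafMulKernel<<<n / block, block>>>(d_sig1, d_sig2, d_out);

    cufftExecC2C(plan, d_out, d_out, CUFFT_INVERSE);    // unnormalized inverse FFT
    cufftDestroy(plan);
}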
be6ff16135564b1b0632233ed0317c5c92b9f18c.hip
// !!! This is a file automatically generated by hipify!!! #include "Storm.hpp" #include "Main.hpp" #include "Map.hpp" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <random> #include <ctime> #include <math.h> #define rand() myrand() using namespace std; unsigned int storm_phase_current = 0; unsigned int storm_phase_time = 0; vector<unsigned int> storm_phase_start_ticks; vector<unsigned int> storm_phase_duration_ticks; vector<float> storm_phase_mapratio; vector<float> storm_phase_dps; struct storm storm_last; struct storm storm_current; struct storm storm_to; __global__ void draw_storm_kernel(unsigned int *device_output_data, const unsigned int output_position, const unsigned int width, const unsigned int height, const unsigned int channels, const unsigned int camera_crop_x1, const unsigned int camera_crop_y1, const float camera_z, const struct storm storm_current, const struct storm storm_to, const unsigned int storm_alpha, const struct vector3<unsigned char> storm_color) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < width * height) { int current_x = (i % width); int current_y = (i / width); unsigned char* frame = (unsigned char*)&device_output_data[output_position]; unsigned int storm_alpha = 50; if (sqrtf((camera_crop_x1 + current_x*camera_z - storm_current.x) * (camera_crop_x1+ current_x * camera_z - storm_current.x) + (camera_crop_y1 + current_y * camera_z - storm_current.y) * (camera_crop_y1 + current_y * camera_z - storm_current.y)) >= storm_current.radius) { frame[current_y * (width * channels) + current_x * channels] = (255 - storm_alpha)/255.0f * frame[current_y * (width * channels) + current_x * channels] + (storm_alpha/255.0f) * storm_color[0]; frame[current_y * (width * channels) + current_x * channels + 1] = (255 - storm_alpha) / 255.0f * frame[current_y * (width * channels) + current_x * channels + 1] + (storm_alpha / 255.0f) * storm_color[1]; frame[current_y * (width * channels) + current_x * channels + 2] = (255 - storm_alpha) / 255.0f * frame[current_y * (width * channels) + current_x * channels + 2] + (storm_alpha / 255.0f) * storm_color[2]; } unsigned int storm_circle_alpha = 150; if (sqrtf((camera_crop_x1 + current_x * camera_z - storm_to.x) * (camera_crop_x1 + current_x * camera_z - storm_to.x) + (camera_crop_y1 + current_y * camera_z - storm_to.y) * (camera_crop_y1 + current_y * camera_z - storm_to.y)) >= storm_to.radius-2.0f && sqrtf((camera_crop_x1 + current_x * camera_z - storm_to.x) * (camera_crop_x1 + current_x * camera_z - storm_to.x) + (camera_crop_y1 + current_y * camera_z - storm_to.y) * (camera_crop_y1 + current_y * camera_z - storm_to.y)) <= storm_to.radius+2.0f ) { frame[current_y * (width * channels) + current_x * channels] = (255 - storm_circle_alpha) / 255.0f * frame[current_y * (width * channels) + current_x * channels] + (storm_circle_alpha / 255.0f) * 255; frame[current_y * (width * channels) + current_x * channels + 1] = (255 - storm_circle_alpha) / 255.0f * frame[current_y * (width * channels) + current_x * channels + 1] + (storm_circle_alpha / 255.0f) * 255; frame[current_y * (width * channels) + current_x * channels + 2] = (255 - storm_circle_alpha) / 255.0f * frame[current_y * (width * channels) + current_x * channels + 2] + (storm_circle_alpha / 255.0f) * 255; } } } void launch_draw_storm_kernel(unsigned int* device_output_data, const unsigned int output_position, const unsigned int width, const unsigned int height, const unsigned int channels, const unsigned int camera_crop_x1, const unsigned int 
camera_crop_y1, const float camera_z, const struct storm storm_current, const struct storm storm_to, const unsigned int storm_alpha, const struct vector3<unsigned char> storm_color) { hipError_t err = hipSuccess; int threadsPerBlock = 256; int blocksPerGrid = (width * height + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( draw_storm_kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, device_output_data, output_position, width, height, channels, camera_crop_x1, camera_crop_y1, camera_z, storm_current, storm_to, storm_alpha, storm_color); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed in draw_storm_kernel (error code %s)\n", hipGetErrorString(err)); } } void storm_init(struct bit_field* bf_map, struct bit_field* bf_rw) { printf("initialising storm\n"); storm_phase_start_ticks.emplace_back(5400); storm_phase_duration_ticks.emplace_back(3600); storm_phase_mapratio.emplace_back(0.75f); storm_phase_dps.emplace_back(1.0f); storm_phase_start_ticks.emplace_back(1800); storm_phase_duration_ticks.emplace_back(1800); storm_phase_mapratio.emplace_back(0.4f); storm_phase_dps.emplace_back(2.0f); storm_phase_start_ticks.emplace_back(1800); storm_phase_duration_ticks.emplace_back(1800); storm_phase_mapratio.emplace_back(0.2f); storm_phase_dps.emplace_back(4.0f); storm_phase_start_ticks.emplace_back(1800); storm_phase_duration_ticks.emplace_back(1800); storm_phase_mapratio.emplace_back(0.0f); storm_phase_dps.emplace_back(8.0f); float storm_radius = ::min(gm.map_dimensions[0], gm.map_dimensions[1]) * storm_phase_mapratio[0]/2.0f; int storm_center_max_x = (int)floorf(std::max<float>(gm.map_dimensions[0] - storm_radius, 0.0f)); int storm_center_max_y = (int)floorf(std::max<float>(gm.map_dimensions[1] - storm_radius, 0.0f)); storm_current.x = (unsigned int)gm.map_dimensions[0] / 2.0f; storm_current.y = (unsigned int)gm.map_dimensions[1] / 2.0f; storm_current.radius = floorf(std::max<float>(gm.map_dimensions[0], gm.map_dimensions[1]) * std::sqrtf(2)/2.0f); storm_last.x = storm_current.x; storm_last.y = storm_current.y; storm_last.radius = storm_current.radius; bool pathable_target = false; while (!pathable_target) { storm_to.x = (unsigned int)storm_radius + (rand() % (int)(storm_center_max_x - storm_radius)); storm_to.y = (unsigned int)storm_radius + (rand() % (int)(storm_center_max_y - storm_radius)); if (bf_map->data[gm.map_pathable_position + (storm_to.y) * gm.map_dimensions[0] + (storm_to.x)] > 0) { pathable_target = true; } } storm_to.radius = storm_radius; printf("storm initialized\n"); } void storm_next(struct bit_field* bf_map, struct bit_field* bf_rw) { storm_phase_time++; if (storm_phase_time == storm_phase_start_ticks[storm_phase_current] + storm_phase_duration_ticks[storm_phase_current]) { if (storm_phase_current + 1 < storm_phase_start_ticks.size()) { storm_phase_current++; storm_phase_time = 0; storm_current = storm_to; storm_last = storm_current; float storm_radius_new = ::min(gm.map_dimensions[0], gm.map_dimensions[1]) * storm_phase_mapratio[storm_phase_current] / 2.0f; float max_dist_from_last_center = storm_last.radius - storm_radius_new; bool pathable_target = false; while (!pathable_target) { float rand_dist = rand() / (float)RAND_MAX * max_dist_from_last_center; float rand_angle = rand() / (float)RAND_MAX * 2 * std::_Pi; storm_to.x = (unsigned int)(storm_last.x + rand_dist * std::cosf(rand_angle)); storm_to.y = (unsigned int)(storm_last.y + rand_dist * std::sinf(rand_angle)); if (bf_map->data[gm.map_pathable_position + (storm_to.y) * 
gm.map_dimensions[0] + (storm_to.x)] > 0) { pathable_target = true; } } storm_to.radius = storm_radius_new; } } if (storm_phase_time > storm_phase_start_ticks[storm_phase_current] && storm_phase_time < storm_phase_start_ticks[storm_phase_current] + storm_phase_duration_ticks[storm_phase_current]) { int delta_x = (int)(((storm_phase_time - storm_phase_start_ticks[storm_phase_current]) / (float)storm_phase_duration_ticks[storm_phase_current]) * ((int)storm_to.x - (int)storm_last.x)); int delta_y = (int)(((storm_phase_time - storm_phase_start_ticks[storm_phase_current]) / (float)storm_phase_duration_ticks[storm_phase_current]) * ((int)storm_to.y - (int)storm_last.y)); storm_current.x = storm_last.x + delta_x; storm_current.y = storm_last.y + delta_y; storm_current.radius = storm_last.radius + ((storm_phase_time - storm_phase_start_ticks[storm_phase_current]) / (float)storm_phase_duration_ticks[storm_phase_current]) * (storm_to.radius - storm_last.radius); } } bool storm_is_in(vector3<float> position) { if (sqrtf((position[0]+16 - storm_current.x) * (position[0]+16 - storm_current.x) + (position[1]+16 - storm_current.y) * (position[1]+16 - storm_current.y)) >= storm_current.radius - 16) { return true; } return false; } float storm_next_move_time(vector3<float> position, float dist_per_tick) { float dist = sqrtf((position[0] + 16 - storm_to.x) * (position[0] + 16 - storm_to.x) + (position[1] + 16 - storm_to.y) * (position[1] + 16 - storm_to.y)) + 1e-5; float dist_to_next_circle = dist - storm_to.radius; if (dist_to_next_circle < -32.0f) { return 0.0f; } else { for (int i = storm_phase_time; i < storm_phase_start_ticks[storm_phase_current] + storm_phase_duration_ticks[storm_phase_current]; i++) { float dist = sqrtf((storm_to.x - position[0]) * (storm_to.x - position[0]) + (storm_to.y - position[1]) * (storm_to.y - position[1])) + 1e-5; float delta_x = dist_per_tick * ((storm_to.x - position[0]) / dist); float delta_y = dist_per_tick * ((storm_to.y - position[1]) / dist); float p_x_at_i = position[0] + 32 + (i-storm_phase_time) * delta_x; float p_y_at_i = position[1] + 32 + (i-storm_phase_time) * delta_y; float storm_x_at_i; float storm_y_at_i; float storm_radius_at_i; if (i < storm_phase_start_ticks[storm_phase_current]) { storm_x_at_i = storm_last.x; storm_y_at_i = storm_last.y; storm_radius_at_i = storm_last.radius; } else { storm_x_at_i = storm_last.x + (int)(((i - storm_phase_start_ticks[storm_phase_current]) / (float)storm_phase_duration_ticks[storm_phase_current]) * ((int)storm_to.x - (int)storm_last.x)); storm_y_at_i = storm_last.y + (int)(((i - storm_phase_start_ticks[storm_phase_current]) / (float)storm_phase_duration_ticks[storm_phase_current]) * ((int)storm_to.y - (int)storm_last.y)); storm_radius_at_i = storm_last.radius + ((i - storm_phase_start_ticks[storm_phase_current]) / (float)storm_phase_duration_ticks[storm_phase_current]) * (storm_to.radius - storm_last.radius); } if (storm_radius_at_i < 32.0f) { storm_radius_at_i = 32.0f; } float dist_from_center = sqrtf((p_x_at_i - storm_x_at_i) * (p_x_at_i - storm_x_at_i) + (p_y_at_i - storm_y_at_i) * (p_y_at_i - storm_y_at_i)); float dist_from_target_center = sqrtf((p_x_at_i - storm_to.x) * (p_x_at_i - storm_to.x) + (p_y_at_i - storm_to.y) * (p_y_at_i - storm_to.y)); if (dist_from_center >= storm_radius_at_i - 32.0f) { return 1.0f; } else if (dist_from_target_center < storm_to.radius - 32.0f) { return 0.0f; } } /* if (((storm_phase_start_ticks[storm_phase_current] + storm_phase_duration_ticks[storm_phase_current]) - storm_phase_time) * 
dist_per_tick < dist_to_next_circle - 16.0f) { return 1.0f; } if (sqrtf((position[0]+16 - storm_last.x) * (position[0]+16 - storm_last.x) + (position[1]+16 - storm_last.y) * (position[1]+16 - storm_last.y)) - storm_last.radius > -16.0f) { return 1.0f; } */ } return 0.0f; } void storm_destroy() { storm_phase_current = 0; storm_phase_time = 0; storm_phase_start_ticks.clear(); storm_phase_duration_ticks.clear(); storm_phase_mapratio.clear(); storm_phase_dps.clear(); }
be6ff16135564b1b0632233ed0317c5c92b9f18c.cu
#include "Storm.hpp" #include "Main.hpp" #include "Map.hpp" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <random> #include <ctime> #include <math.h> #define rand() myrand() using namespace std; unsigned int storm_phase_current = 0; unsigned int storm_phase_time = 0; vector<unsigned int> storm_phase_start_ticks; vector<unsigned int> storm_phase_duration_ticks; vector<float> storm_phase_mapratio; vector<float> storm_phase_dps; struct storm storm_last; struct storm storm_current; struct storm storm_to; __global__ void draw_storm_kernel(unsigned int *device_output_data, const unsigned int output_position, const unsigned int width, const unsigned int height, const unsigned int channels, const unsigned int camera_crop_x1, const unsigned int camera_crop_y1, const float camera_z, const struct storm storm_current, const struct storm storm_to, const unsigned int storm_alpha, const struct vector3<unsigned char> storm_color) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < width * height) { int current_x = (i % width); int current_y = (i / width); unsigned char* frame = (unsigned char*)&device_output_data[output_position]; unsigned int storm_alpha = 50; if (sqrtf((camera_crop_x1 + current_x*camera_z - storm_current.x) * (camera_crop_x1+ current_x * camera_z - storm_current.x) + (camera_crop_y1 + current_y * camera_z - storm_current.y) * (camera_crop_y1 + current_y * camera_z - storm_current.y)) >= storm_current.radius) { frame[current_y * (width * channels) + current_x * channels] = (255 - storm_alpha)/255.0f * frame[current_y * (width * channels) + current_x * channels] + (storm_alpha/255.0f) * storm_color[0]; frame[current_y * (width * channels) + current_x * channels + 1] = (255 - storm_alpha) / 255.0f * frame[current_y * (width * channels) + current_x * channels + 1] + (storm_alpha / 255.0f) * storm_color[1]; frame[current_y * (width * channels) + current_x * channels + 2] = (255 - storm_alpha) / 255.0f * frame[current_y * (width * channels) + current_x * channels + 2] + (storm_alpha / 255.0f) * storm_color[2]; } unsigned int storm_circle_alpha = 150; if (sqrtf((camera_crop_x1 + current_x * camera_z - storm_to.x) * (camera_crop_x1 + current_x * camera_z - storm_to.x) + (camera_crop_y1 + current_y * camera_z - storm_to.y) * (camera_crop_y1 + current_y * camera_z - storm_to.y)) >= storm_to.radius-2.0f && sqrtf((camera_crop_x1 + current_x * camera_z - storm_to.x) * (camera_crop_x1 + current_x * camera_z - storm_to.x) + (camera_crop_y1 + current_y * camera_z - storm_to.y) * (camera_crop_y1 + current_y * camera_z - storm_to.y)) <= storm_to.radius+2.0f ) { frame[current_y * (width * channels) + current_x * channels] = (255 - storm_circle_alpha) / 255.0f * frame[current_y * (width * channels) + current_x * channels] + (storm_circle_alpha / 255.0f) * 255; frame[current_y * (width * channels) + current_x * channels + 1] = (255 - storm_circle_alpha) / 255.0f * frame[current_y * (width * channels) + current_x * channels + 1] + (storm_circle_alpha / 255.0f) * 255; frame[current_y * (width * channels) + current_x * channels + 2] = (255 - storm_circle_alpha) / 255.0f * frame[current_y * (width * channels) + current_x * channels + 2] + (storm_circle_alpha / 255.0f) * 255; } } } void launch_draw_storm_kernel(unsigned int* device_output_data, const unsigned int output_position, const unsigned int width, const unsigned int height, const unsigned int channels, const unsigned int camera_crop_x1, const unsigned int camera_crop_y1, const float camera_z, const struct storm 
storm_current, const struct storm storm_to, const unsigned int storm_alpha, const struct vector3<unsigned char> storm_color) { cudaError_t err = cudaSuccess; int threadsPerBlock = 256; int blocksPerGrid = (width * height + threadsPerBlock - 1) / threadsPerBlock; draw_storm_kernel<<<blocksPerGrid, threadsPerBlock>>> (device_output_data, output_position, width, height, channels, camera_crop_x1, camera_crop_y1, camera_z, storm_current, storm_to, storm_alpha, storm_color); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed in draw_storm_kernel (error code %s)\n", cudaGetErrorString(err)); } } void storm_init(struct bit_field* bf_map, struct bit_field* bf_rw) { printf("initialising storm\n"); storm_phase_start_ticks.emplace_back(5400); storm_phase_duration_ticks.emplace_back(3600); storm_phase_mapratio.emplace_back(0.75f); storm_phase_dps.emplace_back(1.0f); storm_phase_start_ticks.emplace_back(1800); storm_phase_duration_ticks.emplace_back(1800); storm_phase_mapratio.emplace_back(0.4f); storm_phase_dps.emplace_back(2.0f); storm_phase_start_ticks.emplace_back(1800); storm_phase_duration_ticks.emplace_back(1800); storm_phase_mapratio.emplace_back(0.2f); storm_phase_dps.emplace_back(4.0f); storm_phase_start_ticks.emplace_back(1800); storm_phase_duration_ticks.emplace_back(1800); storm_phase_mapratio.emplace_back(0.0f); storm_phase_dps.emplace_back(8.0f); float storm_radius = std::min(gm.map_dimensions[0], gm.map_dimensions[1]) * storm_phase_mapratio[0]/2.0f; int storm_center_max_x = (int)floorf(std::max<float>(gm.map_dimensions[0] - storm_radius, 0.0f)); int storm_center_max_y = (int)floorf(std::max<float>(gm.map_dimensions[1] - storm_radius, 0.0f)); storm_current.x = (unsigned int)gm.map_dimensions[0] / 2.0f; storm_current.y = (unsigned int)gm.map_dimensions[1] / 2.0f; storm_current.radius = floorf(std::max<float>(gm.map_dimensions[0], gm.map_dimensions[1]) * std::sqrtf(2)/2.0f); storm_last.x = storm_current.x; storm_last.y = storm_current.y; storm_last.radius = storm_current.radius; bool pathable_target = false; while (!pathable_target) { storm_to.x = (unsigned int)storm_radius + (rand() % (int)(storm_center_max_x - storm_radius)); storm_to.y = (unsigned int)storm_radius + (rand() % (int)(storm_center_max_y - storm_radius)); if (bf_map->data[gm.map_pathable_position + (storm_to.y) * gm.map_dimensions[0] + (storm_to.x)] > 0) { pathable_target = true; } } storm_to.radius = storm_radius; printf("storm initialized\n"); } void storm_next(struct bit_field* bf_map, struct bit_field* bf_rw) { storm_phase_time++; if (storm_phase_time == storm_phase_start_ticks[storm_phase_current] + storm_phase_duration_ticks[storm_phase_current]) { if (storm_phase_current + 1 < storm_phase_start_ticks.size()) { storm_phase_current++; storm_phase_time = 0; storm_current = storm_to; storm_last = storm_current; float storm_radius_new = std::min(gm.map_dimensions[0], gm.map_dimensions[1]) * storm_phase_mapratio[storm_phase_current] / 2.0f; float max_dist_from_last_center = storm_last.radius - storm_radius_new; bool pathable_target = false; while (!pathable_target) { float rand_dist = rand() / (float)RAND_MAX * max_dist_from_last_center; float rand_angle = rand() / (float)RAND_MAX * 2 * std::_Pi; storm_to.x = (unsigned int)(storm_last.x + rand_dist * std::cosf(rand_angle)); storm_to.y = (unsigned int)(storm_last.y + rand_dist * std::sinf(rand_angle)); if (bf_map->data[gm.map_pathable_position + (storm_to.y) * gm.map_dimensions[0] + (storm_to.x)] > 0) { pathable_target = true; } } storm_to.radius = 
storm_radius_new; } } if (storm_phase_time > storm_phase_start_ticks[storm_phase_current] && storm_phase_time < storm_phase_start_ticks[storm_phase_current] + storm_phase_duration_ticks[storm_phase_current]) { int delta_x = (int)(((storm_phase_time - storm_phase_start_ticks[storm_phase_current]) / (float)storm_phase_duration_ticks[storm_phase_current]) * ((int)storm_to.x - (int)storm_last.x)); int delta_y = (int)(((storm_phase_time - storm_phase_start_ticks[storm_phase_current]) / (float)storm_phase_duration_ticks[storm_phase_current]) * ((int)storm_to.y - (int)storm_last.y)); storm_current.x = storm_last.x + delta_x; storm_current.y = storm_last.y + delta_y; storm_current.radius = storm_last.radius + ((storm_phase_time - storm_phase_start_ticks[storm_phase_current]) / (float)storm_phase_duration_ticks[storm_phase_current]) * (storm_to.radius - storm_last.radius); } } bool storm_is_in(vector3<float> position) { if (sqrtf((position[0]+16 - storm_current.x) * (position[0]+16 - storm_current.x) + (position[1]+16 - storm_current.y) * (position[1]+16 - storm_current.y)) >= storm_current.radius - 16) { return true; } return false; } float storm_next_move_time(vector3<float> position, float dist_per_tick) { float dist = sqrtf((position[0] + 16 - storm_to.x) * (position[0] + 16 - storm_to.x) + (position[1] + 16 - storm_to.y) * (position[1] + 16 - storm_to.y)) + 1e-5; float dist_to_next_circle = dist - storm_to.radius; if (dist_to_next_circle < -32.0f) { return 0.0f; } else { for (int i = storm_phase_time; i < storm_phase_start_ticks[storm_phase_current] + storm_phase_duration_ticks[storm_phase_current]; i++) { float dist = sqrtf((storm_to.x - position[0]) * (storm_to.x - position[0]) + (storm_to.y - position[1]) * (storm_to.y - position[1])) + 1e-5; float delta_x = dist_per_tick * ((storm_to.x - position[0]) / dist); float delta_y = dist_per_tick * ((storm_to.y - position[1]) / dist); float p_x_at_i = position[0] + 32 + (i-storm_phase_time) * delta_x; float p_y_at_i = position[1] + 32 + (i-storm_phase_time) * delta_y; float storm_x_at_i; float storm_y_at_i; float storm_radius_at_i; if (i < storm_phase_start_ticks[storm_phase_current]) { storm_x_at_i = storm_last.x; storm_y_at_i = storm_last.y; storm_radius_at_i = storm_last.radius; } else { storm_x_at_i = storm_last.x + (int)(((i - storm_phase_start_ticks[storm_phase_current]) / (float)storm_phase_duration_ticks[storm_phase_current]) * ((int)storm_to.x - (int)storm_last.x)); storm_y_at_i = storm_last.y + (int)(((i - storm_phase_start_ticks[storm_phase_current]) / (float)storm_phase_duration_ticks[storm_phase_current]) * ((int)storm_to.y - (int)storm_last.y)); storm_radius_at_i = storm_last.radius + ((i - storm_phase_start_ticks[storm_phase_current]) / (float)storm_phase_duration_ticks[storm_phase_current]) * (storm_to.radius - storm_last.radius); } if (storm_radius_at_i < 32.0f) { storm_radius_at_i = 32.0f; } float dist_from_center = sqrtf((p_x_at_i - storm_x_at_i) * (p_x_at_i - storm_x_at_i) + (p_y_at_i - storm_y_at_i) * (p_y_at_i - storm_y_at_i)); float dist_from_target_center = sqrtf((p_x_at_i - storm_to.x) * (p_x_at_i - storm_to.x) + (p_y_at_i - storm_to.y) * (p_y_at_i - storm_to.y)); if (dist_from_center >= storm_radius_at_i - 32.0f) { return 1.0f; } else if (dist_from_target_center < storm_to.radius - 32.0f) { return 0.0f; } } /* if (((storm_phase_start_ticks[storm_phase_current] + storm_phase_duration_ticks[storm_phase_current]) - storm_phase_time) * dist_per_tick < dist_to_next_circle - 16.0f) { return 1.0f; } if (sqrtf((position[0]+16 - 
storm_last.x) * (position[0]+16 - storm_last.x) + (position[1]+16 - storm_last.y) * (position[1]+16 - storm_last.y)) - storm_last.radius > -16.0f) { return 1.0f; } */ } return 0.0f; } void storm_destroy() { storm_phase_current = 0; storm_phase_time = 0; storm_phase_start_ticks.clear(); storm_phase_duration_ticks.clear(); storm_phase_mapratio.clear(); storm_phase_dps.clear(); }
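In draw_storm_kernel above, each covered pixel is tinted with a fixed-point alpha blend; note that the kernel declares a local storm_alpha = 50 (and storm_circle_alpha = 150) that shadows the storm_alpha parameter, so the parameter value is never used. The blend itself is the standard convex combination, written out here as a scalar helper (illustrative only; the kernel inlines it per channel):

// out = (1 - alpha/255) * dst + (alpha/255) * src, with alpha in [0, 255]
static inline unsigned char blend(unsigned char dst, unsigned char src,
                                  unsigned int alpha)
{
    return (unsigned char)(((255 - alpha) / 255.0f) * dst
                           + (alpha / 255.0f) * src);
}

storm_next interpolates the shrinking circle linearly over a phase: with t = (storm_phase_time - start) / duration, the current center and radius are last + t * (to - last).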
44c9f21f7a3c5d78510be0f8b741a9855e9d28b4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2009 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation and * any modifications thereto. Any use, reproduction, disclosure, or distribution * of this software and related documentation without an express license * agreement from NVIDIA Corporation is strictly prohibited. * */ #define DCMT_SEED 4172 #define MT_RNG_PERIOD 607 typedef struct{ unsigned int matrix_a; unsigned int mask_b; unsigned int mask_c; unsigned int seed; } mt_struct_stripped; #define MT_RNG_COUNT 4096 #define MT_MM 9 #define MT_NN 19 #define MT_WMASK 0xFFFFFFFFU #define MT_UMASK 0xFFFFFFFEU #define MT_LMASK 0x1U #define MT_SHIFT0 12 #define MT_SHIFTB 7 #define MT_SHIFTC 15 #define MT_SHIFT1 18 //__device__ static mt_struct_stripped ds_MT[MT_RNG_COUNT]; //////////////////////////////////////////////////////////////////////////////// // Write MT_RNG_COUNT vertical lanes of NPerRng random numbers to *d_Random. // For coalesced global writes MT_RNG_COUNT should be a multiple of warp size. // Initial states for each generator are the same, since the states are // initialized from the global seed. In order to improve distribution properties // on small NPerRng supply dedicated (local) seed to each twister. // The local seeds, in their turn, can be extracted from global seed // by means of any simple random number generator, like LCG. //////////////////////////////////////////////////////////////////////////////// extern "C" __global__ void RandomGPU( float *d_Random, char *ds_MT_Bytes, int NPerRng, int seed ){ const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int THREAD_N = blockDim.x * gridDim.x; mt_struct_stripped *ds_MT = (mt_struct_stripped*)ds_MT_Bytes; int iState, iState1, iStateM, iOut; unsigned int mti, mti1, mtiM, x; unsigned int mt[MT_NN]; for(int iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N){ //Load bit-vector Mersenne Twister parameters mt_struct_stripped config = ds_MT[iRng]; //Initialize current state mt[0] = seed; for(iState = 1; iState < MT_NN; iState++) mt[iState] = (1812433253U * (mt[iState - 1] ^ (mt[iState - 1] >> 30)) + iState) & MT_WMASK; iState = 0; mti1 = mt[0]; for(iOut = 0; iOut < NPerRng; iOut++){ iState1 = iState + 1; iStateM = iState + MT_MM; if(iState1 >= MT_NN) iState1 -= MT_NN; if(iStateM >= MT_NN) iStateM -= MT_NN; mti = mti1; mti1 = mt[iState1]; mtiM = mt[iStateM]; x = (mti & MT_UMASK) | (mti1 & MT_LMASK); x = mtiM ^ (x >> 1) ^ ((x & 1) ? config.matrix_a : 0); mt[iState] = x; iState = iState1; //Tempering transformation x ^= (x >> MT_SHIFT0); x ^= (x << MT_SHIFTB) & config.mask_b; x ^= (x << MT_SHIFTC) & config.mask_c; x ^= (x >> MT_SHIFT1); //Convert to (0, 1] float and write to global memory d_Random[iRng + iOut * MT_RNG_COUNT] = ((float)x + 1.0f) / 4294967296.0f; } } }
44c9f21f7a3c5d78510be0f8b741a9855e9d28b4.cu
/* * Copyright 1993-2009 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation and * any modifications thereto. Any use, reproduction, disclosure, or distribution * of this software and related documentation without an express license * agreement from NVIDIA Corporation is strictly prohibited. * */ #define DCMT_SEED 4172 #define MT_RNG_PERIOD 607 typedef struct{ unsigned int matrix_a; unsigned int mask_b; unsigned int mask_c; unsigned int seed; } mt_struct_stripped; #define MT_RNG_COUNT 4096 #define MT_MM 9 #define MT_NN 19 #define MT_WMASK 0xFFFFFFFFU #define MT_UMASK 0xFFFFFFFEU #define MT_LMASK 0x1U #define MT_SHIFT0 12 #define MT_SHIFTB 7 #define MT_SHIFTC 15 #define MT_SHIFT1 18 //__device__ static mt_struct_stripped ds_MT[MT_RNG_COUNT]; //////////////////////////////////////////////////////////////////////////////// // Write MT_RNG_COUNT vertical lanes of NPerRng random numbers to *d_Random. // For coalesced global writes MT_RNG_COUNT should be a multiple of warp size. // Initial states for each generator are the same, since the states are // initialized from the global seed. In order to improve distribution properties // on small NPerRng supply dedicated (local) seed to each twister. // The local seeds, in their turn, can be extracted from global seed // by means of any simple random number generator, like LCG. //////////////////////////////////////////////////////////////////////////////// extern "C" __global__ void RandomGPU( float *d_Random, char *ds_MT_Bytes, int NPerRng, int seed ){ const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int THREAD_N = blockDim.x * gridDim.x; mt_struct_stripped *ds_MT = (mt_struct_stripped*)ds_MT_Bytes; int iState, iState1, iStateM, iOut; unsigned int mti, mti1, mtiM, x; unsigned int mt[MT_NN]; for(int iRng = tid; iRng < MT_RNG_COUNT; iRng += THREAD_N){ //Load bit-vector Mersenne Twister parameters mt_struct_stripped config = ds_MT[iRng]; //Initialize current state mt[0] = seed; for(iState = 1; iState < MT_NN; iState++) mt[iState] = (1812433253U * (mt[iState - 1] ^ (mt[iState - 1] >> 30)) + iState) & MT_WMASK; iState = 0; mti1 = mt[0]; for(iOut = 0; iOut < NPerRng; iOut++){ iState1 = iState + 1; iStateM = iState + MT_MM; if(iState1 >= MT_NN) iState1 -= MT_NN; if(iStateM >= MT_NN) iStateM -= MT_NN; mti = mti1; mti1 = mt[iState1]; mtiM = mt[iStateM]; x = (mti & MT_UMASK) | (mti1 & MT_LMASK); x = mtiM ^ (x >> 1) ^ ((x & 1) ? config.matrix_a : 0); mt[iState] = x; iState = iState1; //Tempering transformation x ^= (x >> MT_SHIFT0); x ^= (x << MT_SHIFTB) & config.mask_b; x ^= (x << MT_SHIFTC) & config.mask_c; x ^= (x >> MT_SHIFT1); //Convert to (0, 1] float and write to global memory d_Random[iRng + iOut * MT_RNG_COUNT] = ((float)x + 1.0f) / 4294967296.0f; } } }
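Two details of RandomGPU worth spelling out: the output step ((float)x + 1.0f) / 4294967296.0f maps the raw 32-bit word x in {0, ..., 2^32 - 1} to (x + 1) / 2^32, i.e. the half-open interval (0, 1], so 0.0f is never produced. And the header comment recommends deriving a dedicated seed per twister from the global seed with any simple generator such as an LCG; a minimal host-side sketch (the constants are the standard Numerical Recipes LCG; globalSeed and h_seeds are hypothetical names):

// Derive one seed per twister lane from a single global seed with a 32-bit LCG.
static unsigned int lcg_next(unsigned int *state)
{
    *state = 1664525u * (*state) + 1013904223u;  // Numerical Recipes constants
    return *state;
}

// Usage (hypothetical buffer): one distinct seed per twister.
// unsigned int globalSeed = 777u, h_seeds[MT_RNG_COUNT];
// for (int i = 0; i < MT_RNG_COUNT; i++) h_seeds[i] = lcg_next(&globalSeed);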
f89e470d8c7a6f0f70c19bbcca6e8b27a8c5e5be.hip
// !!! This is a file automatically generated by hipify!!! # include <stdlib.h> # include <stdio.h> # include <math.h> # include <time.h> #include "common.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <device_launch_parameters.h> #include <hip/device_functions.h> double cpu_time(void) { double value; value = (double)clock() / (double)CLOCKS_PER_SEC; return value; } int sequential(int argc, char *argv[], Result_Vect *result) { int M; int N; double ctime; double ctime1; double ctime2; double diff; double epsilon; FILE *fp; int i; int iterations; int iterations_print; int j; double mean; char output_file[80]; int success; double **u; double **w; printf("\n\nSEQUENTIAL\n"); if (argc != 5) { printf("Wrong number of arguments!\n"); return 1; } else { success = sscanf(argv[1], "%d", &M); success += sscanf(argv[2], "%d", &N); success += sscanf(argv[3], "%lf", &epsilon); success += sscanf(argv[4], "%s", output_file); if (success != 4) { printf("Wrong arguments!\n"); return 2; } } printf("\n"); printf("HEATED_PLATE\n"); printf(" C version\n"); printf(" A program to solve for the steady state temperature distribution\n"); printf(" over a rectangular plate.\n"); printf("\n"); printf(" Spatial grid of %d by %d points.\n", M, N); printf("\n"); printf(" The iteration will be repeated until the change is <= %f\n", epsilon); diff = epsilon; printf("\n"); printf(" The steady state solution will be written to %s.\n", output_file); u = (double **)malloc(M * sizeof(double*)); for (i = 0; i < M; i++) u[i] = (double *)malloc(N * sizeof(double)); w = (double **)malloc(M * sizeof(double*)); for (i = 0; i < M; i++) w[i] = (double *)malloc(N * sizeof(double)); /* Set the boundary values, which don't change. */ for (i = 1; i < M - 1; i++) { w[i][0] = 100.0; } for (i = 1; i < M - 1; i++) { w[i][N - 1] = 100.0; } for (j = 0; j < N; j++) { w[M - 1][j] = 100.0; } for (j = 0; j < N; j++) { w[0][j] = 0.0; } /* Average the boundary values, to come up with a reasonable initial value for the interior. */ mean = 0.0; for (i = 1; i < M - 1; i++) { mean = mean + w[i][0]; } for (i = 1; i < M - 1; i++) { mean = mean + w[i][N - 1]; } for (j = 0; j < N; j++) { mean = mean + w[M - 1][j]; } for (j = 0; j < N; j++) { mean = mean + w[0][j]; } mean = mean / (double)(2 * M + 2 * N - 4); /* Initialize the interior solution to the mean value. */ for (i = 1; i < M - 1; i++) { for (j = 1; j < N - 1; j++) { w[i][j] = mean; } } /* iterate until the new solution W differs from the old solution U by no more than EPSILON. */ iterations = 0; iterations_print = 1; printf("\n"); printf(" Iteration Change\n"); printf("\n"); ctime1 = cpu_time(); while (epsilon <= diff) { /* Save the old solution in U. */ for (i = 0; i < M; i++) { for (j = 0; j < N; j++) { u[i][j] = w[i][j]; } } /* Determine the new estimate of the solution at the interior points. The new solution W is the average of north, south, east and west neighbors. */ diff = 0.0; for (i = 1; i < M - 1; i++) { for (j = 1; j < N - 1; j++) { w[i][j] = (u[i - 1][j] + u[i + 1][j] + u[i][j - 1] + u[i][j + 1]) / 4.0; if (diff < fabs(w[i][j] - u[i][j])) { diff = fabs(w[i][j] - u[i][j]); } } } /*for (i = 0; i < 3; i++) for (j = 0; j < 3; j++) if (i < 2 && j < 2) { printf("seq - devvv? 
%d %d %llf - %llf\n", i, j, u[i][j], w[i][j]); }*/ iterations++; if (iterations == iterations_print) { printf(" %8d %f\n", iterations, diff); iterations_print = 2 * iterations_print; } /*if (iterations == 5) { diff = 0; }*/ } ctime2 = cpu_time(); ctime = ctime2 - ctime1; printf("\n"); printf(" %8d %f\n", iterations, diff); printf("\n"); printf(" Error tolerance achieved.\n"); printf(" CPU time = %f\n", ctime); /* Write the solution to the output file. */ fp = fopen(output_file, "w"); fprintf(fp, "%d\n", M); fprintf(fp, "%d\n", N); result->val_size = M*N; result->value = (double*)malloc(M*N * sizeof(double)); result->time = ctime; for (i = 0; i < M; i++) { for (j = 0; j < N; j++) { fprintf(fp, "%6.2f ", w[i][j]); result->value[i*N + j] = w[i][j]; } fputc('\n', fp); } fclose(fp); printf("\n"); printf(" Solution written to the output file %s\n", output_file); /* All done! */ printf("\n"); printf("HEATED_PLATE:\n"); printf(" Normal end of execution.\n"); return 0; } ////////////////////////// parallel //__device__ static double atomicMax(double* address, double val) //{ // unsigned long long int* address_as_i = // (unsigned long long int*)address; // unsigned long long int old = *address_as_i, assumed; // do { // assumed = old; // old = ::atomicCAS(address_as_i, assumed, // __double_as_longlong( (val > __longlong_as_double(assumed) ) ? val : __longlong_as_double(assumed))); // } while (assumed != old); // return __longlong_as_double(old); //} __device__ void atomicMax(double * const address, const double value) { if (*address >= value) { return; } unsigned long long int * const address_as_i = (unsigned long long int *)address; unsigned long long int old = *address_as_i, assumed; do { assumed = old; if (__longlong_as_double(assumed) >= value) { break; } old = atomicCAS(address_as_i, assumed, __double_as_longlong(value)); } while (assumed != old); } __global__ void heated_kernel(double *devA, double *devB, int N, int M, double *epsilon) { int i = blockIdx.x * blockDim.x + threadIdx.x + 1; int j = blockIdx.y * blockDim.y + threadIdx.y + 1; //printf("nesto? %d %d\n", i ,j); if (i > 0 && j > 0 && i < M-1 && j < N-1) { devB[i*N + j] = (devA[(i - 1)*N + j] + devA[(i + 1)*N + j] + devA[i * N + j - 1] + devA[i*N + j + 1]) / 4.0; atomicMax(epsilon, devB[i*N + j] - devA[i*N + j]); atomicMax(epsilon, devA[i*N + j] - devB[i*N + j]); } /*if (i < 2 && j < 2) { printf("devvv? %d %d %llf - %llf\n", i, j, devA[i*N + j], devB[i*N + j]); }*/ } __global__ void heated_kernel2(double *devA, double *devB, int N, int M, double epsilon, int *isBigger) { int i = blockIdx.x * blockDim.x + threadIdx.x + 1; int j = blockIdx.y * blockDim.y + threadIdx.y + 1; //printf("nesto? %d %d\n", i ,j); if (i > 0 && j > 0 && i < M - 1 && j < N - 1) { devB[i*N + j] = (devA[(i - 1)*N + j] + devA[(i + 1)*N + j] + devA[i * N + j - 1] + devA[i*N + j + 1]) / 4.0; if (devB[i*N + j] - devA[i*N + j] > epsilon) { *isBigger = 1; } if (devA[i*N + j] - devB[i*N + j] > epsilon) { *isBigger = 1; } } /*if (i < 2 && j < 2) { printf("devvv? 
%d %d %llf - %llf\n", i, j, devA[i*N + j], devB[i*N + j]); }*/ } #define USEPLATE 1 void parallel_heated_plate(double **u, double **w, int N, int M, double epsilon) { double diff = epsilon; int iterations = 0; int iterations_print = 1; int i, j; double zeroDouble = 0.0; int zeroInt = 0; int isDiff = 1; dim3 threadsPerBlock(32, 16); dim3 numBlocks((M - 2 +threadsPerBlock.x - 1) / threadsPerBlock.x, (N - 2 + threadsPerBlock.y - 1) / threadsPerBlock.y); double *devU, *devW; double *epsilonCuda; int *isDiffCuda; hipMalloc((void **)&devU, N*M * sizeof(double)); hipMalloc((void **)&devW, N*M * sizeof(double)); if (hipSuccess != hipGetLastError()) { printf("errorr------------------------------------------\n"); } hipMalloc((void **)&epsilonCuda, sizeof(double)); hipMalloc((void **)&isDiffCuda, sizeof(int)); if (hipSuccess != hipGetLastError()) { printf("errorr------------------------------------------\n"); } for (int i = 0; i < M; i++) { hipMemcpy(devU + i*N, u[i], N * sizeof(double), hipMemcpyHostToDevice); if (hipSuccess != hipGetLastError()) { printf("errorr------------------------------------------\n"); } } for (int i = 0; i < M; i++) { hipMemcpy(devW + i*N, w[i], N * sizeof(double), hipMemcpyHostToDevice); if (hipSuccess != hipGetLastError()) { printf("errorr------------------------------------------\n"); } } //hipMemcpy(devU, u, N*M * sizeof(double), hipMemcpyHostToDevice); int cnt = 0; #if USEPLATE == 1 while (epsilon <= diff) #endif #if USEPLATE == 2 while (isDiff) #endif { cnt++; if (USEPLATE == 1) { hipMemcpy(epsilonCuda, &zeroDouble, sizeof(double), hipMemcpyHostToDevice); } else if (USEPLATE == 2) { hipMemcpy(isDiffCuda, &zeroInt, sizeof(int), hipMemcpyHostToDevice); } /* Determine the new estimate of the solution at the interior points. The new solution W is the average of north, south, east and west neighbors. */ if (cnt % 2) { /*printf("%d u w\n", cnt);*/ if (USEPLATE == 1) { heated_kernel << < numBlocks, threadsPerBlock >> > (devU, devW, N, M, epsilonCuda); } else if (USEPLATE == 2) { heated_kernel2 << < numBlocks, threadsPerBlock >> > (devU, devW, N, M, epsilon, isDiffCuda); } } else { /*printf("%d w u\n", cnt);*/ if (USEPLATE == 1) { heated_kernel << < numBlocks, threadsPerBlock >> > (devW, devU, N, M, epsilonCuda); } else if (USEPLATE == 2) { heated_kernel2 << < numBlocks, threadsPerBlock >> > (devW, devU, N, M, epsilon, isDiffCuda); } } hipDeviceSynchronize(); if (hipSuccess != hipGetLastError()) { printf("errorr------------------------------------------\n"); } if (USEPLATE == 1) { hipMemcpy(&diff, epsilonCuda, sizeof(double), hipMemcpyDeviceToHost); } else if (USEPLATE == 2) { hipMemcpy(&isDiff, isDiffCuda, sizeof(int), hipMemcpyDeviceToHost); } //if (hipSuccess != hipGetLastError()) { // printf("errorr\n"); //} iterations++; if (iterations == iterations_print) { printf(" %8d %f\n", iterations, diff); iterations_print = 2 * iterations_print; } /*if (iterations == 5) { diff = 0; }*/ } if (cnt % 2) { for (int i = 0; i < M; i++) { hipMemcpy(w[i], devW + i*N, N * sizeof(double), hipMemcpyDeviceToHost); if (hipSuccess != hipGetLastError()) { printf("errorr\n"); } //printf("sta bre? %d, %llf, %llf, %llf\n", i, w[i][0], w[i][1], w[i][2]); } } else { for (int i = 0; i < M; i++) { hipMemcpy(w[i], devU + i*N, N * sizeof(double), hipMemcpyDeviceToHost); if (hipSuccess != hipGetLastError()) { printf("errorr\n"); } //printf("sta bru? 
%d, %llf, %llf, %llf\n", i, w[i][0], w[i][1], w[i][2]); } } printf("\n"); printf(" %8d %f\n", iterations, diff); } int parallel(int argc, char *argv[], Result_Vect *result) { int M; int N; double ctime; double ctime1; double ctime2; double diff; double epsilon; FILE *fp; int i; int iterations; int iterations_print; int j; double mean; char output_file[80]; int success; double **u; double **w; printf("\n\nPARALLEL\n"); if (argc != 5) { printf("Wrong number of arguments!\n"); return 1; } else { success = sscanf(argv[1], "%d", &M); success += sscanf(argv[2], "%d", &N); success += sscanf(argv[3], "%lf", &epsilon); success += sscanf(argv[4], "%s", output_file); if (success != 4) { printf("Wrong arguments!\n"); return 2; } } printf("\n"); printf("HEATED_PLATE\n"); printf(" C version\n"); printf(" A program to solve for the steady state temperature distribution\n"); printf(" over a rectangular plate.\n"); printf("\n"); printf(" Spatial grid of %d by %d points.\n", M, N); printf("\n"); printf(" The iteration will be repeated until the change is <= %f\n", epsilon); diff = epsilon; printf("\n"); printf(" The steady state solution will be written to %s.\n", output_file); u = (double **)malloc(M * sizeof(double*)); for (i = 0; i < M; i++) u[i] = (double *)malloc(N * sizeof(double)); w = (double **)malloc(M * sizeof(double*)); for (i = 0; i < M; i++) w[i] = (double *)malloc(N * sizeof(double)); /* Set the boundary values, which don't change. */ for (i = 1; i < M - 1; i++) { w[i][0] = 100.0; } for (i = 1; i < M - 1; i++) { w[i][N - 1] = 100.0; } for (j = 0; j < N; j++) { w[M - 1][j] = 100.0; } for (j = 0; j < N; j++) { w[0][j] = 0.0; } /* Average the boundary values, to come up with a reasonable initial value for the interior. */ mean = 0.0; for (i = 1; i < M - 1; i++) { mean = mean + w[i][0]; } for (i = 1; i < M - 1; i++) { mean = mean + w[i][N - 1]; } for (j = 0; j < N; j++) { mean = mean + w[M - 1][j]; } for (j = 0; j < N; j++) { mean = mean + w[0][j]; } mean = mean / (double)(2 * M + 2 * N - 4); /* Initialize the interior solution to the mean value. */ for (i = 1; i < M - 1; i++) { for (j = 1; j < N - 1; j++) { w[i][j] = mean; } } for (i = 0; i < M; i++) { for (j = 0; j < N; j++) { u[i][j] = w[i][j]; } } /* iterate until the new solution W differs from the old solution U by no more than EPSILON. */ iterations = 0; iterations_print = 1; printf("\n"); printf(" Iteration Change\n"); printf("\n"); ctime1 = cpu_time(); parallel_heated_plate(u, w, N, M, epsilon); //while (epsilon <= diff) //{ // /* // Save the old solution in U. // */ // for (i = 0; i < M; i++) // { // for (j = 0; j < N; j++) // { // u[i][j] = w[i][j]; // } // } // /* // Determine the new estimate of the solution at the interior points. // The new solution W is the average of north, south, east and west neighbors. // */ // diff = 0.0; // for (i = 1; i < M - 1; i++) // { // for (j = 1; j < N - 1; j++) // { // w[i][j] = (u[i - 1][j] + u[i + 1][j] + u[i][j - 1] + u[i][j + 1]) / 4.0; // if (diff < fabs(w[i][j] - u[i][j])) // { // diff = fabs(w[i][j] - u[i][j]); // } // } // } // iterations++; // if (iterations == iterations_print) // { // printf(" %8d %f\n", iterations, diff); // iterations_print = 2 * iterations_print; // } //} ctime2 = cpu_time(); ctime = ctime2 - ctime1; printf("\n"); printf(" Error tolerance achieved.\n"); printf(" CPU time = %f\n", ctime); /* Write the solution to the output file. 
*/ fp = fopen(output_file, "w"); fprintf(fp, "%d\n", M); fprintf(fp, "%d\n", N); result->val_size = M*N; result->value = (double*)malloc(M*N * sizeof(double)); result->time = ctime; for (i = 0; i < M; i++) { //printf("sta brej? %d, %llf, %llf, %llf\n", i, w[i][0], w[i][1], w[i][2]); for (j = 0; j < N; j++) { fprintf(fp, "%6.2f ", w[i][j]); result->value[i*N + j] = w[i][j]; } fputc('\n', fp); } fclose(fp); printf("\n"); printf(" Solution written to the output file %s\n", output_file); /* All done! */ printf("\n"); printf("HEATED_PLATE:\n"); printf(" Normal end of execution.\n"); return 0; } int main(int argc, char * argv[]) { Result_Vect seq_result, par_result; sequential(argc, argv, &seq_result); parallel(argc, argv, &par_result); compare_and_print_vect(seq_result, par_result, "heated plate"); }
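/* The host code above (and its CUDA twin below) checks hipGetLastError() /
   cudaGetLastError() after nearly every runtime call and prints only a bare
   "errorr" marker. A minimal, reusable check-macro sketch, written here against
   the CUDA runtime API of the .cu twin (an editorial illustration, not part of
   the original sources; a HIP build would use hipError_t, hipGetErrorName, and
   hipGetErrorString the same way): */
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                              \
  do {                                                                \
    cudaError_t err_ = (call);                                        \
    if (err_ != cudaSuccess) {                                        \
      fprintf(stderr, "CUDA error %s at %s:%d: %s\n",                 \
              cudaGetErrorName(err_), __FILE__, __LINE__,             \
              cudaGetErrorString(err_));                              \
      exit(EXIT_FAILURE);                                             \
    }                                                                 \
  } while (0)

/* Usage sketch:
     CUDA_CHECK(cudaMalloc((void **)&devU, N * M * sizeof(double)));
   Kernel launches return nothing, so follow them with
     CUDA_CHECK(cudaGetLastError());
     CUDA_CHECK(cudaDeviceSynchronize()); */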
f89e470d8c7a6f0f70c19bbcca6e8b27a8c5e5be.cu
# include <stdlib.h> # include <stdio.h> # include <math.h> # include <time.h> #include "common.h" #include <cuda.h> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> #include <device_functions.h> double cpu_time(void) { double value; value = (double)clock() / (double)CLOCKS_PER_SEC; return value; } int sequential(int argc, char *argv[], Result_Vect *result) { int M; int N; double ctime; double ctime1; double ctime2; double diff; double epsilon; FILE *fp; int i; int iterations; int iterations_print; int j; double mean; char output_file[80]; int success; double **u; double **w; printf("\n\nSEQUENTIAL\n"); if (argc != 5) { printf("Wrong number of arguments!\n"); return 1; } else { success = sscanf(argv[1], "%d", &M); success += sscanf(argv[2], "%d", &N); success += sscanf(argv[3], "%lf", &epsilon); success += sscanf(argv[4], "%s", output_file); if (success != 4) { printf("Wrong arguments!\n"); return 2; } } printf("\n"); printf("HEATED_PLATE\n"); printf(" C version\n"); printf(" A program to solve for the steady state temperature distribution\n"); printf(" over a rectangular plate.\n"); printf("\n"); printf(" Spatial grid of %d by %d points.\n", M, N); printf("\n"); printf(" The iteration will be repeated until the change is <= %f\n", epsilon); diff = epsilon; printf("\n"); printf(" The steady state solution will be written to %s.\n", output_file); u = (double **)malloc(M * sizeof(double*)); for (i = 0; i < M; i++) u[i] = (double *)malloc(N * sizeof(double)); w = (double **)malloc(M * sizeof(double*)); for (i = 0; i < M; i++) w[i] = (double *)malloc(N * sizeof(double)); /* Set the boundary values, which don't change. */ for (i = 1; i < M - 1; i++) { w[i][0] = 100.0; } for (i = 1; i < M - 1; i++) { w[i][N - 1] = 100.0; } for (j = 0; j < N; j++) { w[M - 1][j] = 100.0; } for (j = 0; j < N; j++) { w[0][j] = 0.0; } /* Average the boundary values, to come up with a reasonable initial value for the interior. */ mean = 0.0; for (i = 1; i < M - 1; i++) { mean = mean + w[i][0]; } for (i = 1; i < M - 1; i++) { mean = mean + w[i][N - 1]; } for (j = 0; j < N; j++) { mean = mean + w[M - 1][j]; } for (j = 0; j < N; j++) { mean = mean + w[0][j]; } mean = mean / (double)(2 * M + 2 * N - 4); /* Initialize the interior solution to the mean value. */ for (i = 1; i < M - 1; i++) { for (j = 1; j < N - 1; j++) { w[i][j] = mean; } } /* iterate until the new solution W differs from the old solution U by no more than EPSILON. */ iterations = 0; iterations_print = 1; printf("\n"); printf(" Iteration Change\n"); printf("\n"); ctime1 = cpu_time(); while (epsilon <= diff) { /* Save the old solution in U. */ for (i = 0; i < M; i++) { for (j = 0; j < N; j++) { u[i][j] = w[i][j]; } } /* Determine the new estimate of the solution at the interior points. The new solution W is the average of north, south, east and west neighbors. */ diff = 0.0; for (i = 1; i < M - 1; i++) { for (j = 1; j < N - 1; j++) { w[i][j] = (u[i - 1][j] + u[i + 1][j] + u[i][j - 1] + u[i][j + 1]) / 4.0; if (diff < fabs(w[i][j] - u[i][j])) { diff = fabs(w[i][j] - u[i][j]); } } } /*for (i = 0; i < 3; i++) for (j = 0; j < 3; j++) if (i < 2 && j < 2) { printf("seq - devvv? 
%d %d %llf - %llf\n", i, j, u[i][j], w[i][j]); }*/ iterations++; if (iterations == iterations_print) { printf(" %8d %f\n", iterations, diff); iterations_print = 2 * iterations_print; } /*if (iterations == 5) { diff = 0; }*/ } ctime2 = cpu_time(); ctime = ctime2 - ctime1; printf("\n"); printf(" %8d %f\n", iterations, diff); printf("\n"); printf(" Error tolerance achieved.\n"); printf(" CPU time = %f\n", ctime); /* Write the solution to the output file. */ fp = fopen(output_file, "w"); fprintf(fp, "%d\n", M); fprintf(fp, "%d\n", N); result->val_size = M*N; result->value = (double*)malloc(M*N * sizeof(double)); result->time = ctime; for (i = 0; i < M; i++) { for (j = 0; j < N; j++) { fprintf(fp, "%6.2f ", w[i][j]); result->value[i*N + j] = w[i][j]; } fputc('\n', fp); } fclose(fp); printf("\n"); printf(" Solution written to the output file %s\n", output_file); /* All done! */ printf("\n"); printf("HEATED_PLATE:\n"); printf(" Normal end of execution.\n"); return 0; } ////////////////////////// parallel //__device__ static double atomicMax(double* address, double val) //{ // unsigned long long int* address_as_i = // (unsigned long long int*)address; // unsigned long long int old = *address_as_i, assumed; // do { // assumed = old; // old = ::atomicCAS(address_as_i, assumed, // __double_as_longlong( (val > __longlong_as_double(assumed) ) ? val : __longlong_as_double(assumed))); // } while (assumed != old); // return __longlong_as_double(old); //} __device__ void atomicMax(double * const address, const double value) { if (*address >= value) { return; } unsigned long long int * const address_as_i = (unsigned long long int *)address; unsigned long long int old = *address_as_i, assumed; do { assumed = old; if (__longlong_as_double(assumed) >= value) { break; } old = atomicCAS(address_as_i, assumed, __double_as_longlong(value)); } while (assumed != old); } __global__ void heated_kernel(double *devA, double *devB, int N, int M, double *epsilon) { int i = blockIdx.x * blockDim.x + threadIdx.x + 1; int j = blockIdx.y * blockDim.y + threadIdx.y + 1; //printf("nesto? %d %d\n", i ,j); if (i > 0 && j > 0 && i < M-1 && j < N-1) { devB[i*N + j] = (devA[(i - 1)*N + j] + devA[(i + 1)*N + j] + devA[i * N + j - 1] + devA[i*N + j + 1]) / 4.0; atomicMax(epsilon, devB[i*N + j] - devA[i*N + j]); atomicMax(epsilon, devA[i*N + j] - devB[i*N + j]); } /*if (i < 2 && j < 2) { printf("devvv? %d %d %llf - %llf\n", i, j, devA[i*N + j], devB[i*N + j]); }*/ } __global__ void heated_kernel2(double *devA, double *devB, int N, int M, double epsilon, int *isBigger) { int i = blockIdx.x * blockDim.x + threadIdx.x + 1; int j = blockIdx.y * blockDim.y + threadIdx.y + 1; //printf("nesto? %d %d\n", i ,j); if (i > 0 && j > 0 && i < M - 1 && j < N - 1) { devB[i*N + j] = (devA[(i - 1)*N + j] + devA[(i + 1)*N + j] + devA[i * N + j - 1] + devA[i*N + j + 1]) / 4.0; if (devB[i*N + j] - devA[i*N + j] > epsilon) { *isBigger = 1; } if (devA[i*N + j] - devB[i*N + j] > epsilon) { *isBigger = 1; } } /*if (i < 2 && j < 2) { printf("devvv? 
%d %d %llf - %llf\n", i, j, devA[i*N + j], devB[i*N + j]); }*/ } #define USEPLATE 1 void parallel_heated_plate(double **u, double **w, int N, int M, double epsilon) { double diff = epsilon; int iterations = 0; int iterations_print = 1; int i, j; double zeroDouble = 0.0; int zeroInt = 0; int isDiff = 1; dim3 threadsPerBlock(32, 16); dim3 numBlocks((M - 2 +threadsPerBlock.x - 1) / threadsPerBlock.x, (N - 2 + threadsPerBlock.y - 1) / threadsPerBlock.y); double *devU, *devW; double *epsilonCuda; int *isDiffCuda; cudaMalloc((void **)&devU, N*M * sizeof(double)); cudaMalloc((void **)&devW, N*M * sizeof(double)); if (cudaSuccess != cudaGetLastError()) { printf("errorr------------------------------------------\n"); } cudaMalloc((void **)&epsilonCuda, sizeof(double)); cudaMalloc((void **)&isDiffCuda, sizeof(int)); if (cudaSuccess != cudaGetLastError()) { printf("errorr------------------------------------------\n"); } for (int i = 0; i < M; i++) { cudaMemcpy(devU + i*N, u[i], N * sizeof(double), cudaMemcpyHostToDevice); if (cudaSuccess != cudaGetLastError()) { printf("errorr------------------------------------------\n"); } } for (int i = 0; i < M; i++) { cudaMemcpy(devW + i*N, w[i], N * sizeof(double), cudaMemcpyHostToDevice); if (cudaSuccess != cudaGetLastError()) { printf("errorr------------------------------------------\n"); } } //cudaMemcpy(devU, u, N*M * sizeof(double), cudaMemcpyHostToDevice); int cnt = 0; #if USEPLATE == 1 while (epsilon <= diff) #endif #if USEPLATE == 2 while (isDiff) #endif { cnt++; if (USEPLATE == 1) { cudaMemcpy(epsilonCuda, &zeroDouble, sizeof(double), cudaMemcpyHostToDevice); } else if (USEPLATE == 2) { cudaMemcpy(isDiffCuda, &zeroInt, sizeof(int), cudaMemcpyHostToDevice); } /* Determine the new estimate of the solution at the interior points. The new solution W is the average of north, south, east and west neighbors. */ if (cnt % 2) { /*printf("%d u w\n", cnt);*/ if (USEPLATE == 1) { heated_kernel << < numBlocks, threadsPerBlock >> > (devU, devW, N, M, epsilonCuda); } else if (USEPLATE == 2) { heated_kernel2 << < numBlocks, threadsPerBlock >> > (devU, devW, N, M, epsilon, isDiffCuda); } } else { /*printf("%d w u\n", cnt);*/ if (USEPLATE == 1) { heated_kernel << < numBlocks, threadsPerBlock >> > (devW, devU, N, M, epsilonCuda); } else if (USEPLATE == 2) { heated_kernel2 << < numBlocks, threadsPerBlock >> > (devW, devU, N, M, epsilon, isDiffCuda); } } cudaDeviceSynchronize(); if (cudaSuccess != cudaGetLastError()) { printf("errorr------------------------------------------\n"); } if (USEPLATE == 1) { cudaMemcpy(&diff, epsilonCuda, sizeof(double), cudaMemcpyDeviceToHost); } else if (USEPLATE == 2) { cudaMemcpy(&isDiff, isDiffCuda, sizeof(int), cudaMemcpyDeviceToHost); } //if (cudaSuccess != cudaGetLastError()) { // printf("errorr\n"); //} iterations++; if (iterations == iterations_print) { printf(" %8d %f\n", iterations, diff); iterations_print = 2 * iterations_print; } /*if (iterations == 5) { diff = 0; }*/ } if (cnt % 2) { for (int i = 0; i < M; i++) { cudaMemcpy(w[i], devW + i*N, N * sizeof(double), cudaMemcpyDeviceToHost); if (cudaSuccess != cudaGetLastError()) { printf("errorr\n"); } //printf("sta bre? %d, %llf, %llf, %llf\n", i, w[i][0], w[i][1], w[i][2]); } } else { for (int i = 0; i < M; i++) { cudaMemcpy(w[i], devU + i*N, N * sizeof(double), cudaMemcpyDeviceToHost); if (cudaSuccess != cudaGetLastError()) { printf("errorr\n"); } //printf("sta bru? 
%d, %llf, %llf, %llf\n", i, w[i][0], w[i][1], w[i][2]); } } printf("\n"); printf(" %8d %f\n", iterations, diff); } int parallel(int argc, char *argv[], Result_Vect *result) { int M; int N; double ctime; double ctime1; double ctime2; double diff; double epsilon; FILE *fp; int i; int iterations; int iterations_print; int j; double mean; char output_file[80]; int success; double **u; double **w; printf("\n\nPARALLEL\n"); if (argc != 5) { printf("Wrong number of arguments!\n"); return 1; } else { success = sscanf(argv[1], "%d", &M); success += sscanf(argv[2], "%d", &N); success += sscanf(argv[3], "%lf", &epsilon); success += sscanf(argv[4], "%s", output_file); if (success != 4) { printf("Wrong arguments!\n"); return 2; } } printf("\n"); printf("HEATED_PLATE\n"); printf(" C version\n"); printf(" A program to solve for the steady state temperature distribution\n"); printf(" over a rectangular plate.\n"); printf("\n"); printf(" Spatial grid of %d by %d points.\n", M, N); printf("\n"); printf(" The iteration will be repeated until the change is <= %f\n", epsilon); diff = epsilon; printf("\n"); printf(" The steady state solution will be written to %s.\n", output_file); u = (double **)malloc(M * sizeof(double*)); for (i = 0; i < M; i++) u[i] = (double *)malloc(N * sizeof(double)); w = (double **)malloc(M * sizeof(double*)); for (i = 0; i < M; i++) w[i] = (double *)malloc(N * sizeof(double)); /* Set the boundary values, which don't change. */ for (i = 1; i < M - 1; i++) { w[i][0] = 100.0; } for (i = 1; i < M - 1; i++) { w[i][N - 1] = 100.0; } for (j = 0; j < N; j++) { w[M - 1][j] = 100.0; } for (j = 0; j < N; j++) { w[0][j] = 0.0; } /* Average the boundary values, to come up with a reasonable initial value for the interior. */ mean = 0.0; for (i = 1; i < M - 1; i++) { mean = mean + w[i][0]; } for (i = 1; i < M - 1; i++) { mean = mean + w[i][N - 1]; } for (j = 0; j < N; j++) { mean = mean + w[M - 1][j]; } for (j = 0; j < N; j++) { mean = mean + w[0][j]; } mean = mean / (double)(2 * M + 2 * N - 4); /* Initialize the interior solution to the mean value. */ for (i = 1; i < M - 1; i++) { for (j = 1; j < N - 1; j++) { w[i][j] = mean; } } for (i = 0; i < M; i++) { for (j = 0; j < N; j++) { u[i][j] = w[i][j]; } } /* iterate until the new solution W differs from the old solution U by no more than EPSILON. */ iterations = 0; iterations_print = 1; printf("\n"); printf(" Iteration Change\n"); printf("\n"); ctime1 = cpu_time(); parallel_heated_plate(u, w, N, M, epsilon); //while (epsilon <= diff) //{ // /* // Save the old solution in U. // */ // for (i = 0; i < M; i++) // { // for (j = 0; j < N; j++) // { // u[i][j] = w[i][j]; // } // } // /* // Determine the new estimate of the solution at the interior points. // The new solution W is the average of north, south, east and west neighbors. // */ // diff = 0.0; // for (i = 1; i < M - 1; i++) // { // for (j = 1; j < N - 1; j++) // { // w[i][j] = (u[i - 1][j] + u[i + 1][j] + u[i][j - 1] + u[i][j + 1]) / 4.0; // if (diff < fabs(w[i][j] - u[i][j])) // { // diff = fabs(w[i][j] - u[i][j]); // } // } // } // iterations++; // if (iterations == iterations_print) // { // printf(" %8d %f\n", iterations, diff); // iterations_print = 2 * iterations_print; // } //} ctime2 = cpu_time(); ctime = ctime2 - ctime1; printf("\n"); printf(" Error tolerance achieved.\n"); printf(" CPU time = %f\n", ctime); /* Write the solution to the output file. 
*/ fp = fopen(output_file, "w"); fprintf(fp, "%d\n", M); fprintf(fp, "%d\n", N); result->val_size = M*N; result->value = (double*)malloc(M*N * sizeof(double)); result->time = ctime; for (i = 0; i < M; i++) { //printf("sta brej? %d, %llf, %llf, %llf\n", i, w[i][0], w[i][1], w[i][2]); for (j = 0; j < N; j++) { fprintf(fp, "%6.2f ", w[i][j]); result->value[i*N + j] = w[i][j]; } fputc('\n', fp); } fclose(fp); printf("\n"); printf(" Solution written to the output file %s\n", output_file); /* All done! */ printf("\n"); printf("HEATED_PLATE:\n"); printf(" Normal end of execution.\n"); return 0; } int main(int argc, char * argv[]) { Result_Vect seq_result, par_result; sequential(argc, argv, &seq_result); parallel(argc, argv, &par_result); compare_and_print_vect(seq_result, par_result, "heated plate"); }
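/* CUDA has no native atomicMax for double, which is why the file above builds
   one from a 64-bit atomicCAS loop. A self-contained test sketch of the same
   pattern (editorial illustration; the kernel and names here are hypothetical,
   not from the original sources): */
#include <cstdio>
#include <cuda_runtime.h>

__device__ void atomicMaxDouble(double *address, double value)
{
    unsigned long long int *address_as_ull = (unsigned long long int *)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        if (__longlong_as_double(assumed) >= value) break; // current max already larger
        old = atomicCAS(address_as_ull, assumed, __double_as_longlong(value));
    } while (assumed != old);
}

__global__ void max_test(double *out, int n)
{
    int t = blockIdx.x * blockDim.x + threadIdx.x;
    if (t < n) atomicMaxDouble(out, (double)t); // the largest t should win
}

int main(void)
{
    double h_out = -1.0, *d_out;
    cudaMalloc((void **)&d_out, sizeof(double));
    cudaMemcpy(d_out, &h_out, sizeof(double), cudaMemcpyHostToDevice);
    max_test<<<256, 256>>>(d_out, 256 * 256);
    cudaMemcpy(&h_out, d_out, sizeof(double), cudaMemcpyDeviceToHost);
    printf("max = %.0f (expected %d)\n", h_out, 256 * 256 - 1);
    cudaFree(d_out);
    return 0;
}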
c0d0ddea572aeec46418d224632608b1b6d109f0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" const unsigned char BLOCK_DIM_X = 16; const unsigned char BLOCK_DIM_Y = 16; __device__ unsigned char GetPixel(unsigned char * src, unsigned int w, unsigned int h, int x, int y, int c) { x = x < 0 ? 0 : x; x = x < w ? x : w - 1; y = y < 0 ? 0 : y; y = y < h ? y : h - 1; return src[3 * (w * y + x) + c]; } __global__ void grayscale_kernel(unsigned char * src, unsigned char * dest, unsigned int w, unsigned int h) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { float luma = 0 + 0.2126f * GetPixel(src, w, h, x, y, 0) + 0.7152f * GetPixel(src, w, h, x, y, 1) + 0.0722f * GetPixel(src, w, h, x, y, 2); unsigned char l = (unsigned char)luma; dest[3 * (w * y + x) + 0] = l; dest[3 * (w * y + x) + 1] = l; dest[3 * (w * y + x) + 2] = l; } } __global__ void sobel_kernel(unsigned char * src, unsigned char * dest, unsigned int w, unsigned int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int c = blockIdx.z; if (x < w && y < h) { /* Sobel X-filter: Sobel Y-filter: -1 0 +1 -1 -2 -1 -2 0 +2 0 0 0 -1 0 +1 +1 +2 +1 */ float Gx = 0 - 1.0 * GetPixel(src, w, h, x - 1, y - 1, c) - 2.0 * GetPixel(src, w, h, x - 1, y + 0, c) - 1.0 * GetPixel(src, w, h, x - 1, y + 1, c) + 1.0 * GetPixel(src, w, h, x + 1, y - 1, c) + 2.0 * GetPixel(src, w, h, x + 1, y + 0, c) + 1.0 * GetPixel(src, w, h, x + 1, y + 1, c); float Gy = 0 - 1.0 * GetPixel(src, w, h, x - 1, y - 1, c) - 2.0 * GetPixel(src, w, h, x + 0, y - 1, c) - 1.0 * GetPixel(src, w, h, x + 1, y - 1, c) + 1.0 * GetPixel(src, w, h, x - 1, y + 1, c) + 2.0 * GetPixel(src, w, h, x + 0, y + 1, c) + 1.0 * GetPixel(src, w, h, x + 1, y + 1, c); float G = sqrt(Gx*Gx + Gy*Gy); dest[3 * (w * y + x) + c] = G > 32 ? 255 : 0; } } __global__ void sobel_kernel_shared(unsigned char * src, unsigned char * dest, unsigned int w, unsigned int h) { int x_block = blockIdx.x * blockDim.x; int y_block = blockIdx.y * blockDim.y; unsigned int tx = threadIdx.x; unsigned int ty = threadIdx.y; unsigned int c = blockIdx.z; __shared__ unsigned char pixels[BLOCK_DIM_Y + 2][BLOCK_DIM_X + 2]; for (unsigned int i = blockDim.x * ty + tx; i < (blockDim.x + 2) * (blockDim.y + 2); i += blockDim.x * blockDim.y) { char y_off = i / (blockDim.x + 2); char x_off = i % (blockDim.x + 2); pixels[y_off][x_off] = GetPixel(src, w, h, x_block + x_off - 1, y_block + y_off - 1, c); } __syncthreads(); if (x_block + tx < w && y_block + ty < h) { /* Sobel X-filter: Sobel Y-filter: -1 0 +1 -1 -2 -1 -2 0 +2 0 0 0 -1 0 +1 +1 +2 +1 */ float Gx = 0 - 1.0 * pixels[ty + 0][tx + 0] - 2.0 * pixels[ty + 1][tx + 0] - 1.0 * pixels[ty + 2][tx + 0] + 1.0 * pixels[ty + 0][tx + 2] + 2.0 * pixels[ty + 1][tx + 2] + 1.0 * pixels[ty + 2][tx + 2]; float Gy = 0 - 1.0 * pixels[ty + 0][tx + 0] - 2.0 * pixels[ty + 0][tx + 1] - 1.0 * pixels[ty + 0][tx + 2] + 1.0 * pixels[ty + 2][tx + 0] + 2.0 * pixels[ty + 2][tx + 1] + 1.0 * pixels[ty + 2][tx + 2]; float G = sqrt(Gx*Gx + Gy*Gy); dest[3 * (w * (y_block + ty) + x_block + tx) + c] = G > 32 ? 
255 : 0;
  }
}

extern "C" void GPUFiltering(unsigned char * src, unsigned char * dest, unsigned int w, unsigned int h)
{
  // convert to grayscale
  dim3 BlockDim1(BLOCK_DIM_X, BLOCK_DIM_Y, 1);
  dim3 GridDim1((w - 1) / BlockDim1.x + 1, (h - 1) / BlockDim1.y + 1, 1);
  hipLaunchKernelGGL(( grayscale_kernel), dim3(GridDim1), dim3(BlockDim1), 0, 0, src, src, w, h);

  // Sobel filter
  dim3 BlockDim2(BLOCK_DIM_X, BLOCK_DIM_Y, 1);
  dim3 GridDim2((w - 1) / BlockDim2.x + 1, (h - 1) / BlockDim2.y + 1, 3);
  hipLaunchKernelGGL(( sobel_kernel), dim3(GridDim2), dim3(BlockDim2), 0, 0, src, dest, w, h);
}

extern "C" void GPUFilteringShared(unsigned char * src, unsigned char * dest, unsigned int w, unsigned int h)
{
  // convert to grayscale
  dim3 BlockDim1(BLOCK_DIM_X, BLOCK_DIM_Y, 1);
  dim3 GridDim1((w - 1) / BlockDim1.x + 1, (h - 1) / BlockDim1.y + 1, 1);
  hipLaunchKernelGGL(( grayscale_kernel), dim3(GridDim1), dim3(BlockDim1), 0, 0, src, src, w, h);

  // Sobel filter
  dim3 BlockDim2(BLOCK_DIM_X, BLOCK_DIM_Y, 1);
  dim3 GridDim2((w - 1) / BlockDim2.x + 1, (h - 1) / BlockDim2.y + 1, 3);
  hipLaunchKernelGGL(( sobel_kernel_shared), dim3(GridDim2), dim3(BlockDim2), 0, 0, src, dest, w, h);
}
c0d0ddea572aeec46418d224632608b1b6d109f0.cu
const unsigned char BLOCK_DIM_X = 16; const unsigned char BLOCK_DIM_Y = 16; __device__ unsigned char GetPixel(unsigned char * src, unsigned int w, unsigned int h, int x, int y, int c) { x = x < 0 ? 0 : x; x = x < w ? x : w - 1; y = y < 0 ? 0 : y; y = y < h ? y : h - 1; return src[3 * (w * y + x) + c]; } __global__ void grayscale_kernel(unsigned char * src, unsigned char * dest, unsigned int w, unsigned int h) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < h && x < w) { float luma = 0 + 0.2126f * GetPixel(src, w, h, x, y, 0) + 0.7152f * GetPixel(src, w, h, x, y, 1) + 0.0722f * GetPixel(src, w, h, x, y, 2); unsigned char l = (unsigned char)luma; dest[3 * (w * y + x) + 0] = l; dest[3 * (w * y + x) + 1] = l; dest[3 * (w * y + x) + 2] = l; } } __global__ void sobel_kernel(unsigned char * src, unsigned char * dest, unsigned int w, unsigned int h) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int c = blockIdx.z; if (x < w && y < h) { /* Sobel X-filter: Sobel Y-filter: -1 0 +1 -1 -2 -1 -2 0 +2 0 0 0 -1 0 +1 +1 +2 +1 */ float Gx = 0 - 1.0 * GetPixel(src, w, h, x - 1, y - 1, c) - 2.0 * GetPixel(src, w, h, x - 1, y + 0, c) - 1.0 * GetPixel(src, w, h, x - 1, y + 1, c) + 1.0 * GetPixel(src, w, h, x + 1, y - 1, c) + 2.0 * GetPixel(src, w, h, x + 1, y + 0, c) + 1.0 * GetPixel(src, w, h, x + 1, y + 1, c); float Gy = 0 - 1.0 * GetPixel(src, w, h, x - 1, y - 1, c) - 2.0 * GetPixel(src, w, h, x + 0, y - 1, c) - 1.0 * GetPixel(src, w, h, x + 1, y - 1, c) + 1.0 * GetPixel(src, w, h, x - 1, y + 1, c) + 2.0 * GetPixel(src, w, h, x + 0, y + 1, c) + 1.0 * GetPixel(src, w, h, x + 1, y + 1, c); float G = sqrt(Gx*Gx + Gy*Gy); dest[3 * (w * y + x) + c] = G > 32 ? 255 : 0; } } __global__ void sobel_kernel_shared(unsigned char * src, unsigned char * dest, unsigned int w, unsigned int h) { int x_block = blockIdx.x * blockDim.x; int y_block = blockIdx.y * blockDim.y; unsigned int tx = threadIdx.x; unsigned int ty = threadIdx.y; unsigned int c = blockIdx.z; __shared__ unsigned char pixels[BLOCK_DIM_Y + 2][BLOCK_DIM_X + 2]; for (unsigned int i = blockDim.x * ty + tx; i < (blockDim.x + 2) * (blockDim.y + 2); i += blockDim.x * blockDim.y) { char y_off = i / (blockDim.x + 2); char x_off = i % (blockDim.x + 2); pixels[y_off][x_off] = GetPixel(src, w, h, x_block + x_off - 1, y_block + y_off - 1, c); } __syncthreads(); if (x_block + tx < w && y_block + ty < h) { /* Sobel X-filter: Sobel Y-filter: -1 0 +1 -1 -2 -1 -2 0 +2 0 0 0 -1 0 +1 +1 +2 +1 */ float Gx = 0 - 1.0 * pixels[ty + 0][tx + 0] - 2.0 * pixels[ty + 1][tx + 0] - 1.0 * pixels[ty + 2][tx + 0] + 1.0 * pixels[ty + 0][tx + 2] + 2.0 * pixels[ty + 1][tx + 2] + 1.0 * pixels[ty + 2][tx + 2]; float Gy = 0 - 1.0 * pixels[ty + 0][tx + 0] - 2.0 * pixels[ty + 0][tx + 1] - 1.0 * pixels[ty + 0][tx + 2] + 1.0 * pixels[ty + 2][tx + 0] + 2.0 * pixels[ty + 2][tx + 1] + 1.0 * pixels[ty + 2][tx + 2]; float G = sqrt(Gx*Gx + Gy*Gy); dest[3 * (w * (y_block + ty) + x_block + tx) + c] = G > 32 ? 
255 : 0;
  }
}

extern "C" void GPUFiltering(unsigned char * src, unsigned char * dest, unsigned int w, unsigned int h)
{
  // convert to grayscale
  dim3 BlockDim1(BLOCK_DIM_X, BLOCK_DIM_Y, 1);
  dim3 GridDim1((w - 1) / BlockDim1.x + 1, (h - 1) / BlockDim1.y + 1, 1);
  grayscale_kernel<<<GridDim1, BlockDim1>>>(src, src, w, h);

  // Sobel filter
  dim3 BlockDim2(BLOCK_DIM_X, BLOCK_DIM_Y, 1);
  dim3 GridDim2((w - 1) / BlockDim2.x + 1, (h - 1) / BlockDim2.y + 1, 3);
  sobel_kernel<<<GridDim2, BlockDim2>>>(src, dest, w, h);
}

extern "C" void GPUFilteringShared(unsigned char * src, unsigned char * dest, unsigned int w, unsigned int h)
{
  // convert to grayscale
  dim3 BlockDim1(BLOCK_DIM_X, BLOCK_DIM_Y, 1);
  dim3 GridDim1((w - 1) / BlockDim1.x + 1, (h - 1) / BlockDim1.y + 1, 1);
  grayscale_kernel<<<GridDim1, BlockDim1>>>(src, src, w, h);

  // Sobel filter
  dim3 BlockDim2(BLOCK_DIM_X, BLOCK_DIM_Y, 1);
  dim3 GridDim2((w - 1) / BlockDim2.x + 1, (h - 1) / BlockDim2.y + 1, 3);
  sobel_kernel_shared<<<GridDim2, BlockDim2>>>(src, dest, w, h);
}
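/* GPUFiltering and GPUFilteringShared above expect src and dest to be device
   pointers to interleaved 3-channel, 8-bit images, and the grayscale pass runs
   in place on src. A minimal host-side driver sketch (editorial assumption;
   RunSobel and its buffer handling are not part of the original sources): */
#include <cuda_runtime.h>

extern "C" void GPUFiltering(unsigned char *src, unsigned char *dest,
                             unsigned int w, unsigned int h);

void RunSobel(const unsigned char *host_in, unsigned char *host_out,
              unsigned int w, unsigned int h)
{
    unsigned char *d_src = 0, *d_dst = 0;
    size_t bytes = (size_t)3 * w * h; // 3 interleaved channels per pixel
    cudaMalloc((void **)&d_src, bytes);
    cudaMalloc((void **)&d_dst, bytes);
    cudaMemcpy(d_src, host_in, bytes, cudaMemcpyHostToDevice);
    GPUFiltering(d_src, d_dst, w, h); // grayscale (in place on d_src), then Sobel into d_dst
    cudaMemcpy(host_out, d_dst, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_src);
    cudaFree(d_dst);
}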
4106156bce225f148896fd46e3de8f1b482e36bc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>

#define imin(a, b) ((a) < (b) ? (a) : (b)) // smallest of two values

const int N = 32 * 1024;
const int Threads_per_block = 256;
const int Blocks_per_grid = imin(32, (N + Threads_per_block - 1) / Threads_per_block); // sets to the smallest between 32 and the operation

__global__ void dot(float* a, float* b, float* c) {

    __shared__ float cache[Threads_per_block]; // cache shared per block (each block has one)

    int id = threadIdx.x + blockIdx.x * blockDim.x; // thread id, moves between blocks
    int cacheId = threadIdx.x; // cache id, this thread's slot in the block's cache

    float temp = 0;
    while (id < N) { // grid-stride loop over all N elements
        temp += a[id] * b[id];
        id += blockDim.x * gridDim.x;
    }

    cache[cacheId] = temp; // set cache values

    __syncthreads(); // synchronize threads in the block

    int i = blockDim.x / 2; // half the block size
    while (i != 0) { // each thread adds two of the values in cache, and stores the result back to cache
        if (cacheId < i) {
            cache[cacheId] += cache[cacheId + i];
        }
        __syncthreads();
        i /= 2;
    }

    if (cacheId == 0) { // the result of every sum of a block is in the first entry of the cache
        c[blockIdx.x] = cache[0];
    }
}

int main(void) {

    float *a, *b, *partial_c, c; // CPU variables
    float *d_a, *d_b, *d_c;      // GPU variables

    // Allocate memory on CPU
    a = (float *) malloc(sizeof(float) * N);
    b = (float *) malloc(sizeof(float) * N);
    partial_c = (float *) malloc(sizeof(float) * Blocks_per_grid);
    c = 0;

    // Allocate memory on GPU
    hipMalloc((void**)&d_a, sizeof(float) * N);
    hipMalloc((void**)&d_b, sizeof(float) * N);
    hipMalloc((void**)&d_c, sizeof(float) * Blocks_per_grid);

    // Fill arrays
    for (int i = 0; i < N; i++) {
        a[i] = 1;
        b[i] = i * 2;
    }

    // Copy CPU variables to GPU
    hipMemcpy(d_a, a, sizeof(float) * N, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, sizeof(float) * N, hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( dot), dim3(Blocks_per_grid), dim3(Threads_per_block), 0, 0, d_a, d_b, d_c);

    // Copy the per-block partial sums from GPU to CPU
    hipMemcpy(partial_c, d_c, sizeof(float) * Blocks_per_grid, hipMemcpyDeviceToHost);

    // Add sum values of all the blocks
    for (int i = 0; i < Blocks_per_grid; i++) {
        c += partial_c[i];
    }

    printf("Value calculated: %.6g.\n", c);

    // free CPU memory
    free(a);
    free(b);
    free(partial_c);

    // free GPU memory
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    return 0;
}
4106156bce225f148896fd46e3de8f1b482e36bc.cu
#include <cstdio>
#include <cstdlib>

#define imin(a, b) ((a) < (b) ? (a) : (b)) // smallest of two values

const int N = 32 * 1024;
const int Threads_per_block = 256;
const int Blocks_per_grid = imin(32, (N + Threads_per_block - 1) / Threads_per_block); // sets to the smallest between 32 and the operation

__global__ void dot(float* a, float* b, float* c) {

    __shared__ float cache[Threads_per_block]; // cache shared per block (each block has one)

    int id = threadIdx.x + blockIdx.x * blockDim.x; // thread id, moves between blocks
    int cacheId = threadIdx.x; // cache id, this thread's slot in the block's cache

    float temp = 0;
    while (id < N) { // grid-stride loop over all N elements
        temp += a[id] * b[id];
        id += blockDim.x * gridDim.x;
    }

    cache[cacheId] = temp; // set cache values

    __syncthreads(); // synchronize threads in the block

    int i = blockDim.x / 2; // half the block size
    while (i != 0) { // each thread adds two of the values in cache, and stores the result back to cache
        if (cacheId < i) {
            cache[cacheId] += cache[cacheId + i];
        }
        __syncthreads();
        i /= 2;
    }

    if (cacheId == 0) { // the result of every sum of a block is in the first entry of the cache
        c[blockIdx.x] = cache[0];
    }
}

int main(void) {

    float *a, *b, *partial_c, c; // CPU variables
    float *d_a, *d_b, *d_c;      // GPU variables

    // Allocate memory on CPU
    a = (float *) malloc(sizeof(float) * N);
    b = (float *) malloc(sizeof(float) * N);
    partial_c = (float *) malloc(sizeof(float) * Blocks_per_grid);
    c = 0;

    // Allocate memory on GPU
    cudaMalloc((void**)&d_a, sizeof(float) * N);
    cudaMalloc((void**)&d_b, sizeof(float) * N);
    cudaMalloc((void**)&d_c, sizeof(float) * Blocks_per_grid);

    // Fill arrays
    for (int i = 0; i < N; i++) {
        a[i] = 1;
        b[i] = i * 2;
    }

    // Copy CPU variables to GPU
    cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);

    dot<<<Blocks_per_grid, Threads_per_block>>>(d_a, d_b, d_c);

    // Copy the per-block partial sums from GPU to CPU
    cudaMemcpy(partial_c, d_c, sizeof(float) * Blocks_per_grid, cudaMemcpyDeviceToHost);

    // Add sum values of all the blocks
    for (int i = 0; i < Blocks_per_grid; i++) {
        c += partial_c[i];
    }

    printf("Value calculated: %.6g.\n", c);

    // free CPU memory
    free(a);
    free(b);
    free(partial_c);

    // free GPU memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    return 0;
}
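/* With a[i] = 1 and b[i] = 2*i as filled above, the exact dot product is
   sum(2*i, i = 0..N-1) = N*(N-1); for N = 32*1024 that is 32768 * 32767 =
   1073709056. A host-side check sketch (editorial addition; float accumulation
   at this magnitude warrants a relative tolerance rather than exact equality): */
#include <cmath>
#include <cstdio>

bool check_dot(float c, int n)
{
    double expected = (double)n * (n - 1); // closed form of the sum of 2*i
    double rel = fabs((double)c - expected) / expected;
    printf("got %.6g, expected %.6g, relative error %.3g\n",
           (double)c, expected, rel);
    return rel < 1e-4; // loose bound for single-precision accumulation
}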
fad52a8e499cf66f1f9be157c2e7b0d036d3009f.hip
// !!! This is a file automatically generated by hipify!!! /*! \file subgrid_routines_3D.cu * \brief Definitions of the routines for subgrid gpu staging for 3D CTU. */ #ifdef CUDA #include<stdio.h> #include<stdlib.h> #include<math.h> #include<string.h> #include"global.h" #include"mpi_routines.h" #include"subgrid_routines_3D.h" void sub_dimensions_3D(int nx, int ny, int nz, int n_ghost, int *nx_s, int *ny_s, int *nz_s, int *block1_tot, int *block2_tot, int *block3_tot, int *remainder1, int *remainder2, int *remainder3, int n_fields) { int sx = 2; int sy = 2; int sz = 2; size_t free; size_t total; int cell_mem, max_vol; *nx_s = nx; *ny_s = ny; *nz_s = nz; // determine the amount of free memory available on the device hipMemGetInfo(&free, &total); // use that to determine the maximum subgrid block volume // memory used per cell (arrays allocated on GPU) cell_mem = 11*n_fields*sizeof(Real); cell_mem += 6*sizeof(Real); max_vol = free / cell_mem; // plus a buffer for dti array max_vol = max_vol - 400; // split if necessary - subgrid block volume cannot exceed MAX_VOL_3D // try to keep the ratio of the y & z dimensions close to 1 // do not let the ratio of the geometric mean of the y&z dimensions to the // x dimension exceed 5 - we don't want cubes, but we don't want // REALLY NOT CUBES while ((*nx_s)*(*ny_s)*(*nz_s) > max_vol) { // if the aspect ratio has gotten too large, split in x if ((*nx_s) / sqrt((*ny_s)*(*nz_s)) > 5) { *nx_s = ceil(Real (nx-2*n_ghost) / Real (sx)) + 2*n_ghost; sx++; } else { if (*ny_s > *nz_s) { *ny_s = ceil(Real (ny-2*n_ghost) / Real (sy)) + 2*n_ghost; sy++; } else { *nz_s = ceil(Real (nz-2*n_ghost) / Real (sz)) + 2*n_ghost; sz++; } } } // determine the number of blocks needed // not splitting if (*nx_s == nx && *ny_s == ny && *nz_s == nz) { *block1_tot = 1; *block2_tot = 1; *block3_tot = 1; *remainder1 = 0; *remainder2 = 0; *remainder3 = 0; return; } // splitting in x else if (*nx_s < nx && *ny_s == ny && *nz_s == nz) { *block1_tot = ceil(Real (nx-2*n_ghost) / Real (*nx_s-2*n_ghost) ); *block2_tot = 1; *block3_tot = 1; // calculate the remainder *remainder1 = (nx-2*n_ghost)%(*nx_s-2*n_ghost); *remainder2 = 0; *remainder3 = 0; } // splitting in y else if (*nx_s == nx && *ny_s < ny && *nz_s == nz) { *block1_tot = 1; *block2_tot = ceil(Real (ny-2*n_ghost) / Real (*ny_s-2*n_ghost) ); *block3_tot = 1; // calculate the remainder *remainder1 = 0; *remainder2 = (ny-2*n_ghost)%(*ny_s-2*n_ghost); *remainder3 = 0; } // splitting in z else if (*nx_s == nx && *ny_s == ny && *nz_s < nz) { *block1_tot = 1; *block2_tot = 1; *block3_tot = ceil(Real (nz-2*n_ghost) / Real (*nz_s-2*n_ghost) ); // calculate the remainder *remainder1 = 0; *remainder2 = 0; *remainder3 = (nz-2*n_ghost)%(*nz_s-2*n_ghost); } // splitting in x & y else if (*nx_s < nx && *ny_s < ny && *nz_s == nz) { *block1_tot = ceil(Real (nx-2*n_ghost) / Real (*nx_s-2*n_ghost) ); *block2_tot = ceil(Real (ny-2*n_ghost) / Real (*ny_s-2*n_ghost) ); *block3_tot = 1; // calculate the remainder *remainder1 = (nx-2*n_ghost)%(*nx_s-2*n_ghost); *remainder2 = (ny-2*n_ghost)%(*ny_s-2*n_ghost); *remainder3 = 0; } // splitting in y & z else if (*nx_s == nx && *ny_s < ny && *nz_s < nz) { *block1_tot = 1; *block2_tot = ceil(Real (ny-2*n_ghost) / Real (*ny_s-2*n_ghost) ); *block3_tot = ceil(Real (nz-2*n_ghost) / Real (*nz_s-2*n_ghost) ); // calculate the remainders *remainder1 = 0; *remainder2 = (ny-2*n_ghost)%(*ny_s-2*n_ghost); *remainder3 = (nz-2*n_ghost)%(*nz_s-2*n_ghost); } // splitting in x & z else if (*nx_s < nx && *ny_s == ny && *nz_s < 
nz) { *block1_tot = ceil(Real (nx-2*n_ghost) / Real (*nx_s-2*n_ghost) ); *block2_tot = 1; *block3_tot = ceil(Real (nz-2*n_ghost) / Real (*nz_s-2*n_ghost) ); // calculate the remainder *remainder1 = (nx-2*n_ghost)%(*nx_s-2*n_ghost); *remainder2 = 0; *remainder3 = (nz-2*n_ghost)%(*nz_s-2*n_ghost); } // splitting in x, y & z else if (*nx_s < nx && *ny_s < ny && *nz_s < nz) { *block1_tot = ceil(Real (nx-2*n_ghost) / Real (*nx_s-2*n_ghost) ); *block2_tot = ceil(Real (ny-2*n_ghost) / Real (*ny_s-2*n_ghost) ); *block3_tot = ceil(Real (nz-2*n_ghost) / Real (*nz_s-2*n_ghost) ); // calculate the remainders *remainder1 = (nx-2*n_ghost)%(*nx_s-2*n_ghost); *remainder2 = (ny-2*n_ghost)%(*ny_s-2*n_ghost); *remainder3 = (nz-2*n_ghost)%(*nz_s-2*n_ghost); } else { printf("Error determining number and size of subgrid blocks.\n"); exit(0); } } void get_offsets_3D(int nx_s, int ny_s, int nz_s, int n_ghost, int x_off, int y_off, int z_off, int block, int block1_tot, int block2_tot, int block3_tot, int remainder1, int remainder2, int remainder3, int *x_off_s, int *y_off_s, int *z_off_s) { int block1; int block2; int block3; // determine which row of subgrid blocks we're on for each dimension block3 = block / (block2_tot*block1_tot); // zid of current block block2 = (block - block3*block2_tot*block1_tot) / block1_tot; // yid of current block block1 = block - block3*block2_tot*block1_tot - block2*block1_tot; // xid of current block // calculate global offsets *x_off_s = x_off + (nx_s-2*n_ghost)*block1; *y_off_s = y_off + (ny_s-2*n_ghost)*block2; *z_off_s = z_off + (nz_s-2*n_ghost)*block3; // need to be careful on the last block due to remainder offsets if (remainder1 != 0 && block1 == block1_tot-1) *x_off_s = x_off + (nx_s-2*n_ghost)*(block1-1) + remainder1; if (remainder2 != 0 && block2 == block2_tot-1) *y_off_s = y_off + (ny_s-2*n_ghost)*(block2-1) + remainder2; if (remainder3 != 0 && block3 == block3_tot-1) *z_off_s = z_off + (nz_s-2*n_ghost)*(block3-1) + remainder3; } // copy the conserved variable block into the buffer void host_copy_block_3D(int nx, int ny, int nz, int nx_s, int ny_s, int nz_s, int n_ghost, int block, int block1_tot, int block2_tot, int block3_tot, int remainder1, int remainder2, int remainder3, int BLOCK_VOL, Real *host_conserved, Real *buffer, int n_fields) { int n_cells = nx*ny*nz; int block1, block2, block3; int x_offset, y_offset, z_offset; int x_host, y_host, z_host; // if no subgrid blocks, do nothing if (nx_s == nx && ny_s == ny && nz_s == nz) return; // splitting only in x else if (nx_s < nx && ny_s == ny && nz_s == nz) { block1 = block; // xid of block // if we are on the last block, make sure it doesn't go past // the bounds of the host array x_offset = 0; if (block1 == block1_tot-1 && remainder1 != 0) { x_offset = nx_s - 2*n_ghost - remainder1; } // calculate the x location in the host array to copy from x_host = block1*(nx_s-2*n_ghost) - x_offset; // copy data from host conserved array into buffer for (int k=0; k<nz_s; k++) { for (int j=0; j<ny_s; j++) { for (int ii=0; ii<n_fields; ii++) { memcpy(&buffer[ii*BLOCK_VOL + j*nx_s + k*nx_s*ny_s], &host_conserved[x_host + ii*n_cells + j*nx + k*nx*ny], nx_s*sizeof(Real)); } } } return; } // splitting only in y else if (nx_s == nx && ny_s < ny && nz_s == nz) { block2 = block; // yid of block // if we are on the last block, make sure it doesn't go past // the bounds of the host array y_offset = 0; if (block2 == block2_tot-1 && remainder2 != 0) { y_offset = ny_s - 2*n_ghost - remainder2; } // calculate the y location in the host array to 
copy from y_host = block2*nx*(ny_s-2*n_ghost) - nx*y_offset; // copy data from host conserved array into buffer for (int k=0; k<nz_s; k++) { for (int ii=0; ii<n_fields; ii++) { memcpy(&buffer[ii*BLOCK_VOL + k*nx_s*ny_s], &host_conserved[y_host + ii*n_cells + k*nx*ny], nx_s*ny_s*sizeof(Real)); } } return; } // splitting only in z else if (nx_s == nx && ny_s == ny && nz_s < nz) { block3 = block; // zid of block // if we are on the last block, make sure it doesn't go past // the bounds of the host array z_offset = 0; if (block3 == block3_tot-1 && remainder3 != 0) { z_offset = nz_s - 2*n_ghost - remainder3; } // calculate the z location in the host array to copy from z_host = block3*nx*ny*(nz_s-2*n_ghost) - nx*ny*(z_offset); // copy data from host conserved array into buffer for (int ii=0; ii<n_fields; ii++) { memcpy(&buffer[ii*BLOCK_VOL], &host_conserved[z_host + ii*n_cells], BLOCK_VOL*sizeof(Real)); } return; } // splitting in y and z else if (nx_s == nx && ny_s < ny && nz_s < nz) { block3 = block / block2_tot; // zid of current block block2 = block - block3*block2_tot; // yid of current block // if we are on the last y block, make sure it doesn't go past // the bounds of the host array y_offset = 0; if (block2 == block2_tot-1 && remainder2 != 0) { y_offset = ny_s - 2*n_ghost - remainder2; } // calculate the y location in the host array to copy from y_host = block2*nx*(ny_s-2*n_ghost) - nx*y_offset; // if we are on the last z block, make sure it doesn't go past // the bounds of the host array z_offset = 0; if (block3 == block3_tot-1 && remainder3 != 0) { z_offset = nz_s - 2*n_ghost - remainder3; } // calculate the z location in the host array to copy from z_host = block3*nx*ny*(nz_s-2*n_ghost) - nx*ny*(z_offset); // copy data from host conserved array into buffer for (int k=0; k<nz_s; k++) { for (int ii=0; ii<n_fields; ii++) { memcpy(&buffer[ii*BLOCK_VOL + k*nx_s*ny_s], &host_conserved[z_host + y_host + ii*n_cells + k*nx*ny], nx_s*ny_s*sizeof(Real)); } } return; } // splitting in x, y, and z else if (nx_s < nx && ny_s < ny && nz_s < nz) { block3 = block / (block2_tot*block1_tot); // zid of current block block2 = (block - block3*block2_tot*block1_tot) / block1_tot; // yid of current block block1 = block - block3*block2_tot*block1_tot - block2*block1_tot; // xid of current block // if we are on the last x block, make sure it doesn't go past // the bounds of the host array x_offset = 0; if (block1 == block1_tot-1 && remainder1 != 0) { x_offset = nx_s - 2*n_ghost - remainder1; } // calculate the x location in the host array to copy from x_host = block1*(nx_s-2*n_ghost) - x_offset; // if we are on the last y block, make sure it doesn't go past // the bounds of the host array y_offset = 0; if (block2 == block2_tot-1 && remainder2 != 0) { y_offset = ny_s - 2*n_ghost - remainder2; } // calculate the y location in the host array to copy from y_host = block2*nx*(ny_s-2*n_ghost) - nx*y_offset; // if we are on the last z block, make sure it doesn't go past // the bounds of the host array z_offset = 0; if (block3 == block3_tot-1 && remainder3 != 0) { z_offset = nz_s - 2*n_ghost - remainder3; } // calculate the z location in the host array to copy from z_host = block3*nx*ny*(nz_s-2*n_ghost) - nx*ny*(z_offset); // copy data from host conserved array into buffer for (int k=0; k<nz_s; k++) { for (int j=0; j<ny_s; j++) { for (int ii=0; ii<n_fields; ii++) { memcpy(&buffer[ii*BLOCK_VOL + j*nx_s + k*nx_s*ny_s], &host_conserved[x_host + y_host + z_host + ii*n_cells + j*nx + k*nx*ny], nx_s*sizeof(Real)); } } } 
return; } else { printf("Error copying into buffer. Unsupported grid dimensions.\n"); printf("nx: %d nx_s: %d ny: %d ny_s: %d nz: %d nz_s: %d.\n", nx, nx_s, ny, ny_s, nz, nz_s); exit(0); } } // return the values from buffer to the host_conserved array void host_return_block_3D(int nx, int ny, int nz, int nx_s, int ny_s, int nz_s, int n_ghost, int block, int block1_tot, int block2_tot, int block3_tot, int remainder1, int remainder2, int remainder3, int BLOCK_VOL, Real *host_conserved, Real *buffer, int n_fields) { int n_cells = nx*ny*nz; int block1, block2, block3; int x_offset, y_offset, z_offset; int x_host, y_host, z_host, x_gpu, y_gpu, z_gpu, host_loc, gpu_loc; int length, hid, gid; // if no subgrid blocks, do nothing if (nx_s == nx && ny_s == ny && nz_s == nz) return; // splitting only in x else if (nx_s < nx && ny_s == ny && nz_s == nz) { // return values based on current block id block1 = block; // if we just did the last x block, make sure to copy the cells to the right place x_offset = 0; if (block1 == block1_tot-1 && remainder1 != 0) { x_offset = nx_s - 2*n_ghost - remainder1; } x_host = block1*(nx_s-2*n_ghost) + (n_ghost-x_offset); y_host = n_ghost*nx; z_host = n_ghost*nx*ny; host_loc = x_host + y_host + z_host; x_gpu = n_ghost; y_gpu = n_ghost*nx_s; z_gpu = n_ghost*nx_s*ny_s; gpu_loc = x_gpu + y_gpu + z_gpu; length = (nx_s-2*n_ghost); // number of cells to copy back for (int k=0; k<nz_s-2*n_ghost; k++) { for (int j=0; j<ny_s-2*n_ghost; j++) { hid = j*nx + k*nx*ny; gid = j*nx_s + k*nx_s*ny_s; for (int ii=0; ii<n_fields; ii++) { memcpy(&host_conserved[host_loc + hid + ii*n_cells], &buffer[gpu_loc + gid + ii*BLOCK_VOL], length*sizeof(Real)); } } } return; } // splitting only in y else if (nx_s == nx && ny_s < ny && nz_s == nz) { // return values based on current block id block2 = block; // if we just did the last slice, make sure to copy the cells to the right place y_offset = 0; if (block2 == block2_tot-1 && remainder2 != 0) { y_offset = ny_s - 2*n_ghost - remainder2; } y_host = block2*nx*(ny_s-2*n_ghost) + nx*(n_ghost-y_offset); z_host = n_ghost*nx*ny; host_loc = y_host + z_host; y_gpu = n_ghost*nx_s; z_gpu = n_ghost*nx_s*ny_s; gpu_loc = y_gpu + z_gpu; length = nx_s*(ny_s-2*n_ghost); // number of cells to copy back for (int k=0; k<nz_s-2*n_ghost; k++) { hid = k*nx*ny; gid = k*nx_s*ny_s; for (int ii=0; ii<n_fields; ii++) { memcpy(&host_conserved[host_loc + hid + ii*n_cells], &buffer[gpu_loc + gid + ii*BLOCK_VOL], length*sizeof(Real)); } } return; } // splitting only in z else if (nx_s == nx && ny_s == ny && nz_s < nz) { // return values based on current block id block3 = block; z_offset = 0; // if we just did the last slice, make sure to copy the cells to the right place if (block3 == block3_tot-1 && remainder3 != 0) { z_offset = nz_s - 2*n_ghost - remainder3; } z_host = block3*nx*ny*(nz_s-2*n_ghost) + nx*ny*(n_ghost-z_offset); z_gpu = n_ghost*nx_s*ny_s; length = nx_s*ny_s*(nz_s-2*n_ghost); // number of cells to copy back for (int ii=0; ii<n_fields; ii++) { memcpy(&host_conserved[z_host + ii*n_cells], &buffer[z_gpu + ii*BLOCK_VOL], length*sizeof(Real)); } return; } // splitting in y and z else if (nx_s == nx && ny_s < ny && nz_s < nz) { // return values based on current block id block3 = block / block2_tot; // zid of current block block2 = block - block3*block2_tot; // yid of current block z_offset = 0; // if we just did the z last slice, make sure to copy the cells to the right place if (block3 == block3_tot-1 && remainder3 != 0) { z_offset = nz_s - 2*n_ghost - remainder3; } 
y_offset = 0; // if we just did the y last slice, make sure to copy the cells to the right place if (block2 == block2_tot-1 && remainder2 != 0) { y_offset = ny_s - 2*n_ghost - remainder2; } z_host = block3*nx*ny*(nz_s-2*n_ghost) + nx*ny*(n_ghost-z_offset); y_host = block2*nx*(ny_s-2*n_ghost) + nx*(n_ghost-y_offset); host_loc = y_host + z_host; z_gpu = n_ghost*nx_s*ny_s; y_gpu = n_ghost*nx_s; gpu_loc = y_gpu + z_gpu; length = nx_s*(ny_s-2*n_ghost); // number of cells to copy back for (int k=0; k<nz_s-2*n_ghost; k++) { hid = k*nx*ny; gid = k*nx_s*ny_s; for (int ii=0; ii<n_fields; ii++) { memcpy(&host_conserved[host_loc + hid + ii*n_cells], &buffer[gpu_loc + gid + ii*BLOCK_VOL], length*sizeof(Real)); } } return; } // splitting in x, y, and z else if (nx_s < nx && ny_s < ny && nz_s < nz) { // return values based on current block id block3 = block / (block2_tot*block1_tot); // zid of current block block2 = (block - block3*block2_tot*block1_tot) / block1_tot; // yid of current block block1 = block - block3*block2_tot*block1_tot - block2*block1_tot; // xid of current block z_offset = 0; // if we just did the z last slice, make sure to copy the cells to the right place if (block3 == block3_tot-1 && remainder3 != 0) { z_offset = nz_s - 2*n_ghost - remainder3; } y_offset = 0; // if we just did the y last slice, make sure to copy the cells to the right place if (block2 == block2_tot-1 && remainder2 != 0) { y_offset = ny_s - 2*n_ghost - remainder2; } x_offset = 0; // if we just did the x last slice, make sure to copy the cells to the right place if (block1 == block1_tot-1 && remainder1 != 0) { x_offset = nx_s - 2*n_ghost - remainder1; } z_host = block3*nx*ny*(nz_s-2*n_ghost) + nx*ny*(n_ghost-z_offset); y_host = block2*nx*(ny_s-2*n_ghost) + nx*(n_ghost-y_offset); x_host = block1*(nx_s-2*n_ghost) + (n_ghost-x_offset); host_loc = x_host + y_host + z_host; z_gpu = n_ghost*nx_s*ny_s; y_gpu = n_ghost*nx_s; x_gpu = n_ghost; gpu_loc = x_gpu + y_gpu + z_gpu; length = (nx_s-2*n_ghost); // number of cells to copy back for (int k=0; k<nz_s-2*n_ghost; k++) { for (int j=0; j<ny_s-2*n_ghost; j++) { hid = j*nx + k*nx*ny; gid = j*nx_s + k*nx_s*ny_s; for (int ii=0; ii<n_fields; ii++) { memcpy(&host_conserved[host_loc + hid + ii*n_cells], &buffer[gpu_loc + gid + ii*BLOCK_VOL], length*sizeof(Real)); } } } return; } else { printf("Error returning values to host. Unsupported grid dimensions.\n"); printf("nx: %d nx_s: %d ny: %d ny_s: %d nz: %d nz_s: %d.\n", nx, nx_s, ny, ny_s, nz, nz_s); exit(0); } } #endif //CUDA
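/* Rough numbers for the device-memory sizing in sub_dimensions_3D above: each
   cell reserves 11*n_fields + 6 Reals of GPU scratch. A host-only sketch of the
   arithmetic (editorial illustration; n_fields = 5, Real = double, and the
   8 GiB free figure are assumptions chosen for the example): */
#include <cstdio>

int main(void)
{
    int n_fields = 5;                                         // e.g. 5 conserved fields
    size_t real_sz = sizeof(double);                          // a Real = double build
    size_t cell_mem = 11 * n_fields * real_sz + 6 * real_sz;  // 488 bytes per cell
    size_t free_mem = (size_t)8 << 30;                        // pretend 8 GiB are free
    size_t max_vol = free_mem / cell_mem - 400;               // minus the dti buffer
    printf("cell_mem = %zu B, max subgrid volume = %zu cells\n", cell_mem, max_vol);
    return 0;
}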
fad52a8e499cf66f1f9be157c2e7b0d036d3009f.cu
/*! \file subgrid_routines_3D.cu * \brief Definitions of the routines for subgrid gpu staging for 3D CTU. */ #ifdef CUDA #include<stdio.h> #include<stdlib.h> #include<math.h> #include<string.h> #include"global.h" #include"mpi_routines.h" #include"subgrid_routines_3D.h" void sub_dimensions_3D(int nx, int ny, int nz, int n_ghost, int *nx_s, int *ny_s, int *nz_s, int *block1_tot, int *block2_tot, int *block3_tot, int *remainder1, int *remainder2, int *remainder3, int n_fields) { int sx = 2; int sy = 2; int sz = 2; size_t free; size_t total; int cell_mem, max_vol; *nx_s = nx; *ny_s = ny; *nz_s = nz; // determine the amount of free memory available on the device cudaMemGetInfo(&free, &total); // use that to determine the maximum subgrid block volume // memory used per cell (arrays allocated on GPU) cell_mem = 11*n_fields*sizeof(Real); cell_mem += 6*sizeof(Real); max_vol = free / cell_mem; // plus a buffer for dti array max_vol = max_vol - 400; // split if necessary - subgrid block volume cannot exceed MAX_VOL_3D // try to keep the ratio of the y & z dimensions close to 1 // do not let the ratio of the geometric mean of the y&z dimensions to the // x dimension exceed 5 - we don't want cubes, but we don't want // REALLY NOT CUBES while ((*nx_s)*(*ny_s)*(*nz_s) > max_vol) { // if the aspect ratio has gotten too large, split in x if ((*nx_s) / sqrt((*ny_s)*(*nz_s)) > 5) { *nx_s = ceil(Real (nx-2*n_ghost) / Real (sx)) + 2*n_ghost; sx++; } else { if (*ny_s > *nz_s) { *ny_s = ceil(Real (ny-2*n_ghost) / Real (sy)) + 2*n_ghost; sy++; } else { *nz_s = ceil(Real (nz-2*n_ghost) / Real (sz)) + 2*n_ghost; sz++; } } } // determine the number of blocks needed // not splitting if (*nx_s == nx && *ny_s == ny && *nz_s == nz) { *block1_tot = 1; *block2_tot = 1; *block3_tot = 1; *remainder1 = 0; *remainder2 = 0; *remainder3 = 0; return; } // splitting in x else if (*nx_s < nx && *ny_s == ny && *nz_s == nz) { *block1_tot = ceil(Real (nx-2*n_ghost) / Real (*nx_s-2*n_ghost) ); *block2_tot = 1; *block3_tot = 1; // calculate the remainder *remainder1 = (nx-2*n_ghost)%(*nx_s-2*n_ghost); *remainder2 = 0; *remainder3 = 0; } // splitting in y else if (*nx_s == nx && *ny_s < ny && *nz_s == nz) { *block1_tot = 1; *block2_tot = ceil(Real (ny-2*n_ghost) / Real (*ny_s-2*n_ghost) ); *block3_tot = 1; // calculate the remainder *remainder1 = 0; *remainder2 = (ny-2*n_ghost)%(*ny_s-2*n_ghost); *remainder3 = 0; } // splitting in z else if (*nx_s == nx && *ny_s == ny && *nz_s < nz) { *block1_tot = 1; *block2_tot = 1; *block3_tot = ceil(Real (nz-2*n_ghost) / Real (*nz_s-2*n_ghost) ); // calculate the remainder *remainder1 = 0; *remainder2 = 0; *remainder3 = (nz-2*n_ghost)%(*nz_s-2*n_ghost); } // splitting in x & y else if (*nx_s < nx && *ny_s < ny && *nz_s == nz) { *block1_tot = ceil(Real (nx-2*n_ghost) / Real (*nx_s-2*n_ghost) ); *block2_tot = ceil(Real (ny-2*n_ghost) / Real (*ny_s-2*n_ghost) ); *block3_tot = 1; // calculate the remainder *remainder1 = (nx-2*n_ghost)%(*nx_s-2*n_ghost); *remainder2 = (ny-2*n_ghost)%(*ny_s-2*n_ghost); *remainder3 = 0; } // splitting in y & z else if (*nx_s == nx && *ny_s < ny && *nz_s < nz) { *block1_tot = 1; *block2_tot = ceil(Real (ny-2*n_ghost) / Real (*ny_s-2*n_ghost) ); *block3_tot = ceil(Real (nz-2*n_ghost) / Real (*nz_s-2*n_ghost) ); // calculate the remainders *remainder1 = 0; *remainder2 = (ny-2*n_ghost)%(*ny_s-2*n_ghost); *remainder3 = (nz-2*n_ghost)%(*nz_s-2*n_ghost); } // splitting in x & z else if (*nx_s < nx && *ny_s == ny && *nz_s < nz) { *block1_tot = ceil(Real (nx-2*n_ghost) / Real 
(*nx_s-2*n_ghost) ); *block2_tot = 1; *block3_tot = ceil(Real (nz-2*n_ghost) / Real (*nz_s-2*n_ghost) ); // calculate the remainder *remainder1 = (nx-2*n_ghost)%(*nx_s-2*n_ghost); *remainder2 = 0; *remainder3 = (nz-2*n_ghost)%(*nz_s-2*n_ghost); } // splitting in x, y & z else if (*nx_s < nx && *ny_s < ny && *nz_s < nz) { *block1_tot = ceil(Real (nx-2*n_ghost) / Real (*nx_s-2*n_ghost) ); *block2_tot = ceil(Real (ny-2*n_ghost) / Real (*ny_s-2*n_ghost) ); *block3_tot = ceil(Real (nz-2*n_ghost) / Real (*nz_s-2*n_ghost) ); // calculate the remainders *remainder1 = (nx-2*n_ghost)%(*nx_s-2*n_ghost); *remainder2 = (ny-2*n_ghost)%(*ny_s-2*n_ghost); *remainder3 = (nz-2*n_ghost)%(*nz_s-2*n_ghost); } else { printf("Error determining number and size of subgrid blocks.\n"); exit(0); } } void get_offsets_3D(int nx_s, int ny_s, int nz_s, int n_ghost, int x_off, int y_off, int z_off, int block, int block1_tot, int block2_tot, int block3_tot, int remainder1, int remainder2, int remainder3, int *x_off_s, int *y_off_s, int *z_off_s) { int block1; int block2; int block3; // determine which row of subgrid blocks we're on for each dimension block3 = block / (block2_tot*block1_tot); // zid of current block block2 = (block - block3*block2_tot*block1_tot) / block1_tot; // yid of current block block1 = block - block3*block2_tot*block1_tot - block2*block1_tot; // xid of current block // calculate global offsets *x_off_s = x_off + (nx_s-2*n_ghost)*block1; *y_off_s = y_off + (ny_s-2*n_ghost)*block2; *z_off_s = z_off + (nz_s-2*n_ghost)*block3; // need to be careful on the last block due to remainder offsets if (remainder1 != 0 && block1 == block1_tot-1) *x_off_s = x_off + (nx_s-2*n_ghost)*(block1-1) + remainder1; if (remainder2 != 0 && block2 == block2_tot-1) *y_off_s = y_off + (ny_s-2*n_ghost)*(block2-1) + remainder2; if (remainder3 != 0 && block3 == block3_tot-1) *z_off_s = z_off + (nz_s-2*n_ghost)*(block3-1) + remainder3; } // copy the conserved variable block into the buffer void host_copy_block_3D(int nx, int ny, int nz, int nx_s, int ny_s, int nz_s, int n_ghost, int block, int block1_tot, int block2_tot, int block3_tot, int remainder1, int remainder2, int remainder3, int BLOCK_VOL, Real *host_conserved, Real *buffer, int n_fields) { int n_cells = nx*ny*nz; int block1, block2, block3; int x_offset, y_offset, z_offset; int x_host, y_host, z_host; // if no subgrid blocks, do nothing if (nx_s == nx && ny_s == ny && nz_s == nz) return; // splitting only in x else if (nx_s < nx && ny_s == ny && nz_s == nz) { block1 = block; // xid of block // if we are on the last block, make sure it doesn't go past // the bounds of the host array x_offset = 0; if (block1 == block1_tot-1 && remainder1 != 0) { x_offset = nx_s - 2*n_ghost - remainder1; } // calculate the x location in the host array to copy from x_host = block1*(nx_s-2*n_ghost) - x_offset; // copy data from host conserved array into buffer for (int k=0; k<nz_s; k++) { for (int j=0; j<ny_s; j++) { for (int ii=0; ii<n_fields; ii++) { memcpy(&buffer[ii*BLOCK_VOL + j*nx_s + k*nx_s*ny_s], &host_conserved[x_host + ii*n_cells + j*nx + k*nx*ny], nx_s*sizeof(Real)); } } } return; } // splitting only in y else if (nx_s == nx && ny_s < ny && nz_s == nz) { block2 = block; // yid of block // if we are on the last block, make sure it doesn't go past // the bounds of the host array y_offset = 0; if (block2 == block2_tot-1 && remainder2 != 0) { y_offset = ny_s - 2*n_ghost - remainder2; } // calculate the y location in the host array to copy from y_host = block2*nx*(ny_s-2*n_ghost) - 
nx*y_offset; // copy data from host conserved array into buffer for (int k=0; k<nz_s; k++) { for (int ii=0; ii<n_fields; ii++) { memcpy(&buffer[ii*BLOCK_VOL + k*nx_s*ny_s], &host_conserved[y_host + ii*n_cells + k*nx*ny], nx_s*ny_s*sizeof(Real)); } } return; } // splitting only in z else if (nx_s == nx && ny_s == ny && nz_s < nz) { block3 = block; // zid of block // if we are on the last block, make sure it doesn't go past // the bounds of the host array z_offset = 0; if (block3 == block3_tot-1 && remainder3 != 0) { z_offset = nz_s - 2*n_ghost - remainder3; } // calculate the z location in the host array to copy from z_host = block3*nx*ny*(nz_s-2*n_ghost) - nx*ny*(z_offset); // copy data from host conserved array into buffer for (int ii=0; ii<n_fields; ii++) { memcpy(&buffer[ii*BLOCK_VOL], &host_conserved[z_host + ii*n_cells], BLOCK_VOL*sizeof(Real)); } return; } // splitting in y and z else if (nx_s == nx && ny_s < ny && nz_s < nz) { block3 = block / block2_tot; // zid of current block block2 = block - block3*block2_tot; // yid of current block // if we are on the last y block, make sure it doesn't go past // the bounds of the host array y_offset = 0; if (block2 == block2_tot-1 && remainder2 != 0) { y_offset = ny_s - 2*n_ghost - remainder2; } // calculate the y location in the host array to copy from y_host = block2*nx*(ny_s-2*n_ghost) - nx*y_offset; // if we are on the last z block, make sure it doesn't go past // the bounds of the host array z_offset = 0; if (block3 == block3_tot-1 && remainder3 != 0) { z_offset = nz_s - 2*n_ghost - remainder3; } // calculate the z location in the host array to copy from z_host = block3*nx*ny*(nz_s-2*n_ghost) - nx*ny*(z_offset); // copy data from host conserved array into buffer for (int k=0; k<nz_s; k++) { for (int ii=0; ii<n_fields; ii++) { memcpy(&buffer[ii*BLOCK_VOL + k*nx_s*ny_s], &host_conserved[z_host + y_host + ii*n_cells + k*nx*ny], nx_s*ny_s*sizeof(Real)); } } return; } // splitting in x, y, and z else if (nx_s < nx && ny_s < ny && nz_s < nz) { block3 = block / (block2_tot*block1_tot); // zid of current block block2 = (block - block3*block2_tot*block1_tot) / block1_tot; // yid of current block block1 = block - block3*block2_tot*block1_tot - block2*block1_tot; // xid of current block // if we are on the last x block, make sure it doesn't go past // the bounds of the host array x_offset = 0; if (block1 == block1_tot-1 && remainder1 != 0) { x_offset = nx_s - 2*n_ghost - remainder1; } // calculate the x location in the host array to copy from x_host = block1*(nx_s-2*n_ghost) - x_offset; // if we are on the last y block, make sure it doesn't go past // the bounds of the host array y_offset = 0; if (block2 == block2_tot-1 && remainder2 != 0) { y_offset = ny_s - 2*n_ghost - remainder2; } // calculate the y location in the host array to copy from y_host = block2*nx*(ny_s-2*n_ghost) - nx*y_offset; // if we are on the last z block, make sure it doesn't go past // the bounds of the host array z_offset = 0; if (block3 == block3_tot-1 && remainder3 != 0) { z_offset = nz_s - 2*n_ghost - remainder3; } // calculate the z location in the host array to copy from z_host = block3*nx*ny*(nz_s-2*n_ghost) - nx*ny*(z_offset); // copy data from host conserved array into buffer for (int k=0; k<nz_s; k++) { for (int j=0; j<ny_s; j++) { for (int ii=0; ii<n_fields; ii++) { memcpy(&buffer[ii*BLOCK_VOL + j*nx_s + k*nx_s*ny_s], &host_conserved[x_host + y_host + z_host + ii*n_cells + j*nx + k*nx*ny], nx_s*sizeof(Real)); } } } return; } else { printf("Error copying into buffer. 
Unsupported grid dimensions.\n"); printf("nx: %d nx_s: %d ny: %d ny_s: %d nz: %d nz_s: %d.\n", nx, nx_s, ny, ny_s, nz, nz_s); exit(0); } } // return the values from buffer to the host_conserved array void host_return_block_3D(int nx, int ny, int nz, int nx_s, int ny_s, int nz_s, int n_ghost, int block, int block1_tot, int block2_tot, int block3_tot, int remainder1, int remainder2, int remainder3, int BLOCK_VOL, Real *host_conserved, Real *buffer, int n_fields) { int n_cells = nx*ny*nz; int block1, block2, block3; int x_offset, y_offset, z_offset; int x_host, y_host, z_host, x_gpu, y_gpu, z_gpu, host_loc, gpu_loc; int length, hid, gid; // if no subgrid blocks, do nothing if (nx_s == nx && ny_s == ny && nz_s == nz) return; // splitting only in x else if (nx_s < nx && ny_s == ny && nz_s == nz) { // return values based on current block id block1 = block; // if we just did the last x block, make sure to copy the cells to the right place x_offset = 0; if (block1 == block1_tot-1 && remainder1 != 0) { x_offset = nx_s - 2*n_ghost - remainder1; } x_host = block1*(nx_s-2*n_ghost) + (n_ghost-x_offset); y_host = n_ghost*nx; z_host = n_ghost*nx*ny; host_loc = x_host + y_host + z_host; x_gpu = n_ghost; y_gpu = n_ghost*nx_s; z_gpu = n_ghost*nx_s*ny_s; gpu_loc = x_gpu + y_gpu + z_gpu; length = (nx_s-2*n_ghost); // number of cells to copy back for (int k=0; k<nz_s-2*n_ghost; k++) { for (int j=0; j<ny_s-2*n_ghost; j++) { hid = j*nx + k*nx*ny; gid = j*nx_s + k*nx_s*ny_s; for (int ii=0; ii<n_fields; ii++) { memcpy(&host_conserved[host_loc + hid + ii*n_cells], &buffer[gpu_loc + gid + ii*BLOCK_VOL], length*sizeof(Real)); } } } return; } // splitting only in y else if (nx_s == nx && ny_s < ny && nz_s == nz) { // return values based on current block id block2 = block; // if we just did the last slice, make sure to copy the cells to the right place y_offset = 0; if (block2 == block2_tot-1 && remainder2 != 0) { y_offset = ny_s - 2*n_ghost - remainder2; } y_host = block2*nx*(ny_s-2*n_ghost) + nx*(n_ghost-y_offset); z_host = n_ghost*nx*ny; host_loc = y_host + z_host; y_gpu = n_ghost*nx_s; z_gpu = n_ghost*nx_s*ny_s; gpu_loc = y_gpu + z_gpu; length = nx_s*(ny_s-2*n_ghost); // number of cells to copy back for (int k=0; k<nz_s-2*n_ghost; k++) { hid = k*nx*ny; gid = k*nx_s*ny_s; for (int ii=0; ii<n_fields; ii++) { memcpy(&host_conserved[host_loc + hid + ii*n_cells], &buffer[gpu_loc + gid + ii*BLOCK_VOL], length*sizeof(Real)); } } return; } // splitting only in z else if (nx_s == nx && ny_s == ny && nz_s < nz) { // return values based on current block id block3 = block; z_offset = 0; // if we just did the last slice, make sure to copy the cells to the right place if (block3 == block3_tot-1 && remainder3 != 0) { z_offset = nz_s - 2*n_ghost - remainder3; } z_host = block3*nx*ny*(nz_s-2*n_ghost) + nx*ny*(n_ghost-z_offset); z_gpu = n_ghost*nx_s*ny_s; length = nx_s*ny_s*(nz_s-2*n_ghost); // number of cells to copy back for (int ii=0; ii<n_fields; ii++) { memcpy(&host_conserved[z_host + ii*n_cells], &buffer[z_gpu + ii*BLOCK_VOL], length*sizeof(Real)); } return; } // splitting in y and z else if (nx_s == nx && ny_s < ny && nz_s < nz) { // return values based on current block id block3 = block / block2_tot; // zid of current block block2 = block - block3*block2_tot; // yid of current block z_offset = 0; // if we just did the z last slice, make sure to copy the cells to the right place if (block3 == block3_tot-1 && remainder3 != 0) { z_offset = nz_s - 2*n_ghost - remainder3; } y_offset = 0; // if we just did the y last slice, make 
sure to copy the cells to the right place if (block2 == block2_tot-1 && remainder2 != 0) { y_offset = ny_s - 2*n_ghost - remainder2; } z_host = block3*nx*ny*(nz_s-2*n_ghost) + nx*ny*(n_ghost-z_offset); y_host = block2*nx*(ny_s-2*n_ghost) + nx*(n_ghost-y_offset); host_loc = y_host + z_host; z_gpu = n_ghost*nx_s*ny_s; y_gpu = n_ghost*nx_s; gpu_loc = y_gpu + z_gpu; length = nx_s*(ny_s-2*n_ghost); // number of cells to copy back for (int k=0; k<nz_s-2*n_ghost; k++) { hid = k*nx*ny; gid = k*nx_s*ny_s; for (int ii=0; ii<n_fields; ii++) { memcpy(&host_conserved[host_loc + hid + ii*n_cells], &buffer[gpu_loc + gid + ii*BLOCK_VOL], length*sizeof(Real)); } } return; } // splitting in x, y, and z else if (nx_s < nx && ny_s < ny && nz_s < nz) { // return values based on current block id block3 = block / (block2_tot*block1_tot); // zid of current block block2 = (block - block3*block2_tot*block1_tot) / block1_tot; // yid of current block block1 = block - block3*block2_tot*block1_tot - block2*block1_tot; // xid of current block z_offset = 0; // if we just did the z last slice, make sure to copy the cells to the right place if (block3 == block3_tot-1 && remainder3 != 0) { z_offset = nz_s - 2*n_ghost - remainder3; } y_offset = 0; // if we just did the y last slice, make sure to copy the cells to the right place if (block2 == block2_tot-1 && remainder2 != 0) { y_offset = ny_s - 2*n_ghost - remainder2; } x_offset = 0; // if we just did the x last slice, make sure to copy the cells to the right place if (block1 == block1_tot-1 && remainder1 != 0) { x_offset = nx_s - 2*n_ghost - remainder1; } z_host = block3*nx*ny*(nz_s-2*n_ghost) + nx*ny*(n_ghost-z_offset); y_host = block2*nx*(ny_s-2*n_ghost) + nx*(n_ghost-y_offset); x_host = block1*(nx_s-2*n_ghost) + (n_ghost-x_offset); host_loc = x_host + y_host + z_host; z_gpu = n_ghost*nx_s*ny_s; y_gpu = n_ghost*nx_s; x_gpu = n_ghost; gpu_loc = x_gpu + y_gpu + z_gpu; length = (nx_s-2*n_ghost); // number of cells to copy back for (int k=0; k<nz_s-2*n_ghost; k++) { for (int j=0; j<ny_s-2*n_ghost; j++) { hid = j*nx + k*nx*ny; gid = j*nx_s + k*nx_s*ny_s; for (int ii=0; ii<n_fields; ii++) { memcpy(&host_conserved[host_loc + hid + ii*n_cells], &buffer[gpu_loc + gid + ii*BLOCK_VOL], length*sizeof(Real)); } } } return; } else { printf("Error returning values to host. Unsupported grid dimensions.\n"); printf("nx: %d nx_s: %d ny: %d ny_s: %d nz: %d nz_s: %d.\n", nx, nx_s, ny, ny_s, nz, nz_s); exit(0); } } #endif //CUDA
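Aside: a minimal standalone sketch of the per-axis offset arithmetic used by the block copy/return routines above. The helper name, the 1D reduction, and the example sizes are illustrative assumptions, not part of the original file.

#include <assert.h>
#include <stdio.h>

/* 1D version of the per-axis start index used above: blocks of interior size
 * (n_s - 2*n_ghost) are placed end to end, and the last block is shifted back
 * by an offset so its n_s cells never run past the host array when the
 * interior size does not divide the domain evenly. */
static int block_start(int block_id, int block_tot, int n_s, int n_ghost, int remainder)
{
  int offset = 0;
  if (block_id == block_tot - 1 && remainder != 0) {
    offset = n_s - 2 * n_ghost - remainder;  /* shift the last block left */
  }
  return block_id * (n_s - 2 * n_ghost) - offset;
}

int main(void)
{
  /* hypothetical sizes: host axis nx = 100 cells (ghosts included),
   * subgrid nx_s = 38, n_ghost = 4 -> 30 interior cells per block,
   * 92 interior cells total, 4 blocks, remainder = 92 - 3*30 = 2 */
  int nx = 100, nx_s = 38, n_ghost = 4, block_tot = 4, remainder = 2;
  for (int b = 0; b < block_tot; b++) {
    int start = block_start(b, block_tot, nx_s, n_ghost, remainder);
    printf("block %d copies host cells [%d, %d)\n", b, start, start + nx_s);
    assert(start >= 0 && start + nx_s <= nx);  /* never past the host array */
  }
  return 0;
}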
630b84b468be345ff1ec91162d726aa477636cad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../../include/error_util.h" #include "lbp_min_sum_kernel.cuh" #include "util.cuh" // ============================================================================ // CUDA KERNELS // ============================================================================ __global__ void lbp_cuda_forward_kernel_reduction_min_sum( KernelData cost, KernelData5 jump, KernelData edges, KernelData5 messages, KernelData5 messages_argmin, KernelData message_scale, const unsigned short x_in, const unsigned short direction, int shared_mem_offset, unsigned short delta) { unsigned short y = blockIdx.x * blockDim.x + threadIdx.x; const unsigned short c = blockIdx.y * blockDim.y + threadIdx.y; unsigned short x = 0; if(direction == UP || direction == DOWN) { x = y; y = x_in; } else { x = x_in; // y = y; } // shared memory h extern __shared__ float sdata[]; // message size is N x 4 x H x W x C // cost size is N x H x W x C // edges: N x 1 x H x W // jumps: 1 x 1 x H x W const short N = cost.size0; const short H = cost.size1; const short W = cost.size2; const short C = cost.size3; const unsigned int tid = threadIdx.y + blockDim.y * threadIdx.x; const float max_float = 1e15; // check inside image if(c >= C || x >= W || y >= H) { // write large number that will never win sdata[tid] = max_float; return; } unsigned int n = 0; float edgeWeight = edges(n, direction, y, x); // write to shared memory // compute message for every label sdata[tid] = cost(n, y, x, c); // add costs from all neighbors if(direction != RIGHT) { sdata[tid] += messages(n, RIGHT, y, x, c); } if(direction != LEFT) { sdata[tid] += messages(n, LEFT, y, x, c); } if(direction != UP) { sdata[tid] += messages(n, UP, y, x, c); } if(direction != DOWN) { sdata[tid] += messages(n, DOWN, y, x, c); } float h = sdata[tid]; __syncthreads(); // save h in shared mem sdata[tid] = h; sdata[tid + shared_mem_offset] = static_cast<float>(c); __syncthreads(); // if delta is larger or equal than this threshold use old version as it is a little faster int old_version_threshold = C; float msg = 0.0; int msg_argmin = 0; // if there is no truncation use old version if(delta >= old_version_threshold) { //FULL VERSION ///////////////////// sdata[tid] = h; __syncthreads(); msg = max_float; //minVal + jump(0, 0, 0, jump.size3 - 1) * edgeWeight; msg_argmin = 0; for(unsigned short label = 0; label < C; ++label) { // compute min in local var to avoid global mem writes float jump_cost = getJumpCost(c, label, jump, n, direction, y, x); float new_msg = sdata[label + blockDim.y * threadIdx.x] + jump_cost * edgeWeight; msg = fminf(msg, new_msg); if(msg == new_msg) { msg_argmin = label; } } __syncthreads(); ///////////////// } else { float L2 = jump(n, direction, jump.size2 - 1, y, x); float offset = jump(n, direction, 0, y, x); unsigned short start = max(static_cast<unsigned short>(round(c - (delta - 1) + offset - 1)), 0); unsigned short stop = min(static_cast<unsigned short>(round(c + delta + offset + 1)), C); //TRUNC SPEED UP VERSION /////////////////////////////////// for(unsigned int s=blockDim.y / 2; s > 0; s>>=1) { if(tid - (threadIdx.x * blockDim.y) < s && tid + s < (threadIdx.x * blockDim.y) + C) { //min parallel reduction float min_val = sdata[tid]; float min_label = sdata[tid + shared_mem_offset]; if(sdata[tid + s] <= sdata[tid]) { min_val = sdata[tid + s]; min_label = sdata[shared_mem_offset + tid + s]; } //min val parallel reduction sdata[tid] = min_val; //argmin prallel reduction 
sdata[shared_mem_offset + tid] = min_label; } __syncthreads(); } float min_h = sdata[threadIdx.x * blockDim.y]; int argmin_h = sdata[shared_mem_offset + threadIdx.x * blockDim.y]; __syncthreads(); msg = min_h + L2 * edgeWeight; msg_argmin = static_cast<int>(argmin_h); sdata[tid] = h; __syncthreads(); for(unsigned short label = start; label < stop; ++label) { // compute min in local var to avoid global mem writes float jump_cost = getJumpCost(c, label, jump, n, direction, y, x); float new_msg = sdata[label + blockDim.y * threadIdx.x] + jump_cost * edgeWeight; if(new_msg <= msg) { msg = new_msg; msg_argmin = label; } } __syncthreads(); ///////////////////////////// } // compute normalization with 2nd reduction sdata[tid] = (float)exp((double)msg); __syncthreads(); for(unsigned int s=blockDim.y / 2; s > 0; s>>=1) { if(tid - (threadIdx.x * blockDim.y) < s && tid + s < (threadIdx.x * blockDim.y) + C) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } // normalize message double sum_exp = max((double)sdata[blockDim.y * threadIdx.x], 1e-45); float logSumExp = (float)log(sum_exp); // if(sum_exp < 1e-10) // { // printf("sum exp zero: %f , logsumexp: %f msg: %f \n", sum_exp, msg); // } //float logSumExp = 0.0; if(direction == RIGHT) { messages(n, LEFT, y, x+1, c) = msg - logSumExp; messages_argmin(n, LEFT, y, x+1, c) = msg_argmin; message_scale(n, LEFT, y, x+1) = sum_exp; } if(direction == LEFT) { messages(n, RIGHT, y, x-1, c) = msg - logSumExp; messages_argmin(n, RIGHT, y, x-1, c) = msg_argmin; message_scale(n, RIGHT, y, x-1) = sum_exp; } if(direction == UP) { messages(n, DOWN, y-1, x, c) = msg - logSumExp; messages_argmin(n, DOWN, y-1, x, c) = msg_argmin; message_scale(n, DOWN, y-1, x) = sum_exp; } if(direction == DOWN) { messages(n, UP, y+1, x, c) = msg - logSumExp; messages_argmin(n, UP, y+1, x, c) = msg_argmin; message_scale(n, UP, y+1, x) = sum_exp; } } __global__ void lbp_cuda_backward_kernel_reduction_min_sum( KernelData cost, KernelData5 jump, KernelData edges, KernelData5 messages, KernelData5 messages_argmin, KernelData message_scale, KernelData5 in_grad, KernelData gradient_unary, KernelData5 gradient_pairwise, KernelData gradient_edge, KernelData gradient_accumulation, KernelData gradient_accumulation_tmp, KernelData5 saved_prev_grad_msg, const unsigned short x_in, const unsigned short direction, bool compute_cross, const unsigned int n) { //initialize utility variables unsigned short y = blockIdx.x * blockDim.x + threadIdx.x; const unsigned short c = blockIdx.y * blockDim.y + threadIdx.y; //unsigned int n = 0; unsigned int x; if(direction == UP || direction == DOWN) { x = y; y = x_in; } else { x = x_in; } // shared memory h extern __shared__ float sdata[]; // message size is N x 4 x H x W x C // cost size is N x H x W x C // edges: N x 1 x H x W // jumps: 1 x 1 x H x W const short N = cost.size0; const short H = cost.size1; const short W = cost.size2; const short C = cost.size3; const unsigned int tid = threadIdx.y + blockDim.y * threadIdx.x; const float max_float = 1e15; // check inside image if(c >= C || x >= W || y >= H) { // write large number that will never win sdata[tid] = max_float; return; } //calc backward message short prev_row_shift = 0; short prev_col_shift = 0; if(direction == LEFT) { prev_row_shift = 0; prev_col_shift = 1; } if(direction == RIGHT) { prev_row_shift = 0; prev_col_shift = -1; } if(direction == DOWN) { prev_row_shift = -1; prev_col_shift = 0; } if(direction == UP) { prev_row_shift = 1; prev_col_shift = 0; } int grad_xy_idx = 0; if(direction == UP) { 
grad_xy_idx = DOWN; } if(direction == DOWN) { grad_xy_idx = UP; } if(direction == LEFT) { grad_xy_idx = RIGHT; } if(direction == RIGHT) { grad_xy_idx = LEFT; } float edgeWeight = edges(n, grad_xy_idx, y, x); int HOR_IDX = 0; int UP_IDX = 1; int DOWN_IDX = 2; ///////////////////////in_grad normalization //////////////////////////////////////////// float original_message_val = messages(n, direction, y + prev_row_shift, x + prev_col_shift, c) + log(message_scale(n, direction, y + prev_row_shift, x + prev_col_shift)); float message_exp_sum = message_scale(n, direction, y + prev_row_shift, x + prev_col_shift); sdata[tid] = in_grad(n, direction, y + prev_row_shift, x + prev_col_shift, c); __syncthreads(); float in_grad_normalized = 0.0; // normalization for(unsigned short label = 0; label < C; ++label) { float J_norm_factor = - (1.0 / message_exp_sum) * exp(original_message_val); if(c == label) { J_norm_factor = 1.0 - (1.0 / message_exp_sum) * exp(original_message_val); } //printf("tid %i label %i norm msg val %f \n", tid, label, norm_msg_val); //in_grad is in sdata in_grad_normalized += sdata[label + blockDim.y * threadIdx.x] * J_norm_factor; } __syncthreads(); ///////////////////////acc normalization //////////////////////////////////////////// sdata[tid] = getGradientAcc(gradient_accumulation, direction, n, y, x, c, HOR_IDX); __syncthreads(); float acc_normalized = 0.0; // normalization for(unsigned short label = 0; label < C; ++label) { float J_norm_factor = - (1.0 / message_exp_sum) * exp(original_message_val); if(c == label) { J_norm_factor = 1.0 - (1.0 / message_exp_sum) * exp(original_message_val); } //in_grad is in sdata acc_normalized += sdata[label + blockDim.y * threadIdx.x] * J_norm_factor; } __syncthreads(); ///////////////////////////////// int min_index = (int)messages_argmin(n, direction, y + prev_row_shift, x + prev_col_shift, c); //int jump_idx = getJumpCostPos(c, min_index, jump.size2); float jump_cost = getJumpCost(c, min_index, jump, n, grad_xy_idx, y, x); float additive_hor = in_grad_normalized + acc_normalized; float additive_up = 0.0; float additive_down = 0.0; if(compute_cross) { additive_up = saved_prev_grad_msg(n, UP, y + prev_row_shift, x + prev_col_shift, c) + getGradientAcc(gradient_accumulation, direction, n, y, x, c, UP_IDX); additive_down = saved_prev_grad_msg(n, DOWN, y + prev_row_shift, x + prev_col_shift, c) + getGradientAcc(gradient_accumulation, direction, n, y, x, c, DOWN_IDX); } // so that gradient_acc is not changed before assigning __syncthreads(); //unary gradient atomicAdd(&gradient_unary(n, y, x, min_index), additive_hor); atomicAdd(&gradient_unary(n, y, x, min_index), additive_up); atomicAdd(&gradient_unary(n, y, x, min_index), additive_down); //pairwise gradient addGradientJump(c, min_index, jump, n, direction, y, x, gradient_pairwise, grad_xy_idx, edgeWeight * additive_hor, edgeWeight * additive_up, edgeWeight * additive_down); //edge gradient atomicAdd(&gradient_edge(n, grad_xy_idx, y, x), jump_cost * additive_hor); atomicAdd(&gradient_edge(n, grad_xy_idx, y, x), jump_cost * additive_up); atomicAdd(&gradient_edge(n, grad_xy_idx, y, x), jump_cost * additive_down); updateGradientAcc(gradient_accumulation_tmp, additive_hor, direction, n, y, x, min_index, HOR_IDX); updateGradientAcc(gradient_accumulation_tmp, additive_up, direction, n, y, x, min_index, UP_IDX); updateGradientAcc(gradient_accumulation_tmp, additive_down, direction, n, y, x, min_index, DOWN_IDX); __syncthreads(); setGradientAcc(gradient_accumulation, 
getGradientAcc(gradient_accumulation_tmp, direction, n, y, x, c, HOR_IDX), direction, n, y, x, c, HOR_IDX); setGradientAcc(gradient_accumulation, getGradientAcc(gradient_accumulation_tmp, direction, n, y, x, c, UP_IDX), direction, n, y, x, c, UP_IDX); setGradientAcc(gradient_accumulation, getGradientAcc(gradient_accumulation_tmp, direction, n, y, x, c, DOWN_IDX), direction, n, y, x, c, DOWN_IDX); __syncthreads(); saved_prev_grad_msg(n, direction, y, x, c) = getGradientAcc(gradient_accumulation, direction, n, y, x, c, HOR_IDX); } // ============================================================================ // CPP KERNEL CALLS // ============================================================================ namespace cuda { std::vector<at::Tensor> lbp_reduction_min_sum(at::Tensor cost, at::Tensor jump, at::Tensor edge, at::Tensor messages, unsigned short delta) { int N = cost.size(0); int H = cost.size(1); int W = cost.size(2); int C = cost.size(3); //int max_iter = 2; auto options = at::TensorOptions(cost.options()); // at::Tensor messages = at::zeros({N, 4, H, W, C}, options); at::Tensor messages_argmin = at::zeros({N, 4, H, W, C}, options); at::Tensor message_scale = at::zeros({N, 4, H, W}, options); //cost = cost.permute({0, 2, 3, 1}).contiguous(); // parallelize over image rows and disparities // block-size in disparity dimension must be >= number of disparities // then all the synchronization can be done over blocks (fast) // otherwise global synchronization is necessary int blockDimC = static_cast<int>(::min(powf(2.0f, ::ceil(log2f(C))), 1024.0f)); int blockDimHW = static_cast<int>(::max(static_cast<float>(1024.0f / blockDimC / 1.0f), 1.0f)); // attention: 1024 is maximal number of threads per block!! const dim3 blockSize(blockDimHW, blockDimC); const dim3 numBlocksLR(::ceil(H / static_cast<float>(blockSize.x)), ::ceil(C / static_cast<float>(blockSize.y))); const dim3 numBlocksUD(::ceil(W / static_cast<float>(blockSize.x)), ::ceil(C / static_cast<float>(blockSize.y))); if(numBlocksLR.y != 1) std::cout << "SOMETHING IS WRONG: Blocksize over disps is not 1" << std::endl; const int threadsPerBlock = blockSize.x * blockSize.y * blockSize.z; // to Right for(unsigned short x = 0; x < W - 1; ++x) { // compute min messages hipLaunchKernelGGL(( lbp_cuda_forward_kernel_reduction_min_sum), dim3(numBlocksLR), dim3(blockSize), 2 * threadsPerBlock * sizeof(float), 0, cost, jump, edge, messages, messages_argmin, message_scale, x, RIGHT, threadsPerBlock, delta); cudaSafeCall(hipGetLastError()); } // to LEFT for(unsigned short x = W - 1; x > 0; --x) { // compute min messages hipLaunchKernelGGL(( lbp_cuda_forward_kernel_reduction_min_sum), dim3(numBlocksLR), dim3(blockSize), 2 * threadsPerBlock * sizeof(float), 0, cost, jump, edge, messages, messages_argmin, message_scale, x, LEFT, threadsPerBlock, delta); cudaSafeCall(hipGetLastError()); } // to DOWN for(unsigned short y = 0; y < H - 1; ++y) { // compute min messages hipLaunchKernelGGL(( lbp_cuda_forward_kernel_reduction_min_sum), dim3(numBlocksUD), dim3(blockSize), 2 * threadsPerBlock * sizeof(float), 0, cost, jump, edge, messages, messages_argmin, message_scale, y, DOWN, threadsPerBlock, delta); cudaSafeCall(hipGetLastError()); } // to UP for(unsigned short y = H - 1; y > 0; --y) { // compute min messages hipLaunchKernelGGL(( lbp_cuda_forward_kernel_reduction_min_sum), dim3(numBlocksUD), dim3(blockSize), 2 * threadsPerBlock * sizeof(float), 0, cost, jump, edge, messages, messages_argmin, message_scale, y, UP, threadsPerBlock, delta); 
cudaSafeCall(hipGetLastError()); } //auto beliefs = messages.sum({1}) + cost; std::vector<at::Tensor> output_vec; output_vec.push_back(messages); output_vec.push_back(messages_argmin); output_vec.push_back(message_scale); return output_vec; } std::vector<at::Tensor> lbp_forward_min_sum(at::Tensor cost, at::Tensor jump, at::Tensor edge, at::Tensor messages, unsigned short delta) { return lbp_reduction_min_sum(cost, jump, edge, messages, delta); } //============================================================================= // BACKWARD //============================================================================= std::vector<at::Tensor> lbp_backward_min_sum(at::Tensor cost, at::Tensor jump, at::Tensor edge, at::Tensor in_grad, at::Tensor messages, at::Tensor messages_argmin, at::Tensor message_scale) { int N = cost.size(0); int H = cost.size(1); int W = cost.size(2); int C = cost.size(3); auto options = at::TensorOptions(cost.options()); at::Tensor gradient_unary = at::zeros({N, H, W, C}, options); at::Tensor gradient_pairwise = at::zeros({1, 4, jump.size(2), H, W}, options); at::Tensor gradient_edge = at::zeros({N, 4, H, W}, options); at::Tensor gradient_messages = at::zeros({N, 4, H, W, C}, options); gradient_messages += in_grad; at::Tensor saved_prev_grad_msg = at::zeros({N, 4, H, W, C}, options); at::Tensor gradient_accumulation; // parallelize over image rows and disparities // block-size in disparity dimension must be >= number of disparities // then all the synchronization can be done over blocks (fast) // otherwise global synchronization is necessary int blockDimC = static_cast<int>(::min(powf(2.0f, ::ceil(log2f(C))), 1024.0f)); int blockDimHW = static_cast<int>(::max(static_cast<float>(1024.0f / blockDimC / 1.0f), 1.0f)); // attention: 1024 is maximal number of threads per block!! 
const dim3 blockSize(blockDimHW, blockDimC); const dim3 numBlocksLR(::ceil(H / static_cast<float>(blockSize.x)), ::ceil(C / static_cast<float>(blockSize.y))); const dim3 numBlocksUD(::ceil(W / static_cast<float>(blockSize.x)), ::ceil(C / static_cast<float>(blockSize.y))); //printf("blockDimC %i \n", blockDimC); //printf("blockDimHW %i \n", blockDimHW); if(numBlocksLR.y != 1) std::cout << "SOMETHING IS WRONG: Blocksize over disps is not 1===" << numBlocksLR.y << std::endl; const int threadsPerBlock = blockSize.x * blockSize.y * blockSize.z; const float max_float = 1e15; for(int n = 0; n < N; ++n) { ////////////////////UNARY GRADIENT//////////////////////////// //to DOWN gradient_accumulation = at::zeros({N, W, 3, C}, options); for(short y = 1; y < H; ++y) { // compute min messages at::Tensor gradient_accumulation_tmp = at::zeros({N, W, 3, C}, options); hipLaunchKernelGGL(( lbp_cuda_backward_kernel_reduction_min_sum), dim3(numBlocksUD), dim3(blockSize), threadsPerBlock * sizeof(float), 0, cost, jump, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, y, DOWN, false, n); cudaSafeCall(hipGetLastError()); } // to UP gradient_accumulation = at::zeros({N, W, 3, C}, options); for(short y = H - 2; y >= 0; --y) { // compute min messages at::Tensor gradient_accumulation_tmp = at::zeros({N, W, 3, C}, options); hipLaunchKernelGGL(( lbp_cuda_backward_kernel_reduction_min_sum), dim3(numBlocksUD), dim3(blockSize), threadsPerBlock * sizeof(float), 0, cost, jump, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, y, UP, false, n); cudaSafeCall(hipGetLastError()); } // to LEFT gradient_accumulation = at::zeros({N, H, 3, C}, options); for(short x = W-2; x >= 0; --x) { // compute min messages at::Tensor gradient_accumulation_tmp = at::zeros({N, W, 3, C}, options); hipLaunchKernelGGL(( lbp_cuda_backward_kernel_reduction_min_sum), dim3(numBlocksLR), dim3(blockSize), threadsPerBlock * sizeof(float), 0, cost, jump, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, x, LEFT, true, n); cudaSafeCall(hipGetLastError()); } // to RIGHT gradient_accumulation = at::zeros({N, H, 3, C}, options); for(short x = 1; x < W; ++x) { // compute min messages at::Tensor gradient_accumulation_tmp = at::zeros({N, W, 3, C}, options); hipLaunchKernelGGL(( lbp_cuda_backward_kernel_reduction_min_sum), dim3(numBlocksLR), dim3(blockSize), threadsPerBlock * sizeof(float), 0, cost, jump, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, x, RIGHT, true, n); cudaSafeCall(hipGetLastError()); } } std::vector<at::Tensor> output_vec; output_vec.push_back(gradient_unary); output_vec.push_back(gradient_pairwise); output_vec.push_back(gradient_edge); output_vec.push_back(gradient_messages); return output_vec; } }
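Aside: a standalone sketch of the shared-memory min/argmin tree reduction that the forward kernel above performs over disparity labels. The kernel and variable names here are illustrative, and the snippet assumes one block whose size is a power of two covering all n values, as the host launch code above arranges.

#include <cstdio>
#include <cuda_runtime.h>

// One block of blockDim.x threads finds the minimum of n costs and the label
// it occurs at.  The first blockDim.x floats of sdata hold values, the next
// blockDim.x hold labels, and both halves are reduced in lock step -- the
// same two-array layout the forward kernel uses via shared_mem_offset.
__global__ void min_argmin_reduce(const float *in, int n,
                                  float *min_out, int *argmin_out)
{
    extern __shared__ float sdata[];
    unsigned int tid = threadIdx.x;

    sdata[tid] = (tid < (unsigned)n) ? in[tid] : 1e15f;  // pad: never wins
    sdata[tid + blockDim.x] = (float)tid;                // candidate label
    __syncthreads();

    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s && sdata[tid + s] <= sdata[tid]) {
            sdata[tid]              = sdata[tid + s];               // min
            sdata[tid + blockDim.x] = sdata[tid + s + blockDim.x];  // argmin
        }
        __syncthreads();
    }
    if (tid == 0) {
        *min_out    = sdata[0];
        *argmin_out = (int)sdata[blockDim.x];
    }
}

int main()
{
    const int n = 100, threads = 128;        // threads: next pow2 >= n
    float h_in[n];
    for (int i = 0; i < n; ++i) h_in[i] = (float)((i - 37) * (i - 37) + 1);
    float *d_in, *d_min; int *d_arg;
    cudaMalloc(&d_in,  n * sizeof(float));
    cudaMalloc(&d_min, sizeof(float));
    cudaMalloc(&d_arg, sizeof(int));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
    min_argmin_reduce<<<1, threads, 2 * threads * sizeof(float)>>>(d_in, n, d_min, d_arg);
    float mn; int am;
    cudaMemcpy(&mn, d_min, sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(&am, d_arg, sizeof(int),   cudaMemcpyDeviceToHost);
    printf("min = %f at label %d\n", mn, am);  // expect 1.0 at label 37
    cudaFree(d_in); cudaFree(d_min); cudaFree(d_arg);
    return 0;
}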
630b84b468be345ff1ec91162d726aa477636cad.cu
#include "../../include/error_util.h" #include "lbp_min_sum_kernel.cuh" #include "util.cuh" // ============================================================================ // CUDA KERNELS // ============================================================================ __global__ void lbp_cuda_forward_kernel_reduction_min_sum( KernelData cost, KernelData5 jump, KernelData edges, KernelData5 messages, KernelData5 messages_argmin, KernelData message_scale, const unsigned short x_in, const unsigned short direction, int shared_mem_offset, unsigned short delta) { unsigned short y = blockIdx.x * blockDim.x + threadIdx.x; const unsigned short c = blockIdx.y * blockDim.y + threadIdx.y; unsigned short x = 0; if(direction == UP || direction == DOWN) { x = y; y = x_in; } else { x = x_in; // y = y; } // shared memory h extern __shared__ float sdata[]; // message size is N x 4 x H x W x C // cost size is N x H x W x C // edges: N x 1 x H x W // jumps: 1 x 1 x H x W const short N = cost.size0; const short H = cost.size1; const short W = cost.size2; const short C = cost.size3; const unsigned int tid = threadIdx.y + blockDim.y * threadIdx.x; const float max_float = 1e15; // check inside image if(c >= C || x >= W || y >= H) { // write large number that will never win sdata[tid] = max_float; return; } unsigned int n = 0; float edgeWeight = edges(n, direction, y, x); // write to shared memory // compute message for every label sdata[tid] = cost(n, y, x, c); // add costs from all neighbors if(direction != RIGHT) { sdata[tid] += messages(n, RIGHT, y, x, c); } if(direction != LEFT) { sdata[tid] += messages(n, LEFT, y, x, c); } if(direction != UP) { sdata[tid] += messages(n, UP, y, x, c); } if(direction != DOWN) { sdata[tid] += messages(n, DOWN, y, x, c); } float h = sdata[tid]; __syncthreads(); // save h in shared mem sdata[tid] = h; sdata[tid + shared_mem_offset] = static_cast<float>(c); __syncthreads(); // if delta is larger or equal than this threshold use old version as it is a little faster int old_version_threshold = C; float msg = 0.0; int msg_argmin = 0; // if there is no truncation use old version if(delta >= old_version_threshold) { //FULL VERSION ///////////////////// sdata[tid] = h; __syncthreads(); msg = max_float; //minVal + jump(0, 0, 0, jump.size3 - 1) * edgeWeight; msg_argmin = 0; for(unsigned short label = 0; label < C; ++label) { // compute min in local var to avoid global mem writes float jump_cost = getJumpCost(c, label, jump, n, direction, y, x); float new_msg = sdata[label + blockDim.y * threadIdx.x] + jump_cost * edgeWeight; msg = fminf(msg, new_msg); if(msg == new_msg) { msg_argmin = label; } } __syncthreads(); ///////////////// } else { float L2 = jump(n, direction, jump.size2 - 1, y, x); float offset = jump(n, direction, 0, y, x); unsigned short start = max(static_cast<unsigned short>(round(c - (delta - 1) + offset - 1)), 0); unsigned short stop = min(static_cast<unsigned short>(round(c + delta + offset + 1)), C); //TRUNC SPEED UP VERSION /////////////////////////////////// for(unsigned int s=blockDim.y / 2; s > 0; s>>=1) { if(tid - (threadIdx.x * blockDim.y) < s && tid + s < (threadIdx.x * blockDim.y) + C) { //min parallel reduction float min_val = sdata[tid]; float min_label = sdata[tid + shared_mem_offset]; if(sdata[tid + s] <= sdata[tid]) { min_val = sdata[tid + s]; min_label = sdata[shared_mem_offset + tid + s]; } //min val parallel reduction sdata[tid] = min_val; //argmin prallel reduction sdata[shared_mem_offset + tid] = min_label; } __syncthreads(); } float min_h = 
sdata[threadIdx.x * blockDim.y]; int argmin_h = sdata[shared_mem_offset + threadIdx.x * blockDim.y]; __syncthreads(); msg = min_h + L2 * edgeWeight; msg_argmin = static_cast<int>(argmin_h); sdata[tid] = h; __syncthreads(); for(unsigned short label = start; label < stop; ++label) { // compute min in local var to avoid global mem writes float jump_cost = getJumpCost(c, label, jump, n, direction, y, x); float new_msg = sdata[label + blockDim.y * threadIdx.x] + jump_cost * edgeWeight; if(new_msg <= msg) { msg = new_msg; msg_argmin = label; } } __syncthreads(); ///////////////////////////// } // compute normalization with 2nd reduction sdata[tid] = (float)exp((double)msg); __syncthreads(); for(unsigned int s=blockDim.y / 2; s > 0; s>>=1) { if(tid - (threadIdx.x * blockDim.y) < s && tid + s < (threadIdx.x * blockDim.y) + C) { sdata[tid] += sdata[tid + s]; } __syncthreads(); } // normalize message double sum_exp = max((double)sdata[blockDim.y * threadIdx.x], 1e-45); float logSumExp = (float)log(sum_exp); // if(sum_exp < 1e-10) // { // printf("sum exp zero: %f , logsumexp: %f msg: %f \n", sum_exp, msg); // } //float logSumExp = 0.0; if(direction == RIGHT) { messages(n, LEFT, y, x+1, c) = msg - logSumExp; messages_argmin(n, LEFT, y, x+1, c) = msg_argmin; message_scale(n, LEFT, y, x+1) = sum_exp; } if(direction == LEFT) { messages(n, RIGHT, y, x-1, c) = msg - logSumExp; messages_argmin(n, RIGHT, y, x-1, c) = msg_argmin; message_scale(n, RIGHT, y, x-1) = sum_exp; } if(direction == UP) { messages(n, DOWN, y-1, x, c) = msg - logSumExp; messages_argmin(n, DOWN, y-1, x, c) = msg_argmin; message_scale(n, DOWN, y-1, x) = sum_exp; } if(direction == DOWN) { messages(n, UP, y+1, x, c) = msg - logSumExp; messages_argmin(n, UP, y+1, x, c) = msg_argmin; message_scale(n, UP, y+1, x) = sum_exp; } } __global__ void lbp_cuda_backward_kernel_reduction_min_sum( KernelData cost, KernelData5 jump, KernelData edges, KernelData5 messages, KernelData5 messages_argmin, KernelData message_scale, KernelData5 in_grad, KernelData gradient_unary, KernelData5 gradient_pairwise, KernelData gradient_edge, KernelData gradient_accumulation, KernelData gradient_accumulation_tmp, KernelData5 saved_prev_grad_msg, const unsigned short x_in, const unsigned short direction, bool compute_cross, const unsigned int n) { //initialize utility variables unsigned short y = blockIdx.x * blockDim.x + threadIdx.x; const unsigned short c = blockIdx.y * blockDim.y + threadIdx.y; //unsigned int n = 0; unsigned int x; if(direction == UP || direction == DOWN) { x = y; y = x_in; } else { x = x_in; } // shared memory h extern __shared__ float sdata[]; // message size is N x 4 x H x W x C // cost size is N x H x W x C // edges: N x 1 x H x W // jumps: 1 x 1 x H x W const short N = cost.size0; const short H = cost.size1; const short W = cost.size2; const short C = cost.size3; const unsigned int tid = threadIdx.y + blockDim.y * threadIdx.x; const float max_float = 1e15; // check inside image if(c >= C || x >= W || y >= H) { // write large number that will never win sdata[tid] = max_float; return; } //calc backward message short prev_row_shift = 0; short prev_col_shift = 0; if(direction == LEFT) { prev_row_shift = 0; prev_col_shift = 1; } if(direction == RIGHT) { prev_row_shift = 0; prev_col_shift = -1; } if(direction == DOWN) { prev_row_shift = -1; prev_col_shift = 0; } if(direction == UP) { prev_row_shift = 1; prev_col_shift = 0; } int grad_xy_idx = 0; if(direction == UP) { grad_xy_idx = DOWN; } if(direction == DOWN) { grad_xy_idx = UP; } if(direction == 
LEFT) { grad_xy_idx = RIGHT; } if(direction == RIGHT) { grad_xy_idx = LEFT; } float edgeWeight = edges(n, grad_xy_idx, y, x); int HOR_IDX = 0; int UP_IDX = 1; int DOWN_IDX = 2; ///////////////////////in_grad normalization //////////////////////////////////////////// float original_message_val = messages(n, direction, y + prev_row_shift, x + prev_col_shift, c) + log(message_scale(n, direction, y + prev_row_shift, x + prev_col_shift)); float message_exp_sum = message_scale(n, direction, y + prev_row_shift, x + prev_col_shift); sdata[tid] = in_grad(n, direction, y + prev_row_shift, x + prev_col_shift, c); __syncthreads(); float in_grad_normalized = 0.0; // normalization for(unsigned short label = 0; label < C; ++label) { float J_norm_factor = - (1.0 / message_exp_sum) * exp(original_message_val); if(c == label) { J_norm_factor = 1.0 - (1.0 / message_exp_sum) * exp(original_message_val); } //printf("tid %i label %i norm msg val %f \n", tid, label, norm_msg_val); //in_grad is in sdata in_grad_normalized += sdata[label + blockDim.y * threadIdx.x] * J_norm_factor; } __syncthreads(); ///////////////////////acc normalization //////////////////////////////////////////// sdata[tid] = getGradientAcc(gradient_accumulation, direction, n, y, x, c, HOR_IDX); __syncthreads(); float acc_normalized = 0.0; // normalization for(unsigned short label = 0; label < C; ++label) { float J_norm_factor = - (1.0 / message_exp_sum) * exp(original_message_val); if(c == label) { J_norm_factor = 1.0 - (1.0 / message_exp_sum) * exp(original_message_val); } //in_grad is in sdata acc_normalized += sdata[label + blockDim.y * threadIdx.x] * J_norm_factor; } __syncthreads(); ///////////////////////////////// int min_index = (int)messages_argmin(n, direction, y + prev_row_shift, x + prev_col_shift, c); //int jump_idx = getJumpCostPos(c, min_index, jump.size2); float jump_cost = getJumpCost(c, min_index, jump, n, grad_xy_idx, y, x); float additive_hor = in_grad_normalized + acc_normalized; float additive_up = 0.0; float additive_down = 0.0; if(compute_cross) { additive_up = saved_prev_grad_msg(n, UP, y + prev_row_shift, x + prev_col_shift, c) + getGradientAcc(gradient_accumulation, direction, n, y, x, c, UP_IDX); additive_down = saved_prev_grad_msg(n, DOWN, y + prev_row_shift, x + prev_col_shift, c) + getGradientAcc(gradient_accumulation, direction, n, y, x, c, DOWN_IDX); } // so that gradient_acc is not changed before assigning __syncthreads(); //unary gradient atomicAdd(&gradient_unary(n, y, x, min_index), additive_hor); atomicAdd(&gradient_unary(n, y, x, min_index), additive_up); atomicAdd(&gradient_unary(n, y, x, min_index), additive_down); //pairwise gradient addGradientJump(c, min_index, jump, n, direction, y, x, gradient_pairwise, grad_xy_idx, edgeWeight * additive_hor, edgeWeight * additive_up, edgeWeight * additive_down); //edge gradient atomicAdd(&gradient_edge(n, grad_xy_idx, y, x), jump_cost * additive_hor); atomicAdd(&gradient_edge(n, grad_xy_idx, y, x), jump_cost * additive_up); atomicAdd(&gradient_edge(n, grad_xy_idx, y, x), jump_cost * additive_down); updateGradientAcc(gradient_accumulation_tmp, additive_hor, direction, n, y, x, min_index, HOR_IDX); updateGradientAcc(gradient_accumulation_tmp, additive_up, direction, n, y, x, min_index, UP_IDX); updateGradientAcc(gradient_accumulation_tmp, additive_down, direction, n, y, x, min_index, DOWN_IDX); __syncthreads(); setGradientAcc(gradient_accumulation, getGradientAcc(gradient_accumulation_tmp, direction, n, y, x, c, HOR_IDX), direction, n, y, x, c, HOR_IDX); 
setGradientAcc(gradient_accumulation, getGradientAcc(gradient_accumulation_tmp, direction, n, y, x, c, UP_IDX), direction, n, y, x, c, UP_IDX); setGradientAcc(gradient_accumulation, getGradientAcc(gradient_accumulation_tmp, direction, n, y, x, c, DOWN_IDX), direction, n, y, x, c, DOWN_IDX); __syncthreads(); saved_prev_grad_msg(n, direction, y, x, c) = getGradientAcc(gradient_accumulation, direction, n, y, x, c, HOR_IDX); } // ============================================================================ // CPP KERNEL CALLS // ============================================================================ namespace cuda { std::vector<at::Tensor> lbp_reduction_min_sum(at::Tensor cost, at::Tensor jump, at::Tensor edge, at::Tensor messages, unsigned short delta) { int N = cost.size(0); int H = cost.size(1); int W = cost.size(2); int C = cost.size(3); //int max_iter = 2; auto options = at::TensorOptions(cost.options()); // at::Tensor messages = at::zeros({N, 4, H, W, C}, options); at::Tensor messages_argmin = at::zeros({N, 4, H, W, C}, options); at::Tensor message_scale = at::zeros({N, 4, H, W}, options); //cost = cost.permute({0, 2, 3, 1}).contiguous(); // parallelize over image rows and disparities // block-size in disparity dimension must be >= number of disparities // then all the synchronization can be done over blocks (fast) // otherwise global synchronization is necessary int blockDimC = static_cast<int>(std::min(powf(2.0f, std::ceil(log2f(C))), 1024.0f)); int blockDimHW = static_cast<int>(std::max(static_cast<float>(1024.0f / blockDimC / 1.0f), 1.0f)); // attention: 1024 is maximal number of threads per block!! const dim3 blockSize(blockDimHW, blockDimC); const dim3 numBlocksLR(std::ceil(H / static_cast<float>(blockSize.x)), std::ceil(C / static_cast<float>(blockSize.y))); const dim3 numBlocksUD(std::ceil(W / static_cast<float>(blockSize.x)), std::ceil(C / static_cast<float>(blockSize.y))); if(numBlocksLR.y != 1) std::cout << "SOMETHING IS WRONG: Blocksize over disps is not 1" << std::endl; const int threadsPerBlock = blockSize.x * blockSize.y * blockSize.z; // to Right for(unsigned short x = 0; x < W - 1; ++x) { // compute min messages lbp_cuda_forward_kernel_reduction_min_sum<<<numBlocksLR, blockSize, 2 * threadsPerBlock * sizeof(float)>>>(cost, jump, edge, messages, messages_argmin, message_scale, x, RIGHT, threadsPerBlock, delta); cudaSafeCall(cudaGetLastError()); } // to LEFT for(unsigned short x = W - 1; x > 0; --x) { // compute min messages lbp_cuda_forward_kernel_reduction_min_sum<<<numBlocksLR, blockSize, 2 * threadsPerBlock * sizeof(float)>>>(cost, jump, edge, messages, messages_argmin, message_scale, x, LEFT, threadsPerBlock, delta); cudaSafeCall(cudaGetLastError()); } // to DOWN for(unsigned short y = 0; y < H - 1; ++y) { // compute min messages lbp_cuda_forward_kernel_reduction_min_sum<<<numBlocksUD, blockSize, 2 * threadsPerBlock * sizeof(float)>>>(cost, jump, edge, messages, messages_argmin, message_scale, y, DOWN, threadsPerBlock, delta); cudaSafeCall(cudaGetLastError()); } // to UP for(unsigned short y = H - 1; y > 0; --y) { // compute min messages lbp_cuda_forward_kernel_reduction_min_sum<<<numBlocksUD, blockSize, 2 * threadsPerBlock * sizeof(float)>>>(cost, jump, edge, messages, messages_argmin, message_scale, y, UP, threadsPerBlock, delta); cudaSafeCall(cudaGetLastError()); } //auto beliefs = messages.sum({1}) + cost; std::vector<at::Tensor> output_vec; output_vec.push_back(messages); output_vec.push_back(messages_argmin); output_vec.push_back(message_scale); return 
output_vec; } std::vector<at::Tensor> lbp_forward_min_sum(at::Tensor cost, at::Tensor jump, at::Tensor edge, at::Tensor messages, unsigned short delta) { return lbp_reduction_min_sum(cost, jump, edge, messages, delta); } //============================================================================= // BACKWARD //============================================================================= std::vector<at::Tensor> lbp_backward_min_sum(at::Tensor cost, at::Tensor jump, at::Tensor edge, at::Tensor in_grad, at::Tensor messages, at::Tensor messages_argmin, at::Tensor message_scale) { int N = cost.size(0); int H = cost.size(1); int W = cost.size(2); int C = cost.size(3); auto options = at::TensorOptions(cost.options()); at::Tensor gradient_unary = at::zeros({N, H, W, C}, options); at::Tensor gradient_pairwise = at::zeros({1, 4, jump.size(2), H, W}, options); at::Tensor gradient_edge = at::zeros({N, 4, H, W}, options); at::Tensor gradient_messages = at::zeros({N, 4, H, W, C}, options); gradient_messages += in_grad; at::Tensor saved_prev_grad_msg = at::zeros({N, 4, H, W, C}, options); at::Tensor gradient_accumulation; // parallelize over image rows and disparities // block-size in disparity dimension must be >= number of disparities // then all the synchronization can be done over blocks (fast) // otherwise global synchronization is necessary int blockDimC = static_cast<int>(std::min(powf(2.0f, std::ceil(log2f(C))), 1024.0f)); int blockDimHW = static_cast<int>(std::max(static_cast<float>(1024.0f / blockDimC / 1.0f), 1.0f)); // attention: 1024 is maximal number of threads per block!! const dim3 blockSize(blockDimHW, blockDimC); const dim3 numBlocksLR(std::ceil(H / static_cast<float>(blockSize.x)), std::ceil(C / static_cast<float>(blockSize.y))); const dim3 numBlocksUD(std::ceil(W / static_cast<float>(blockSize.x)), std::ceil(C / static_cast<float>(blockSize.y))); //printf("blockDimC %i \n", blockDimC); //printf("blockDimHW %i \n", blockDimHW); if(numBlocksLR.y != 1) std::cout << "SOMETHING IS WRONG: Blocksize over disps is not 1===" << numBlocksLR.y << std::endl; const int threadsPerBlock = blockSize.x * blockSize.y * blockSize.z; const float max_float = 1e15; for(int n = 0; n < N; ++n) { ////////////////////UNARY GRADIENT//////////////////////////// //to DOWN gradient_accumulation = at::zeros({N, W, 3, C}, options); for(short y = 1; y < H; ++y) { // compute min messages at::Tensor gradient_accumulation_tmp = at::zeros({N, W, 3, C}, options); lbp_cuda_backward_kernel_reduction_min_sum<<<numBlocksUD, blockSize, threadsPerBlock * sizeof(float)>>>(cost, jump, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, y, DOWN, false, n); cudaSafeCall(cudaGetLastError()); } // to UP gradient_accumulation = at::zeros({N, W, 3, C}, options); for(short y = H - 2; y >= 0; --y) { // compute min messages at::Tensor gradient_accumulation_tmp = at::zeros({N, W, 3, C}, options); lbp_cuda_backward_kernel_reduction_min_sum<<<numBlocksUD, blockSize, threadsPerBlock * sizeof(float)>>>(cost, jump, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, y, UP, false, n); cudaSafeCall(cudaGetLastError()); } // to LEFT gradient_accumulation = at::zeros({N, H, 3, C}, options); for(short x = W-2; x >= 0; --x) { // compute min messages at::Tensor gradient_accumulation_tmp = 
at::zeros({N, W, 3, C}, options); lbp_cuda_backward_kernel_reduction_min_sum<<<numBlocksLR, blockSize, threadsPerBlock * sizeof(float)>>>(cost, jump, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, x, LEFT, true, n); cudaSafeCall(cudaGetLastError()); } // to RIGHT gradient_accumulation = at::zeros({N, H, 3, C}, options); for(short x = 1; x < W; ++x) { // compute min messages at::Tensor gradient_accumulation_tmp = at::zeros({N, W, 3, C}, options); lbp_cuda_backward_kernel_reduction_min_sum<<<numBlocksLR, blockSize, threadsPerBlock * sizeof(float)>>>(cost, jump, edge, messages, messages_argmin, message_scale, in_grad, gradient_unary, gradient_pairwise, gradient_edge, gradient_accumulation, gradient_accumulation_tmp, saved_prev_grad_msg, x, RIGHT, true, n); cudaSafeCall(cudaGetLastError()); } } std::vector<at::Tensor> output_vec; output_vec.push_back(gradient_unary); output_vec.push_back(gradient_pairwise); output_vec.push_back(gradient_edge); output_vec.push_back(gradient_messages); return output_vec; } }
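Aside: a host-only sketch of the launch-geometry rule both versions above apply: round the disparity count C up to the next power of two (so the tree reduction halves cleanly), cap at the 1024-thread block limit, and spend the leftover thread budget on image rows/columns. The printed values are illustrative, not from the original file.

#include <algorithm>
#include <cmath>
#include <cstdio>

int main()
{
    int Cs[] = {32, 48, 100, 128, 2000};   // illustrative disparity counts
    for (int C : Cs) {
        // next power of two >= C, capped at 1024 threads per block
        int blockDimC  = static_cast<int>(
            std::min(std::pow(2.0f, std::ceil(std::log2((float)C))), 1024.0f));
        // remaining thread budget goes to image rows/columns
        int blockDimHW = std::max(1024 / blockDimC, 1);
        // the kernels require all C labels inside one block, since the
        // shared-memory reduction cannot synchronize across blocks
        int blocksOverC = (C + blockDimC - 1) / blockDimC;
        printf("C=%4d -> blockDimC=%4d blockDimHW=%2d blocksOverC=%d%s\n",
               C, blockDimC, blockDimHW, blocksOverC,
               blocksOverC != 1 ? "  <- would trigger the warning above" : "");
    }
    return 0;
}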
9d7a7afd424de95b37399226f94ad037d23c70d5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_runtime.h" #include "vec_kernels.cuh" #include "math.h" #include "data.h" #include <cstddef> #include <cstdlib> #include <cassert> #include <iostream> #include <ctime> #define BLOCKSIZE 1024 #define GRIDSIZE(d) (((d) + ((BLOCKSIZE) - 1)) / (BLOCKSIZE)) extern "C" { void fit(double *X, double *y, double *theta, double lr, size_t m, size_t n, size_t n_iter, bool from_py) { double *Xt, *Xd, *yd, *thetad; hipMalloc(&Xt, sizeof(double) * n * m); hipMalloc(&Xd, sizeof(double) * n * m); hipMemcpy(Xd, X, sizeof(double) * n * m, hipMemcpyHostToDevice); hipMalloc(&yd, sizeof(double) * m); hipMemcpy(yd, y, sizeof(double) * m, hipMemcpyHostToDevice); hipMalloc(&thetad, sizeof(double) * n); hipMemcpy(thetad, theta, sizeof(double) * n, hipMemcpyHostToDevice); double *z, *h, *g; hipMalloc(&z, sizeof(double) * m); hipMalloc(&h, sizeof(double) * m); hipMalloc(&g, sizeof(double) * n); if (from_py) { hipMemcpy(Xt, X, sizeof(double) * m * n, hipMemcpyHostToDevice); hipLaunchKernelGGL(( mat_transpose), dim3(GRIDSIZE(m*n)), dim3(BLOCKSIZE), 0, 0, Xd, Xd, n, m); hipDeviceSynchronize(); } else { hipLaunchKernelGGL(( mat_transpose), dim3(GRIDSIZE(m*n)), dim3(BLOCKSIZE), 0, 0, Xd, Xt, m, n); hipDeviceSynchronize(); } for (size_t i = 0; i < n_iter; i++) { // dot(X, theta) hipLaunchKernelGGL(( vec_dot_mat), dim3(GRIDSIZE(m)), dim3(BLOCKSIZE), 0, 0, Xd, thetad, z, m, n); // h = sigm(z) hipLaunchKernelGGL(( vec_sigmoid), dim3(GRIDSIZE(m)), dim3(BLOCKSIZE), 0, 0, z, h, 1, m); // h = -h hipLaunchKernelGGL(( vec_scalar_mul), dim3(GRIDSIZE(m)), dim3(BLOCKSIZE), 0, 0, h, h, -1.0, 1, m); // h = y - h hipLaunchKernelGGL(( vec_add), dim3(GRIDSIZE(m)), dim3(BLOCKSIZE), 0, 0, h, yd, h, 1, m); // h = -(y - h) = h - y hipLaunchKernelGGL(( vec_scalar_mul), dim3(GRIDSIZE(m)), dim3(BLOCKSIZE), 0, 0, h, h, -1.0, 1, m); // g = dot(Xt, h) hipLaunchKernelGGL(( vec_dot_mat), dim3(GRIDSIZE(n)), dim3(BLOCKSIZE), 0, 0, Xt, h, g, n, m); // g = -(g*lr) / m hipLaunchKernelGGL(( vec_scalar_mul), dim3(GRIDSIZE(n)), dim3(BLOCKSIZE), 0, 0, g, g, -(lr / m), 1, n); // theta = theta + (-g) = theta - g hipLaunchKernelGGL(( vec_add), dim3(GRIDSIZE(n)), dim3(BLOCKSIZE), 0, 0, thetad, g, thetad, 1, n); } hipDeviceSynchronize(); hipFree(z); hipFree(h); hipFree(g); hipFree(Xd); hipFree(Xt); hipFree(yd); hipMemcpy(theta, thetad, sizeof(double) * n, hipMemcpyDeviceToHost); hipFree(thetad); } void predict_proba(double *X, double *theta, double *y, size_t m, size_t n) { double *yd; double *Xd; double *thetad; hipMallocManaged(&yd, sizeof(double) * m); hipMalloc(&Xd, sizeof(double) * m * n); hipMalloc(&thetad, sizeof(double) * n); hipMemcpy((void*) Xd, (void*) X, sizeof(double) * m * n, hipMemcpyHostToDevice); hipMemcpy((void*) thetad, (void*) theta, sizeof(double) * n, hipMemcpyHostToDevice); //MatrixMul<<<m, n>>>(Xd, thetad, yd, n, m, 1, n); hipLaunchKernelGGL(( vec_dot_mat), dim3(GRIDSIZE(m)), dim3(BLOCKSIZE), 0, 0, Xd, thetad, yd, m, n); hipDeviceSynchronize(); hipLaunchKernelGGL(( vec_sigmoid), dim3(GRIDSIZE(m)), dim3(BLOCKSIZE), 0, 0, yd, yd, 1, m); hipDeviceSynchronize(); hipMemcpy((void*) y, yd, sizeof(double) * m, hipMemcpyDeviceToHost); hipFree(Xd); hipFree(thetad); hipFree(yd); } } int main(void) { // Testing with the house_data dataset. 
label is y >= y.mean() int m = 21613, n = 8; double *X = (double*) malloc(sizeof(double) * m * n); double *y = (double*) malloc(sizeof(double) * m); double *theta = (double*) malloc(sizeof(double) * n); // Copy into memory memcpy(X, X_house, sizeof(double) * 21613 * 8); memcpy(y, y_house, sizeof(double) * 21613); // Call host function for fit fit(X, y, theta, 0.01, m, n, 100, false); printf("Theta after 100 iterations: "); for (int i = 0; i < n; i++) { printf("%f, ", theta[i]); } printf("\n"); printf("TEST: Asserting coeffs against known-good values. "); for (int i = 0; i < n; i++) { assert(abs(theta[i] - known_theta[i]) < 0.01); } printf("PASSED.\n"); double *yt = (double*) malloc(sizeof(double) * 21613); predict_proba(X, theta, yt, 21613, 8); int miss = 0; for (int i = 0; i < m; i++) { if (abs(yt[i] - known_yt[i]) > 0.01) miss++; } printf("TEST: %d of %d labels differ from known-good logit.\n", miss, m); printf("TEST: Scaling m (observations) (CSV):\n\nm,cpu_time\n"); size_t local_m = 21; for (int i = 0; i < 4; i++) { clock_t start = clock(); fit(X, y, theta, 0.01, local_m, n, 100, false); clock_t end = clock(); printf("%d,%f\n", local_m, ((double) (end - start)) / CLOCKS_PER_SEC); local_m *= 10; } free(X); free(y); free(theta); free(yt); }
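Aside: a plain-C reference for one step of the update that fit() above assembles from kernel launches, theta <- theta - (lr/m) * X^T (sigmoid(X theta) - y). The function name and the caller-provided scratch buffer are assumptions for illustration; the arithmetic mirrors the vec_dot_mat / vec_sigmoid / vec_scalar_mul / vec_add chain.

#include <math.h>
#include <stddef.h>

/* One full-batch gradient-descent step, evaluated with the *current* theta;
 * g is caller-provided scratch of length n (hypothetical convention). */
void fit_step_host(const double *X,   /* m x n, row-major */
                   const double *y,   /* m labels in {0, 1} */
                   double *theta,     /* n coefficients, updated in place */
                   double *g,         /* scratch gradient, length n */
                   double lr, size_t m, size_t n)
{
    for (size_t j = 0; j < n; j++) g[j] = 0.0;
    for (size_t i = 0; i < m; i++) {
        double z = 0.0;                               /* z = dot(X[i], theta) */
        for (size_t k = 0; k < n; k++) z += X[i * n + k] * theta[k];
        double h = 1.0 / (1.0 + exp(-z));             /* h = sigmoid(z)      */
        for (size_t j = 0; j < n; j++)
            g[j] += X[i * n + j] * (h - y[i]);        /* g = X^T (h - y)     */
    }
    for (size_t j = 0; j < n; j++)
        theta[j] -= (lr / (double)m) * g[j];          /* theta -= lr/m * g   */
}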
9d7a7afd424de95b37399226f94ad037d23c70d5.cu
#include "cuda_runtime.h" #include "vec_kernels.cuh" #include "math.h" #include "data.h" #include <cstddef> #include <cstdlib> #include <cassert> #include <iostream> #include <ctime> #define BLOCKSIZE 1024 #define GRIDSIZE(d) (((d) + ((BLOCKSIZE) - 1)) / (BLOCKSIZE)) extern "C" { void fit(double *X, double *y, double *theta, double lr, size_t m, size_t n, size_t n_iter, bool from_py) { double *Xt, *Xd, *yd, *thetad; cudaMalloc(&Xt, sizeof(double) * n * m); cudaMalloc(&Xd, sizeof(double) * n * m); cudaMemcpy(Xd, X, sizeof(double) * n * m, cudaMemcpyHostToDevice); cudaMalloc(&yd, sizeof(double) * m); cudaMemcpy(yd, y, sizeof(double) * m, cudaMemcpyHostToDevice); cudaMalloc(&thetad, sizeof(double) * n); cudaMemcpy(thetad, theta, sizeof(double) * n, cudaMemcpyHostToDevice); double *z, *h, *g; cudaMalloc(&z, sizeof(double) * m); cudaMalloc(&h, sizeof(double) * m); cudaMalloc(&g, sizeof(double) * n); if (from_py) { cudaMemcpy(Xt, X, sizeof(double) * m * n, cudaMemcpyHostToDevice); mat_transpose<<<GRIDSIZE(m*n), BLOCKSIZE>>>(Xd, Xd, n, m); cudaDeviceSynchronize(); } else { mat_transpose<<<GRIDSIZE(m*n), BLOCKSIZE>>>(Xd, Xt, m, n); cudaDeviceSynchronize(); } for (size_t i = 0; i < n_iter; i++) { // dot(X, theta) vec_dot_mat<<<GRIDSIZE(m), BLOCKSIZE>>>(Xd, thetad, z, m, n); // h = sigm(z) vec_sigmoid<<<GRIDSIZE(m), BLOCKSIZE>>>(z, h, 1, m); // h = -h vec_scalar_mul<<<GRIDSIZE(m), BLOCKSIZE>>>(h, h, -1.0, 1, m); // h = y - h vec_add<<<GRIDSIZE(m), BLOCKSIZE>>>(h, yd, h, 1, m); // h = -(y - h) = h - y vec_scalar_mul<<<GRIDSIZE(m), BLOCKSIZE>>>(h, h, -1.0, 1, m); // g = dot(Xt, h) vec_dot_mat<<<GRIDSIZE(n), BLOCKSIZE>>>(Xt, h, g, n, m); // g = -(g*lr) / m vec_scalar_mul<<<GRIDSIZE(n), BLOCKSIZE>>>(g, g, -(lr / m), 1, n); // theta = theta + (-g) = theta - g vec_add<<<GRIDSIZE(n), BLOCKSIZE>>>(thetad, g, thetad, 1, n); } cudaDeviceSynchronize(); cudaFree(z); cudaFree(h); cudaFree(g); cudaFree(Xd); cudaFree(Xt); cudaFree(yd); cudaMemcpy(theta, thetad, sizeof(double) * n, cudaMemcpyDeviceToHost); cudaFree(thetad); } void predict_proba(double *X, double *theta, double *y, size_t m, size_t n) { double *yd; double *Xd; double *thetad; cudaMallocManaged(&yd, sizeof(double) * m); cudaMalloc(&Xd, sizeof(double) * m * n); cudaMalloc(&thetad, sizeof(double) * n); cudaMemcpy((void*) Xd, (void*) X, sizeof(double) * m * n, cudaMemcpyHostToDevice); cudaMemcpy((void*) thetad, (void*) theta, sizeof(double) * n, cudaMemcpyHostToDevice); //MatrixMul<<<m, n>>>(Xd, thetad, yd, n, m, 1, n); vec_dot_mat<<<GRIDSIZE(m), BLOCKSIZE>>>(Xd, thetad, yd, m, n); cudaDeviceSynchronize(); vec_sigmoid<<<GRIDSIZE(m), BLOCKSIZE>>>(yd, yd, 1, m); cudaDeviceSynchronize(); cudaMemcpy((void*) y, yd, sizeof(double) * m, cudaMemcpyDeviceToHost); cudaFree(Xd); cudaFree(thetad); cudaFree(yd); } } int main(void) { // Testing with the house_data dataset. label is y >= y.mean() int m = 21613, n = 8; double *X = (double*) malloc(sizeof(double) * m * n); double *y = (double*) malloc(sizeof(double) * m); double *theta = (double*) malloc(sizeof(double) * n); // Copy into memory memcpy(X, X_house, sizeof(double) * 21613 * 8); memcpy(y, y_house, sizeof(double) * 21613); // Call host function for fit fit(X, y, theta, 0.01, m, n, 100, false); printf("Theta after 100 iterations: "); for (int i = 0; i < n; i++) { printf("%f, ", theta[i]); } printf("\n"); printf("TEST: Asserting coeffs against known-good values. 
"); for (int i = 0; i < n; i++) { assert(abs(theta[i] - known_theta[i]) < 0.01); } printf("PASSED.\n"); double *yt = (double*) malloc(sizeof(double) * 21613); predict_proba(X, theta, yt, 21613, 8); int miss = 0; for (int i = 0; i < m; i++) { if (abs(yt[i] - known_yt[i]) > 0.01) miss++; } printf("TEST: %d of %d labels differ from known-good logit.\n", miss, m); printf("TEST: Scaling m (observations) (CSV):\n\nm,cpu_time\n"); size_t local_m = 21; for (int i = 0; i < 4; i++) { clock_t start = clock(); fit(X, y, theta, 0.01, local_m, n, 100, false); clock_t end = clock(); printf("%d,%f\n", local_m, ((double) (end - start)) / CLOCKS_PER_SEC); local_m *= 10; } free(X); free(y); free(theta); free(yt); }
e09759e8a2ff3646ce438a259e88b7136fb5ac39.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //============================================================================ // UPDATE //============================================================================ // 14 APR 2011 Lukasz G. Szafaryn #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <stdbool.h> // (in path known to compiler) needed by true/false #include <omp.h> // Helper functions #include "helper_cuda.h" #include "helper_string.h" #include "half.hpp" #include <hip/hip_fp16.h> #ifdef LOGS #include "log_helper.h" #endif //============================================================================= // DEFINE / INCLUDE //============================================================================= #define NUMBER_PAR_PER_BOX 192 // keep this low to allow more blocks that share shared memory to run concurrently, code does not work for larger than 110, more speedup can be achieved with larger number and no shared memory used #define NUMBER_THREADS 192 // this should be roughly equal to NUMBER_PAR_PER_BOX for best performance // STABLE #define DOT(A,B) ((A.x)*(B.x)+(A.y)*(B.y)+(A.z)*(B.z)) #define H_DOT(A,B) (__hfma((A.x), (B.x), __hfma((A.y), (B.y), __hmul((A.z), (B.z))))) #define H2_DOT(A,B) (__hfma2((A.x), (B.x), __hfma2((A.y), (B.y), __hmul2((A.z), (B.z))))) //============================================================================= // STRUCTURES //============================================================================= typedef struct { half x, y, z; } THREE_VECTOR; typedef struct { half v, x, y, z; } FOUR_VECTOR; typedef struct { half2 x, y, z; } THREE_H2_VECTOR; typedef struct { half2 v, x, y, z; } FOUR_H2_VECTOR; typedef struct nei_str { // neighbor box int x, y, z; int number; long offset; } nei_str; typedef struct box_str { // home box int x, y, z; int number; long offset; // neighbor boxes int nn; nei_str nei[26]; } box_str; typedef struct par_str { half alpha; } par_str; typedef struct dim_str { // input arguments int cur_arg; int arch_arg; int cores_arg; int boxes1d_arg; // system memory long number_boxes; long box_mem; long space_elem; long space_mem; long space_mem2; } dim_str; // Returns the current system time in microseconds /*long long get_time() { struct timeval tv; gettimeofday(&tv, NULL); return (tv.tv_sec * 1000000) + tv.tv_usec; }*/ void usage(int argc, char** argv) { printf("Usage: %s -boxes=N [-generate] [-input_distances=<path>] [-input_charges=<path>] [-output_gold=<path>] [-iterations=N] [-streams=N] [-debug] [-verbose]\n", argv[0]); } void getParams(int argc, char** argv, int *boxes, int *generate, char **input_distances, char **input_charges, char **output_gold, int *iterations, int *verbose, int *fault_injection, int *nstreams) { if (argc<2) { usage(argc, argv); exit(EXIT_FAILURE); } *generate = 0; *iterations = 1000000; *nstreams = 1; *fault_injection = 0; *verbose = 0; if (checkCmdLineFlag(argc, (const char **)argv, "boxes")) { *boxes = getCmdLineArgumentInt(argc, (const char **)argv, "boxes"); if (*boxes <= 0) { printf("Invalid input size given on the command-line: %d\n", *boxes); exit(EXIT_FAILURE); } } else { usage(argc, argv); exit(EXIT_FAILURE); } if (checkCmdLineFlag(argc, (const char **)argv, "generate")) { *generate = 1; printf(">> Output will be written to file. 
Only stream #0 output will be considered.\n"); } if (checkCmdLineFlag(argc, (const char **)argv, "input_distances")) { getCmdLineArgumentString(argc, (const char **)argv, "input_distances", input_distances); } else { *input_distances = new char[100]; snprintf(*input_distances, 100, "hlava_distances_%i", *boxes); printf("Using default input_distances path: %s\n", *input_distances); } if (checkCmdLineFlag(argc, (const char **)argv, "input_charges")) { getCmdLineArgumentString(argc, (const char **)argv, "input_charges", input_charges); } else { *input_charges = new char[100]; snprintf(*input_charges, 100, "hlava_charges_%i", *boxes); printf("Using default input_charges path: %s\n", *input_charges); } if (checkCmdLineFlag(argc, (const char **)argv, "output_gold")) { getCmdLineArgumentString(argc, (const char **)argv, "output_gold", output_gold); } else { *output_gold = new char[100]; snprintf(*output_gold, 100, "hlava_gold_%i", *boxes); printf("Using default output_gold path: %s\n", *output_gold); } if (checkCmdLineFlag(argc, (const char **)argv, "iterations")) { *iterations = getCmdLineArgumentInt(argc, (const char **)argv, "iterations"); } if (checkCmdLineFlag(argc, (const char **)argv, "streams")) { *nstreams = getCmdLineArgumentInt(argc, (const char **)argv, "streams"); } if (checkCmdLineFlag(argc, (const char **)argv, "verbose")) { *verbose = 1; } if (checkCmdLineFlag(argc, (const char **)argv, "debug")) { *fault_injection = 1; printf("!! Will be injected an input error\n"); } } //----------------------------------------------------------------------------- // plasmaKernel_gpu_2 //----------------------------------------------------------------------------- __global__ void kernel_gpu_cuda(par_str d_par_gpu, dim_str d_dim_gpu, box_str* d_box_gpu, FOUR_VECTOR* d_rv_gpu, half* d_qv_gpu, FOUR_VECTOR* d_fv_gpu) { //--------------------------------------------------------------------- // THREAD PARAMETERS //--------------------------------------------------------------------- int bx = blockIdx.x; // get current horizontal block index (0-n) int tx = threadIdx.x; // get current horizontal thread index (0-n) int wtx = tx; //--------------------------------------------------------------------- // DO FOR THE NUMBER OF BOXES //--------------------------------------------------------------------- if(bx<d_dim_gpu.number_boxes) { //------------------------------------------------------------- // Extract input parameters //------------------------------------------------------------- // parameters half a2 = __hmul(__hmul(__float2half(2.0), d_par_gpu.alpha), d_par_gpu.alpha); half2 h2_a2 = __half2half2(a2); // home box int first_i; FOUR_VECTOR* rA; FOUR_VECTOR* fA; FOUR_H2_VECTOR h2_fA[200]; __shared__ FOUR_H2_VECTOR h2_rA_shared[200]; // nei box int pointer; int k = 0; int first_j; FOUR_VECTOR* rB; half* qB; int j = 0; __shared__ FOUR_H2_VECTOR h2_rB_shared[100]; __shared__ half2 h2_qB_shared[100]; // common half2 r2; half2 u2; half2 vij; half2 fs; half2 fxij; half2 fyij; half2 fzij; THREE_H2_VECTOR d; //------------------------------------------------------------- // Home box //------------------------------------------------------------- //------------------------------------------------------------- // Setup parameters //------------------------------------------------------------- // home box - box parameters first_i = d_box_gpu[bx].offset; // home box - distance, force, charge and type parameters rA = &d_rv_gpu[first_i]; fA = &d_fv_gpu[first_i]; 
//------------------------------------------------------------- // Copy to shared memory //------------------------------------------------------------- // home box - shared memory - performs a redundant HALF to HALF2 broadcast: both lanes of every half2 hold the same home-box value while(wtx<NUMBER_PAR_PER_BOX) { h2_rA_shared[wtx].x = __half2half2(rA[wtx].x); h2_rA_shared[wtx].y = __half2half2(rA[wtx].y); h2_rA_shared[wtx].z = __half2half2(rA[wtx].z); h2_rA_shared[wtx].v = __half2half2(rA[wtx].v); h2_fA[wtx].x = __half2half2(fA[wtx].x); h2_fA[wtx].y = __half2half2(fA[wtx].y); h2_fA[wtx].z = __half2half2(fA[wtx].z); h2_fA[wtx].v = __half2half2(fA[wtx].v); wtx = wtx + NUMBER_THREADS; } wtx = tx; // synchronize threads - not needed, but just to be safe __syncthreads(); //------------------------------------------------------------- // nei box loop //------------------------------------------------------------- // loop over neighboring boxes of home box for (k=0; k<(1+d_box_gpu[bx].nn); k++) { //--------------------------------------------- // nei box - get pointer to the right box //--------------------------------------------- if(k==0) { pointer = bx; // set first box to be processed to home box } else { // remaining boxes are nei boxes pointer = d_box_gpu[bx].nei[k-1].number; } //----------------------------------------------------- // Setup parameters //----------------------------------------------------- // nei box - box parameters first_j = d_box_gpu[pointer].offset; // nei box - distance, (force), charge and (type) parameters rB = &d_rv_gpu[first_j]; qB = &d_qv_gpu[first_j]; //----------------------------------------------------- // Setup parameters //----------------------------------------------------- // nei box - shared memory - HALF2 packing: slot wtx carries particles 2*wtx and 2*wtx+1 in its two lanes; only the first NUMBER_PAR_PER_BOX/2 work items store, so every shared slot has exactly one writer and all loads stay inside the current box while(wtx<NUMBER_PAR_PER_BOX/2) { h2_rB_shared[wtx].x.x = rB[2*wtx + 0].x; h2_rB_shared[wtx].x.y = rB[2*wtx + 1].x; h2_rB_shared[wtx].y.x = rB[2*wtx + 0].y; h2_rB_shared[wtx].y.y = rB[2*wtx + 1].y; h2_rB_shared[wtx].z.x = rB[2*wtx + 0].z; h2_rB_shared[wtx].z.y = rB[2*wtx + 1].z; h2_rB_shared[wtx].v.x = rB[2*wtx + 0].v; h2_rB_shared[wtx].v.y = rB[2*wtx + 1].v; h2_qB_shared[wtx].x = qB[2*wtx + 0]; h2_qB_shared[wtx].y = qB[2*wtx + 1]; wtx = wtx + NUMBER_THREADS; } wtx = tx; // synchronize threads because in next section each thread accesses data brought in by different threads here __syncthreads(); //----------------------------------------------------- // Calculation //----------------------------------------------------- // Common FOUR_H2_VECTOR h2_fA_local; // loop for the number of particles in the home box // for (int i=0; i<nTotal_i; i++){ while(wtx<NUMBER_PAR_PER_BOX) { h2_fA_local.x = __float2half2_rn(0.0); h2_fA_local.y = __float2half2_rn(0.0); h2_fA_local.z = __float2half2_rn(0.0); h2_fA_local.v = __float2half2_rn(0.0); // loop for the number of particles in the current nei box for (j=0; j<NUMBER_PAR_PER_BOX/2; j++) { // Convert input vars from HALF to HALF2 for local work // r2 = (half)h2_rA_shared[wtx].v + (half)h2_rB_shared[j].v - H_DOT((half)h2_rA_shared[wtx],(half)h2_rB_shared[j]); r2 = __hsub2(__hadd2(h2_rA_shared[wtx].v, h2_rB_shared[j].v), H2_DOT(h2_rA_shared[wtx], h2_rB_shared[j])); // u2 = a2*r2; u2 = __hmul2(h2_a2, r2); // vij= exp(-u2); vij= h2exp(__hneg2(u2)); // fs = 2*vij; fs = __hmul2(__float2half2_rn(2.0), vij); // d.x = (half)h2_rA_shared[wtx].x - (half)h2_rB_shared[j].x; d.x =
__hsub2(h2_rA_shared[wtx].x, h2_rB_shared[j].x); // fxij=fs*d.x; fxij=__hmul2(fs, d.x); // d.y = (half)h2_rA_shared[wtx].y - (half)h2_rB_shared[j].y; d.y = __hsub2(h2_rA_shared[wtx].y, h2_rB_shared[j].y); // fyij=fs*d.y; fyij=__hmul2(fs, d.y); // d.z = (half)h2_rA_shared[wtx].z - (half)h2_rB_shared[j].z; d.z = __hsub2(h2_rA_shared[wtx].z, h2_rB_shared[j].z); // fzij=fs*d.z; fzij=__hmul2(fs, d.z); // fA[wtx].v += (half)((half)h2_qB_shared[j]*vij); h2_fA_local.v = __hfma2(h2_qB_shared[j], vij, h2_fA_local.v); // fA[wtx].x += (half)((half)h2_qB_shared[j]*fxij); h2_fA_local.x = __hfma2(h2_qB_shared[j], fxij, h2_fA_local.x); // fA[wtx].y += (half)((half)h2_qB_shared[j]*fyij); h2_fA_local.y = __hfma2(h2_qB_shared[j], fyij, h2_fA_local.y); // fA[wtx].z += (half)((half)h2_qB_shared[j]*fzij); h2_fA_local.z = __hfma2(h2_qB_shared[j], fzij, h2_fA_local.z); } // Copy back data from local memory to global memory fA[wtx].x = __hadd(fA[wtx].x, __hadd(h2_fA_local.x.x, h2_fA_local.x.y)); fA[wtx].y = __hadd(fA[wtx].y, __hadd(h2_fA_local.y.x, h2_fA_local.y.y)); fA[wtx].z = __hadd(fA[wtx].z, __hadd(h2_fA_local.z.x, h2_fA_local.z.y)); fA[wtx].v = __hadd(fA[wtx].v, __hadd(h2_fA_local.v.x, h2_fA_local.v.y)); // increment work thread index wtx = wtx + NUMBER_THREADS; } // reset work index wtx = tx; // synchronize after finishing force contributions from current nei box not to cause conflicts when starting next box __syncthreads(); //----------------------------------------------------------------------------------------------------------------------------------140 // Calculation END //----------------------------------------------------------------------------------------------------------------------------------140 } //------------------------------------------------------------------------------------------------------------------------------------------------------160 // nei box loop END //------------------------------------------------------------------------------------------------------------------------------------------------------160 } } double mysecond() { struct timeval tp; struct timezone tzp; int i = gettimeofday(&tp,&tzp); return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); } void generateInput(dim_str dim_cpu, char *input_distances, FOUR_VECTOR **rv_cpu, char *input_charges, half_float::half **qv_cpu) { // random generator seed set to random value - time in this case FILE *fp; int i; srand(time(NULL)); // input (distances) if( (fp = fopen(input_distances, "wb" )) == 0 ) { printf( "The file 'input_distances' was not opened\n" ); #ifdef LOGS log_error_detail("The file 'input_distances' was not opened"); end_log_file(); #endif exit(EXIT_FAILURE); } *rv_cpu = (FOUR_VECTOR*)malloc(dim_cpu.space_mem); for(i=0; i<dim_cpu.space_elem; i=i+1) { half_float::half tempValue((rand()%10 + 1) / 10.0); // get a number in the range 0.1 - 1.0 tempValue = half_float::half((rand()%10 + 1) / 10.0); (*rv_cpu)[i].v = *((half*)&tempValue); fwrite(&((*rv_cpu)[i].v), 1, sizeof(half), fp); // get a number in the range 0.1 - 1.0 tempValue = half_float::half((rand()%10 + 1) / 10.0); (*rv_cpu)[i].x = *((half*)&tempValue); fwrite(&((*rv_cpu)[i].x), 1, sizeof(half), fp); // get a number in the range 0.1 - 1.0 tempValue = half_float::half((rand()%10 + 1) / 10.0); (*rv_cpu)[i].y = *((half*)&tempValue); fwrite(&((*rv_cpu)[i].y), 1, sizeof(half), fp); // get a number in the range 0.1 - 1.0 tempValue = half_float::half((rand()%10 + 1) / 10.0); (*rv_cpu)[i].z = *((half*)&tempValue); fwrite(&((*rv_cpu)[i].z), 1, sizeof(half), 
fp); } fclose(fp); // input (charge) if( (fp = fopen(input_charges, "wb" )) == 0 ) { printf( "The file 'input_charges' was not opened\n" ); #ifdef LOGS log_error_detail("The file 'input_charges' was not opened"); end_log_file(); #endif exit(EXIT_FAILURE); } *qv_cpu = (half_float::half*)malloc(dim_cpu.space_mem2); for(i=0; i<dim_cpu.space_elem; i=i+1) { // get a number in the range 0.1 - 1.0 half_float::half tempValue((rand()%10 + 1) / 10.0); (*qv_cpu)[i] = tempValue; fwrite(&((*qv_cpu)[i]), 1, sizeof(half), fp); } fclose(fp); } void readInput(dim_str dim_cpu, char *input_distances, FOUR_VECTOR **rv_cpu, char *input_charges, half_float::half **qv_cpu, int fault_injection) { FILE *fp; int i; size_t return_value[4]; // input (distances) if( (fp = fopen(input_distances, "rb" )) == 0 ) { printf( "The file 'input_distances' was not opened\n" ); #ifdef LOGS log_error_detail("The file 'input_distances' was not opened"); end_log_file(); #endif exit(EXIT_FAILURE); } *rv_cpu = (FOUR_VECTOR*)malloc(dim_cpu.space_mem); if(*rv_cpu == NULL) { printf("error rv_cpu malloc\n"); #ifdef LOGS log_error_detail("error rv_cpu malloc"); end_log_file(); #endif exit(1); } for(i=0; i<dim_cpu.space_elem; i=i+1) { return_value[0] = fread(&((*rv_cpu)[i].v), 1, sizeof(half), fp); return_value[1] = fread(&((*rv_cpu)[i].x), 1, sizeof(half), fp); return_value[2] = fread(&((*rv_cpu)[i].y), 1, sizeof(half), fp); return_value[3] = fread(&((*rv_cpu)[i].z), 1, sizeof(half), fp); if (return_value[0] == 0 || return_value[1] == 0 || return_value[2] == 0 || return_value[3] == 0) { printf("error reading rv_cpu from file\n"); #ifdef LOGS log_error_detail("error reading rv_cpu from file"); end_log_file(); #endif exit(1); } } fclose(fp); // input (charge) if( (fp = fopen(input_charges, "rb" )) == 0 ) { printf( "The file 'input_charges' was not opened\n" ); #ifdef LOGS log_error_detail("The file 'input_charges' was not opened"); end_log_file(); #endif exit(EXIT_FAILURE); } *qv_cpu = (half_float::half*)malloc(dim_cpu.space_mem2); if(*qv_cpu == NULL) { printf("error qv_cpu malloc\n"); #ifdef LOGS log_error_detail("error qv_cpu malloc"); end_log_file(); #endif exit(1); } for(i=0; i<dim_cpu.space_elem; i=i+1) { return_value[0] = fread(&((*qv_cpu)[i]), 1, sizeof(half), fp); if (return_value[0] == 0) { printf("error reading qv_cpu from file\n"); #ifdef LOGS log_error_detail("error reading qv_cpu from file"); end_log_file(); #endif exit(1); } } fclose(fp); // =============== Fault injection if (fault_injection) { half_float::half tempValue(0.732637263); (*qv_cpu)[2] = tempValue; // must be in range 0.1 - 1.0 printf("!!> Fault injection: qv_cpu[2]=%f\n", (float)((*qv_cpu)[2])); } // ======================== } void readGold(dim_str dim_cpu, char *output_gold, FOUR_VECTOR **fv_cpu_GOLD) { FILE *fp; size_t return_value[4]; int i; if( (fp = fopen(output_gold, "rb" )) == 0 ) { printf( "The file 'output_forces' was not opened\n" ); exit(EXIT_FAILURE); } *fv_cpu_GOLD = (FOUR_VECTOR*)malloc(dim_cpu.space_mem); if(*fv_cpu_GOLD == NULL) { printf("error fv_cpu_GOLD malloc\n"); #ifdef LOGS log_error_detail("error fv_cpu_GOLD malloc"); end_log_file(); #endif exit(1); } for(i=0; i<dim_cpu.space_elem; i=i+1) { return_value[0] = fread(&((*fv_cpu_GOLD)[i].v), 1, sizeof(half), fp); return_value[1] = fread(&((*fv_cpu_GOLD)[i].x), 1, sizeof(half), fp); return_value[2] = fread(&((*fv_cpu_GOLD)[i].y), 1, sizeof(half), fp); return_value[3] = fread(&((*fv_cpu_GOLD)[i].z), 1, sizeof(half), fp); if
(return_value[0] == 0 || return_value[1] == 0 || return_value[2] == 0 || return_value[3] == 0) { printf("error reading fv_cpu_GOLD from file\n"); #ifdef LOGS log_error_detail("error reading fv_cpu_GOLD from file"); end_log_file(); #endif exit(1); } } fclose(fp); } void writeGold(dim_str dim_cpu, char *output_gold, FOUR_VECTOR **fv_cpu) { FILE *fp; int i; if( (fp = fopen(output_gold, "wb" )) == 0 ) { printf( "The file 'output_forces' was not opened\n" ); exit(EXIT_FAILURE); } int number_zeros = 0; for(i=0; i<dim_cpu.space_elem; i=i+1) { half_float::half tempValue; tempValue = *((half_float::half*)&((*fv_cpu)[i].v)); if(tempValue == 0.0 || isnan(tempValue)) number_zeros++; tempValue = *((half_float::half*)&((*fv_cpu)[i].x)); if(tempValue == 0.0 || isnan(tempValue)) number_zeros++; tempValue = *((half_float::half*)&((*fv_cpu)[i].y)); if(tempValue == 0.0 || isnan(tempValue)) number_zeros++; tempValue = *((half_float::half*)&((*fv_cpu)[i].z)); if(tempValue == 0.0 || isnan(tempValue)) number_zeros++; fwrite(&((*fv_cpu)[i].v), 1, sizeof(half), fp); fwrite(&((*fv_cpu)[i].x), 1, sizeof(half), fp); fwrite(&((*fv_cpu)[i].y), 1, sizeof(half), fp); fwrite(&((*fv_cpu)[i].z), 1, sizeof(half), fp); } printf("Number of zeros/NaNs written on output: %d\n", number_zeros); fclose(fp); } //============================================================================= // MAIN FUNCTION //============================================================================= int main(int argc, char *argv []) { //===================================================================== // CPU/MCPU VARIABLES //===================================================================== // timer double timestamp; // counters int i, j, k, l, m, n; int iterations; int generate, verbose, fault_injection; // system memory par_str par_cpu; dim_str dim_cpu; box_str* box_cpu; FOUR_VECTOR* rv_cpu; half_float::half* qv_cpu; FOUR_VECTOR* fv_cpu; FOUR_VECTOR* fv_cpu_GOLD; int nh; int nstreams, streamIdx; hipError_t cuda_error; const char *error_string; char *input_distances, *input_charges, *output_gold; int number_nn = 0; //===================================================================== // CHECK INPUT ARGUMENTS //===================================================================== getParams(argc, argv, &dim_cpu.boxes1d_arg, &generate, &input_distances, &input_charges, &output_gold, &iterations, &verbose, &fault_injection, &nstreams); char test_info[200]; snprintf(test_info, 200, "type:half-precision streams:%d boxes:%d block_size:%d", nstreams, dim_cpu.boxes1d_arg, NUMBER_THREADS); printf("%s\n", test_info); #ifdef LOGS if (!generate) start_log_file("cudaHLavaMD", test_info); #endif //===================================================================== // INPUTS //===================================================================== half_float::half tempValue(0.5); par_cpu.alpha = *((half*)&tempValue); //===================================================================== // DIMENSIONS //===================================================================== // total number of boxes dim_cpu.number_boxes = dim_cpu.boxes1d_arg * dim_cpu.boxes1d_arg * dim_cpu.boxes1d_arg; // total number of particles in the simulation space dim_cpu.space_elem = dim_cpu.number_boxes * NUMBER_PAR_PER_BOX; dim_cpu.space_mem = dim_cpu.space_elem * sizeof(FOUR_VECTOR); dim_cpu.space_mem2 = dim_cpu.space_elem * sizeof(half); // box array dim_cpu.box_mem = dim_cpu.number_boxes * sizeof(box_str);
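// sizing example: -boxes=10 gives number_boxes = 10*10*10 = 1000, space_elem = 1000*192 = 192000 particles, space_mem = 192000*sizeof(FOUR_VECTOR) = 192000*8 B (a FOUR_VECTOR is four 2-byte halves) = ~1.5 MB of positions/forces, and space_mem2 = 192000*2 B = ~0.4 MB of charges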
//===================================================================== // SYSTEM MEMORY //===================================================================== //===================================================================== // BOX //===================================================================== // allocate boxes box_cpu = (box_str*)malloc(dim_cpu.box_mem); if(box_cpu == NULL) { printf("error box_cpu malloc\n"); #ifdef LOGS if (!generate) log_error_detail("error box_cpu malloc"); end_log_file(); #endif exit(1); } // initialize number of home boxes nh = 0; // home boxes in z direction for(i=0; i<dim_cpu.boxes1d_arg; i++) { // home boxes in y direction for(j=0; j<dim_cpu.boxes1d_arg; j++) { // home boxes in x direction for(k=0; k<dim_cpu.boxes1d_arg; k++) { // current home box box_cpu[nh].x = k; box_cpu[nh].y = j; box_cpu[nh].z = i; box_cpu[nh].number = nh; box_cpu[nh].offset = nh * NUMBER_PAR_PER_BOX; // initialize number of neighbor boxes box_cpu[nh].nn = 0; // neighbor boxes in z direction for(l=-1; l<2; l++) { // neighbor boxes in y direction for(m=-1; m<2; m++) { // neighbor boxes in x direction for(n=-1; n<2; n++) { // check if (this neighbor exists) and (it is not the same as home box) if( (((i+l)>=0 && (j+m)>=0 && (k+n)>=0)==true && ((i+l)<dim_cpu.boxes1d_arg && (j+m)<dim_cpu.boxes1d_arg && (k+n)<dim_cpu.boxes1d_arg)==true) && (l==0 && m==0 && n==0)==false ) { // current neighbor box box_cpu[nh].nei[box_cpu[nh].nn].x = (k+n); box_cpu[nh].nei[box_cpu[nh].nn].y = (j+m); box_cpu[nh].nei[box_cpu[nh].nn].z = (i+l); box_cpu[nh].nei[box_cpu[nh].nn].number = (box_cpu[nh].nei[box_cpu[nh].nn].z * dim_cpu.boxes1d_arg * dim_cpu.boxes1d_arg) + (box_cpu[nh].nei[box_cpu[nh].nn].y * dim_cpu.boxes1d_arg) + box_cpu[nh].nei[box_cpu[nh].nn].x; box_cpu[nh].nei[box_cpu[nh].nn].offset = box_cpu[nh].nei[box_cpu[nh].nn].number * NUMBER_PAR_PER_BOX; // increment neighbor box box_cpu[nh].nn = box_cpu[nh].nn + 1; number_nn += box_cpu[nh].nn; } } // neighbor boxes in x direction } // neighbor boxes in y direction } // neighbor boxes in z direction // increment home box nh = nh + 1; } // home boxes in x direction } // home boxes in y direction } // home boxes in z direction //===================================================================== // PARAMETERS, DISTANCE, CHARGE AND FORCE //===================================================================== if (generate) { generateInput(dim_cpu, input_distances, &rv_cpu, input_charges, &qv_cpu); } else { readInput(dim_cpu, input_distances, &rv_cpu, input_charges, &qv_cpu, fault_injection); readGold(dim_cpu, output_gold, &fv_cpu_GOLD); } //===================================================================== // EXECUTION PARAMETERS //===================================================================== dim3 threads; dim3 blocks; blocks.x = dim_cpu.number_boxes; blocks.y = 1; // define the number of threads in the block threads.x = NUMBER_THREADS; threads.y = 1; hipStream_t *streams = (hipStream_t *) malloc(nstreams * sizeof(hipStream_t)); //LOOP START int loop; for(loop=0; loop<iterations; loop++) { if (verbose) { printf("[Iteration #%i]=====================================\n", loop); fflush(stdout); } double globaltimer = mysecond(); timestamp = mysecond(); // prepare host memory to receive kernel output // output (forces) fv_cpu = (FOUR_VECTOR*)malloc(dim_cpu.space_mem); if(fv_cpu == NULL) { printf("error fv_cpu malloc\n"); #ifdef LOGS if (!generate) log_error_detail("error fv_cpu malloc"); end_log_file(); #endif exit(1); } for(i=0; 
i<dim_cpu.space_elem; i=i+1) { // set to 0, because kernels keeps adding to initial value half_float::half zeroValue(0.0); fv_cpu[i].v = *((half*)&zeroValue); fv_cpu[i].x = *((half*)&zeroValue); fv_cpu[i].y = *((half*)&zeroValue); fv_cpu[i].z = *((half*)&zeroValue); } //===================================================================== // GPU_CUDA //===================================================================== //===================================================================== // VARIABLES //===================================================================== box_str* d_box_gpu[nstreams]; FOUR_VECTOR* d_rv_gpu[nstreams]; half* d_qv_gpu[nstreams]; FOUR_VECTOR* d_fv_gpu[nstreams]; //===================================================================== // GPU SETUP //===================================================================== for (streamIdx = 0; streamIdx < nstreams; streamIdx++) { hipStreamCreateWithFlags(&(streams[streamIdx]), hipStreamNonBlocking); //================================================== // boxes //================================================== cuda_error = hipMalloc( (void **)&(d_box_gpu[streamIdx]), dim_cpu.box_mem); error_string = hipGetErrorString(cuda_error); if(strcmp(error_string, "no error") != 0) { printf("error d_box_gpu hipMalloc\n"); #ifdef LOGS if (!generate) log_error_detail("error d_box_gpu cudamalloc"); end_log_file(); #endif exit(1); } //================================================== // rv //================================================== cuda_error = hipMalloc( (void **)&(d_rv_gpu[streamIdx]), dim_cpu.space_mem); error_string = hipGetErrorString(cuda_error); if(strcmp(error_string, "no error") != 0) { printf("error d_rv_gpu hipMalloc\n"); #ifdef LOGS if (!generate) log_error_detail("error d_box_gpu cudamalloc"); end_log_file(); #endif exit(1); } //================================================== // qv //================================================== cuda_error = hipMalloc( (void **)&(d_qv_gpu[streamIdx]), dim_cpu.space_mem2); error_string = hipGetErrorString(cuda_error); if(strcmp(error_string, "no error") != 0) { printf("error d_qv_gpu hipMalloc\n"); #ifdef LOGS if (!generate) log_error_detail("error d_box_gpu cudamalloc"); end_log_file(); #endif exit(1); } //================================================== // fv //================================================== cuda_error = hipMalloc( (void **)&(d_fv_gpu[streamIdx]), dim_cpu.space_mem); error_string = hipGetErrorString(cuda_error); if(strcmp(error_string, "no error") != 0) { printf("error d_fv_gpu hipMalloc\n"); #ifdef LOGS if (!generate) log_error_detail("error d_box_gpu cudamalloc"); end_log_file(); #endif exit(1); } //===================================================================== // GPU MEMORY COPY //===================================================================== //================================================== // boxes //================================================== cuda_error = hipMemcpy(d_box_gpu[streamIdx], box_cpu, dim_cpu.box_mem, hipMemcpyHostToDevice); error_string = hipGetErrorString(cuda_error); if(strcmp(error_string, "no error") != 0) { printf("error load d_boc_gpu\n"); #ifdef LOGS if (!generate) log_error_detail("error load d_box_gpu"); end_log_file(); #endif exit(1); } //================================================== // rv //================================================== cuda_error = hipMemcpy( d_rv_gpu[streamIdx], rv_cpu, dim_cpu.space_mem, hipMemcpyHostToDevice); error_string = hipGetErrorString(cuda_error); 
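// each stream owns a private copy of every input and output buffer, so the nstreams kernels launched below are fully independent replicas of the same computation, each checked against the same gold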
if(strcmp(error_string, "no error") != 0) { printf("error load d_rv_gpu\n"); #ifdef LOGS if (!generate) log_error_detail("error load d_box_gpu"); end_log_file(); #endif exit(1); } //================================================== // qv //================================================== cuda_error = hipMemcpy( d_qv_gpu[streamIdx], qv_cpu, dim_cpu.space_mem2, hipMemcpyHostToDevice); error_string = hipGetErrorString(cuda_error); if(strcmp(error_string, "no error") != 0) { printf("error load d_qv_gpu\n"); #ifdef LOGS if (!generate) log_error_detail("error load d_box_gpu"); end_log_file(); #endif exit(1); } //================================================== // fv //================================================== cuda_error = hipMemcpy( d_fv_gpu[streamIdx], fv_cpu, dim_cpu.space_mem, hipMemcpyHostToDevice); error_string = hipGetErrorString(cuda_error); if(strcmp(error_string, "no error") != 0) { printf("error load d_fv_gpu\n"); #ifdef LOGS if (!generate) log_error_detail("error load d_box_gpu"); end_log_file(); #endif exit(1); } } if (verbose) printf("[Iteration #%i] Setup prepare time: %.4fs\n", loop, mysecond()-timestamp); //===================================================================== // KERNEL //===================================================================== double kernel_time=mysecond(); #ifdef LOGS if (!generate) start_iteration(); #endif // launch kernel - all boxes for (streamIdx = 0; streamIdx < nstreams; streamIdx++) { hipLaunchKernelGGL(( kernel_gpu_cuda), dim3(blocks), dim3(threads), 0, streams[streamIdx], par_cpu, dim_cpu, \ d_box_gpu[streamIdx], d_rv_gpu[streamIdx], d_qv_gpu[streamIdx], d_fv_gpu[streamIdx]); checkCudaErrors( hipPeekAtLastError() ); } //printf("All kernels were commited.\n"); for (streamIdx = 0; streamIdx < nstreams; streamIdx++) { cuda_error = hipStreamSynchronize(streams[streamIdx]); error_string = hipGetErrorString(cuda_error); if(strcmp(error_string, "no error") != 0) { printf("error logic: %s\n",error_string); #ifdef LOGS if (!generate) log_error_detail("error logic:"); end_log_file(); #endif exit(1); } checkCudaErrors( hipPeekAtLastError() ); } #ifdef LOGS if (!generate) end_iteration(); #endif kernel_time = mysecond()-kernel_time; //===================================================================== // COMPARE OUTPUTS / WRITE GOLD //===================================================================== if (generate){ cuda_error = hipMemcpy( fv_cpu, d_fv_gpu[0], dim_cpu.space_mem, hipMemcpyDeviceToHost); error_string = hipGetErrorString(cuda_error); if(strcmp(error_string, "no error") != 0) { printf("error download fv_cpu\n"); exit(1); } writeGold(dim_cpu, output_gold, &fv_cpu); } else { // Check gold //int ea = 0; int thread_error = 0; int kernel_errors = 0; char error_detail[300]; timestamp = mysecond(); for (streamIdx = 0; streamIdx < nstreams; streamIdx++) { //===================================================================== // GPU MEMORY COPY BACK //===================================================================== cuda_error = hipMemcpy( fv_cpu, d_fv_gpu[streamIdx], dim_cpu.space_mem, hipMemcpyDeviceToHost); error_string = hipGetErrorString(cuda_error); if(strcmp(error_string, "no error") != 0) { printf("error download fv_cpu\n"); #ifdef LOGS if (!generate) log_error_detail("error download fv_cpu"); end_log_file(); #endif exit(1); } #pragma omp parallel for for(i=0; i<dim_cpu.space_elem; i=i+1) { if(*((half_float::half*)&(fv_cpu_GOLD[i].v)) != *((half_float::half*)&(fv_cpu[i].v))) { thread_error++; } 
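// the gold check is exact value equality on the half data (no epsilon tolerance): any numeric deviation in any component flags the particle as erroneous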
if(*((half_float::half*)&(fv_cpu_GOLD[i].x)) != *((half_float::half*)&(fv_cpu[i].x))) { thread_error++; } if(*((half_float::half*)&(fv_cpu_GOLD[i].y)) != *((half_float::half*)&(fv_cpu[i].y))) { thread_error++; } if(*((half_float::half*)&(fv_cpu_GOLD[i].z)) != *((half_float::half*)&(fv_cpu[i].z))) { thread_error++; } if (thread_error > 0) { #pragma omp critical { kernel_errors++; snprintf(error_detail, 300, "stream: %d, p: [%d], ea: %d, v_r: %1.16e, v_e: %1.16e, x_r: %1.16e, x_e: %1.16e, y_r: %1.16e, y_e: %1.16e, z_r: %1.16e, z_e: %1.16e\n", streamIdx, \ i, thread_error, (float)*((half_float::half*)&(fv_cpu[i].v)), (float)*((half_float::half*)&(fv_cpu_GOLD[i].v)), (float)*((half_float::half*)&(fv_cpu[i].x)), (float)*((half_float::half*)&(fv_cpu_GOLD[i].x)), (float)*((half_float::half*)&(fv_cpu[i].y)), (float)*((half_float::half*)&(fv_cpu_GOLD[i].y)), (float)*((half_float::half*)&(fv_cpu[i].z)), (float)*((half_float::half*)&(fv_cpu_GOLD[i].z))); if (kernel_errors<25) printf("ERROR: %s\n", error_detail); if (kernel_errors>=25 && kernel_errors % 25 == 0) printf("!"); #ifdef LOGS if (!generate) log_error_detail(error_detail); #endif thread_error = 0; } } } } #ifdef LOGS if (!generate) log_error_count(kernel_errors); #endif if (verbose) printf("[Iteration #%i] Gold check time: %f\n", loop, mysecond() - timestamp); } //================= PERF // iterate for each neighbor of a box (number_nn) double flop = number_nn; // The last for iterate NUMBER_PAR_PER_BOX times flop *= NUMBER_PAR_PER_BOX; // the last for uses 46 operations plus 2 exp() functions flop *=46; flop *= nstreams; double flops = (double)flop/kernel_time; double outputpersec = (double)dim_cpu.space_elem * 4 * nstreams / kernel_time; if (verbose) printf("[Iteration #%i] BOXES:%d BLOCK:%d OUTPUT/S:%.2f FLOPS:%.2f (GFLOPS:%.2f)\n", loop, dim_cpu.boxes1d_arg, NUMBER_THREADS, outputpersec, flops, flops/1000000000); if (verbose) printf("[Iteration #%i] kernel_time:%f\n", loop, kernel_time); //===================== printf("."); fflush(stdout); //===================================================================== // GPU MEMORY DEALLOCATION //===================================================================== for (streamIdx = 0; streamIdx < nstreams; streamIdx++) { hipFree(d_rv_gpu[streamIdx]); hipFree(d_qv_gpu[streamIdx]); hipFree(d_fv_gpu[streamIdx]); hipFree(d_box_gpu[streamIdx]); } //===================================================================== // SYSTEM MEMORY DEALLOCATION //===================================================================== free(fv_cpu); if (verbose) printf("[Iteration #%i] Elapsed time: %.4fs\n", loop, mysecond()-globaltimer); } if (!generate) free(fv_cpu_GOLD); free(rv_cpu); free(qv_cpu); free(box_cpu); printf("\n"); #ifdef LOGS if (!generate) end_log_file(); #endif return 0; }
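//=============================================================================
// EXAMPLE - HALF2 LANE PACKING
//=============================================================================
// Minimal self-contained sketch (an editor's illustration, not part of the
// benchmark; every name in it is invented) of the half2 idiom used by
// kernel_gpu_cuda above: two independent scalars travel in the two lanes of a
// half2, each __hfma2 performs two fused multiply-adds, and a final __hadd
// folds the packed accumulator back into one half. half2 arithmetic needs
// compute capability 5.3 or newer, e.g. nvcc -arch=sm_70.
#include <cuda_fp16.h>
#include <cstdio>

#define N_PAIRS 64 // 128 scalar products evaluated as 64 packed ones

__global__ void packed_dot_example(half *out) {
    half2 x = __floats2half2_rn(1.0f, 2.0f);  // lane .x = 1.0, lane .y = 2.0
    half2 y = __floats2half2_rn(0.5f, 0.25f); // lane .x = 0.5, lane .y = 0.25
    half2 acc = __float2half2_rn(0.0f);
    for (int j = 0; j < N_PAIRS; j++)
        acc = __hfma2(x, y, acc);             // two FMAs per instruction
    // horizontal add: fold the two lanes into a single scalar result
    *out = __hadd(__low2half(acc), __high2half(acc));
}

int main() {
    half *d_out, h_out;
    cudaMalloc(&d_out, sizeof(half));
    packed_dot_example<<<1, 1>>>(d_out);
    cudaMemcpy(&h_out, d_out, sizeof(half), cudaMemcpyDeviceToHost);
    printf("packed dot = %f (expected 64.0)\n", __half2float(h_out)); // 64*(1*0.5 + 2*0.25)
    cudaFree(d_out);
    return 0;
}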
e09759e8a2ff3646ce438a259e88b7136fb5ac39.cu
//============================================================================ // UPDATE //============================================================================ // 14 APR 2011 Lukasz G. Szafaryn #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <stdbool.h> // (in path known to compiler) needed by true/false #include <omp.h> // Helper functions #include "helper_cuda.h" #include "helper_string.h" #include "half.hpp" #include <cuda_fp16.h> #ifdef LOGS #include "log_helper.h" #endif //============================================================================= // DEFINE / INCLUDE //============================================================================= #define NUMBER_PAR_PER_BOX 192 // keep this low to allow more blocks that share shared memory to run concurrently, code does not work for larger than 110, more speedup can be achieved with larger number and no shared memory used #define NUMBER_THREADS 192 // this should be roughly equal to NUMBER_PAR_PER_BOX for best performance // STABLE #define DOT(A,B) ((A.x)*(B.x)+(A.y)*(B.y)+(A.z)*(B.z)) #define H_DOT(A,B) (__hfma((A.x), (B.x), __hfma((A.y), (B.y), __hmul((A.z), (B.z))))) #define H2_DOT(A,B) (__hfma2((A.x), (B.x), __hfma2((A.y), (B.y), __hmul2((A.z), (B.z))))) //============================================================================= // STRUCTURES //============================================================================= typedef struct { half x, y, z; } THREE_VECTOR; typedef struct { half v, x, y, z; } FOUR_VECTOR; typedef struct { half2 x, y, z; } THREE_H2_VECTOR; typedef struct { half2 v, x, y, z; } FOUR_H2_VECTOR; typedef struct nei_str { // neighbor box int x, y, z; int number; long offset; } nei_str; typedef struct box_str { // home box int x, y, z; int number; long offset; // neighbor boxes int nn; nei_str nei[26]; } box_str; typedef struct par_str { half alpha; } par_str; typedef struct dim_str { // input arguments int cur_arg; int arch_arg; int cores_arg; int boxes1d_arg; // system memory long number_boxes; long box_mem; long space_elem; long space_mem; long space_mem2; } dim_str; // Returns the current system time in microseconds /*long long get_time() { struct timeval tv; gettimeofday(&tv, NULL); return (tv.tv_sec * 1000000) + tv.tv_usec; }*/ void usage(int argc, char** argv) { printf("Usage: %s -boxes=N [-generate] [-input_distances=<path>] [-input_charges=<path>] [-output_gold=<path>] [-iterations=N] [-streams=N] [-debug] [-verbose]\n", argv[0]); } void getParams(int argc, char** argv, int *boxes, int *generate, char **input_distances, char **input_charges, char **output_gold, int *iterations, int *verbose, int *fault_injection, int *nstreams) { if (argc<2) { usage(argc, argv); exit(EXIT_FAILURE); } *generate = 0; *iterations = 1000000; *nstreams = 1; *fault_injection = 0; *verbose = 0; if (checkCmdLineFlag(argc, (const char **)argv, "boxes")) { *boxes = getCmdLineArgumentInt(argc, (const char **)argv, "boxes"); if (*boxes <= 0) { printf("Invalid input size given on the command-line: %d\n", *boxes); exit(EXIT_FAILURE); } } else { usage(argc, argv); exit(EXIT_FAILURE); } if (checkCmdLineFlag(argc, (const char **)argv, "generate")) { *generate = 1; printf(">> Output will be written to file. 
Only stream #0 output will be considered.\n"); } if (checkCmdLineFlag(argc, (const char **)argv, "input_distances")) { getCmdLineArgumentString(argc, (const char **)argv, "input_distances", input_distances); } else { *input_distances = new char[100]; snprintf(*input_distances, 100, "hlava_distances_%i", *boxes); printf("Using default input_distances path: %s\n", *input_distances); } if (checkCmdLineFlag(argc, (const char **)argv, "input_charges")) { getCmdLineArgumentString(argc, (const char **)argv, "input_charges", input_charges); } else { *input_charges = new char[100]; snprintf(*input_charges, 100, "hlava_charges_%i", *boxes); printf("Using default input_charges path: %s\n", *input_charges); } if (checkCmdLineFlag(argc, (const char **)argv, "output_gold")) { getCmdLineArgumentString(argc, (const char **)argv, "output_gold", output_gold); } else { *output_gold = new char[100]; snprintf(*output_gold, 100, "hlava_gold_%i", *boxes); printf("Using default output_gold path: %s\n", *output_gold); } if (checkCmdLineFlag(argc, (const char **)argv, "iterations")) { *iterations = getCmdLineArgumentInt(argc, (const char **)argv, "iterations"); } if (checkCmdLineFlag(argc, (const char **)argv, "streams")) { *nstreams = getCmdLineArgumentInt(argc, (const char **)argv, "streams"); } if (checkCmdLineFlag(argc, (const char **)argv, "verbose")) { *verbose = 1; } if (checkCmdLineFlag(argc, (const char **)argv, "debug")) { *fault_injection = 1; printf("!! An input error will be injected\n"); } } //----------------------------------------------------------------------------- // plasmaKernel_gpu_2 //----------------------------------------------------------------------------- __global__ void kernel_gpu_cuda(par_str d_par_gpu, dim_str d_dim_gpu, box_str* d_box_gpu, FOUR_VECTOR* d_rv_gpu, half* d_qv_gpu, FOUR_VECTOR* d_fv_gpu) { //--------------------------------------------------------------------- // THREAD PARAMETERS //--------------------------------------------------------------------- int bx = blockIdx.x; // get current horizontal block index (0-n) int tx = threadIdx.x; // get current horizontal thread index (0-n) int wtx = tx; //--------------------------------------------------------------------- // DO FOR THE NUMBER OF BOXES //--------------------------------------------------------------------- if(bx<d_dim_gpu.number_boxes) { //------------------------------------------------------------- // Extract input parameters //------------------------------------------------------------- // parameters half a2 = __hmul(__hmul(__float2half(2.0), d_par_gpu.alpha), d_par_gpu.alpha); half2 h2_a2 = __half2half2(a2); // home box int first_i; FOUR_VECTOR* rA; FOUR_VECTOR* fA; FOUR_H2_VECTOR h2_fA[200]; // per-thread staging copy of the forces: written by the copy loop below but never read afterwards __shared__ FOUR_H2_VECTOR h2_rA_shared[200]; // nei box int pointer; int k = 0; int first_j; FOUR_VECTOR* rB; half* qB; int j = 0; __shared__ FOUR_H2_VECTOR h2_rB_shared[100]; __shared__ half2 h2_qB_shared[100]; // common half2 r2; half2 u2; half2 vij; half2 fs; half2 fxij; half2 fyij; half2 fzij; THREE_H2_VECTOR d; //------------------------------------------------------------- // Home box //------------------------------------------------------------- //------------------------------------------------------------- // Setup parameters //------------------------------------------------------------- // home box - box parameters first_i = d_box_gpu[bx].offset; // home box - distance, force, charge and type parameters rA = &d_rv_gpu[first_i]; fA = &d_fv_gpu[first_i];
//------------------------------------------------------------- // Copy to shared memory //------------------------------------------------------------- // home box - shared memory - performs a redundant HALF to HALF2 broadcast: both lanes of every half2 hold the same home-box value while(wtx<NUMBER_PAR_PER_BOX) { h2_rA_shared[wtx].x = __half2half2(rA[wtx].x); h2_rA_shared[wtx].y = __half2half2(rA[wtx].y); h2_rA_shared[wtx].z = __half2half2(rA[wtx].z); h2_rA_shared[wtx].v = __half2half2(rA[wtx].v); h2_fA[wtx].x = __half2half2(fA[wtx].x); h2_fA[wtx].y = __half2half2(fA[wtx].y); h2_fA[wtx].z = __half2half2(fA[wtx].z); h2_fA[wtx].v = __half2half2(fA[wtx].v); wtx = wtx + NUMBER_THREADS; } wtx = tx; // synchronize threads - not needed, but just to be safe __syncthreads(); //------------------------------------------------------------- // nei box loop //------------------------------------------------------------- // loop over neighboring boxes of home box for (k=0; k<(1+d_box_gpu[bx].nn); k++) { //--------------------------------------------- // nei box - get pointer to the right box //--------------------------------------------- if(k==0) { pointer = bx; // set first box to be processed to home box } else { // remaining boxes are nei boxes pointer = d_box_gpu[bx].nei[k-1].number; } //----------------------------------------------------- // Setup parameters //----------------------------------------------------- // nei box - box parameters first_j = d_box_gpu[pointer].offset; // nei box - distance, (force), charge and (type) parameters rB = &d_rv_gpu[first_j]; qB = &d_qv_gpu[first_j]; //----------------------------------------------------- // Setup parameters //----------------------------------------------------- // nei box - shared memory - HALF2 packing: slot wtx carries particles 2*wtx and 2*wtx+1 in its two lanes; only the first NUMBER_PAR_PER_BOX/2 work items store, so every shared slot has exactly one writer and all loads stay inside the current box while(wtx<NUMBER_PAR_PER_BOX/2) { h2_rB_shared[wtx].x.x = rB[2*wtx + 0].x; h2_rB_shared[wtx].x.y = rB[2*wtx + 1].x; h2_rB_shared[wtx].y.x = rB[2*wtx + 0].y; h2_rB_shared[wtx].y.y = rB[2*wtx + 1].y; h2_rB_shared[wtx].z.x = rB[2*wtx + 0].z; h2_rB_shared[wtx].z.y = rB[2*wtx + 1].z; h2_rB_shared[wtx].v.x = rB[2*wtx + 0].v; h2_rB_shared[wtx].v.y = rB[2*wtx + 1].v; h2_qB_shared[wtx].x = qB[2*wtx + 0]; h2_qB_shared[wtx].y = qB[2*wtx + 1]; wtx = wtx + NUMBER_THREADS; } wtx = tx; // synchronize threads because in next section each thread accesses data brought in by different threads here __syncthreads(); //----------------------------------------------------- // Calculation //----------------------------------------------------- // Common FOUR_H2_VECTOR h2_fA_local; // loop for the number of particles in the home box // for (int i=0; i<nTotal_i; i++){ while(wtx<NUMBER_PAR_PER_BOX) { h2_fA_local.x = __float2half2_rn(0.0); h2_fA_local.y = __float2half2_rn(0.0); h2_fA_local.z = __float2half2_rn(0.0); h2_fA_local.v = __float2half2_rn(0.0); // loop for the number of particles in the current nei box for (j=0; j<NUMBER_PAR_PER_BOX/2; j++) { // Convert input vars from HALF to HALF2 for local work // r2 = (half)h2_rA_shared[wtx].v + (half)h2_rB_shared[j].v - H_DOT((half)h2_rA_shared[wtx],(half)h2_rB_shared[j]); r2 = __hsub2(__hadd2(h2_rA_shared[wtx].v, h2_rB_shared[j].v), H2_DOT(h2_rA_shared[wtx], h2_rB_shared[j])); // u2 = a2*r2; u2 = __hmul2(h2_a2, r2); // vij= exp(-u2); vij= h2exp(__hneg2(u2)); // fs = 2*vij; fs = __hmul2(__float2half2_rn(2.0), vij); // d.x = (half)h2_rA_shared[wtx].x - (half)h2_rB_shared[j].x; d.x =
__hsub2(h2_rA_shared[wtx].x, h2_rB_shared[j].x); // fxij=fs*d.x; fxij=__hmul2(fs, d.x); // d.y = (half)h2_rA_shared[wtx].y - (half)h2_rB_shared[j].y; d.y = __hsub2(h2_rA_shared[wtx].y, h2_rB_shared[j].y); // fyij=fs*d.y; fyij=__hmul2(fs, d.y); // d.z = (half)h2_rA_shared[wtx].z - (half)h2_rB_shared[j].z; d.z = __hsub2(h2_rA_shared[wtx].z, h2_rB_shared[j].z); // fzij=fs*d.z; fzij=__hmul2(fs, d.z); // fA[wtx].v += (half)((half)h2_qB_shared[j]*vij); h2_fA_local.v = __hfma2(h2_qB_shared[j], vij, h2_fA_local.v); // fA[wtx].x += (half)((half)h2_qB_shared[j]*fxij); h2_fA_local.x = __hfma2(h2_qB_shared[j], fxij, h2_fA_local.x); // fA[wtx].y += (half)((half)h2_qB_shared[j]*fyij); h2_fA_local.y = __hfma2(h2_qB_shared[j], fyij, h2_fA_local.y); // fA[wtx].z += (half)((half)h2_qB_shared[j]*fzij); h2_fA_local.z = __hfma2(h2_qB_shared[j], fzij, h2_fA_local.z); } // Copy back data from local memory to global memory fA[wtx].x = __hadd(fA[wtx].x, __hadd(h2_fA_local.x.x, h2_fA_local.x.y)); fA[wtx].y = __hadd(fA[wtx].y, __hadd(h2_fA_local.y.x, h2_fA_local.y.y)); fA[wtx].z = __hadd(fA[wtx].z, __hadd(h2_fA_local.z.x, h2_fA_local.z.y)); fA[wtx].v = __hadd(fA[wtx].v, __hadd(h2_fA_local.v.x, h2_fA_local.v.y)); // increment work thread index wtx = wtx + NUMBER_THREADS; } // reset work index wtx = tx; // synchronize after finishing force contributions from current nei box not to cause conflicts when starting next box __syncthreads(); //----------------------------------------------------------------------------------------------------------------------------------140 // Calculation END //----------------------------------------------------------------------------------------------------------------------------------140 } //------------------------------------------------------------------------------------------------------------------------------------------------------160 // nei box loop END //------------------------------------------------------------------------------------------------------------------------------------------------------160 } } double mysecond() { struct timeval tp; struct timezone tzp; int i = gettimeofday(&tp,&tzp); return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); } void generateInput(dim_str dim_cpu, char *input_distances, FOUR_VECTOR **rv_cpu, char *input_charges, half_float::half **qv_cpu) { // random generator seed set to random value - time in this case FILE *fp; int i; srand(time(NULL)); // input (distances) if( (fp = fopen(input_distances, "wb" )) == 0 ) { printf( "The file 'input_distances' was not opened\n" ); #ifdef LOGS log_error_detail("The file 'input_distances' was not opened"); end_log_file(); #endif exit(EXIT_FAILURE); } *rv_cpu = (FOUR_VECTOR*)malloc(dim_cpu.space_mem); for(i=0; i<dim_cpu.space_elem; i=i+1) { half_float::half tempValue((rand()%10 + 1) / 10.0); // get a number in the range 0.1 - 1.0 tempValue = half_float::half((rand()%10 + 1) / 10.0); (*rv_cpu)[i].v = *((half*)&tempValue); fwrite(&((*rv_cpu)[i].v), 1, sizeof(half), fp); // get a number in the range 0.1 - 1.0 tempValue = half_float::half((rand()%10 + 1) / 10.0); (*rv_cpu)[i].x = *((half*)&tempValue); fwrite(&((*rv_cpu)[i].x), 1, sizeof(half), fp); // get a number in the range 0.1 - 1.0 tempValue = half_float::half((rand()%10 + 1) / 10.0); (*rv_cpu)[i].y = *((half*)&tempValue); fwrite(&((*rv_cpu)[i].y), 1, sizeof(half), fp); // get a number in the range 0.1 - 1.0 tempValue = half_float::half((rand()%10 + 1) / 10.0); (*rv_cpu)[i].z = *((half*)&tempValue); fwrite(&((*rv_cpu)[i].z), 1, sizeof(half), 
fp); } fclose(fp); // input (charge) if( (fp = fopen(input_charges, "wb" )) == 0 ) { printf( "The file 'input_charges' was not opened\n" ); #ifdef LOGS log_error_detail("The file 'input_charges' was not opened"); end_log_file(); #endif exit(EXIT_FAILURE); } *qv_cpu = (half_float::half*)malloc(dim_cpu.space_mem2); for(i=0; i<dim_cpu.space_elem; i=i+1) { // get a number in the range 0.1 - 1.0 half_float::half tempValue((rand()%10 + 1) / 10.0); (*qv_cpu)[i] = tempValue; fwrite(&((*qv_cpu)[i]), 1, sizeof(half), fp); } fclose(fp); } void readInput(dim_str dim_cpu, char *input_distances, FOUR_VECTOR **rv_cpu, char *input_charges, half_float::half **qv_cpu, int fault_injection) { FILE *fp; int i; size_t return_value[4]; // input (distances) if( (fp = fopen(input_distances, "rb" )) == 0 ) { printf( "The file 'input_distances' was not opened\n" ); #ifdef LOGS log_error_detail("The file 'input_distances' was not opened"); end_log_file(); #endif exit(EXIT_FAILURE); } *rv_cpu = (FOUR_VECTOR*)malloc(dim_cpu.space_mem); if(*rv_cpu == NULL) { printf("error rv_cpu malloc\n"); #ifdef LOGS log_error_detail("error rv_cpu malloc"); end_log_file(); #endif exit(1); } for(i=0; i<dim_cpu.space_elem; i=i+1) { return_value[0] = fread(&((*rv_cpu)[i].v), 1, sizeof(half), fp); return_value[1] = fread(&((*rv_cpu)[i].x), 1, sizeof(half), fp); return_value[2] = fread(&((*rv_cpu)[i].y), 1, sizeof(half), fp); return_value[3] = fread(&((*rv_cpu)[i].z), 1, sizeof(half), fp); if (return_value[0] == 0 || return_value[1] == 0 || return_value[2] == 0 || return_value[3] == 0) { printf("error reading rv_cpu from file\n"); #ifdef LOGS log_error_detail("error reading rv_cpu from file"); end_log_file(); #endif exit(1); } } fclose(fp); // input (charge) if( (fp = fopen(input_charges, "rb" )) == 0 ) { printf( "The file 'input_charges' was not opened\n" ); #ifdef LOGS log_error_detail("The file 'input_charges' was not opened"); end_log_file(); #endif exit(EXIT_FAILURE); } *qv_cpu = (half_float::half*)malloc(dim_cpu.space_mem2); if(*qv_cpu == NULL) { printf("error qv_cpu malloc\n"); #ifdef LOGS log_error_detail("error qv_cpu malloc"); end_log_file(); #endif exit(1); } for(i=0; i<dim_cpu.space_elem; i=i+1) { return_value[0] = fread(&((*qv_cpu)[i]), 1, sizeof(half), fp); if (return_value[0] == 0) { printf("error reading qv_cpu from file\n"); #ifdef LOGS log_error_detail("error reading qv_cpu from file"); end_log_file(); #endif exit(1); } } fclose(fp); // =============== Fault injection if (fault_injection) { half_float::half tempValue(0.732637263); (*qv_cpu)[2] = tempValue; // must be in range 0.1 - 1.0 printf("!!> Fault injection: qv_cpu[2]=%f\n", (float)((*qv_cpu)[2])); } // ======================== } void readGold(dim_str dim_cpu, char *output_gold, FOUR_VECTOR **fv_cpu_GOLD) { FILE *fp; size_t return_value[4]; int i; if( (fp = fopen(output_gold, "rb" )) == 0 ) { printf( "The file 'output_forces' was not opened\n" ); exit(EXIT_FAILURE); } *fv_cpu_GOLD = (FOUR_VECTOR*)malloc(dim_cpu.space_mem); if(*fv_cpu_GOLD == NULL) { printf("error fv_cpu_GOLD malloc\n"); #ifdef LOGS log_error_detail("error fv_cpu_GOLD malloc"); end_log_file(); #endif exit(1); } for(i=0; i<dim_cpu.space_elem; i=i+1) { return_value[0] = fread(&((*fv_cpu_GOLD)[i].v), 1, sizeof(half), fp); return_value[1] = fread(&((*fv_cpu_GOLD)[i].x), 1, sizeof(half), fp); return_value[2] = fread(&((*fv_cpu_GOLD)[i].y), 1, sizeof(half), fp); return_value[3] = fread(&((*fv_cpu_GOLD)[i].z), 1, sizeof(half), fp); if
(return_value[0] == 0 || return_value[1] == 0 || return_value[2] == 0 || return_value[3] == 0) { printf("error reading fv_cpu_GOLD from file\n"); #ifdef LOGS log_error_detail("error reading fv_cpu_GOLD from file"); end_log_file(); #endif exit(1); } } fclose(fp); } void writeGold(dim_str dim_cpu, char *output_gold, FOUR_VECTOR **fv_cpu) { FILE *fp; int i; if( (fp = fopen(output_gold, "wb" )) == 0 ) { printf( "The file 'output_forces' was not opened\n" ); exit(EXIT_FAILURE); } int number_zeros = 0; for(i=0; i<dim_cpu.space_elem; i=i+1) { half_float::half tempValue; tempValue = *((half_float::half*)&((*fv_cpu)[i].v)); if(tempValue == 0.0 || isnan(tempValue)) number_zeros++; tempValue = *((half_float::half*)&((*fv_cpu)[i].x)); if(tempValue == 0.0 || isnan(tempValue)) number_zeros++; tempValue = *((half_float::half*)&((*fv_cpu)[i].y)); if(tempValue == 0.0 || isnan(tempValue)) number_zeros++; tempValue = *((half_float::half*)&((*fv_cpu)[i].z)); if(tempValue == 0.0 || isnan(tempValue)) number_zeros++; fwrite(&((*fv_cpu)[i].v), 1, sizeof(half), fp); fwrite(&((*fv_cpu)[i].x), 1, sizeof(half), fp); fwrite(&((*fv_cpu)[i].y), 1, sizeof(half), fp); fwrite(&((*fv_cpu)[i].z), 1, sizeof(half), fp); } printf("Number of zeros/NaNs written on output: %d\n", number_zeros); fclose(fp); } //============================================================================= // MAIN FUNCTION //============================================================================= int main(int argc, char *argv []) { //===================================================================== // CPU/MCPU VARIABLES //===================================================================== // timer double timestamp; // counters int i, j, k, l, m, n; int iterations; int generate, verbose, fault_injection; // system memory par_str par_cpu; dim_str dim_cpu; box_str* box_cpu; FOUR_VECTOR* rv_cpu; half_float::half* qv_cpu; FOUR_VECTOR* fv_cpu; FOUR_VECTOR* fv_cpu_GOLD; int nh; int nstreams, streamIdx; cudaError_t cuda_error; const char *error_string; char *input_distances, *input_charges, *output_gold; int number_nn = 0; //===================================================================== // CHECK INPUT ARGUMENTS //===================================================================== getParams(argc, argv, &dim_cpu.boxes1d_arg, &generate, &input_distances, &input_charges, &output_gold, &iterations, &verbose, &fault_injection, &nstreams); char test_info[200]; snprintf(test_info, 200, "type:half-precision streams:%d boxes:%d block_size:%d", nstreams, dim_cpu.boxes1d_arg, NUMBER_THREADS); printf("%s\n", test_info); #ifdef LOGS if (!generate) start_log_file("cudaHLavaMD", test_info); #endif //===================================================================== // INPUTS //===================================================================== half_float::half tempValue(0.5); par_cpu.alpha = *((half*)&tempValue); //===================================================================== // DIMENSIONS //===================================================================== // total number of boxes dim_cpu.number_boxes = dim_cpu.boxes1d_arg * dim_cpu.boxes1d_arg * dim_cpu.boxes1d_arg; // total number of particles in the simulation space dim_cpu.space_elem = dim_cpu.number_boxes * NUMBER_PAR_PER_BOX; dim_cpu.space_mem = dim_cpu.space_elem * sizeof(FOUR_VECTOR); dim_cpu.space_mem2 = dim_cpu.space_elem * sizeof(half); // box array dim_cpu.box_mem = dim_cpu.number_boxes * sizeof(box_str);
//===================================================================== // SYSTEM MEMORY //===================================================================== //===================================================================== // BOX //===================================================================== // allocate boxes box_cpu = (box_str*)malloc(dim_cpu.box_mem); if(box_cpu == NULL) { printf("error box_cpu malloc\n"); #ifdef LOGS if (!generate) log_error_detail("error box_cpu malloc"); end_log_file(); #endif exit(1); } // initialize number of home boxes nh = 0; // home boxes in z direction for(i=0; i<dim_cpu.boxes1d_arg; i++) { // home boxes in y direction for(j=0; j<dim_cpu.boxes1d_arg; j++) { // home boxes in x direction for(k=0; k<dim_cpu.boxes1d_arg; k++) { // current home box box_cpu[nh].x = k; box_cpu[nh].y = j; box_cpu[nh].z = i; box_cpu[nh].number = nh; box_cpu[nh].offset = nh * NUMBER_PAR_PER_BOX; // initialize number of neighbor boxes box_cpu[nh].nn = 0; // neighbor boxes in z direction for(l=-1; l<2; l++) { // neighbor boxes in y direction for(m=-1; m<2; m++) { // neighbor boxes in x direction for(n=-1; n<2; n++) { // check if (this neighbor exists) and (it is not the same as home box) if( (((i+l)>=0 && (j+m)>=0 && (k+n)>=0)==true && ((i+l)<dim_cpu.boxes1d_arg && (j+m)<dim_cpu.boxes1d_arg && (k+n)<dim_cpu.boxes1d_arg)==true) && (l==0 && m==0 && n==0)==false ) { // current neighbor box box_cpu[nh].nei[box_cpu[nh].nn].x = (k+n); box_cpu[nh].nei[box_cpu[nh].nn].y = (j+m); box_cpu[nh].nei[box_cpu[nh].nn].z = (i+l); box_cpu[nh].nei[box_cpu[nh].nn].number = (box_cpu[nh].nei[box_cpu[nh].nn].z * dim_cpu.boxes1d_arg * dim_cpu.boxes1d_arg) + (box_cpu[nh].nei[box_cpu[nh].nn].y * dim_cpu.boxes1d_arg) + box_cpu[nh].nei[box_cpu[nh].nn].x; box_cpu[nh].nei[box_cpu[nh].nn].offset = box_cpu[nh].nei[box_cpu[nh].nn].number * NUMBER_PAR_PER_BOX; // increment neighbor box box_cpu[nh].nn = box_cpu[nh].nn + 1; number_nn += box_cpu[nh].nn; } } // neighbor boxes in x direction } // neighbor boxes in y direction } // neighbor boxes in z direction // increment home box nh = nh + 1; } // home boxes in x direction } // home boxes in y direction } // home boxes in z direction //===================================================================== // PARAMETERS, DISTANCE, CHARGE AND FORCE //===================================================================== if (generate) { generateInput(dim_cpu, input_distances, &rv_cpu, input_charges, &qv_cpu); } else { readInput(dim_cpu, input_distances, &rv_cpu, input_charges, &qv_cpu, fault_injection); readGold(dim_cpu, output_gold, &fv_cpu_GOLD); } //===================================================================== // EXECUTION PARAMETERS //===================================================================== dim3 threads; dim3 blocks; blocks.x = dim_cpu.number_boxes; blocks.y = 1; // define the number of threads in the block threads.x = NUMBER_THREADS; threads.y = 1; cudaStream_t *streams = (cudaStream_t *) malloc(nstreams * sizeof(cudaStream_t)); //LOOP START int loop; for(loop=0; loop<iterations; loop++) { if (verbose) { printf("[Iteration #%i]=====================================\n", loop); fflush(stdout); } double globaltimer = mysecond(); timestamp = mysecond(); // prepare host memory to receive kernel output // output (forces) fv_cpu = (FOUR_VECTOR*)malloc(dim_cpu.space_mem); if(fv_cpu == NULL) { printf("error fv_cpu malloc\n"); #ifdef LOGS if (!generate) log_error_detail("error fv_cpu malloc"); end_log_file(); #endif exit(1); } for(i=0; 
i<dim_cpu.space_elem; i=i+1) { // set to 0, because kernels keeps adding to initial value half_float::half zeroValue(0.0); fv_cpu[i].v = *((half*)&zeroValue); fv_cpu[i].x = *((half*)&zeroValue); fv_cpu[i].y = *((half*)&zeroValue); fv_cpu[i].z = *((half*)&zeroValue); } //===================================================================== // GPU_CUDA //===================================================================== //===================================================================== // VARIABLES //===================================================================== box_str* d_box_gpu[nstreams]; FOUR_VECTOR* d_rv_gpu[nstreams]; half* d_qv_gpu[nstreams]; FOUR_VECTOR* d_fv_gpu[nstreams]; //===================================================================== // GPU SETUP //===================================================================== for (streamIdx = 0; streamIdx < nstreams; streamIdx++) { cudaStreamCreateWithFlags(&(streams[streamIdx]), cudaStreamNonBlocking); //================================================== // boxes //================================================== cuda_error = cudaMalloc( (void **)&(d_box_gpu[streamIdx]), dim_cpu.box_mem); error_string = cudaGetErrorString(cuda_error); if(strcmp(error_string, "no error") != 0) { printf("error d_box_gpu cudaMalloc\n"); #ifdef LOGS if (!generate) log_error_detail("error d_box_gpu cudamalloc"); end_log_file(); #endif exit(1); } //================================================== // rv //================================================== cuda_error = cudaMalloc( (void **)&(d_rv_gpu[streamIdx]), dim_cpu.space_mem); error_string = cudaGetErrorString(cuda_error); if(strcmp(error_string, "no error") != 0) { printf("error d_rv_gpu cudaMalloc\n"); #ifdef LOGS if (!generate) log_error_detail("error d_box_gpu cudamalloc"); end_log_file(); #endif exit(1); } //================================================== // qv //================================================== cuda_error = cudaMalloc( (void **)&(d_qv_gpu[streamIdx]), dim_cpu.space_mem2); error_string = cudaGetErrorString(cuda_error); if(strcmp(error_string, "no error") != 0) { printf("error d_qv_gpu cudaMalloc\n"); #ifdef LOGS if (!generate) log_error_detail("error d_box_gpu cudamalloc"); end_log_file(); #endif exit(1); } //================================================== // fv //================================================== cuda_error = cudaMalloc( (void **)&(d_fv_gpu[streamIdx]), dim_cpu.space_mem); error_string = cudaGetErrorString(cuda_error); if(strcmp(error_string, "no error") != 0) { printf("error d_fv_gpu cudaMalloc\n"); #ifdef LOGS if (!generate) log_error_detail("error d_box_gpu cudamalloc"); end_log_file(); #endif exit(1); } //===================================================================== // GPU MEMORY COPY //===================================================================== //================================================== // boxes //================================================== cuda_error = cudaMemcpy(d_box_gpu[streamIdx], box_cpu, dim_cpu.box_mem, cudaMemcpyHostToDevice); error_string = cudaGetErrorString(cuda_error); if(strcmp(error_string, "no error") != 0) { printf("error load d_boc_gpu\n"); #ifdef LOGS if (!generate) log_error_detail("error load d_box_gpu"); end_log_file(); #endif exit(1); } //================================================== // rv //================================================== cuda_error = cudaMemcpy( d_rv_gpu[streamIdx], rv_cpu, dim_cpu.space_mem, cudaMemcpyHostToDevice); error_string = 
cudaGetErrorString(cuda_error); if(strcmp(error_string, "no error") != 0) { printf("error load d_rv_gpu\n"); #ifdef LOGS if (!generate) log_error_detail("error load d_box_gpu"); end_log_file(); #endif exit(1); } //================================================== // qv //================================================== cuda_error = cudaMemcpy( d_qv_gpu[streamIdx], qv_cpu, dim_cpu.space_mem2, cudaMemcpyHostToDevice); error_string = cudaGetErrorString(cuda_error); if(strcmp(error_string, "no error") != 0) { printf("error load d_qv_gpu\n"); #ifdef LOGS if (!generate) log_error_detail("error load d_box_gpu"); end_log_file(); #endif exit(1); } //================================================== // fv //================================================== cuda_error = cudaMemcpy( d_fv_gpu[streamIdx], fv_cpu, dim_cpu.space_mem, cudaMemcpyHostToDevice); error_string = cudaGetErrorString(cuda_error); if(strcmp(error_string, "no error") != 0) { printf("error load d_fv_gpu\n"); #ifdef LOGS if (!generate) log_error_detail("error load d_box_gpu"); end_log_file(); #endif exit(1); } } if (verbose) printf("[Iteration #%i] Setup prepare time: %.4fs\n", loop, mysecond()-timestamp); //===================================================================== // KERNEL //===================================================================== double kernel_time=mysecond(); #ifdef LOGS if (!generate) start_iteration(); #endif // launch kernel - all boxes for (streamIdx = 0; streamIdx < nstreams; streamIdx++) { kernel_gpu_cuda<<<blocks, threads, 0, streams[streamIdx]>>>( par_cpu, dim_cpu, \ d_box_gpu[streamIdx], d_rv_gpu[streamIdx], d_qv_gpu[streamIdx], d_fv_gpu[streamIdx]); checkCudaErrors( cudaPeekAtLastError() ); } //printf("All kernels were commited.\n"); for (streamIdx = 0; streamIdx < nstreams; streamIdx++) { cuda_error = cudaStreamSynchronize(streams[streamIdx]); error_string = cudaGetErrorString(cuda_error); if(strcmp(error_string, "no error") != 0) { printf("error logic: %s\n",error_string); #ifdef LOGS if (!generate) log_error_detail("error logic:"); end_log_file(); #endif exit(1); } checkCudaErrors( cudaPeekAtLastError() ); } #ifdef LOGS if (!generate) end_iteration(); #endif kernel_time = mysecond()-kernel_time; //===================================================================== // COMPARE OUTPUTS / WRITE GOLD //===================================================================== if (generate){ cuda_error = cudaMemcpy( fv_cpu, d_fv_gpu[0], dim_cpu.space_mem, cudaMemcpyDeviceToHost); error_string = cudaGetErrorString(cuda_error); if(strcmp(error_string, "no error") != 0) { printf("error download fv_cpu\n"); exit(1); } writeGold(dim_cpu, output_gold, &fv_cpu); } else { // Check gold //int ea = 0; int thread_error = 0; int kernel_errors = 0; char error_detail[300]; timestamp = mysecond(); for (streamIdx = 0; streamIdx < nstreams; streamIdx++) { //===================================================================== // GPU MEMORY COPY BACK //===================================================================== cuda_error = cudaMemcpy( fv_cpu, d_fv_gpu[streamIdx], dim_cpu.space_mem, cudaMemcpyDeviceToHost); error_string = cudaGetErrorString(cuda_error); if(strcmp(error_string, "no error") != 0) { printf("error download fv_cpu\n"); #ifdef LOGS if (!generate) log_error_detail("error download fv_cpu"); end_log_file(); #endif exit(1); } #pragma omp parallel for for(i=0; i<dim_cpu.space_elem; i=i+1) { if(*((half_float::half*)&(fv_cpu_GOLD[i].v)) != *((half_float::half*)&(fv_cpu[i].v))) { thread_error++; } 
if(*((half_float::half*)&(fv_cpu_GOLD[i].x)) != *((half_float::half*)&(fv_cpu[i].x))) { thread_error++; } if(*((half_float::half*)&(fv_cpu_GOLD[i].y)) != *((half_float::half*)&(fv_cpu[i].y))) { thread_error++; } if(*((half_float::half*)&(fv_cpu_GOLD[i].z)) != *((half_float::half*)&(fv_cpu[i].z))) { thread_error++; } if (thread_error > 0) { #pragma omp critical { kernel_errors++; snprintf(error_detail, 300, "stream: %d, p: [%d], ea: %d, v_r: %1.16e, v_e: %1.16e, x_r: %1.16e, x_e: %1.16e, y_r: %1.16e, y_e: %1.16e, z_r: %1.16e, z_e: %1.16e\n", streamIdx, \ i, thread_error, (float)*((half_float::half*)&(fv_cpu[i].v)), (float)*((half_float::half*)&(fv_cpu_GOLD[i].v)), (float)*((half_float::half*)&(fv_cpu[i].x)), (float)*((half_float::half*)&(fv_cpu_GOLD[i].x)), (float)*((half_float::half*)&(fv_cpu[i].y)), (float)*((half_float::half*)&(fv_cpu_GOLD[i].y)), (float)*((half_float::half*)&(fv_cpu[i].z)), (float)*((half_float::half*)&(fv_cpu_GOLD[i].z))); if (kernel_errors<25) printf("ERROR: %s\n", error_detail); if (kernel_errors>=25 && kernel_errors % 25 == 0) printf("!"); #ifdef LOGS if (!generate) log_error_detail(error_detail); #endif } } } } #ifdef LOGS if (!generate) log_error_count(kernel_errors); #endif if (verbose) printf("[Iteration #%i] Gold check time: %f\n", loop, mysecond() - timestamp); } //================= PERF // iterate for each neighbor of a box (number_nn) double flop = number_nn; // The last for iterates NUMBER_PAR_PER_BOX times flop *= NUMBER_PAR_PER_BOX; // the last for uses 46 operations plus 2 exp() functions flop *= 46; flop *= nstreams; double flops = (double)flop/kernel_time; double outputpersec = (double)dim_cpu.space_elem * 4 * nstreams / kernel_time; if (verbose) printf("[Iteration #%i] BOXES:%d BLOCK:%d OUTPUT/S:%.2f FLOPS:%.2f (GFLOPS:%.2f)\n", loop, dim_cpu.boxes1d_arg, NUMBER_THREADS, outputpersec, flops, flops/1000000000); if (verbose) printf("[Iteration #%i] kernel_time:%f\n", loop, kernel_time); //===================== printf("."); fflush(stdout); //===================================================================== // GPU MEMORY DEALLOCATION //===================================================================== for (streamIdx = 0; streamIdx < nstreams; streamIdx++) { cudaFree(d_rv_gpu[streamIdx]); cudaFree(d_qv_gpu[streamIdx]); cudaFree(d_fv_gpu[streamIdx]); cudaFree(d_box_gpu[streamIdx]); } //===================================================================== // SYSTEM MEMORY DEALLOCATION //===================================================================== free(fv_cpu); if (verbose) printf("[Iteration #%i] Elapsed time: %.4fs\n", loop, mysecond()-globaltimer); } if (!generate) free(fv_cpu_GOLD); free(rv_cpu); free(qv_cpu); free(box_cpu); printf("\n"); #ifdef LOGS if (!generate) end_log_file(); #endif return 0; }
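/* Editor note (not original code): throughout this benchmark, device-side
   `half` values are compared on the host by reinterpreting their bits as
   half_float::half, which assumes both types share the 16-bit IEEE-754
   layout. A minimal strict-aliasing-safe sketch of that pattern, with a
   hypothetical helper name and assuming <cstring> is available:

     static inline half_float::half as_half_float(half h) {
         half_float::half out;
         memcpy(&out, &h, sizeof(out));  // bitwise copy, no aliasing UB
         return out;
     }

   The *(half_float::half*)&x casts used above rely on the compiler
   tolerating the aliasing violation; memcpy expresses the same intent
   portably. */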
43a361247a5a6067c76af68cda494cf52d8ccdd8.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> #include <hip/hip_runtime.h> #include "rocblas.h" #define DGEMM dgemm_ #define DSPEV dspev_ #define PRINTF printf #define EXIT exit #define CLOCKS_PER_SEC_C 1000000 #define MAXTIME 2147.48 #define MAX_BLOCKS 65521 // Max dimension size of a grid size (x,y,z): (2147483647, 65535, 65535) #define THREADS_PER_BLOCK 1024 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void tolerance_check(double *tol_now, double *d_Titer, double *d_mat3, int nstate_sq); __global__ void initialize_mat1(double * mat1, double * S, int nstate_sq); __global__ void initialize_identity(double * mat, double scalar, int nstate); void cputime(double *); void get_iter_Tmat(double *,double *,int ); void get_diag_Tmat(double *,double *,int ); void get_unit_Tmat(double *,int ); extern "C" { void DGEMM (char *, char *, int *, int *, int *,double *,double *, int *, double *, int *, double *, double *, int * ); } void matmul(double *X, int *LDX, int *ITYPE_X, double *Y, int *LDY, int *ITYPE_Y, double *Z, int *LDZ, int *NRZ, int *NCZ, int *NXY, double *ALPHA, double *BETA) { int m = *NRZ; int n = *NCZ; int k = *NXY; //char MATX=(ITYPE_X) ? 'N' : 'T'; //char MATY=(ITYPE_Y) ? 'N' : 'T'; // DGEMM(&MATX,&MATY,NRZ,NCZ,NXY,ALPHA,X,LDX,Y,LDY,BETA,Z,LDZ); hipblasOperation_t MATX = (ITYPE_X) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t MATY = (ITYPE_Y) ? HIPBLAS_OP_N : HIPBLAS_OP_T; // hipError_t cudaStat; // hipMalloc status // hipblasStatus_t stat; // CUBLAS functions status hipblasHandle_t handle; // CUBLAS context // Step 1: Allocate memory on the device: double *d_X, *d_Y, *d_Z; hipMalloc(&d_X, (m*k)*sizeof(double)); // X is an m x k matrix hipMalloc(&d_Y, (k*n)*sizeof(double)); // Y is a k x n matrix hipMalloc(&d_Z, (m*n)*sizeof(double)); // Z is an m x n matrix hipblasCreate(&handle); // initialize CUBLAS context // Step 2: Initialize device memory from host: hipblasSetMatrix(m, k, sizeof(double), X, m, d_X, m); hipblasSetMatrix(k, n, sizeof(double), Y, k, d_Y, k); hipblasSetMatrix(m, n, sizeof(double), Z, m, d_Z, m); // Step 3: Perform operation, function launches kernel on GPU itself hipblasDgemm(handle, MATX, MATY, m, n, k, ALPHA, d_X, m, d_Y, k, BETA, d_Z, m); // Step 4: Copy the result back to the host: hipblasGetMatrix(m, n, sizeof(double), d_Z, m, Z, m); // Step 5: Clean up hipFree(d_X); hipFree(d_Y); hipFree(d_Z); hipblasDestroy(handle); } //DGEMM ( TRANSA, TRANSB, M, N, K, ALPHA, A, LDA, B, LDB, BETA, C, LDC ) /* hipblasStatus_t hipblasDgemm(hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, int k, const double *alpha, const double *A, int lda, const double *B, int ldb, const double *beta, double *C, int ldc) */ void device_matmul(double *d_X, int *LDX, int *ITYPE_X, double *d_Y, int *LDY, int *ITYPE_Y, double *d_Z, int *LDZ, int *NRZ, int *NCZ, int *NXY, double *ALPHA, double *BETA, hipblasHandle_t handle) { int m = *NRZ; int n = *NCZ; int k = *NXY; hipblasOperation_t MATX = (ITYPE_X) ? HIPBLAS_OP_N : HIPBLAS_OP_T; hipblasOperation_t MATY = (ITYPE_Y) ?
HIPBLAS_OP_N : HIPBLAS_OP_T; //hipblasHandle_t handle; // CUBLAS context hipblasDgemm(handle, MATX, MATY, m, n, k, ALPHA, d_X, m, d_Y, k, BETA, d_Z, m); } #define _USE_LAPACK_ #ifdef _USE_LAPACK_ extern "C" {void DSPEV(char *, char *, int *, double [], double [], double [], int *, double [], int *);} #endif //======================================================================= //ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc //======================================================================= int main() //======================================================================= {// begin routine //======================================================================= // I) Set up the problem int nstate; PRINTF("\n============================================\n"); PRINTF("Enter the matrix size : "); if(scanf("%d",&nstate)){} else{printf("%s\n", "Please enter an integer"); EXIT(1);} /* bail out on bad input instead of using an uninitialized nstate */ int nstate_sq = nstate*nstate; double *S = new double[nstate_sq]; double *Tunit = new double[nstate_sq]; double *Tdiag = new double[nstate_sq]; double *Titer = new double[nstate_sq]; PRINTF("Using random input\n\n"); for(int i=0;i<nstate_sq;i++){S[i]=0.0;} for(int i=0;i<nstate;i++){int ind =i+nstate*i;S[ind]=2.0;} double seed=14571.0; srand48((long) seed); for(int i=0;i<nstate;i++){ for(int j=i;j<nstate;j++){ /* the inner loop must advance j, not i */ int ind = i+nstate*j; int indt = j+nstate*i; //int ierr=0; // n=1; -------- *** Not used ? double rand=drand48(); S[ind] += (rand-0.5)*2.0e-3; S[indt] = S[ind]; }}//endfor //======================================================================= // II) Try three methods // get_unit_Tmat(Tunit,nstate); // get_diag_Tmat(S,Tdiag,nstate); // get_iter_Tmat(S,Titer,nstate); // get_iter_Tmat(S,Titer,nstate); get_iter_Tmat(S,Titer,nstate); //======================================================================= // III) Check the error of the iterative method /* NOTE: Tdiag is only filled in when the get_diag_Tmat call above is re-enabled; as committed, this check compares Titer against uninitialized data */ double err=0.0; for(int i=0;i<nstate_sq;i++){ double tmp=Tdiag[i]-Titer[i]; tmp = tmp*tmp; err = (err > tmp ? err : tmp); }//endfor err = sqrt(err); PRINTF("Maximum error in any element : %g\n",err); err=0.0; for(int i=0;i<nstate;i++){ for(int j=i;j<nstate;j++){ int ind = i + j*nstate; int indt = j + i*nstate; double tmp=Titer[ind]-Titer[indt]; tmp = tmp*tmp; err = (err > tmp ?
err : tmp); }}//endfor err = sqrt(err); PRINTF("Deviation from symmetric : %g\n",err); PRINTF("============================================\n\n"); //======================================================================= }//end routine //======================================================================= //============================================================================ //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc //============================================================================ // Diagonalize S and construct T=S^{-1/2} using eigenvalues and eigenvectors //============================================================================ void get_diag_Tmat(double *S,double *T,int nstate) //============================================================================ {//begin routine //============================================================================ // I) Get some scratch double cpu1,cpu2; cputime(&cpu1); int nstate_sq = nstate*nstate; double *umat = new double[nstate_sq]; double *scr_mat1 = new double[nstate_sq]; double *scr_mat2 = new double[nstate_sq]; double *s_eigs = new double[nstate]; double *scr1 = new double[3*nstate]; double *scr2 = new double[3*nstate]; //========================================================================== // II. Diagonalize S using rs_ FORTRAN diagonalization routine int ifound = 0; int ierr = 0; //---------------------------------------------------------------------- // Use LAPACK : Captain Jack is Happy. #ifdef _USE_LAPACK_ ifound ++; for(int i = 1; i <= nstate; i++){ for(int j = 1; j <= i; j++){ int ind = (i-1) + (j-1)*nstate; int ind2 = (i-1) + (j-1)*(2*nstate-j)/2; scr_mat1[ind2] = S[ind]; }}//endfor char Vstuff ='V'; char Lstuff ='L'; DSPEV(&Vstuff,&Lstuff,&nstate,scr_mat1,s_eigs,umat,&nstate,scr1,&ierr); #endif if(ifound!=1 || ierr != 0){ PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); PRINTF("Error trying to diagonalize S : %d %d\n",ifound,ierr); PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); EXIT(1); }//endif //========================================================================== // III. Compute inverse square root of eigenvalues: Occupation numbers // are HACKED!!!!! 
//---------------------------------------------------------------------- // A) Construct diagonal matrix using eigenvalues : sqrt(2/lamba) for(int i = 0; i < nstate; i++){s_eigs[i] = sqrt(2.0/s_eigs[i]);} memset(scr_mat1,0,sizeof(double)*nstate_sq); for(int i = 0; i < nstate; i++){ int ind = i*nstate+i; scr_mat1[ind]=s_eigs[i]; }/* endfor */ //------------------------------------------------------------------------ // B) Transform matrix back to original representation using eigenvectors double alpha = 1.0; double beta = 0.0; int itransp = 0; int inorm = 1; matmul(scr_mat1,&nstate,&inorm,umat,&nstate,&itransp,scr_mat2, &nstate,&nstate,&nstate,&nstate,&alpha,&beta); matmul(umat,&nstate,&inorm,scr_mat2,&nstate,&inorm,T, &nstate,&nstate,&nstate,&nstate,&alpha,&beta); //============================================================================ // IV) Free allocated temporary memory delete [] umat; delete [] scr_mat1; delete [] scr_mat2; delete [] s_eigs; delete [] scr1; delete [] scr2; cputime(&cpu2); PRINTF("nstate %d : cpu time diag : %g\n\n",nstate,cpu2-cpu1); //============================================================================ } /* End function */ //============================================================================ //============================================================================ //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc //============================================================================ // Set Tmax to the Unit matrix : remove cputime overhead of diag to test // parallel performance //============================================================================ void get_unit_Tmat(double *Tunit,int nstate){ int nstate_sq = nstate*nstate; memset(Tunit,0,nstate_sq*sizeof(double)); for(int i=0;i<nstate;i++){int ind = i+i*nstate;Tunit[ind] = 1.0;} } //============================================================================ /*==========================================================================*/ /*cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc*/ /*==========================================================================*/ /* Kernel to check tolerance directly on device */ /*==========================================================================*/ __global__ void tolerance_check(double *d_tol_now, double *d_Titer, double *d_mat3, int nstate_sq) //============================================================================ {//begin routine //============================================================================ // I) Find location int global_idx = threadIdx.x + blockDim.x * blockIdx.x; int local_idx = threadIdx.x; // II) d_mat3 = (d_mat3 - d_Titer) ^ 2 if(global_idx < nstate_sq){ // Needed in case last block is not full double tmp = d_mat3[global_idx] - d_Titer[global_idx]; d_mat3[global_idx] = tmp * tmp; } __syncthreads(); // Every thread will execute this unconditionally // III) Reduce any sized array if(global_idx < nstate_sq){ // Needed in case last block is not full unsigned int length = THREADS_PER_BLOCK, next_length, block2_size, global_start_idx, global_split_idx = 0, global_end_idx; if((blockIdx.x + 1) == gridDim.x) { // This is the last block - may not be full length = nstate_sq - THREADS_PER_BLOCK * (gridDim.x - 1); // THREADS_PER_BLOCK == blockDim.x } for( ; length != 1; length = next_length){ // Ultimately we want host to do ~ 2 ^ 13 work, here it does 2^10 next_length = (length + 1) / 2; global_start_idx = blockDim.x * blockIdx.x; global_split_idx = 
global_start_idx + next_length; global_end_idx = global_start_idx + length; block2_size = length / 2; //length - next_length; if(global_split_idx <= global_idx && global_idx < global_end_idx){ d_mat3[global_idx - block2_size] += d_mat3[global_idx]; } __syncthreads(); // FIX : find way to have every thread execute this unconditionally } // IV) Only 0 thread from each block copies result back if(local_idx == 0){ d_tol_now[blockIdx.x] = d_mat3[global_idx]; } } } /*==========================================================================*/ __global__ void initialize_mat1(double * mat1, double * S, int nstate_sq) { int global_idx = threadIdx.x + blockDim.x * blockIdx.x; if(global_idx < nstate_sq){ mat1[global_idx] = S[global_idx] / 2.0; } } __global__ void initialize_identity(double * mat, double scalar, int nstate) { int i, j, global_idx = threadIdx.x + blockDim.x * blockIdx.x; i = global_idx % nstate; j = global_idx / nstate; if(global_idx < (nstate * nstate)){ if(i == j){ mat[global_idx] = scalar; // THIS FUNCTION IS ONLY CHANGING THE MAIN DIAGONAL, NEED TO BE SURE THE REST ARE 0 using blank memory } else{ mat[global_idx] = 0; } } } //============================================================================ //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc //============================================================================ // Schulz iteration for inverse sqrt root : quadratic convergence! //============================================================================ void get_iter_Tmat(double *S,double *Titer,int nstate) //============================================================================ {//begin routine //============================================================================ // I) Get some scratch on the host double cpu1,cpu2,cpu3,cpu4, mult1_start, mult1_end, mult2_start, mult2_end, mult3_start, mult3_end, total1=0, total2=0, total3=0, cpu5, cpu6; cputime(&cpu1); int nstate_sq = nstate*nstate; double *tol_now_ptr = new double[MAX_BLOCKS]; double *scr_mat1 = new double[nstate_sq]; double *scr_mat2 = new double[nstate_sq]; double *scr_mat3 = new double[nstate_sq]; // DO NOT NEED THESE ANYMORE, REMOVE TO TEST TIMING //============================================================================ // II) Set up CUBLAS context // hipError_t cudaStat; // hipMalloc status // hipblasStatus_t stat; // CUBLAS functions status hipblasHandle_t handle; // CUBLAS context //============================================================================ // III) Allocate memory on the device double *d_Titer, *d_mat1, *d_mat2, *d_mat3, *d_tol_now; hipMalloc(&d_Titer, nstate_sq*sizeof(double)); hipMalloc(&d_mat1, nstate_sq*sizeof(double)); hipMalloc(&d_mat2, nstate_sq*sizeof(double)); hipMalloc(&d_mat3, nstate_sq*sizeof(double)); hipMalloc(&d_tol_now, MAX_BLOCKS*sizeof(double)); hipblasCreate(&handle); // initialize CUBLAS context //============================================================================ // IV) Schulz iteration //-------------------------------------------------------------------- // A) Initailize d_mat1 and d_Titer on device cputime(&cpu3); // d_mat1 = S/2 hipblasSetMatrix(nstate, nstate, sizeof(double), S, nstate, d_mat2, nstate); hipLaunchKernelGGL(( initialize_mat1), dim3((nstate_sq+1023)/1024), dim3(1024), 0, 0, d_mat1, d_mat2, nstate_sq); // d_Titer = I = unit matrix hipLaunchKernelGGL(( initialize_identity), dim3((nstate_sq+1023)/1024), dim3(1024), 0, 0, d_Titer, 1.0, nstate); 
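//--------------------------------------------------------------------
// Editor note (derived from the code below, not original commentary):
// the loop in part B is the coupled Newton-Schulz iteration for the
// matrix inverse square root. With
//   Y_0 = S/2 (d_mat1)   and   Z_0 = I (d_Titer),
// each pass computes
//   M_k     = 3*I - Z_k * Y_k    (d_mat2)
//   Y_{k+1} = 0.5 * Y_k * M_k    (d_mat1)
//   Z_{k+1} = 0.5 * M_k * Z_k    (d_Titer)
// so Y_k -> (S/2)^{1/2} and Z_k -> (S/2)^{-1/2} = sqrt(2)*S^{-1/2},
// matching the sqrt(2/lambda) eigenvalue scaling used in get_diag_Tmat.
// Convergence is quadratic provided ||I - S/2|| < 1, which holds for the
// S = 2*I + O(1e-3) matrices generated in main().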
//-------------------------------------------------------------------- // B) Iterate cputime(&cpu5); int itransp = 0; int inorm = 1; double alpha0 = -1.0; double beta0 = 1.0; double alpha1 = 0.5; double beta1 = 0.0; int iter = 0; double tol_now = 1.0; while (tol_now > 1.0e-15 && iter<10){ iter++; //-------------------------------- // d_mat2 = 3*I - d_Titer*d_mat1 hipLaunchKernelGGL(( initialize_identity), dim3((nstate_sq+1023)/1024), dim3(1024), 0, 0, d_mat2, 3.0, nstate); cputime(&mult1_start); device_matmul(d_Titer,&nstate,&inorm,d_mat1,&nstate,&itransp,d_mat2, &nstate,&nstate,&nstate,&nstate,&alpha0,&beta0,handle); hipDeviceSynchronize(); cputime(&mult1_end); //-------------------------------- // d_mat1 = 0.5*d_mat1*d_mat2 = 0.5*d_mat3*d_mat2 // Run this step concurently with the next step ? hipMemcpy(d_mat3,d_mat1,nstate_sq*sizeof(double),hipMemcpyDeviceToDevice); cputime(&mult2_start); device_matmul(d_mat3,&nstate,&inorm,d_mat2,&nstate,&itransp,d_mat1, &nstate,&nstate,&nstate,&nstate,&alpha1,&beta1,handle); hipDeviceSynchronize(); cputime(&mult2_end); //-------------------------------- // d_Titer = 0.5*d_mat2*d_Titer = 0.5*d_mat2*d_mat3 // if(iter >= 4){ hipMemcpy(d_mat3,d_Titer,nstate_sq*sizeof(double),hipMemcpyDeviceToDevice); // Only needed for tolerance check //} cputime(&mult3_start); device_matmul(d_mat2,&nstate,&inorm,d_mat3,&nstate,&itransp,d_Titer, &nstate,&nstate,&nstate,&nstate,&alpha1,&beta1,handle); hipDeviceSynchronize(); cputime(&mult3_end); total1 += mult1_end - mult1_start; total2 += mult2_end - mult2_start; total3 += mult3_end - mult3_start; //-------------------------------- // Launch kernel to check tolerance only if iter >= 4 if(iter >= 4){ int grid_size = (nstate_sq + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK; hipLaunchKernelGGL(( tolerance_check), dim3(grid_size), dim3(THREADS_PER_BLOCK), 0, 0, d_tol_now, d_Titer, d_mat3, nstate_sq); hipMemcpy(tol_now_ptr, d_tol_now, grid_size*sizeof(double), hipMemcpyDeviceToHost); tol_now = 0.0; for(int i = 0; i < grid_size; i ++){ tol_now += tol_now_ptr[i]; } tol_now = sqrt(tol_now / nstate_sq); PRINTF("iter %d : tol %g\n",iter,tol_now); } }//endwhile cputime(&cpu6); if(tol_now>1.0e-15){ PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); PRINTF("Iterative computation of S^{-1/2} failed\n"); PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); EXIT(1); }//endif /*==========================================================================*/ // V) Copy the result back to the host hipblasGetMatrix(nstate, nstate, sizeof(double), d_Titer, nstate, Titer, nstate); cputime(&cpu4); /*==========================================================================*/ // VI) Clean up device hipFree(d_Titer); hipFree(d_mat1); hipFree(d_mat2); hipFree(d_mat3); hipFree(d_tol_now); hipblasDestroy(handle); // VII) Clean up host delete [] tol_now_ptr; delete [] scr_mat1; delete [] scr_mat2; delete [] scr_mat3; cputime(&cpu2); printf("time in while loop %g : time copying result at end %g : initailization time %g : mallocing time %g\n", cpu6-cpu5, cpu4-cpu6, cpu5-cpu3, cpu3-cpu1); printf("mult1 %g : mult2 %g :mult3 %g\n", total1, total2, total3); PRINTF("nstate %d : cpu time iter : %g cpu time without hipMalloc or hipFree : %g\n\n",nstate,cpu2-cpu1, cpu4-cpu3); /*==========================================================================*/ }//end routine /*==========================================================================*/ /*==========================================================================*/ 
/*cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc*/ /*==========================================================================*/ /* subroutine to time processes */ /*==========================================================================*/ void cputime(double *time) /*==========================================================================*/ { int itime; static double to=0.,tn=0.; itime = clock(); tn = (double)((double)itime/(double)CLOCKS_PER_SEC_C); *time = tn; if(tn >= 0 && to >= 0){*time=tn;} if(tn < 0 && to >= 0){*time=MAXTIME*2.0+tn;} if(tn >= 0 && to < 0){*time=tn+MAXTIME;} if(tn < 0 && to < 0){*time=MAXTIME+tn;} to = tn; } /*==========================================================================*/
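/* Editor note: the sign checks in cputime() appear to compensate for 32-bit
   clock_t wrap-around: clock() overflows a signed 32-bit tick counter after
   2^31 ticks, i.e. 2147483648 / CLOCKS_PER_SEC_C = 2147.48 s, which is
   exactly the MAXTIME constant. When tn goes negative while the previous
   sample was non-negative (or vice versa), a multiple of MAXTIME is added
   back so that reported times keep increasing across the wrap. */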
43a361247a5a6067c76af68cda494cf52d8ccdd8.cu
#include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> #include <cuda_runtime.h> #include "cublas_v2.h" #define DGEMM dgemm_ #define DSPEV dspev_ #define PRINTF printf #define EXIT exit #define CLOCKS_PER_SEC_C 1000000 #define MAXTIME 2147.48 #define MAX_BLOCKS 65521 // Max dimension size of a grid size (x,y,z): (2147483647, 65535, 65535) #define THREADS_PER_BLOCK 1024 #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void tolerance_check(double *tol_now, double *d_Titer, double *d_mat3, int nstate_sq); __global__ void initialize_mat1(double * mat1, double * S, int nstate_sq); __global__ void initialize_identity(double * mat, double scalar, int nstate); void cputime(double *); void get_iter_Tmat(double *,double *,int ); void get_diag_Tmat(double *,double *,int ); void get_unit_Tmat(double *,int ); extern "C" { void DGEMM (char *, char *, int *, int *, int *,double *,double *, int *, double *, int *, double *, double *, int * ); } void matmul(double *X, int *LDX, int *ITYPE_X, double *Y, int *LDY, int *ITYPE_Y, double *Z, int *LDZ, int *NRZ, int *NCZ, int *NXY, double *ALPHA, double *BETA) { int m = *NRZ; int n = *NCZ; int k = *NXY; //char MATX=(ITYPE_X) ? 'N' : 'T'; //char MATY=(ITYPE_Y) ? 'N' : 'T'; // DGEMM(&MATX,&MATY,NRZ,NCZ,NXY,ALPHA,X,LDX,Y,LDY,BETA,Z,LDZ); cublasOperation_t MATX = (ITYPE_X) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t MATY = (ITYPE_Y) ? CUBLAS_OP_N : CUBLAS_OP_T; // cudaError_t cudaStat; // cudaMalloc status // cublasStatus_t stat; // CUBLAS functions status cublasHandle_t handle; // CUBLAS context // Step 1: Allocate memory on the device: double *d_X, *d_Y, *d_Z; cudaMalloc(&d_X, (m*k)*sizeof(double)); // X is an m x k matrix cudaMalloc(&d_Y, (k*n)*sizeof(double)); // Y is a k x n matrix cudaMalloc(&d_Z, (m*n)*sizeof(double)); // Z is an m x n matrix cublasCreate(&handle); // initialize CUBLAS context // Step 2: Initialize device memory from host: cublasSetMatrix(m, k, sizeof(double), X, m, d_X, m); cublasSetMatrix(k, n, sizeof(double), Y, k, d_Y, k); cublasSetMatrix(m, n, sizeof(double), Z, m, d_Z, m); // Step 3: Perform operation, function launches kernel on GPU itself cublasDgemm(handle, MATX, MATY, m, n, k, ALPHA, d_X, m, d_Y, k, BETA, d_Z, m); // Step 4: Copy the result back to the host: cublasGetMatrix(m, n, sizeof(double), d_Z, m, Z, m); // Step 5: Clean up cudaFree(d_X); cudaFree(d_Y); cudaFree(d_Z); cublasDestroy(handle); } //DGEMM ( TRANSA, TRANSB, M, N, K, ALPHA, A, LDA, B, LDB, BETA, C, LDC ) /* cublasStatus_t cublasDgemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const double *alpha, const double *A, int lda, const double *B, int ldb, const double *beta, double *C, int ldc) */ void device_matmul(double *d_X, int *LDX, int *ITYPE_X, double *d_Y, int *LDY, int *ITYPE_Y, double *d_Z, int *LDZ, int *NRZ, int *NCZ, int *NXY, double *ALPHA, double *BETA, cublasHandle_t handle) { int m = *NRZ; int n = *NCZ; int k = *NXY; cublasOperation_t MATX = (ITYPE_X) ? CUBLAS_OP_N : CUBLAS_OP_T; cublasOperation_t MATY = (ITYPE_Y) ?
CUBLAS_OP_N : CUBLAS_OP_T; //cublasHandle_t handle; // CUBLAS context cublasDgemm(handle, MATX, MATY, m, n, k, ALPHA, d_X, m, d_Y, k, BETA, d_Z, m); } #define _USE_LAPACK_ #ifdef _USE_LAPACK_ extern "C" {void DSPEV(char *, char *, int *, double [], double [], double [], int *, double [], int *);} #endif //======================================================================= //ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc //======================================================================= int main() //======================================================================= {// begin routine //======================================================================= // I) Set up the problem int nstate; PRINTF("\n============================================\n"); PRINTF("Enter the matrix size : "); if(scanf("%d",&nstate)){} else{printf("%s\n", "Please enter an integer"); EXIT(1);} /* bail out on bad input instead of using an uninitialized nstate */ int nstate_sq = nstate*nstate; double *S = new double[nstate_sq]; double *Tunit = new double[nstate_sq]; double *Tdiag = new double[nstate_sq]; double *Titer = new double[nstate_sq]; PRINTF("Using random input\n\n"); for(int i=0;i<nstate_sq;i++){S[i]=0.0;} for(int i=0;i<nstate;i++){int ind =i+nstate*i;S[ind]=2.0;} double seed=14571.0; srand48((long) seed); for(int i=0;i<nstate;i++){ for(int j=i;j<nstate;j++){ /* the inner loop must advance j, not i */ int ind = i+nstate*j; int indt = j+nstate*i; //int ierr=0; // n=1; -------- *** Not used ? double rand=drand48(); S[ind] += (rand-0.5)*2.0e-3; S[indt] = S[ind]; }}//endfor //======================================================================= // II) Try three methods // get_unit_Tmat(Tunit,nstate); // get_diag_Tmat(S,Tdiag,nstate); // get_iter_Tmat(S,Titer,nstate); // get_iter_Tmat(S,Titer,nstate); get_iter_Tmat(S,Titer,nstate); //======================================================================= // III) Check the error of the iterative method /* NOTE: Tdiag is only filled in when the get_diag_Tmat call above is re-enabled; as committed, this check compares Titer against uninitialized data */ double err=0.0; for(int i=0;i<nstate_sq;i++){ double tmp=Tdiag[i]-Titer[i]; tmp = tmp*tmp; err = (err > tmp ? err : tmp); }//endfor err = sqrt(err); PRINTF("Maximum error in any element : %g\n",err); err=0.0; for(int i=0;i<nstate;i++){ for(int j=i;j<nstate;j++){ int ind = i + j*nstate; int indt = j + i*nstate; double tmp=Titer[ind]-Titer[indt]; tmp = tmp*tmp; err = (err > tmp ?
err : tmp); }}//endfor err = sqrt(err); PRINTF("Deviation from symmetric : %g\n",err); PRINTF("============================================\n\n"); //======================================================================= }//end routine //======================================================================= //============================================================================ //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc //============================================================================ // Diagonalize S and construct T=S^{-1/2} using eigenvalues and eigenvectors //============================================================================ void get_diag_Tmat(double *S,double *T,int nstate) //============================================================================ {//begin routine //============================================================================ // I) Get some scratch double cpu1,cpu2; cputime(&cpu1); int nstate_sq = nstate*nstate; double *umat = new double[nstate_sq]; double *scr_mat1 = new double[nstate_sq]; double *scr_mat2 = new double[nstate_sq]; double *s_eigs = new double[nstate]; double *scr1 = new double[3*nstate]; double *scr2 = new double[3*nstate]; //========================================================================== // II. Diagonalize S using rs_ FORTRAN diagonalization routine int ifound = 0; int ierr = 0; //---------------------------------------------------------------------- // Use LAPACK : Captain Jack is Happy. #ifdef _USE_LAPACK_ ifound ++; for(int i = 1; i <= nstate; i++){ for(int j = 1; j <= i; j++){ int ind = (i-1) + (j-1)*nstate; int ind2 = (i-1) + (j-1)*(2*nstate-j)/2; scr_mat1[ind2] = S[ind]; }}//endfor char Vstuff ='V'; char Lstuff ='L'; DSPEV(&Vstuff,&Lstuff,&nstate,scr_mat1,s_eigs,umat,&nstate,scr1,&ierr); #endif if(ifound!=1 || ierr != 0){ PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); PRINTF("Error trying to diagonalize S : %d %d\n",ifound,ierr); PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); EXIT(1); }//endif //========================================================================== // III. Compute inverse square root of eigenvalues: Occupation numbers // are HACKED!!!!! 
//---------------------------------------------------------------------- // A) Construct diagonal matrix using eigenvalues : sqrt(2/lamba) for(int i = 0; i < nstate; i++){s_eigs[i] = sqrt(2.0/s_eigs[i]);} memset(scr_mat1,0,sizeof(double)*nstate_sq); for(int i = 0; i < nstate; i++){ int ind = i*nstate+i; scr_mat1[ind]=s_eigs[i]; }/* endfor */ //------------------------------------------------------------------------ // B) Transform matrix back to original representation using eigenvectors double alpha = 1.0; double beta = 0.0; int itransp = 0; int inorm = 1; matmul(scr_mat1,&nstate,&inorm,umat,&nstate,&itransp,scr_mat2, &nstate,&nstate,&nstate,&nstate,&alpha,&beta); matmul(umat,&nstate,&inorm,scr_mat2,&nstate,&inorm,T, &nstate,&nstate,&nstate,&nstate,&alpha,&beta); //============================================================================ // IV) Free allocated temporary memory delete [] umat; delete [] scr_mat1; delete [] scr_mat2; delete [] s_eigs; delete [] scr1; delete [] scr2; cputime(&cpu2); PRINTF("nstate %d : cpu time diag : %g\n\n",nstate,cpu2-cpu1); //============================================================================ } /* End function */ //============================================================================ //============================================================================ //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc //============================================================================ // Set Tmax to the Unit matrix : remove cputime overhead of diag to test // parallel performance //============================================================================ void get_unit_Tmat(double *Tunit,int nstate){ int nstate_sq = nstate*nstate; memset(Tunit,0,nstate_sq*sizeof(double)); for(int i=0;i<nstate;i++){int ind = i+i*nstate;Tunit[ind] = 1.0;} } //============================================================================ /*==========================================================================*/ /*cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc*/ /*==========================================================================*/ /* Kernel to check tolerance directly on device */ /*==========================================================================*/ __global__ void tolerance_check(double *d_tol_now, double *d_Titer, double *d_mat3, int nstate_sq) //============================================================================ {//begin routine //============================================================================ // I) Find location int global_idx = threadIdx.x + blockDim.x * blockIdx.x; int local_idx = threadIdx.x; // II) d_mat3 = (d_mat3 - d_Titer) ^ 2 if(global_idx < nstate_sq){ // Needed in case last block is not full double tmp = d_mat3[global_idx] - d_Titer[global_idx]; d_mat3[global_idx] = tmp * tmp; } __syncthreads(); // Every thread will execute this unconditionally // III) Reduce any sized array if(global_idx < nstate_sq){ // Needed in case last block is not full unsigned int length = THREADS_PER_BLOCK, next_length, block2_size, global_start_idx, global_split_idx = 0, global_end_idx; if((blockIdx.x + 1) == gridDim.x) { // This is the last block - may not be full length = nstate_sq - THREADS_PER_BLOCK * (gridDim.x - 1); // THREADS_PER_BLOCK == blockDim.x } for( ; length != 1; length = next_length){ // Ultimately we want host to do ~ 2 ^ 13 work, here it does 2^10 next_length = (length + 1) / 2; global_start_idx = blockDim.x * blockIdx.x; global_split_idx = 
global_start_idx + next_length; global_end_idx = global_start_idx + length; block2_size = length / 2; //length - next_length; if(global_split_idx <= global_idx && global_idx < global_end_idx){ d_mat3[global_idx - block2_size] += d_mat3[global_idx]; } __syncthreads(); // FIX : find way to have every thread execute this unconditionally } // IV) Only 0 thread from each block copies result back if(local_idx == 0){ d_tol_now[blockIdx.x] = d_mat3[global_idx]; } } } /*==========================================================================*/ __global__ void initialize_mat1(double * mat1, double * S, int nstate_sq) { int global_idx = threadIdx.x + blockDim.x * blockIdx.x; if(global_idx < nstate_sq){ mat1[global_idx] = S[global_idx] / 2.0; } } __global__ void initialize_identity(double * mat, double scalar, int nstate) { int i, j, global_idx = threadIdx.x + blockDim.x * blockIdx.x; i = global_idx % nstate; j = global_idx / nstate; if(global_idx < (nstate * nstate)){ if(i == j){ mat[global_idx] = scalar; // THIS FUNCTION IS ONLY CHANGING THE MAIN DIAGONAL, NEED TO BE SURE THE REST ARE 0 using blank memory } else{ mat[global_idx] = 0; } } } //============================================================================ //cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc //============================================================================ // Schulz iteration for inverse sqrt root : quadratic convergence! //============================================================================ void get_iter_Tmat(double *S,double *Titer,int nstate) //============================================================================ {//begin routine //============================================================================ // I) Get some scratch on the host double cpu1,cpu2,cpu3,cpu4, mult1_start, mult1_end, mult2_start, mult2_end, mult3_start, mult3_end, total1=0, total2=0, total3=0, cpu5, cpu6; cputime(&cpu1); int nstate_sq = nstate*nstate; double *tol_now_ptr = new double[MAX_BLOCKS]; double *scr_mat1 = new double[nstate_sq]; double *scr_mat2 = new double[nstate_sq]; double *scr_mat3 = new double[nstate_sq]; // DO NOT NEED THESE ANYMORE, REMOVE TO TEST TIMING //============================================================================ // II) Set up CUBLAS context // cudaError_t cudaStat; // cudaMalloc status // cublasStatus_t stat; // CUBLAS functions status cublasHandle_t handle; // CUBLAS context //============================================================================ // III) Allocate memory on the device double *d_Titer, *d_mat1, *d_mat2, *d_mat3, *d_tol_now; cudaMalloc(&d_Titer, nstate_sq*sizeof(double)); cudaMalloc(&d_mat1, nstate_sq*sizeof(double)); cudaMalloc(&d_mat2, nstate_sq*sizeof(double)); cudaMalloc(&d_mat3, nstate_sq*sizeof(double)); cudaMalloc(&d_tol_now, MAX_BLOCKS*sizeof(double)); cublasCreate(&handle); // initialize CUBLAS context //============================================================================ // IV) Schulz iteration //-------------------------------------------------------------------- // A) Initailize d_mat1 and d_Titer on device cputime(&cpu3); // d_mat1 = S/2 cublasSetMatrix(nstate, nstate, sizeof(double), S, nstate, d_mat2, nstate); initialize_mat1<<<(nstate_sq+1023)/1024, 1024>>>(d_mat1, d_mat2, nstate_sq); // d_Titer = I = unit matrix initialize_identity<<<(nstate_sq+1023)/1024, 1024>>>(d_Titer, 1.0, nstate); //-------------------------------------------------------------------- // B) Iterate cputime(&cpu5); int itransp = 0; int 
inorm = 1; double alpha0 = -1.0; double beta0 = 1.0; double alpha1 = 0.5; double beta1 = 0.0; int iter = 0; double tol_now = 1.0; while (tol_now > 1.0e-15 && iter<10){ iter++; //-------------------------------- // d_mat2 = 3*I - d_Titer*d_mat1 initialize_identity<<<(nstate_sq+1023)/1024, 1024>>>(d_mat2, 3.0, nstate); cputime(&mult1_start); device_matmul(d_Titer,&nstate,&inorm,d_mat1,&nstate,&itransp,d_mat2, &nstate,&nstate,&nstate,&nstate,&alpha0,&beta0,handle); cudaThreadSynchronize(); cputime(&mult1_end); //-------------------------------- // d_mat1 = 0.5*d_mat1*d_mat2 = 0.5*d_mat3*d_mat2 // Run this step concurently with the next step ? cudaMemcpy(d_mat3,d_mat1,nstate_sq*sizeof(double),cudaMemcpyDeviceToDevice); cputime(&mult2_start); device_matmul(d_mat3,&nstate,&inorm,d_mat2,&nstate,&itransp,d_mat1, &nstate,&nstate,&nstate,&nstate,&alpha1,&beta1,handle); cudaThreadSynchronize(); cputime(&mult2_end); //-------------------------------- // d_Titer = 0.5*d_mat2*d_Titer = 0.5*d_mat2*d_mat3 // if(iter >= 4){ cudaMemcpy(d_mat3,d_Titer,nstate_sq*sizeof(double),cudaMemcpyDeviceToDevice); // Only needed for tolerance check //} cputime(&mult3_start); device_matmul(d_mat2,&nstate,&inorm,d_mat3,&nstate,&itransp,d_Titer, &nstate,&nstate,&nstate,&nstate,&alpha1,&beta1,handle); cudaThreadSynchronize(); cputime(&mult3_end); total1 += mult1_end - mult1_start; total2 += mult2_end - mult2_start; total3 += mult3_end - mult3_start; //-------------------------------- // Launch kernel to check tolerance only if iter >= 4 if(iter >= 4){ int grid_size = (nstate_sq + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK; tolerance_check<<<grid_size, THREADS_PER_BLOCK>>>(d_tol_now, d_Titer, d_mat3, nstate_sq); cudaMemcpy(tol_now_ptr, d_tol_now, grid_size*sizeof(double), cudaMemcpyDeviceToHost); tol_now = 0.0; for(int i = 0; i < grid_size; i ++){ tol_now += tol_now_ptr[i]; } tol_now = sqrt(tol_now / nstate_sq); PRINTF("iter %d : tol %g\n",iter,tol_now); } }//endwhile cputime(&cpu6); if(tol_now>1.0e-15){ PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); PRINTF("Iterative computation of S^{-1/2} failed\n"); PRINTF("@@@@@@@@@@@@@@@@@@@@_error_@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n"); EXIT(1); }//endif /*==========================================================================*/ // V) Copy the result back to the host cublasGetMatrix(nstate, nstate, sizeof(double), d_Titer, nstate, Titer, nstate); cputime(&cpu4); /*==========================================================================*/ // VI) Clean up device cudaFree(d_Titer); cudaFree(d_mat1); cudaFree(d_mat2); cudaFree(d_mat3); cudaFree(d_tol_now); cublasDestroy(handle); // VII) Clean up host delete [] tol_now_ptr; delete [] scr_mat1; delete [] scr_mat2; delete [] scr_mat3; cputime(&cpu2); printf("time in while loop %g : time copying result at end %g : initailization time %g : mallocing time %g\n", cpu6-cpu5, cpu4-cpu6, cpu5-cpu3, cpu3-cpu1); printf("mult1 %g : mult2 %g :mult3 %g\n", total1, total2, total3); PRINTF("nstate %d : cpu time iter : %g cpu time without cudaMalloc or cudaFree : %g\n\n",nstate,cpu2-cpu1, cpu4-cpu3); /*==========================================================================*/ }//end routine /*==========================================================================*/ /*==========================================================================*/ /*cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc*/ /*==========================================================================*/ /* subroutine to time processes */ 
/*==========================================================================*/ void cputime(double *time) /*==========================================================================*/ { int itime; static double to=0.,tn=0.; itime = clock(); tn = (double)((double)itime/(double)CLOCKS_PER_SEC_C); *time = tn; if(tn >= 0 && to >= 0){*time=tn;} if(tn < 0 && to >= 0){*time=MAXTIME*2.0+tn;} if(tn >= 0 && to < 0){*time=tn+MAXTIME;} if(tn < 0 && to < 0){*time=MAXTIME+tn;} to = tn; } /*==========================================================================*/
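/*==========================================================================*/
/* Editor sketch (hypothetical helper, not part of the original program):  */
/* the scalar (1x1) analogue of the coupled iteration in get_iter_Tmat,    */
/* handy for sanity-checking convergence. It assumes |1 - a/2| < 1, which  */
/* matches the S = 2*I + O(1e-3) matrices generated in main().             */
/*==========================================================================*/
static double schulz_scalar_isqrt(double a, int iters)
{
  double y = a/2.0, z = 1.0;        /* Y_0 = a/2, Z_0 = 1 */
  for(int k = 0; k < iters; k++){
    double m = 3.0 - z*y;           /* M = 3 - Z*Y               */
    y *= 0.5*m;                     /* Y <- 0.5*Y*M -> sqrt(a/2) */
    z *= 0.5*m;                     /* Z <- 0.5*M*Z -> sqrt(2/a) */
  }
  return z;                         /* approximates sqrt(2.0/a), the same
                                       scaling as get_diag_Tmat's s_eigs */
}
/* e.g. schulz_scalar_isqrt(1.0, 8) ~= 1.4142136 ~= sqrt(2/1) */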
e737ca14dee21b34c5113dc97820c33bee95d7a4.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <helper_functions.h> #include <time.h> #include <math.h> #define AVE_CHARGE 32 /* average share of work per thread */ #define THREAD_SIZE_X 256 #define THREAD_SIZE_Y 32 #define THREAD_SIZE_Z 1 #define BLOCK_SIZE_X 16 #define BLOCK_SIZE_Y 8 #define BLOCK_SIZE_Z 1 #define thread_num THREAD_SIZE_X * THREAD_SIZE_Y * BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z #define list_num thread_num * AVE_CHARGE __global__ void prime_cal( unsigned int *device_result , unsigned int *start_num , unsigned int *end_num ); /* timer */ int timer(void){ time_t now = time(NULL); struct tm *pnow = localtime(&now); char buff[128]=""; sprintf(buff,"%d:%d:%d",pnow->tm_hour,pnow->tm_min,pnow->tm_sec); printf("%s\n",buff); return 0; } int main(int argc, char** argv){ FILE *outputfile; outputfile = fopen("./prime_data_cuda_uni.txt", "w"); if (outputfile == NULL) { printf("cannot open\n"); exit(1); } timer(); printf("Max : %d\n",list_num); printf("Thread : %d\n",thread_num); /* host-side variable setup */ unsigned int str_size = list_num * sizeof(unsigned int); unsigned int thread_size = thread_num * sizeof(unsigned int); unsigned int *host_result; /* device-side variable setup */ unsigned int *device_result; unsigned int *start_num; unsigned int *end_num; /* distribute the range so the amount of computation is equal across all threads (NOTE: two arrays of thread_num ints are ~8 MB of stack; raise the stack limit or heap-allocate if this crashes) */ unsigned int start_host[thread_num] = {0}; unsigned int end_host[thread_num] = {0}; double charge = pow(list_num,1.5) / thread_num; start_host[0] = 2; end_host[0] = pow(charge ,0.667); for ( unsigned int i = 1 ; i < thread_num ; i++ ){ start_host[i] = end_host[i-1]+1; end_host[i] = pow( (charge + pow(start_host[i],1.5)) , 0.667 ); if ( end_host[i] > list_num ){ start_host[i]=0; end_host[i]=0; break; } } /* allocate device memory */ checkCudaErrors(hipMalloc((void**)&device_result, str_size)); checkCudaErrors(hipMalloc((void**)&start_num, str_size)); checkCudaErrors(hipMalloc((void**)&end_num, str_size)); checkCudaErrors(hipMemcpy(start_num, start_host, thread_size , hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(end_num, end_host, thread_size , hipMemcpyHostToDevice)); /* set the block size and grid size */ dim3 threads(THREAD_SIZE_X,THREAD_SIZE_Y,THREAD_SIZE_Z); dim3 blocks(BLOCK_SIZE_X,BLOCK_SIZE_Y,BLOCK_SIZE_Z); /* create the timer and start measuring */ hipEvent_t start; hipEvent_t stop; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); checkCudaErrors(hipEventRecord(start, NULL)); /* launch the kernel */ hipLaunchKernelGGL(( prime_cal), dim3(blocks) , dim3(threads), 0, 0, device_result,start_num,end_num); hipDeviceSynchronize(); /* stop the timer and print the execution time */ checkCudaErrors(hipEventRecord(stop, NULL)); checkCudaErrors(hipEventSynchronize(stop)); float msecTotal = 0.0f; checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop)); printf("Processing time: %f (msec)\n", msecTotal); /* start the timer again */ checkCudaErrors(hipEventRecord(start, NULL)); /* allocate the result buffer and transfer memory back from the device */ host_result = (unsigned int*)malloc(str_size); checkCudaErrors(hipMemcpy(host_result, device_result, str_size , hipMemcpyDeviceToHost)); /* stop the timer and print the copy time */ checkCudaErrors(hipEventRecord(stop, NULL)); checkCudaErrors(hipEventSynchronize(stop)); checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop)); printf("Memory copy time: %f (msec)\n", msecTotal); printf("Now Writing...\n"); for( unsigned int l = 0; l < list_num ; l++ ){ if ( host_result[l] != 0 ){ fprintf(outputfile,"%u\n", host_result[l]); } } fclose(outputfile); /* free host and device memory */ free(host_result); checkCudaErrors(hipFree(device_result)); checkCudaErrors(hipFree(start_num)); checkCudaErrors(hipFree(end_num)); timer(); /* termination handling */ hipDeviceReset(); exit(1); } __global__ void prime_cal( unsigned int *device_result , unsigned int
*start_num , unsigned int *end_num ){ unsigned int dev = 0; unsigned int flag = 0; unsigned int list = 0; /* initialize the result array (every thread redundantly clears the whole array) */ for ( list = 0 ; list < list_num ; list++ ){ device_result[list] = 0; } /* assign the thread ID */ //unsigned int thread_x = blockIdx.x * blockDim.x + threadIdx.x; //unsigned int thread_y = blockIdx.y * blockDim.y + threadIdx.y; unsigned int thread_id = blockIdx.x * blockDim.x + threadIdx.x; /* perform the primality test over this thread's range */ for ( unsigned int scan_idx = start_num[thread_id] ; scan_idx < end_num[thread_id] + 1 ; scan_idx++ ) { flag = 0; if ( scan_idx == 1 ){ device_result[scan_idx] = 0; }else if ( scan_idx == 2 ){ device_result[scan_idx] = 2; }else if ( scan_idx % 2 == 0 ){ device_result[scan_idx] = 0; }else{ dev = 3; while ( ( dev * dev ) <= scan_idx ){ if ( scan_idx % dev == 0 ){ flag=1; break; } dev += 2; } if (flag == 0){ device_result[scan_idx] = scan_idx; }else if (flag == 1){ device_result[scan_idx] = 0; } } __syncthreads(); } }
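/* Editor note on the range splitting in main() above: trial division of n
   costs O(sqrt(n)) candidate divisors, so the cumulative cost of testing
   2..m grows like the integral of sqrt(x), i.e. proportional to m^1.5.
   Giving every thread an equal slice "charge" of that cumulative cost and
   solving end^1.5 = start^1.5 + charge for end gives
       end = (start^1.5 + charge)^(2/3)
   which is the pow(..., 0.667) expression in the host code, with
   charge = list_num^1.5 / thread_num (constant factors absorbed). */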
e737ca14dee21b34c5113dc97820c33bee95d7a4.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <helper_cuda.h> #include <helper_functions.h> #include <time.h> #include <math.h> #define AVE_CHARGE 32 /* average share of work per thread */ #define THREAD_SIZE_X 256 #define THREAD_SIZE_Y 32 #define THREAD_SIZE_Z 1 #define BLOCK_SIZE_X 16 #define BLOCK_SIZE_Y 8 #define BLOCK_SIZE_Z 1 #define thread_num THREAD_SIZE_X * THREAD_SIZE_Y * BLOCK_SIZE_X * BLOCK_SIZE_Y * BLOCK_SIZE_Z #define list_num thread_num * AVE_CHARGE __global__ void prime_cal( unsigned int *device_result , unsigned int *start_num , unsigned int *end_num ); /* timer */ int timer(void){ time_t now = time(NULL); struct tm *pnow = localtime(&now); char buff[128]=""; sprintf(buff,"%d:%d:%d",pnow->tm_hour,pnow->tm_min,pnow->tm_sec); printf("%s\n",buff); return 0; } int main(int argc, char** argv){ FILE *outputfile; outputfile = fopen("./prime_data_cuda_uni.txt", "w"); if (outputfile == NULL) { printf("cannot open\n"); exit(1); } timer(); printf("Max : %d\n",list_num); printf("Thread : %d\n",thread_num); /* host-side variable setup */ unsigned int str_size = list_num * sizeof(unsigned int); unsigned int thread_size = thread_num * sizeof(unsigned int); unsigned int *host_result; /* device-side variable setup */ unsigned int *device_result; unsigned int *start_num; unsigned int *end_num; /* distribute the range so the amount of computation is equal across all threads */ unsigned int start_host[thread_num] = {0}; unsigned int end_host[thread_num] = {0}; double charge = pow(list_num,1.5) / thread_num; start_host[0] = 2; end_host[0] = pow(charge ,0.667); for ( unsigned int i = 1 ; i < thread_num ; i++ ){ start_host[i] = end_host[i-1]+1; end_host[i] = pow( (charge + pow(start_host[i],1.5)) , 0.667 ); if ( end_host[i] > list_num ){ start_host[i]=0; end_host[i]=0; break; } } /* allocate device memory */ checkCudaErrors(cudaMalloc((void**)&device_result, str_size)); checkCudaErrors(cudaMalloc((void**)&start_num, str_size)); checkCudaErrors(cudaMalloc((void**)&end_num, str_size)); checkCudaErrors(cudaMemcpy(start_num, start_host, thread_size , cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(end_num, end_host, thread_size , cudaMemcpyHostToDevice)); /* set the block size and grid size */ dim3 threads(THREAD_SIZE_X,THREAD_SIZE_Y,THREAD_SIZE_Z); dim3 blocks(BLOCK_SIZE_X,BLOCK_SIZE_Y,BLOCK_SIZE_Z); /* create the timer and start measuring */ cudaEvent_t start; cudaEvent_t stop; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); checkCudaErrors(cudaEventRecord(start, NULL)); /* launch the kernel */ prime_cal<<<blocks , threads>>>(device_result,start_num,end_num); cudaThreadSynchronize(); /* stop the timer and print the execution time */ checkCudaErrors(cudaEventRecord(stop, NULL)); checkCudaErrors(cudaEventSynchronize(stop)); float msecTotal = 0.0f; checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop)); printf("Processing time: %f (msec)\n", msecTotal); /* start the timer again */ checkCudaErrors(cudaEventRecord(start, NULL)); /* allocate the result buffer and transfer memory back from the device */ host_result = (unsigned int*)malloc(str_size); checkCudaErrors(cudaMemcpy(host_result, device_result, str_size , cudaMemcpyDeviceToHost)); /* stop the timer and print the copy time */ checkCudaErrors(cudaEventRecord(stop, NULL)); checkCudaErrors(cudaEventSynchronize(stop)); checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop)); printf("Memory copy time: %f (msec)\n", msecTotal); printf("Now Writing...\n"); for( unsigned int l = 0; l < list_num ; l++ ){ if ( host_result[l] != 0 ){ fprintf(outputfile,"%u\n", host_result[l]); } } fclose(outputfile); /* free host and device memory */ free(host_result); checkCudaErrors(cudaFree(device_result)); checkCudaErrors(cudaFree(start_num)); checkCudaErrors(cudaFree(end_num)); timer(); /* termination handling */
cudaThreadExit(); exit(1); } __global__ void prime_cal( unsigned int *device_result , unsigned int *start_num , unsigned int *end_num ){ unsigned int dev = 0; unsigned int flag = 0; unsigned int list = 0; /* initialize the result array (every thread redundantly clears the whole array) */ for ( list = 0 ; list < list_num ; list++ ){ device_result[list] = 0; } /* assign the thread ID */ //unsigned int thread_x = blockIdx.x * blockDim.x + threadIdx.x; //unsigned int thread_y = blockIdx.y * blockDim.y + threadIdx.y; unsigned int thread_id = blockIdx.x * blockDim.x + threadIdx.x; /* perform the primality test over this thread's range */ for ( unsigned int scan_idx = start_num[thread_id] ; scan_idx < end_num[thread_id] + 1 ; scan_idx++ ) { flag = 0; if ( scan_idx == 1 ){ device_result[scan_idx] = 0; }else if ( scan_idx == 2 ){ device_result[scan_idx] = 2; }else if ( scan_idx % 2 == 0 ){ device_result[scan_idx] = 0; }else{ dev = 3; while ( ( dev * dev ) <= scan_idx ){ if ( scan_idx % dev == 0 ){ flag=1; break; } dev += 2; } if (flag == 0){ device_result[scan_idx] = scan_idx; }else if (flag == 1){ device_result[scan_idx] = 0; } } __syncthreads(); } }
69bcb166a7dd0bda354b004a53abba0ec5a12c65.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <sys/time.h> #include "support.h" #include <stdlib.h> __global__ void check(int * d_out , int * d_in) { extern __shared__ int ionCurrent[]; int d_NUM_DUST=10; /* NOTE: hard-coded bin count, but the host fills d_in with values up to h_NUM_DUST-1 and launches more threads than elements, so d_in[idx] and ionCurrent[R] can go out of bounds (see the bounds-checked sketch after the CUDA twin below) */ int thid = threadIdx.x; while(thid <d_NUM_DUST){ ionCurrent[thid]=0; thid+=blockDim.x; } __syncthreads(); int idx = blockIdx.x*blockDim.x + threadIdx.x; if( d_in[idx] > 0 ) {int R = d_in[idx]-1 ; atomicAdd(&(ionCurrent[R]),1); d_out[R]=ionCurrent[R]; //d_out[R]+=1; } } int main(int argc, char ** argv) { Timer timer; hipError_t cuda_ret; // Initialize host variables ---------------------------------------------- printf("\nSetting up the problem..."); fflush(stdout); startTime(&timer); // Initialize host variables ---------------------------------------------- int h_NUM_DUST = 1000000; int *ionCurrent = (int*)malloc(h_NUM_DUST * sizeof(int)); /* ~4 MB each; heap-allocated to avoid overflowing the default stack */ for (int k = 0; k <h_NUM_DUST; k++){ ionCurrent[k] = 0; } int *boundsIon = (int*)malloc(h_NUM_DUST * sizeof(int)); for(int k=0; k<h_NUM_DUST; k++){ if((rand()%5) <= 2){boundsIon[k]=0;} else { boundsIon[k]=rand()% h_NUM_DUST;} } const int ARRAY_BYTES =h_NUM_DUST * sizeof(int); // declare GPU memory pointers int * d_in; int * d_out; //stopTime(&timer); printf("%f s\n", elapsedTime(timer)); //allocate GPU memory; cuda_ret = hipMalloc((void **) &d_in, ARRAY_BYTES); if(cuda_ret !=hipSuccess) FATAL("unable to allocate device memory" ); cuda_ret =hipMalloc((void **) &d_out, ARRAY_BYTES); if(cuda_ret !=hipSuccess) FATAL("unable to allocate device memory" ); hipDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // transfer the array to the GPU printf("Copying data from host to device..."); fflush(stdout); startTime(&timer); cuda_ret = hipMemcpy(d_in, boundsIon , ARRAY_BYTES, hipMemcpyHostToDevice); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to device"); hipDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); //launch the kernel printf("Launching kernel..."); fflush(stdout); startTime(&timer); const unsigned int THREADS_PER_BLOCK = 512; const unsigned int numBlocks = (h_NUM_DUST-1)/THREADS_PER_BLOCK +1; dim3 gridDim(numBlocks, 1, 1), blockDim(THREADS_PER_BLOCK, 1, 1); /* NOTE: ARRAY_BYTES (~4 MB) of dynamic shared memory far exceeds the per-block limit, so this launch fails on real hardware */ hipLaunchKernelGGL(( check), dim3(numBlocks), dim3(THREADS_PER_BLOCK), ARRAY_BYTES, 0, d_out,d_in); cuda_ret = hipDeviceSynchronize(); if(cuda_ret != hipSuccess) FATAL("Unable to launch kernel"); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); //copy back the result array to the cpu printf("Copying data from device to host..."); fflush(stdout); startTime(&timer); cuda_ret = hipMemcpy(ionCurrent , d_out , ARRAY_BYTES, hipMemcpyDeviceToHost); if(cuda_ret != hipSuccess) FATAL("Unable to copy memory from device"); hipDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); /* printf("time of the end printing ...."); fflush(stdout); startTime(&timer); hipDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); */ //free GPU memory allocation /* printf("Verifying results..."); fflush(stdout); verify(A_h, B_h, C_h, matArow, matAcol, matBcol); */ hipFree(d_in); hipFree(d_out); free(ionCurrent); free(boundsIon); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); return 0; }
69bcb166a7dd0bda354b004a53abba0ec5a12c65.cu
#include <stdio.h> #include <sys/time.h> #include "support.h" #include <stdlib.h> __global__ void check(int * d_out , int * d_in) { extern __shared__ int ionCurrent[]; int d_NUM_DUST=10; /* NOTE: hard-coded bin count, but the host fills d_in with values up to h_NUM_DUST-1 and launches more threads than elements, so d_in[idx] and ionCurrent[R] can go out of bounds (a bounds-checked sketch follows this file) */ int thid = threadIdx.x; while(thid <d_NUM_DUST){ ionCurrent[thid]=0; thid+=blockDim.x; } __syncthreads(); int idx = blockIdx.x*blockDim.x + threadIdx.x; if( d_in[idx] > 0 ) {int R = d_in[idx]-1 ; atomicAdd(&(ionCurrent[R]),1); d_out[R]=ionCurrent[R]; //d_out[R]+=1; } } int main(int argc, char ** argv) { Timer timer; cudaError_t cuda_ret; // Initialize host variables ---------------------------------------------- printf("\nSetting up the problem..."); fflush(stdout); startTime(&timer); // Initialize host variables ---------------------------------------------- int h_NUM_DUST = 1000000; int *ionCurrent = (int*)malloc(h_NUM_DUST * sizeof(int)); /* ~4 MB each; heap-allocated to avoid overflowing the default stack */ for (int k = 0; k <h_NUM_DUST; k++){ ionCurrent[k] = 0; } int *boundsIon = (int*)malloc(h_NUM_DUST * sizeof(int)); for(int k=0; k<h_NUM_DUST; k++){ if((rand()%5) <= 2){boundsIon[k]=0;} else { boundsIon[k]=rand()% h_NUM_DUST;} } const int ARRAY_BYTES =h_NUM_DUST * sizeof(int); // declare GPU memory pointers int * d_in; int * d_out; //stopTime(&timer); printf("%f s\n", elapsedTime(timer)); //allocate GPU memory; cuda_ret = cudaMalloc((void **) &d_in, ARRAY_BYTES); if(cuda_ret !=cudaSuccess) FATAL("unable to allocate device memory" ); cuda_ret =cudaMalloc((void **) &d_out, ARRAY_BYTES); if(cuda_ret !=cudaSuccess) FATAL("unable to allocate device memory" ); cudaDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); // transfer the array to the GPU printf("Copying data from host to device..."); fflush(stdout); startTime(&timer); cuda_ret = cudaMemcpy(d_in, boundsIon , ARRAY_BYTES, cudaMemcpyHostToDevice); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to device"); cudaDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); //launch the kernel printf("Launching kernel..."); fflush(stdout); startTime(&timer); const unsigned int THREADS_PER_BLOCK = 512; const unsigned int numBlocks = (h_NUM_DUST-1)/THREADS_PER_BLOCK +1; dim3 gridDim(numBlocks, 1, 1), blockDim(THREADS_PER_BLOCK, 1, 1); /* NOTE: ARRAY_BYTES (~4 MB) of dynamic shared memory far exceeds the per-block limit, so this launch fails on real hardware */ check<<<numBlocks, THREADS_PER_BLOCK, ARRAY_BYTES>>>(d_out,d_in); cuda_ret = cudaDeviceSynchronize(); if(cuda_ret != cudaSuccess) FATAL("Unable to launch kernel"); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); //copy back the result array to the cpu printf("Copying data from device to host..."); fflush(stdout); startTime(&timer); cuda_ret = cudaMemcpy(ionCurrent , d_out , ARRAY_BYTES, cudaMemcpyDeviceToHost); if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory from device"); cudaDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); /* printf("time of the end printing ...."); fflush(stdout); startTime(&timer); cudaDeviceSynchronize(); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); */ //free GPU memory allocation /* printf("Verifying results..."); fflush(stdout); verify(A_h, B_h, C_h, matArow, matAcol, matBcol); */ cudaFree(d_in); cudaFree(d_out); free(ionCurrent); free(boundsIon); stopTime(&timer); printf("%f s\n", elapsedTime(timer)); return 0; }
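/* Editor sketch (hypothetical, not part of the original benchmark): the
   kernel above hard-codes d_NUM_DUST=10 for the shared histogram while the
   host fills d_in with values up to h_NUM_DUST-1 and launches more threads
   than elements, so both d_in[idx] and ionCurrent[R] can be accessed out of
   bounds, and the ARRAY_BYTES dynamic shared allocation exceeds the
   per-block shared-memory limit. A bounds-safe variant that takes the sizes
   as parameters and merges per-block partial histograms into global memory
   could look like this: */
__global__ void check_safe(int *d_out, const int *d_in, int n, int nbins)
{
    extern __shared__ int bins[];                  /* nbins ints, sized at launch */
    for (int t = threadIdx.x; t < nbins; t += blockDim.x) bins[t] = 0;
    __syncthreads();
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n && d_in[idx] > 0) {
        int R = d_in[idx] - 1;
        if (R < nbins) atomicAdd(&bins[R], 1);     /* guarded shared write */
    }
    __syncthreads();
    for (int t = threadIdx.x; t < nbins; t += blockDim.x)
        atomicAdd(&d_out[t], bins[t]);             /* merge into global histogram */
}
/* launch: check_safe<<<numBlocks, THREADS_PER_BLOCK, nbins*sizeof(int)>>>(d_out, d_in, n, nbins)
   with d_out zeroed first (e.g. cudaMemset) and nbins small enough for shared memory. */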
905cc4cee471c8951b14edf9588afb7449a175a3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Sample of __any_sync / __all_sync; the exact semantics are unclear, and these will probably not be used (or usable) any more. Deprecation notice: __any, __all, and __ballot have been deprecated in CUDA 9.0 for all devices. Removal notice: When targeting devices with compute capability 7.x or higher, __any, __all, and __ballot are no longer available and their sync variants should be used instead. */ #include <stdio.h> #include <stdlib.h> __global__ void anyatest(int *A,int *B) { unsigned int tx = threadIdx.x; //unsigned int mask = 0xffffffff; unsigned int mask = 0x0000001f; B[tx] = __any_sync(mask, A[tx]); } int main() { int N = 128; int* h_A = (int*)malloc(N * sizeof(int)); int* h_B = (int*)malloc(N * sizeof(int)); int *d_A,*d_B; hipMalloc(&d_A, N * sizeof(int)); hipMalloc(&d_B, N * sizeof(int)); for(int i=0;i<N;i++) h_A[i]=0; h_A[30]=1; h_A[31]=1; h_A[32+2]=1; //HostToDevice hipMemcpy(d_A, h_A, N * sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( anyatest) , dim3(1), dim3(N) , 0, 0, d_A,d_B); hipMemcpy(h_B, d_B, N * sizeof(int), hipMemcpyDeviceToHost); for(int i=0;i<N;i++){ printf("%d",h_B[i]); if (i%16==15)printf("\n"); } hipFree(d_A); hipFree(d_B); return 0; }
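/* Editor note on the semantics (the original comment found them unclear):
   __any_sync(mask, pred) returns non-zero iff pred is non-zero for at least
   one thread named in mask; all non-exited threads named in mask must
   execute the call with the same mask, and a thread whose lane bit is NOT
   set in mask must not execute the call at all. Here mask = 0x0000001f
   names lanes 0..4 of a warp, yet all 32 lanes of each of the 4 warps reach
   the call, so lanes 5..31 participate illegally and the result is
   undefined. Note also that h_A[30] and h_A[31] light up lanes outside the
   mask, while h_A[32+2] (lane 2 of warp 1) is inside it. */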
905cc4cee471c8951b14edf9588afb7449a175a3.cu
/* Sample of __any_sync / __all_sync; the exact semantics are unclear, and these will probably not be used (or usable) any more. Deprecation notice: __any, __all, and __ballot have been deprecated in CUDA 9.0 for all devices. Removal notice: When targeting devices with compute capability 7.x or higher, __any, __all, and __ballot are no longer available and their sync variants should be used instead. */ #include <stdio.h> #include <stdlib.h> __global__ void anyatest(int *A,int *B) { unsigned int tx = threadIdx.x; //unsigned int mask = 0xffffffff; unsigned int mask = 0x0000001f; B[tx] = __any_sync(mask, A[tx]); } int main() { int N = 128; int* h_A = (int*)malloc(N * sizeof(int)); int* h_B = (int*)malloc(N * sizeof(int)); int *d_A,*d_B; cudaMalloc(&d_A, N * sizeof(int)); cudaMalloc(&d_B, N * sizeof(int)); for(int i=0;i<N;i++) h_A[i]=0; h_A[30]=1; h_A[31]=1; h_A[32+2]=1; //HostToDevice cudaMemcpy(d_A, h_A, N * sizeof(int), cudaMemcpyHostToDevice); anyatest <<<1, N >>> (d_A,d_B); cudaMemcpy(h_B, d_B, N * sizeof(int), cudaMemcpyDeviceToHost); for(int i=0;i<N;i++){ printf("%d",h_B[i]); if (i%16==15)printf("\n"); } cudaFree(d_A); cudaFree(d_B); return 0; }