hip_filename | hip_content | cuda_filename | cuda_content
---|---|---|---|
e9033ba5dda1e3f07bdd7ed8de7ea57d2a523a96.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include "hip/hip_runtime.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
#include <rocm_smi/rocm_smi.h>
#include <assert.h>
void check_error (const char* message) {
hipError_t error = hipGetLastError ();
if (error != hipSuccess) {
printf ("CUDA error : %s, %s\n", message, hipGetErrorString (error));
exit(-1);
}
}
__global__ void hypterm (float * __restrict__ flux_0, float * __restrict__ flux_1, float * __restrict__ flux_2, float * __restrict__ flux_3, float * __restrict__ flux_4, float * __restrict__ cons_1, float * __restrict__ cons_2, float * __restrict__ cons_3, float * __restrict__ cons_4, float * __restrict__ q_1, float * __restrict__ q_2, float * __restrict__ q_3, float * __restrict__ q_4, float dxinv0, float dxinv1, float dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i-8);
int i = i0 + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j-8);
int j = j0 + (int)(threadIdx.y);
//Declarations
float reg_cons_1_m4=0, reg_cons_1_m3=0, reg_cons_1_m2=0, reg_cons_1_m1=0, __shared__ sh_cons_1_c0[16][32], reg_cons_1_p1=0, reg_cons_1_p2=0, reg_cons_1_p3=0, reg_cons_1_p4=0;
float reg_cons_2_m4=0, reg_cons_2_m3=0, reg_cons_2_m2=0, reg_cons_2_m1=0, __shared__ sh_cons_2_c0[16][32], reg_cons_2_p1=0, reg_cons_2_p2=0, reg_cons_2_p3=0, reg_cons_2_p4=0;
float reg_cons_3_m4=0, reg_cons_3_m3=0, reg_cons_3_m2=0, reg_cons_3_m1=0, __shared__ sh_cons_3_c0[16][32], reg_cons_3_p1=0, reg_cons_3_p2=0, reg_cons_3_p3=0, reg_cons_3_p4=0;
float reg_cons_4_m4=0, reg_cons_4_m3=0, reg_cons_4_m2=0, reg_cons_4_m1=0, __shared__ sh_cons_4_c0[16][32], reg_cons_4_p1=0, reg_cons_4_p2=0, reg_cons_4_p3=0, reg_cons_4_p4=0;
float __shared__ sh_q_1_c0[16][32];
float __shared__ sh_q_2_c0[16][32];
float reg_q_3_m4=0, reg_q_3_m3=0, reg_q_3_m2=0, reg_q_3_m1=0, reg_q_3_c0=0, reg_q_3_p1=0, reg_q_3_p2=0, reg_q_3_p3=0, reg_q_3_p4=0;
float reg_q_4_m4=0, reg_q_4_m3=0, reg_q_4_m2=0, reg_q_4_m1=0, __shared__ sh_q_4_c0[16][32], reg_q_4_p1=0, reg_q_4_p2=0, reg_q_4_p3=0, reg_q_4_p4=0;
//Value Initialization
if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) {
reg_cons_1_m4 = cons_1[0 + j*N + i];
reg_cons_1_m3 = cons_1[1*M*N + j*N + i];
reg_cons_1_m2 = cons_1[2*M*N + j*N + i];
reg_cons_1_m1 = cons_1[3*M*N + j*N + i];
sh_cons_1_c0[j-j0][i-i0] = cons_1[4*M*N + j*N + i];
reg_cons_1_p1 = cons_1[5*M*N + j*N + i];
reg_cons_1_p2 = cons_1[6*M*N + j*N + i];
reg_cons_1_p3 = cons_1[7*M*N + j*N + i];
reg_cons_2_m4 = cons_2[0 + j*N + i];
reg_cons_2_m3 = cons_2[1*M*N + j*N + i];
reg_cons_2_m2 = cons_2[2*M*N + j*N + i];
reg_cons_2_m1 = cons_2[3*M*N + j*N + i];
sh_cons_2_c0[j-j0][i-i0] = cons_2[4*M*N + j*N + i];
reg_cons_2_p1 = cons_2[5*M*N + j*N + i];
reg_cons_2_p2 = cons_2[6*M*N + j*N + i];
reg_cons_2_p3 = cons_2[7*M*N + j*N + i];
reg_cons_3_m4 = cons_3[0 + j*N + i];
reg_cons_3_m3 = cons_3[1*M*N + j*N + i];
reg_cons_3_m2 = cons_3[2*M*N + j*N + i];
reg_cons_3_m1 = cons_3[3*M*N + j*N + i];
sh_cons_3_c0[j-j0][i-i0] = cons_3[4*M*N + j*N + i];
reg_cons_3_p1 = cons_3[5*M*N + j*N + i];
reg_cons_3_p2 = cons_3[6*M*N + j*N + i];
reg_cons_3_p3 = cons_3[7*M*N + j*N + i];
reg_cons_4_m4 = cons_4[0 + j*N + i];
reg_cons_4_m3 = cons_4[1*M*N + j*N + i];
reg_cons_4_m2 = cons_4[2*M*N + j*N + i];
reg_cons_4_m1 = cons_4[3*M*N + j*N + i];
sh_cons_4_c0[j-j0][i-i0] = cons_4[4*M*N + j*N + i];
reg_cons_4_p1 = cons_4[5*M*N + j*N + i];
reg_cons_4_p2 = cons_4[6*M*N + j*N + i];
reg_cons_4_p3 = cons_4[7*M*N + j*N + i];
reg_q_3_m4 = q_3[0 + j*N + i];
reg_q_3_m3 = q_3[1*M*N + j*N + i];
reg_q_3_m2 = q_3[2*M*N + j*N + i];
reg_q_3_m1 = q_3[3*M*N + j*N + i];
reg_q_3_c0 = q_3[4*M*N + j*N + i];
reg_q_3_p1 = q_3[5*M*N + j*N + i];
reg_q_3_p2 = q_3[6*M*N + j*N + i];
reg_q_3_p3 = q_3[7*M*N + j*N + i];
reg_q_4_m4 = q_4[0 + j*N + i];
reg_q_4_m3 = q_4[1*M*N + j*N + i];
reg_q_4_m2 = q_4[2*M*N + j*N + i];
reg_q_4_m1 = q_4[3*M*N + j*N + i];
sh_q_4_c0[j-j0][i-i0] = q_4[4*M*N + j*N + i];
reg_q_4_p1 = q_4[5*M*N + j*N + i];
reg_q_4_p2 = q_4[6*M*N + j*N + i];
reg_q_4_p3 = q_4[7*M*N + j*N + i];
}
//Rest of the computation
for (int k=4; k<=L-5; ++k) {
//Fetch new plane
if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) {
reg_cons_1_p4 = cons_1[(k+4)*M*N + j*N + i];
reg_cons_2_p4 = cons_2[(k+4)*M*N + j*N + i];
reg_cons_3_p4 = cons_3[(k+4)*M*N + j*N + i];
reg_cons_4_p4 = cons_4[(k+4)*M*N + j*N + i];
sh_q_1_c0[j-j0][i-i0] = q_1[k*M*N + j*N + i];
sh_q_2_c0[j-j0][i-i0] = q_2[k*M*N + j*N + i];
reg_q_3_p4 = q_3[(k+4)*M*N + j*N + i];
reg_q_4_p4 = q_4[(k+4)*M*N + j*N + i];
}
__syncthreads ();
if (j >= j0+4 & j <= min (j0+blockdim_j-5, M-1) & i >= i0+4 & i <= min (i0+blockdim_i-5, N-5)) {
float f0 = -(((((0.8f * (sh_cons_1_c0[j-j0][i-i0+1] - sh_cons_1_c0[j-j0][i-i0-1])) - (0.2f * (sh_cons_1_c0[j-j0][i-i0+2] - sh_cons_1_c0[j-j0][i-i0-2]))) + (0.038f * (sh_cons_1_c0[j-j0][i-i0+3] - sh_cons_1_c0[j-j0][i-i0-3]))) - (0.0035f * (sh_cons_1_c0[j-j0][i-i0+4] - sh_cons_1_c0[j-j0][i-i0-4]))) * dxinv0);
f0 -= (((((0.8f * (sh_cons_2_c0[j-j0+1][i-i0] - sh_cons_2_c0[j-j0-1][i-i0])) - (0.2f * (sh_cons_2_c0[j-j0+2][i-i0] - sh_cons_2_c0[j-j0-2][i-i0]))) + (0.038f * (sh_cons_2_c0[j-j0+3][i-i0] - sh_cons_2_c0[j-j0-3][i-i0]))) - (0.0035f * (sh_cons_2_c0[j-j0+4][i-i0] - sh_cons_2_c0[j-j0-4][i-i0]))) * dxinv1);
f0 -= (((((0.8f * (reg_cons_3_p1 - reg_cons_3_m1)) - (0.2f * (reg_cons_3_p2 - reg_cons_3_m2))) + (0.038f * (reg_cons_3_p3 - reg_cons_3_m3))) - (0.0035f * (reg_cons_3_p4 - reg_cons_3_m4))) * dxinv2);
flux_0[k*M*N + j*N + i] = f0;
float f1 = -(((((0.8f * (((sh_cons_1_c0[j-j0][i-i0+1] * sh_q_1_c0[j-j0][i-i0+1]) - (sh_cons_1_c0[j-j0][i-i0-1] * sh_q_1_c0[j-j0][i-i0-1])) + (sh_q_4_c0[j-j0][i-i0+1] - sh_q_4_c0[j-j0][i-i0-1]))) - (0.2f * (((sh_cons_1_c0[j-j0][i-i0+2] * sh_q_1_c0[j-j0][i-i0+2]) - (sh_cons_1_c0[j-j0][i-i0-2] * sh_q_1_c0[j-j0][i-i0-2])) + (sh_q_4_c0[j-j0][i-i0+2] - sh_q_4_c0[j-j0][i-i0-2])))) + (0.038f * (((sh_cons_1_c0[j-j0][i-i0+3] * sh_q_1_c0[j-j0][i-i0+3]) - (sh_cons_1_c0[j-j0][i-i0-3] * sh_q_1_c0[j-j0][i-i0-3])) + (sh_q_4_c0[j-j0][i-i0+3] - sh_q_4_c0[j-j0][i-i0-3])))) - (0.0035f * (((sh_cons_1_c0[j-j0][i-i0+4] * sh_q_1_c0[j-j0][i-i0+4]) - (sh_cons_1_c0[j-j0][i-i0-4] * sh_q_1_c0[j-j0][i-i0-4])) + (sh_q_4_c0[j-j0][i-i0+4] - sh_q_4_c0[j-j0][i-i0-4])))) * dxinv0);
f1 -= (((((0.8f * ((sh_cons_1_c0[j-j0+1][i-i0] * sh_q_2_c0[j-j0+1][i-i0]) - (sh_cons_1_c0[j-j0-1][i-i0] * sh_q_2_c0[j-j0-1][i-i0]))) - (0.2f * ((sh_cons_1_c0[j-j0+2][i-i0] * sh_q_2_c0[j-j0+2][i-i0]) - (sh_cons_1_c0[j-j0-2][i-i0] * sh_q_2_c0[j-j0-2][i-i0])))) + (0.038f * ((sh_cons_1_c0[j-j0+3][i-i0] * sh_q_2_c0[j-j0+3][i-i0]) - (sh_cons_1_c0[j-j0-3][i-i0] * sh_q_2_c0[j-j0-3][i-i0])))) - (0.0035f * ((sh_cons_1_c0[j-j0+4][i-i0] * sh_q_2_c0[j-j0+4][i-i0]) - (sh_cons_1_c0[j-j0-4][i-i0] * sh_q_2_c0[j-j0-4][i-i0])))) * dxinv1);
f1 -= (((((0.8f * ((reg_cons_1_p1 * reg_q_3_p1) - (reg_cons_1_m1 * reg_q_3_m1))) - (0.2f * ((reg_cons_1_p2 * reg_q_3_p2) - (reg_cons_1_m2 * reg_q_3_m2)))) + (0.038f * ((reg_cons_1_p3 * reg_q_3_p3) - (reg_cons_1_m3 * reg_q_3_m3)))) - (0.0035f * ((reg_cons_1_p4 * reg_q_3_p4) - (reg_cons_1_m4 * reg_q_3_m4)))) * dxinv2);
flux_1[k*M*N + j*N + i] = f1;
float f2 = -(((((0.8f * ((sh_cons_2_c0[j-j0][i-i0+1] * sh_q_1_c0[j-j0][i-i0+1]) - (sh_cons_2_c0[j-j0][i-i0-1] * sh_q_1_c0[j-j0][i-i0-1]))) - (0.2f * ((sh_cons_2_c0[j-j0][i-i0+2] * sh_q_1_c0[j-j0][i-i0+2]) - (sh_cons_2_c0[j-j0][i-i0-2] * sh_q_1_c0[j-j0][i-i0-2])))) + (0.038f * ((sh_cons_2_c0[j-j0][i-i0+3] * sh_q_1_c0[j-j0][i-i0+3]) - (sh_cons_2_c0[j-j0][i-i0-3] * sh_q_1_c0[j-j0][i-i0-3])))) - (0.0035f * ((sh_cons_2_c0[j-j0][i-i0+4] * sh_q_1_c0[j-j0][i-i0+4]) - (sh_cons_2_c0[j-j0][i-i0-4] * sh_q_1_c0[j-j0][i-i0-4])))) * dxinv0);
f2 -= (((((0.8f * (((sh_cons_2_c0[j-j0+1][i-i0] * sh_q_2_c0[j-j0+1][i-i0]) - (sh_cons_2_c0[j-j0-1][i-i0] * sh_q_2_c0[j-j0-1][i-i0])) + (sh_q_4_c0[j-j0+1][i-i0] - sh_q_4_c0[j-j0-1][i-i0]))) - (0.2f * (((sh_cons_2_c0[j-j0+2][i-i0] * sh_q_2_c0[j-j0+2][i-i0]) - (sh_cons_2_c0[j-j0-2][i-i0] * sh_q_2_c0[j-j0-2][i-i0])) + (sh_q_4_c0[j-j0+2][i-i0] - sh_q_4_c0[j-j0-2][i-i0])))) + (0.038f * (((sh_cons_2_c0[j-j0+3][i-i0] * sh_q_2_c0[j-j0+3][i-i0]) - (sh_cons_2_c0[j-j0-3][i-i0] * sh_q_2_c0[j-j0-3][i-i0])) + (sh_q_4_c0[j-j0+3][i-i0] - sh_q_4_c0[j-j0-3][i-i0])))) - (0.0035f * (((sh_cons_2_c0[j-j0+4][i-i0] * sh_q_2_c0[j-j0+4][i-i0]) - (sh_cons_2_c0[j-j0-4][i-i0] * sh_q_2_c0[j-j0-4][i-i0])) + (sh_q_4_c0[j-j0+4][i-i0] - sh_q_4_c0[j-j0-4][i-i0])))) * dxinv1);
f2 -= (((((0.8f * ((reg_cons_2_p1 * reg_q_3_p1) - (reg_cons_2_m1 * reg_q_3_m1))) - (0.2f * ((reg_cons_2_p2 * reg_q_3_p2) - (reg_cons_2_m2 * reg_q_3_m2)))) + (0.038f * ((reg_cons_2_p3 * reg_q_3_p3) - (reg_cons_2_m3 * reg_q_3_m3)))) - (0.0035f * ((reg_cons_2_p4 * reg_q_3_p4) - (reg_cons_2_m4 * reg_q_3_m4)))) * dxinv2);
flux_2[k*M*N + j*N + i] = f2;
float f3 = -(((((0.8f * ((sh_cons_3_c0[j-j0][i-i0+1] * sh_q_1_c0[j-j0][i-i0+1]) - (sh_cons_3_c0[j-j0][i-i0-1] * sh_q_1_c0[j-j0][i-i0-1]))) - (0.2f * ((sh_cons_3_c0[j-j0][i-i0+2] * sh_q_1_c0[j-j0][i-i0+2]) - (sh_cons_3_c0[j-j0][i-i0-2] * sh_q_1_c0[j-j0][i-i0-2])))) + (0.038f * ((sh_cons_3_c0[j-j0][i-i0+3] * sh_q_1_c0[j-j0][i-i0+3]) - (sh_cons_3_c0[j-j0][i-i0-3] * sh_q_1_c0[j-j0][i-i0-3])))) - (0.0035f * ((sh_cons_3_c0[j-j0][i-i0+4] * sh_q_1_c0[j-j0][i-i0+4]) - (sh_cons_3_c0[j-j0][i-i0-4] * sh_q_1_c0[j-j0][i-i0-4])))) * dxinv0);
f3 -= (((((0.8f * ((sh_cons_3_c0[j-j0+1][i-i0] * sh_q_2_c0[j-j0+1][i-i0]) - (sh_cons_3_c0[j-j0-1][i-i0] * sh_q_2_c0[j-j0-1][i-i0]))) - (0.2f * ((sh_cons_3_c0[j-j0+2][i-i0] * sh_q_2_c0[j-j0+2][i-i0]) - (sh_cons_3_c0[j-j0-2][i-i0] * sh_q_2_c0[j-j0-2][i-i0])))) + (0.038f * ((sh_cons_3_c0[j-j0+3][i-i0] * sh_q_2_c0[j-j0+3][i-i0]) - (sh_cons_3_c0[j-j0-3][i-i0] * sh_q_2_c0[j-j0-3][i-i0])))) - (0.0035f * ((sh_cons_3_c0[j-j0+4][i-i0] * sh_q_2_c0[j-j0+4][i-i0]) - (sh_cons_3_c0[j-j0-4][i-i0] * sh_q_2_c0[j-j0-4][i-i0])))) * dxinv1);
f3 -= (((((0.8f * (((reg_cons_3_p1 * reg_q_3_p1) - (reg_cons_3_m1 * reg_q_3_m1)) + (reg_q_4_p1 - reg_q_4_m1))) - (0.2f * (((reg_cons_3_p2 * reg_q_3_p2) - (reg_cons_3_m2 * reg_q_3_m2)) + (reg_q_4_p2 - reg_q_4_m2)))) + (0.038f * (((reg_cons_3_p3 * reg_q_3_p3) - (reg_cons_3_m3 * reg_q_3_m3)) + (reg_q_4_p3 - reg_q_4_m3)))) - (0.0035f * (((reg_cons_3_p4 * reg_q_3_p4) - (reg_cons_3_m4 * reg_q_3_m4)) + (reg_q_4_p4 - reg_q_4_m4)))) * dxinv2);
flux_3[k*M*N + j*N + i] = f3;
float f4 = -(((((0.8f * (((sh_cons_4_c0[j-j0][i-i0+1] * sh_q_1_c0[j-j0][i-i0+1]) - (sh_cons_4_c0[j-j0][i-i0-1] * sh_q_1_c0[j-j0][i-i0-1])) + ((sh_q_4_c0[j-j0][i-i0+1] * sh_q_1_c0[j-j0][i-i0+1]) - (sh_q_4_c0[j-j0][i-i0-1] * sh_q_1_c0[j-j0][i-i0-1])))) - (0.2f * (((sh_cons_4_c0[j-j0][i-i0+2] * sh_q_1_c0[j-j0][i-i0+2]) - (sh_cons_4_c0[j-j0][i-i0-2] * sh_q_1_c0[j-j0][i-i0-2])) + ((sh_q_4_c0[j-j0][i-i0+2] * sh_q_1_c0[j-j0][i-i0+2]) - (sh_q_4_c0[j-j0][i-i0-2] * sh_q_1_c0[j-j0][i-i0-2]))))) + (0.038f * (((sh_cons_4_c0[j-j0][i-i0+3] * sh_q_1_c0[j-j0][i-i0+3]) - (sh_cons_4_c0[j-j0][i-i0-3] * sh_q_1_c0[j-j0][i-i0-3])) + ((sh_q_4_c0[j-j0][i-i0+3] * sh_q_1_c0[j-j0][i-i0+3]) - (sh_q_4_c0[j-j0][i-i0-3] * sh_q_1_c0[j-j0][i-i0-3]))))) - (0.0035f * (((sh_cons_4_c0[j-j0][i-i0+4] * sh_q_1_c0[j-j0][i-i0+4]) - (sh_cons_4_c0[j-j0][i-i0-4] * sh_q_1_c0[j-j0][i-i0-4])) + ((sh_q_4_c0[j-j0][i-i0+4] * sh_q_1_c0[j-j0][i-i0+4]) - (sh_q_4_c0[j-j0][i-i0-4] * sh_q_1_c0[j-j0][i-i0-4]))))) * dxinv0);
f4 -= (((((0.8f * (((sh_cons_4_c0[j-j0+1][i-i0] * sh_q_2_c0[j-j0+1][i-i0]) - (sh_cons_4_c0[j-j0-1][i-i0] * sh_q_2_c0[j-j0-1][i-i0])) + ((sh_q_4_c0[j-j0+1][i-i0] * sh_q_2_c0[j-j0+1][i-i0]) - (sh_q_4_c0[j-j0-1][i-i0] * sh_q_2_c0[j-j0-1][i-i0])))) - (0.2f * (((sh_cons_4_c0[j-j0+2][i-i0] * sh_q_2_c0[j-j0+2][i-i0]) - (sh_cons_4_c0[j-j0-2][i-i0] * sh_q_2_c0[j-j0-2][i-i0])) + ((sh_q_4_c0[j-j0+2][i-i0] * sh_q_2_c0[j-j0+2][i-i0]) - (sh_q_4_c0[j-j0-2][i-i0] * sh_q_2_c0[j-j0-2][i-i0]))))) + (0.038f * (((sh_cons_4_c0[j-j0+3][i-i0] * sh_q_2_c0[j-j0+3][i-i0]) - (sh_cons_4_c0[j-j0-3][i-i0] * sh_q_2_c0[j-j0-3][i-i0])) + ((sh_q_4_c0[j-j0+3][i-i0] * sh_q_2_c0[j-j0+3][i-i0]) - (sh_q_4_c0[j-j0-3][i-i0] * sh_q_2_c0[j-j0-3][i-i0]))))) - (0.0035f * (((sh_cons_4_c0[j-j0+4][i-i0] * sh_q_2_c0[j-j0+4][i-i0]) - (sh_cons_4_c0[j-j0-4][i-i0] * sh_q_2_c0[j-j0-4][i-i0])) + ((sh_q_4_c0[j-j0+4][i-i0] * sh_q_2_c0[j-j0+4][i-i0]) - (sh_q_4_c0[j-j0-4][i-i0] * sh_q_2_c0[j-j0-4][i-i0]))))) * dxinv1);
f4 -= (((((0.8f * (((reg_cons_4_p1 * reg_q_3_p1) - (reg_cons_4_m1 * reg_q_3_m1)) + ((reg_q_4_p1 * reg_q_3_p1) - (reg_q_4_m1 * reg_q_3_m1)))) - (0.2f * (((reg_cons_4_p2 * reg_q_3_p2) - (reg_cons_4_m2 * reg_q_3_m2)) + ((reg_q_4_p2 * reg_q_3_p2) - (reg_q_4_m2 * reg_q_3_m2))))) + (0.038f * (((reg_cons_4_p3 * reg_q_3_p3) - (reg_cons_4_m3 * reg_q_3_m3)) + ((reg_q_4_p3 * reg_q_3_p3) - (reg_q_4_m3 * reg_q_3_m3))))) - (0.0035f * (((reg_cons_4_p4 * reg_q_3_p4) - (reg_cons_4_m4 * reg_q_3_m4)) + ((reg_q_4_p4 * reg_q_3_p4) - (reg_q_4_m4 * reg_q_3_m4))))) * dxinv2);
flux_4[k*M*N + j*N + i] = f4;
}
__syncthreads ();
//Value rotation
reg_cons_1_m4 = reg_cons_1_m3;
reg_cons_1_m3 = reg_cons_1_m2;
reg_cons_1_m2 = reg_cons_1_m1;
reg_cons_1_m1 = sh_cons_1_c0[j-j0][i-i0];
sh_cons_1_c0[j-j0][i-i0] = reg_cons_1_p1;
reg_cons_1_p1 = reg_cons_1_p2;
reg_cons_1_p2 = reg_cons_1_p3;
reg_cons_1_p3 = reg_cons_1_p4;
reg_cons_2_m4 = reg_cons_2_m3;
reg_cons_2_m3 = reg_cons_2_m2;
reg_cons_2_m2 = reg_cons_2_m1;
reg_cons_2_m1 = sh_cons_2_c0[j-j0][i-i0];
sh_cons_2_c0[j-j0][i-i0] = reg_cons_2_p1;
reg_cons_2_p1 = reg_cons_2_p2;
reg_cons_2_p2 = reg_cons_2_p3;
reg_cons_2_p3 = reg_cons_2_p4;
reg_cons_3_m4 = reg_cons_3_m3;
reg_cons_3_m3 = reg_cons_3_m2;
reg_cons_3_m2 = reg_cons_3_m1;
reg_cons_3_m1 = sh_cons_3_c0[j-j0][i-i0];
sh_cons_3_c0[j-j0][i-i0] = reg_cons_3_p1;
reg_cons_3_p1 = reg_cons_3_p2;
reg_cons_3_p2 = reg_cons_3_p3;
reg_cons_3_p3 = reg_cons_3_p4;
reg_cons_4_m4 = reg_cons_4_m3;
reg_cons_4_m3 = reg_cons_4_m2;
reg_cons_4_m2 = reg_cons_4_m1;
reg_cons_4_m1 = sh_cons_4_c0[j-j0][i-i0];
sh_cons_4_c0[j-j0][i-i0] = reg_cons_4_p1;
reg_cons_4_p1 = reg_cons_4_p2;
reg_cons_4_p2 = reg_cons_4_p3;
reg_cons_4_p3 = reg_cons_4_p4;
reg_q_3_m4 = reg_q_3_m3;
reg_q_3_m3 = reg_q_3_m2;
reg_q_3_m2 = reg_q_3_m1;
reg_q_3_m1 = reg_q_3_c0;
reg_q_3_c0 = reg_q_3_p1;
reg_q_3_p1 = reg_q_3_p2;
reg_q_3_p2 = reg_q_3_p3;
reg_q_3_p3 = reg_q_3_p4;
reg_q_4_m4 = reg_q_4_m3;
reg_q_4_m3 = reg_q_4_m2;
reg_q_4_m2 = reg_q_4_m1;
reg_q_4_m1 = sh_q_4_c0[j-j0][i-i0];
sh_q_4_c0[j-j0][i-i0] = reg_q_4_p1;
reg_q_4_p1 = reg_q_4_p2;
reg_q_4_p2 = reg_q_4_p3;
reg_q_4_p3 = reg_q_4_p4;
}
}
extern "C" void host_code (float *h_flux_0, float *h_flux_1, float *h_flux_2, float *h_flux_3, float *h_flux_4, float *h_cons_1, float *h_cons_2, float *h_cons_3, float *h_cons_4, float *h_q_1, float *h_q_2, float *h_q_3, float *h_q_4, float dxinv0, float dxinv1, float dxinv2, int L, int M, int N) {
float *flux_0;
hipMalloc (&flux_0, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for flux_0\n");
hipMemcpy (flux_0, h_flux_0, sizeof(float)*L*M*N, hipMemcpyHostToDevice);
float *flux_1;
hipMalloc (&flux_1, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for flux_1\n");
hipMemcpy (flux_1, h_flux_1, sizeof(float)*L*M*N, hipMemcpyHostToDevice);
float *flux_2;
hipMalloc (&flux_2, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for flux_2\n");
hipMemcpy (flux_2, h_flux_2, sizeof(float)*L*M*N, hipMemcpyHostToDevice);
float *flux_3;
hipMalloc (&flux_3, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for flux_3\n");
hipMemcpy (flux_3, h_flux_3, sizeof(float)*L*M*N, hipMemcpyHostToDevice);
float *flux_4;
hipMalloc (&flux_4, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for flux_4\n");
hipMemcpy (flux_4, h_flux_4, sizeof(float)*L*M*N, hipMemcpyHostToDevice);
float *cons_1;
hipMalloc (&cons_1, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for cons_1\n");
hipMemcpy (cons_1, h_cons_1, sizeof(float)*L*M*N, hipMemcpyHostToDevice);
float *cons_2;
hipMalloc (&cons_2, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for cons_2\n");
hipMemcpy (cons_2, h_cons_2, sizeof(float)*L*M*N, hipMemcpyHostToDevice);
float *cons_3;
hipMalloc (&cons_3, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for cons_3\n");
hipMemcpy (cons_3, h_cons_3, sizeof(float)*L*M*N, hipMemcpyHostToDevice);
float *cons_4;
hipMalloc (&cons_4, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for cons_4\n");
hipMemcpy (cons_4, h_cons_4, sizeof(float)*L*M*N, hipMemcpyHostToDevice);
float *q_1;
hipMalloc (&q_1, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for q_1\n");
hipMemcpy (q_1, h_q_1, sizeof(float)*L*M*N, hipMemcpyHostToDevice);
float *q_2;
hipMalloc (&q_2, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for q_2\n");
hipMemcpy (q_2, h_q_2, sizeof(float)*L*M*N, hipMemcpyHostToDevice);
float *q_3;
hipMalloc (&q_3, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for q_3\n");
hipMemcpy (q_3, h_q_3, sizeof(float)*L*M*N, hipMemcpyHostToDevice);
float *q_4;
hipMalloc (&q_4, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for q_4\n");
hipMemcpy (q_4, h_q_4, sizeof(float)*L*M*N, hipMemcpyHostToDevice);
dim3 blockconfig_1 (32, 16, 1);
dim3 gridconfig_1 (ceil(N, blockconfig_1.x-8), ceil(M, blockconfig_1.y-8), 1);
unsigned int power1, power2;
rsmi_status_t result;
uint32_t device;
nvmlEnableState_t mode;
result=nvmlInit();
result = nvmlDeviceGetHandleByIndex(0, &device);
assert(RSMI_STATUS_SUCCESS == result);
result=nvmlDeviceGetPowerManagementMode(device, &mode);
printf("enabled = %d\n", mode);
result=nvmlDeviceGetPowerUsage(device,&power1);
assert(RSMI_STATUS_SUCCESS == result);
hipDeviceSynchronize();
for (int x=0; x<500; x++) {
hipLaunchKernelGGL(( hypterm) , dim3(gridconfig_1), dim3(blockconfig_1), 0, 0, flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
}
hipDeviceSynchronize();
result=nvmlDeviceGetPowerUsage(device,&power2);
assert(RSMI_STATUS_SUCCESS == result);
power2 -= power1;
printf("%u\n", power2);
nvmlShutdown();
hipMemcpy (h_flux_0, flux_0, sizeof(float)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_1, flux_1, sizeof(float)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_2, flux_2, sizeof(float)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_3, flux_3, sizeof(float)*L*M*N, hipMemcpyDeviceToHost);
hipMemcpy (h_flux_4, flux_4, sizeof(float)*L*M*N, hipMemcpyDeviceToHost);
//Free allocated memory
hipFree (flux_0);
hipFree (flux_1);
hipFree (flux_2);
hipFree (flux_3);
hipFree (flux_4);
hipFree (cons_1);
hipFree (cons_2);
hipFree (cons_3);
hipFree (cons_4);
hipFree (q_1);
hipFree (q_2);
hipFree (q_3);
hipFree (q_4);
}
| e9033ba5dda1e3f07bdd7ed8de7ea57d2a523a96.cu | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
#include <nvml.h>
#include <assert.h>
void check_error (const char* message) {
cudaError_t error = cudaGetLastError ();
if (error != cudaSuccess) {
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
exit(-1);
}
}
__global__ void hypterm (float * __restrict__ flux_0, float * __restrict__ flux_1, float * __restrict__ flux_2, float * __restrict__ flux_3, float * __restrict__ flux_4, float * __restrict__ cons_1, float * __restrict__ cons_2, float * __restrict__ cons_3, float * __restrict__ cons_4, float * __restrict__ q_1, float * __restrict__ q_2, float * __restrict__ q_3, float * __restrict__ q_4, float dxinv0, float dxinv1, float dxinv2, int L, int M, int N) {
//Determining the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i-8);
int i = i0 + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j-8);
int j = j0 + (int)(threadIdx.y);
//Declarations
float reg_cons_1_m4=0, reg_cons_1_m3=0, reg_cons_1_m2=0, reg_cons_1_m1=0, __shared__ sh_cons_1_c0[16][32], reg_cons_1_p1=0, reg_cons_1_p2=0, reg_cons_1_p3=0, reg_cons_1_p4=0;
float reg_cons_2_m4=0, reg_cons_2_m3=0, reg_cons_2_m2=0, reg_cons_2_m1=0, __shared__ sh_cons_2_c0[16][32], reg_cons_2_p1=0, reg_cons_2_p2=0, reg_cons_2_p3=0, reg_cons_2_p4=0;
float reg_cons_3_m4=0, reg_cons_3_m3=0, reg_cons_3_m2=0, reg_cons_3_m1=0, __shared__ sh_cons_3_c0[16][32], reg_cons_3_p1=0, reg_cons_3_p2=0, reg_cons_3_p3=0, reg_cons_3_p4=0;
float reg_cons_4_m4=0, reg_cons_4_m3=0, reg_cons_4_m2=0, reg_cons_4_m1=0, __shared__ sh_cons_4_c0[16][32], reg_cons_4_p1=0, reg_cons_4_p2=0, reg_cons_4_p3=0, reg_cons_4_p4=0;
float __shared__ sh_q_1_c0[16][32];
float __shared__ sh_q_2_c0[16][32];
float reg_q_3_m4=0, reg_q_3_m3=0, reg_q_3_m2=0, reg_q_3_m1=0, reg_q_3_c0=0, reg_q_3_p1=0, reg_q_3_p2=0, reg_q_3_p3=0, reg_q_3_p4=0;
float reg_q_4_m4=0, reg_q_4_m3=0, reg_q_4_m2=0, reg_q_4_m1=0, __shared__ sh_q_4_c0[16][32], reg_q_4_p1=0, reg_q_4_p2=0, reg_q_4_p3=0, reg_q_4_p4=0;
//Value Initialization
if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) {
reg_cons_1_m4 = cons_1[0 + j*N + i];
reg_cons_1_m3 = cons_1[1*M*N + j*N + i];
reg_cons_1_m2 = cons_1[2*M*N + j*N + i];
reg_cons_1_m1 = cons_1[3*M*N + j*N + i];
sh_cons_1_c0[j-j0][i-i0] = cons_1[4*M*N + j*N + i];
reg_cons_1_p1 = cons_1[5*M*N + j*N + i];
reg_cons_1_p2 = cons_1[6*M*N + j*N + i];
reg_cons_1_p3 = cons_1[7*M*N + j*N + i];
reg_cons_2_m4 = cons_2[0 + j*N + i];
reg_cons_2_m3 = cons_2[1*M*N + j*N + i];
reg_cons_2_m2 = cons_2[2*M*N + j*N + i];
reg_cons_2_m1 = cons_2[3*M*N + j*N + i];
sh_cons_2_c0[j-j0][i-i0] = cons_2[4*M*N + j*N + i];
reg_cons_2_p1 = cons_2[5*M*N + j*N + i];
reg_cons_2_p2 = cons_2[6*M*N + j*N + i];
reg_cons_2_p3 = cons_2[7*M*N + j*N + i];
reg_cons_3_m4 = cons_3[0 + j*N + i];
reg_cons_3_m3 = cons_3[1*M*N + j*N + i];
reg_cons_3_m2 = cons_3[2*M*N + j*N + i];
reg_cons_3_m1 = cons_3[3*M*N + j*N + i];
sh_cons_3_c0[j-j0][i-i0] = cons_3[4*M*N + j*N + i];
reg_cons_3_p1 = cons_3[5*M*N + j*N + i];
reg_cons_3_p2 = cons_3[6*M*N + j*N + i];
reg_cons_3_p3 = cons_3[7*M*N + j*N + i];
reg_cons_4_m4 = cons_4[0 + j*N + i];
reg_cons_4_m3 = cons_4[1*M*N + j*N + i];
reg_cons_4_m2 = cons_4[2*M*N + j*N + i];
reg_cons_4_m1 = cons_4[3*M*N + j*N + i];
sh_cons_4_c0[j-j0][i-i0] = cons_4[4*M*N + j*N + i];
reg_cons_4_p1 = cons_4[5*M*N + j*N + i];
reg_cons_4_p2 = cons_4[6*M*N + j*N + i];
reg_cons_4_p3 = cons_4[7*M*N + j*N + i];
reg_q_3_m4 = q_3[0 + j*N + i];
reg_q_3_m3 = q_3[1*M*N + j*N + i];
reg_q_3_m2 = q_3[2*M*N + j*N + i];
reg_q_3_m1 = q_3[3*M*N + j*N + i];
reg_q_3_c0 = q_3[4*M*N + j*N + i];
reg_q_3_p1 = q_3[5*M*N + j*N + i];
reg_q_3_p2 = q_3[6*M*N + j*N + i];
reg_q_3_p3 = q_3[7*M*N + j*N + i];
reg_q_4_m4 = q_4[0 + j*N + i];
reg_q_4_m3 = q_4[1*M*N + j*N + i];
reg_q_4_m2 = q_4[2*M*N + j*N + i];
reg_q_4_m1 = q_4[3*M*N + j*N + i];
sh_q_4_c0[j-j0][i-i0] = q_4[4*M*N + j*N + i];
reg_q_4_p1 = q_4[5*M*N + j*N + i];
reg_q_4_p2 = q_4[6*M*N + j*N + i];
reg_q_4_p3 = q_4[7*M*N + j*N + i];
}
//Rest of the computation
for (int k=4; k<=L-5; ++k) {
//Fetch new plane
if (j <= min (j0+blockdim_j-1, M-1) & i <= min (i0+blockdim_i-1, N-1)) {
reg_cons_1_p4 = cons_1[(k+4)*M*N + j*N + i];
reg_cons_2_p4 = cons_2[(k+4)*M*N + j*N + i];
reg_cons_3_p4 = cons_3[(k+4)*M*N + j*N + i];
reg_cons_4_p4 = cons_4[(k+4)*M*N + j*N + i];
sh_q_1_c0[j-j0][i-i0] = q_1[k*M*N + j*N + i];
sh_q_2_c0[j-j0][i-i0] = q_2[k*M*N + j*N + i];
reg_q_3_p4 = q_3[(k+4)*M*N + j*N + i];
reg_q_4_p4 = q_4[(k+4)*M*N + j*N + i];
}
__syncthreads ();
if (j >= j0+4 & j <= min (j0+blockdim_j-5, M-1) & i >= i0+4 & i <= min (i0+blockdim_i-5, N-5)) {
float f0 = -(((((0.8f * (sh_cons_1_c0[j-j0][i-i0+1] - sh_cons_1_c0[j-j0][i-i0-1])) - (0.2f * (sh_cons_1_c0[j-j0][i-i0+2] - sh_cons_1_c0[j-j0][i-i0-2]))) + (0.038f * (sh_cons_1_c0[j-j0][i-i0+3] - sh_cons_1_c0[j-j0][i-i0-3]))) - (0.0035f * (sh_cons_1_c0[j-j0][i-i0+4] - sh_cons_1_c0[j-j0][i-i0-4]))) * dxinv0);
f0 -= (((((0.8f * (sh_cons_2_c0[j-j0+1][i-i0] - sh_cons_2_c0[j-j0-1][i-i0])) - (0.2f * (sh_cons_2_c0[j-j0+2][i-i0] - sh_cons_2_c0[j-j0-2][i-i0]))) + (0.038f * (sh_cons_2_c0[j-j0+3][i-i0] - sh_cons_2_c0[j-j0-3][i-i0]))) - (0.0035f * (sh_cons_2_c0[j-j0+4][i-i0] - sh_cons_2_c0[j-j0-4][i-i0]))) * dxinv1);
f0 -= (((((0.8f * (reg_cons_3_p1 - reg_cons_3_m1)) - (0.2f * (reg_cons_3_p2 - reg_cons_3_m2))) + (0.038f * (reg_cons_3_p3 - reg_cons_3_m3))) - (0.0035f * (reg_cons_3_p4 - reg_cons_3_m4))) * dxinv2);
flux_0[k*M*N + j*N + i] = f0;
float f1 = -(((((0.8f * (((sh_cons_1_c0[j-j0][i-i0+1] * sh_q_1_c0[j-j0][i-i0+1]) - (sh_cons_1_c0[j-j0][i-i0-1] * sh_q_1_c0[j-j0][i-i0-1])) + (sh_q_4_c0[j-j0][i-i0+1] - sh_q_4_c0[j-j0][i-i0-1]))) - (0.2f * (((sh_cons_1_c0[j-j0][i-i0+2] * sh_q_1_c0[j-j0][i-i0+2]) - (sh_cons_1_c0[j-j0][i-i0-2] * sh_q_1_c0[j-j0][i-i0-2])) + (sh_q_4_c0[j-j0][i-i0+2] - sh_q_4_c0[j-j0][i-i0-2])))) + (0.038f * (((sh_cons_1_c0[j-j0][i-i0+3] * sh_q_1_c0[j-j0][i-i0+3]) - (sh_cons_1_c0[j-j0][i-i0-3] * sh_q_1_c0[j-j0][i-i0-3])) + (sh_q_4_c0[j-j0][i-i0+3] - sh_q_4_c0[j-j0][i-i0-3])))) - (0.0035f * (((sh_cons_1_c0[j-j0][i-i0+4] * sh_q_1_c0[j-j0][i-i0+4]) - (sh_cons_1_c0[j-j0][i-i0-4] * sh_q_1_c0[j-j0][i-i0-4])) + (sh_q_4_c0[j-j0][i-i0+4] - sh_q_4_c0[j-j0][i-i0-4])))) * dxinv0);
f1 -= (((((0.8f * ((sh_cons_1_c0[j-j0+1][i-i0] * sh_q_2_c0[j-j0+1][i-i0]) - (sh_cons_1_c0[j-j0-1][i-i0] * sh_q_2_c0[j-j0-1][i-i0]))) - (0.2f * ((sh_cons_1_c0[j-j0+2][i-i0] * sh_q_2_c0[j-j0+2][i-i0]) - (sh_cons_1_c0[j-j0-2][i-i0] * sh_q_2_c0[j-j0-2][i-i0])))) + (0.038f * ((sh_cons_1_c0[j-j0+3][i-i0] * sh_q_2_c0[j-j0+3][i-i0]) - (sh_cons_1_c0[j-j0-3][i-i0] * sh_q_2_c0[j-j0-3][i-i0])))) - (0.0035f * ((sh_cons_1_c0[j-j0+4][i-i0] * sh_q_2_c0[j-j0+4][i-i0]) - (sh_cons_1_c0[j-j0-4][i-i0] * sh_q_2_c0[j-j0-4][i-i0])))) * dxinv1);
f1 -= (((((0.8f * ((reg_cons_1_p1 * reg_q_3_p1) - (reg_cons_1_m1 * reg_q_3_m1))) - (0.2f * ((reg_cons_1_p2 * reg_q_3_p2) - (reg_cons_1_m2 * reg_q_3_m2)))) + (0.038f * ((reg_cons_1_p3 * reg_q_3_p3) - (reg_cons_1_m3 * reg_q_3_m3)))) - (0.0035f * ((reg_cons_1_p4 * reg_q_3_p4) - (reg_cons_1_m4 * reg_q_3_m4)))) * dxinv2);
flux_1[k*M*N + j*N + i] = f1;
float f2 = -(((((0.8f * ((sh_cons_2_c0[j-j0][i-i0+1] * sh_q_1_c0[j-j0][i-i0+1]) - (sh_cons_2_c0[j-j0][i-i0-1] * sh_q_1_c0[j-j0][i-i0-1]))) - (0.2f * ((sh_cons_2_c0[j-j0][i-i0+2] * sh_q_1_c0[j-j0][i-i0+2]) - (sh_cons_2_c0[j-j0][i-i0-2] * sh_q_1_c0[j-j0][i-i0-2])))) + (0.038f * ((sh_cons_2_c0[j-j0][i-i0+3] * sh_q_1_c0[j-j0][i-i0+3]) - (sh_cons_2_c0[j-j0][i-i0-3] * sh_q_1_c0[j-j0][i-i0-3])))) - (0.0035f * ((sh_cons_2_c0[j-j0][i-i0+4] * sh_q_1_c0[j-j0][i-i0+4]) - (sh_cons_2_c0[j-j0][i-i0-4] * sh_q_1_c0[j-j0][i-i0-4])))) * dxinv0);
f2 -= (((((0.8f * (((sh_cons_2_c0[j-j0+1][i-i0] * sh_q_2_c0[j-j0+1][i-i0]) - (sh_cons_2_c0[j-j0-1][i-i0] * sh_q_2_c0[j-j0-1][i-i0])) + (sh_q_4_c0[j-j0+1][i-i0] - sh_q_4_c0[j-j0-1][i-i0]))) - (0.2f * (((sh_cons_2_c0[j-j0+2][i-i0] * sh_q_2_c0[j-j0+2][i-i0]) - (sh_cons_2_c0[j-j0-2][i-i0] * sh_q_2_c0[j-j0-2][i-i0])) + (sh_q_4_c0[j-j0+2][i-i0] - sh_q_4_c0[j-j0-2][i-i0])))) + (0.038f * (((sh_cons_2_c0[j-j0+3][i-i0] * sh_q_2_c0[j-j0+3][i-i0]) - (sh_cons_2_c0[j-j0-3][i-i0] * sh_q_2_c0[j-j0-3][i-i0])) + (sh_q_4_c0[j-j0+3][i-i0] - sh_q_4_c0[j-j0-3][i-i0])))) - (0.0035f * (((sh_cons_2_c0[j-j0+4][i-i0] * sh_q_2_c0[j-j0+4][i-i0]) - (sh_cons_2_c0[j-j0-4][i-i0] * sh_q_2_c0[j-j0-4][i-i0])) + (sh_q_4_c0[j-j0+4][i-i0] - sh_q_4_c0[j-j0-4][i-i0])))) * dxinv1);
f2 -= (((((0.8f * ((reg_cons_2_p1 * reg_q_3_p1) - (reg_cons_2_m1 * reg_q_3_m1))) - (0.2f * ((reg_cons_2_p2 * reg_q_3_p2) - (reg_cons_2_m2 * reg_q_3_m2)))) + (0.038f * ((reg_cons_2_p3 * reg_q_3_p3) - (reg_cons_2_m3 * reg_q_3_m3)))) - (0.0035f * ((reg_cons_2_p4 * reg_q_3_p4) - (reg_cons_2_m4 * reg_q_3_m4)))) * dxinv2);
flux_2[k*M*N + j*N + i] = f2;
float f3 = -(((((0.8f * ((sh_cons_3_c0[j-j0][i-i0+1] * sh_q_1_c0[j-j0][i-i0+1]) - (sh_cons_3_c0[j-j0][i-i0-1] * sh_q_1_c0[j-j0][i-i0-1]))) - (0.2f * ((sh_cons_3_c0[j-j0][i-i0+2] * sh_q_1_c0[j-j0][i-i0+2]) - (sh_cons_3_c0[j-j0][i-i0-2] * sh_q_1_c0[j-j0][i-i0-2])))) + (0.038f * ((sh_cons_3_c0[j-j0][i-i0+3] * sh_q_1_c0[j-j0][i-i0+3]) - (sh_cons_3_c0[j-j0][i-i0-3] * sh_q_1_c0[j-j0][i-i0-3])))) - (0.0035f * ((sh_cons_3_c0[j-j0][i-i0+4] * sh_q_1_c0[j-j0][i-i0+4]) - (sh_cons_3_c0[j-j0][i-i0-4] * sh_q_1_c0[j-j0][i-i0-4])))) * dxinv0);
f3 -= (((((0.8f * ((sh_cons_3_c0[j-j0+1][i-i0] * sh_q_2_c0[j-j0+1][i-i0]) - (sh_cons_3_c0[j-j0-1][i-i0] * sh_q_2_c0[j-j0-1][i-i0]))) - (0.2f * ((sh_cons_3_c0[j-j0+2][i-i0] * sh_q_2_c0[j-j0+2][i-i0]) - (sh_cons_3_c0[j-j0-2][i-i0] * sh_q_2_c0[j-j0-2][i-i0])))) + (0.038f * ((sh_cons_3_c0[j-j0+3][i-i0] * sh_q_2_c0[j-j0+3][i-i0]) - (sh_cons_3_c0[j-j0-3][i-i0] * sh_q_2_c0[j-j0-3][i-i0])))) - (0.0035f * ((sh_cons_3_c0[j-j0+4][i-i0] * sh_q_2_c0[j-j0+4][i-i0]) - (sh_cons_3_c0[j-j0-4][i-i0] * sh_q_2_c0[j-j0-4][i-i0])))) * dxinv1);
f3 -= (((((0.8f * (((reg_cons_3_p1 * reg_q_3_p1) - (reg_cons_3_m1 * reg_q_3_m1)) + (reg_q_4_p1 - reg_q_4_m1))) - (0.2f * (((reg_cons_3_p2 * reg_q_3_p2) - (reg_cons_3_m2 * reg_q_3_m2)) + (reg_q_4_p2 - reg_q_4_m2)))) + (0.038f * (((reg_cons_3_p3 * reg_q_3_p3) - (reg_cons_3_m3 * reg_q_3_m3)) + (reg_q_4_p3 - reg_q_4_m3)))) - (0.0035f * (((reg_cons_3_p4 * reg_q_3_p4) - (reg_cons_3_m4 * reg_q_3_m4)) + (reg_q_4_p4 - reg_q_4_m4)))) * dxinv2);
flux_3[k*M*N + j*N + i] = f3;
float f4 = -(((((0.8f * (((sh_cons_4_c0[j-j0][i-i0+1] * sh_q_1_c0[j-j0][i-i0+1]) - (sh_cons_4_c0[j-j0][i-i0-1] * sh_q_1_c0[j-j0][i-i0-1])) + ((sh_q_4_c0[j-j0][i-i0+1] * sh_q_1_c0[j-j0][i-i0+1]) - (sh_q_4_c0[j-j0][i-i0-1] * sh_q_1_c0[j-j0][i-i0-1])))) - (0.2f * (((sh_cons_4_c0[j-j0][i-i0+2] * sh_q_1_c0[j-j0][i-i0+2]) - (sh_cons_4_c0[j-j0][i-i0-2] * sh_q_1_c0[j-j0][i-i0-2])) + ((sh_q_4_c0[j-j0][i-i0+2] * sh_q_1_c0[j-j0][i-i0+2]) - (sh_q_4_c0[j-j0][i-i0-2] * sh_q_1_c0[j-j0][i-i0-2]))))) + (0.038f * (((sh_cons_4_c0[j-j0][i-i0+3] * sh_q_1_c0[j-j0][i-i0+3]) - (sh_cons_4_c0[j-j0][i-i0-3] * sh_q_1_c0[j-j0][i-i0-3])) + ((sh_q_4_c0[j-j0][i-i0+3] * sh_q_1_c0[j-j0][i-i0+3]) - (sh_q_4_c0[j-j0][i-i0-3] * sh_q_1_c0[j-j0][i-i0-3]))))) - (0.0035f * (((sh_cons_4_c0[j-j0][i-i0+4] * sh_q_1_c0[j-j0][i-i0+4]) - (sh_cons_4_c0[j-j0][i-i0-4] * sh_q_1_c0[j-j0][i-i0-4])) + ((sh_q_4_c0[j-j0][i-i0+4] * sh_q_1_c0[j-j0][i-i0+4]) - (sh_q_4_c0[j-j0][i-i0-4] * sh_q_1_c0[j-j0][i-i0-4]))))) * dxinv0);
f4 -= (((((0.8f * (((sh_cons_4_c0[j-j0+1][i-i0] * sh_q_2_c0[j-j0+1][i-i0]) - (sh_cons_4_c0[j-j0-1][i-i0] * sh_q_2_c0[j-j0-1][i-i0])) + ((sh_q_4_c0[j-j0+1][i-i0] * sh_q_2_c0[j-j0+1][i-i0]) - (sh_q_4_c0[j-j0-1][i-i0] * sh_q_2_c0[j-j0-1][i-i0])))) - (0.2f * (((sh_cons_4_c0[j-j0+2][i-i0] * sh_q_2_c0[j-j0+2][i-i0]) - (sh_cons_4_c0[j-j0-2][i-i0] * sh_q_2_c0[j-j0-2][i-i0])) + ((sh_q_4_c0[j-j0+2][i-i0] * sh_q_2_c0[j-j0+2][i-i0]) - (sh_q_4_c0[j-j0-2][i-i0] * sh_q_2_c0[j-j0-2][i-i0]))))) + (0.038f * (((sh_cons_4_c0[j-j0+3][i-i0] * sh_q_2_c0[j-j0+3][i-i0]) - (sh_cons_4_c0[j-j0-3][i-i0] * sh_q_2_c0[j-j0-3][i-i0])) + ((sh_q_4_c0[j-j0+3][i-i0] * sh_q_2_c0[j-j0+3][i-i0]) - (sh_q_4_c0[j-j0-3][i-i0] * sh_q_2_c0[j-j0-3][i-i0]))))) - (0.0035f * (((sh_cons_4_c0[j-j0+4][i-i0] * sh_q_2_c0[j-j0+4][i-i0]) - (sh_cons_4_c0[j-j0-4][i-i0] * sh_q_2_c0[j-j0-4][i-i0])) + ((sh_q_4_c0[j-j0+4][i-i0] * sh_q_2_c0[j-j0+4][i-i0]) - (sh_q_4_c0[j-j0-4][i-i0] * sh_q_2_c0[j-j0-4][i-i0]))))) * dxinv1);
f4 -= (((((0.8f * (((reg_cons_4_p1 * reg_q_3_p1) - (reg_cons_4_m1 * reg_q_3_m1)) + ((reg_q_4_p1 * reg_q_3_p1) - (reg_q_4_m1 * reg_q_3_m1)))) - (0.2f * (((reg_cons_4_p2 * reg_q_3_p2) - (reg_cons_4_m2 * reg_q_3_m2)) + ((reg_q_4_p2 * reg_q_3_p2) - (reg_q_4_m2 * reg_q_3_m2))))) + (0.038f * (((reg_cons_4_p3 * reg_q_3_p3) - (reg_cons_4_m3 * reg_q_3_m3)) + ((reg_q_4_p3 * reg_q_3_p3) - (reg_q_4_m3 * reg_q_3_m3))))) - (0.0035f * (((reg_cons_4_p4 * reg_q_3_p4) - (reg_cons_4_m4 * reg_q_3_m4)) + ((reg_q_4_p4 * reg_q_3_p4) - (reg_q_4_m4 * reg_q_3_m4))))) * dxinv2);
flux_4[k*M*N + j*N + i] = f4;
}
__syncthreads ();
//Value rotation
reg_cons_1_m4 = reg_cons_1_m3;
reg_cons_1_m3 = reg_cons_1_m2;
reg_cons_1_m2 = reg_cons_1_m1;
reg_cons_1_m1 = sh_cons_1_c0[j-j0][i-i0];
sh_cons_1_c0[j-j0][i-i0] = reg_cons_1_p1;
reg_cons_1_p1 = reg_cons_1_p2;
reg_cons_1_p2 = reg_cons_1_p3;
reg_cons_1_p3 = reg_cons_1_p4;
reg_cons_2_m4 = reg_cons_2_m3;
reg_cons_2_m3 = reg_cons_2_m2;
reg_cons_2_m2 = reg_cons_2_m1;
reg_cons_2_m1 = sh_cons_2_c0[j-j0][i-i0];
sh_cons_2_c0[j-j0][i-i0] = reg_cons_2_p1;
reg_cons_2_p1 = reg_cons_2_p2;
reg_cons_2_p2 = reg_cons_2_p3;
reg_cons_2_p3 = reg_cons_2_p4;
reg_cons_3_m4 = reg_cons_3_m3;
reg_cons_3_m3 = reg_cons_3_m2;
reg_cons_3_m2 = reg_cons_3_m1;
reg_cons_3_m1 = sh_cons_3_c0[j-j0][i-i0];
sh_cons_3_c0[j-j0][i-i0] = reg_cons_3_p1;
reg_cons_3_p1 = reg_cons_3_p2;
reg_cons_3_p2 = reg_cons_3_p3;
reg_cons_3_p3 = reg_cons_3_p4;
reg_cons_4_m4 = reg_cons_4_m3;
reg_cons_4_m3 = reg_cons_4_m2;
reg_cons_4_m2 = reg_cons_4_m1;
reg_cons_4_m1 = sh_cons_4_c0[j-j0][i-i0];
sh_cons_4_c0[j-j0][i-i0] = reg_cons_4_p1;
reg_cons_4_p1 = reg_cons_4_p2;
reg_cons_4_p2 = reg_cons_4_p3;
reg_cons_4_p3 = reg_cons_4_p4;
reg_q_3_m4 = reg_q_3_m3;
reg_q_3_m3 = reg_q_3_m2;
reg_q_3_m2 = reg_q_3_m1;
reg_q_3_m1 = reg_q_3_c0;
reg_q_3_c0 = reg_q_3_p1;
reg_q_3_p1 = reg_q_3_p2;
reg_q_3_p2 = reg_q_3_p3;
reg_q_3_p3 = reg_q_3_p4;
reg_q_4_m4 = reg_q_4_m3;
reg_q_4_m3 = reg_q_4_m2;
reg_q_4_m2 = reg_q_4_m1;
reg_q_4_m1 = sh_q_4_c0[j-j0][i-i0];
sh_q_4_c0[j-j0][i-i0] = reg_q_4_p1;
reg_q_4_p1 = reg_q_4_p2;
reg_q_4_p2 = reg_q_4_p3;
reg_q_4_p3 = reg_q_4_p4;
}
}
extern "C" void host_code (float *h_flux_0, float *h_flux_1, float *h_flux_2, float *h_flux_3, float *h_flux_4, float *h_cons_1, float *h_cons_2, float *h_cons_3, float *h_cons_4, float *h_q_1, float *h_q_2, float *h_q_3, float *h_q_4, float dxinv0, float dxinv1, float dxinv2, int L, int M, int N) {
float *flux_0;
cudaMalloc (&flux_0, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for flux_0\n");
cudaMemcpy (flux_0, h_flux_0, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
float *flux_1;
cudaMalloc (&flux_1, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for flux_1\n");
cudaMemcpy (flux_1, h_flux_1, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
float *flux_2;
cudaMalloc (&flux_2, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for flux_2\n");
cudaMemcpy (flux_2, h_flux_2, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
float *flux_3;
cudaMalloc (&flux_3, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for flux_3\n");
cudaMemcpy (flux_3, h_flux_3, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
float *flux_4;
cudaMalloc (&flux_4, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for flux_4\n");
cudaMemcpy (flux_4, h_flux_4, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
float *cons_1;
cudaMalloc (&cons_1, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for cons_1\n");
cudaMemcpy (cons_1, h_cons_1, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
float *cons_2;
cudaMalloc (&cons_2, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for cons_2\n");
cudaMemcpy (cons_2, h_cons_2, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
float *cons_3;
cudaMalloc (&cons_3, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for cons_3\n");
cudaMemcpy (cons_3, h_cons_3, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
float *cons_4;
cudaMalloc (&cons_4, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for cons_4\n");
cudaMemcpy (cons_4, h_cons_4, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
float *q_1;
cudaMalloc (&q_1, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for q_1\n");
cudaMemcpy (q_1, h_q_1, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
float *q_2;
cudaMalloc (&q_2, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for q_2\n");
cudaMemcpy (q_2, h_q_2, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
float *q_3;
cudaMalloc (&q_3, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for q_3\n");
cudaMemcpy (q_3, h_q_3, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
float *q_4;
cudaMalloc (&q_4, sizeof(float)*L*M*N);
check_error ("Failed to allocate device memory for q_4\n");
cudaMemcpy (q_4, h_q_4, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
dim3 blockconfig_1 (32, 16, 1);
dim3 gridconfig_1 (ceil(N, blockconfig_1.x-8), ceil(M, blockconfig_1.y-8), 1);
unsigned int power1, power2;
nvmlReturn_t result;
nvmlDevice_t device;
nvmlEnableState_t mode;
result=nvmlInit();
result = nvmlDeviceGetHandleByIndex(0, &device);
assert(NVML_SUCCESS == result);
result=nvmlDeviceGetPowerManagementMode(device, &mode);
printf("enabled = %d\n", mode);
result=nvmlDeviceGetPowerUsage(device,&power1);
assert(NVML_SUCCESS == result);
cudaDeviceSynchronize();
for (int x=0; x<500; x++) {
hypterm <<<gridconfig_1, blockconfig_1>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
}
cudaDeviceSynchronize();
result=nvmlDeviceGetPowerUsage(device,&power2);
assert(NVML_SUCCESS == result);
power2 -= power1;
printf("%u\n", power2);
nvmlShutdown();
cudaMemcpy (h_flux_0, flux_0, sizeof(float)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_1, flux_1, sizeof(float)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_2, flux_2, sizeof(float)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_3, flux_3, sizeof(float)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_4, flux_4, sizeof(float)*L*M*N, cudaMemcpyDeviceToHost);
//Free allocated memory
cudaFree (flux_0);
cudaFree (flux_1);
cudaFree (flux_2);
cudaFree (flux_3);
cudaFree (flux_4);
cudaFree (cons_1);
cudaFree (cons_2);
cudaFree (cons_3);
cudaFree (cons_4);
cudaFree (q_1);
cudaFree (q_2);
cudaFree (q_3);
cudaFree (q_4);
}
|
bfb6f9dc2541716bcbef79f5c7beaf4183b2fe32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// @file rgb2hsv_gpu.cu
// @brief RGB2HSV
// @author Samuel Albanie
/*
Copyright (C) 2017- Samuel Albanie.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "rgb2hsv_impl.hpp"
#include <bits/datacu.hpp>
#include <bits/mexutils.h>
#include <bits/data.hpp>
#include <assert.h>
#include <float.h>
#include <cstdio>
#include <math.h>
#include <string.h>
/* ------------------------------------------------------------ */
/* kernels */
/* ------------------------------------------------------------ */
template<typename T> __global__ void
rgb2hsv_kernel(T* output,
const T* data,
const int volume,
bool* valid_range,
const int height,
const int width,
const int size)
{
int hsvIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (hsvIndex < volume) {
int depth = 3 ; // RGB input
int area = height * width ;
int s = hsvIndex % area ; // spatial offset
int b = hsvIndex / (area * depth) ; // batch element
int rIdx = (b * area * 3) + s ;
int gIdx = rIdx + area ;
int bIdx = rIdx + 2 * area ;
int c = (hsvIndex / area) % depth ;
T R = data[rIdx] ;
T G = data[gIdx] ;
T B = data[bIdx] ;
// check input ranges
bool valid_R_range = R <= 1 && R >= 0 ;
bool valid_G_range = G <= 1 && G >= 0 ;
bool valid_B_range = B <= 1 && B >= 0 ;
if (!(valid_R_range && valid_G_range && valid_B_range)) {
valid_range[0] = 1 ;
}
T out, sat ;
T maxRGB = max(R, max(G, B)) ;
T minRGB = min(R, min(G, B)) ;
T delta = maxRGB - minRGB ;
// it is safer to do slightly extra work here
if (maxRGB == 0) {
sat = 0 ;
} else {
sat = delta / maxRGB ;
}
switch (c) { // H, S or V output
case 0: // Compute hue
if (sat == 0) {
out = 0 ; // fix hue to zero
} else {
if (R == maxRGB) {
out = (G - B) / delta ;
} else if (G == maxRGB) {
out = 2 + ( B - R ) / delta ;
} else { // B max
out = 4 + ( R - G ) / delta ;
}
out = out / 6 ; // use [0,1], rather than 360 degrees
if (out < 0) {
out = out + 1 ;
}
}
break ;
case 1: // compute saturation
out = sat ;
break ;
case 2: // compute value
out = maxRGB ; // store value
break ;
}
output[hsvIndex] = out ;
}
}
template<typename T> __global__ void
hsv2rgb_kernel(T* output,
const T* data,
const int volume,
bool* valid_range,
const int height,
const int width,
const int size)
{
int rgbIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (rgbIndex < volume) {
int depth = 3 ; // HSV input
int area = height * width ;
int s = rgbIndex % area ; // spatial offset
int b = rgbIndex / (area * depth) ; // batch element
int hIdx = (b * area * 3) + s ;
int sIdx = hIdx + area ;
int vIdx = hIdx + 2 * area ;
int c = (rgbIndex / area) % depth ;
T H = data[hIdx] ;
T S = data[sIdx] ;
T V = data[vIdx] ;
// check input ranges
bool valid_H_range = H <= 1 && H >= 0 ;
bool valid_S_range = S <= 1 && S >= 0 ;
bool valid_V_range = V <= 1 && V >= 0 ;
if (!(valid_H_range && valid_S_range && valid_V_range)) {
valid_range[0] = 1 ;
}
T out ;
T H_ = H * 6 ; // follow standard convention for Hue computation
int cRegion = (int) trunc(H_) ; // map into one of six color regions
T rem = H_ - cRegion ; // store remainder
// incorporate the post addition of (V - chroma) into quantities
// that can be assigned directly
T Q1 = V * (1 - S) ;
T Q2 = V * (1 - (S * rem)) ;
T Q3 = V * (1 - (S * (1 - rem))) ;
switch (cRegion) {
case 0:
switch (c) { // RGB switch
case 0: out = V ; break ;
case 1: out = Q3 ; break ;
case 2: out = Q1 ; break ;
} break ;
case 1:
switch (c) { // RGB switch
case 0: out = Q2 ; break ;
case 1: out = V ; break ;
case 2: out = Q1 ; break ;
} break ;
case 2:
switch (c) { // RGB switch
case 0: out = Q1 ; break ;
case 1: out = V ; break ;
case 2: out = Q3 ; break ;
} break ;
case 3:
switch (c) { // RGB switch
case 0: out = Q1 ; break ;
case 1: out = Q2 ; break ;
case 2: out = V ; break ;
} break ;
case 4:
switch (c) { // RGB switch
case 0: out = Q3 ; break ;
case 1: out = Q1 ; break ;
case 2: out = V ; break ;
} break ;
case 5:
switch (c) { // RGB switch
case 0: out = V ; break ;
case 1: out = Q1 ; break ;
case 2: out = Q2 ; break ;
} break ;
case 6: // match MATLAB convention here
switch (c) { // RGB switch
case 0: out = V ; break ;
case 1: out = Q3 ; break ;
case 2: out = Q1 ; break ;
} break ;
}
output[rgbIndex] = out ;
}
}
namespace vl { namespace impl {
/* ------------------------------------------------------------ */
/* rgb2hsv */
/* ------------------------------------------------------------ */
template<typename T>
struct rgb2hsv<vl::VLDT_GPU,T>
{
static vl::ErrorCode
forward(Context& context,
T* output,
T const* data,
size_t height,
size_t width,
size_t size)
{
int volume = height * width * 3 * size ;
// set flag for input checking
bool* valid_range ;
hipMalloc( (void **) &valid_range, sizeof(bool)) ;
hipMemset(valid_range, 0, sizeof(bool)) ; // init to zero
hipLaunchKernelGGL(( rgb2hsv_kernel<T>), vl::divideAndRoundUp(volume,
VL_CUDA_NUM_THREADS), dim3(VL_CUDA_NUM_THREADS) , 0, 0, output, data,
volume, valid_range, height, width, size) ;
bool* h_valid_range = new bool[1] ;
hipMemcpy(h_valid_range, valid_range, sizeof(bool), hipMemcpyDeviceToHost) ;
hipError_t status = hipPeekAtLastError() ;
// TODO: clean up error handling here
// currently the input validation is done on the device to prevent a speed
// overhead, but requires a slightly ungainly use of error codes.
if ((status != hipSuccess) || (h_valid_range[0] != 0)) {
if (h_valid_range[0] != 0) {
mexPrintf("invalid RGB input values (must lie in [0,1]) \n") ;
}
return vl::VLE_Cuda ;
} else {
return vl::VLE_Success ;
}
}
} ;
/* ------------------------------------------------------------ */
/* hsv2rgb */
/* ------------------------------------------------------------ */
template<typename T>
struct hsv2rgb<vl::VLDT_GPU,T>
{
static vl::ErrorCode
forward(Context& context,
T* output,
T const* data,
size_t height,
size_t width,
size_t size)
{
int volume = height * width * 3 * size ;
// set flag for input checking
bool* valid_range ;
hipMalloc( (void **) &valid_range, sizeof(bool)) ;
hipMemset(valid_range, 0, sizeof(bool)) ; // init to zero
hipLaunchKernelGGL(( hsv2rgb_kernel<T>), vl::divideAndRoundUp(volume,
VL_CUDA_NUM_THREADS), dim3(VL_CUDA_NUM_THREADS) , 0, 0, output, data,
volume, valid_range, height, width, size) ;
bool* h_valid_range = new bool[1] ;
hipMemcpy(h_valid_range, valid_range, sizeof(bool), hipMemcpyDeviceToHost) ;
hipError_t status = hipPeekAtLastError() ;
// TODO: clean up error handling here
// currently the input validation is done on the device to prevent a speed
// overhead, but requires a slightly ungainly use of error codes.
if ((status != hipSuccess) || (h_valid_range[0] != 0)) {
if (h_valid_range[0] != 0) {
mexPrintf("invalid RGB input values (must lie in [0,1]) \n") ;
}
return vl::VLE_Cuda ;
} else {
return vl::VLE_Success ;
}
}
} ;
} } // namespace vl::impl
template struct vl::impl::rgb2hsv<vl::VLDT_GPU, float> ;
template struct vl::impl::hsv2rgb<vl::VLDT_GPU, float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::rgb2hsv<vl::VLDT_GPU, double> ;
template struct vl::impl::hsv2rgb<vl::VLDT_GPU, double> ;
#endif
| bfb6f9dc2541716bcbef79f5c7beaf4183b2fe32.cu | // @file rgb2hsv_gpu.cu
// @brief RGB2HSV
// @author Samuel Albanie
/*
Copyright (C) 2017- Samuel Albanie.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#include "rgb2hsv_impl.hpp"
#include <bits/datacu.hpp>
#include <bits/mexutils.h>
#include <bits/data.hpp>
#include <assert.h>
#include <float.h>
#include <cstdio>
#include <math.h>
#include <string.h>
/* ------------------------------------------------------------ */
/* kernels */
/* ------------------------------------------------------------ */
template<typename T> __global__ void
rgb2hsv_kernel(T* output,
const T* data,
const int volume,
bool* valid_range,
const int height,
const int width,
const int size)
{
int hsvIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (hsvIndex < volume) {
int depth = 3 ; // RGB input
int area = height * width ;
int s = hsvIndex % area ; // spatial offset
int b = hsvIndex / (area * depth) ; // batch element
int rIdx = (b * area * 3) + s ;
int gIdx = rIdx + area ;
int bIdx = rIdx + 2 * area ;
int c = (hsvIndex / area) % depth ;
T R = data[rIdx] ;
T G = data[gIdx] ;
T B = data[bIdx] ;
// check input ranges
bool valid_R_range = R <= 1 && R >= 0 ;
bool valid_G_range = G <= 1 && G >= 0 ;
bool valid_B_range = B <= 1 && B >= 0 ;
if (!(valid_R_range && valid_G_range && valid_B_range)) {
valid_range[0] = 1 ;
}
T out, sat ;
T maxRGB = max(R, max(G, B)) ;
T minRGB = min(R, min(G, B)) ;
T delta = maxRGB - minRGB ;
// it is safer to do slightly extra work here
if (maxRGB == 0) {
sat = 0 ;
} else {
sat = delta / maxRGB ;
}
switch (c) { // H, S or V output
case 0: // Compute hue
if (sat == 0) {
out = 0 ; // fix hue to zero
} else {
if (R == maxRGB) {
out = (G - B) / delta ;
} else if (G == maxRGB) {
out = 2 + ( B - R ) / delta ;
} else { // B max
out = 4 + ( R - G ) / delta ;
}
out = out / 6 ; // use [0,1], rather than 360 degrees
if (out < 0) {
out = out + 1 ;
}
}
break ;
case 1: // compute saturation
out = sat ;
break ;
case 2: // compute value
out = maxRGB ; // store value
break ;
}
output[hsvIndex] = out ;
}
}
template<typename T> __global__ void
hsv2rgb_kernel(T* output,
const T* data,
const int volume,
bool* valid_range,
const int height,
const int width,
const int size)
{
int rgbIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (rgbIndex < volume) {
int depth = 3 ; // HSV input
int area = height * width ;
int s = rgbIndex % area ; // spatial offset
int b = rgbIndex / (area * depth) ; // batch element
int hIdx = (b * area * 3) + s ;
int sIdx = hIdx + area ;
int vIdx = hIdx + 2 * area ;
int c = (rgbIndex / area) % depth ;
T H = data[hIdx] ;
T S = data[sIdx] ;
T V = data[vIdx] ;
// check input ranges
bool valid_H_range = H <= 1 && H >= 0 ;
bool valid_S_range = S <= 1 && S >= 0 ;
bool valid_V_range = V <= 1 && V >= 0 ;
if (!(valid_H_range && valid_S_range && valid_V_range)) {
valid_range[0] = 1 ;
}
T out ;
T H_ = H * 6 ; // follow standard convention for Hue computation
int cRegion = (int) trunc(H_) ; // map into one of six color regions
T rem = H_ - cRegion ; // store remainder
// incorporate the post addition of (V - chroma) into quantities
// that can be assigned directly
T Q1 = V * (1 - S) ;
T Q2 = V * (1 - (S * rem)) ;
T Q3 = V * (1 - (S * (1 - rem))) ;
switch (cRegion) {
case 0:
switch (c) { // RGB switch
case 0: out = V ; break ;
case 1: out = Q3 ; break ;
case 2: out = Q1 ; break ;
} break ;
case 1:
switch (c) { // RGB switch
case 0: out = Q2 ; break ;
case 1: out = V ; break ;
case 2: out = Q1 ; break ;
} break ;
case 2:
switch (c) { // RGB switch
case 0: out = Q1 ; break ;
case 1: out = V ; break ;
case 2: out = Q3 ; break ;
} break ;
case 3:
switch (c) { // RGB switch
case 0: out = Q1 ; break ;
case 1: out = Q2 ; break ;
case 2: out = V ; break ;
} break ;
case 4:
switch (c) { // RGB switch
case 0: out = Q3 ; break ;
case 1: out = Q1 ; break ;
case 2: out = V ; break ;
} break ;
case 5:
switch (c) { // RGB switch
case 0: out = V ; break ;
case 1: out = Q1 ; break ;
case 2: out = Q2 ; break ;
} break ;
case 6: // match MATLAB convention here
switch (c) { // RGB switch
case 0: out = V ; break ;
case 1: out = Q3 ; break ;
case 2: out = Q1 ; break ;
} break ;
}
output[rgbIndex] = out ;
}
}
namespace vl { namespace impl {
/* ------------------------------------------------------------ */
/* rgb2hsv */
/* ------------------------------------------------------------ */
template<typename T>
struct rgb2hsv<vl::VLDT_GPU,T>
{
static vl::ErrorCode
forward(Context& context,
T* output,
T const* data,
size_t height,
size_t width,
size_t size)
{
int volume = height * width * 3 * size ;
// set flag for input checking
bool* valid_range ;
cudaMalloc( (void **) &valid_range, sizeof(bool)) ;
cudaMemset(valid_range, 0, sizeof(bool)) ; // init to zero
rgb2hsv_kernel<T><<< vl::divideAndRoundUp(volume,
VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>(output, data,
volume, valid_range, height, width, size) ;
bool* h_valid_range = new bool[1] ;
cudaMemcpy(h_valid_range, valid_range, sizeof(bool), cudaMemcpyDeviceToHost) ;
cudaError_t status = cudaPeekAtLastError() ;
// TODO: clean up error handling here
// currently the input validation is done on the device to prevent a speed
// overhead, but requires a slightly ungainly use of error codes.
if ((status != cudaSuccess) || (h_valid_range[0] != 0)) {
if (h_valid_range[0] != 0) {
mexPrintf("invalid RGB input values (must lie in [0,1]) \n") ;
}
return vl::VLE_Cuda ;
} else {
return vl::VLE_Success ;
}
}
} ;
/* ------------------------------------------------------------ */
/* hsv2rgb */
/* ------------------------------------------------------------ */
template<typename T>
struct hsv2rgb<vl::VLDT_GPU,T>
{
static vl::ErrorCode
forward(Context& context,
T* output,
T const* data,
size_t height,
size_t width,
size_t size)
{
int volume = height * width * 3 * size ;
// set flag for input checking
bool* valid_range ;
cudaMalloc( (void **) &valid_range, sizeof(bool)) ;
cudaMemset(valid_range, 0, sizeof(bool)) ; // init to zero
hsv2rgb_kernel<T><<< vl::divideAndRoundUp(volume,
VL_CUDA_NUM_THREADS), VL_CUDA_NUM_THREADS >>>(output, data,
volume, valid_range, height, width, size) ;
bool* h_valid_range = new bool[1] ;
cudaMemcpy(h_valid_range, valid_range, sizeof(bool), cudaMemcpyDeviceToHost) ;
cudaError_t status = cudaPeekAtLastError() ;
// TODO: clean up error handling here
// currently the input validation is done on the device to prevent a speed
// overhead, but requires a slightly ungainly use of error codes.
if ((status != cudaSuccess) || (h_valid_range[0] != 0)) {
if (h_valid_range[0] != 0) {
mexPrintf("invalid RGB input values (must lie in [0,1]) \n") ;
}
return vl::VLE_Cuda ;
} else {
return vl::VLE_Success ;
}
}
} ;
} } // namespace vl::impl
template struct vl::impl::rgb2hsv<vl::VLDT_GPU, float> ;
template struct vl::impl::hsv2rgb<vl::VLDT_GPU, float> ;
#ifdef ENABLE_DOUBLE
template struct vl::impl::rgb2hsv<vl::VLDT_GPU, double> ;
template struct vl::impl::hsv2rgb<vl::VLDT_GPU, double> ;
#endif
|
dfb5006693652a5568171c851990e40817ea1a33.hip | // !!! This is a file automatically generated by hipify!!!
/* second version of N body simulation using CUDA */
#include <iostream>
#include <fstream>
#include <iomanip>
#include <math.h>
#include <hip/hip_runtime.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <ctime>
using namespace std;
/* define the global constants */
const double G = 6.67 * pow(10, -11);
const double e = 0.00001;
const double period = 1;
/* define the structure of particle */
struct particle
{
double m;
double pos_x;
double pos_y;
double v_x;
double v_y;
double a_x;
double a_y;
particle(double m = 0, double pos_x = 0, double pos_y = 0,
double v_x = 0, double v_y = 0, double a_x = 0, double a_y = 0)
{
this->m = m;
this->pos_x = pos_x;
this->pos_y = pos_y;
this->v_x = v_x;
this->v_y = v_y;
this->a_x = a_x;
this->a_y = a_y;
}
};
struct my_double2
{
double x, y;
__device__ my_double2(double x = 0, double y = 0)
{
this->x = x;
this->y = y;
}
};
/* define the global data */
int g_N; // number of particles
int g_P; // number of particles in a tile
thrust::host_vector<particle> g_pv; // particle vector
void setUp();
/* calculate the interaction between two bodies */
__device__ my_double2 bodyBodyAcceleration(double G, double e, particle b1, particle b2, my_double2 acceleration)
{
double r_2 = pow((b1.pos_x - b2.pos_x),2) + pow((b1.pos_y - b2.pos_y),2);
b1.a_x = (-1) * G * b2.m * (b1.pos_x - b2.pos_x) / (pow(r_2 + e, 1.5));
b1.a_y = (-1) * G * b2.m * (b1.pos_y - b2.pos_y) / (pow(r_2 + e, 1.5));
acceleration.x += b1.a_x;
acceleration.y += b1.a_y;
return acceleration;
}
/* calculate the interaction inside a P*P block */
__device__ my_double2 tileAcceleration(double G, double e, particle b, my_double2 acceleration)
{
extern __shared__ particle shParticles[];
for(int i = 0; i < blockDim.x; ++i)
{
acceleration = bodyBodyAcceleration(G, e, b, shParticles[i], acceleration);
}
return acceleration;
}
/* update the position */
__device__ void updatePosition(double period, particle* particle_arr)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
/* compute the velocity */
particle_arr[idx].v_x += particle_arr[idx].a_x * period;
particle_arr[idx].v_y += particle_arr[idx].a_y * period;
/* compute the new position */
particle_arr[idx].pos_x += particle_arr[idx].v_x * period;
particle_arr[idx].pos_y += particle_arr[idx].v_y * period;
}
/* calculate the whole acceleration */
__global__ void updateScene(int N, int P, double G, double e, double period, particle* particle_arr)
{
extern __shared__ particle shParticles[];
int id = blockIdx.x * blockDim.x + threadIdx.x;
particle ptc = particle_arr[id];
my_double2 acceleration;
acceleration.x = ptc.a_x;
acceleration.y = ptc.a_y;
int i, tile;
for(i = 0, tile = 0; i < N; i += P, ++tile)
{
/* fill in the shared memory */
int idx = tile * blockDim.x + threadIdx.x;
shParticles[threadIdx.x] = particle_arr[idx];
__syncthreads();
/* calculate the acceleration with a tile */
acceleration = tileAcceleration(G, e, ptc, acceleration);
__syncthreads();
}
ptc.a_x = acceleration.x;
ptc.a_y = acceleration.y;
updatePosition(period, particle_arr);
}
int main(int argc, char ** argv) {
setUp();
g_P = static_cast<int>(sqrt(g_N)) + 1;
/* device copy of particle array */
thrust::device_vector<particle> d_particle_arr = g_pv;
/* get the raw pointer of particle array */
particle *particle_arr = thrust::raw_pointer_cast(d_particle_arr.data());
clock_t start, finish;
start = clock();
int time = 0;
while(time < 100000)
{
hipLaunchKernelGGL(( updateScene), dim3(g_P),dim3(g_P),g_P*sizeof(particle), 0, g_N, g_P, G, e, period, particle_arr);
/*
g_pv = d_particle_arr;
for ( int i = 0; i < g_N; ++i )
{
cout << "particle: " << i << " pos_x: " << g_pv[i].pos_x << " pos_y: " << g_pv[i].pos_y << endl;
}
*/
time++;
}
finish = clock();
cout << "Execution Time: " << (double)(finish-start)/CLOCKS_PER_SEC << endl;
return 0;
}
/* read the input data */
void setUp()
{
ifstream inFile;
inFile.open("input.txt");
inFile >> g_N;
g_pv.resize(g_N);
for ( int i = 0; i < g_N; ++i )
{
inFile >> g_pv[i].m >> g_pv[i].pos_x >> g_pv[i].pos_y
>> g_pv[i].v_x >> g_pv[i].v_y >> g_pv[i].a_x >> g_pv[i].a_y;
}
inFile.close();
}
| dfb5006693652a5568171c851990e40817ea1a33.cu | /* second version of N body simulation using CUDA */
#include <iostream>
#include <fstream>
#include <iomanip>
#include <math.h>
#include <cuda.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <ctime>
using namespace std;
/* define the global constants */
const double G = 6.67 * pow(10, -11);
const double e = 0.00001;
const double period = 1;
/* define the structure of particle */
struct particle
{
double m;
double pos_x;
double pos_y;
double v_x;
double v_y;
double a_x;
double a_y;
particle(double m = 0, double pos_x = 0, double pos_y = 0,
double v_x = 0, double v_y = 0, double a_x = 0, double a_y = 0)
{
this->m = m;
this->pos_x = pos_x;
this->pos_y = pos_y;
this->v_x = v_x;
this->v_y = v_y;
this->a_x = a_x;
this->a_y = a_y;
}
};
struct my_double2
{
double x, y;
__device__ my_double2(double x = 0, double y = 0)
{
this->x = x;
this->y = y;
}
};
/* define the global data */
int g_N; // number of particles
int g_P; // number of particles in a tile
thrust::host_vector<particle> g_pv; // particle vector
void setUp();
/* calculate the interaction between two bodies */
__device__ my_double2 bodyBodyAcceleration(double G, double e, particle b1, particle b2, my_double2 acceleration)
{
double r_2 = pow((b1.pos_x - b2.pos_x),2) + pow((b1.pos_y - b2.pos_y),2);
b1.a_x = (-1) * G * b2.m * (b1.pos_x - b2.pos_x) / (pow(r_2 + e, 1.5));
b1.a_y = (-1) * G * b2.m * (b1.pos_y - b2.pos_y) / (pow(r_2 + e, 1.5));
acceleration.x += b1.a_x;
acceleration.y += b1.a_y;
return acceleration;
}
/* calculate the interaction inside a P*P block */
__device__ my_double2 tileAcceleration(double G, double e, particle b, my_double2 acceleration)
{
extern __shared__ particle shParticles[];
for(int i = 0; i < blockDim.x; ++i)
{
acceleration = bodyBodyAcceleration(G, e, b, shParticles[i], acceleration);
}
return acceleration;
}
/* update the position */
__device__ void updatePosition(double period, particle* particle_arr)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
/* compute the velocity */
particle_arr[idx].v_x += particle_arr[idx].a_x * period;
particle_arr[idx].v_y += particle_arr[idx].a_y * period;
/* compute the new position */
particle_arr[idx].pos_x += particle_arr[idx].v_x * period;
particle_arr[idx].pos_y += particle_arr[idx].v_y * period;
}
/* calculate the whole acceleration */
__global__ void updateScene(int N, int P, double G, double e, double period, particle* particle_arr)
{
extern __shared__ particle shParticles[];
int id = blockIdx.x * blockDim.x + threadIdx.x;
particle ptc = particle_arr[id];
my_double2 acceleration;
acceleration.x = ptc.a_x;
acceleration.y = ptc.a_y;
int i, tile;
for(i = 0, tile = 0; i < N; i += P, ++tile)
{
/* fill in the shared memory */
int idx = tile * blockDim.x + threadIdx.x;
shParticles[threadIdx.x] = particle_arr[idx];
__syncthreads();
/* calculate the acceleration with a tile */
acceleration = tileAcceleration(G, e, ptc, acceleration);
__syncthreads();
}
/* write the accumulated acceleration back to global memory so that updatePosition()
   integrates with the freshly computed values instead of the stale ones */
particle_arr[id].a_x = acceleration.x;
particle_arr[id].a_y = acceleration.y;
updatePosition(period, particle_arr);
}
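/* Editor's note (hedged sketch, not part of the original program): main() below
 * launches updateScene with g_P blocks of g_P threads, where g_P = floor(sqrt(N)) + 1,
 * so g_P * g_P >= N and every particle gets a thread (e.g. N = 1000 gives g_P = 32,
 * i.e. 1024 threads). An equivalent, more conventional 1-D sizing would be: */
static inline int blocksForParticles(int numParticles, int threadsPerBlock)
{
    // round up so that at least numParticles threads are launched
    return (numParticles + threadsPerBlock - 1) / threadsPerBlock;
}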
int main(int argc, char ** argv) {
setUp();
g_P = static_cast<int>(sqrt(g_N)) + 1;
/* device copy of particle array */
thrust::device_vector<particle> d_particle_arr = g_pv;
/* get the raw pointer of particle array */
particle *particle_arr = thrust::raw_pointer_cast(d_particle_arr.data());
clock_t start, finish;
start = clock();
int time = 0;
while(time < 100000)
{
updateScene<<<g_P,g_P,g_P*sizeof(particle)>>>(g_N, g_P, G, e, period, particle_arr);
/*
g_pv = d_particle_arr;
for ( int i = 0; i < g_N; ++i )
{
cout << "particle: " << i << " pos_x: " << g_pv[i].pos_x << " pos_y: " << g_pv[i].pos_y << endl;
}
*/
time++;
}
finish = clock();
cout << "Execution Time: " << (double)(finish-start)/CLOCKS_PER_SEC << endl;
return 0;
}
/* read the input data */
void setUp()
{
ifstream inFile;
inFile.open("input.txt");
inFile >> g_N;
g_pv.resize(g_N);
for ( int i = 0; i < g_N; ++i )
{
inFile >> g_pv[i].m >> g_pv[i].pos_x >> g_pv[i].pos_y
>> g_pv[i].v_x >> g_pv[i].v_y >> g_pv[i].a_x >> g_pv[i].a_y;
}
inFile.close();
}
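/* Editor's note (hedged example, the values are made up): setUp() expects "input.txt"
 * to contain the particle count followed by one line per particle with
 * m pos_x pos_y v_x v_y a_x a_y, e.g.
 *
 *   2
 *   5.0e10  0.0    0.0  0.0  0.0  0.0  0.0
 *   1.0e3   100.0  0.0  0.0  1.5  0.0  0.0
 */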
|
526d8544f37a10f7b58a3fae400565f7b38b4cf7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <random>
#include <conio.h>
//CPU - host
//GPU - device
//blockDim - size of block
//blockIdx - index of current block
//threadIdx - index of current thread in block
__device__ void elem(double* ar, int m, int n, double k, int N) //execution on Device
{
int tid = blockIdx.y * blockDim.y + threadIdx.y;
if (tid < N)
ar[m * N + tid] -= k * ar[n * N + tid];
}
__global__ void triangle_kernel(double* arr, int N)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int j;
double kof;
for (j = 0; j < N * N; j++)
{
// if (!arr[j * N + j]) elem(arr, j, N - 1, N, N);
if (tid >= j && tid < N - 1)
{
kof = arr[(tid + 1) * N + j] / arr[j * N + j];
elem(arr, tid + 1, j, kof, N);
}
}
}
// multiply the elements on the main diagonal of the (now triangular) matrix to obtain the determinant
double det(double* arr, int N)
{
double d = 1.0;
for (int i = 0; i < N; i++) d *= arr[i * N + i];
return d;
}
void print_cuda_device_info(hipDeviceProp_t& prop)
{
printf("Device name: %s\n", prop.name);
printf("Global memory available on device: %zu\n", prop.totalGlobalMem);
printf("Shared memory available per block: %zu\n", prop.sharedMemPerBlock);
printf("Warp size in threads: %d\n", prop.warpSize);
printf("Maximum number of threads per block: %d\n", prop.maxThreadsPerBlock);
printf("Maximum size of each dimension of a block[0]: %d\n", prop.maxThreadsDim[0]);
printf("Maximum size of each dimension of a block[1]: %d\n", prop.maxThreadsDim[1]);
printf("Maximum size of each dimension of a block[2]: %i\n", prop.maxThreadsDim[2]);
printf("Maximum size of each dimension of a grid[0]: %i\n", prop.maxGridSize[0]);
printf("Maximum size of each dimension of a grid[1]: %i\n", prop.maxGridSize[1]);
printf("Maximum size of each dimension of a grid[2]: %i\n", prop.maxGridSize[2]);
printf("Clock frequency in kilohertz: %i\n", prop.clockRate);
printf("totalConstMem: %zu\n", prop.totalConstMem);
printf("Major compute capability: %i\n", prop.major);
printf("Minor compute capability: %i\n", prop.minor);
printf("Number of multiprocessors on device: %i\n", prop.multiProcessorCount);
}
__host__ int main()
{
int N;
printf("Input size of matrix N = ");
scanf_s("%i", &N);
unsigned int timer;
int Matrix_size = N * N;//Size of matrix
int MatrixTotalMemory = Matrix_size * sizeof(double);// memory needed for the matrix on the GPU
double* InputMatrix = new double[Matrix_size];// allocate host memory for the matrix
// fill the matrix with random values and print it
srand(time(NULL));
for (int i = 0; i < Matrix_size; i++)
{
InputMatrix[i] = 1 + rand() % 9;
}
printf("\n");
for (int i = 0; i < Matrix_size; i++)
{
printf("%0.2f ", InputMatrix[i]);
if (((i + 1) % N == 0) && (i != 0)) printf("\n");
}
printf("\n");
_getch();
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
//print_cuda_device_info(prop);
double* MatrixDeviceMemory;
hipMalloc((void**)&MatrixDeviceMemory, MatrixTotalMemory);// allocate memory for the matrix on the GPU
hipMemcpy(MatrixDeviceMemory, InputMatrix, MatrixTotalMemory, hipMemcpyHostToDevice);// copy the matrix values to the GPU
dim3 gridSize = dim3(N, N, 1);// grid of blocks (dim3) used for the computation
dim3 blockSize = dim3(1, 1, 1);// block size (dim3) used for the computation
// set up events for timing the execution
float recording;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
//float start2 = clock(); //Fix the begin of work in timeline.
hipLaunchKernelGGL(( triangle_kernel) , dim3(gridSize), dim3(blockSize) , 0, 0, MatrixDeviceMemory, N); //Execution of matrix triangling
hipDeviceSynchronize();
hipEventSynchronize(stop);
//float end = clock(); //Fix the end of execution
// read back the elapsed time
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&recording, start, stop);
hipMemcpy(InputMatrix, MatrixDeviceMemory, MatrixTotalMemory, hipMemcpyDeviceToHost);// copy the result matrix from the GPU back to the CPU
// print the resulting (upper triangular) matrix
int string = 0;
for (int i = 0; i < Matrix_size; i++)
{
if (string && i == string * N)
{
int m = i;
for (int j = string * N; j < string * N + string; j++)
{
printf("0.00 ");
m++;
}
i = m;
}
printf("%.2f ", InputMatrix[i]);
if ((i + 1) % N == 0)
{
printf("\n");
string++;
}
}
printf("\ndet A = %.2f \n", det(InputMatrix, N));
//if (recording > 0)
printf("Time of execution = %.2f\n", recording);
//else printf("Time of execution = %.2f\n", end - start2);
hipFree(MatrixDeviceMemory); //Make the memory free
return 0;
}
| 526d8544f37a10f7b58a3fae400565f7b38b4cf7.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <curand_kernel.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <random>
#include <conio.h>
//CPU - host
//GPU - device
//blockDim - size of block
//blockIdx - index of current block
//threadIdx - index of current thread in block
__device__ void elem(double* ar, int m, int n, double k, int N) //execution on Device
{
int tid = blockIdx.y * blockDim.y + threadIdx.y;
if (tid < N)
ar[m * N + tid] -= k * ar[n * N + tid];
}
__global__ void triangle_kernel(double* arr, int N)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int j;
double kof;
for (j = 0; j < N * N; j++)
{
// if (!arr[j * N + j]) elem(arr, j, N - 1, N, N);
if (tid >= j && tid < N - 1)
{
kof = arr[(tid + 1) * N + j] / arr[j * N + j];
elem(arr, tid + 1, j, kof, N);
}
}
}
//multiply the elements on the main diagonal of the (now triangular) matrix to obtain the determinant
double det(double* arr, int N)
{
double d = 1.0;
for (int i = 0; i < N; i++) d *= arr[i * N + i];
return d;
}
void print_cuda_device_info(cudaDeviceProp& prop)
{
printf("Device name: %s\n", prop.name);
printf("Global memory available on device: %zu\n", prop.totalGlobalMem);
printf("Shared memory available per block: %zu\n", prop.sharedMemPerBlock);
printf("Warp size in threads: %d\n", prop.warpSize);
printf("Maximum number of threads per block: %d\n", prop.maxThreadsPerBlock);
printf("Maximum size of each dimension of a block[0]: %d\n", prop.maxThreadsDim[0]);
printf("Maximum size of each dimension of a block[1]: %d\n", prop.maxThreadsDim[1]);
printf("Maximum size of each dimension of a block[2]: %i\n", prop.maxThreadsDim[2]);
printf("Maximum size of each dimension of a grid[0]: %i\n", prop.maxGridSize[0]);
printf("Maximum size of each dimension of a grid[1]: %i\n", prop.maxGridSize[1]);
printf("Maximum size of each dimension of a grid[2]: %i\n", prop.maxGridSize[2]);
printf("Clock frequency in kilohertz: %i\n", prop.clockRate);
printf("totalConstMem: %zu\n", prop.totalConstMem);
printf("Major compute capability: %i\n", prop.major);
printf("Minor compute capability: %i\n", prop.minor);
printf("Number of multiprocessors on device: %i\n", prop.multiProcessorCount);
}
__host__ int main()
{
int N;
printf("Input size of matrix N = ");
scanf_s("%i", &N);
unsigned int timer;
int Matrix_size = N * N;//Size of matrix
int MatrixTotalMemory = Matrix_size * sizeof(double);//memory needed for the matrix on the GPU
double* InputMatrix = new double[Matrix_size];//allocate host memory for the matrix
//fill the matrix with random values and print it
srand(time(NULL));
for (int i = 0; i < Matrix_size; i++)
{
InputMatrix[i] = 1 + rand() % 9;
}
printf("\n");
for (int i = 0; i < Matrix_size; i++)
{
printf("%0.2f ", InputMatrix[i]);
if (((i + 1) % N == 0) && (i != 0)) printf("\n");
}
printf("\n");
_getch();
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
//print_cuda_device_info(prop);
double* MatrixDeviceMemory;
cudaMalloc((void**)&MatrixDeviceMemory, MatrixTotalMemory);//allocate memory for the matrix on the GPU
cudaMemcpy(MatrixDeviceMemory, InputMatrix, MatrixTotalMemory, cudaMemcpyHostToDevice);//copy the matrix values to the GPU
dim3 gridSize = dim3(N, N, 1);//grid of blocks (dim3) used for the computation
dim3 blockSize = dim3(1, 1, 1);//block size (dim3) used for the computation
//set up events for timing the execution
float recording;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//float start2 = clock(); //Fix the begin of work in timeline.
triangle_kernel <<< gridSize, blockSize >>> (MatrixDeviceMemory, N); //Execution of matrix triangling
cudaThreadSynchronize();
cudaEventSynchronize(stop);
//float end = clock(); //Fix the end of execution
//read back the elapsed time
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&recording, start, stop);
cudaMemcpy(InputMatrix, MatrixDeviceMemory, MatrixTotalMemory, cudaMemcpyDeviceToHost);//copy the result matrix from the GPU back to the CPU
//print the resulting (upper triangular) matrix
int string = 0;
for (int i = 0; i < Matrix_size; i++)
{
if (string && i == string * N)
{
int m = i;
for (int j = string * N; j < string * N + string; j++)
{
printf("0.00 ");
m++;
}
i = m;
}
printf("%.2f ", InputMatrix[i]);
if ((i + 1) % N == 0)
{
printf("\n");
string++;
}
}
printf("\ndet A = %.2f \n", det(InputMatrix, N));
//if (recording > 0)
printf("Time of execution = %.2f\n", recording);
//else printf("Time of execution = %.2f\n", end - start2);
cudaFree(MatrixDeviceMemory); //Make the memory free
return 0;
}
|
cd79d74af81f6efbb0ebaa33975346066fa74247.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUDA.h"
#include "CUDA_runtime.h"
#include "device_launch_parameters.h"
#include "DeviceArray.cuh"
#include "assert.h"
#define BLOCK_SIZE 256
#define NEWTON 0.2f
#define MIN_DIST 50.0f
/******************************
Data distribution on GRID:
- The following represents the time progression of the 1 dimensional grid that the
compute_gravs kernel works on.
- We separate the N stars in 1D tiles, and each thread works out the gravitational
force acting on a star.
- We have to load the positions of the other stars into shared memory
* in chunks to make it fit. Each horizontal line represents the moment when we load the
new stars' positions
------------------------------- <-Start point
| xx | xx | xx | xx | xx | x |
| xx | xx | xx | xx | xx | x |
------------------------------- <-New tile, load new positions!
| xx | xx | xx | xx | xx | x |
| xx | xx | xx | xx | xx | x | |
------------------------------- | Time direction
| xx | xx | xx | xx | xx | x | |
| xx | xx | xx | xx | xx | x | V
-------------------------------
| xx | xx | xx | xx | xx | x |
| xx | xx | xx | xx | xx | x |
-------------------------------
| xx | xx | xx | xx | xx | x |
| | | | | | |
------------------------------- <- End
******************************/
__global__ void compute_gravs(const float* d_pos_x, const float* d_pos_y,
const float* d_masses, float2* d_gravs, int num_stars )
{
//local coordinate on the block
int local_i = threadIdx.x;
//global coordinate on the grid
int global_i = blockIdx.x*blockDim.x + local_i;
//this is safe from G80, since the device should keep track of the active threads and not wait for
//the inactive ones in case of __syncthreads
if( global_i >= num_stars ) return;
extern __shared__ float s[];
// Partition the SMEM stores so that each instruction
// never gets any bank conflicts and coalesces access too.
float2* pos = (float2*)s;
float* mass = (float*)&s[2*blockDim.x];
float2 total_acc;
total_acc.x = 0.0f;
total_acc.y = 0.0f;
float2 cur_pos = make_float2(d_pos_x[global_i], d_pos_y[global_i]);
for( unsigned int tile = 0; tile < gridDim.x; ++tile )
{
//current vertical position (the j we are at in this block)
int current_j = tile*blockDim.x;
if( (current_j+local_i) < num_stars )
{
pos[local_i] = make_float2(
d_pos_x[current_j + local_i], d_pos_y[current_j + local_i]);
mass[local_i] = d_masses[current_j + local_i];
}
__syncthreads();
//summing all the j's, we need to make sure we don't step outside the
//matrix.
int iterate_till = blockDim.x;
if ( current_j + blockDim.x >= num_stars )
iterate_till = num_stars - current_j;
#pragma unroll 128
for( unsigned int k = 0; k < iterate_till; ++k )
{
//beware of the tile spanning the diagonal of the big matrix!
if ( global_i != current_j + k )
{
float2 r;
r.x = pos[k].x - cur_pos.x;
r.y = pos[k].y - cur_pos.y ;
float dist_square = r.x*r.x + r.y*r.y + MIN_DIST; // impose min_dist to avoid infinities
float inv_sqrt_dist = rsqrtf( dist_square ); // for computing the real gravity interaction
//force = G*m*M/ r^2, here G = NEWTON, is multiplied only once at the end
float acc_strength = mass[k] * inv_sqrt_dist * inv_sqrt_dist * inv_sqrt_dist;
total_acc.x += acc_strength * r.x;
total_acc.y += acc_strength * r.y;
}
}
__syncthreads();
}
//Now we can multiply by G:
total_acc.x *= NEWTON;
total_acc.y *= NEWTON;
//store the result in global memory
d_gravs[global_i] = total_acc;
}
void compute_grav(
float* h_pos_x,
float* h_pos_y,
float* h_masses,
float2* h_gravs,
unsigned int num_stars)
{
//transfer data on the device
DeviceArray<float> d_pos_x( num_stars );
DeviceArray<float> d_pos_y( num_stars );
d_pos_x.copyHostToDevice( h_pos_x );
d_pos_y.copyHostToDevice( h_pos_y );
DeviceArray<float> d_masses( num_stars );
d_masses.copyHostToDevice( h_masses );
//instantiate a vectors to contain the gravitational forces
DeviceArray<float2> d_gravs( num_stars );
unsigned int grid_side = (num_stars + BLOCK_SIZE - 1) / BLOCK_SIZE;
//get regular pointers to be passed to the kernel
float* d_pos_x_ptr = d_pos_x.GetPtr();
float* d_pos_y_ptr = d_pos_y.GetPtr();
float* d_masses_ptr = d_masses.GetPtr();
float2* d_gravs_ptr = d_gravs.GetPtr();
//take the positions and compute the partial sums of the forces acting on each star. We need to store partials because
//we had to tile the matrix of the forces..
hipLaunchKernelGGL(( compute_gravs), dim3(grid_side), dim3(BLOCK_SIZE), BLOCK_SIZE*3*sizeof(float) , 0,
d_pos_x_ptr, d_pos_y_ptr, d_masses_ptr, d_gravs_ptr, num_stars );
hipDeviceSynchronize();
//transfer gravs back to host
d_gravs.copyDeviceToHost( h_gravs );
} | cd79d74af81f6efbb0ebaa33975346066fa74247.cu | #include "CUDA.h"
#include "CUDA_runtime.h"
#include "device_launch_parameters.h"
#include "DeviceArray.cuh"
#include "assert.h"
#define BLOCK_SIZE 256
#define NEWTON 0.2f
#define MIN_DIST 50.0f
/******************************
Data distribution on GRID:
- The following represents the time progression of the 1 dimensional grid that the
compute_gravs kernel works on.
- We separate the N stars in 1D tiles, and each thread works out the gravitational
force acting on a star.
- We have to load the positions of the other stars into shared memory
* in chunks to make it fit. Each horizontal line represents the moment when we load the
new stars' positions
------------------------------- <-Start point
| xx | xx | xx | xx | xx | x |
| xx | xx | xx | xx | xx | x |
------------------------------- <-New tile, load new positions!
| xx | xx | xx | xx | xx | x |
| xx | xx | xx | xx | xx | x | |
------------------------------- | Time direction
| xx | xx | xx | xx | xx | x | |
| xx | xx | xx | xx | xx | x | V
-------------------------------
| xx | xx | xx | xx | xx | x |
| xx | xx | xx | xx | xx | x |
-------------------------------
| xx | xx | xx | xx | xx | x |
| | | | | | |
------------------------------- <- End
******************************/
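/* Editor's note (hedged illustration, the numbers are an example only): with the
 * BLOCK_SIZE of 256 defined above and, say, num_stars = 1000, the launch in
 * compute_grav() uses ceil(1000/256) = 4 blocks, each block loops over 4 tiles,
 * and the dynamic shared memory per block is BLOCK_SIZE*3*sizeof(float) = 3 KB
 * (a float2 position plus a float mass per resident star). */
static inline size_t tileSharedBytes(int threadsPerBlock)
{
    // float2 position + float mass for every star cached in the tile
    return (size_t)threadsPerBlock * (2 * sizeof(float) + sizeof(float));
}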
__global__ void compute_gravs(const float* d_pos_x, const float* d_pos_y,
const float* d_masses, float2* d_gravs, int num_stars )
{
//local coordinate on the block
int local_i = threadIdx.x;
//global coordinate on the grid
int global_i = blockIdx.x*blockDim.x + local_i;
//this is safe from G80, since the device should keep track of the active threads and not wait for
//the inactive ones in case of __syncthreads
if( global_i >= num_stars ) return;
extern __shared__ float s[];
// Partition the SMEM stores so that each instruction
// never gets any bank conflicts and coalesces access too.
float2* pos = (float2*)s;
float* mass = (float*)&s[2*blockDim.x];
float2 total_acc;
total_acc.x = 0.0f;
total_acc.y = 0.0f;
float2 cur_pos = make_float2(d_pos_x[global_i], d_pos_y[global_i]);
for( unsigned int tile = 0; tile < gridDim.x; ++tile )
{
//current vertical position (the j we are at in this block)
int current_j = tile*blockDim.x;
if( (current_j+local_i) < num_stars )
{
pos[local_i] = make_float2(
d_pos_x[current_j + local_i], d_pos_y[current_j + local_i]);
mass[local_i] = d_masses[current_j + local_i];
}
__syncthreads();
//summing all the j's, we need to make sure we don't step outside the
//matrix.
int iterate_till = blockDim.x;
if ( current_j + blockDim.x >= num_stars )
iterate_till = num_stars - current_j;
#pragma unroll 128
for( unsigned int k = 0; k < iterate_till; ++k )
{
//beware of the tile spanning the diagonal of the big matrix!
if ( global_i != current_j + k )
{
float2 r;
r.x = pos[k].x - cur_pos.x;
r.y = pos[k].y - cur_pos.y ;
float dist_square = r.x*r.x + r.y*r.y + MIN_DIST; // impose min_dist to avoid infinities
float inv_sqrt_dist = rsqrtf( dist_square ); // for computing the real gravity interaction
//force = G*m*M/ r^2, here G = NEWTON, is multiplied only once at the end
float acc_strength = mass[k] * inv_sqrt_dist * inv_sqrt_dist * inv_sqrt_dist;
total_acc.x += acc_strength * r.x;
total_acc.y += acc_strength * r.y;
}
}
__syncthreads();
}
//Now we can multiply by G:
total_acc.x *= NEWTON;
total_acc.y *= NEWTON;
//store the result in global memory
d_gravs[global_i] = total_acc;
}
void compute_grav(
float* h_pos_x,
float* h_pos_y,
float* h_masses,
float2* h_gravs,
unsigned int num_stars)
{
//transfer data on the device
DeviceArray<float> d_pos_x( num_stars );
DeviceArray<float> d_pos_y( num_stars );
d_pos_x.copyHostToDevice( h_pos_x );
d_pos_y.copyHostToDevice( h_pos_y );
DeviceArray<float> d_masses( num_stars );
d_masses.copyHostToDevice( h_masses );
//instantiate a vectors to contain the gravitational forces
DeviceArray<float2> d_gravs( num_stars );
unsigned int grid_side = (num_stars + BLOCK_SIZE - 1) / BLOCK_SIZE;
//get regular pointers to be passed to the kernel
float* d_pos_x_ptr = d_pos_x.GetPtr();
float* d_pos_y_ptr = d_pos_y.GetPtr();
float* d_masses_ptr = d_masses.GetPtr();
float2* d_gravs_ptr = d_gravs.GetPtr();
//take the positions and compute the partial sums of the forces acting on each star. We need to store partials because
//we had to tile the matrix of the forces..
compute_gravs<<< grid_side, BLOCK_SIZE, BLOCK_SIZE*3*sizeof(float) >>>(
d_pos_x_ptr, d_pos_y_ptr, d_masses_ptr, d_gravs_ptr, num_stars );
cudaDeviceSynchronize();
//transfer gravs back to host
d_gravs.copyDeviceToHost( h_gravs );
} |
2aa97117ee13316e6ac1df72440972f748f16dc6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__device__ float4 read(const unsigned short * in_half, int x, int y, int w)
{
return make_float4(__half2float(in_half[4 * (x + y * w) + 0]),
__half2float(in_half[4 * (x + y * w) + 1]),
__half2float(in_half[4 * (x + y * w) + 2]),
__half2float(in_half[4 * (x + y * w) + 3]));
}
__device__ float weight(int i, int j, int x, int y, float invdx2)
{
return exp(-invdx2*((i-x)*(i-x) + (j-y)*(j-y)));
}
__global__ void
gaussian_blur(const unsigned short * in_half,
unsigned short * out_half,
const int n,
const int width,
const int height)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
// array index
const int idx = x + width * y;
// inside image bounds check
if (x >= width || y >= height) {
return;
}
// kernel code
float4 out = make_float4(0, 0, 0, 0);
const float invdx2 = 1.0/(width*width);
float totWeight = 0;
float w;
for(int j = y - n; j <= y + n; ++j) {
for(int i = x - n; i <= x + n; ++i) {
if (i>=0 && j>= 0 && i < width && j < height) {
w = weight(i, j, x, y, invdx2);
out += w * read(in_half, i, j, width);
totWeight += w;
}
}
}
out /= totWeight;
// float to half conversion
out_half[4 * idx + 0] = __float2half_rn(out.x);
out_half[4 * idx + 1] = __float2half_rn(out.y);
out_half[4 * idx + 2] = __float2half_rn(out.z);
out_half[4 * idx + 3] = __float2half_rn(out.w);
}
| 2aa97117ee13316e6ac1df72440972f748f16dc6.cu | __device__ float4 read(const unsigned short * in_half, int x, int y, int w)
{
return make_float4(__half2float(in_half[4 * (x + y * w) + 0]),
__half2float(in_half[4 * (x + y * w) + 1]),
__half2float(in_half[4 * (x + y * w) + 2]),
__half2float(in_half[4 * (x + y * w) + 3]));
}
__device__ float weight(int i, int j, int x, int y, float invdx2)
{
return exp(-invdx2*((i-x)*(i-x) + (j-y)*(j-y)));
}
__global__ void
gaussian_blur(const unsigned short * in_half,
unsigned short * out_half,
const int n,
const int width,
const int height)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
// array index
const int idx = x + width * y;
// inside image bounds check
if (x >= width || y >= height) {
return;
}
// kernel code
float4 out = make_float4(0, 0, 0, 0);
const float invdx2 = 1.0/(width*width);
float totWeight = 0;
float w;
for(int j = y - n; j <= y + n; ++j) {
for(int i = x - n; i <= x + n; ++i) {
if (i>=0 && j>= 0 && i < width && j < height) {
w = weight(i, j, x, y, invdx2);
out += w * read(in_half, i, j, width);
totWeight += w;
}
}
}
out /= totWeight;
// float to half conversion
out_half[4 * idx + 0] = __float2half_rn(out.x);
out_half[4 * idx + 1] = __float2half_rn(out.y);
out_half[4 * idx + 2] = __float2half_rn(out.z);
out_half[4 * idx + 3] = __float2half_rn(out.w);
}
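// Editor's note (hedged usage sketch): this file only contains the kernel, so the
// launch below is an assumption, not original code; the 16x16 block size and the
// device pointer names d_in_half/d_out_half are illustrative.
static void launch_gaussian_blur(const unsigned short *d_in_half,
                                 unsigned short *d_out_half,
                                 int n, int width, int height)
{
    dim3 block(16, 16);
    dim3 grid((width + block.x - 1) / block.x,
              (height + block.y - 1) / block.y);
    gaussian_blur<<<grid, block>>>(d_in_half, d_out_half, n, width, height);
}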
|
d571e4d88fabf6df28ef47db9da494369397efd7.hip | // !!! This is a file automatically generated by hipify!!!
#include "Prerequisites.cuh"
#include "FFT.cuh"
#include "Helper.cuh"
#include "Generics.cuh"
#include "ImageManipulation.cuh"
#include "Masking.cuh"
namespace gtom
{
///////////////////////////
//CUDA kernel declaration//
///////////////////////////
/////////////////
//Local Lowpass//
/////////////////
void d_LocalLowpass(tfloat* d_input, tfloat* d_output, int3 dims, tfloat* d_resolution, tfloat maxprecision)
{
tcomplex* d_inputft;
hipMalloc((void**)&d_inputft, ElementsFFT(dims) * sizeof(tcomplex));
d_ValueFill(d_output, Elements(dims), (tfloat)0);
tcomplex* d_maskedft;
hipMalloc((void**)&d_maskedft, ElementsFFT(dims) * sizeof(tcomplex));
tfloat* d_cleanresolution;
hipMalloc((void**)&d_cleanresolution, Elements(dims) * sizeof(tfloat));
tfloat* d_mask;
hipMalloc((void**)&d_mask, Elements(dims) * sizeof(tfloat));
tfloat* d_maskhalf;
hipMalloc((void**)&d_maskhalf, ElementsFFT(dims) * sizeof(tfloat));
d_Xray(d_resolution, d_cleanresolution, dims);
d_FFTR2C(d_input, d_inputft, DimensionCount(dims), dims);
imgstats5* d_resstats = (imgstats5*)CudaMallocValueFilled(5, 0);
d_Dev(d_cleanresolution, d_resstats, Elements(dims), (char*)NULL);
imgstats5* h_resstats = (imgstats5*)MallocFromDeviceArray(d_resstats, 5 * sizeof(tfloat));
tfloat minval = h_resstats[0].min;
tfloat maxval = h_resstats[0].max;
tfloat* d_tempsum = CudaMallocValueFilled(1, (tfloat)0);
d_Sum(d_input, d_tempsum, Elements(dims));
tfloat* h_tempsum = (tfloat*)MallocFromDeviceArray(d_tempsum, sizeof(tfloat));
tfloat originalmean = *h_tempsum / (tfloat)Elements(dims);
int bins = (int)min((maxval - minval) / maxprecision, (tfloat)4096);
tfloat binsize = (maxval - minval) / (tfloat)bins;
uint* d_histogram = CudaMallocValueFilled(bins, (uint)0);
d_Histogram(d_cleanresolution, d_histogram, Elements(dims), bins, minval, maxval);
uint* h_histogram = (uint*)MallocFromDeviceArray(d_histogram, bins * sizeof(uint));
hipfftHandle planback = d_IFFTC2RGetPlan(DimensionCount(dims), dims);
for (int b = 0; b < bins; b++)
{
if (h_histogram[b] == 0)
continue;
tfloat res = (tfloat)b * binsize + minval;
tfloat freq = (tfloat)dims.x / res;
d_ValueFill(d_mask, Elements(dims), (tfloat)1);
d_SphereMask(d_mask, d_mask, dims, &freq, 0, NULL, false);
d_RemapFull2HalfFFT(d_mask, d_maskhalf, dims);
d_ComplexMultiplyByVector(d_inputft, d_maskhalf, d_maskedft, ElementsFFT(dims));
d_IFFTC2R(d_maskedft, d_mask, &planback, dims);
d_IsBetween(d_resolution, (tfloat*)d_maskedft, Elements(dims), (tfloat)b * binsize + minval, (tfloat)(b + 1) * binsize + minval);
d_MultiplyByVector(d_mask, (tfloat*)d_maskedft, d_mask, Elements(dims));
d_AddVector(d_output, d_mask, d_output, Elements(dims));
}
free(h_histogram);
hipFree(d_histogram);
free(h_tempsum);
hipFree(d_tempsum);
free(h_resstats);
hipFree(d_resstats);
hipFree(d_maskhalf);
hipFree(d_mask);
hipFree(d_cleanresolution);
hipFree(d_maskedft);
hipFree(d_inputft);
}
////////////////
//CUDA kernels//
////////////////
} | d571e4d88fabf6df28ef47db9da494369397efd7.cu | #include "Prerequisites.cuh"
#include "FFT.cuh"
#include "Helper.cuh"
#include "Generics.cuh"
#include "ImageManipulation.cuh"
#include "Masking.cuh"
namespace gtom
{
///////////////////////////
//CUDA kernel declaration//
///////////////////////////
/////////////////
//Local Lowpass//
/////////////////
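// Editor's note (summary added for clarity, not part of the original source):
// d_LocalLowpass builds a histogram of the per-voxel resolution map, and for every
// occupied histogram bin it low-passes the input in Fourier space with a spherical
// mask whose radius corresponds to that resolution, then copies the filtered values
// only into the voxels whose resolution falls inside that bin. The maxprecision
// argument bounds the bin width (capped at 4096 bins).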
void d_LocalLowpass(tfloat* d_input, tfloat* d_output, int3 dims, tfloat* d_resolution, tfloat maxprecision)
{
tcomplex* d_inputft;
cudaMalloc((void**)&d_inputft, ElementsFFT(dims) * sizeof(tcomplex));
d_ValueFill(d_output, Elements(dims), (tfloat)0);
tcomplex* d_maskedft;
cudaMalloc((void**)&d_maskedft, ElementsFFT(dims) * sizeof(tcomplex));
tfloat* d_cleanresolution;
cudaMalloc((void**)&d_cleanresolution, Elements(dims) * sizeof(tfloat));
tfloat* d_mask;
cudaMalloc((void**)&d_mask, Elements(dims) * sizeof(tfloat));
tfloat* d_maskhalf;
cudaMalloc((void**)&d_maskhalf, ElementsFFT(dims) * sizeof(tfloat));
d_Xray(d_resolution, d_cleanresolution, dims);
d_FFTR2C(d_input, d_inputft, DimensionCount(dims), dims);
imgstats5* d_resstats = (imgstats5*)CudaMallocValueFilled(5, 0);
d_Dev(d_cleanresolution, d_resstats, Elements(dims), (char*)NULL);
imgstats5* h_resstats = (imgstats5*)MallocFromDeviceArray(d_resstats, 5 * sizeof(tfloat));
tfloat minval = h_resstats[0].min;
tfloat maxval = h_resstats[0].max;
tfloat* d_tempsum = CudaMallocValueFilled(1, (tfloat)0);
d_Sum(d_input, d_tempsum, Elements(dims));
tfloat* h_tempsum = (tfloat*)MallocFromDeviceArray(d_tempsum, sizeof(tfloat));
tfloat originalmean = *h_tempsum / (tfloat)Elements(dims);
int bins = (int)min((maxval - minval) / maxprecision, (tfloat)4096);
tfloat binsize = (maxval - minval) / (tfloat)bins;
uint* d_histogram = CudaMallocValueFilled(bins, (uint)0);
d_Histogram(d_cleanresolution, d_histogram, Elements(dims), bins, minval, maxval);
uint* h_histogram = (uint*)MallocFromDeviceArray(d_histogram, bins * sizeof(uint));
cufftHandle planback = d_IFFTC2RGetPlan(DimensionCount(dims), dims);
for (int b = 0; b < bins; b++)
{
if (h_histogram[b] == 0)
continue;
tfloat res = (tfloat)b * binsize + minval;
tfloat freq = (tfloat)dims.x / res;
d_ValueFill(d_mask, Elements(dims), (tfloat)1);
d_SphereMask(d_mask, d_mask, dims, &freq, 0, NULL, false);
d_RemapFull2HalfFFT(d_mask, d_maskhalf, dims);
d_ComplexMultiplyByVector(d_inputft, d_maskhalf, d_maskedft, ElementsFFT(dims));
d_IFFTC2R(d_maskedft, d_mask, &planback, dims);
d_IsBetween(d_resolution, (tfloat*)d_maskedft, Elements(dims), (tfloat)b * binsize + minval, (tfloat)(b + 1) * binsize + minval);
d_MultiplyByVector(d_mask, (tfloat*)d_maskedft, d_mask, Elements(dims));
d_AddVector(d_output, d_mask, d_output, Elements(dims));
}
free(h_histogram);
cudaFree(d_histogram);
free(h_tempsum);
cudaFree(d_tempsum);
free(h_resstats);
cudaFree(d_resstats);
cudaFree(d_maskhalf);
cudaFree(d_mask);
cudaFree(d_cleanresolution);
cudaFree(d_maskedft);
cudaFree(d_inputft);
}
////////////////
//CUDA kernels//
////////////////
} |
b2d19ee670c5a7618629989136f312054f9297bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void same_num_channels_mul_kernel(const float *data_l, const float *data_r, float *result, int total)
{
int idx = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
if (idx / 2 < total) {
result[idx] = data_l[idx] * data_r[idx] - data_l[idx + 1] * data_r[idx + 1];
result[idx + 1] = data_l[idx] * data_r[idx + 1] + data_l[idx + 1] * data_r[idx];
}
} | b2d19ee670c5a7618629989136f312054f9297bf.cu | #include "includes.h"
__global__ void same_num_channels_mul_kernel(const float *data_l, const float *data_r, float *result, int total)
{
int idx = 2 * (blockIdx.x * blockDim.x + threadIdx.x);
if (idx / 2 < total) {
result[idx] = data_l[idx] * data_r[idx] - data_l[idx + 1] * data_r[idx + 1];
result[idx + 1] = data_l[idx] * data_r[idx + 1] + data_l[idx + 1] * data_r[idx];
}
} |
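// Editor's note (hedged CPU reference, added for clarity): the kernel above treats
// the buffers as interleaved complex numbers (re, im) and computes the product
// (a+bi)(c+di) = (ac - bd) + (ad + bc)i, with "total" counting complex elements.
static void same_num_channels_mul_cpu(const float *data_l, const float *data_r,
                                      float *result, int total)
{
    for (int k = 0; k < total; ++k) {
        float a = data_l[2 * k], b = data_l[2 * k + 1];
        float c = data_r[2 * k], d = data_r[2 * k + 1];
        result[2 * k]     = a * c - b * d;
        result[2 * k + 1] = a * d + b * c;
    }
}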
56eb5917ed92a804e3c1502414be3766efb7f437.hip | // !!! This is a file automatically generated by hipify!!!
/*-------------------------------------------------------------------------
*
* CUDA functions for ray-voxel intersection based projection
*
* This file has the necessary functions to perform X-ray parallel projection
* operation given a geometry, angles and image. It uses the so-called
* Jacobs algorithm to compute efficiently the length of the x-rays over
* voxel space. It is called Siddon because Jacobs' algorithm is just a small
* improvement over the traditional Siddon method.
*
* CODE by Ander Biguri
*
---------------------------------------------------------------------------
---------------------------------------------------------------------------
Copyright (c) 2015, University of Bath and CERN- European Organization for
Nuclear Research
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------
Contact: [email protected]
Codes : https://github.com/CERN/TIGRE
---------------------------------------------------------------------------
*/
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include "Siddon_projection_parallel.hpp"
//#include "mex.h"
#include <math.h>
// if (__err != hipSuccess) { \
// printf("%s \n", msg);\
// printf("%s \n", hipGetErrorString(__err));\
// } \
// TODO: Error logging
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
} while (0)
// Declare the texture reference.
texture<float, hipTextureType3D , hipReadModeElementType> tex;
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* --->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
__global__ void kernelPixelDetector_parallel( Geometry geo,
float* detector,
Point3D source ,
Point3D deltaU,
Point3D deltaV,
Point3D uvOrigin){
// size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned long y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long idx = x * geo.nDetecV + y;
if ((x>= geo.nDetecU) | (y>= geo.nDetecV))
return;
/////// Get coordinates XYZ of pixel UV
int pixelV = geo.nDetecV-y-1;
int pixelU = x;
Point3D pixel1D;
pixel1D.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
pixel1D.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
pixel1D.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);
source.x=(source.x+pixelU*deltaU.x+pixelV*deltaV.x);
source.y=(source.y+pixelU*deltaU.y+pixelV*deltaV.y);
source.z=(source.z+pixelU*deltaU.z+pixelV*deltaV.z);
///////
// Siddon's ray-voxel intersection, optimized as in doi=10.1.1.55.7516
//////
Point3D ray;
// vector of Xray
ray.x=pixel1D.x-source.x;
ray.y=pixel1D.y-source.y;
ray.z=pixel1D.z-source.z;
// These variables are omitted because
// bx,by,bz ={0,0,0}
// dx,dy,dz ={1,1,1}
// compute parameter values for x-ray parametric equation. eq(3-10)
float axm,aym,azm;
float axM,ayM,azM;
/**************************************
*
*
* Problem. In parallel beam, often ray.y or ray.x=0;
* This leads to infinities propagating and breaking everything.
*
* We need to fix it.
*
***************************************/
// In the paper Nx= number of X planes-> Nvoxel+1
axm=min(-source.x/ray.x,(geo.nVoxelX-source.x)/ray.x);
aym=min(-source.y/ray.y,(geo.nVoxelY-source.y)/ray.y);
// azm=min(-source.z/ray.z,(geo.nVoxelZ-source.z)/ray.z);
axM=max(-source.x/ray.x,(geo.nVoxelX-source.x)/ray.x);
ayM=max(-source.y/ray.y,(geo.nVoxelY-source.y)/ray.y);
// azM=max(-source.z/ray.z,(geo.nVoxelZ-source.z)/ray.z);
float am=(max(axm,aym));
float aM=(min(axM,ayM));
// line intersects voxel space -> am<aM
if (am>=aM)
detector[idx]=0;
// Compute max/min image INDEX for intersection eq(11-19)
// Discussion about ternary operator in CUDA: https://stackoverflow.com/questions/7104384/in-cuda-why-is-a-b010-more-efficient-than-an-if-else-version
float imin,imax,jmin,jmax;
// for X
if( source.x<pixel1D.x){
imin=(am==axm)? 1 : ceil (source.x+am*ray.x);
imax=(aM==axM)? geo.nVoxelX : floor(source.x+aM*ray.x);
}else{
imax=(am==axm)? geo.nVoxelX-1 : floor(source.x+am*ray.x);
imin=(aM==axM)? 0 : ceil (source.x+aM*ray.x);
}
// for Y
if( source.y<pixel1D.y){
jmin=(am==aym)? 1 : ceil (source.y+am*ray.y);
jmax=(aM==ayM)? geo.nVoxelY : floor(source.y+aM*ray.y);
}else{
jmax=(am==aym)? geo.nVoxelY-1 : floor(source.y+am*ray.y);
jmin=(aM==ayM)? 0 : ceil (source.y+aM*ray.y);
}
// // for Z
// if( source.z<pixel1D.z){
// kmin=(am==azm)? 1 : ceil (source.z+am*ray.z);
// kmax=(aM==azM)? geo.nVoxelZ : floor(source.z+aM*ray.z);
// }else{
// kmax=(am==azm)? geo.nVoxelZ-1 : floor(source.z+am*ray.z);
// kmin=(aM==azM)? 0 : ceil (source.z+aM*ray.z);
// }
// get intersection point N1. eq(20-21) [(also eq 9-10)]
float ax,ay;
ax=(source.x<pixel1D.x)? (imin-source.x)/ray.x : (imax-source.x)/ray.x;
ay=(source.y<pixel1D.y)? (jmin-source.y)/ray.y : (jmax-source.y)/ray.y;
// az=(source.z<pixel1D.z)? (kmin-source.z)/ray.z : (kmax-source.z)/ray.z;
// get index of first intersection. eq (26) and (19)
int i,j,k;
float aminc=min(ax,ay);
i=(int)floor(source.x+ (aminc+am)/2*ray.x);
j=(int)floor(source.y+ (aminc+am)/2*ray.y);
k=(int)floor(source.z+ (aminc+am)/2*ray.z);
// k=(int)source.z;
// Initialize
float ac=am;
//eq (28), unit alphas
float axu,ayu;
axu=1/abs(ray.x);
ayu=1/abs(ray.y);
// azu=1/abs(ray.z);
// eq(29), direction of update
float iu,ju;
iu=(source.x< pixel1D.x)? 1 : -1;
ju=(source.y< pixel1D.y)? 1 : -1;
// ku=(source.z< pixel1D.z)? 1 : -1;
float maxlength=sqrt(ray.x*ray.x*geo.dVoxelX*geo.dVoxelX+ray.y*ray.y*geo.dVoxelY*geo.dVoxelY);//+ray.z*ray.z*geo.dVoxelZ*geo.dVoxelZ);
float sum=0;
unsigned int Np=(imax-imin+1)+(jmax-jmin+1);//+(kmax-kmin+1); // Number of intersections
// Go iterating over the line, intersection by intersection. If double point, no worries, 0 will be computed
for (unsigned int ii=0;ii<Np;ii++){
if (ax==aminc){
sum+=(ax-ac)*tex3D(tex, i+0.5, j+0.5, k+0.5);//(ax-ac)*
i=i+iu;
ac=ax;
ax+=axu;
}else if(ay==aminc){
sum+=(ay-ac)*tex3D(tex, i+0.5, j+0.5, k+0.5);//(ay-ac)*
j=j+ju;
ac=ay;
ay+=ayu;
// }else if(az==aminc){
// sum+=(az-ac)*tex3D(tex, i+0.5, j+0.5, k+0.5);
// k=k+ku;
// ac=az;
// az+=azu;
}
aminc=min(ay,ax);
}
detector[idx]=maxlength*sum;
// detector[idx]=(iu);
}
int siddon_ray_projection_parallel(float const * const img, Geometry geo, float** result,float const * const alphas,int nalpha){
// copy data to CUDA memory
hipArray *d_imagedata = 0;
const hipExtent extent = make_hipExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ);
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
hipMalloc3DArray(&d_imagedata, &channelDesc, extent);
cudaCheckErrors("hipMalloc3D error 3D tex");
hipMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_hipPitchedPtr((void*)img, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_imagedata;
copyParams.extent = extent;
copyParams.kind = hipMemcpyHostToDevice;
hipMemcpy3D(©Params);
cudaCheckErrors("hipMemcpy3D fail");
// Configure texture options
tex.normalized = false;
tex.filterMode = hipFilterModePoint; //we don't want interpolation
tex.addressMode[0] = hipAddressModeBorder;
tex.addressMode[1] = hipAddressModeBorder;
tex.addressMode[2] = hipAddressModeBorder;
hipBindTextureToArray(tex, d_imagedata, channelDesc);
cudaCheckErrors("3D texture memory bind fail");
//Done! Image put into texture memory.
size_t num_bytes = geo.nDetecU*geo.nDetecV * sizeof(float);
float* dProjection;
hipMalloc((void**)&dProjection, num_bytes);
cudaCheckErrors("hipMalloc fail");
bool timekernel=false;
hipEvent_t start, stop;
float elapsedTime;
if (timekernel){
hipEventCreate(&start);
hipEventRecord(start,0);
}
Point3D source, deltaU, deltaV, uvOrigin;
// 16x16 gave the best performance empirically
// Funnily that makes it compatible with most GPUs.....
int divU,divV;
divU=16;
divV=16;
dim3 grid((geo.nDetecU+divU-1)/divU,(geo.nDetecV+divV-1)/divV,1);
dim3 block(divU,divV,1);
for (int i=0;i<nalpha;i++){
geo.alpha=alphas[i];
if(geo.alpha==0.0 || abs(geo.alpha-1.5707963267949)<0.0000001){
geo.alpha=geo.alpha+1.1920929e-07;
}
//precompute distances for faster execution
//Precompute per angle constant stuff for speed
computeDeltas_Siddon_parallel(geo,geo.alpha,i, &uvOrigin, &deltaU, &deltaV, &source);
//Ray tracing!
hipLaunchKernelGGL(( kernelPixelDetector_parallel), dim3(grid),dim3(block), 0, 0, geo,dProjection, source, deltaU, deltaV, uvOrigin);
cudaCheckErrors("Kernel fail");
// copy result to host
hipMemcpy(result[i], dProjection, num_bytes, hipMemcpyDeviceToHost);
cudaCheckErrors("hipMemcpy fail");
}
if (timekernel){
hipEventCreate(&stop);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start,stop);
//TODO: replace this
// mexPrintf("%f\n" ,elapsedTime);
}
hipUnbindTexture(tex);
cudaCheckErrors("Unbind fail");
hipFree(dProjection);
hipFreeArray(d_imagedata);
cudaCheckErrors("hipFree d_imagedata fail");
// there is no need to reset the device, but if one wants to use the NVIDIA Visual Profiler, one should.
//hipDeviceReset();
return 0;
}
/* This code precomputes the location of the source and the delta U and delta V (in the warped space)
* to compute the locations of the x-rays. While it seems verbose and overly-optimized,
* it does save about 30% of each of the kernel calls. That's something!
**/
void computeDeltas_Siddon_parallel(Geometry geo, float alpha,int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){
Point3D S;
S.x =geo.DSO; S.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); S.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
//End point
Point3D P,Pu0,Pv0;
P.x =-(geo.DSD-geo.DSO); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pu0.x=-(geo.DSD-geo.DSO); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pv0.x=-(geo.DSD-geo.DSO); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1);
// Geometric transformations:
//1: Offset detector
//P.x
P.y =P.y +geo.offDetecU[i]; P.z =P.z +geo.offDetecV[i];
Pu0.y=Pu0.y+geo.offDetecU[i]; Pu0.z=Pu0.z+geo.offDetecV[i];
Pv0.y=Pv0.y+geo.offDetecU[i]; Pv0.z=Pv0.z+geo.offDetecV[i];
//S doesn't need to change
//3: Rotate (around z)!
Point3D Pfinal, Pfinalu0, Pfinalv0;
Pfinal.x =P.x*cos(geo.alpha)-P.y*sin(geo.alpha); Pfinal.y =P.y*cos(geo.alpha)+P.x*sin(geo.alpha); Pfinal.z =P.z;
Pfinalu0.x=Pu0.x*cos(geo.alpha)-Pu0.y*sin(geo.alpha); Pfinalu0.y=Pu0.y*cos(geo.alpha)+Pu0.x*sin(geo.alpha); Pfinalu0.z=Pu0.z;
Pfinalv0.x=Pv0.x*cos(geo.alpha)-Pv0.y*sin(geo.alpha); Pfinalv0.y=Pv0.y*cos(geo.alpha)+Pv0.x*sin(geo.alpha); Pfinalv0.z=Pv0.z;
Point3D S2;
S2.x=S.x*cos(geo.alpha)-S.y*sin(geo.alpha);
S2.y=S.y*cos(geo.alpha)+S.x*sin(geo.alpha);
S2.z=S.z;
//2: Offset image (instead of offsetting the image, -offset everything else)
Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i];
Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i];
Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i];
S2.x=S2.x-geo.offOrigX[i]; S2.y=S2.y-geo.offOrigY[i]; S2.z=S2.z-geo.offOrigZ[i];
// As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation);
Pfinal.x =Pfinal.x+geo.sVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2;
Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2;
Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2;
S2.x =S2.x+geo.sVoxelX/2; S2.y =S2.y+geo.sVoxelY/2; S2.z =S2.z +geo.sVoxelZ/2;
//4. Scale everything so dVoxel==1
Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ;
Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ;
Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ;
S2.x =S2.x/geo.dVoxelX; S2.y =S2.y/geo.dVoxelY; S2.z =S2.z/geo.dVoxelZ;
//5. apply COR. Wherever everything was, now it is offset by a bit
float CORx, CORy;
CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX;
CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY;
Pfinal.x+=CORx; Pfinal.y+=CORy;
Pfinalu0.x+=CORx; Pfinalu0.y+=CORy;
Pfinalv0.x+=CORx; Pfinalv0.y+=CORy;
S2.x+=CORx; S2.y+=CORy;
// return
*uvorigin=Pfinal;
deltaU->x=Pfinalu0.x-Pfinal.x;
deltaU->y=Pfinalu0.y-Pfinal.y;
deltaU->z=Pfinalu0.z-Pfinal.z;
deltaV->x=Pfinalv0.x-Pfinal.x;
deltaV->y=Pfinalv0.y-Pfinal.y;
deltaV->z=Pfinalv0.z-Pfinal.z;
*source=S2;
}
#ifndef PROJECTION_HPP
float maxDistanceCubeXY(Geometry geo, float alpha,int i){
///////////
// Compute the initial "t" so that we access as little out-of-bounds memory as possible.
//////////
float maxCubX,maxCubY;
// Forgetting Z, compute max distance: diagonal+offset
maxCubX=(geo.sVoxelX/2+ abs(geo.offOrigX[i]))/geo.dVoxelX;
maxCubY=(geo.sVoxelY/2+ abs(geo.offOrigY[i]))/geo.dVoxelY;
return geo.DSO/geo.dVoxelX-sqrt(maxCubX*maxCubX+maxCubY*maxCubY);
}
#endif
| 56eb5917ed92a804e3c1502414be3766efb7f437.cu | /*-------------------------------------------------------------------------
*
* CUDA functions for ray-voxel intersection based projection
*
* This file has the necessary functions to perform X-ray parallel projection
* operation given a geometry, angles and image. It uses the so-called
* Jacobs algorithm to compute efficiently the length of the x-rays over
* voxel space. It is called Siddon because Jacobs' algorithm is just a small
* improvement over the traditional Siddon method.
*
* CODE by Ander Biguri
*
---------------------------------------------------------------------------
---------------------------------------------------------------------------
Copyright (c) 2015, University of Bath and CERN- European Organization for
Nuclear Research
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------
Contact: [email protected]
Codes : https://github.com/CERN/TIGRE
---------------------------------------------------------------------------
*/
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include "Siddon_projection_parallel.hpp"
//#include "mex.h"
#include <math.h>
// if (__err != cudaSuccess) { \
// printf("%s \n", msg);\
// printf("%s \n", cudaGetErrorString(__err));\
// } \
// TODO: Error logging
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
} while (0)
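// Editor's note (hedged sketch, not used anywhere in TIGRE): the macro above
// intentionally swallows errors (see the TODO and the commented-out body). A
// verbose variant could look like the following, assuming plain printf logging
// is acceptable and <cstdio> is available wherever it gets expanded:
#define cudaCheckErrorsVerbose(msg) \
do { \
    cudaError_t __err = cudaGetLastError(); \
    if (__err != cudaSuccess) { \
        printf("%s: %s\n", msg, cudaGetErrorString(__err)); \
    } \
} while (0)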
// Declare the texture reference.
texture<float, cudaTextureType3D , cudaReadModeElementType> tex;
#define MAXTREADS 1024
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* --->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
__global__ void kernelPixelDetector_parallel( Geometry geo,
float* detector,
Point3D source ,
Point3D deltaU,
Point3D deltaV,
Point3D uvOrigin){
// size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned long y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long idx = x * geo.nDetecV + y;
if ((x>= geo.nDetecU) | (y>= geo.nDetecV))
return;
/////// Get coordinates XYZ of pixel UV
int pixelV = geo.nDetecV-y-1;
int pixelU = x;
Point3D pixel1D;
pixel1D.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
pixel1D.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
pixel1D.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);
source.x=(source.x+pixelU*deltaU.x+pixelV*deltaV.x);
source.y=(source.y+pixelU*deltaU.y+pixelV*deltaV.y);
source.z=(source.z+pixelU*deltaU.z+pixelV*deltaV.z);
///////
// Siddon's ray-voxel intersection, optimized as in doi=10.1.1.55.7516
//////
Point3D ray;
// vector of Xray
ray.x=pixel1D.x-source.x;
ray.y=pixel1D.y-source.y;
ray.z=pixel1D.z-source.z;
// These variables are omitted because
// bx,by,bz ={0,0,0}
// dx,dy,dz ={1,1,1}
// compute parameter values for x-ray parametric equation. eq(3-10)
float axm,aym,azm;
float axM,ayM,azM;
/**************************************
*
*
* Problem. In parallel beam, often ray.y or ray.x=0;
* This leads to infinities propagating and breaking everything.
*
* We need to fix it.
*
***************************************/
// In the paper Nx= number of X planes-> Nvoxel+1
axm=min(-source.x/ray.x,(geo.nVoxelX-source.x)/ray.x);
aym=min(-source.y/ray.y,(geo.nVoxelY-source.y)/ray.y);
// azm=min(-source.z/ray.z,(geo.nVoxelZ-source.z)/ray.z);
axM=max(-source.x/ray.x,(geo.nVoxelX-source.x)/ray.x);
ayM=max(-source.y/ray.y,(geo.nVoxelY-source.y)/ray.y);
// azM=max(-source.z/ray.z,(geo.nVoxelZ-source.z)/ray.z);
float am=(max(axm,aym));
float aM=(min(axM,ayM));
// line intersects voxel space -> am<aM
if (am>=aM)
detector[idx]=0;
// Compute max/min image INDEX for intersection eq(11-19)
// Discussion about ternary operator in CUDA: https://stackoverflow.com/questions/7104384/in-cuda-why-is-a-b010-more-efficient-than-an-if-else-version
float imin,imax,jmin,jmax;
// for X
if( source.x<pixel1D.x){
imin=(am==axm)? 1 : ceil (source.x+am*ray.x);
imax=(aM==axM)? geo.nVoxelX : floor(source.x+aM*ray.x);
}else{
imax=(am==axm)? geo.nVoxelX-1 : floor(source.x+am*ray.x);
imin=(aM==axM)? 0 : ceil (source.x+aM*ray.x);
}
// for Y
if( source.y<pixel1D.y){
jmin=(am==aym)? 1 : ceil (source.y+am*ray.y);
jmax=(aM==ayM)? geo.nVoxelY : floor(source.y+aM*ray.y);
}else{
jmax=(am==aym)? geo.nVoxelY-1 : floor(source.y+am*ray.y);
jmin=(aM==ayM)? 0 : ceil (source.y+aM*ray.y);
}
// // for Z
// if( source.z<pixel1D.z){
// kmin=(am==azm)? 1 : ceil (source.z+am*ray.z);
// kmax=(aM==azM)? geo.nVoxelZ : floor(source.z+aM*ray.z);
// }else{
// kmax=(am==azm)? geo.nVoxelZ-1 : floor(source.z+am*ray.z);
// kmin=(aM==azM)? 0 : ceil (source.z+aM*ray.z);
// }
// get intersection point N1. eq(20-21) [(also eq 9-10)]
float ax,ay;
ax=(source.x<pixel1D.x)? (imin-source.x)/ray.x : (imax-source.x)/ray.x;
ay=(source.y<pixel1D.y)? (jmin-source.y)/ray.y : (jmax-source.y)/ray.y;
// az=(source.z<pixel1D.z)? (kmin-source.z)/ray.z : (kmax-source.z)/ray.z;
// get index of first intersection. eq (26) and (19)
int i,j,k;
float aminc=min(ax,ay);
i=(int)floor(source.x+ (aminc+am)/2*ray.x);
j=(int)floor(source.y+ (aminc+am)/2*ray.y);
k=(int)floor(source.z+ (aminc+am)/2*ray.z);
// k=(int)source.z;
// Initialize
float ac=am;
//eq (28), unit alphas
float axu,ayu;
axu=1/abs(ray.x);
ayu=1/abs(ray.y);
// azu=1/abs(ray.z);
// eq(29), direction of update
float iu,ju;
iu=(source.x< pixel1D.x)? 1 : -1;
ju=(source.y< pixel1D.y)? 1 : -1;
// ku=(source.z< pixel1D.z)? 1 : -1;
float maxlength=sqrt(ray.x*ray.x*geo.dVoxelX*geo.dVoxelX+ray.y*ray.y*geo.dVoxelY*geo.dVoxelY);//+ray.z*ray.z*geo.dVoxelZ*geo.dVoxelZ);
float sum=0;
unsigned int Np=(imax-imin+1)+(jmax-jmin+1);//+(kmax-kmin+1); // Number of intersections
// Go iterating over the line, intersection by intersection. If double point, no worries, 0 will be computed
for (unsigned int ii=0;ii<Np;ii++){
if (ax==aminc){
sum+=(ax-ac)*tex3D(tex, i+0.5, j+0.5, k+0.5);//(ax-ac)*
i=i+iu;
ac=ax;
ax+=axu;
}else if(ay==aminc){
sum+=(ay-ac)*tex3D(tex, i+0.5, j+0.5, k+0.5);//(ay-ac)*
j=j+ju;
ac=ay;
ay+=ayu;
// }else if(az==aminc){
// sum+=(az-ac)*tex3D(tex, i+0.5, j+0.5, k+0.5);
// k=k+ku;
// ac=az;
// az+=azu;
}
aminc=min(ay,ax);
}
detector[idx]=maxlength*sum;
// detector[idx]=(iu);
}
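// Editor's note (hedged illustration): the alpha values used above are the standard
// Siddon/Jacobs parametric plane crossings for a unit voxel grid with its corner at
// the origin, i.e. for the x-planes alpha_x(i) = (i - source.x)/ray.x with
// i = 0..nVoxelX (and analogously for y). A host-side helper with the same
// expression, added for illustration only:
static inline float planeAlpha(float plane, float start, float dir)
{
    // caller must guard against dir == 0 (handled upstream by nudging geo.alpha)
    return (plane - start) / dir;
}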
int siddon_ray_projection_parallel(float const * const img, Geometry geo, float** result,float const * const alphas,int nalpha){
// copy data to CUDA memory
cudaArray *d_imagedata = 0;
const cudaExtent extent = make_cudaExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ);
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
cudaMalloc3DArray(&d_imagedata, &channelDesc, extent);
cudaCheckErrors("cudaMalloc3D error 3D tex");
cudaMemcpy3DParms copyParams = { 0 };
copyParams.srcPtr = make_cudaPitchedPtr((void*)img, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_imagedata;
copyParams.extent = extent;
copyParams.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(©Params);
cudaCheckErrors("cudaMemcpy3D fail");
// Configure texture options
tex.normalized = false;
tex.filterMode = cudaFilterModePoint; //we don't want interpolation
tex.addressMode[0] = cudaAddressModeBorder;
tex.addressMode[1] = cudaAddressModeBorder;
tex.addressMode[2] = cudaAddressModeBorder;
cudaBindTextureToArray(tex, d_imagedata, channelDesc);
cudaCheckErrors("3D texture memory bind fail");
//Done! Image put into texture memory.
size_t num_bytes = geo.nDetecU*geo.nDetecV * sizeof(float);
float* dProjection;
cudaMalloc((void**)&dProjection, num_bytes);
cudaCheckErrors("cudaMalloc fail");
bool timekernel=false;
cudaEvent_t start, stop;
float elapsedTime;
if (timekernel){
cudaEventCreate(&start);
cudaEventRecord(start,0);
}
Point3D source, deltaU, deltaV, uvOrigin;
// 16x16 gave the best performance empirically
// Funnily that makes it compatible with most GPUs.....
int divU,divV;
divU=16;
divV=16;
dim3 grid((geo.nDetecU+divU-1)/divU,(geo.nDetecV+divV-1)/divV,1);
dim3 block(divU,divV,1);
for (int i=0;i<nalpha;i++){
geo.alpha=alphas[i];
if(geo.alpha==0.0 || abs(geo.alpha-1.5707963267949)<0.0000001){
geo.alpha=geo.alpha+1.1920929e-07;
}
//precompute distances for faster execution
//Precompute per angle constant stuff for speed
computeDeltas_Siddon_parallel(geo,geo.alpha,i, &uvOrigin, &deltaU, &deltaV, &source);
//Ray tracing!
kernelPixelDetector_parallel<<<grid,block>>>(geo,dProjection, source, deltaU, deltaV, uvOrigin);
cudaCheckErrors("Kernel fail");
// copy result to host
cudaMemcpy(result[i], dProjection, num_bytes, cudaMemcpyDeviceToHost);
cudaCheckErrors("cudaMemcpy fail");
}
if (timekernel){
cudaEventCreate(&stop);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start,stop);
//TODO: replace this
// mexPrintf("%f\n" ,elapsedTime);
}
cudaUnbindTexture(tex);
cudaCheckErrors("Unbind fail");
cudaFree(dProjection);
cudaFreeArray(d_imagedata);
cudaCheckErrors("cudaFree d_imagedata fail");
// There is no need to reset the device, but if one wants to use the NVIDIA Visual Profiler, one should.
//cudaDeviceReset();
return 0;
}
/* This code precomputes the location of the source and the delta U and delta V (in the warped space)
 * used to compute the locations of the x-rays. While it seems verbose and overly optimized,
 * it saves about 30% of each of the kernel calls. That's something!
**/
void computeDeltas_Siddon_parallel(Geometry geo, float alpha,int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){
Point3D S;
S.x =geo.DSO; S.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); S.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
//End point
Point3D P,Pu0,Pv0;
P.x =-(geo.DSD-geo.DSO); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pu0.x=-(geo.DSD-geo.DSO); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pv0.x=-(geo.DSD-geo.DSO); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1);
// Geometric transformations:
//1: Offset detector
//P.x
P.y =P.y +geo.offDetecU[i]; P.z =P.z +geo.offDetecV[i];
Pu0.y=Pu0.y+geo.offDetecU[i]; Pu0.z=Pu0.z+geo.offDetecV[i];
Pv0.y=Pv0.y+geo.offDetecU[i]; Pv0.z=Pv0.z+geo.offDetecV[i];
//S doesn't need to change
//3: Rotate (around z)!
Point3D Pfinal, Pfinalu0, Pfinalv0;
Pfinal.x =P.x*cos(geo.alpha)-P.y*sin(geo.alpha); Pfinal.y =P.y*cos(geo.alpha)+P.x*sin(geo.alpha); Pfinal.z =P.z;
Pfinalu0.x=Pu0.x*cos(geo.alpha)-Pu0.y*sin(geo.alpha); Pfinalu0.y=Pu0.y*cos(geo.alpha)+Pu0.x*sin(geo.alpha); Pfinalu0.z=Pu0.z;
Pfinalv0.x=Pv0.x*cos(geo.alpha)-Pv0.y*sin(geo.alpha); Pfinalv0.y=Pv0.y*cos(geo.alpha)+Pv0.x*sin(geo.alpha); Pfinalv0.z=Pv0.z;
Point3D S2;
S2.x=S.x*cos(geo.alpha)-S.y*sin(geo.alpha);
S2.y=S.y*cos(geo.alpha)+S.x*sin(geo.alpha);
S2.z=S.z;
//2: Offset image (instead of offseting image, -offset everything else)
Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i];
Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i];
Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i];
S2.x=S2.x-geo.offOrigX[i]; S2.y=S2.y-geo.offOrigY[i]; S2.z=S2.z-geo.offOrigZ[i];
// As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation);
Pfinal.x =Pfinal.x+geo.sVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2;
Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2;
Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2;
S2.x =S2.x+geo.sVoxelX/2; S2.y =S2.y+geo.sVoxelY/2; S2.z =S2.z +geo.sVoxelZ/2;
//4. Scale everything so dVoxel==1
Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ;
Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ;
Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ;
S2.x =S2.x/geo.dVoxelX; S2.y =S2.y/geo.dVoxelY; S2.z =S2.z/geo.dVoxelZ;
//5. Apply COR. Wherever everything was, now it is offset by a bit
float CORx, CORy;
CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX;
CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY;
Pfinal.x+=CORx; Pfinal.y+=CORy;
Pfinalu0.x+=CORx; Pfinalu0.y+=CORy;
Pfinalv0.x+=CORx; Pfinalv0.y+=CORy;
S2.x+=CORx; S2.y+=CORy;
// return
*uvorigin=Pfinal;
deltaU->x=Pfinalu0.x-Pfinal.x;
deltaU->y=Pfinalu0.y-Pfinal.y;
deltaU->z=Pfinalu0.z-Pfinal.z;
deltaV->x=Pfinalv0.x-Pfinal.x;
deltaV->y=Pfinalv0.y-Pfinal.y;
deltaV->z=Pfinalv0.z-Pfinal.z;
*source=S2;
}
#ifndef PROJECTION_HPP
float maxDistanceCubeXY(Geometry geo, float alpha,int i){
///////////
// Compute initial "t" so that we access as little out-of-bounds memory as possible.
//////////
float maxCubX,maxCubY;
// Forgetting Z, compute max distance: diagonal+offset
maxCubX=(geo.sVoxelX/2+ abs(geo.offOrigX[i]))/geo.dVoxelX;
maxCubY=(geo.sVoxelY/2+ abs(geo.offOrigY[i]))/geo.dVoxelY;
return geo.DSO/geo.dVoxelX-sqrt(maxCubX*maxCubX+maxCubY*maxCubY);
}
#endif
|
285220a069eb2cec49ac15d1a612496d2d3bcbd5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_cooperative_groups.h>
namespace cg = cooperative_groups;
#include <helper_cuda.h>
#include "histogram_common.h"
////////////////////////////////////////////////////////////////////////////////
// GPU-specific common definitions
////////////////////////////////////////////////////////////////////////////////
//Data type used for input data fetches
typedef uint4 data_t;
//May change on future hardware, so better parametrize the code
#define SHARED_MEMORY_BANKS 16
////////////////////////////////////////////////////////////////////////////////
// Main computation pass: compute gridDim.x partial histograms
////////////////////////////////////////////////////////////////////////////////
//Count a byte into shared-memory storage
inline __device__ void addByte(uchar *s_ThreadBase, uint data)
{
s_ThreadBase[UMUL(data, HISTOGRAM64_THREADBLOCK_SIZE)]++;
}
//Count four bytes of a word
inline __device__ void addWord(uchar *s_ThreadBase, uint data)
{
//Only higher 6 bits of each byte matter, as this is a 64-bin histogram
addByte(s_ThreadBase, (data >> 2) & 0x3FU);
addByte(s_ThreadBase, (data >> 10) & 0x3FU);
addByte(s_ThreadBase, (data >> 18) & 0x3FU);
addByte(s_ThreadBase, (data >> 26) & 0x3FU);
}
__global__ void histogram64Kernel(uint *d_PartialHistograms, data_t *d_Data, uint dataCount)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
//Encode thread index in order to avoid bank conflicts in s_Hist[] access:
//each group of SHARED_MEMORY_BANKS threads accesses consecutive shared memory banks
//and the same bytes [0..3] within the banks
//Because of this permutation block size should be a multiple of 4 * SHARED_MEMORY_BANKS
const uint threadPos =
((threadIdx.x & ~(SHARED_MEMORY_BANKS * 4 - 1)) << 0) | // 1. xxx....xx000000
((threadIdx.x & (SHARED_MEMORY_BANKS - 1)) << 2) | // 2. [3:0] -> [5:2]
((threadIdx.x & (SHARED_MEMORY_BANKS * 3)) >> 4); // 3. [5:4] -> [1:0]
// 1. keep [x:5] bits, make lower 6 bits 0s
// 2 & 3. shuffle bits, [3:0] and [5:4]
// ex) tid: 0, 1, 2, 3, 4, 5, 6, 7 -> tPos: 0, 16, 32, 48, 64, 80, 96, 112
//Per-thread histogram storage: 64 x 64
__shared__ uchar s_Hist[HISTOGRAM64_THREADBLOCK_SIZE * HISTOGRAM64_BIN_COUNT];
uchar *s_ThreadBase = s_Hist + threadPos;
//Initialize shared memory (writing 32-bit words)
#pragma unroll
for (uint i = 0; i < (HISTOGRAM64_BIN_COUNT / 4); i++)
{
((uint *)s_Hist)[threadIdx.x + i * HISTOGRAM64_THREADBLOCK_SIZE] = 0;
}
//Read data from global memory and submit to the shared-memory histogram
//Since histogram counters are byte-sized, every single thread can't do more than 255 submissions
cg::sync(cta);
for (uint pos = UMAD(blockIdx.x, blockDim.x, threadIdx.x); pos < dataCount; pos += UMUL(blockDim.x, gridDim.x))
{
data_t data = d_Data[pos]; // takes uint4: 16 -> uchar
addWord(s_ThreadBase, data.x); // process 4 uchar
addWord(s_ThreadBase, data.y);
addWord(s_ThreadBase, data.z);
addWord(s_ThreadBase, data.w);
}
//Accumulate per-thread histograms into per-block and write to global memory
cg::sync(cta);
// Each thread sums one bin
if (threadIdx.x < HISTOGRAM64_BIN_COUNT)
{
uchar *s_HistBase = s_Hist + UMUL(threadIdx.x, HISTOGRAM64_THREADBLOCK_SIZE);
uint sum = 0;
uint pos = 4 * (threadIdx.x & (SHARED_MEMORY_BANKS - 1));
#pragma unroll
for (uint i = 0; i < (HISTOGRAM64_THREADBLOCK_SIZE / 4); i++)
{
sum +=
s_HistBase[pos + 0] +
s_HistBase[pos + 1] +
s_HistBase[pos + 2] +
s_HistBase[pos + 3];
pos = (pos + 4) & (HISTOGRAM64_THREADBLOCK_SIZE - 1);
}
d_PartialHistograms[blockIdx.x * HISTOGRAM64_BIN_COUNT + threadIdx.x] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Merge histogram64() output
// Run one threadblock per bin; each threadblock adds up the same bin counter
// from every partial histogram. Reads are uncoalesced, but mergeHistogram64
// takes only a fraction of total processing time
////////////////////////////////////////////////////////////////////////////////
#define MERGE_THREADBLOCK_SIZE 256
__global__ void mergeHistogram64Kernel(
uint *d_Histogram,
uint *d_PartialHistograms,
uint histogramCount
)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
__shared__ uint data[MERGE_THREADBLOCK_SIZE];
uint sum = 0;
for (uint i = threadIdx.x; i < histogramCount; i += MERGE_THREADBLOCK_SIZE)
{
// each block handles each bin = blockIdx.x
sum += d_PartialHistograms[blockIdx.x + i * HISTOGRAM64_BIN_COUNT];
}
data[threadIdx.x] = sum;
// reduction sum
for (uint stride = MERGE_THREADBLOCK_SIZE / 2; stride > 0; stride >>= 1)
{
cg::sync(cta);
if (threadIdx.x < stride)
{
data[threadIdx.x] += data[threadIdx.x + stride];
}
}
if (threadIdx.x == 0)
{
d_Histogram[blockIdx.x] = data[0];
}
}
////////////////////////////////////////////////////////////////////////////////
// CPU interface to GPU histogram calculator
////////////////////////////////////////////////////////////////////////////////
//histogram64kernel() intermediate results buffer
//MAX_PARTIAL_HISTOGRAM64_COUNT == 32768 and HISTOGRAM64_THREADBLOCK_SIZE == 64
//amounts to max. 480MB of input data
static const uint MAX_PARTIAL_HISTOGRAM64_COUNT = 32768;
static uint *d_PartialHistograms;
//Internal memory allocation
extern "C" void initHistogram64(void)
{
assert(HISTOGRAM64_THREADBLOCK_SIZE % (4 * SHARED_MEMORY_BANKS) == 0);
checkCudaErrors(hipMalloc((void **)&d_PartialHistograms, MAX_PARTIAL_HISTOGRAM64_COUNT * HISTOGRAM64_BIN_COUNT * sizeof(uint)));
}
//Internal memory deallocation
extern "C" void closeHistogram64(void)
{
checkCudaErrors(hipFree(d_PartialHistograms));
}
//Round a / b to nearest higher integer value
inline uint iDivUp(uint a, uint b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
//Snap a to nearest lower multiple of b
inline uint iSnapDown(uint a, uint b)
{
return a - a % b;
}
extern "C" void histogram64(
uint *d_Histogram,
void *d_Data,
uint byteCount
)
{
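// Size the grid so each thread performs at most iSnapDown(255, sizeof(data_t)) = 240 byte counts,
// keeping the byte-sized shared-memory bins from overflowing (255 max per uchar counter).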
const uint histogramCount = iDivUp(byteCount, HISTOGRAM64_THREADBLOCK_SIZE * iSnapDown(255, sizeof(data_t)));
assert(byteCount % sizeof(data_t) == 0);
assert(histogramCount <= MAX_PARTIAL_HISTOGRAM64_COUNT);
// 64 threads per thread block
hipLaunchKernelGGL(( histogram64Kernel), dim3(histogramCount), dim3(HISTOGRAM64_THREADBLOCK_SIZE), 0, 0,
d_PartialHistograms,
(data_t *)d_Data,
byteCount / sizeof(data_t)
);
getLastCudaError("histogram64Kernel() execution failed\n");
hipLaunchKernelGGL(( mergeHistogram64Kernel), dim3(HISTOGRAM64_BIN_COUNT), dim3(MERGE_THREADBLOCK_SIZE), 0, 0,
d_Histogram,
d_PartialHistograms,
histogramCount
);
getLastCudaError("mergeHistogram64() execution failed\n");
}
| 285220a069eb2cec49ac15d1a612496d2d3bcbd5.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cooperative_groups.h>
namespace cg = cooperative_groups;
#include <helper_cuda.h>
#include "histogram_common.h"
////////////////////////////////////////////////////////////////////////////////
// GPU-specific common definitions
////////////////////////////////////////////////////////////////////////////////
//Data type used for input data fetches
typedef uint4 data_t;
//May change on future hardware, so better parametrize the code
#define SHARED_MEMORY_BANKS 16
////////////////////////////////////////////////////////////////////////////////
// Main computation pass: compute gridDim.x partial histograms
////////////////////////////////////////////////////////////////////////////////
//Count a byte into shared-memory storage
inline __device__ void addByte(uchar *s_ThreadBase, uint data)
{
s_ThreadBase[UMUL(data, HISTOGRAM64_THREADBLOCK_SIZE)]++;
}
//Count four bytes of a word
inline __device__ void addWord(uchar *s_ThreadBase, uint data)
{
//Only higher 6 bits of each byte matter, as this is a 64-bin histogram
addByte(s_ThreadBase, (data >> 2) & 0x3FU);
addByte(s_ThreadBase, (data >> 10) & 0x3FU);
addByte(s_ThreadBase, (data >> 18) & 0x3FU);
addByte(s_ThreadBase, (data >> 26) & 0x3FU);
}
__global__ void histogram64Kernel(uint *d_PartialHistograms, data_t *d_Data, uint dataCount)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
//Encode thread index in order to avoid bank conflicts in s_Hist[] access:
//each group of SHARED_MEMORY_BANKS threads accesses consecutive shared memory banks
//and the same bytes [0..3] within the banks
//Because of this permutation block size should be a multiple of 4 * SHARED_MEMORY_BANKS
const uint threadPos =
((threadIdx.x & ~(SHARED_MEMORY_BANKS * 4 - 1)) << 0) | // 1. xxx....xx000000
((threadIdx.x & (SHARED_MEMORY_BANKS - 1)) << 2) | // 2. [3:0] -> [5:2]
((threadIdx.x & (SHARED_MEMORY_BANKS * 3)) >> 4); // 3. [5:4] -> [1:0]
// 1. keep [x:5] bits, make lower 6 bits 0s
// 2 & 3. shuffle bits, [3:0] and [5:4]
// ex) tid: 0, 1, 2, 3, 4, 5, 6, 7 -> tPos: 0, 16, 32, 48, 64, 80, 96, 112
//Per-thread histogram storage: 64 x 64
__shared__ uchar s_Hist[HISTOGRAM64_THREADBLOCK_SIZE * HISTOGRAM64_BIN_COUNT];
uchar *s_ThreadBase = s_Hist + threadPos;
//Initialize shared memory (writing 32-bit words)
#pragma unroll
for (uint i = 0; i < (HISTOGRAM64_BIN_COUNT / 4); i++)
{
((uint *)s_Hist)[threadIdx.x + i * HISTOGRAM64_THREADBLOCK_SIZE] = 0;
}
//Read data from global memory and submit to the shared-memory histogram
//Since histogram counters are byte-sized, every single thread can't do more than 255 submissions
cg::sync(cta);
for (uint pos = UMAD(blockIdx.x, blockDim.x, threadIdx.x); pos < dataCount; pos += UMUL(blockDim.x, gridDim.x))
{
data_t data = d_Data[pos]; // takes uint4: 16 -> uchar
addWord(s_ThreadBase, data.x); // process 4 uchar
addWord(s_ThreadBase, data.y);
addWord(s_ThreadBase, data.z);
addWord(s_ThreadBase, data.w);
}
//Accumulate per-thread histograms into per-block and write to global memory
cg::sync(cta);
// Each thread sums one bin
if (threadIdx.x < HISTOGRAM64_BIN_COUNT)
{
uchar *s_HistBase = s_Hist + UMUL(threadIdx.x, HISTOGRAM64_THREADBLOCK_SIZE);
uint sum = 0;
uint pos = 4 * (threadIdx.x & (SHARED_MEMORY_BANKS - 1));
#pragma unroll
for (uint i = 0; i < (HISTOGRAM64_THREADBLOCK_SIZE / 4); i++)
{
sum +=
s_HistBase[pos + 0] +
s_HistBase[pos + 1] +
s_HistBase[pos + 2] +
s_HistBase[pos + 3];
pos = (pos + 4) & (HISTOGRAM64_THREADBLOCK_SIZE - 1);
}
d_PartialHistograms[blockIdx.x * HISTOGRAM64_BIN_COUNT + threadIdx.x] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Merge histogram64() output
// Run one threadblock per bin; each threadbock adds up the same bin counter
// from every partial histogram. Reads are uncoalesced, but mergeHistogram64
// takes only a fraction of total processing time
////////////////////////////////////////////////////////////////////////////////
#define MERGE_THREADBLOCK_SIZE 256
__global__ void mergeHistogram64Kernel(
uint *d_Histogram,
uint *d_PartialHistograms,
uint histogramCount
)
{
// Handle to thread block group
cg::thread_block cta = cg::this_thread_block();
__shared__ uint data[MERGE_THREADBLOCK_SIZE];
uint sum = 0;
for (uint i = threadIdx.x; i < histogramCount; i += MERGE_THREADBLOCK_SIZE)
{
// each block handles each bin = blockIdx.x
sum += d_PartialHistograms[blockIdx.x + i * HISTOGRAM64_BIN_COUNT];
}
data[threadIdx.x] = sum;
// reduction sum
for (uint stride = MERGE_THREADBLOCK_SIZE / 2; stride > 0; stride >>= 1)
{
cg::sync(cta);
if (threadIdx.x < stride)
{
data[threadIdx.x] += data[threadIdx.x + stride];
}
}
if (threadIdx.x == 0)
{
d_Histogram[blockIdx.x] = data[0];
}
}
////////////////////////////////////////////////////////////////////////////////
// CPU interface to GPU histogram calculator
////////////////////////////////////////////////////////////////////////////////
//histogram64kernel() intermediate results buffer
//MAX_PARTIAL_HISTOGRAM64_COUNT == 32768 and HISTOGRAM64_THREADBLOCK_SIZE == 64
//amounts to max. 480MB of input data
static const uint MAX_PARTIAL_HISTOGRAM64_COUNT = 32768;
static uint *d_PartialHistograms;
//Internal memory allocation
extern "C" void initHistogram64(void)
{
assert(HISTOGRAM64_THREADBLOCK_SIZE % (4 * SHARED_MEMORY_BANKS) == 0);
checkCudaErrors(cudaMalloc((void **)&d_PartialHistograms, MAX_PARTIAL_HISTOGRAM64_COUNT * HISTOGRAM64_BIN_COUNT * sizeof(uint)));
}
//Internal memory deallocation
extern "C" void closeHistogram64(void)
{
checkCudaErrors(cudaFree(d_PartialHistograms));
}
//Round a / b to nearest higher integer value
inline uint iDivUp(uint a, uint b)
{
return (a % b != 0) ? (a / b + 1) : (a / b);
}
//Snap a to nearest lower multiple of b
inline uint iSnapDown(uint a, uint b)
{
return a - a % b;
}
extern "C" void histogram64(
uint *d_Histogram,
void *d_Data,
uint byteCount
)
{
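// Size the grid so each thread performs at most iSnapDown(255, sizeof(data_t)) = 240 byte counts,
// keeping the byte-sized shared-memory bins from overflowing (255 max per uchar counter).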
const uint histogramCount = iDivUp(byteCount, HISTOGRAM64_THREADBLOCK_SIZE * iSnapDown(255, sizeof(data_t)));
assert(byteCount % sizeof(data_t) == 0);
assert(histogramCount <= MAX_PARTIAL_HISTOGRAM64_COUNT);
// 64 threads per thread block
histogram64Kernel<<<histogramCount, HISTOGRAM64_THREADBLOCK_SIZE>>>(
d_PartialHistograms,
(data_t *)d_Data,
byteCount / sizeof(data_t)
);
getLastCudaError("histogram64Kernel() execution failed\n");
mergeHistogram64Kernel<<<HISTOGRAM64_BIN_COUNT, MERGE_THREADBLOCK_SIZE>>>(
d_Histogram,
d_PartialHistograms,
histogramCount
);
getLastCudaError("mergeHistogram64() execution failed\n");
}
|
cd6cd82354ddbcac1385845e7838b33d5e639c1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "tempest/utils/testing.hh"
#include "tempest/compute/ray-tracing-cuda-system.hh"
#include "tempest/math/matrix4.hh"
#include "tempest/math/quaternion.hh"
#define ILLUMINATION_MODEL_IMPLEMENTATION
#define ILLUMINATION_MODEL_STATIC_IMPLEMENTATION
#include "tempest/graphics/ray-tracing/illumination-models.hh"
#include <hip/hip_runtime_api.h>
#include <memory>
const uint32_t ImageWidth = 1920;
const uint32_t ImageHeight = 1080;
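// One thread per pixel: evaluate the SGGX pseudo-volume BRDF for a fixed 45-degree
// incident/outgoing geometry (per-pixel RNG seed) and write the shaded color to the backbuffer.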
__global__ void ComputeImage(Tempest::RTSGGXSurface sggx_render_material, uint32_t width, uint32_t height, uint32_t* backbuffer)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x >= width || y >= height)
return;
float angle = Tempest::MathPi*0.25f, s, c;
Tempest::FastSinCos(angle, &s, &c);
Tempest::SampleData sample_data{};
sample_data.Material = &sggx_render_material;
sample_data.IncidentLight = Tempest::Vector3{ 0.0f, s, c };
sample_data.OutgoingLight = Tempest::Vector3{ 0.0f, -s, c };
sample_data.Tangent = Tempest::Vector3{ 1.0f, 0.0f, 0.0f };
sample_data.Binormal = Tempest::Vector3{ 0.0f, 1.0f, 0.0f };
sample_data.Normal = Tempest::Vector3{ 0.0f, 0.0f, 1.0f };
unsigned seed = (y << 16) + x;
Tempest::Cuda::SGGXSurfaceCache(sample_data, seed);
auto spec = Tempest::Cuda::SGGXMicroFlakePseudoVolumeBRDF(sample_data);
backbuffer[y*width + x] = Tempest::ToColor(Tempest::SpectrumToRGB(spec));
}
TGE_TEST("Testing pseudo volume performance in the most optimistic case")
{
auto backbuffer = CREATE_SCOPED(uint32_t*, ::hipFree);
uint32_t backbuffer_area = ImageWidth*ImageHeight;
uint32_t backbuffer_size = backbuffer_area*sizeof(backbuffer[0]);
auto status = hipMalloc(reinterpret_cast<void**>(&backbuffer), backbuffer_size);
TGE_CHECK(status == hipSuccess, "Failed to allocate backbuffer");
Tempest::TextureDescription stddev_tex_desc;
Tempest::TextureDescription sggx_tex_desc;
sggx_tex_desc.Width = 1;
sggx_tex_desc.Height = 1;
sggx_tex_desc.Format = Tempest::DataFormat::RGBA32F;
Tempest::Texture sggx_stddev_tex(sggx_tex_desc, reinterpret_cast<uint8_t*>(new Tempest::Vector4{ 0.5f, 0.5f, 0.5f, 0.0f }));
Tempest::Texture sggx_basis_tex(sggx_tex_desc, reinterpret_cast<uint8_t*>(new Tempest::Quaternion{ 0.0f, 0.0f, 0.0f, 1.0f }));
Tempest::RayTracingCudaSystem rt_sys(ImageWidth, ImageHeight, Tempest::Matrix4::identityMatrix());
auto rt_scene = rt_sys.getRayTracer();
Tempest::RTSGGXSurface sggx_render_material{};
sggx_render_material.Depth = 1;
sggx_render_material.SampleCount = 256;
sggx_render_material.Model = Tempest::IlluminationModel::SGGXPseudoVolume;
sggx_render_material.Diffuse = {};
sggx_render_material.Specular = Tempest::ToSpectrum(0.75f);
sggx_render_material.BasisMapWidth = sggx_tex_desc.Width;
sggx_render_material.BasisMapWidth = sggx_tex_desc.Height;
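// NOTE: BasisMapWidth is assigned twice above; the second assignment (from sggx_tex_desc.Height)
// was presumably intended for the basis map height field.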
sggx_render_material.BasisMap = rt_scene->bindTexture(&sggx_basis_tex);
sggx_render_material.StandardDeviationMap = rt_scene->bindTexture(&sggx_stddev_tex);
sggx_render_material.setup();
Tempest::TimeQuery timer;
auto start_time = timer.time();
dim3 comp_group_size(8, 8, 1);
dim3 comp_thread_groups((ImageWidth + comp_group_size.x - 1) / comp_group_size.x, (ImageHeight + comp_group_size.y - 1) / comp_group_size.y, 1);
hipLaunchKernelGGL(( ComputeImage), dim3(comp_thread_groups), dim3(comp_group_size), 0, 0, sggx_render_material, ImageWidth, ImageHeight, backbuffer);
hipDeviceSynchronize();
status = hipGetLastError();
TGE_CHECK(status == hipSuccess, "Failed to launch kernel");
auto end_time = timer.time();
auto elapsed_time = end_time - start_time;
Tempest::Log(Tempest::LogLevel::Info, "Compute a Full HD image in ", elapsed_time*1e-6f, "s");
std::unique_ptr<uint32_t[]> backbuffer_copy(new uint32_t[backbuffer_size]);
status = hipMemcpy(backbuffer_copy.get(), backbuffer, backbuffer_size, hipMemcpyDeviceToHost);
TGE_CHECK(status == hipSuccess, "Failed to copy backbuffer data");
} | cd6cd82354ddbcac1385845e7838b33d5e639c1c.cu | #include "tempest/utils/testing.hh"
#include "tempest/compute/ray-tracing-cuda-system.hh"
#include "tempest/math/matrix4.hh"
#include "tempest/math/quaternion.hh"
#define ILLUMINATION_MODEL_IMPLEMENTATION
#define ILLUMINATION_MODEL_STATIC_IMPLEMENTATION
#include "tempest/graphics/ray-tracing/illumination-models.hh"
#include <cuda_runtime_api.h>
#include <memory>
const uint32_t ImageWidth = 1920;
const uint32_t ImageHeight = 1080;
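// One thread per pixel: evaluate the SGGX pseudo-volume BRDF for a fixed 45-degree
// incident/outgoing geometry (per-pixel RNG seed) and write the shaded color to the backbuffer.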
__global__ void ComputeImage(Tempest::RTSGGXSurface sggx_render_material, uint32_t width, uint32_t height, uint32_t* backbuffer)
{
unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
if(x >= width || y >= height)
return;
float angle = Tempest::MathPi*0.25f, s, c;
Tempest::FastSinCos(angle, &s, &c);
Tempest::SampleData sample_data{};
sample_data.Material = &sggx_render_material;
sample_data.IncidentLight = Tempest::Vector3{ 0.0f, s, c };
sample_data.OutgoingLight = Tempest::Vector3{ 0.0f, -s, c };
sample_data.Tangent = Tempest::Vector3{ 1.0f, 0.0f, 0.0f };
sample_data.Binormal = Tempest::Vector3{ 0.0f, 1.0f, 0.0f };
sample_data.Normal = Tempest::Vector3{ 0.0f, 0.0f, 1.0f };
unsigned seed = (y << 16) + x;
Tempest::Cuda::SGGXSurfaceCache(sample_data, seed);
auto spec = Tempest::Cuda::SGGXMicroFlakePseudoVolumeBRDF(sample_data);
backbuffer[y*width + x] = Tempest::ToColor(Tempest::SpectrumToRGB(spec));
}
TGE_TEST("Testing pseudo volume performance in the most optimistic case")
{
auto backbuffer = CREATE_SCOPED(uint32_t*, ::cudaFree);
uint32_t backbuffer_area = ImageWidth*ImageHeight;
uint32_t backbuffer_size = backbuffer_area*sizeof(backbuffer[0]);
auto status = cudaMalloc(reinterpret_cast<void**>(&backbuffer), backbuffer_size);
TGE_CHECK(status == cudaSuccess, "Failed to allocate backbuffer");
Tempest::TextureDescription stddev_tex_desc;
Tempest::TextureDescription sggx_tex_desc;
sggx_tex_desc.Width = 1;
sggx_tex_desc.Height = 1;
sggx_tex_desc.Format = Tempest::DataFormat::RGBA32F;
Tempest::Texture sggx_stddev_tex(sggx_tex_desc, reinterpret_cast<uint8_t*>(new Tempest::Vector4{ 0.5f, 0.5f, 0.5f, 0.0f }));
Tempest::Texture sggx_basis_tex(sggx_tex_desc, reinterpret_cast<uint8_t*>(new Tempest::Quaternion{ 0.0f, 0.0f, 0.0f, 1.0f }));
Tempest::RayTracingCudaSystem rt_sys(ImageWidth, ImageHeight, Tempest::Matrix4::identityMatrix());
auto rt_scene = rt_sys.getRayTracer();
Tempest::RTSGGXSurface sggx_render_material{};
sggx_render_material.Depth = 1;
sggx_render_material.SampleCount = 256;
sggx_render_material.Model = Tempest::IlluminationModel::SGGXPseudoVolume;
sggx_render_material.Diffuse = {};
sggx_render_material.Specular = Tempest::ToSpectrum(0.75f);
sggx_render_material.BasisMapWidth = sggx_tex_desc.Width;
sggx_render_material.BasisMapWidth = sggx_tex_desc.Height;
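// NOTE: BasisMapWidth is assigned twice above; the second assignment (from sggx_tex_desc.Height)
// was presumably intended for the basis map height field.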
sggx_render_material.BasisMap = rt_scene->bindTexture(&sggx_basis_tex);
sggx_render_material.StandardDeviationMap = rt_scene->bindTexture(&sggx_stddev_tex);
sggx_render_material.setup();
Tempest::TimeQuery timer;
auto start_time = timer.time();
dim3 comp_group_size(8, 8, 1);
dim3 comp_thread_groups((ImageWidth + comp_group_size.x - 1) / comp_group_size.x, (ImageHeight + comp_group_size.y - 1) / comp_group_size.y, 1);
ComputeImage<<<comp_thread_groups, comp_group_size>>>(sggx_render_material, ImageWidth, ImageHeight, backbuffer);
cudaThreadSynchronize();
status = cudaGetLastError();
TGE_CHECK(status == cudaSuccess, "Failed to launch kernel");
auto end_time = timer.time();
auto elapsed_time = end_time - start_time;
Tempest::Log(Tempest::LogLevel::Info, "Compute a Full HD image in ", elapsed_time*1e-6f, "s");
std::unique_ptr<uint32_t[]> backbuffer_copy(new uint32_t[backbuffer_size]);
status = cudaMemcpy(backbuffer_copy.get(), backbuffer, backbuffer_size, cudaMemcpyDeviceToHost);
TGE_CHECK(status == cudaSuccess, "Failed to copy backbuffer data");
} |
a71b36ccf1970e1f0707f969c94abc478c2a46af.hip | // !!! This is a file automatically generated by hipify!!!
#include "CudaFuncs.h"
#include <cuda_gl_interop.h>
#include <hip/device_functions.h>
#include "device_launch_parameters.h"
#include<iostream>
#include <assert.h>
#pragma region Functions
__device__ int div2ceil(int value) { return (value & 1) + (value >> 1); }
__device__ int cuda_div_ceil(int nume, int denom) { return nume / denom + ((nume % denom) > 0); }
/* For positive nums with sum less than INT_MAX */
__device__ int cuda_div_ceil_pos(int nume, int denom) { return (nume + denom - 1) / denom; }
#pragma endregion
#pragma region Device constructs
/* Initiate runtime device. */
bool initCudaDevice()
{
hipError_t err = hipSetDevice(0);
if (err != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
return false;
}
return true;
}
__host__ void cudaCheck()
{
hipDeviceSynchronize();
hipError_t err = hipPeekAtLastError();
if (err != hipSuccess)
{
std::cout << "Error: " << hipGetErrorString(err) << std::endl;
hipDeviceReset();
exit(0);
}
}
bool read(int* dev_arr, int* arr, size_t arr_len)
{
// Copy output vector from GPU buffer to host memory.
hipError_t cudaStatus = hipMemcpy(arr, dev_arr, arr_len * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return false;
}
return true;
}
bool read(float* dev_arr, float* arr, size_t arr_len)
{
// Copy output vector from GPU buffer to host memory.
hipError_t cudaStatus = hipMemcpy(arr, dev_arr, arr_len * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
return false;
}
return true;
}
#pragma endregion
#ifdef OPEN_GL
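/* Allocate an RGBA32F OpenGL texture of the given size and register it with CUDA
 * for surface load/store access. Returns false if the CUDA registration fails. */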
bool allocateTexture_RGBA(unsigned int width, unsigned int height, CU_image &image)
{
// Generate a texture ID
glGenTextures(1, &image._textureID);
// Make this the current texture (remember that GL is state-based)
glBindTexture(GL_TEXTURE_2D, image._textureID);
// Allocate the texture memory. The last parameter is NULL since we only
// want to allocate memory, not initialize it
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, height, 0, GL_RGBA,
GL_FLOAT, NULL);
// Must set the filter mode, GL_LINEAR enables interpolation when scaling
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
GLenum gl_err = glGetError();
hipError_t err = hipGraphicsGLRegisterImage(&image._resource, image._textureID, GL_TEXTURE_2D,
hipGraphicsRegisterFlagsSurfaceLoadStore);
if (err != hipSuccess)
{
glDeleteBuffers(1, &image._textureID);
fprintf(stderr, "hipGraphicsGLRegisterImage failed!\n");
return false;
}
return true;
}
hipError_t createCudaSurface(hipArray_t arr, hipSurfaceObject_t &surfObj)
{
//Create resource desc.
struct hipResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeArray;
resDesc.res.array.array = arr;
return hipCreateSurfaceObject(&surfObj, &resDesc);
}
hipError_t CU_image::map(hipArray_t &arr)
{
hipError_t err = hipGraphicsMapResources(1, &_resource);
if (err != hipSuccess)
return err;
return hipGraphicsSubResourceGetMappedArray(&arr, _resource, 0, 0);
}
hipError_t CU_image::mapSurface(hipSurfaceObject_t &surfObj)
{
hipArray_t arr;
hipError_t err = map(arr);
if (err != hipSuccess) return err;
err = createCudaSurface(arr, surfObj);
return err;
}
hipError_t CU_image::unmap()
{
return hipGraphicsUnmapResources(1, &_resource);
}
hipError_t CU_image::destroy()
{
hipError_t err = hipGraphicsUnregisterResource(_resource);
glDeleteBuffers(1, &_textureID);
_textureID = 0;
return err;
}
#endif | a71b36ccf1970e1f0707f969c94abc478c2a46af.cu | #include "CudaFuncs.h"
#include <cuda_gl_interop.h>
#include <device_functions.h>
#include "device_launch_parameters.h"
#include<iostream>
#include <assert.h>
#pragma region Functions
__device__ int div2ceil(int value) { return (value & 1) + (value >> 1); }
__device__ int cuda_div_ceil(int nume, int denom) { return nume / denom + ((nume % denom) > 0); }
/* For positive nums with sum less than INT_MAX */
__device__ int cuda_div_ceil_pos(int nume, int denom) { return (nume + denom - 1) / denom; }
#pragma endregion
#pragma region Device constructs
/* Initiate runtime device. */
bool initCudaDevice()
{
cudaError_t err = cudaSetDevice(0);
if (err != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
return false;
}
return true;
}
__host__ void cudaCheck()
{
cudaDeviceSynchronize();
cudaError err = cudaPeekAtLastError();
if (err != cudaSuccess)
{
std::cout << "Error: " << cudaGetErrorString(err) << std::endl;
cudaDeviceReset();
exit(0);
}
}
bool read(int* dev_arr, int* arr, size_t arr_len)
{
// Copy output vector from GPU buffer to host memory.
cudaError_t cudaStatus = cudaMemcpy(arr, dev_arr, arr_len * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return false;
}
return true;
}
bool read(float* dev_arr, float* arr, size_t arr_len)
{
// Copy output vector from GPU buffer to host memory.
cudaError_t cudaStatus = cudaMemcpy(arr, dev_arr, arr_len * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
return false;
}
return true;
}
#pragma endregion
#ifdef OPEN_GL
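/* Allocate an RGBA32F OpenGL texture of the given size and register it with CUDA
 * for surface load/store access. Returns false if the CUDA registration fails. */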
bool allocateTexture_RGBA(unsigned int width, unsigned int height, CU_image &image)
{
// Generate a texture ID
glGenTextures(1, &image._textureID);
// Make this the current texture (remember that GL is state-based)
glBindTexture(GL_TEXTURE_2D, image._textureID);
// Allocate the texture memory. The last parameter is NULL since we only
// want to allocate memory, not initialize it
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, height, 0, GL_RGBA,
GL_FLOAT, NULL);
// Must set the filter mode, GL_LINEAR enables interpolation when scaling
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
GLenum gl_err = glGetError();
cudaError err = cudaGraphicsGLRegisterImage(&image._resource, image._textureID, GL_TEXTURE_2D,
cudaGraphicsRegisterFlagsSurfaceLoadStore);
if (err != cudaSuccess)
{
glDeleteBuffers(1, &image._textureID);
fprintf(stderr, "cudaGraphicsGLRegisterImage failed!\n");
return false;
}
return true;
}
cudaError createCudaSurface(cudaArray_t arr, cudaSurfaceObject_t &surfObj)
{
//Create resource desc.
struct cudaResourceDesc resDesc;
memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = arr;
return cudaCreateSurfaceObject(&surfObj, &resDesc);
}
cudaError CU_image::map(cudaArray_t &arr)
{
cudaError err = cudaGraphicsMapResources(1, &_resource);
if (err != cudaSuccess)
return err;
return cudaGraphicsSubResourceGetMappedArray(&arr, _resource, 0, 0);
}
cudaError CU_image::mapSurface(cudaSurfaceObject_t &surfObj)
{
cudaArray_t arr;
cudaError err = map(arr);
if (err != cudaSuccess) return err;
err = createCudaSurface(arr, surfObj);
return err;
}
cudaError CU_image::unmap()
{
return cudaGraphicsUnmapResources(1, &_resource);
}
cudaError CU_image::destroy()
{
cudaError err = cudaGraphicsUnregisterResource(_resource);
glDeleteBuffers(1, &_textureID);
_textureID = 0;
return err;
}
#endif |
c304b7c8e0f37da06966145ac316b1034f0b7234.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "computeHessianListS1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
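// Benchmark harness: for every matrix size and block shape, round the problem size up to a
// multiple of the block, warm up the kernel, then time 1000 launches and print
// [time_us, (blockX,blockY), (XSIZE,YSIZE)].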
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *trans_x = NULL;
hipMalloc(&trans_x, XSIZE*YSIZE);
float *trans_y = NULL;
hipMalloc(&trans_y, XSIZE*YSIZE);
float *trans_z = NULL;
hipMalloc(&trans_z, XSIZE*YSIZE);
int *valid_points = NULL;
hipMalloc(&valid_points, XSIZE*YSIZE);
int *starting_voxel_id = NULL;
hipMalloc(&starting_voxel_id, XSIZE*YSIZE);
int *voxel_id = NULL;
hipMalloc(&voxel_id, XSIZE*YSIZE);
int valid_points_num = 1;
double *centroid_x = NULL;
hipMalloc(&centroid_x, XSIZE*YSIZE);
double *centroid_y = NULL;
hipMalloc(&centroid_y, XSIZE*YSIZE);
double *centroid_z = NULL;
hipMalloc(&centroid_z, XSIZE*YSIZE);
double gauss_d1 = 1;
double gauss_d2 = 1;
double *hessians = NULL;
hipMalloc(&hessians, XSIZE*YSIZE);
double *e_x_cov_x = NULL;
hipMalloc(&e_x_cov_x, XSIZE*YSIZE);
double *tmp_hessian = NULL;
hipMalloc(&tmp_hessian, XSIZE*YSIZE);
double *cov_dxd_pi = NULL;
hipMalloc(&cov_dxd_pi, XSIZE*YSIZE);
double *point_gradients = NULL;
hipMalloc(&point_gradients, XSIZE*YSIZE);
int valid_voxel_num = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
computeHessianListS1), dim3(gridBlock),dim3(threadBlock), 0, 0, trans_x,trans_y,trans_z,valid_points,starting_voxel_id,voxel_id,valid_points_num,centroid_x,centroid_y,centroid_z,gauss_d1,gauss_d2,hessians,e_x_cov_x,tmp_hessian,cov_dxd_pi,point_gradients,valid_voxel_num);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
computeHessianListS1), dim3(gridBlock),dim3(threadBlock), 0, 0, trans_x,trans_y,trans_z,valid_points,starting_voxel_id,voxel_id,valid_points_num,centroid_x,centroid_y,centroid_z,gauss_d1,gauss_d2,hessians,e_x_cov_x,tmp_hessian,cov_dxd_pi,point_gradients,valid_voxel_num);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
computeHessianListS1), dim3(gridBlock),dim3(threadBlock), 0, 0, trans_x,trans_y,trans_z,valid_points,starting_voxel_id,voxel_id,valid_points_num,centroid_x,centroid_y,centroid_z,gauss_d1,gauss_d2,hessians,e_x_cov_x,tmp_hessian,cov_dxd_pi,point_gradients,valid_voxel_num);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | c304b7c8e0f37da06966145ac316b1034f0b7234.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "computeHessianListS1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
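// Benchmark harness: for every matrix size and block shape, round the problem size up to a
// multiple of the block, warm up the kernel, then time 1000 launches and print
// [time_us, (blockX,blockY), (XSIZE,YSIZE)].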
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *trans_x = NULL;
cudaMalloc(&trans_x, XSIZE*YSIZE);
float *trans_y = NULL;
cudaMalloc(&trans_y, XSIZE*YSIZE);
float *trans_z = NULL;
cudaMalloc(&trans_z, XSIZE*YSIZE);
int *valid_points = NULL;
cudaMalloc(&valid_points, XSIZE*YSIZE);
int *starting_voxel_id = NULL;
cudaMalloc(&starting_voxel_id, XSIZE*YSIZE);
int *voxel_id = NULL;
cudaMalloc(&voxel_id, XSIZE*YSIZE);
int valid_points_num = 1;
double *centroid_x = NULL;
cudaMalloc(&centroid_x, XSIZE*YSIZE);
double *centroid_y = NULL;
cudaMalloc(&centroid_y, XSIZE*YSIZE);
double *centroid_z = NULL;
cudaMalloc(&centroid_z, XSIZE*YSIZE);
double gauss_d1 = 1;
double gauss_d2 = 1;
double *hessians = NULL;
cudaMalloc(&hessians, XSIZE*YSIZE);
double *e_x_cov_x = NULL;
cudaMalloc(&e_x_cov_x, XSIZE*YSIZE);
double *tmp_hessian = NULL;
cudaMalloc(&tmp_hessian, XSIZE*YSIZE);
double *cov_dxd_pi = NULL;
cudaMalloc(&cov_dxd_pi, XSIZE*YSIZE);
double *point_gradients = NULL;
cudaMalloc(&point_gradients, XSIZE*YSIZE);
int valid_voxel_num = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
computeHessianListS1<<<gridBlock,threadBlock>>>(trans_x,trans_y,trans_z,valid_points,starting_voxel_id,voxel_id,valid_points_num,centroid_x,centroid_y,centroid_z,gauss_d1,gauss_d2,hessians,e_x_cov_x,tmp_hessian,cov_dxd_pi,point_gradients,valid_voxel_num);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
computeHessianListS1<<<gridBlock,threadBlock>>>(trans_x,trans_y,trans_z,valid_points,starting_voxel_id,voxel_id,valid_points_num,centroid_x,centroid_y,centroid_z,gauss_d1,gauss_d2,hessians,e_x_cov_x,tmp_hessian,cov_dxd_pi,point_gradients,valid_voxel_num);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
computeHessianListS1<<<gridBlock,threadBlock>>>(trans_x,trans_y,trans_z,valid_points,starting_voxel_id,voxel_id,valid_points_num,centroid_x,centroid_y,centroid_z,gauss_d1,gauss_d2,hessians,e_x_cov_x,tmp_hessian,cov_dxd_pi,point_gradients,valid_voxel_num);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
27d316a3e8fcb363479ec0b5ba7166de5fa4651d.hip | // !!! This is a file automatically generated by hipify!!!
// Tencent is pleased to support the open source community by making TNN available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "tnn/device/cuda/acc/cuda_inner_product_layer_acc.h"
#include <hipblas.h>
#include "tnn/device/cuda/acc/cuda_layer_acc.h"
#include "tnn/utils/dims_utils.h"
#include "tnn/core/status.h"
namespace TNN_NS {
Status CudaInnerProductLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource,
const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
return CudaLayerAcc::Init(context, param, resource, inputs, outputs);
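// NOTE: the early return above means the remaining initialization below is never executed.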
InnerProductLayerParam *ip_param =
dynamic_cast<InnerProductLayerParam *>(param);
if (ip_param == nullptr) {
LOGE("Convert to InnerProductLayerParam failed\n");
return TNNERR_LAYER_ERR;
}
InnerProductLayerResource *ip_resource =
dynamic_cast<InnerProductLayerResource *>(resource);
if (ip_resource == nullptr) {
LOGE("Convert to InnerProductLayerResource failed\n");
return TNNERR_LAYER_ERR;
}
has_bias_ = ip_param->has_bias;
multiplier_size_ = 0;
float *weight = ip_resource->weight_handle.force_to<float *>();
float *bias = ip_resource->bias_handle.force_to<float *>();
weight_size_ = ip_resource->weight_handle.GetBytesSize() / sizeof(float);
bias_size_ = ip_resource->bias_handle.GetBytesSize() / sizeof(float);
CUDA_CHECK(hipMalloc((void **)&weight_, weight_size_ * sizeof(float)));
CUDA_CHECK(hipMalloc((void **)&bias_, bias_size_ * sizeof(float)));
CUDA_CHECK(hipMemcpy(weight_, weight, weight_size_ * sizeof(float),
hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(bias_, bias, bias_size_ * sizeof(float),
hipMemcpyHostToDevice));
return this->Reshape(inputs, outputs);
}
Status CudaInnerProductLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
return TNN_OK;
InnerProductLayerParam *ip_param =
dynamic_cast<InnerProductLayerParam *>(param_);
Blob *input_blob = inputs[0];
Blob *output_blob = outputs[0];
DimsVector input_dims = input_blob->GetBlobDesc().dims;
DimsVector output_dims = output_blob->GetBlobDesc().dims;
n_ = ip_param->num_output;
int axis = ip_param->axis;
m_ = DimsVectorUtils::Count(input_dims, 0, axis);
k_ = DimsVectorUtils::Count(input_dims, axis);
if (k_ * n_ != weight_size_) {
LOGE("weight size (%lu) != N(%d) * K(%d). \n", weight_size_, n_, k_);
return TNNERR_LAYER_ERR;
}
if (has_bias_) {
if (m_ > multiplier_size_) {
multiplier_size_ = m_;
if (multiplier_ != nullptr) {
CUDA_CHECK(hipFree(multiplier_));
multiplier_ = nullptr;
}
CUDA_CHECK(hipMalloc((void **)&multiplier_,
multiplier_size_ * sizeof(float)));
float *tmp = new float[multiplier_size_];
for (int i = 0; i < multiplier_size_; i++) {
tmp[i] = 1.0;
}
CUDA_CHECK(hipMemcpy(multiplier_, tmp,
multiplier_size_ * sizeof(float),
hipMemcpyHostToDevice));
delete[] tmp;
}
}
return TNN_OK;
}
Status CudaInnerProductLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
InnerProductLayerParam *ip_param =
dynamic_cast<InnerProductLayerParam *>(param_);
float *bottom_data = (float *)inputs[0]->GetHandle().base;
float *top_data = (float *)outputs[0]->GetHandle().base;
float alpha = 1.0;
float beta = 0.0;
CUBLAS_CHECK(hipblasSgemm(context_->cublas_handle_, HIPBLAS_OP_T, HIPBLAS_OP_N,
n_, m_, k_, &alpha, weight_, k_, bottom_data, k_,
&beta, top_data, n_));
if (has_bias_) {
alpha = 1.0;
beta = 1.0;
CUBLAS_CHECK(hipblasSgemm(context_->cublas_handle_, HIPBLAS_OP_N,
HIPBLAS_OP_N, n_, m_, 1, &alpha, bias_, n_,
multiplier_, 1, &beta, top_data, n_));
}
return TNN_OK;
}
CudaInnerProductLayerAcc::~CudaInnerProductLayerAcc(){
if (weight_ != nullptr) {
CUDA_CHECK(hipFree(weight_));
weight_ = nullptr;
}
if (bias_ != nullptr) {
CUDA_CHECK(hipFree(bias_));
bias_ = nullptr;
}
if (multiplier_ != nullptr) {
CUDA_CHECK(hipFree(multiplier_));
multiplier_ = nullptr;
}
}
REGISTER_CUDA_ACC(InnerProduct, LAYER_INNER_PRODUCT);
} // namespace TNN_NS
| 27d316a3e8fcb363479ec0b5ba7166de5fa4651d.cu | // Tencent is pleased to support the open source community by making TNN available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "tnn/device/cuda/acc/cuda_inner_product_layer_acc.h"
#include <cublas_v2.h>
#include "tnn/device/cuda/acc/cuda_layer_acc.h"
#include "tnn/utils/dims_utils.h"
#include "tnn/core/status.h"
namespace TNN_NS {
Status CudaInnerProductLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource,
const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
return CudaLayerAcc::Init(context, param, resource, inputs, outputs);
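// NOTE: the early return above means the remaining initialization below is never executed.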
InnerProductLayerParam *ip_param =
dynamic_cast<InnerProductLayerParam *>(param);
if (ip_param == nullptr) {
LOGE("Convert to InnerProductLayerParam failed\n");
return TNNERR_LAYER_ERR;
}
InnerProductLayerResource *ip_resource =
dynamic_cast<InnerProductLayerResource *>(resource);
if (ip_resource == nullptr) {
LOGE("Convert to InnerProductLayerResource failed\n");
return TNNERR_LAYER_ERR;
}
has_bias_ = ip_param->has_bias;
multiplier_size_ = 0;
float *weight = ip_resource->weight_handle.force_to<float *>();
float *bias = ip_resource->bias_handle.force_to<float *>();
weight_size_ = ip_resource->weight_handle.GetBytesSize() / sizeof(float);
bias_size_ = ip_resource->bias_handle.GetBytesSize() / sizeof(float);
CUDA_CHECK(cudaMalloc((void **)&weight_, weight_size_ * sizeof(float)));
CUDA_CHECK(cudaMalloc((void **)&bias_, bias_size_ * sizeof(float)));
CUDA_CHECK(cudaMemcpy(weight_, weight, weight_size_ * sizeof(float),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(bias_, bias, bias_size_ * sizeof(float),
cudaMemcpyHostToDevice));
return this->Reshape(inputs, outputs);
}
Status CudaInnerProductLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
return TNN_OK;
InnerProductLayerParam *ip_param =
dynamic_cast<InnerProductLayerParam *>(param_);
Blob *input_blob = inputs[0];
Blob *output_blob = outputs[0];
DimsVector input_dims = input_blob->GetBlobDesc().dims;
DimsVector output_dims = output_blob->GetBlobDesc().dims;
n_ = ip_param->num_output;
int axis = ip_param->axis;
m_ = DimsVectorUtils::Count(input_dims, 0, axis);
k_ = DimsVectorUtils::Count(input_dims, axis);
if (k_ * n_ != weight_size_) {
LOGE("weight size (%lu) != N(%d) * K(%d). \n", weight_size_, n_, k_);
return TNNERR_LAYER_ERR;
}
if (has_bias_) {
if (m_ > multiplier_size_) {
multiplier_size_ = m_;
if (multiplier_ != nullptr) {
CUDA_CHECK(cudaFree(multiplier_));
multiplier_ = nullptr;
}
CUDA_CHECK(cudaMalloc((void **)&multiplier_,
multiplier_size_ * sizeof(float)));
float *tmp = new float[multiplier_size_];
for (int i = 0; i < multiplier_size_; i++) {
tmp[i] = 1.0;
}
CUDA_CHECK(cudaMemcpy(multiplier_, tmp,
multiplier_size_ * sizeof(float),
cudaMemcpyHostToDevice));
delete[] tmp;
}
}
return TNN_OK;
}
Status CudaInnerProductLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
InnerProductLayerParam *ip_param =
dynamic_cast<InnerProductLayerParam *>(param_);
float *bottom_data = (float *)inputs[0]->GetHandle().base;
float *top_data = (float *)outputs[0]->GetHandle().base;
float alpha = 1.0;
float beta = 0.0;
CUBLAS_CHECK(cublasSgemm(context_->cublas_handle_, CUBLAS_OP_T, CUBLAS_OP_N,
n_, m_, k_, &alpha, weight_, k_, bottom_data, k_,
&beta, top_data, n_));
if (has_bias_) {
alpha = 1.0;
beta = 1.0;
CUBLAS_CHECK(cublasSgemm(context_->cublas_handle_, CUBLAS_OP_N,
CUBLAS_OP_N, n_, m_, 1, &alpha, bias_, n_,
multiplier_, 1, &beta, top_data, n_));
}
return TNN_OK;
}
CudaInnerProductLayerAcc::~CudaInnerProductLayerAcc(){
if (weight_ != nullptr) {
CUDA_CHECK(cudaFree(weight_));
weight_ = nullptr;
}
if (bias_ != nullptr) {
CUDA_CHECK(cudaFree(bias_));
bias_ = nullptr;
}
if (multiplier_ != nullptr) {
CUDA_CHECK(cudaFree(multiplier_));
multiplier_ = nullptr;
}
}
REGISTER_CUDA_ACC(InnerProduct, LAYER_INNER_PRODUCT);
} // namespace TNN_NS
|
dfe906af57c0f744270fe8c8a3262434c690d174.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cupp/deviceT/vector.h"
#include "cupp/common.h"
#include "OpenSteer/deviceT/Vec3.h"
#include "OpenSteer/deviceT/Boid.h"
#include "OpenSteer/deviceT/Matrix.h"
#include "OpenSteer/CuPPConfig.h"
#include "OpenSteer/kernels.h"
using OpenSteer::deviceT::Vec3;
using OpenSteer::deviceT::Boid;
using OpenSteer::deviceT::Matrix;
template<class T>
__device__ T interpolate (const float alpha, const T& x0, const T& x1) {
return x0 + ((x1 - x0) * alpha);
}
__device__ float clip (const float x, const float min, const float max) {
if (x < min) return min;
if (x > max) return max;
return x;
}
template<class T>
__device__ void blendIntoAccumulator (const float smoothRate, const T& newValue, T& smoothedAccumulator)
{
smoothedAccumulator = interpolate (clip (smoothRate, 0, 1), smoothedAccumulator, newValue);
}
__device__ Vec3 limitMaxDeviationAngle (const Vec3& source, const float cosineOfConeAngle, const Vec3& basis) {
// immediately return zero length input vectors
float sourceLength = source.length();
if (sourceLength == 0) return source;
// measure the angular deviation of "source" from "basis"
const Vec3 direction = source / sourceLength;
float cosineOfSourceAngle = direction.dot (basis);
// Simply return "source" if it already meets the angle criteria.
// (note: we hope this top "if" gets compiled out since the flag
// is a constant when the function is inlined into its caller)
// source vector is already inside the cone, just return it
if (cosineOfSourceAngle >= cosineOfConeAngle) return source;
// find the portion of "source" that is perpendicular to "basis"
const Vec3 perp = source.perpendicularComponent (basis);
// normalize that perpendicular
const Vec3 unitPerp = perp.normalize ();
// construct a new vector whose length equals the source vector,
// and lies on the intersection of a plane (formed the source and
// basis vectors) and a cone (whose axis is "basis" and whose
// angle corresponds to cosineOfConeAngle)
float perpDist = sqrtf (1 - (cosineOfConeAngle * cosineOfConeAngle));
const Vec3 c0 = basis * cosineOfConeAngle;
const Vec3 c1 = unitPerp * perpDist;
return (c0 + c1) * sourceLength;
}
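// Per-boid integration step: damp/limit the raw steering force, Euler-integrate acceleration
// into velocity and velocity into position, rebuild the local (side, up, forward) basis,
// wrap the position around the world sphere, and write out a 4x4 transform for rendering.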
__global__ void update (const float elapsedTime,
cupp::deviceT::vector <Vec3> &positions_,
cupp::deviceT::vector <Vec3> &forwards_,
const cupp::deviceT::vector <Vec3> &steering_forces_,
cupp::deviceT::vector <Boid> &boids_,
cupp::deviceT::vector <Matrix> &render_position_
)
{
const unsigned int my_index = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ Vec3 steering_forces[threads_per_block];
steering_forces[threadIdx.x] = steering_forces_[my_index];
Vec3 &force = steering_forces[threadIdx.x];
__shared__ Boid boids[threads_per_block];
boids[threadIdx.x] = boids_[my_index];
Boid &me = boids[threadIdx.x];
//__shared__ Vec3 positions[threads_per_block];
//positions[threadIdx.x] = positions_[my_index];
Vec3 &position = positions_[my_index];//positions[threadIdx.x];
//__shared__ Vec3 forwards[threads_per_block];
//forwards[threadIdx.x] = forwards_[my_index];
Vec3 &forward = forwards_[my_index];//forwards[threadIdx.x];
// adjustRawSteeringForce
if (!((me.speed > boid_maxAdjustedSpeed) || force.is_zero())) {
const float range = me.speed / boid_maxAdjustedSpeed;
const float cosine = interpolate (powf (range, 20), 1.0f, -1.0f);
force = limitMaxDeviationAngle (force, cosine, forward);
}
// enforce limit on magnitude of steering force
force = force.truncateLength (boid_maxForce);
// compute acceleration and velocity
const Vec3 newAcceleration = force; /* / mass; mass == 1.0f */
Vec3 newVelocity = forward * me.speed;
// damp out abrupt changes and oscillations in steering acceleration
// (rate is proportional to time step, then clipped into useful range)
if (elapsedTime > 0) {
const float smoothRate = clip (9.0f * elapsedTime, 0.15f, 0.4f);
blendIntoAccumulator (smoothRate, newAcceleration, me.smoothedAcceleration);
// Euler integrate (per frame) acceleration into velocity
newVelocity = newVelocity + me.smoothedAcceleration * elapsedTime;
}
// enforce speed limit
newVelocity = newVelocity.truncateLength (boid_maxSpeed);
// update Speed
me.speed = newVelocity.length();
const Vec3 globalUp = { 0.0f, 0.2f, 0.0f};
const Vec3 accelUp = me.smoothedAcceleration * 0.05f;
const Vec3 bankUp = accelUp + globalUp;
const float smoothRate = elapsedTime * 3;
Vec3 tempUp = me.up;
blendIntoAccumulator (smoothRate, bankUp, tempUp);
me.up = tempUp.normalize();
if (me.speed > 0.0f) {
const Vec3 newUnitForward = newVelocity / me.speed;
forward = newUnitForward;
}
// derive new side basis vector from NEW forward and OLD up
me.side.cross (forward, me.up);
me.up.cross (me.side, forward);
// Euler integrate (per frame) velocity into position
position = position + (newVelocity * elapsedTime);
position = position.sphericalWrapAround (boid_worldRadius);
boids_[my_index] = me;
positions_[my_index] = position;
forwards_[my_index] = forward;
render_position_[my_index].elements_[0] = me.side.t.x;
render_position_[my_index].elements_[1] = me.side.t.y;
render_position_[my_index].elements_[2] = me.side.t.z;
//render_position_[my_index].elements_[3] = 0.0f;
render_position_[my_index].elements_[4] = me.up.t.x;
render_position_[my_index].elements_[5] = me.up.t.y;
render_position_[my_index].elements_[6] = me.up.t.z;
//render_position_[my_index].elements_[7] = 0.0f;
render_position_[my_index].elements_[8] = forward.t.x;
render_position_[my_index].elements_[9] = forward.t.y;
render_position_[my_index].elements_[10] = forward.t.z;
//render_position_[my_index].elements_[11] = 0.0f;
render_position_[my_index].elements_[12] = position.t.x;
render_position_[my_index].elements_[13] = position.t.y;
render_position_[my_index].elements_[14] = position.t.z;
//render_position_[my_index].elements_[15] = 1.0f;
}
#if 0
slow ... no idea why
__global__ void update (const float elapsedTime,
cupp::deviceT::vector <Vec3> &positions_,
cupp::deviceT::vector <Vec3> &forwards_,
const cupp::deviceT::vector <Vec3> &steering_forces_,
cupp::deviceT::vector <Boid> &boids_,
cupp::deviceT::vector <Matrix> &render_position_
)
{
const unsigned int my_index = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ Vec3 steering_forces[threads_per_block];
steering_forces[threadIdx.x] = steering_forces_[my_index];
Vec3 &force = steering_forces[threadIdx.x];
__shared__ Boid boids[threads_per_block];
boids[threadIdx.x] = boids_[my_index];
Boid &me = boids[threadIdx.x];
Vec3 &position = positions_[my_index];
//__shared__ Vec3 forwards[threads_per_block];
//forwards[threadIdx.x] = forwards_[my_index];
Vec3 &forward = forwards_[my_index];//forwards[threadIdx.x];
// adjustRawSteeringForce
if (!((me.speed > maxAdjustedSpeed) || force.is_zero())) {
const float range = me.speed / maxAdjustedSpeed;
const float cosine = interpolate (powf (range, 20), 1.0f, -1.0f);
force = limitMaxDeviationAngle (force, cosine, forward);
}
// enforce limit on magnitude of steering force
force = force.truncateLength (maxForce);
// compute acceleration and velocity
const Vec3 newAcceleration = force; /* / mass; mass == 1.0f */
Vec3 newVelocity = forward * me.speed;
// damp out abrupt changes and oscillations in steering acceleration
// (rate is proportional to time step, then clipped into useful range)
if (elapsedTime > 0) {
const float smoothRate = clip (9.0f * elapsedTime, 0.15f, 0.4f);
blendIntoAccumulator (smoothRate, newAcceleration, me.smoothedAcceleration);
// Euler integrate (per frame) acceleration into velocity
newVelocity = newVelocity + me.smoothedAcceleration * elapsedTime;
}
// enforce speed limit
newVelocity = newVelocity.truncateLength (maxSpeed);
// update Speed
me.speed = newVelocity.length();
const Vec3 globalUp = { 0.0f, 0.2f, 0.0f};
const Vec3 accelUp = me.smoothedAcceleration * 0.05f;
const Vec3 bankUp = accelUp + globalUp;
const float smoothRate = elapsedTime * 3;
Vec3 tempUp = {render_position_[my_index].elements_[4] ,render_position_[my_index].elements_[5], render_position_[my_index].elements_[6]};
blendIntoAccumulator (smoothRate, bankUp, tempUp);
tempUp = tempUp.normalize();
if (me.speed > 0.0f) {
const Vec3 newUnitForward = newVelocity / me.speed;
forward = newUnitForward;
}
// derive new side basis vector from NEW forward and OLD up
Vec3 side;
side.cross (forward, tempUp);
tempUp.cross (side, forward);
// Euler integrate (per frame) velocity into position
position = position + (newVelocity * elapsedTime);
position = position.sphericalWrapAround (worldRadius);
boids_[my_index] = me;
positions_[my_index] = position;
forwards_[my_index] = forward;
render_position_[my_index].elements_[0] = side.t.x;
render_position_[my_index].elements_[1] = side.t.y;
render_position_[my_index].elements_[2] = side.t.z;
//render_position_[my_index].elements_[3] = 0.0f;
render_position_[my_index].elements_[4] = tempUp.t.x;
render_position_[my_index].elements_[5] = tempUp.t.y;
render_position_[my_index].elements_[6] = tempUp.t.z;
//render_position_[my_index].elements_[7] = 0.0f;
render_position_[my_index].elements_[8] = forward.t.x;
render_position_[my_index].elements_[9] = forward.t.y;
render_position_[my_index].elements_[10] = forward.t.z;
//render_position_[my_index].elements_[11] = 0.0f;
render_position_[my_index].elements_[12] = position.t.x;
render_position_[my_index].elements_[13] = position.t.y;
render_position_[my_index].elements_[14] = position.t.z;
//render_position_[my_index].elements_[15] = 1.0f;
}
#endif
update_kernelT get_update_kernel() {
return (update_kernelT)update;
}
| dfe906af57c0f744270fe8c8a3262434c690d174.cu | #include "cupp/deviceT/vector.h"
#include "cupp/common.h"
#include "OpenSteer/deviceT/Vec3.h"
#include "OpenSteer/deviceT/Boid.h"
#include "OpenSteer/deviceT/Matrix.h"
#include "OpenSteer/CuPPConfig.h"
#include "OpenSteer/kernels.h"
using OpenSteer::deviceT::Vec3;
using OpenSteer::deviceT::Boid;
using OpenSteer::deviceT::Matrix;
template<class T>
__device__ T interpolate (const float alpha, const T& x0, const T& x1) {
return x0 + ((x1 - x0) * alpha);
}
__device__ float clip (const float x, const float min, const float max) {
if (x < min) return min;
if (x > max) return max;
return x;
}
template<class T>
__device__ void blendIntoAccumulator (const float smoothRate, const T& newValue, T& smoothedAccumulator)
{
smoothedAccumulator = interpolate (clip (smoothRate, 0, 1), smoothedAccumulator, newValue);
}
__device__ Vec3 limitMaxDeviationAngle (const Vec3& source, const float cosineOfConeAngle, const Vec3& basis) {
// immediately return zero length input vectors
float sourceLength = source.length();
if (sourceLength == 0) return source;
// measure the angular deviation of "source" from "basis"
const Vec3 direction = source / sourceLength;
float cosineOfSourceAngle = direction.dot (basis);
// Simply return "source" if it already meets the angle criteria.
// (note: we hope this top "if" gets compiled out since the flag
// is a constant when the function is inlined into its caller)
// source vector is already inside the cone, just return it
if (cosineOfSourceAngle >= cosineOfConeAngle) return source;
// find the portion of "source" that is perpendicular to "basis"
const Vec3 perp = source.perpendicularComponent (basis);
// normalize that perpendicular
const Vec3 unitPerp = perp.normalize ();
// construct a new vector whose length equals the source vector,
// and lies on the intersection of a plane (formed the source and
// basis vectors) and a cone (whose axis is "basis" and whose
// angle corresponds to cosineOfConeAngle)
float perpDist = sqrtf (1 - (cosineOfConeAngle * cosineOfConeAngle));
const Vec3 c0 = basis * cosineOfConeAngle;
const Vec3 c1 = unitPerp * perpDist;
return (c0 + c1) * sourceLength;
}
__global__ void update (const float elapsedTime,
cupp::deviceT::vector <Vec3> &positions_,
cupp::deviceT::vector <Vec3> &forwards_,
const cupp::deviceT::vector <Vec3> &steering_forces_,
cupp::deviceT::vector <Boid> &boids_,
cupp::deviceT::vector <Matrix> &render_position_
)
{
const unsigned int my_index = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ Vec3 steering_forces[threads_per_block];
steering_forces[threadIdx.x] = steering_forces_[my_index];
Vec3 &force = steering_forces[threadIdx.x];
__shared__ Boid boids[threads_per_block];
boids[threadIdx.x] = boids_[my_index];
Boid &me = boids[threadIdx.x];
//__shared__ Vec3 positions[threads_per_block];
//positions[threadIdx.x] = positions_[my_index];
Vec3 &position = positions_[my_index];//positions[threadIdx.x];
//__shared__ Vec3 forwards[threads_per_block];
//forwards[threadIdx.x] = forwards_[my_index];
Vec3 &forward = forwards_[my_index];//forwards[threadIdx.x];
// adjustRawSteeringForce
if (!((me.speed > boid_maxAdjustedSpeed) || force.is_zero())) {
const float range = me.speed / boid_maxAdjustedSpeed;
const float cosine = interpolate (powf (range, 20), 1.0f, -1.0f);
force = limitMaxDeviationAngle (force, cosine, forward);
}
// enforce limit on magnitude of steering force
force = force.truncateLength (boid_maxForce);
// compute acceleration and velocity
const Vec3 newAcceleration = force; /* / mass; mass == 1.0f */
Vec3 newVelocity = forward * me.speed;
// damp out abrupt changes and oscillations in steering acceleration
// (rate is proportional to time step, then clipped into useful range)
if (elapsedTime > 0) {
const float smoothRate = clip (9.0f * elapsedTime, 0.15f, 0.4f);
blendIntoAccumulator (smoothRate, newAcceleration, me.smoothedAcceleration);
// Euler integrate (per frame) acceleration into velocity
newVelocity = newVelocity + me.smoothedAcceleration * elapsedTime;
}
// enforce speed limit
newVelocity = newVelocity.truncateLength (boid_maxSpeed);
// update Speed
me.speed = newVelocity.length();
const Vec3 globalUp = { 0.0f, 0.2f, 0.0f};
const Vec3 accelUp = me.smoothedAcceleration * 0.05f;
const Vec3 bankUp = accelUp + globalUp;
const float smoothRate = elapsedTime * 3;
Vec3 tempUp = me.up;
blendIntoAccumulator (smoothRate, bankUp, tempUp);
me.up = tempUp.normalize();
if (me.speed > 0.0f) {
const Vec3 newUnitForward = newVelocity / me.speed;
forward = newUnitForward;
}
// derive new side basis vector from NEW forward and OLD up
me.side.cross (forward, me.up);
me.up.cross (me.side, forward);
// Euler integrate (per frame) velocity into position
position = position + (newVelocity * elapsedTime);
position = position.sphericalWrapAround (boid_worldRadius);
boids_[my_index] = me;
positions_[my_index] = position;
forwards_[my_index] = forward;
render_position_[my_index].elements_[0] = me.side.t.x;
render_position_[my_index].elements_[1] = me.side.t.y;
render_position_[my_index].elements_[2] = me.side.t.z;
//render_position_[my_index].elements_[3] = 0.0f;
render_position_[my_index].elements_[4] = me.up.t.x;
render_position_[my_index].elements_[5] = me.up.t.y;
render_position_[my_index].elements_[6] = me.up.t.z;
//render_position_[my_index].elements_[7] = 0.0f;
render_position_[my_index].elements_[8] = forward.t.x;
render_position_[my_index].elements_[9] = forward.t.y;
render_position_[my_index].elements_[10] = forward.t.z;
//render_position_[my_index].elements_[11] = 0.0f;
render_position_[my_index].elements_[12] = position.t.x;
render_position_[my_index].elements_[13] = position.t.y;
render_position_[my_index].elements_[14] = position.t.z;
//render_position_[my_index].elements_[15] = 1.0f;
}
#if 0
slow ... no idea why
__global__ void update (const float elapsedTime,
cupp::deviceT::vector <Vec3> &positions_,
cupp::deviceT::vector <Vec3> &forwards_,
const cupp::deviceT::vector <Vec3> &steering_forces_,
cupp::deviceT::vector <Boid> &boids_,
cupp::deviceT::vector <Matrix> &render_position_
)
{
const unsigned int my_index = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ Vec3 steering_forces[threads_per_block];
steering_forces[threadIdx.x] = steering_forces_[my_index];
Vec3 &force = steering_forces[threadIdx.x];
__shared__ Boid boids[threads_per_block];
boids[threadIdx.x] = boids_[my_index];
Boid &me = boids[threadIdx.x];
Vec3 &position = positions_[my_index];
//__shared__ Vec3 forwards[threads_per_block];
//forwards[threadIdx.x] = forwards_[my_index];
Vec3 &forward = forwards_[my_index];//forwards[threadIdx.x];
// adjustRawSteeringForce
if (!((me.speed > maxAdjustedSpeed) || force.is_zero())) {
const float range = me.speed / maxAdjustedSpeed;
const float cosine = interpolate (powf (range, 20), 1.0f, -1.0f);
force = limitMaxDeviationAngle (force, cosine, forward);
}
// enforce limit on magnitude of steering force
force = force.truncateLength (maxForce);
// compute acceleration and velocity
const Vec3 newAcceleration = force; /* / mass; mass == 1.0f */
Vec3 newVelocity = forward * me.speed;
// damp out abrupt changes and oscillations in steering acceleration
// (rate is proportional to time step, then clipped into useful range)
if (elapsedTime > 0) {
const float smoothRate = clip (9.0f * elapsedTime, 0.15f, 0.4f);
blendIntoAccumulator (smoothRate, newAcceleration, me.smoothedAcceleration);
// Euler integrate (per frame) acceleration into velocity
newVelocity = newVelocity + me.smoothedAcceleration * elapsedTime;
}
// enforce speed limit
newVelocity = newVelocity.truncateLength (maxSpeed);
// update Speed
me.speed = newVelocity.length();
const Vec3 globalUp = { 0.0f, 0.2f, 0.0f};
const Vec3 accelUp = me.smoothedAcceleration * 0.05f;
const Vec3 bankUp = accelUp + globalUp;
const float smoothRate = elapsedTime * 3;
Vec3 tempUp = {render_position_[my_index].elements_[4] ,render_position_[my_index].elements_[5], render_position_[my_index].elements_[6]};
blendIntoAccumulator (smoothRate, bankUp, tempUp);
tempUp = tempUp.normalize();
if (me.speed > 0.0f) {
const Vec3 newUnitForward = newVelocity / me.speed;
forward = newUnitForward;
}
// derive new side basis vector from NEW forward and OLD up
Vec3 side;
side.cross (forward, tempUp);
tempUp.cross (side, forward);
// Euler integrate (per frame) velocity into position
position = position + (newVelocity * elapsedTime);
position = position.sphericalWrapAround (worldRadius);
boids_[my_index] = me;
positions_[my_index] = position;
forwards_[my_index] = forward;
render_position_[my_index].elements_[0] = side.t.x;
render_position_[my_index].elements_[1] = side.t.y;
render_position_[my_index].elements_[2] = side.t.z;
//render_position_[my_index].elements_[3] = 0.0f;
render_position_[my_index].elements_[4] = tempUp.t.x;
render_position_[my_index].elements_[5] = tempUp.t.y;
render_position_[my_index].elements_[6] = tempUp.t.z;
//render_position_[my_index].elements_[7] = 0.0f;
render_position_[my_index].elements_[8] = forward.t.x;
render_position_[my_index].elements_[9] = forward.t.y;
render_position_[my_index].elements_[10] = forward.t.z;
//render_position_[my_index].elements_[11] = 0.0f;
render_position_[my_index].elements_[12] = position.t.x;
render_position_[my_index].elements_[13] = position.t.y;
render_position_[my_index].elements_[14] = position.t.z;
//render_position_[my_index].elements_[15] = 1.0f;
}
#endif
update_kernelT get_update_kernel() {
return (update_kernelT)update;
}
|
2aeb68326ed332fcd0e9f1c7f71fe03bbbc391ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void transpose_tanh(float *odata, float *idata, int width, int height)
{
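// pad the tile's inner dimension by one element to avoid shared-memory bank conflicts
// when the tile is read back column-wise during the transpose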
__shared__ float block[BLOCK_DIM][BLOCK_DIM+1];
// read the matrix tile into shared memory
// load one element per thread from device memory (idata) and store it
// in transposed order in block[][]
unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
if((xIndex < width) && (yIndex < height))
{
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
// synchronise to ensure all writes to block[][] have completed
__syncthreads();
// write the transposed matrix tile to global memory (odata) in linear order
xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
if((xIndex < height) && (yIndex < width))
{
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
} | 2aeb68326ed332fcd0e9f1c7f71fe03bbbc391ad.cu | #include "includes.h"
__global__ void transpose_tanh(float *odata, float *idata, int width, int height)
{
__shared__ float block[BLOCK_DIM][BLOCK_DIM+1];
// read the matrix tile into shared memory
// load one element per thread from device memory (idata) and store it
// in transposed order in block[][]
unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
if((xIndex < width) && (yIndex < height))
{
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
// synchronise to ensure all writes to block[][] have completed
__syncthreads();
// write the transposed matrix tile to global memory (odata) in linear order
xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
if((xIndex < height) && (yIndex < width))
{
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
} |
7646ab486d305350c8eed34b15ffb1d27bc96f90.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Redheffer Matrix Computation
* MP1, Spring 2016, GPU Programming @ Auburn University
* @author: Xing Wang
*
* Compile this with:
* nvcc -O3 -Xcompiler=-fopenmp -o redheffer redheffer.cu
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define CUDA_CHECK(e) { \
hipError_t err = (e); \
if (err != hipSuccess) { \
fprintf(stderr, "CUDA error: %s, line %d, %s: %s\n", __FILE__, __LINE__, #e, hipGetErrorString(err)); \
exit(EXIT_FAILURE); \
} \
}
#define SIZE 20000
int h_result[SIZE * SIZE];
void verify_result();
void check(int row, int col, int expected);
__global__ static void compute_result(int *h_result);
int main(int argc, char **argv) {
double start_time, time_to_compute, time_to_verify;
printf("Initializing CUDA runtime...\n");
hipDeviceSynchronize();
printf("Computing %d x %d Redheffer matrix...\n", SIZE, SIZE);
size_t matsize = sizeof(int) * SIZE * SIZE;
/*int *h_result = (int*)malloc(matsize);
if (h_result == NULL) {
fprintf(stderr, "Unable to allocate host memory\n");
exit(EXIT_FAILURE);
}*/
int *d_result;
start_time = omp_get_wtime();
CUDA_CHECK(hipMalloc((void**)&d_result, matsize));
CUDA_CHECK(hipMemcpy(d_result, h_result, matsize, hipMemcpyHostToDevice));
/*int threadsPerBlock = 256;
int blocksPerGrid = (matsize + threadsPerBlock - 1) / threadsPerBlock;*/
dim3 threadsPerBlock(16, 16);
dim3 blocksPerGrid((SIZE + threadsPerBlock.x - 1) / threadsPerBlock.x, (SIZE + threadsPerBlock.y - 1)/threadsPerBlock.y);
hipLaunchKernelGGL(( compute_result), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_result);
CUDA_CHECK(hipDeviceSynchronize());
CUDA_CHECK(hipMemcpy(h_result, d_result, matsize, hipMemcpyDeviceToHost));
time_to_compute = omp_get_wtime() - start_time;
start_time = omp_get_wtime();
verify_result();
time_to_verify = omp_get_wtime() - start_time;
CUDA_CHECK(hipFree(d_result));
CUDA_CHECK(hipDeviceReset());
printf("Done (%2.3f seconds to compute, %2.3f seconds to verify)\n", time_to_compute, time_to_verify);
return EXIT_SUCCESS;
}
/* Fills the result array with the SIZE X SIZE Redheffer matrix */
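/* A(i,j) = 1 when j == 1 or i divides j (1-based row i, column j), 0 otherwise. */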
__global__ static void compute_result(int *A) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < SIZE && col < SIZE) {
int i = row + 1;
int j = col + 1;
if (j == 1 || j % i == 0)
A[row * SIZE + col] = 1;
else
A[row * SIZE + col] = 0;
}
}
/* Verifies that the data in the h_result array is correct */
void verify_result() {
int row, col, i, j, expected;
for (row = 0; row < SIZE; row++) {
for (col = 0; col < SIZE; col++) {
i = row + 1;
j = col + 1;
expected = (j == 1 || j % i == 0);
check(row, col, expected);
}
}
}
/* Exits with an error message iff h_result[row * SIZE + col] != expected */
void check(int row, int col, int expected) {
if (h_result[row * SIZE + col] != expected) {
fprintf(stderr, "Row %d column %d is incorrect.\n", row, col);
fprintf(stderr, " Should be: %d\n", expected);
fprintf(stderr, " Is actually: %d\n", h_result[row * SIZE + col]);
exit(EXIT_FAILURE);
}
}
| 7646ab486d305350c8eed34b15ffb1d27bc96f90.cu | /*
* Redheffer Matrix Computation
* MP1, Spring 2016, GPU Programming @ Auburn University
* @author: Xing Wang
*
* Compile this with:
* nvcc -O3 -Xcompiler=-fopenmp -o redheffer redheffer.cu
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define CUDA_CHECK(e) { \
cudaError_t err = (e); \
if (err != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s, line %d, %s: %s\n", __FILE__, __LINE__, #e, cudaGetErrorString(err)); \
exit(EXIT_FAILURE); \
} \
}
#define SIZE 20000
int h_result[SIZE * SIZE];
void verify_result();
void check(int row, int col, int expected);
__global__ static void compute_result(int *h_result);
int main(int argc, char **argv) {
double start_time, time_to_compute, time_to_verify;
printf("Initializing CUDA runtime...\n");
cudaDeviceSynchronize();
printf("Computing %d x %d Redheffer matrix...\n", SIZE, SIZE);
size_t matsize = sizeof(int) * SIZE * SIZE;
/*int *h_result = (int*)malloc(matsize);
if (h_result == NULL) {
fprintf(stderr, "Unable to allocate host memory\n");
exit(EXIT_FAILURE);
}*/
int *d_result;
start_time = omp_get_wtime();
CUDA_CHECK(cudaMalloc((void**)&d_result, matsize));
CUDA_CHECK(cudaMemcpy(d_result, h_result, matsize, cudaMemcpyHostToDevice));
/*int threadsPerBlock = 256;
int blocksPerGrid = (matsize + threadsPerBlock - 1) / threadsPerBlock;*/
dim3 threadsPerBlock(16, 16);
dim3 blocksPerGrid((SIZE + threadsPerBlock.x - 1) / threadsPerBlock.x, (SIZE + threadsPerBlock.y - 1)/threadsPerBlock.y);
compute_result<<<blocksPerGrid, threadsPerBlock>>>(d_result);
CUDA_CHECK(cudaDeviceSynchronize());
CUDA_CHECK(cudaMemcpy(h_result, d_result, matsize, cudaMemcpyDeviceToHost));
time_to_compute = omp_get_wtime() - start_time;
start_time = omp_get_wtime();
verify_result();
time_to_verify = omp_get_wtime() - start_time;
CUDA_CHECK(cudaFree(d_result));
CUDA_CHECK(cudaDeviceReset());
printf("Done (%2.3f seconds to compute, %2.3f seconds to verify)\n", time_to_compute, time_to_verify);
return EXIT_SUCCESS;
}
/* Fills the result array with the SIZE X SIZE Redheffer matrix */
__global__ static void compute_result(int *A) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < SIZE && col < SIZE) {
int i = row + 1;
int j = col + 1;
if (j == 1 || j % i == 0)
A[row * SIZE + col] = 1;
else
A[row * SIZE + col] = 0;
}
}
/* Verifies that the data in the h_result array is correct */
void verify_result() {
int row, col, i, j, expected;
for (row = 0; row < SIZE; row++) {
for (col = 0; col < SIZE; col++) {
i = row + 1;
j = col + 1;
expected = (j == 1 || j % i == 0);
check(row, col, expected);
}
}
}
/* Exits with an error message iff h_result[row * SIZE + col] != expected */
void check(int row, int col, int expected) {
if (h_result[row * SIZE + col] != expected) {
fprintf(stderr, "Row %d column %d is incorrect.\n", row, col);
fprintf(stderr, " Should be: %d\n", expected);
fprintf(stderr, " Is actually: %d\n", h_result[row * SIZE + col]);
exit(EXIT_FAILURE);
}
}
|
b974c387551a9487fd89ef159806bd557f9ff6ec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
CUDA program.
VectorAddition.cu
Date : 28/08/2020
*/
// header
#include<iostream>
#include"../../Include/book.h"
#define N 10
__global__ void add(int *a, int *b, int *c)
{
// declaration of variables
int tid; // for thread id
// code
tid = blockIdx.x;
if(tid < N)
{
c[tid] = a[tid] + b[tid];
}
}
int main(void)
{
// declaration of variables
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// code
// allocate the memory on GPU
HANDLE_ERROR(hipMalloc((void**)&dev_a, N * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&dev_b, N * sizeof(int)));
HANDLE_ERROR(hipMalloc((void**)&dev_c, N * sizeof(int)));
// fill the array a and b
for(int i = 0 ; i < N ; i++)
{
a[i] = i;
b[i] = i * i;
}
// copy the arrays a and b to the GPU
HANDLE_ERROR( hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice) );
HANDLE_ERROR( hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( add), dim3(N),dim3(1), 0, 0, dev_a,dev_b,dev_c);
// copy c array from GPU to host
HANDLE_ERROR( hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost) );
for(int i = 0 ; i < N; i++)
{
printf("%d + %d = %d \n",a[i], b[i], c[i]);
}
// free the device memory
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return(0);
}
| b974c387551a9487fd89ef159806bd557f9ff6ec.cu | /*
CUDA program.
VectorAddition.cu
Date : 28/08/2020
*/
// header
#include<iostream>
#include"../../Include/book.h"
#define N 10
__global__ void add(int *a, int *b, int *c)
{
// declaration of variables
int tid; // for thread id
// code
tid = blockIdx.x;
if(tid < N)
{
c[tid] = a[tid] + b[tid];
}
}
int main(void)
{
// declaration of variables
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
// code
// allocate the memory on GPU
HANDLE_ERROR(cudaMalloc((void**)&dev_a, N * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dev_b, N * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dev_c, N * sizeof(int)));
// fill the array a and b
for(int i = 0 ; i < N ; i++)
{
a[i] = i;
b[i] = i * i;
}
// copy the arrays a and b to the GPU
HANDLE_ERROR( cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice) );
HANDLE_ERROR( cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice) );
add<<<N,1>>>(dev_a,dev_b,dev_c);
// copy c array from GPU to host
HANDLE_ERROR( cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost) );
for(int i = 0 ; i < N; i++)
{
printf("%d + %d = %d \n",a[i], b[i], c[i]);
}
// free the device memory
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return(0);
}
|
05ac5190900970ad81f008dec28323897b1278ca.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cudaSBilinearTF_Forward_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
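// sweep the matrix sizes requested on the command line against all 20 thread-block shapes;
// each configuration is warmed up and then timed over 1000 kernel launches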
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int outputWidth = 1;
unsigned int outputHeight = 1;
unsigned int nbChannels = 1;
unsigned int batchSize = 1;
unsigned int inputWidth = 1;
unsigned int inputHeight = 1;
const unsigned int *yLowIdx = NULL;
hipMalloc(&yLowIdx, XSIZE*YSIZE);
const unsigned int *yHighIdx = NULL;
hipMalloc(&yHighIdx, XSIZE*YSIZE);
const float *yInter = NULL;
hipMalloc(&yInter, XSIZE*YSIZE);
const unsigned int *xLowIdx = NULL;
hipMalloc(&xLowIdx, XSIZE*YSIZE);
const unsigned int *xHighIdx = NULL;
hipMalloc(&xHighIdx, XSIZE*YSIZE);
const float *xInter = NULL;
hipMalloc(&xInter, XSIZE*YSIZE);
const float *input = NULL;
hipMalloc(&input, XSIZE*YSIZE);
float *outputs = NULL;
hipMalloc(&outputs, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((cudaSBilinearTF_Forward_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, outputWidth,outputHeight,nbChannels,batchSize,inputWidth,inputHeight,yLowIdx,yHighIdx,yInter,xLowIdx,xHighIdx,xInter,input,outputs);
hipDeviceSynchronize();
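// untimed warm-up launches before the measured loop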
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((cudaSBilinearTF_Forward_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, outputWidth,outputHeight,nbChannels,batchSize,inputWidth,inputHeight,yLowIdx,yHighIdx,yInter,xLowIdx,xHighIdx,xInter,input,outputs);
}
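// measured region: 1000 back-to-back launches; the total time in microseconds is printed below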
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((cudaSBilinearTF_Forward_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, outputWidth,outputHeight,nbChannels,batchSize,inputWidth,inputHeight,yLowIdx,yHighIdx,yInter,xLowIdx,xHighIdx,xInter,input,outputs);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 05ac5190900970ad81f008dec28323897b1278ca.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cudaSBilinearTF_Forward_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int outputWidth = 1;
unsigned int outputHeight = 1;
unsigned int nbChannels = 1;
unsigned int batchSize = 1;
unsigned int inputWidth = 1;
unsigned int inputHeight = 1;
const unsigned int *yLowIdx = NULL;
cudaMalloc(&yLowIdx, XSIZE*YSIZE);
const unsigned int *yHighIdx = NULL;
cudaMalloc(&yHighIdx, XSIZE*YSIZE);
const float *yInter = NULL;
cudaMalloc(&yInter, XSIZE*YSIZE);
const unsigned int *xLowIdx = NULL;
cudaMalloc(&xLowIdx, XSIZE*YSIZE);
const unsigned int *xHighIdx = NULL;
cudaMalloc(&xHighIdx, XSIZE*YSIZE);
const float *xInter = NULL;
cudaMalloc(&xInter, XSIZE*YSIZE);
const float *input = NULL;
cudaMalloc(&input, XSIZE*YSIZE);
float *outputs = NULL;
cudaMalloc(&outputs, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cudaSBilinearTF_Forward_kernel<<<gridBlock,threadBlock>>>(outputWidth,outputHeight,nbChannels,batchSize,inputWidth,inputHeight,yLowIdx,yHighIdx,yInter,xLowIdx,xHighIdx,xInter,input,outputs);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cudaSBilinearTF_Forward_kernel<<<gridBlock,threadBlock>>>(outputWidth,outputHeight,nbChannels,batchSize,inputWidth,inputHeight,yLowIdx,yHighIdx,yInter,xLowIdx,xHighIdx,xInter,input,outputs);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cudaSBilinearTF_Forward_kernel<<<gridBlock,threadBlock>>>(outputWidth,outputHeight,nbChannels,batchSize,inputWidth,inputHeight,yLowIdx,yHighIdx,yInter,xLowIdx,xHighIdx,xInter,input,outputs);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
eafb0d8a829d31cabe8048636088b64abea97f5f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Builder.h"
static __global__ void setup_kernel (hiprandState_t * state,
unsigned long seed,
const int maxI )
{
int id = blockIdx.x *blockDim.x + threadIdx.x;
if (id >= maxI) return;
hiprand_init ( seed, id, 0, &state[id] );
}
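// map a standard-normal draw to N(mean[d], dev^2), where d = id % dimension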
static __global__ void generate(hiprandState_t* globalState,
float* data,
const float* mean,
const float dev,
const int maxI,
const int dimension)
{
int id = blockIdx.x *blockDim.x + threadIdx.x;
if (id >= maxI) return;
hiprandState_t localState = globalState[id];
float value = hiprand_normal ( &localState );
globalState[id] = localState;
data[id] = value*dev + mean[id % dimension];
}
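// sample uniformly inside the axis-aligned bounding box: entries [0, dimension) hold the
// lower corner, entries [dimension, 2*dimension) the upper corner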
static __global__ void generateUniform(hiprandState_t* globalState,
float* data,
const float* boundingBox,
const int maxI,
const int dimension)
{
int id = blockIdx.x *blockDim.x + threadIdx.x;
if (id >= maxI) return;
int dim = id % dimension;
hiprandState_t localState = globalState[id];
float value = hiprand_uniform ( &localState );
globalState[id] = localState;
data[id] = value*(boundingBox[dimension + dim] - boundingBox[dim]) + boundingBox[dim];
}
static __global__ void setLabels (int* labels)
{
int cluster = blockIdx.x;
int point = threadIdx.x;
labels[point + cluster*blockDim.x] = cluster;
}
void GenerateSingleCluster (hiprandState_t* states,
const int seed,
const int dimension,
float* mean,
const float dev,
const int Npoints,
float* data,
bool* initSetting)
{
const int kernelsPerBlock = 512;
const int blocks = Npoints*dimension/kernelsPerBlock + 1;
if (!(*initSetting))
{
hipLaunchKernelGGL(( setup_kernel) , dim3(blocks), dim3(kernelsPerBlock), 0, 0, states, seed, Npoints*dimension);
*initSetting = true;
}
hipLaunchKernelGGL(( generate) , dim3(blocks), dim3(kernelsPerBlock), 0, 0, states, data, mean, dev, Npoints*dimension, dimension);
}
void GenerateUniformBox (const int seed,
const int dimension,
float* boundingBox,
const int Npoints,
float* data)
{
hiprandState_t* states = nullptr;
CC(hipMalloc (&states, Npoints*dimension*sizeof (hiprandState_t)));
const int kernelsPerBlock = 512;
const int blocks = Npoints*dimension/kernelsPerBlock + 1;
hipLaunchKernelGGL(( setup_kernel) , dim3(blocks), dim3(kernelsPerBlock), 0, 0, states, seed, Npoints*dimension);
hipLaunchKernelGGL(( generateUniform) , dim3(blocks), dim3(kernelsPerBlock), 0, 0, states,
data, boundingBox,
Npoints*dimension,
dimension);
CC(hipFree (states));
}
void GenerateDatasetGaussian (const int seed,
const int Npoints,
const int Nclusters,
const int dimension,
float* data,
int* labels,
bool shuffle,
const float stddev)
{
srand (seed);
hiprandState_t* states = nullptr;
CC(hipMalloc (&states, Npoints*dimension*sizeof (hiprandState_t)));
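// when no stddev is supplied, shrink the default spread as the number of clusters grows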
const float dev = (stddev < 0.001f) ? 0.2f/Nclusters : stddev;
bool initSetting = false;
for (int cluster = 0; cluster < Nclusters; cluster++)
{
thrust::host_vector<float> mean (dimension, 0.0f);
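// pick the cluster centre uniformly in [3*dev, 1 - 3*dev] along every dimension,
// keeping the cluster well inside the unit cube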
for (int d = 0; d < dimension; d++)
mean[d] = ((rand()*1.0f) /RAND_MAX) * (1-6*dev) + 3*dev;
thrust::device_vector<float> meanD (mean);
GenerateSingleCluster (states,
seed,
dimension,
meanD.data().get(),
dev, Npoints,
data + cluster*Npoints*dimension,
&initSetting);
}
hipLaunchKernelGGL(( setLabels) , dim3(Nclusters), dim3(Npoints), 0, 0, labels);
if (shuffle)
{
thrust::device_vector<float> swapPoint (dimension);
float* swapPtr = swapPoint.data().get();
float swapLabel = 0.0f;
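// shuffle points (and their labels) in place by swapping each point with a randomly
// chosen one, copying directly in device memory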
for (int s = 0; s < Npoints*Nclusters; s++)
{
int x = rand () % (Npoints*Nclusters);
int y = s;//rand () % Npoints*Nclusters;
if (x == y) continue;
CC(hipMemcpy (swapPtr,
data+x*dimension,
sizeof(float)*dimension,
hipMemcpyDeviceToDevice));
CC(hipMemcpy (data+x*dimension,
data+y*dimension,
sizeof(float)*dimension,
hipMemcpyDeviceToDevice));
CC(hipMemcpy (data+y*dimension,
swapPtr,
sizeof(float)*dimension,
hipMemcpyDeviceToDevice));
CC(hipMemcpy (&swapLabel,
labels+x,
sizeof(float),
hipMemcpyDeviceToHost));
CC(hipMemcpy (labels + x,
labels + y,
sizeof(float),
hipMemcpyDeviceToDevice));
CC(hipMemcpy (labels + y,
&swapLabel,
sizeof(float),
hipMemcpyHostToDevice));
}
}
CC(hipFree (states));
}
| eafb0d8a829d31cabe8048636088b64abea97f5f.cu | #include "Builder.h"
static __global__ void setup_kernel (curandState * state,
unsigned long seed,
const int maxI )
{
int id = blockIdx.x *blockDim.x + threadIdx.x;
if (id >= maxI) return;
curand_init ( seed, id, 0, &state[id] );
}
static __global__ void generate(curandState* globalState,
float* data,
const float* mean,
const float dev,
const int maxI,
const int dimension)
{
int id = blockIdx.x *blockDim.x + threadIdx.x;
if (id >= maxI) return;
curandState localState = globalState[id];
float value = curand_normal ( &localState );
globalState[id] = localState;
data[id] = value*dev + mean[id % dimension];
}
static __global__ void generateUniform(curandState* globalState,
float* data,
const float* boundingBox,
const int maxI,
const int dimension)
{
int id = blockIdx.x *blockDim.x + threadIdx.x;
if (id >= maxI) return;
int dim = id % dimension;
curandState localState = globalState[id];
float value = curand_uniform ( &localState );
globalState[id] = localState;
data[id] = value*(boundingBox[dimension + dim] - boundingBox[dim]) + boundingBox[dim];
}
static __global__ void setLabels (int* labels)
{
int cluster = blockIdx.x;
int point = threadIdx.x;
labels[point + cluster*blockDim.x] = cluster;
}
void GenerateSingleCluster (curandState* states,
const int seed,
const int dimension,
float* mean,
const float dev,
const int Npoints,
float* data,
bool* initSetting)
{
const int kernelsPerBlock = 512;
const int blocks = Npoints*dimension/kernelsPerBlock + 1;
if (!(*initSetting))
{
setup_kernel <<<blocks, kernelsPerBlock>>> (states, seed, Npoints*dimension);
*initSetting = true;
}
generate <<<blocks, kernelsPerBlock>>> (states, data, mean, dev, Npoints*dimension, dimension);
}
void GenerateUniformBox (const int seed,
const int dimension,
float* boundingBox,
const int Npoints,
float* data)
{
curandState* states = nullptr;
CC(cudaMalloc (&states, Npoints*dimension*sizeof (curandState)));
const int kernelsPerBlock = 512;
const int blocks = Npoints*dimension/kernelsPerBlock + 1;
setup_kernel <<<blocks, kernelsPerBlock>>> (states, seed, Npoints*dimension);
generateUniform <<<blocks, kernelsPerBlock>>> (states,
data, boundingBox,
Npoints*dimension,
dimension);
CC(cudaFree (states));
}
void GenerateDatasetGaussian (const int seed,
const int Npoints,
const int Nclusters,
const int dimension,
float* data,
int* labels,
bool shuffle,
const float stddev)
{
srand (seed);
curandState* states = nullptr;
CC(cudaMalloc (&states, Npoints*dimension*sizeof (curandState)));
const float dev = (stddev < 0.001f) ? 0.2f/Nclusters : stddev;
bool initSetting = false;
for (int cluster = 0; cluster < Nclusters; cluster++)
{
thrust::host_vector<float> mean (dimension, 0.0f);
for (int d = 0; d < dimension; d++)
mean[d] = ((rand()*1.0f) /RAND_MAX) * (1-6*dev) + 3*dev;
thrust::device_vector<float> meanD (mean);
GenerateSingleCluster (states,
seed,
dimension,
meanD.data().get(),
dev, Npoints,
data + cluster*Npoints*dimension,
&initSetting);
}
setLabels <<<Nclusters, Npoints>>> (labels);
if (shuffle)
{
thrust::device_vector<float> swapPoint (dimension);
float* swapPtr = swapPoint.data().get();
float swapLabel = 0.0f;
for (int s = 0; s < Npoints*Nclusters; s++)
{
int x = rand () % (Npoints*Nclusters);
int y = s;//rand () % Npoints*Nclusters;
if (x == y) continue;
CC(cudaMemcpy (swapPtr,
data+x*dimension,
sizeof(float)*dimension,
cudaMemcpyDeviceToDevice));
CC(cudaMemcpy (data+x*dimension,
data+y*dimension,
sizeof(float)*dimension,
cudaMemcpyDeviceToDevice));
CC(cudaMemcpy (data+y*dimension,
swapPtr,
sizeof(float)*dimension,
cudaMemcpyDeviceToDevice));
CC(cudaMemcpy (&swapLabel,
labels+x,
sizeof(float),
cudaMemcpyDeviceToHost));
CC(cudaMemcpy (labels + x,
labels + y,
sizeof(float),
cudaMemcpyDeviceToDevice));
CC(cudaMemcpy (labels + y,
&swapLabel,
sizeof(float),
cudaMemcpyHostToDevice));
}
}
CC(cudaFree (states));
}
|
132bd607ee27545159283fa89eb31e70fbf78d2f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <assert.h>
#include <mpfr.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <math.h>
using namespace std;
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE RD_WG_SIZE
#else
#define BLOCK_SIZE 16
#endif
#define STR_SIZE 256
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
#define pin_stats_reset() startCycle()
#define pin_stats_pause(cycles) stopCycle(cycles)
#define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles)
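// per-run physical constants kept in constant memory: thermal capacitance,
// x/y/z thermal resistances, and the time step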
__constant__ double dcap, dxr, dyr, dzr, dsp;
void readtemp(double *vtp, int grid_rows, int grid_cols, char *file){
int i,j;
FILE *fp;
char str[STR_SIZE];
float tpv;
if( (fp = fopen(file, "r" )) ==0 )
printf( "The file was not opened\n" );
for (i=0; i <= grid_rows-1; i++)
for (j=0; j <= grid_cols-1; j++)
{
fgets(str, STR_SIZE, fp);
if (feof(fp))
fprintf(stderr, "error: not enough lines in file\n");
if ((sscanf(str, "%f", &tpv) != 1))
fprintf(stderr, "error: invalid file format\n");
vtp[i*grid_cols+j] = (double)tpv;
}
fclose(fp);
}
void readpower(double *vpr, int grid_rows, int grid_cols, char *file){
int i,j;
FILE *fp;
char str[STR_SIZE];
float pwv;
if( (fp = fopen(file, "r" )) ==0 )
printf( "The file was not opened\n" );
for (i=0; i <= grid_rows-1; i++)
for (j=0; j <= grid_cols-1; j++)
{
fgets(str, STR_SIZE, fp);
if (feof(fp))
fprintf(stderr, "error: not enough lines in file\n");
if ((sscanf(str, "%f", &pwv) != 1))
fprintf(stderr, "error: invalid file format\n");
vpr[i*grid_cols+j] = (double)pwv;
}
fclose(fp);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
__global__ void calculate_temp(int iteration,
double *pwer,
double *temp_src,
double *temp_dst,
int grid_cols,
int grid_rows,
int border_cols,
int border_rows){
__shared__ double temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double power_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double temp_t[BLOCK_SIZE][BLOCK_SIZE];
double amb_temp = (double)80.0;
double step_div_Cap;
double Rx_1,Ry_1,Rz_1;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
step_div_Cap=dsp/dcap;
Rx_1=1/dxr;
Ry_1=1/dyr;
Rz_1=1/dzr;
// calculate the small block size
int small_block_rows = BLOCK_SIZE-iteration*2;
int small_block_cols = BLOCK_SIZE-iteration*2;
// calculate the boundary for the block according to
// the boundary of its small block
int blkY = small_block_rows*by-border_rows;
int blkX = small_block_cols*bx-border_cols;
int blkYmax = blkY+BLOCK_SIZE-1;
int blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordination
int yidx = blkY+ty;
int xidx = blkX+tx;
// load data if it is within the valid input range
int loadYidx=yidx, loadXidx=xidx;
int index = grid_cols*loadYidx+loadXidx;
if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){
temp_on_cuda[ty][tx] = temp_src[index];
power_on_cuda[ty][tx] = pwer[index];
}
__syncthreads();
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validYmin = (blkY < 0) ? -blkY : 0;
int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1;
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1;
int N = ty-1;
int S = ty+1;
int W = tx-1;
int E = tx+1;
N = (N < validYmin) ? validYmin : N;
S = (S > validYmax) ? validYmax : S;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool computed;
for (int i=0; i<iteration ; i++){
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(tx, validXmin, validXmax) && \
IN_RANGE(ty, validYmin, validYmax) ) {
computed = true;
temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] +
(temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 +
(temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 +
(amb_temp - temp_on_cuda[ty][tx]) * Rz_1);
}
__syncthreads();
if(i==iteration-1)
break;
if(computed)
temp_on_cuda[ty][tx]= temp_t[ty][tx];
__syncthreads();
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed){
temp_dst[index]= temp_t[ty][tx];
}
}
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]);
fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n");
fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n");
fprintf(stderr, "\t<sim_time> - number of iterations\n");
fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n");
fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n");
fprintf(stderr, "\t<output_file> - name of the output file\n");
exit(1);
}
int main(int argc, char** argv)
{
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
int size;
int grid_rows,grid_cols;
//char *tfile, *pfile, *ofile;
char *tfile, *pfile;
struct timeval start_t;
struct timeval end_t;
struct timeval skt_t;
struct timeval ske_t;
int total_iterations = 60;
int pyramid_height = 1; // time steps fused per kernel launch
int ret;
if (argc != 6)
usage(argc, argv);
if((grid_rows = atoi(argv[1]))<=0||(grid_cols = atoi(argv[1]))<=0||(pyramid_height = atoi(argv[2]))<=0||(total_iterations = atoi(argv[3]))<=0)
usage(argc, argv);
tfile=argv[4];
pfile=argv[5];
size=grid_rows*grid_cols;
/* --------------- pyramid parameters --------------- */
# define EXPAND_RATE 2
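// each launch fuses pyramid_height time steps, so every block needs a halo (border) of
// pyramid_height cells per side and the effective (small) block shrinks by EXPAND_RATE*pyramid_height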
int borderCols = (pyramid_height)*EXPAND_RATE/2;
int borderRows = (pyramid_height)*EXPAND_RATE/2;
int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1);
int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1);
double *FilesavingTemp = new double[size];
double *FilesavingPower = new double[size];
double *MatrixOut = new double[size];
for (int i=0; i<size; i++)
MatrixOut[i] = (double)0.0;
readtemp(FilesavingTemp, grid_rows, grid_cols, tfile);
readpower(FilesavingPower, grid_rows, grid_cols, pfile);
double t_chip = 0.0005;
double chip_height = 0.016;
double chip_width = 0.016;
double grid_height = chip_height / grid_rows;
double grid_width = chip_width / grid_cols;
double max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
double Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
double Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
double Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
double Rz = t_chip / (K_SI * grid_height * grid_width);
double step = PRECISION / max_slope;
int ts;
int src = 1, dst = 0;
int dviter;
int temp;
gettimeofday(&start_t,0L);
hipMemcpyToSymbol (&dcap, &Cap, sizeof(double));
hipMemcpyToSymbol (&dxr, &Rx, sizeof(double));
hipMemcpyToSymbol (&dyr, &Ry, sizeof(double));
hipMemcpyToSymbol (&dzr, &Rz, sizeof(double));
hipMemcpyToSymbol (&dsp, &step, sizeof(double));
double *MatrixTemp[2], *MatrixPower;
hipMalloc((void**)&MatrixTemp[0], sizeof(double)*size);
hipMalloc((void**)&MatrixTemp[1], sizeof(double)*size);
hipMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(double)*size, hipMemcpyHostToDevice);
hipMemcpy(MatrixTemp[1], MatrixOut, sizeof(double)*size, hipMemcpyHostToDevice);
hipMalloc((void**)&MatrixPower, sizeof(double)*size);
hipMemcpy(MatrixPower, FilesavingPower, sizeof(double)*size, hipMemcpyHostToDevice);
printf("Start computing the transient temperature\n");
gettimeofday(&skt_t,0L);
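// ping-pong between the two temperature buffers, advancing up to pyramid_height steps per launch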
for (ts = 0; ts < total_iterations; ts=ts+pyramid_height) {
temp = src;
src = dst;
dst = temp;
dviter = MIN(pyramid_height, total_iterations-ts);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(blockCols, blockRows);
hipLaunchKernelGGL(( calculate_temp), dim3(dimGrid), dim3(dimBlock), 0, 0, dviter, MatrixPower,MatrixTemp[src],MatrixTemp[dst],\
grid_cols, grid_rows,borderCols, borderRows);
}
hipDeviceSynchronize();
ret = dst;
gettimeofday(&ske_t,0L);
printf("Ending simulation\n");
hipMemcpy(MatrixOut, MatrixTemp[ret], sizeof(double)*size, hipMemcpyDeviceToHost);
gettimeofday(&end_t,0L);
std::cout << "time: " << ((end_t.tv_sec + end_t.tv_usec*1e-6) - (start_t.tv_sec + start_t.tv_usec*1e-6)) << "\n";
std::cout <<"kernel: " << ((ske_t . tv_sec - skt_t . tv_sec) + (ske_t . tv_usec - skt_t . tv_usec) * 1e-6) << endl;
hipFree(MatrixPower);
hipFree(MatrixTemp[0]);
hipFree(MatrixTemp[1]);
free(MatrixOut);
return EXIT_SUCCESS;
}
| 132bd607ee27545159283fa89eb31e70fbf78d2f.cu | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <assert.h>
#include <mpfr.h>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>
using namespace std;
#ifdef RD_WG_SIZE_0_0
#define BLOCK_SIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define BLOCK_SIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE RD_WG_SIZE
#else
#define BLOCK_SIZE 16
#endif
#define STR_SIZE 256
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
#define SPEC_HEAT_SI 1.75e6
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
#define pin_stats_reset() startCycle()
#define pin_stats_pause(cycles) stopCycle(cycles)
#define pin_stats_dump(cycles) printf("timer: %Lu\n", cycles)
__constant__ double dcap, dxr, dyr, dzr, dsp;
void readtemp(double *vtp, int grid_rows, int grid_cols, char *file){
int i,j;
FILE *fp;
char str[STR_SIZE];
float tpv;
if( (fp = fopen(file, "r" )) ==0 )
printf( "The file was not opened\n" );
for (i=0; i <= grid_rows-1; i++)
for (j=0; j <= grid_cols-1; j++)
{
fgets(str, STR_SIZE, fp);
if (feof(fp))
fprintf(stderr, "error: not enough lines in file\n");
if ((sscanf(str, "%f", &tpv) != 1))
fprintf(stderr, "error: invalid file format\n");
vtp[i*grid_cols+j] = (double)tpv;
}
fclose(fp);
}
void readpower(double *vpr, int grid_rows, int grid_cols, char *file){
int i,j;
FILE *fp;
char str[STR_SIZE];
float pwv;
if( (fp = fopen(file, "r" )) ==0 )
printf( "The file was not opened\n" );
for (i=0; i <= grid_rows-1; i++)
for (j=0; j <= grid_cols-1; j++)
{
fgets(str, STR_SIZE, fp);
if (feof(fp))
fprintf(stderr, "error: not enough lines in file\n");
if ((sscanf(str, "%f", &pwv) != 1))
fprintf(stderr, "error: invalid file format\n");
vpr[i*grid_cols+j] = (double)pwv;
}
fclose(fp);
}
#define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max))
#define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x )
#define MIN(a, b) ((a)<=(b) ? (a) : (b))
__global__ void calculate_temp(int iteration,
double *pwer,
double *temp_src,
double *temp_dst,
int grid_cols,
int grid_rows,
int border_cols,
int border_rows){
__shared__ double temp_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double power_on_cuda[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double temp_t[BLOCK_SIZE][BLOCK_SIZE];
double amb_temp = (double)80.0;
double step_div_Cap;
double Rx_1,Ry_1,Rz_1;
int bx = blockIdx.x;
int by = blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
step_div_Cap=dsp/dcap;
Rx_1=1/dxr;
Ry_1=1/dyr;
Rz_1=1/dzr;
// calculate the small block size
int small_block_rows = BLOCK_SIZE-iteration*2;
int small_block_cols = BLOCK_SIZE-iteration*2;
// calculate the boundary for the block according to
// the boundary of its small block
int blkY = small_block_rows*by-border_rows;
int blkX = small_block_cols*bx-border_cols;
int blkYmax = blkY+BLOCK_SIZE-1;
int blkXmax = blkX+BLOCK_SIZE-1;
// calculate the global thread coordination
int yidx = blkY+ty;
int xidx = blkX+tx;
// load data if it is within the valid input range
int loadYidx=yidx, loadXidx=xidx;
int index = grid_cols*loadYidx+loadXidx;
if(IN_RANGE(loadYidx, 0, grid_rows-1) && IN_RANGE(loadXidx, 0, grid_cols-1)){
temp_on_cuda[ty][tx] = temp_src[index];
power_on_cuda[ty][tx] = pwer[index];
}
__syncthreads();
// effective range within this block that falls within
// the valid range of the input data
// used to rule out computation outside the boundary.
int validYmin = (blkY < 0) ? -blkY : 0;
int validYmax = (blkYmax > grid_rows-1) ? BLOCK_SIZE-1-(blkYmax-grid_rows+1) : BLOCK_SIZE-1;
int validXmin = (blkX < 0) ? -blkX : 0;
int validXmax = (blkXmax > grid_cols-1) ? BLOCK_SIZE-1-(blkXmax-grid_cols+1) : BLOCK_SIZE-1;
int N = ty-1;
int S = ty+1;
int W = tx-1;
int E = tx+1;
N = (N < validYmin) ? validYmin : N;
S = (S > validYmax) ? validYmax : S;
W = (W < validXmin) ? validXmin : W;
E = (E > validXmax) ? validXmax : E;
bool computed;
for (int i=0; i<iteration ; i++){
computed = false;
if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(ty, i+1, BLOCK_SIZE-i-2) && \
IN_RANGE(tx, validXmin, validXmax) && \
IN_RANGE(ty, validYmin, validYmax) ) {
computed = true;
temp_t[ty][tx] = temp_on_cuda[ty][tx] + step_div_Cap * (power_on_cuda[ty][tx] +
(temp_on_cuda[S][tx] + temp_on_cuda[N][tx] - 2.0*temp_on_cuda[ty][tx]) * Ry_1 +
(temp_on_cuda[ty][E] + temp_on_cuda[ty][W] - 2.0*temp_on_cuda[ty][tx]) * Rx_1 +
(amb_temp - temp_on_cuda[ty][tx]) * Rz_1);
}
__syncthreads();
if(i==iteration-1)
break;
if(computed)
temp_on_cuda[ty][tx]= temp_t[ty][tx];
__syncthreads();
}
// update the global memory
// after the last iteration, only threads coordinated within the
// small block perform the calculation and switch on ``computed''
if (computed){
temp_dst[index]= temp_t[ty][tx];
}
}
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <grid_rows/grid_cols> <pyramid_height> <sim_time> <temp_file> <power_file> <output_file>\n", argv[0]);
fprintf(stderr, "\t<grid_rows/grid_cols> - number of rows/cols in the grid (positive integer)\n");
fprintf(stderr, "\t<pyramid_height> - pyramid heigh(positive integer)\n");
fprintf(stderr, "\t<sim_time> - number of iterations\n");
fprintf(stderr, "\t<temp_file> - name of the file containing the initial temperature values of each cell\n");
fprintf(stderr, "\t<power_file> - name of the file containing the dissipated power values of each cell\n");
fprintf(stderr, "\t<output_file> - name of the output file\n");
exit(1);
}
int main(int argc, char** argv)
{
printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE);
int size;
int grid_rows,grid_cols;
//char *tfile, *pfile, *ofile;
char *tfile, *pfile;
struct timeval start_t;
struct timeval end_t;
struct timeval skt_t;
struct timeval ske_t;
int total_iterations = 60;
int pyramid_height = 1; // number of iterations
int ret;
if (argc != 6)
usage(argc, argv);
if((grid_rows = atoi(argv[1]))<=0||(grid_cols = atoi(argv[1]))<=0||(pyramid_height = atoi(argv[2]))<=0||(total_iterations = atoi(argv[3]))<=0)
usage(argc, argv);
tfile=argv[4];
pfile=argv[5];
size=grid_rows*grid_cols;
/* --------------- pyramid parameters --------------- */
# define EXPAND_RATE 2
int borderCols = (pyramid_height)*EXPAND_RATE/2;
int borderRows = (pyramid_height)*EXPAND_RATE/2;
int smallBlockCol = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int smallBlockRow = BLOCK_SIZE-(pyramid_height)*EXPAND_RATE;
int blockCols = grid_cols/smallBlockCol+((grid_cols%smallBlockCol==0)?0:1);
int blockRows = grid_rows/smallBlockRow+((grid_rows%smallBlockRow==0)?0:1);
double *FilesavingTemp = new double[size];
double *FilesavingPower = new double[size];
double *MatrixOut = new double[size];
for (int i=0; i<size; i++)
MatrixOut[i] = (double)0.0;
readtemp(FilesavingTemp, grid_rows, grid_cols, tfile);
readpower(FilesavingPower, grid_rows, grid_cols, pfile);
double t_chip = 0.0005;
double chip_height = 0.016;
double chip_width = 0.016;
double grid_height = chip_height / grid_rows;
double grid_width = chip_width / grid_cols;
double max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
double Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
double Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
double Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
double Rz = t_chip / (K_SI * grid_height * grid_width);
double step = PRECISION / max_slope;
int ts;
int src = 1, dst = 0;
int dviter;
int temp;
gettimeofday(&start_t,0L);
cudaMemcpyToSymbol (&dcap, &Cap, sizeof(double));
cudaMemcpyToSymbol (&dxr, &Rx, sizeof(double));
cudaMemcpyToSymbol (&dyr, &Ry, sizeof(double));
cudaMemcpyToSymbol (&dzr, &Rz, sizeof(double));
cudaMemcpyToSymbol (&dsp, &step, sizeof(double));
double *MatrixTemp[2], *MatrixPower;
cudaMalloc((void**)&MatrixTemp[0], sizeof(double)*size);
cudaMalloc((void**)&MatrixTemp[1], sizeof(double)*size);
cudaMemcpy(MatrixTemp[0], FilesavingTemp, sizeof(double)*size, cudaMemcpyHostToDevice);
cudaMemcpy(MatrixTemp[1], MatrixOut, sizeof(double)*size, cudaMemcpyHostToDevice);
cudaMalloc((void**)&MatrixPower, sizeof(double)*size);
cudaMemcpy(MatrixPower, FilesavingPower, sizeof(double)*size, cudaMemcpyHostToDevice);
printf("Start computing the transient temperature\n");
gettimeofday(&skt_t,0L);
for (ts = 0; ts < total_iterations; ts=ts+pyramid_height) {
temp = src;
src = dst;
dst = temp;
dviter = MIN(pyramid_height, total_iterations-ts);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(blockCols, blockRows);
calculate_temp<<<dimGrid, dimBlock>>>(dviter, MatrixPower,MatrixTemp[src],MatrixTemp[dst],\
grid_cols, grid_rows,borderCols, borderRows);
}
cudaDeviceSynchronize();
ret = dst;
gettimeofday(&ske_t,0L);
printf("Ending simulation\n");
cudaMemcpy(MatrixOut, MatrixTemp[ret], sizeof(double)*size, cudaMemcpyDeviceToHost);
gettimeofday(&end_t,0L);
std::cout << "time: " << ((end_t.tv_sec + end_t.tv_usec*1e-6) - (start_t.tv_sec + start_t.tv_usec*1e-6)) << "\n";
std::cout <<"kernel: " << ((ske_t . tv_sec - skt_t . tv_sec) + (ske_t . tv_usec - skt_t . tv_usec) * 1e-6) << endl;
cudaFree(MatrixPower);
cudaFree(MatrixTemp[0]);
cudaFree(MatrixTemp[1]);
free(MatrixOut);
return EXIT_SUCCESS;
}
|
0eb366000a8b1366e2aae6b5df2cdd7939830d46.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cutil_inline.h>
#include <iostream>
#include <math.h>
#include <layer_kernels.cuh>
#include <layer.cuh>
#include <data.cuh>
#include <util.cuh>
#include <cudaconv2.cuh>
#include <matrix.h>
#include <Python.h>
using namespace std;
/*
* =======================
* Layer
* =======================
*/
Layer::Layer(ConvNet* convNet, PyObject* paramsDict, bool trans) :
_convNet(convNet), _trans(trans) {
_name = pyDictGetString(paramsDict, "name");
_type = pyDictGetString(paramsDict, "type");
_numGradProducersNext = 0;
_foundGradConsumers = false;
_gradConsumer = pyDictGetInt(paramsDict, "gradConsumer");
_actsTarget = pyDictGetInt(paramsDict, "actsTarget");
_actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget");
_conserveMem = pyDictGetInt(paramsDict, "conserveMem");
_outputs = _actsTarget < 0 ? new NVMatrix() : NULL;
_actsGrad = _actsGradTarget < 0 ? new NVMatrix() : NULL;
}
void Layer::fpropNext(PASS_TYPE passType) {
for (int i = 0; i < _next.size(); i++) {
_next[i]->fprop(passType);
}
}
void Layer::truncBwdActs() {
// Only truncate actsGrad if I own it
if (_conserveMem && _actsGradTarget < 0) {
getActsGrad().truncate();
}
if (_conserveMem) {
getActs().truncate();
}
}
void Layer::fprop(PASS_TYPE passType) {
_rcvdFInputs += 1;
if (_rcvdFInputs == _prev.size()) {
NVMatrixV v;
for (int i = 0; i < _prev.size(); i++) {
v.push_back(&_prev[i]->getActs());
}
fprop(v, passType);
}
}
void Layer::fprop(NVMatrix& v, PASS_TYPE passType) {
NVMatrixV vl;
vl.push_back(&v);
fprop(vl, passType);
}
void Layer::fprop(NVMatrixV& v, PASS_TYPE passType) {
assert(v.size() == _prev.size());
_inputs.clear();
_inputs.insert(_inputs.begin(), v.begin(), v.end());
_outputs = _actsTarget < 0 ? _outputs : _inputs[_actsTarget];
_rcvdFInputs = _prev.size();
for (NVMatrixV::iterator it = v.begin(); it != v.end(); ++it) {
(*it)->transpose(_trans);
}
getActs().transpose(_trans);
// First do fprop on the input whose acts matrix I'm sharing, if any
if (_actsTarget >= 0) {
fpropActs(_actsTarget, 0, passType);
}
// Then add the rest of the inputs to that
for (int i = 0; i < _prev.size(); i++) {
if (i != _actsTarget) {
fpropActs(i, _actsTarget >= 0 || i > 0, passType);
}
}
fpropNext(passType);
}
void Layer::bprop(PASS_TYPE passType) {
if (_rcvdBInputs == _numGradProducersNext) {
_rcvdBInputs++; // avoid doing bprop computation twice
bprop(getActsGrad(), passType);
}
}
void Layer::bprop(NVMatrix& v, PASS_TYPE passType) {
v.transpose(_trans);
for (int i = 0; i < _prev.size(); i++) {
_prev[i]->getActs().transpose(_trans);
_prev[i]->getActsGrad().transpose(_trans);
}
getActs().transpose(_trans);
bpropCommon(v, passType);
if (isGradProducer()) {
// First propagate activity gradient to all layers whose activity
// gradient matrix I'm definitely not sharing.
for (int i = 0; i < _prev.size(); i++) {
if (_prev[i]->isGradConsumer() && _actsGradTarget != i) {
bpropActs(v, i, _prev[i]->getRcvdBInputs() > 0 ? 1 : 0, passType);
_prev[i]->incRcvdBInputs();
}
}
// Then propagate activity gradient to the layer whose activity gradient
// matrix I'm sharing, if any.
if (_actsGradTarget >= 0 && _prev[_actsGradTarget]->isGradConsumer()) {
bpropActs(v, _actsGradTarget, _prev[_actsGradTarget]->getRcvdBInputs() > 0 ? 1 : 0, passType);
_prev[_actsGradTarget]->incRcvdBInputs();
}
}
truncBwdActs();
if (isGradProducer()) {
for (int i = 0; i < _prev.size(); i++) {
if (_prev[i]->isGradConsumer()) {
_prev[i]->bprop(passType);
}
}
}
}
void Layer::reset() {
_rcvdFInputs = 0;
_rcvdBInputs = 0;
}
string& Layer::getName() {
return _name;
}
string& Layer::getType() {
return _type;
}
int Layer::getRcvdFInputs() {
return _rcvdFInputs;
}
int Layer::getRcvdBInputs() {
return _rcvdBInputs;
}
int Layer::incRcvdBInputs() {
return ++_rcvdBInputs;
}
void Layer::addNext(Layer* l) {
_next.push_back(l);
_numGradProducersNext += l->isGradProducer();
}
void Layer::addPrev(Layer* l) {
_prev.push_back(l);
}
void Layer::postInit() {
// _outputs = _actsTarget < 0 ? new NVMatrix() : &_prev[_actsTarget]->getActs();
_actsGrad = _actsGradTarget < 0 ? new NVMatrix() : &_prev[_actsGradTarget]->getActsGrad();
}
// Does this layer, or some layer below it, need the gradient
// for parameter updates?
// Only weight layers should be grad consumers themselves.
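// (The answer is this layer's own gradConsumer flag OR'd with isGradConsumer() of every layer
// below it, computed on first use and cached via _foundGradConsumers.)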
bool Layer::isGradConsumer() {
if (!_foundGradConsumers) {
for (int i = 0; i < _prev.size(); i++) {
_gradConsumer |= _prev[i]->isGradConsumer();
}
_foundGradConsumers = true;
}
return _gradConsumer;
}
// Does this layer produce gradient for layers below?
bool Layer::isGradProducer() {
return true;
}
vector<Layer*>& Layer::getPrev() {
return _prev;
}
vector<Layer*>& Layer::getNext() {
return _next;
}
NVMatrix& Layer::getActs() {
assert(_outputs != NULL);
return *_outputs;
}
NVMatrix& Layer::getActsGrad() {
assert(_actsGrad != NULL);
return *_actsGrad;
}
/*
* =======================
* NeuronLayer
* =======================
*/
NeuronLayer::NeuronLayer(ConvNet* convNet, PyObject* paramsDict)
: Layer(convNet, paramsDict, true) {
_neuron = &Neuron::makeNeuron(PyDict_GetItemString(paramsDict, "neuron"));
}
void NeuronLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_neuron->computeInputGrad(v, _prev[0]->getActsGrad(), scaleTargets > 0);
}
void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
_neuron->activate(*_inputs[0], getActs());
}
/*
* =======================
* WeightLayer
* =======================
*/
WeightLayer::WeightLayer(ConvNet* convNet, PyObject* paramsDict, bool trans, bool useGrad) :
Layer(convNet, paramsDict, trans) {
MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights");
MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc");
Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases");
Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc");
floatv& momW = *pyDictGetFloatV(paramsDict, "momW");
float momB = pyDictGetFloat(paramsDict, "momB");
floatv& epsW = *pyDictGetFloatV(paramsDict, "epsW");
float epsB = pyDictGetFloat(paramsDict, "epsB");
floatv& wc = *pyDictGetFloatV(paramsDict, "wc");
// Source layers for shared weights
intv& weightSourceLayerIndices = *pyDictGetIntV(paramsDict, "weightSourceLayerIndices");
// Weight matrix indices (inside the above source layers) for shared weights
intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices");
for (int i = 0; i < weightSourceLayerIndices.size(); i++) {
int srcLayerIdx = weightSourceLayerIndices[i];
int matrixIdx = weightSourceMatrixIndices[i];
if (srcLayerIdx == convNet->getNumLayers()) { // Current layer
_weights.addWeights(*new Weights(_weights[matrixIdx], epsW[i]));
} else if (srcLayerIdx >= 0) {
WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNet->getLayer(srcLayerIdx));
Weights* srcWeights = &srcLayer.getWeights(matrixIdx);
_weights.addWeights(*new Weights(*srcWeights, epsW[i]));
} else {
_weights.addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], epsW[i], wc[i], momW[i], useGrad));
}
}
_biases = new Weights(hBiases, hBiasesInc, epsB, 0, momB, true);
// Epsilons for finite-difference gradient checking operation
_wStep = 0.001;
_bStep = 0.002;
delete &weightSourceLayerIndices;
delete &weightSourceMatrixIndices;
delete &hWeights;
delete &hWeightsInc;
delete &momW;
delete &epsW;
delete &wc;
if (getName() == "noise" && PyDict_GetItemString(paramsDict, "testWeight") != NULL) {
Matrix& testWeight = *pyDictGetMatrix(paramsDict, "testWeight");
Matrix& testWeightInc = *pyDictGetMatrix(paramsDict, "testWeightInc");
_weights.addWeights(*new Weights(testWeight, testWeightInc,0, 0, 0, false));
}
}
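// _wStep and _bStep are the perturbation sizes used by checkGradients() when comparing the
// backpropagated gradient against a numerical estimate. Assuming a standard central-difference
// check, the estimate for a single weight is
//   dC/dw ~= (C(w + eps) - C(w - eps)) / (2 * eps),   eps = _wStep (or _bStep for biases),
// and the check passes when this agrees with the analytic gradient to within a small relative error.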
void WeightLayer::bpropCommon(NVMatrix& v, PASS_TYPE passType) {
if (_biases->getEps() > 0) {
bpropBiases(v, passType);
}
for (int i = 0; i < _weights.getSize(); i++) {
if (_weights[i].getEps() > 0) {
bpropWeights(v, i, passType);
// Increment its number of updates
_weights[i].incNumUpdates();
}
}
}
void WeightLayer::updateWeights() {
if (getName() == "noise") {
_weights[0].getGrad().maxWithScalar(-0.1);
_weights[0].getGrad().minWithScalar(0.1);
}
_weights.update();
_biases->update();
}
void WeightLayer::copyToCPU() {
_weights.copyToCPU();
_biases->copyToCPU();
}
void WeightLayer::copyToGPU() {
_weights.copyToGPU();
_biases->copyToGPU();
}
void WeightLayer::checkGradients() {
for (int i = 0; i < _weights.getSize(); i++) {
_convNet->checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights[i]);
}
_convNet->checkGradient(_name + " biases", _bStep, *_biases);
}
Weights& WeightLayer::getWeights(int idx) {
return _weights[idx];
}
void WeightLayer::adjustLearningRate(float factor) {
for (int i = 0; i < _weights.getSize(); i++) {
_weights[i].setEps(factor * _weights[i].getEps());
}
_biases->setEps(factor * _biases->getEps());
}
/*
* =======================
* FCLayer
* =======================
*/
FCLayer::FCLayer(ConvNet* convNet, PyObject* paramsDict) : WeightLayer(convNet, paramsDict, true, false) {
_wStep = 0.1;
_bStep = 0.01;
}
void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (getName() == "noise" && passType == PASS_TEST) {
if (_weights.getSize() > 1) {
getActs().addProduct(*_inputs[inpIdx], *_weights[1], scaleTargets, 1);
} else {
_inputs[inpIdx]->addScalar(0, getActs()); // noise layer does nothing during test
}
return;
}
getActs().addProduct(*_inputs[inpIdx], *_weights[inpIdx], scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(_biases->getW());
}
}
void FCLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& weights_T = _weights[inpIdx].getW().getTranspose();
_prev[inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1);
delete &weights_T;
}
void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
int numCases = v.getNumRows();
float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases;
_biases->getGrad().addSum(v, 0, 0, scaleBGrad);
}
void FCLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) {
int numCases = v.getNumRows();
NVMatrix& prevActs_T = _prev[inpIdx]->getActs().getTranspose();
float scaleInc = (_weights[inpIdx].getNumUpdates() == 0 && passType != PASS_GC) * _weights[inpIdx].getMom();
float scaleGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases;
_weights[inpIdx].getInc().addProduct(prevActs_T, v, scaleInc, scaleGrad);
delete &prevActs_T;
}
/*
* =======================
* LocalLayer
* =======================
*/
LocalLayer::LocalLayer(ConvNet* convNet, PyObject* paramsDict, bool useGrad)
: WeightLayer(convNet, paramsDict, false, useGrad) {
_padding = pyDictGetIntV(paramsDict, "padding");
_stride = pyDictGetIntV(paramsDict, "stride");
_filterSize = pyDictGetIntV(paramsDict, "filterSize");
_channels = pyDictGetIntV(paramsDict, "channels");
_imgSize = pyDictGetIntV(paramsDict, "imgSize");
_numFilters = pyDictGetInt(paramsDict, "filters");
_groups = pyDictGetIntV(paramsDict, "groups");
_filterChannels = pyDictGetIntV(paramsDict, "filterChannels");
_randSparse = pyDictGetIntV(paramsDict, "randSparse");
_overSample = pyDictGetIntV(paramsDict, "overSample");
_filterPixels = pyDictGetIntV(paramsDict, "filterPixels");
_imgPixels = pyDictGetIntV(paramsDict, "imgPixels");
_modulesX = pyDictGetInt(paramsDict, "modulesX");
_modules = pyDictGetInt(paramsDict, "modules");
// It's a vector on the heap to be consistent with all the others...
_filterConns = new vector<FilterConns>();
PyObject* pyFilterConns = PyDict_GetItemString(paramsDict, "filterConns");
for (int i = 0; i < _randSparse->size(); i++) {
FilterConns fc;
if (_randSparse->at(i)) {
fc.hFilterConns = getIntA(PyList_GET_ITEM(pyFilterConns, i));
}
_filterConns->push_back(fc);
}
}
void LocalLayer::copyToGPU() {
WeightLayer::copyToGPU();
for (int i = 0; i < _prev.size(); i++) {
if (_randSparse->at(i)) { // Copy to GPU vector that describes sparse random connectivity
hipMalloc(&_filterConns->at(i).dFilterConns, sizeof(int) * _groups->at(i) * _filterChannels->at(i));
hipMemcpy(_filterConns->at(i).dFilterConns, _filterConns->at(i).hFilterConns,
sizeof(int) * _groups->at(i) * _filterChannels->at(i), hipMemcpyHostToDevice);
cutilCheckMsg("hipMemcpy: failed");
}
}
}
/*
* =======================
* ConvLayer
* =======================
*/
ConvLayer::ConvLayer(ConvNet* convNet, PyObject* paramsDict) : LocalLayer(convNet, paramsDict, true) {
_partialSum = pyDictGetInt(paramsDict, "partialSum");
_sharedBiases = pyDictGetInt(paramsDict, "sharedBiases");
}
void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
convFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
} else {
convFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
if (scaleTargets == 0) {
if (_sharedBiases) {
getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters);
getActs().addVector(_biases->getW());
getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules));
} else {
getActs().addVector(_biases->getW());
}
}
}
void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
int numCases = v.getNumCols();
float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases;
if (_sharedBiases) {
v.reshape(_numFilters, v.getNumElements() / _numFilters);
_biases->getGrad().addSum(v, 1, 0, scaleBGrad);
v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules));
} else {
_biases->getGrad().addSum(v, 1, 0, scaleBGrad);
}
}
void ConvLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) {
int numCases = v.getNumCols();
NVMatrix& tgt = _partialSum > 0 ? _weightGradTmp : _weights[inpIdx].getGrad();
float scaleWGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases;
float scaleTargets = _weights[inpIdx].getNumUpdates() > 0 && _partialSum == 0; // ? 1 : 0;
if (_randSparse->at(inpIdx)) {
convWeightActsSparse(_prev[inpIdx]->getActs(), v, tgt, _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX,
_filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad);
} else {
convWeightActs(_prev[inpIdx]->getActs(), v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad);
}
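// With _partialSum > 0, convWeightActs above produced one partial weight gradient per group of
// _partialSum filter modules in _weightGradTmp; sum those partials over the module groups to
// obtain the full gradient.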
if (_partialSum > 0) {
scaleTargets = _weights[inpIdx].getNumUpdates() > 0;
_weightGradTmp.reshape(_modules / _partialSum, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters);
_weights[inpIdx].getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1);
_weights[inpIdx].getGrad().reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters);
}
}
void ConvLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
NVMatrix& tgt = _overSample->at(inpIdx) > 1 ? _actGradTmp : _prev[inpIdx]->getActsGrad();
convImgActsSparse(v, *_weights[inpIdx], tgt, _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx),
_channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
if (_overSample->at(inpIdx) > 1) {
_actGradTmp.reshape(_overSample->at(inpIdx), _actGradTmp.getNumElements() / _overSample->at(inpIdx));
_actGradTmp.sum(0, _prev[inpIdx]->getActsGrad());
_prev[inpIdx]->getActsGrad().reshape(_prev[inpIdx]->getActsGrad().getNumElements() / v.getNumCols(), v.getNumCols());
}
} else {
convImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
}
void ConvLayer::truncBwdActs() {
LocalLayer::truncBwdActs();
if (_conserveMem) {
_weightGradTmp.truncate();
_actGradTmp.truncate();
}
}
/*
* =======================
* LocalUnsharedLayer
* =======================
*/
LocalUnsharedLayer::LocalUnsharedLayer(ConvNet* convNet, PyObject* paramsDict) : LocalLayer(convNet, paramsDict, false) {
}
void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
localFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
} else {
localFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
if (scaleTargets == 0) {
getActs().addVector(_biases->getW());
}
}
void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
int numCases = v.getNumCols();
float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases;
_biases->getGrad().addSum(v, 1, 0, scaleBGrad);
}
void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) {
int numCases = v.getNumCols();
float scaleInc = (passType != PASS_GC && _weights[inpIdx].getNumUpdates() == 0) * _weights[inpIdx].getMom(); // momentum
float scaleWGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases; // eps / numCases
if (_randSparse->at(inpIdx)) {
localWeightActsSparse(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getInc(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx),
_channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad);
} else {
localWeightActs(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getInc(), _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx),
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad);
}
}
void LocalUnsharedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
localImgActsSparse(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
} else {
localImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(),_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
}
/*
* =======================
* SoftmaxLayer
* =======================
*/
SoftmaxLayer::SoftmaxLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, true) {
}
void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& input = *_inputs[0];
NVMatrix& max = input.max(1);
input.addVector(max, -1, getActs());
getActs().apply(NVMatrixOps::Exp());
NVMatrix& sum = getActs().sum(1);
getActs().eltwiseDivideByVector(sum);
delete &max;
delete &sum;
}
void SoftmaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
bool doLogregGrad = _next.size() == 1 && _next[0]->getType() == "cost.logreg";
if (doLogregGrad) {
NVMatrix& labels = _next[0]->getPrev()[0]->getActs();
float gradCoeff = dynamic_cast<CostLayer*>(_next[0])->getCoeff();
computeLogregSoftmaxGrad(labels, getActs(), _prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff);
} else {
computeSoftmaxGrad(getActs(), v, _prev[0]->getActsGrad(), scaleTargets == 1);
}
}
/*
* =======================
* EltwiseSumLayer
* =======================
*/
EltwiseSumLayer::EltwiseSumLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_coeffs = pyDictGetFloatV(paramsDict, "coeffs");
}
void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (scaleTargets == 0) {
_inputs[inpIdx]->scale(_coeffs->at(inpIdx), getActs());
} else {
getActs().add(*_inputs[inpIdx], _coeffs->at(inpIdx));
}
}
void EltwiseSumLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (scaleTargets == 0 ) {
v.scale(_coeffs->at(inpIdx), _prev[inpIdx]->getActsGrad());
} else {
assert(&_prev[inpIdx]->getActsGrad() != &v);
_prev[inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx));
}
}
/*
* =======================
* EltwiseMaxLayer
* =======================
*/
EltwiseMaxLayer::EltwiseMaxLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
}
void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (inpIdx == 1) { // inpIdx == 0 falls through; once the second input arrives, combine it with the first
_inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], getActs());
} else if (inpIdx > 1) {
getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]);
}
}
void EltwiseMaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[inpIdx]->getActsGrad(), scaleTargets != 0);
}
/*
* =======================
* DataLayer
* =======================
*/
DataLayer::DataLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_dataIdx = pyDictGetInt(paramsDict, "dataIdx");
}
void DataLayer::fprop(PASS_TYPE passType) {
throw string("No dava given!");
}
void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
}
void DataLayer::fprop(NVMatrixV& data, PASS_TYPE passType) {
_outputs = data[_dataIdx];
fpropNext(passType);
}
bool DataLayer::isGradProducer() {
return false;
}
/*
* =====================
* PoolLayer
* =====================
*/
PoolLayer::PoolLayer(ConvNet* convNet, PyObject* paramsDict, bool trans)
: Layer(convNet, paramsDict, trans) {
_channels = pyDictGetInt(paramsDict, "channels");
_sizeX = pyDictGetInt(paramsDict, "sizeX");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
_pool = pyDictGetString(paramsDict, "pool");
}
PoolLayer& PoolLayer::makePoolLayer(ConvNet* convNet, PyObject* paramsDict) {
string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new MaxPoolLayer(convNet, paramsDict);
} else if(_pool == "avg") {
return *new AvgPoolLayer(convNet, paramsDict);
}
throw string("Unknown pooling layer type ") + _pool;
}
/*
* =====================
* AvgPoolLayer
* =====================
*/
AvgPoolLayer::AvgPoolLayer(ConvNet* convNet, PyObject* paramsDict) : PoolLayer(convNet, paramsDict, false) {
}
void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler());
}
void AvgPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalAvgUndo(v, _prev[0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1);
}
/*
* =====================
* MaxPoolLayer
* =====================
*/
MaxPoolLayer::MaxPoolLayer(ConvNet* convNet, PyObject* paramsDict) : PoolLayer(convNet, paramsDict, false) {
}
void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler());
}
void MaxPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalMaxUndo(_prev[0]->getActs(), v, getActs(), _prev[inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1);
}
/*
* =====================
* NailbedLayer
* =====================
*/
NailbedLayer::NailbedLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
}
void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1);
}
void NailbedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convBedOfNailsUndo(v, _prev[0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1);
}
/*
* =====================
* GaussianBlurLayer
* =====================
*/
GaussianBlurLayer::GaussianBlurLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_hFilter = pyDictGetMatrix(paramsDict, "filter");
}
void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1);
convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1);
}
// This is here just for completeness' sake. Why would you backpropagate
// through a blur filter?
void GaussianBlurLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& tgt1 = _prev[0]->getRcvdBInputs() > 0 ? _actGradsTmp : _prev[0]->getActsGrad();
convGaussianBlur(v, _filter, tgt1, true, _channels, 0, 1);
convGaussianBlur(tgt1, _filter, _prev[0]->getActsGrad(), false, _channels, scaleTargets, 1);
}
void GaussianBlurLayer::copyToGPU() {
_filter.copyFromHost(*_hFilter, true);
}
/*
* =====================
* ResizeLayer
* =====================
*/
ResizeLayer::ResizeLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
_tgtSize = pyDictGetInt(paramsDict, "tgtSize");
_scale = pyDictGetFloat(paramsDict, "scale");
}
void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale);
}
// Can't do this
void ResizeLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToYUVLayer
* =====================
*/
RGBToYUVLayer::RGBToYUVLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
}
void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convRGBToYUV(*_inputs[0], getActs());
}
// Can't do this
void RGBToYUVLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToLABLayer
* =====================
*/
RGBToLABLayer::RGBToLABLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_center = pyDictGetInt(paramsDict, "center");
}
void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convRGBToLAB(*_inputs[0], getActs(), _center);
}
// Can't do this
void RGBToLABLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* ResponseNormLayer
* =====================
*/
ResponseNormLayer::ResponseNormLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_size = pyDictGetInt(paramsDict, "size");
_scale = pyDictGetFloat(paramsDict, "scale");
_pow = pyDictGetFloat(paramsDict, "pow");
}
void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow);
}
void ResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormUndo(v, _denoms, _prev[0]->getActs(), getActs(), _prev[0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ResponseNormLayer::truncBwdActs() {
Layer::truncBwdActs();
if (_conserveMem) {
_denoms.truncate();
}
}
/*
* =====================
* CrossMapResponseNormLayer
* =====================
*/
CrossMapResponseNormLayer::CrossMapResponseNormLayer(ConvNet* convNet, PyObject* paramsDict) : ResponseNormLayer(convNet, paramsDict) {
_blocked = pyDictGetInt(paramsDict, "blocked");
}
void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormCrossMap(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow, _blocked);
}
void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormCrossMapUndo(v, _denoms, _prev[0]->getActs(), getActs(), _prev[0]->getActsGrad(), _channels, _size, _scale, _pow, _blocked, scaleTargets, 1);
}
/*
* =====================
* ContrastNormLayer
* =====================
*/
ContrastNormLayer::ContrastNormLayer(ConvNet* convNet, PyObject* paramsDict) : ResponseNormLayer(convNet, paramsDict) {
_imgSize = pyDictGetInt(paramsDict, "imgSize");
}
void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& images = *_inputs[0];
convLocalPool(images, _meanDiffs, _channels, _size, -_size/2, 1, _imgSize, AvgPooler());
_meanDiffs.add(images, -1, 1);
convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow);
}
void ContrastNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ContrastNormLayer::truncBwdActs() {
ResponseNormLayer::truncBwdActs();
if (_conserveMem) {
_meanDiffs.truncate();
}
}
/*
* =====================
* CostLayer
* =====================
*/
CostLayer::CostLayer(ConvNet* convNet, PyObject* paramsDict, bool trans)
: Layer(convNet, paramsDict, trans) {
_coeff = pyDictGetFloat(paramsDict, "coeff");
}
float CostLayer::getCoeff() {
return _coeff;
}
void CostLayer::bprop(PASS_TYPE passType) {
if (_coeff != 0) {
Layer::bprop(passType);
}
}
bool CostLayer::isGradProducer() {
return _coeff != 0;
}
doublev& CostLayer::getCost() {
doublev& v = *new doublev();
v.insert(v.begin(), _costv.begin(), _costv.end());
return v;
}
CostLayer& CostLayer::makeCostLayer(ConvNet* convNet, string& type, PyObject* paramsDict) {
if (type == "cost.logreg") {
return *new LogregCostLayer(convNet, paramsDict);
} else if (type == "cost.sum2") {
return *new SumOfSquaresCostLayer(convNet, paramsDict);
}
throw string("Unknown cost layer type ") + type;
}
/*
* =====================
* LogregCostLayer
* =====================
*/
LogregCostLayer::LogregCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) {
}
void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getNumElements();
NVMatrix& trueLabelLogProbs = getActs(), correctProbs;
computeLogregCost(labels, probs, trueLabelLogProbs, correctProbs);
_costv.clear();
_costv.push_back(-trueLabelLogProbs.sum());
_costv.push_back(numCases - correctProbs.sum());
}
}
void LogregCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
NVMatrix& labels = _prev[0]->getActs();
NVMatrix& probs = _prev[1]->getActs();
NVMatrix& target = _prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
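// Sketch of the underlying algebra: with p = softmax(z) and a one-hot label vector y, the cost is
// C = -sum_i y_i * log(p_i), and the gradient taken directly with respect to the softmax input is
// dC/dz_i = p_i - y_i. The fused path computes this difference and never divides by p_i, whereas
// going through dC/dp_i = -y_i / p_i first does, hence the stability concern above.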
bool doWork = _prev[1]->getNext().size() > 1 || _prev[1]->getType() != "softmax";
if (doWork) {
computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
}
/*
* =====================
* SumOfSquaresCostLayer
* =====================
*/
SumOfSquaresCostLayer::SumOfSquaresCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) {
}
void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
_inputs[0]->apply(NVMatrixOps::Square(), getActs());
_costv.clear();
_costv.push_back(getActs().sum());
}
void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_prev[inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -2 * _coeff);
}
| 0eb366000a8b1366e2aae6b5df2cdd7939830d46.cu | /*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cutil_inline.h>
#include <iostream>
#include <math.h>
#include <layer_kernels.cuh>
#include <layer.cuh>
#include <data.cuh>
#include <util.cuh>
#include <cudaconv2.cuh>
#include <matrix.h>
#include <Python.h>
using namespace std;
/*
* =======================
* Layer
* =======================
*/
Layer::Layer(ConvNet* convNet, PyObject* paramsDict, bool trans) :
_convNet(convNet), _trans(trans) {
_name = pyDictGetString(paramsDict, "name");
_type = pyDictGetString(paramsDict, "type");
_numGradProducersNext = 0;
_foundGradConsumers = false;
_gradConsumer = pyDictGetInt(paramsDict, "gradConsumer");
_actsTarget = pyDictGetInt(paramsDict, "actsTarget");
_actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget");
_conserveMem = pyDictGetInt(paramsDict, "conserveMem");
_outputs = _actsTarget < 0 ? new NVMatrix() : NULL;
_actsGrad = _actsGradTarget < 0 ? new NVMatrix() : NULL;
}
void Layer::fpropNext(PASS_TYPE passType) {
for (int i = 0; i < _next.size(); i++) {
_next[i]->fprop(passType);
}
}
void Layer::truncBwdActs() {
// Only truncate actsGrad if I own it
if (_conserveMem && _actsGradTarget < 0) {
getActsGrad().truncate();
}
if (_conserveMem) {
getActs().truncate();
}
}
void Layer::fprop(PASS_TYPE passType) {
_rcvdFInputs += 1;
if (_rcvdFInputs == _prev.size()) {
NVMatrixV v;
for (int i = 0; i < _prev.size(); i++) {
v.push_back(&_prev[i]->getActs());
}
fprop(v, passType);
}
}
void Layer::fprop(NVMatrix& v, PASS_TYPE passType) {
NVMatrixV vl;
vl.push_back(&v);
fprop(vl, passType);
}
void Layer::fprop(NVMatrixV& v, PASS_TYPE passType) {
assert(v.size() == _prev.size());
_inputs.clear();
_inputs.insert(_inputs.begin(), v.begin(), v.end());
_outputs = _actsTarget < 0 ? _outputs : _inputs[_actsTarget];
_rcvdFInputs = _prev.size();
for (NVMatrixV::iterator it = v.begin(); it != v.end(); ++it) {
(*it)->transpose(_trans);
}
getActs().transpose(_trans);
// First do fprop on the input whose acts matrix I'm sharing, if any
if (_actsTarget >= 0) {
fpropActs(_actsTarget, 0, passType);
}
// Then add the rest of the inputs to that
for (int i = 0; i < _prev.size(); i++) {
if (i != _actsTarget) {
fpropActs(i, _actsTarget >= 0 || i > 0, passType);
}
}
fpropNext(passType);
}
void Layer::bprop(PASS_TYPE passType) {
if (_rcvdBInputs == _numGradProducersNext) {
_rcvdBInputs++; // avoid doing bprop computation twice
bprop(getActsGrad(), passType);
}
}
void Layer::bprop(NVMatrix& v, PASS_TYPE passType) {
v.transpose(_trans);
for (int i = 0; i < _prev.size(); i++) {
_prev[i]->getActs().transpose(_trans);
_prev[i]->getActsGrad().transpose(_trans);
}
getActs().transpose(_trans);
bpropCommon(v, passType);
if (isGradProducer()) {
// First propagate activity gradient to all layers whose activity
// gradient matrix I'm definitely not sharing.
for (int i = 0; i < _prev.size(); i++) {
if (_prev[i]->isGradConsumer() && _actsGradTarget != i) {
bpropActs(v, i, _prev[i]->getRcvdBInputs() > 0 ? 1 : 0, passType);
_prev[i]->incRcvdBInputs();
}
}
// Then propagate activity gradient to the layer whose activity gradient
// matrix I'm sharing, if any.
if (_actsGradTarget >= 0 && _prev[_actsGradTarget]->isGradConsumer()) {
bpropActs(v, _actsGradTarget, _prev[_actsGradTarget]->getRcvdBInputs() > 0 ? 1 : 0, passType);
_prev[_actsGradTarget]->incRcvdBInputs();
}
}
truncBwdActs();
if (isGradProducer()) {
for (int i = 0; i < _prev.size(); i++) {
if (_prev[i]->isGradConsumer()) {
_prev[i]->bprop(passType);
}
}
}
}
void Layer::reset() {
_rcvdFInputs = 0;
_rcvdBInputs = 0;
}
string& Layer::getName() {
return _name;
}
string& Layer::getType() {
return _type;
}
int Layer::getRcvdFInputs() {
return _rcvdFInputs;
}
int Layer::getRcvdBInputs() {
return _rcvdBInputs;
}
int Layer::incRcvdBInputs() {
return ++_rcvdBInputs;
}
void Layer::addNext(Layer* l) {
_next.push_back(l);
_numGradProducersNext += l->isGradProducer();
}
void Layer::addPrev(Layer* l) {
_prev.push_back(l);
}
void Layer::postInit() {
// _outputs = _actsTarget < 0 ? new NVMatrix() : &_prev[_actsTarget]->getActs();
_actsGrad = _actsGradTarget < 0 ? new NVMatrix() : &_prev[_actsGradTarget]->getActsGrad();
}
// Does this layer, or some layer below it, need the gradient
// for parameter updates?
// Only weight layers should be grad consumers themselves.
bool Layer::isGradConsumer() {
if (!_foundGradConsumers) {
for (int i = 0; i < _prev.size(); i++) {
_gradConsumer |= _prev[i]->isGradConsumer();
}
_foundGradConsumers = true;
}
return _gradConsumer;
}
// Does this layer produce gradient for layers below?
bool Layer::isGradProducer() {
return true;
}
vector<Layer*>& Layer::getPrev() {
return _prev;
}
vector<Layer*>& Layer::getNext() {
return _next;
}
NVMatrix& Layer::getActs() {
assert(_outputs != NULL);
return *_outputs;
}
NVMatrix& Layer::getActsGrad() {
assert(_actsGrad != NULL);
return *_actsGrad;
}
/*
* =======================
* NeuronLayer
* =======================
*/
NeuronLayer::NeuronLayer(ConvNet* convNet, PyObject* paramsDict)
: Layer(convNet, paramsDict, true) {
_neuron = &Neuron::makeNeuron(PyDict_GetItemString(paramsDict, "neuron"));
}
void NeuronLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_neuron->computeInputGrad(v, _prev[0]->getActsGrad(), scaleTargets > 0);
}
void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
_neuron->activate(*_inputs[0], getActs());
}
/*
* =======================
* WeightLayer
* =======================
*/
WeightLayer::WeightLayer(ConvNet* convNet, PyObject* paramsDict, bool trans, bool useGrad) :
Layer(convNet, paramsDict, trans) {
MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights");
MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc");
Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases");
Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc");
floatv& momW = *pyDictGetFloatV(paramsDict, "momW");
float momB = pyDictGetFloat(paramsDict, "momB");
floatv& epsW = *pyDictGetFloatV(paramsDict, "epsW");
float epsB = pyDictGetFloat(paramsDict, "epsB");
floatv& wc = *pyDictGetFloatV(paramsDict, "wc");
// Source layers for shared weights
intv& weightSourceLayerIndices = *pyDictGetIntV(paramsDict, "weightSourceLayerIndices");
// Weight matrix indices (inside the above source layers) for shared weights
intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices");
for (int i = 0; i < weightSourceLayerIndices.size(); i++) {
int srcLayerIdx = weightSourceLayerIndices[i];
int matrixIdx = weightSourceMatrixIndices[i];
if (srcLayerIdx == convNet->getNumLayers()) { // Current layer
_weights.addWeights(*new Weights(_weights[matrixIdx], epsW[i]));
} else if (srcLayerIdx >= 0) {
WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNet->getLayer(srcLayerIdx));
Weights* srcWeights = &srcLayer.getWeights(matrixIdx);
_weights.addWeights(*new Weights(*srcWeights, epsW[i]));
} else {
_weights.addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], epsW[i], wc[i], momW[i], useGrad));
}
}
_biases = new Weights(hBiases, hBiasesInc, epsB, 0, momB, true);
// Epsilons for finite-difference gradient checking operation
_wStep = 0.001;
_bStep = 0.002;
delete &weightSourceLayerIndices;
delete &weightSourceMatrixIndices;
delete &hWeights;
delete &hWeightsInc;
delete &momW;
delete &epsW;
delete &wc;
if (getName() == "noise" && PyDict_GetItemString(paramsDict, "testWeight") != NULL) {
Matrix& testWeight = *pyDictGetMatrix(paramsDict, "testWeight");
Matrix& testWeightInc = *pyDictGetMatrix(paramsDict, "testWeightInc");
_weights.addWeights(*new Weights(testWeight, testWeightInc,0, 0, 0, false));
}
}
void WeightLayer::bpropCommon(NVMatrix& v, PASS_TYPE passType) {
if (_biases->getEps() > 0) {
bpropBiases(v, passType);
}
for (int i = 0; i < _weights.getSize(); i++) {
if (_weights[i].getEps() > 0) {
bpropWeights(v, i, passType);
// Increment its number of updates
_weights[i].incNumUpdates();
}
}
}
void WeightLayer::updateWeights() {
if (getName() == "noise") {
_weights[0].getGrad().maxWithScalar(-0.1);
_weights[0].getGrad().minWithScalar(0.1);
}
_weights.update();
_biases->update();
}
void WeightLayer::copyToCPU() {
_weights.copyToCPU();
_biases->copyToCPU();
}
void WeightLayer::copyToGPU() {
_weights.copyToGPU();
_biases->copyToGPU();
}
void WeightLayer::checkGradients() {
for (int i = 0; i < _weights.getSize(); i++) {
_convNet->checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights[i]);
}
_convNet->checkGradient(_name + " biases", _bStep, *_biases);
}
Weights& WeightLayer::getWeights(int idx) {
return _weights[idx];
}
void WeightLayer::adjustLearningRate(float factor) {
for (int i = 0; i < _weights.getSize(); i++) {
_weights[i].setEps(factor * _weights[i].getEps());
}
_biases->setEps(factor * _biases->getEps());
}
/*
* =======================
* FCLayer
* =======================
*/
FCLayer::FCLayer(ConvNet* convNet, PyObject* paramsDict) : WeightLayer(convNet, paramsDict, true, false) {
_wStep = 0.1;
_bStep = 0.01;
}
void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (getName() == "noise" && passType == PASS_TEST) {
if (_weights.getSize() > 1) {
getActs().addProduct(*_inputs[inpIdx], *_weights[1], scaleTargets, 1);
} else {
_inputs[inpIdx]->addScalar(0, getActs()); // noise layer does nothing during test
}
return;
}
getActs().addProduct(*_inputs[inpIdx], *_weights[inpIdx], scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(_biases->getW());
}
}
void FCLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& weights_T = _weights[inpIdx].getW().getTranspose();
_prev[inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1);
delete &weights_T;
}
void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
int numCases = v.getNumRows();
float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases;
_biases->getGrad().addSum(v, 0, 0, scaleBGrad);
}
void FCLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) {
int numCases = v.getNumRows();
NVMatrix& prevActs_T = _prev[inpIdx]->getActs().getTranspose();
float scaleInc = (_weights[inpIdx].getNumUpdates() == 0 && passType != PASS_GC) * _weights[inpIdx].getMom();
float scaleGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases;
_weights[inpIdx].getInc().addProduct(prevActs_T, v, scaleInc, scaleGrad);
delete &prevActs_T;
}
/*
* =======================
* LocalLayer
* =======================
*/
LocalLayer::LocalLayer(ConvNet* convNet, PyObject* paramsDict, bool useGrad)
: WeightLayer(convNet, paramsDict, false, useGrad) {
_padding = pyDictGetIntV(paramsDict, "padding");
_stride = pyDictGetIntV(paramsDict, "stride");
_filterSize = pyDictGetIntV(paramsDict, "filterSize");
_channels = pyDictGetIntV(paramsDict, "channels");
_imgSize = pyDictGetIntV(paramsDict, "imgSize");
_numFilters = pyDictGetInt(paramsDict, "filters");
_groups = pyDictGetIntV(paramsDict, "groups");
_filterChannels = pyDictGetIntV(paramsDict, "filterChannels");
_randSparse = pyDictGetIntV(paramsDict, "randSparse");
_overSample = pyDictGetIntV(paramsDict, "overSample");
_filterPixels = pyDictGetIntV(paramsDict, "filterPixels");
_imgPixels = pyDictGetIntV(paramsDict, "imgPixels");
_modulesX = pyDictGetInt(paramsDict, "modulesX");
_modules = pyDictGetInt(paramsDict, "modules");
// It's a vector on the heap to be consistent with all the others...
_filterConns = new vector<FilterConns>();
PyObject* pyFilterConns = PyDict_GetItemString(paramsDict, "filterConns");
for (int i = 0; i < _randSparse->size(); i++) {
FilterConns fc;
if (_randSparse->at(i)) {
fc.hFilterConns = getIntA(PyList_GET_ITEM(pyFilterConns, i));
}
_filterConns->push_back(fc);
}
}
void LocalLayer::copyToGPU() {
WeightLayer::copyToGPU();
for (int i = 0; i < _prev.size(); i++) {
if (_randSparse->at(i)) { // Copy to GPU vector that describes sparse random connectivity
cudaMalloc(&_filterConns->at(i).dFilterConns, sizeof(int) * _groups->at(i) * _filterChannels->at(i));
cudaMemcpy(_filterConns->at(i).dFilterConns, _filterConns->at(i).hFilterConns,
sizeof(int) * _groups->at(i) * _filterChannels->at(i), cudaMemcpyHostToDevice);
cutilCheckMsg("cudaMemcpy: failed");
}
}
}
/*
* =======================
* ConvLayer
* =======================
*/
ConvLayer::ConvLayer(ConvNet* convNet, PyObject* paramsDict) : LocalLayer(convNet, paramsDict, true) {
_partialSum = pyDictGetInt(paramsDict, "partialSum");
_sharedBiases = pyDictGetInt(paramsDict, "sharedBiases");
}
void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
convFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
} else {
convFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
if (scaleTargets == 0) {
if (_sharedBiases) {
getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters);
getActs().addVector(_biases->getW());
getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules));
} else {
getActs().addVector(_biases->getW());
}
}
}
void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
int numCases = v.getNumCols();
float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases;
if (_sharedBiases) {
v.reshape(_numFilters, v.getNumElements() / _numFilters);
_biases->getGrad().addSum(v, 1, 0, scaleBGrad);
v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules));
} else {
_biases->getGrad().addSum(v, 1, 0, scaleBGrad);
}
}
void ConvLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) {
int numCases = v.getNumCols();
NVMatrix& tgt = _partialSum > 0 ? _weightGradTmp : _weights[inpIdx].getGrad();
float scaleWGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases;
float scaleTargets = _weights[inpIdx].getNumUpdates() > 0 && _partialSum == 0; // ? 1 : 0;
if (_randSparse->at(inpIdx)) {
convWeightActsSparse(_prev[inpIdx]->getActs(), v, tgt, _filterConns->at(inpIdx).dFilterConns, _imgSize->at(inpIdx), _modulesX, _modulesX,
_filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad);
} else {
convWeightActs(_prev[inpIdx]->getActs(), v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _partialSum, scaleTargets, scaleWGrad);
}
if (_partialSum > 0) {
scaleTargets = _weights[inpIdx].getNumUpdates() > 0;
_weightGradTmp.reshape(_modules / _partialSum, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters);
_weights[inpIdx].getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1);
_weights[inpIdx].getGrad().reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters);
}
}
void ConvLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
NVMatrix& tgt = _overSample->at(inpIdx) > 1 ? _actGradTmp : _prev[inpIdx]->getActsGrad();
convImgActsSparse(v, *_weights[inpIdx], tgt, _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx),
_channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
if (_overSample->at(inpIdx) > 1) {
_actGradTmp.reshape(_overSample->at(inpIdx), _actGradTmp.getNumElements() / _overSample->at(inpIdx));
_actGradTmp.sum(0, _prev[inpIdx]->getActsGrad());
_prev[inpIdx]->getActsGrad().reshape(_prev[inpIdx]->getActsGrad().getNumElements() / v.getNumCols(), v.getNumCols());
}
} else {
convImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
}
void ConvLayer::truncBwdActs() {
LocalLayer::truncBwdActs();
if (_conserveMem) {
_weightGradTmp.truncate();
_actGradTmp.truncate();
}
}
/*
* =======================
* LocalUnsharedLayer
* =======================
*/
LocalUnsharedLayer::LocalUnsharedLayer(ConvNet* convNet, PyObject* paramsDict) : LocalLayer(convNet, paramsDict, false) {
}
void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
localFilterActsSparse(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
} else {
localFilterActs(*_inputs[inpIdx], *_weights[inpIdx], getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
if (scaleTargets == 0) {
getActs().addVector(_biases->getW());
}
}
void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
int numCases = v.getNumCols();
float scaleBGrad = passType == PASS_GC ? 1 : _biases->getEps() / numCases;
_biases->getGrad().addSum(v, 1, 0, scaleBGrad);
}
void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int inpIdx, PASS_TYPE passType) {
int numCases = v.getNumCols();
float scaleInc = (passType != PASS_GC && _weights[inpIdx].getNumUpdates() == 0) * _weights[inpIdx].getMom(); // momentum
float scaleWGrad = passType == PASS_GC ? 1 : _weights[inpIdx].getEps() / numCases; // eps / numCases
if (_randSparse->at(inpIdx)) {
localWeightActsSparse(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getInc(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx), _stride->at(inpIdx),
_channels->at(inpIdx), _filterChannels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad);
} else {
localWeightActs(_prev[inpIdx]->getActs(), v, _weights[inpIdx].getInc(), _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx),
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad);
}
}
void LocalUnsharedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (_randSparse->at(inpIdx)) {
localImgActsSparse(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(), _filterConns->at(inpIdx).dFilterConns,
_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX, _padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx),
_filterChannels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
} else {
localImgActs(v, *_weights[inpIdx], _prev[inpIdx]->getActsGrad(),_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
}
/*
* =======================
* SoftmaxLayer
* =======================
*/
SoftmaxLayer::SoftmaxLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, true) {
}
void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& input = *_inputs[0];
NVMatrix& max = input.max(1);
input.addVector(max, -1, getActs());
getActs().apply(NVMatrixOps::Exp());
NVMatrix& sum = getActs().sum(1);
getActs().eltwiseDivideByVector(sum);
delete &max;
delete &sum;
}
void SoftmaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
bool doLogregGrad = _next.size() == 1 && _next[0]->getType() == "cost.logreg";
if (doLogregGrad) {
NVMatrix& labels = _next[0]->getPrev()[0]->getActs();
float gradCoeff = dynamic_cast<CostLayer*>(_next[0])->getCoeff();
computeLogregSoftmaxGrad(labels, getActs(), _prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff);
} else {
computeSoftmaxGrad(getActs(), v, _prev[0]->getActsGrad(), scaleTargets == 1);
}
}
/*
* =======================
* EltwiseSumLayer
* =======================
*/
EltwiseSumLayer::EltwiseSumLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_coeffs = pyDictGetFloatV(paramsDict, "coeffs");
}
void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (scaleTargets == 0) {
_inputs[inpIdx]->scale(_coeffs->at(inpIdx), getActs());
} else {
getActs().add(*_inputs[inpIdx], _coeffs->at(inpIdx));
}
}
void EltwiseSumLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (scaleTargets == 0 ) {
v.scale(_coeffs->at(inpIdx), _prev[inpIdx]->getActsGrad());
} else {
assert(&_prev[inpIdx]->getActsGrad() != &v);
_prev[inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx));
}
}
/*
* =======================
* EltwiseMaxLayer
* =======================
*/
EltwiseMaxLayer::EltwiseMaxLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
}
void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (inpIdx == 1) { // inpIdx == 0 falls through; once the second input arrives, combine it with the first
_inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], getActs());
} else if (inpIdx > 1) {
getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]);
}
}
void EltwiseMaxLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[inpIdx]->getActsGrad(), scaleTargets != 0);
}
/*
* =======================
* DataLayer
* =======================
*/
DataLayer::DataLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_dataIdx = pyDictGetInt(paramsDict, "dataIdx");
}
void DataLayer::fprop(PASS_TYPE passType) {
throw string("No dava given!");
}
void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
}
void DataLayer::fprop(NVMatrixV& data, PASS_TYPE passType) {
_outputs = data[_dataIdx];
fpropNext(passType);
}
bool DataLayer::isGradProducer() {
return false;
}
/*
* =====================
* PoolLayer
* =====================
*/
PoolLayer::PoolLayer(ConvNet* convNet, PyObject* paramsDict, bool trans)
: Layer(convNet, paramsDict, trans) {
_channels = pyDictGetInt(paramsDict, "channels");
_sizeX = pyDictGetInt(paramsDict, "sizeX");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
_pool = pyDictGetString(paramsDict, "pool");
}
PoolLayer& PoolLayer::makePoolLayer(ConvNet* convNet, PyObject* paramsDict) {
string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new MaxPoolLayer(convNet, paramsDict);
} else if(_pool == "avg") {
return *new AvgPoolLayer(convNet, paramsDict);
}
throw string("Unknown pooling layer type ") + _pool;
}
/*
* =====================
* AvgPoolLayer
* =====================
*/
AvgPoolLayer::AvgPoolLayer(ConvNet* convNet, PyObject* paramsDict) : PoolLayer(convNet, paramsDict, false) {
}
void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler());
}
void AvgPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalAvgUndo(v, _prev[0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, scaleTargets, 1);
}
/*
* =====================
* MaxPoolLayer
* =====================
*/
MaxPoolLayer::MaxPoolLayer(ConvNet* convNet, PyObject* paramsDict) : PoolLayer(convNet, paramsDict, false) {
}
void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler());
}
void MaxPoolLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalMaxUndo(_prev[0]->getActs(), v, getActs(), _prev[inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1);
}
/*
* =====================
* NailbedLayer
* =====================
*/
NailbedLayer::NailbedLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
}
void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1);
}
void NailbedLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convBedOfNailsUndo(v, _prev[0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1);
}
/*
* =====================
* GaussianBlurLayer
* =====================
*/
GaussianBlurLayer::GaussianBlurLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_hFilter = pyDictGetMatrix(paramsDict, "filter");
}
void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1);
convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1);
}
// This is here just for completeness' sake. Why would you backpropagate
// through a blur filter?
void GaussianBlurLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& tgt1 = _prev[0]->getRcvdBInputs() > 0 ? _actGradsTmp : _prev[0]->getActsGrad();
convGaussianBlur(v, _filter, tgt1, true, _channels, 0, 1);
convGaussianBlur(tgt1, _filter, _prev[0]->getActsGrad(), false, _channels, scaleTargets, 1);
}
void GaussianBlurLayer::copyToGPU() {
_filter.copyFromHost(*_hFilter, true);
}
/*
* =====================
* ResizeLayer
* =====================
*/
ResizeLayer::ResizeLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
_tgtSize = pyDictGetInt(paramsDict, "tgtSize");
_scale = pyDictGetFloat(paramsDict, "scale");
}
void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale);
}
// Can't do this
void ResizeLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToYUVLayer
* =====================
*/
RGBToYUVLayer::RGBToYUVLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
}
void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convRGBToYUV(*_inputs[0], getActs());
}
// Can't do this
void RGBToYUVLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToLABLayer
* =====================
*/
RGBToLABLayer::RGBToLABLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_center = pyDictGetInt(paramsDict, "center");
}
void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convRGBToLAB(*_inputs[0], getActs(), _center);
}
// Can't do this
void RGBToLABLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* ResponseNormLayer
* =====================
*/
ResponseNormLayer::ResponseNormLayer(ConvNet* convNet, PyObject* paramsDict) : Layer(convNet, paramsDict, false) {
_channels = pyDictGetInt(paramsDict, "channels");
_size = pyDictGetInt(paramsDict, "size");
_scale = pyDictGetFloat(paramsDict, "scale");
_pow = pyDictGetFloat(paramsDict, "pow");
}
void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow);
}
void ResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormUndo(v, _denoms, _prev[0]->getActs(), getActs(), _prev[0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ResponseNormLayer::truncBwdActs() {
Layer::truncBwdActs();
if (_conserveMem) {
_denoms.truncate();
}
}
/*
* =====================
* CrossMapResponseNormLayer
* =====================
*/
CrossMapResponseNormLayer::CrossMapResponseNormLayer(ConvNet* convNet, PyObject* paramsDict) : ResponseNormLayer(convNet, paramsDict) {
_blocked = pyDictGetInt(paramsDict, "blocked");
}
void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormCrossMap(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow, _blocked);
}
void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormCrossMapUndo(v, _denoms, _prev[0]->getActs(), getActs(), _prev[0]->getActsGrad(), _channels, _size, _scale, _pow, _blocked, scaleTargets, 1);
}
/*
* =====================
* ContrastNormLayer
* =====================
*/
ContrastNormLayer::ContrastNormLayer(ConvNet* convNet, PyObject* paramsDict) : ResponseNormLayer(convNet, paramsDict) {
_imgSize = pyDictGetInt(paramsDict, "imgSize");
}
void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& images = *_inputs[0];
convLocalPool(images, _meanDiffs, _channels, _size, -_size/2, 1, _imgSize, AvgPooler());
_meanDiffs.add(images, -1, 1);
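// _meanDiffs.add(images, -1, 1) leaves images minus their local averages
// (assuming NVMatrix::add(b, scaleA, scaleB) == scaleA*this + scaleB*b), which is
// the mean-subtracted input that convContrastNorm below expects.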
convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow);
}
void ContrastNormLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ContrastNormLayer::truncBwdActs() {
ResponseNormLayer::truncBwdActs();
if (_conserveMem) {
_meanDiffs.truncate();
}
}
/*
* =====================
* CostLayer
* =====================
*/
CostLayer::CostLayer(ConvNet* convNet, PyObject* paramsDict, bool trans)
: Layer(convNet, paramsDict, trans) {
_coeff = pyDictGetFloat(paramsDict, "coeff");
}
float CostLayer::getCoeff() {
return _coeff;
}
void CostLayer::bprop(PASS_TYPE passType) {
if (_coeff != 0) {
Layer::bprop(passType);
}
}
bool CostLayer::isGradProducer() {
return _coeff != 0;
}
doublev& CostLayer::getCost() {
doublev& v = *new doublev();
v.insert(v.begin(), _costv.begin(), _costv.end());
return v;
}
CostLayer& CostLayer::makeCostLayer(ConvNet* convNet, string& type, PyObject* paramsDict) {
if (type == "cost.logreg") {
return *new LogregCostLayer(convNet, paramsDict);
} else if (type == "cost.sum2") {
return *new SumOfSquaresCostLayer(convNet, paramsDict);
}
throw string("Unknown cost layer type ") + type;
}
/*
* =====================
* LogregCostLayer
* =====================
*/
LogregCostLayer::LogregCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) {
}
void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getNumElements();
NVMatrix& trueLabelLogProbs = getActs(), correctProbs;
computeLogregCost(labels, probs, trueLabelLogProbs, correctProbs);
_costv.clear();
_costv.push_back(-trueLabelLogProbs.sum());
_costv.push_back(numCases - correctProbs.sum());
}
}
void LogregCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
NVMatrix& labels = _prev[0]->getActs();
NVMatrix& probs = _prev[1]->getActs();
NVMatrix& target = _prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
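// Sketch: with softmax outputs p and one-hot labels y, the chain rule collapses the
// combined logreg+softmax gradient w.r.t. the softmax input to a multiple of (y - p),
// so the near-zero probabilities never appear in a denominator.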
bool doWork = _prev[1]->getNext().size() > 1 || _prev[1]->getType() != "softmax";
if (doWork) {
computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
}
/*
* =====================
* SumOfSquaresCostLayer
* =====================
*/
SumOfSquaresCostLayer::SumOfSquaresCostLayer(ConvNet* convNet, PyObject* paramsDict) : CostLayer(convNet, paramsDict, false) {
}
void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType) {
_inputs[0]->apply(NVMatrixOps::Square(), getActs());
_costv.clear();
_costv.push_back(getActs().sum());
}
void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_prev[inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -2 * _coeff);
}
|
2051b5f5a59738d9e311714df40ab90ff354bf24.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <float.h>
#include <math.h>
#include <time.h>
#include "../../constants.h"
#define N_RADIUS 4
#define N_THREADS_PER_PLANE_DIM_X 16
#define N_THREADS_PER_PLANE_DIM_Y 16
__global__ void target_inner_3d_kernel(
llint nx, llint ny, llint nz,
llint x3, llint x4, llint y3, llint y4, llint z3, llint z4,
llint lx, llint ly, llint lz,
float hdx_2, float hdy_2, float hdz_2,
float coef0,
float coefx_1, float coefx_2, float coefx_3, float coefx_4,
float coefy_1, float coefy_2, float coefy_3, float coefy_4,
float coefz_1, float coefz_2, float coefz_3, float coefz_4,
const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp,
const float *__restrict__ phi, const float *__restrict__ eta
) {
const int nz_s_u = 1+2*N_RADIUS;
__shared__ float s_u[nz_s_u][N_THREADS_PER_PLANE_DIM_Y+2*N_RADIUS][N_THREADS_PER_PLANE_DIM_X+2*N_RADIUS];
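// s_u acts as a circular buffer of 2*N_RADIUS+1 y-z tiles (planes along x), indexed by
// i modulo nz_s_u; each streaming iteration below overwrites the oldest plane with the
// plane N_RADIUS ahead of the current x index.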
const llint j0 = y3 + blockIdx.y * blockDim.y;
const llint k0 = z3 + blockIdx.x * blockDim.x;
const llint je = min(j0 + N_THREADS_PER_PLANE_DIM_Y, y4);
const llint ke = min(k0 + N_THREADS_PER_PLANE_DIM_X, z4);
const llint j = j0 + threadIdx.y;
const llint k = k0 + threadIdx.x;
const llint sje = (j0+N_THREADS_PER_PLANE_DIM_Y<y4) ? N_THREADS_PER_PLANE_DIM_Y : ((y4-y3-1)%N_THREADS_PER_PLANE_DIM_Y+1);
const llint ske = (k0+N_THREADS_PER_PLANE_DIM_X<z4) ? N_THREADS_PER_PLANE_DIM_X : ((z4-z3-1)%N_THREADS_PER_PLANE_DIM_X+1);
const llint suj = threadIdx.y + N_RADIUS;
const llint suk = threadIdx.x + N_RADIUS;
// Preparation
for (llint i = x3-N_RADIUS; i < x3+N_RADIUS; i++) {
int z = (i + nz_s_u) % nz_s_u;
s_u[z][suj][suk] = u[IDX3_l(i,j,k)];
if (threadIdx.y < N_RADIUS) {
s_u[z][threadIdx.y][suk] = u[IDX3_l(i, j - N_RADIUS, k)];
s_u[z][threadIdx.y+sje+N_RADIUS][suk] = u[IDX3_l(i, threadIdx.y+je, k)];
}
if (threadIdx.x < N_RADIUS) {
s_u[z][suj][threadIdx.x] = u[IDX3_l(i,j,k - N_RADIUS)];
s_u[z][suj][threadIdx.x+ske+N_RADIUS] = u[IDX3_l(i,j,threadIdx.x+ke)];
}
}
// Streaming
for (llint i = x3; i < x4; i++) {
int z = (i + nz_s_u) % nz_s_u;
int z_R = (i + N_RADIUS + nz_s_u) % nz_s_u;
s_u[z_R][suj][suk] = u[IDX3_l(i+N_RADIUS,j,k)];
if (threadIdx.y < N_RADIUS) {
s_u[z_R][threadIdx.y][suk] = u[IDX3_l(i+N_RADIUS, j - N_RADIUS, k)];
s_u[z_R][threadIdx.y+sje+N_RADIUS][suk] = u[IDX3_l(i+N_RADIUS, threadIdx.y+je, k)];
}
if (threadIdx.x < N_RADIUS) {
s_u[z_R][suj][threadIdx.x] = u[IDX3_l(i+N_RADIUS,j,k - N_RADIUS)];
s_u[z_R][suj][threadIdx.x+ske+N_RADIUS] = u[IDX3_l(i+N_RADIUS,j,threadIdx.x+ke)];
}
__syncthreads();
if (j < y4 && k < z4) {
float lap = __fmaf_rn(coef0, s_u[z][suj][suk]
, __fmaf_rn(coefx_1, __fadd_rn(s_u[(z+1)%nz_s_u][suj][suk],s_u[(z-1+nz_s_u)%nz_s_u][suj][suk])
, __fmaf_rn(coefy_1, __fadd_rn(s_u[z][suj+1][suk],s_u[z][suj-1][suk])
, __fmaf_rn(coefz_1, __fadd_rn(s_u[z][suj][suk+1],s_u[z][suj][suk-1])
, __fmaf_rn(coefx_2, __fadd_rn(s_u[(z+2)%nz_s_u][suj][suk],s_u[(z-2+nz_s_u)%nz_s_u][suj][suk])
, __fmaf_rn(coefy_2, __fadd_rn(s_u[z][suj+2][suk],s_u[z][suj-2][suk])
, __fmaf_rn(coefz_2, __fadd_rn(s_u[z][suj][suk+2],s_u[z][suj][suk-2])
, __fmaf_rn(coefx_3, __fadd_rn(s_u[(z+3)%nz_s_u][suj][suk],s_u[(z-3+nz_s_u)%nz_s_u][suj][suk])
, __fmaf_rn(coefy_3, __fadd_rn(s_u[z][suj+3][suk],s_u[z][suj-3][suk])
, __fmaf_rn(coefz_3, __fadd_rn(s_u[z][suj][suk+3],s_u[z][suj][suk-3])
, __fmaf_rn(coefx_4, __fadd_rn(s_u[(z+4)%nz_s_u][suj][suk],s_u[(z-4+nz_s_u)%nz_s_u][suj][suk])
, __fmaf_rn(coefy_4, __fadd_rn(s_u[z][suj+4][suk],s_u[z][suj-4][suk])
, __fmul_rn(coefz_4, __fadd_rn(s_u[z][suj][suk+4],s_u[z][suj][suk-4])
)))))))))))));
v[IDX3_l(i,j,k)] = __fmaf_rn(2.f, s_u[z][suj][suk],
__fmaf_rn(vp[IDX3(i,j,k)], lap, -v[IDX3_l(i,j,k)])
);
}
}
}
__global__ void target_pml_3d_kernel(
llint nx, llint ny, llint nz,
llint x3, llint x4, llint y3, llint y4, llint z3, llint z4,
llint lx, llint ly, llint lz,
float hdx_2, float hdy_2, float hdz_2,
float coef0,
float coefx_1, float coefx_2, float coefx_3, float coefx_4,
float coefy_1, float coefy_2, float coefy_3, float coefy_4,
float coefz_1, float coefz_2, float coefz_3, float coefz_4,
const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp,
float *__restrict__ phi, const float *__restrict__ eta
) {
const int nz_s_u = 1+2*N_RADIUS;
__shared__ float s_u[nz_s_u][N_THREADS_PER_PLANE_DIM_Y+2*N_RADIUS][N_THREADS_PER_PLANE_DIM_X+2*N_RADIUS];
const llint j0 = y3 + blockIdx.y * blockDim.y;
const llint k0 = z3 + blockIdx.x * blockDim.x;
const llint je = min(j0 + N_THREADS_PER_PLANE_DIM_Y, y4);
const llint ke = min(k0 + N_THREADS_PER_PLANE_DIM_X, z4);
const llint j = j0 + threadIdx.y;
const llint k = k0 + threadIdx.x;
const llint sje = (j0+N_THREADS_PER_PLANE_DIM_Y<y4) ? N_THREADS_PER_PLANE_DIM_Y : ((y4-y3-1)%N_THREADS_PER_PLANE_DIM_Y+1);
const llint ske = (k0+N_THREADS_PER_PLANE_DIM_X<z4) ? N_THREADS_PER_PLANE_DIM_X : ((z4-z3-1)%N_THREADS_PER_PLANE_DIM_X+1);
const llint suj = threadIdx.y + N_RADIUS;
const llint suk = threadIdx.x + N_RADIUS;
// Preparation
for (llint i = x3-N_RADIUS; i < x3+N_RADIUS; i++) {
int z = (i + nz_s_u) % nz_s_u;
s_u[z][suj][suk] = u[IDX3_l(i,j,k)];
if (threadIdx.y < N_RADIUS) {
s_u[z][threadIdx.y][suk] = u[IDX3_l(i, j - N_RADIUS, k)];
s_u[z][threadIdx.y+sje+N_RADIUS][suk] = u[IDX3_l(i, threadIdx.y+je, k)];
}
if (threadIdx.x < N_RADIUS) {
s_u[z][suj][threadIdx.x] = u[IDX3_l(i,j,k - N_RADIUS)];
s_u[z][suj][threadIdx.x+ske+N_RADIUS] = u[IDX3_l(i,j,threadIdx.x+ke)];
}
}
// Streaming
for (llint i = x3; i < x4; i++) {
int z = (i + nz_s_u) % nz_s_u;
int z_R = (i + N_RADIUS + nz_s_u) % nz_s_u;
s_u[z_R][suj][suk] = u[IDX3_l(i+N_RADIUS,j,k)];
if (threadIdx.y < N_RADIUS) {
s_u[z_R][threadIdx.y][suk] = u[IDX3_l(i+N_RADIUS, j - N_RADIUS, k)];
s_u[z_R][threadIdx.y+sje+N_RADIUS][suk] = u[IDX3_l(i+N_RADIUS, threadIdx.y+je, k)];
}
if (threadIdx.x < N_RADIUS) {
s_u[z_R][suj][threadIdx.x] = u[IDX3_l(i+N_RADIUS,j,k - N_RADIUS)];
s_u[z_R][suj][threadIdx.x+ske+N_RADIUS] = u[IDX3_l(i+N_RADIUS,j,threadIdx.x+ke)];
}
__syncthreads();
if (j < y4 && k < z4) {
float lap = __fmaf_rn(coef0, s_u[z][suj][suk]
, __fmaf_rn(coefx_1, __fadd_rn(s_u[(z+1)%nz_s_u][suj][suk],s_u[(z-1+nz_s_u)%nz_s_u][suj][suk])
, __fmaf_rn(coefy_1, __fadd_rn(s_u[z][suj+1][suk],s_u[z][suj-1][suk])
, __fmaf_rn(coefz_1, __fadd_rn(s_u[z][suj][suk+1],s_u[z][suj][suk-1])
, __fmaf_rn(coefx_2, __fadd_rn(s_u[(z+2)%nz_s_u][suj][suk],s_u[(z-2+nz_s_u)%nz_s_u][suj][suk])
, __fmaf_rn(coefy_2, __fadd_rn(s_u[z][suj+2][suk],s_u[z][suj-2][suk])
, __fmaf_rn(coefz_2, __fadd_rn(s_u[z][suj][suk+2],s_u[z][suj][suk-2])
, __fmaf_rn(coefx_3, __fadd_rn(s_u[(z+3)%nz_s_u][suj][suk],s_u[(z-3+nz_s_u)%nz_s_u][suj][suk])
, __fmaf_rn(coefy_3, __fadd_rn(s_u[z][suj+3][suk],s_u[z][suj-3][suk])
, __fmaf_rn(coefz_3, __fadd_rn(s_u[z][suj][suk+3],s_u[z][suj][suk-3])
, __fmaf_rn(coefx_4, __fadd_rn(s_u[(z+4)%nz_s_u][suj][suk],s_u[(z-4+nz_s_u)%nz_s_u][suj][suk])
, __fmaf_rn(coefy_4, __fadd_rn(s_u[z][suj+4][suk],s_u[z][suj-4][suk])
, __fmul_rn(coefz_4, __fadd_rn(s_u[z][suj][suk+4],s_u[z][suj][suk-4])
)))))))))))));
const float s_eta_c = eta[IDX3_eta1(i,j,k)];
v[IDX3_l(i,j,k)] = __fdiv_rn(
__fmaf_rn(
__fmaf_rn(2.f, s_eta_c,
__fsub_rn(2.f,
__fmul_rn(s_eta_c, s_eta_c)
)
),
s_u[z][suj][suk],
__fmaf_rn(
vp[IDX3(i,j,k)],
__fadd_rn(lap, phi[IDX3(i,j,k)]),
-v[IDX3_l(i,j,k)]
)
),
__fmaf_rn(2.f, s_eta_c, 1.f)
);
phi[IDX3(i,j,k)] = __fdiv_rn(
__fsub_rn(
phi[IDX3(i,j,k)],
__fmaf_rn(
__fmul_rn(
__fsub_rn(eta[IDX3_eta1(i+1,j,k)], eta[IDX3_eta1(i-1,j,k)]),
__fsub_rn(s_u[(z+1)%nz_s_u][suj][suk], s_u[(z-1+nz_s_u)%nz_s_u][suj][suk])
), hdx_2,
__fmaf_rn(
__fmul_rn(
__fsub_rn(eta[IDX3_eta1(i,j+1,k)], eta[IDX3_eta1(i,j-1,k)]),
__fsub_rn(s_u[z][suj+1][suk], s_u[z][suj-1][suk])
), hdy_2,
__fmul_rn(
__fmul_rn(
__fsub_rn(eta[IDX3_eta1(i,j,k+1)], eta[IDX3_eta1(i,j,k-1)]),
__fsub_rn(s_u[z][suj][suk+1], s_u[z][suj][suk-1])
),
hdz_2)
))
)
,
__fadd_rn(1.f, s_eta_c)
);
}
}
}
__global__ void kernel_add_source_kernel(float *g_u, llint idx, float source) {
g_u[idx] += source;
}
extern "C" void target(
uint nsteps, double *time_kernel,
llint nx, llint ny, llint nz,
llint x1, llint x2, llint x3, llint x4, llint x5, llint x6,
llint y1, llint y2, llint y3, llint y4, llint y5, llint y6,
llint z1, llint z2, llint z3, llint z4, llint z5, llint z6,
llint lx, llint ly, llint lz,
llint sx, llint sy, llint sz,
float hdx_2, float hdy_2, float hdz_2,
const float *__restrict__ coefx, const float *__restrict__ coefy, const float *__restrict__ coefz,
float *__restrict__ u, const float *__restrict__ v, const float *__restrict__ vp,
const float *__restrict__ phi, const float *__restrict__ eta, const float *__restrict__ source
) {
struct timespec start, end;
const llint size_u = (nx + 2 * lx) * (ny + 2 * ly) * (nz + 2 * lz);
const llint size_v = size_u;
const llint size_phi = nx*ny*nz;
const llint size_vp = size_phi;
const llint size_eta = (nx+2)*(ny+2)*(nz+2);
const llint size_u_ext = (nx + 2 * lx)
* ((((ny+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y + 1) * N_THREADS_PER_PLANE_DIM_Y) + 2 * ly)
* ((((nz+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X + 1) * N_THREADS_PER_PLANE_DIM_X) + 2 * lz);
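// The device buffers are padded out to a full extra plane-tile beyond the rounded-up
// y/z extents (plus halos), apparently so the halo loads of edge thread blocks stay
// inside the allocation; only the first size_u elements are copied in and out.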
float *d_u, *d_v, *d_vp, *d_phi, *d_eta;
hipMalloc(&d_u, sizeof(float) * size_u_ext);
hipMalloc(&d_v, sizeof(float) * size_u_ext);
hipMalloc(&d_vp, sizeof(float) * size_vp);
hipMalloc(&d_phi, sizeof(float) * size_phi);
hipMalloc(&d_eta, sizeof(float) * size_eta);
hipMemcpy(d_u, u, sizeof(float) * size_u, hipMemcpyHostToDevice);
hipMemcpy(d_v, v, sizeof(float) * size_v, hipMemcpyHostToDevice);
hipMemcpy(d_vp, vp, sizeof(float) * size_vp, hipMemcpyHostToDevice);
hipMemcpy(d_phi, phi, sizeof(float) * size_phi, hipMemcpyHostToDevice);
hipMemcpy(d_eta, eta, sizeof(float) * size_eta, hipMemcpyHostToDevice);
const llint xmin = 0; const llint xmax = nx;
const llint ymin = 0; const llint ymax = ny;
dim3 threadsPerBlock(N_THREADS_PER_PLANE_DIM_X, N_THREADS_PER_PLANE_DIM_Y, 1);
int num_streams = 7;
hipStream_t streams[num_streams];
for (int i = 0; i < num_streams; i++) {
hipStreamCreateWithFlags(&(streams[i]), hipStreamNonBlocking);
}
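// Each time step splits the domain into a center region (target_inner_3d_kernel on
// streams[0]) and six PML boundary slabs (target_pml_3d_kernel on streams[1..6]); all
// streams are synchronized before the source term is injected on the default stream.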
const uint npo = 100;
for (uint istep = 1; istep <= nsteps; ++istep) {
clock_gettime(CLOCK_REALTIME, &start);
dim3 n_block_front(
(z2-z1+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(ny+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_front), dim3(threadsPerBlock), 0, streams[1], nx,ny,nz,
xmin,xmax,ymin,ymax,z1,z2,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_top(
(z4-z3+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(y2-y1+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_top), dim3(threadsPerBlock), 0, streams[2], nx,ny,nz,
xmin,xmax,y1,y2,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_left(
(z4-z3+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(y4-y3+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y,
1);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_left), dim3(threadsPerBlock), 0, streams[3], nx,ny,nz,
x1,x2,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_center(
(z4-z3+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(y4-y3+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y);
hipLaunchKernelGGL(( target_inner_3d_kernel), dim3(n_block_center), dim3(threadsPerBlock), 0, streams[0], nx,ny,nz,
x3,x4,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_right(
(z4-z3+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(y4-y3+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y,
1);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_right), dim3(threadsPerBlock), 0, streams[4], nx,ny,nz,
x5,x6,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_bottom(
(z4-z3+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(y6-y5+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y,
1);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_bottom), dim3(threadsPerBlock), 0, streams[5], nx,ny,nz,
xmin,xmax,y5,y6,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_back(
(z6-z5+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(ny+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y,
1);
hipLaunchKernelGGL(( target_pml_3d_kernel), dim3(n_block_back), dim3(threadsPerBlock), 0, streams[6], nx,ny,nz,
xmin,xmax,ymin,ymax,z5,z6,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
for (int i = 0; i < num_streams; i++) {
hipStreamSynchronize(streams[i]);
}
hipLaunchKernelGGL(( kernel_add_source_kernel), dim3(1), dim3(1), 0, 0, d_v, IDX3_l(sx,sy,sz), source[istep]);
clock_gettime(CLOCK_REALTIME, &end);
*time_kernel += (end.tv_sec - start.tv_sec) +
(double)(end.tv_nsec - start.tv_nsec) / 1.0e9;
float *t = d_u;
d_u = d_v;
d_v = t;
// Print out
if (istep % npo == 0) {
printf("time step %u / %u\n", istep, nsteps);
}
}
for (int i = 0; i < num_streams; i++) {
hipStreamDestroy(streams[i]);
}
hipMemcpy(u, d_u, sizeof(float) * size_u, hipMemcpyDeviceToHost);
hipFree(d_u);
hipFree(d_v);
hipFree(d_vp);
hipFree(d_phi);
hipFree(d_eta);
}
| 2051b5f5a59738d9e311714df40ab90ff354bf24.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <float.h>
#include <math.h>
#include <time.h>
#include "../../constants.h"
#define N_RADIUS 4
#define N_THREADS_PER_PLANE_DIM_X 16
#define N_THREADS_PER_PLANE_DIM_Y 16
__global__ void target_inner_3d_kernel(
llint nx, llint ny, llint nz,
llint x3, llint x4, llint y3, llint y4, llint z3, llint z4,
llint lx, llint ly, llint lz,
float hdx_2, float hdy_2, float hdz_2,
float coef0,
float coefx_1, float coefx_2, float coefx_3, float coefx_4,
float coefy_1, float coefy_2, float coefy_3, float coefy_4,
float coefz_1, float coefz_2, float coefz_3, float coefz_4,
const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp,
const float *__restrict__ phi, const float *__restrict__ eta
) {
const int nz_s_u = 1+2*N_RADIUS;
__shared__ float s_u[nz_s_u][N_THREADS_PER_PLANE_DIM_Y+2*N_RADIUS][N_THREADS_PER_PLANE_DIM_X+2*N_RADIUS];
const llint j0 = y3 + blockIdx.y * blockDim.y;
const llint k0 = z3 + blockIdx.x * blockDim.x;
const llint je = min(j0 + N_THREADS_PER_PLANE_DIM_Y, y4);
const llint ke = min(k0 + N_THREADS_PER_PLANE_DIM_X, z4);
const llint j = j0 + threadIdx.y;
const llint k = k0 + threadIdx.x;
const llint sje = (j0+N_THREADS_PER_PLANE_DIM_Y<y4) ? N_THREADS_PER_PLANE_DIM_Y : ((y4-y3-1)%N_THREADS_PER_PLANE_DIM_Y+1);
const llint ske = (k0+N_THREADS_PER_PLANE_DIM_X<z4) ? N_THREADS_PER_PLANE_DIM_X : ((z4-z3-1)%N_THREADS_PER_PLANE_DIM_X+1);
const llint suj = threadIdx.y + N_RADIUS;
const llint suk = threadIdx.x + N_RADIUS;
// Preparation
for (llint i = x3-N_RADIUS; i < x3+N_RADIUS; i++) {
int z = (i + nz_s_u) % nz_s_u;
s_u[z][suj][suk] = u[IDX3_l(i,j,k)];
if (threadIdx.y < N_RADIUS) {
s_u[z][threadIdx.y][suk] = u[IDX3_l(i, j - N_RADIUS, k)];
s_u[z][threadIdx.y+sje+N_RADIUS][suk] = u[IDX3_l(i, threadIdx.y+je, k)];
}
if (threadIdx.x < N_RADIUS) {
s_u[z][suj][threadIdx.x] = u[IDX3_l(i,j,k - N_RADIUS)];
s_u[z][suj][threadIdx.x+ske+N_RADIUS] = u[IDX3_l(i,j,threadIdx.x+ke)];
}
}
// Streaming
for (llint i = x3; i < x4; i++) {
int z = (i + nz_s_u) % nz_s_u;
int z_R = (i + N_RADIUS + nz_s_u) % nz_s_u;
s_u[z_R][suj][suk] = u[IDX3_l(i+N_RADIUS,j,k)];
if (threadIdx.y < N_RADIUS) {
s_u[z_R][threadIdx.y][suk] = u[IDX3_l(i+N_RADIUS, j - N_RADIUS, k)];
s_u[z_R][threadIdx.y+sje+N_RADIUS][suk] = u[IDX3_l(i+N_RADIUS, threadIdx.y+je, k)];
}
if (threadIdx.x < N_RADIUS) {
s_u[z_R][suj][threadIdx.x] = u[IDX3_l(i+N_RADIUS,j,k - N_RADIUS)];
s_u[z_R][suj][threadIdx.x+ske+N_RADIUS] = u[IDX3_l(i+N_RADIUS,j,threadIdx.x+ke)];
}
__syncthreads();
if (j < y4 && k < z4) {
float lap = __fmaf_rn(coef0, s_u[z][suj][suk]
, __fmaf_rn(coefx_1, __fadd_rn(s_u[(z+1)%nz_s_u][suj][suk],s_u[(z-1+nz_s_u)%nz_s_u][suj][suk])
, __fmaf_rn(coefy_1, __fadd_rn(s_u[z][suj+1][suk],s_u[z][suj-1][suk])
, __fmaf_rn(coefz_1, __fadd_rn(s_u[z][suj][suk+1],s_u[z][suj][suk-1])
, __fmaf_rn(coefx_2, __fadd_rn(s_u[(z+2)%nz_s_u][suj][suk],s_u[(z-2+nz_s_u)%nz_s_u][suj][suk])
, __fmaf_rn(coefy_2, __fadd_rn(s_u[z][suj+2][suk],s_u[z][suj-2][suk])
, __fmaf_rn(coefz_2, __fadd_rn(s_u[z][suj][suk+2],s_u[z][suj][suk-2])
, __fmaf_rn(coefx_3, __fadd_rn(s_u[(z+3)%nz_s_u][suj][suk],s_u[(z-3+nz_s_u)%nz_s_u][suj][suk])
, __fmaf_rn(coefy_3, __fadd_rn(s_u[z][suj+3][suk],s_u[z][suj-3][suk])
, __fmaf_rn(coefz_3, __fadd_rn(s_u[z][suj][suk+3],s_u[z][suj][suk-3])
, __fmaf_rn(coefx_4, __fadd_rn(s_u[(z+4)%nz_s_u][suj][suk],s_u[(z-4+nz_s_u)%nz_s_u][suj][suk])
, __fmaf_rn(coefy_4, __fadd_rn(s_u[z][suj+4][suk],s_u[z][suj-4][suk])
, __fmul_rn(coefz_4, __fadd_rn(s_u[z][suj][suk+4],s_u[z][suj][suk-4])
)))))))))))));
v[IDX3_l(i,j,k)] = __fmaf_rn(2.f, s_u[z][suj][suk],
__fmaf_rn(vp[IDX3(i,j,k)], lap, -v[IDX3_l(i,j,k)])
);
}
}
}
__global__ void target_pml_3d_kernel(
llint nx, llint ny, llint nz,
llint x3, llint x4, llint y3, llint y4, llint z3, llint z4,
llint lx, llint ly, llint lz,
float hdx_2, float hdy_2, float hdz_2,
float coef0,
float coefx_1, float coefx_2, float coefx_3, float coefx_4,
float coefy_1, float coefy_2, float coefy_3, float coefy_4,
float coefz_1, float coefz_2, float coefz_3, float coefz_4,
const float *__restrict__ u, float *__restrict__ v, const float *__restrict__ vp,
float *__restrict__ phi, const float *__restrict__ eta
) {
const int nz_s_u = 1+2*N_RADIUS;
__shared__ float s_u[nz_s_u][N_THREADS_PER_PLANE_DIM_Y+2*N_RADIUS][N_THREADS_PER_PLANE_DIM_X+2*N_RADIUS];
const llint j0 = y3 + blockIdx.y * blockDim.y;
const llint k0 = z3 + blockIdx.x * blockDim.x;
const llint je = min(j0 + N_THREADS_PER_PLANE_DIM_Y, y4);
const llint ke = min(k0 + N_THREADS_PER_PLANE_DIM_X, z4);
const llint j = j0 + threadIdx.y;
const llint k = k0 + threadIdx.x;
const llint sje = (j0+N_THREADS_PER_PLANE_DIM_Y<y4) ? N_THREADS_PER_PLANE_DIM_Y : ((y4-y3-1)%N_THREADS_PER_PLANE_DIM_Y+1);
const llint ske = (k0+N_THREADS_PER_PLANE_DIM_X<z4) ? N_THREADS_PER_PLANE_DIM_X : ((z4-z3-1)%N_THREADS_PER_PLANE_DIM_X+1);
const llint suj = threadIdx.y + N_RADIUS;
const llint suk = threadIdx.x + N_RADIUS;
// Preparation
for (llint i = x3-N_RADIUS; i < x3+N_RADIUS; i++) {
int z = (i + nz_s_u) % nz_s_u;
s_u[z][suj][suk] = u[IDX3_l(i,j,k)];
if (threadIdx.y < N_RADIUS) {
s_u[z][threadIdx.y][suk] = u[IDX3_l(i, j - N_RADIUS, k)];
s_u[z][threadIdx.y+sje+N_RADIUS][suk] = u[IDX3_l(i, threadIdx.y+je, k)];
}
if (threadIdx.x < N_RADIUS) {
s_u[z][suj][threadIdx.x] = u[IDX3_l(i,j,k - N_RADIUS)];
s_u[z][suj][threadIdx.x+ske+N_RADIUS] = u[IDX3_l(i,j,threadIdx.x+ke)];
}
}
// Streaming
for (llint i = x3; i < x4; i++) {
int z = (i + nz_s_u) % nz_s_u;
int z_R = (i + N_RADIUS + nz_s_u) % nz_s_u;
s_u[z_R][suj][suk] = u[IDX3_l(i+N_RADIUS,j,k)];
if (threadIdx.y < N_RADIUS) {
s_u[z_R][threadIdx.y][suk] = u[IDX3_l(i+N_RADIUS, j - N_RADIUS, k)];
s_u[z_R][threadIdx.y+sje+N_RADIUS][suk] = u[IDX3_l(i+N_RADIUS, threadIdx.y+je, k)];
}
if (threadIdx.x < N_RADIUS) {
s_u[z_R][suj][threadIdx.x] = u[IDX3_l(i+N_RADIUS,j,k - N_RADIUS)];
s_u[z_R][suj][threadIdx.x+ske+N_RADIUS] = u[IDX3_l(i+N_RADIUS,j,threadIdx.x+ke)];
}
__syncthreads();
if (j < y4 && k < z4) {
float lap = __fmaf_rn(coef0, s_u[z][suj][suk]
, __fmaf_rn(coefx_1, __fadd_rn(s_u[(z+1)%nz_s_u][suj][suk],s_u[(z-1+nz_s_u)%nz_s_u][suj][suk])
, __fmaf_rn(coefy_1, __fadd_rn(s_u[z][suj+1][suk],s_u[z][suj-1][suk])
, __fmaf_rn(coefz_1, __fadd_rn(s_u[z][suj][suk+1],s_u[z][suj][suk-1])
, __fmaf_rn(coefx_2, __fadd_rn(s_u[(z+2)%nz_s_u][suj][suk],s_u[(z-2+nz_s_u)%nz_s_u][suj][suk])
, __fmaf_rn(coefy_2, __fadd_rn(s_u[z][suj+2][suk],s_u[z][suj-2][suk])
, __fmaf_rn(coefz_2, __fadd_rn(s_u[z][suj][suk+2],s_u[z][suj][suk-2])
, __fmaf_rn(coefx_3, __fadd_rn(s_u[(z+3)%nz_s_u][suj][suk],s_u[(z-3+nz_s_u)%nz_s_u][suj][suk])
, __fmaf_rn(coefy_3, __fadd_rn(s_u[z][suj+3][suk],s_u[z][suj-3][suk])
, __fmaf_rn(coefz_3, __fadd_rn(s_u[z][suj][suk+3],s_u[z][suj][suk-3])
, __fmaf_rn(coefx_4, __fadd_rn(s_u[(z+4)%nz_s_u][suj][suk],s_u[(z-4+nz_s_u)%nz_s_u][suj][suk])
, __fmaf_rn(coefy_4, __fadd_rn(s_u[z][suj+4][suk],s_u[z][suj-4][suk])
, __fmul_rn(coefz_4, __fadd_rn(s_u[z][suj][suk+4],s_u[z][suj][suk-4])
)))))))))))));
const float s_eta_c = eta[IDX3_eta1(i,j,k)];
v[IDX3_l(i,j,k)] = __fdiv_rn(
__fmaf_rn(
__fmaf_rn(2.f, s_eta_c,
__fsub_rn(2.f,
__fmul_rn(s_eta_c, s_eta_c)
)
),
s_u[z][suj][suk],
__fmaf_rn(
vp[IDX3(i,j,k)],
__fadd_rn(lap, phi[IDX3(i,j,k)]),
-v[IDX3_l(i,j,k)]
)
),
__fmaf_rn(2.f, s_eta_c, 1.f)
);
phi[IDX3(i,j,k)] = __fdiv_rn(
__fsub_rn(
phi[IDX3(i,j,k)],
__fmaf_rn(
__fmul_rn(
__fsub_rn(eta[IDX3_eta1(i+1,j,k)], eta[IDX3_eta1(i-1,j,k)]),
__fsub_rn(s_u[(z+1)%nz_s_u][suj][suk], s_u[(z-1+nz_s_u)%nz_s_u][suj][suk])
), hdx_2,
__fmaf_rn(
__fmul_rn(
__fsub_rn(eta[IDX3_eta1(i,j+1,k)], eta[IDX3_eta1(i,j-1,k)]),
__fsub_rn(s_u[z][suj+1][suk], s_u[z][suj-1][suk])
), hdy_2,
__fmul_rn(
__fmul_rn(
__fsub_rn(eta[IDX3_eta1(i,j,k+1)], eta[IDX3_eta1(i,j,k-1)]),
__fsub_rn(s_u[z][suj][suk+1], s_u[z][suj][suk-1])
),
hdz_2)
))
)
,
__fadd_rn(1.f, s_eta_c)
);
}
}
}
__global__ void kernel_add_source_kernel(float *g_u, llint idx, float source) {
g_u[idx] += source;
}
extern "C" void target(
uint nsteps, double *time_kernel,
llint nx, llint ny, llint nz,
llint x1, llint x2, llint x3, llint x4, llint x5, llint x6,
llint y1, llint y2, llint y3, llint y4, llint y5, llint y6,
llint z1, llint z2, llint z3, llint z4, llint z5, llint z6,
llint lx, llint ly, llint lz,
llint sx, llint sy, llint sz,
float hdx_2, float hdy_2, float hdz_2,
const float *__restrict__ coefx, const float *__restrict__ coefy, const float *__restrict__ coefz,
float *__restrict__ u, const float *__restrict__ v, const float *__restrict__ vp,
const float *__restrict__ phi, const float *__restrict__ eta, const float *__restrict__ source
) {
struct timespec start, end;
const llint size_u = (nx + 2 * lx) * (ny + 2 * ly) * (nz + 2 * lz);
const llint size_v = size_u;
const llint size_phi = nx*ny*nz;
const llint size_vp = size_phi;
const llint size_eta = (nx+2)*(ny+2)*(nz+2);
const llint size_u_ext = (nx + 2 * lx)
* ((((ny+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y + 1) * N_THREADS_PER_PLANE_DIM_Y) + 2 * ly)
* ((((nz+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X + 1) * N_THREADS_PER_PLANE_DIM_X) + 2 * lz);
float *d_u, *d_v, *d_vp, *d_phi, *d_eta;
cudaMalloc(&d_u, sizeof(float) * size_u_ext);
cudaMalloc(&d_v, sizeof(float) * size_u_ext);
cudaMalloc(&d_vp, sizeof(float) * size_vp);
cudaMalloc(&d_phi, sizeof(float) * size_phi);
cudaMalloc(&d_eta, sizeof(float) * size_eta);
cudaMemcpy(d_u, u, sizeof(float) * size_u, cudaMemcpyHostToDevice);
cudaMemcpy(d_v, v, sizeof(float) * size_v, cudaMemcpyHostToDevice);
cudaMemcpy(d_vp, vp, sizeof(float) * size_vp, cudaMemcpyHostToDevice);
cudaMemcpy(d_phi, phi, sizeof(float) * size_phi, cudaMemcpyHostToDevice);
cudaMemcpy(d_eta, eta, sizeof(float) * size_eta, cudaMemcpyHostToDevice);
const llint xmin = 0; const llint xmax = nx;
const llint ymin = 0; const llint ymax = ny;
dim3 threadsPerBlock(N_THREADS_PER_PLANE_DIM_X, N_THREADS_PER_PLANE_DIM_Y, 1);
int num_streams = 7;
cudaStream_t streams[num_streams];
for (int i = 0; i < num_streams; i++) {
cudaStreamCreateWithFlags(&(streams[i]), cudaStreamNonBlocking);
}
const uint npo = 100;
for (uint istep = 1; istep <= nsteps; ++istep) {
clock_gettime(CLOCK_REALTIME, &start);
dim3 n_block_front(
(z2-z1+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(ny+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y);
target_pml_3d_kernel<<<n_block_front, threadsPerBlock, 0, streams[1]>>>(nx,ny,nz,
xmin,xmax,ymin,ymax,z1,z2,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_top(
(z4-z3+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(y2-y1+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y);
target_pml_3d_kernel<<<n_block_top, threadsPerBlock, 0, streams[2]>>>(nx,ny,nz,
xmin,xmax,y1,y2,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_left(
(z4-z3+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(y4-y3+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y,
1);
target_pml_3d_kernel<<<n_block_left, threadsPerBlock, 0, streams[3]>>>(nx,ny,nz,
x1,x2,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_center(
(z4-z3+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(y4-y3+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y);
target_inner_3d_kernel<<<n_block_center, threadsPerBlock, 0, streams[0]>>>(nx,ny,nz,
x3,x4,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_right(
(z4-z3+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(y4-y3+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y,
1);
target_pml_3d_kernel<<<n_block_right, threadsPerBlock, 0, streams[4]>>>(nx,ny,nz,
x5,x6,y3,y4,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_bottom(
(z4-z3+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(y6-y5+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y,
1);
target_pml_3d_kernel<<<n_block_bottom, threadsPerBlock, 0, streams[5]>>>(nx,ny,nz,
xmin,xmax,y5,y6,z3,z4,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
dim3 n_block_back(
(z6-z5+N_THREADS_PER_PLANE_DIM_X-1) / N_THREADS_PER_PLANE_DIM_X,
(ny+N_THREADS_PER_PLANE_DIM_Y-1) / N_THREADS_PER_PLANE_DIM_Y,
1);
target_pml_3d_kernel<<<n_block_back, threadsPerBlock, 0, streams[6]>>>(nx,ny,nz,
xmin,xmax,ymin,ymax,z5,z6,
lx,ly,lz,
hdx_2, hdy_2, hdz_2,
coefx[0]+coefy[0]+coefz[0],
coefx[1], coefx[2], coefx[3], coefx[4],
coefy[1], coefy[2], coefy[3], coefy[4],
coefz[1], coefz[2], coefz[3], coefz[4],
d_u, d_v, d_vp,
d_phi, d_eta);
for (int i = 0; i < num_streams; i++) {
cudaStreamSynchronize(streams[i]);
}
kernel_add_source_kernel<<<1, 1>>>(d_v, IDX3_l(sx,sy,sz), source[istep]);
clock_gettime(CLOCK_REALTIME, &end);
*time_kernel += (end.tv_sec - start.tv_sec) +
(double)(end.tv_nsec - start.tv_nsec) / 1.0e9;
float *t = d_u;
d_u = d_v;
d_v = t;
// Print out
if (istep % npo == 0) {
printf("time step %u / %u\n", istep, nsteps);
}
}
for (int i = 0; i < num_streams; i++) {
cudaStreamDestroy(streams[i]);
}
cudaMemcpy(u, d_u, sizeof(float) * size_u, cudaMemcpyDeviceToHost);
cudaFree(d_u);
cudaFree(d_v);
cudaFree(d_vp);
cudaFree(d_phi);
cudaFree(d_eta);
}
|
ac03643470da55c9ec856841bd5bdd81c291db78.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <vector>
#include <string>
#include <thrust/device_vector.h>
#include <fstream>
#include <cupti_profiler.h>
#include <time.h>
#include <sys/time.h>
#include <unistd.h>
#include <stdio.h>
#define PROFILE_ALL_EVENTS_METRICS 0
#define N 100
int counter1 = 200000000;
const char *path_0 = "/home/yichez/cupti_profiler/Experiment/blackbox5.6/vectormultiply.csv";
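// Despite its name, matMul below is an element-wise vector multiply: C[i] = A[i] * B[i].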
__global__ void
matMul(const int *A, const int *B, int *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] * B[i];
}
}
static void
initVec(int *vec, int n)
{
for (int i = 0; i < n; i++)
vec[i] = i;
}
static void compute()
{
size_t size = N * sizeof(int);
int *h_A, *h_B, *h_C;
int *d_A, *d_B, *d_C;
// hipEvent_t start, stop;
// hipEventCreate(&start);
// hipEventCreate(&stop);
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
h_B = (int*)malloc(size);
h_C = (int*)malloc(size);
// Initialize input vectors
initVec(h_A, N);
initVec(h_B, N);
memset(h_C, 0, size);
// Allocate vectors in device memory
hipMalloc((void**)&d_A, size);
hipMalloc((void**)&d_B, size);
hipMalloc((void**)&d_C, size);
// Copy vectors from host memory to device memory
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
// int priority_hi = -1;
// hipStream_t st_hi;
// hipStreamCreateWithPriority(&st_hi, hipStreamNonBlocking, priority_hi);
matMul<<<1, 100>>>(d_A, d_B, d_C, N);
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
free(h_A);
free(h_B);
free(h_C);
}
int main()
{
freopen(path_0,"w",stdout);
using namespace std;
hipDevice_t device;
DRIVER_API_CALL(hipInit(0));
DRIVER_API_CALL(hipDeviceGet(&device, 0));
#if PROFILE_ALL_EVENTS_METRICS
const auto event_names = cupti_profiler::available_events(device);
const auto metric_names = cupti_profiler::available_metrics(device);
#else
vector<string> event_names {
//"elapsed_cycles_sm",
// "active_warps",
// "inst_issued0",
// "inst_executed",
// "tex1_cache_sector_queries",
"fb_subp1_write_sectors",
"fb_subp0_read_sectors",
"l2_subp0_write_sector_misses",
"l2_subp1_read_sector_misses",
"branch",
// "l2_subp0_write_sector_misses",
// "l2_subp1_read_sector_misses",
// "branch",
// "gld_inst_8bit",
//"elapsed_cycles_sm",
// "tex1_cache_sector_queries",
// "l2_subp0_read_tex_sector_queries",
// "l2_subp1_write_tex_sector_queries",
// "active_warps",
// "elapsed_cycles_sm",
// "l2_subp1_write_sysmem_sector_queries",
// "l2_subp0_read_sysmem_sector_queries",
// "inst_executed",
// "inst_issued0",
// "branch",
};
vector<string> metric_names {
// "dram_read_transactions",
// //"local_hit_rate",
// "dram_write_transactions",
//"inst_executed",
//"stall_memory_dependency", //*This metrics will cause profiler to be very slow*//
//"stall_inst_fetch", //*This metrics will cause profiler to be very slow*//
//"cf_issued",
//"tex_fu_utilization",
//"l2_write_transactions",
//"shared_store_transactions",
//"tex_cache_transactions",
};
#endif
hipCtx_t context;
hipCtxCreate(&context, 0, 0);
for(int j=0;j<counter1;j++)
{
cupti_profiler::profiler *p= new cupti_profiler::profiler(event_names, metric_names, context);
struct timeval ts,te;
p->start();
gettimeofday(&ts,NULL);
compute();
p->stop();
gettimeofday(&te,NULL);
p->print_event_values(std::cout,ts,te);
p->print_metric_values(std::cout,ts,te);
delete p; // allocated with new above, so delete (not free) to run the profiler destructor
}
fclose(stdout);
return 0;
}
| ac03643470da55c9ec856841bd5bdd81c291db78.cu | #include <cstdio>
#include <vector>
#include <string>
#include <thrust/device_vector.h>
#include <fstream>
#include <cupti_profiler.h>
#include <time.h>
#include <sys/time.h>
#include <unistd.h>
#include <stdio.h>
#define PROFILE_ALL_EVENTS_METRICS 0
#define N 100
int counter1 = 200000000;
const char *path_0 = "/home/yichez/cupti_profiler/Experiment/blackbox5.6/vectormultiply.csv";
__global__ void
matMul(const int *A, const int *B, int *C, int numElements)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numElements)
{
C[i] = A[i] * B[i];
}
}
static void
initVec(int *vec, int n)
{
for (int i = 0; i < n; i++)
vec[i] = i;
}
static void compute()
{
size_t size = N * sizeof(int);
int *h_A, *h_B, *h_C;
int *d_A, *d_B, *d_C;
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventCreate(&stop);
// Allocate input vectors h_A and h_B in host memory
h_A = (int*)malloc(size);
h_B = (int*)malloc(size);
h_C = (int*)malloc(size);
// Initialize input vectors
initVec(h_A, N);
initVec(h_B, N);
memset(h_C, 0, size);
// Allocate vectors in device memory
cudaMalloc((void**)&d_A, size);
cudaMalloc((void**)&d_B, size);
cudaMalloc((void**)&d_C, size);
// Copy vectors from host memory to device memory
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// int priority_hi = -1;
// cudaStream_t st_hi;
// cudaStreamCreateWithPriority(&st_hi, cudaStreamNonBlocking, priority_hi);
matMul<<<1, 100>>>(d_A, d_B, d_C, N);
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
}
int main()
{
freopen(path_0,"w",stdout);
using namespace std;
CUdevice device;
DRIVER_API_CALL(cuInit(0));
DRIVER_API_CALL(cuDeviceGet(&device, 0));
#if PROFILE_ALL_EVENTS_METRICS
const auto event_names = cupti_profiler::available_events(device);
const auto metric_names = cupti_profiler::available_metrics(device);
#else
vector<string> event_names {
//"elapsed_cycles_sm",
// "active_warps",
// "inst_issued0",
// "inst_executed",
// "tex1_cache_sector_queries",
"fb_subp1_write_sectors",
"fb_subp0_read_sectors",
"l2_subp0_write_sector_misses",
"l2_subp1_read_sector_misses",
"branch",
// "l2_subp0_write_sector_misses",
// "l2_subp1_read_sector_misses",
// "branch",
// "gld_inst_8bit",
//"elapsed_cycles_sm",
// "tex1_cache_sector_queries",
// "l2_subp0_read_tex_sector_queries",
// "l2_subp1_write_tex_sector_queries",
// "active_warps",
// "elapsed_cycles_sm",
// "l2_subp1_write_sysmem_sector_queries",
// "l2_subp0_read_sysmem_sector_queries",
// "inst_executed",
// "inst_issued0",
// "branch",
};
vector<string> metric_names {
// "dram_read_transactions",
// //"local_hit_rate",
// "dram_write_transactions",
//"inst_executed",
//"stall_memory_dependency", //*This metrics will cause profiler to be very slow*//
//"stall_inst_fetch", //*This metrics will cause profiler to be very slow*//
//"cf_issued",
//"tex_fu_utilization",
//"l2_write_transactions",
//"shared_store_transactions",
//"tex_cache_transactions",
};
#endif
CUcontext context;
cuCtxCreate(&context, 0, 0);
for(int j=0;j<counter1;j++)
{
cupti_profiler::profiler *p= new cupti_profiler::profiler(event_names, metric_names, context);
struct timeval ts,te;
p->start();
gettimeofday(&ts,NULL);
compute();
p->stop();
gettimeofday(&te,NULL);
p->print_event_values(std::cout,ts,te);
p->print_metric_values(std::cout,ts,te);
delete p; // allocated with new above, so delete (not free) to run the profiler destructor
}
fclose(stdout);
return 0;
}
|
6ca6f8ccf53edc6c18f5dcfa49ec67aa7809d880.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _GEMM16x7_CU_
#define _GEMM16x7_CU_
namespace gemm16x7 {
const int TW_ROW = 16, TW_COL = 7, TW_DEPTH = 16;
#define tile_a_x_tile_b (tile_a[ty][0]*tile_b[0][tx]+tile_a[ty][1]*tile_b[1][tx]+tile_a[ty][2]*tile_b[2][tx]+tile_a[ty][3]*tile_b[3][tx]+ \
tile_a[ty][4]*tile_b[4][tx]+tile_a[ty][5]*tile_b[5][tx]+tile_a[ty][6]*tile_b[6][tx]+tile_a[ty][7]*tile_b[7][tx]+ \
tile_a[ty][8]*tile_b[8][tx]+tile_a[ty][9]*tile_b[9][tx]+tile_a[ty][10]*tile_b[10][tx]+tile_a[ty][11]*tile_b[11][tx]+ \
tile_a[ty][12]*tile_b[12][tx]+tile_a[ty][13]*tile_b[13][tx]+tile_a[ty][14]*tile_b[14][tx]+tile_a[ty][15]*tile_b[15][tx])
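// tile_a_x_tile_b is the manually unrolled 16-term dot product of one row of tile_a with
// one column of tile_b (see the "manually unroll" timing note near the end of this file).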
template <typename Dtype>
__global__ void kernel_gemm_nn(const int M, const int N, const int K,
const Dtype alpha, const Dtype *a, const Dtype *b, const Dtype beta,
Dtype *c) {
__shared__ Dtype tile_a[TW_ROW][TW_DEPTH];
__shared__ Dtype tile_b[TW_DEPTH][TW_COL];
int ty = threadIdx.y;
int tx = threadIdx.x;
int row = blockIdx.y * blockDim.y + ty;
int col = blockIdx.x * blockDim.x + tx;
Dtype ans = 0;
int i;
for (i = 0; i < K - TW_DEPTH; i += TW_DEPTH)
{
// for (int j = 0; j < TW_DEPTH; j += TW_COL) {
// if (tx + j < TW_DEPTH) {
tile_a[ty][tx] = a[row * K + (i + tx)];
tile_a[ty][tx + TW_COL] = a[row * K + (i + tx + TW_COL)];
if (tx < 2)
tile_a[ty][tx + TW_COL * 2] = a[row * K + (i + tx + TW_COL * 2)];
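// With blockDim.x == TW_COL == 7, each thread fills columns tx, tx+7 and
// (for tx < 2) tx+14 of tile_a, covering all TW_DEPTH == 16 columns.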
// }
// }
// for (int j = 0; j < TW_DEPTH; j += TW_ROW) {
// if (ty + j < TW_DEPTH) {
tile_b[ty][tx] = b[(i + ty) * N + col];
// }
// }
__syncthreads();
ans += tile_a_x_tile_b;
__syncthreads();
}
// for (int j = 0; j < TW_DEPTH; j += TW_COL) {
// if (tx + j < TW_DEPTH) {
tile_a[ty][tx] = (i + tx < K) ? a[row * K + i + tx] : 0;
tile_a[ty][tx + TW_COL] = (i + tx + TW_COL < K) ? a[row * K + i + tx + TW_COL] : 0;
if (tx < 2)
tile_a[ty][tx + TW_COL * 2] = (i + tx + TW_COL * 2 < K) ? a[row * K + i + tx + TW_COL * 2] : 0;
// }
// }
// for (int j = 0; j < TW_DEPTH; j += TW_ROW) {
// if (ty + j < TW_DEPTH) {
tile_b[ty][tx] = (i + ty < K) ? b[(i + ty) * N + col] : 0;
// }
// }
__syncthreads();
ans += tile_a_x_tile_b;
__syncthreads();
ans *= alpha;
if (beta != 0)
c[row * N + col] = c[row * N + col] * beta + ans;
else
c[row * N + col] = ans;
}
template <typename Dtype>
__global__ void kernel_gemm_tn(const int M, const int N, const int K,
const Dtype alpha, const Dtype *a, const Dtype *b, const Dtype beta,
Dtype *c) {
__shared__ Dtype tile_a[TW_ROW][TW_DEPTH];
__shared__ Dtype tile_b[TW_DEPTH][TW_COL];
int ty = threadIdx.y;
int tx = threadIdx.x;
int row = blockIdx.y * blockDim.y + ty;
int col = blockIdx.x * blockDim.x + tx;
Dtype ans = 0;
int i;
for (i = 0; i < K - TW_DEPTH; i += TW_DEPTH)
{
// for (int j = 0; j < TW_DEPTH; j += TW_COL)
// if (tx + j < TW_DEPTH)
tile_a[ty][tx] = a[(i + tx) * M + row];
tile_a[ty][tx + TW_COL] = a[(i + tx + TW_COL) * M + row];
if (tx < 2)
tile_a[ty][tx + TW_COL * 2] = a[(i + tx + TW_COL * 2) * M + row];
// for (int j = 0; j < TW_DEPTH; j += TW_ROW)
// if (ty + j < TW_DEPTH)
tile_b[ty][tx] = b[(i + ty) * N + col];
__syncthreads();
ans += tile_a_x_tile_b;
__syncthreads();
}
// for (int j = 0; j < TW_DEPTH; j += TW_COL)
// if (tx + j < TW_DEPTH)
tile_a[ty][tx] = (i + tx < K) ? a[(i + tx) * M + row] : 0;
tile_a[ty][tx + TW_COL] = (i + tx + TW_COL < K) ? a[(i + tx + TW_COL) * M + row] : 0;
if (tx < 2)
tile_a[ty][tx + TW_COL * 2] = (i + tx + TW_COL * 2 < K) ? a[(i + tx + TW_COL * 2) * M + row] : 0;
// for (int j = 0; j < TW_DEPTH; j += TW_ROW)
// if (ty + j < TW_DEPTH)
tile_b[ty][tx] = (i + ty < K) ? b[(i + ty) * N + col] : 0;
__syncthreads();
ans += tile_a_x_tile_b;
__syncthreads();
ans *= alpha;
if (beta != 0)
c[row * N + col] = c[row * N + col] * beta + ans;
else
c[row * N + col] = ans;
}
template <typename Dtype>
__global__ void kernel_gemm_nt(const int M, const int N, const int K,
const Dtype alpha, const Dtype *a, const Dtype *b, const Dtype beta,
Dtype *c) {
__shared__ Dtype tile_a[TW_ROW][TW_DEPTH];
__shared__ Dtype tile_b[TW_DEPTH][TW_COL];
int ty = threadIdx.y;
int tx = threadIdx.x;
int row = blockIdx.y * blockDim.y + ty;
int col = blockIdx.x * blockDim.x + tx;
Dtype ans = 0;
int i;
for (i = 0; i < K - TW_DEPTH; i += TW_DEPTH)
{
// for (int j = 0; j < TW_DEPTH; j += TW_COL)
// if (tx + j < TW_DEPTH)
tile_a[ty][tx] = a[row * K + i + tx];
tile_a[ty][tx + TW_COL] = a[row * K + i + tx + TW_COL];
if (tx < 2)
tile_a[ty][tx + TW_COL * 2] = a[row * K + i + tx + TW_COL * 2];
// for (int j = 0; j < TW_DEPTH; j += TW_ROW)
// if (ty + j < TW_DEPTH)
tile_b[ty][tx] = b[col * K + i + ty];
__syncthreads();
ans += tile_a_x_tile_b;
__syncthreads();
}
// for (int j = 0; j < TW_DEPTH; j += TW_COL)
// if (tx + j < TW_DEPTH)
tile_a[ty][tx] = (i + tx < K) ? a[row * K + i + tx] : 0;
tile_a[ty][tx + TW_COL] = (i + tx + TW_COL < K) ? a[row * K + i + tx + TW_COL] : 0;
if (tx < 2)
tile_a[ty][tx + TW_COL * 2] = (i + tx + TW_COL * 2 < K) ? a[row * K + i + tx + TW_COL * 2] : 0;
// for (int j = 0; j < TW_DEPTH; j += TW_ROW)
// if (ty + j < TW_DEPTH)
tile_b[ty][tx] = (i + ty < K) ? b[col * K + i + ty] : 0;
__syncthreads();
ans += tile_a_x_tile_b;
__syncthreads();
ans *= alpha;
if (beta != 0)
c[row * N + col] = c[row * N + col] * beta + ans;
else
c[row * N + col] = ans;
}
template <typename Dtype>
__global__ void kernel_gemm_tt(const int M, const int N, const int K,
const Dtype alpha, const Dtype *a, const Dtype *b, const Dtype beta,
Dtype *c) {
__shared__ Dtype tile_a[TW_ROW][TW_DEPTH];
__shared__ Dtype tile_b[TW_DEPTH][TW_COL];
int ty = threadIdx.y;
int tx = threadIdx.x;
int row = blockIdx.y * blockDim.y + ty;
int col = blockIdx.x * blockDim.x + tx;
Dtype ans = 0;
int i;
for (i = 0; i < K - TW_DEPTH; i += TW_DEPTH)
{
// for (int j = 0; j < TW_DEPTH; j += TW_COL)
// if (tx + j < TW_DEPTH)
tile_a[ty][tx] = a[(i + tx) * M + row];
tile_a[ty][tx + TW_COL] = a[(i + tx + TW_COL) * M + row];
if (tx < 2)
tile_a[ty][tx + TW_COL * 2] = a[(i + tx + TW_COL * 2) * M + row];
// for (int j = 0; j < TW_DEPTH; j += TW_ROW)
// if (ty + j < TW_DEPTH)
tile_b[ty][tx] = b[col * K + i + ty];
__syncthreads();
ans += tile_a_x_tile_b;
__syncthreads();
}
// for (int j = 0; j < TW_DEPTH; j += TW_COL)
// if (tx + j < TW_DEPTH)
tile_a[ty][tx] = (i + tx < K) ? a[(i + tx) * M + row] : 0;
tile_a[ty][tx + TW_COL] = (i + tx + TW_COL < K) ? a[(i + tx + TW_COL) * M + row] : 0;
if (tx < 2)
tile_a[ty][tx + TW_COL * 2] = (i + tx + TW_COL * 2 < K) ? a[(i + tx + TW_COL * 2) * M + row] : 0;
// for (int j = 0; j < TW_DEPTH; j += TW_ROW)
// if (ty + j < TW_DEPTH)
tile_b[ty][tx] = (i + ty < K) ? b[col * K + i + ty] : 0;
__syncthreads();
ans += tile_a_x_tile_b;
__syncthreads();
ans *= alpha;
if (beta != 0)
c[row * N + col] = c[row * N + col] * beta + ans;
else
c[row * N + col] = ans;
}
// gemm kernel
// 4: 1.3
// 7: 2.7
// 8: 4
// 14: 4.15
// 16: 5.08, pragma unroll(4): 5.18, manually unroll: 5.25
// 18: 4.5
// 20: 4.5
// 32: 4.85
// cublas: 10.5
// gemm interface
void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
int grid_rows = M / TW_ROW;
int grid_cols = N / TW_COL;
dim3 gridSize(grid_cols, grid_rows);
dim3 blockSize(TW_COL, TW_ROW);
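// Note: truncating division here and the absence of bounds checks in the kernels mean
// this launch path assumes M is a multiple of TW_ROW (16) and N a multiple of TW_COL (7).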
if (TransA == CblasNoTrans && TransB == CblasNoTrans)
hipLaunchKernelGGL(( kernel_gemm_nn<float>), dim3(gridSize), dim3(blockSize), 0, 0, M, N, K, alpha, A, B, beta, C);
else if (TransA != CblasNoTrans && TransB == CblasNoTrans)
hipLaunchKernelGGL(( kernel_gemm_tn<float>), dim3(gridSize), dim3(blockSize), 0, 0, M, N, K, alpha, A, B, beta, C);
else if (TransA == CblasNoTrans && TransB != CblasNoTrans)
hipLaunchKernelGGL(( kernel_gemm_nt<float>), dim3(gridSize), dim3(blockSize), 0, 0, M, N, K, alpha, A, B, beta, C);
else
hipLaunchKernelGGL(( kernel_gemm_tt<float>), dim3(gridSize), dim3(blockSize), 0, 0, M, N, K, alpha, A, B, beta, C);
// hipError_t er1 = hipPeekAtLastError();
// CUDA_CHECK(er1);
}
}
#endif // _GEMM16x7_CU_ | 6ca6f8ccf53edc6c18f5dcfa49ec67aa7809d880.cu | #ifndef _GEMM16x7_CU_
#define _GEMM16x7_CU_
namespace gemm16x7 {
const int TW_ROW = 16, TW_COL = 7, TW_DEPTH = 16;
#define tile_a_x_tile_b (tile_a[ty][0]*tile_b[0][tx]+tile_a[ty][1]*tile_b[1][tx]+tile_a[ty][2]*tile_b[2][tx]+tile_a[ty][3]*tile_b[3][tx]+ \
tile_a[ty][4]*tile_b[4][tx]+tile_a[ty][5]*tile_b[5][tx]+tile_a[ty][6]*tile_b[6][tx]+tile_a[ty][7]*tile_b[7][tx]+ \
tile_a[ty][8]*tile_b[8][tx]+tile_a[ty][9]*tile_b[9][tx]+tile_a[ty][10]*tile_b[10][tx]+tile_a[ty][11]*tile_b[11][tx]+ \
tile_a[ty][12]*tile_b[12][tx]+tile_a[ty][13]*tile_b[13][tx]+tile_a[ty][14]*tile_b[14][tx]+tile_a[ty][15]*tile_b[15][tx])
template <typename Dtype>
__global__ void kernel_gemm_nn(const int M, const int N, const int K,
const Dtype alpha, const Dtype *a, const Dtype *b, const Dtype beta,
Dtype *c) {
__shared__ Dtype tile_a[TW_ROW][TW_DEPTH];
__shared__ Dtype tile_b[TW_DEPTH][TW_COL];
int ty = threadIdx.y;
int tx = threadIdx.x;
int row = blockIdx.y * blockDim.y + ty;
int col = blockIdx.x * blockDim.x + tx;
Dtype ans = 0;
int i;
for (i = 0; i < K - TW_DEPTH; i += TW_DEPTH)
{
// for (int j = 0; j < TW_DEPTH; j += TW_COL) {
// if (tx + j < TW_DEPTH) {
tile_a[ty][tx] = a[row * K + (i + tx)];
tile_a[ty][tx + TW_COL] = a[row * K + (i + tx + TW_COL)];
if (tx < 2)
tile_a[ty][tx + TW_COL * 2] = a[row * K + (i + tx + TW_COL * 2)];
// }
// }
// for (int j = 0; j < TW_DEPTH; j += TW_ROW) {
// if (ty + j < TW_DEPTH) {
tile_b[ty][tx] = b[(i + ty) * N + col];
// }
// }
__syncthreads();
ans += tile_a_x_tile_b;
__syncthreads();
}
// for (int j = 0; j < TW_DEPTH; j += TW_COL) {
// if (tx + j < TW_DEPTH) {
tile_a[ty][tx] = (i + tx < K) ? a[row * K + i + tx] : 0;
tile_a[ty][tx + TW_COL] = (i + tx + TW_COL < K) ? a[row * K + i + tx + TW_COL] : 0;
if (tx < 2)
tile_a[ty][tx + TW_COL * 2] = (i + tx + TW_COL * 2 < K) ? a[row * K + i + tx + TW_COL * 2] : 0;
// }
// }
// for (int j = 0; j < TW_DEPTH; j += TW_ROW) {
// if (ty + j < TW_DEPTH) {
tile_b[ty][tx] = (i + ty < K) ? b[(i + ty) * N + col] : 0;
// }
// }
__syncthreads();
ans += tile_a_x_tile_b;
__syncthreads();
ans *= alpha;
if (beta != 0)
c[row * N + col] = c[row * N + col] * beta + ans;
else
c[row * N + col] = ans;
}
template <typename Dtype>
__global__ void kernel_gemm_tn(const int M, const int N, const int K,
const Dtype alpha, const Dtype *a, const Dtype *b, const Dtype beta,
Dtype *c) {
__shared__ Dtype tile_a[TW_ROW][TW_DEPTH];
__shared__ Dtype tile_b[TW_DEPTH][TW_COL];
int ty = threadIdx.y;
int tx = threadIdx.x;
int row = blockIdx.y * blockDim.y + ty;
int col = blockIdx.x * blockDim.x + tx;
Dtype ans = 0;
int i;
for (i = 0; i < K - TW_DEPTH; i += TW_DEPTH)
{
// for (int j = 0; j < TW_DEPTH; j += TW_COL)
// if (tx + j < TW_DEPTH)
tile_a[ty][tx] = a[(i + tx) * M + row];
tile_a[ty][tx + TW_COL] = a[(i + tx + TW_COL) * M + row];
if (tx < 2)
tile_a[ty][tx + TW_COL * 2] = a[(i + tx + TW_COL * 2) * M + row];
// for (int j = 0; j < TW_DEPTH; j += TW_ROW)
// if (ty + j < TW_DEPTH)
tile_b[ty][tx] = b[(i + ty) * N + col];
__syncthreads();
ans += tile_a_x_tile_b;
__syncthreads();
}
// for (int j = 0; j < TW_DEPTH; j += TW_COL)
// if (tx + j < TW_DEPTH)
tile_a[ty][tx] = (i + tx < K) ? a[(i + tx) * M + row] : 0;
tile_a[ty][tx + TW_COL] = (i + tx + TW_COL < K) ? a[(i + tx + TW_COL) * M + row] : 0;
if (tx < 2)
tile_a[ty][tx + TW_COL * 2] = (i + tx + TW_COL * 2 < K) ? a[(i + tx + TW_COL * 2) * M + row] : 0;
// for (int j = 0; j < TW_DEPTH; j += TW_ROW)
// if (ty + j < TW_DEPTH)
tile_b[ty][tx] = (i + ty < K) ? b[(i + ty) * N + col] : 0;
__syncthreads();
ans += tile_a_x_tile_b;
__syncthreads();
ans *= alpha;
if (beta != 0)
c[row * N + col] = c[row * N + col] * beta + ans;
else
c[row * N + col] = ans;
}
template <typename Dtype>
__global__ void kernel_gemm_nt(const int M, const int N, const int K,
const Dtype alpha, const Dtype *a, const Dtype *b, const Dtype beta,
Dtype *c) {
__shared__ Dtype tile_a[TW_ROW][TW_DEPTH];
__shared__ Dtype tile_b[TW_DEPTH][TW_COL];
int ty = threadIdx.y;
int tx = threadIdx.x;
int row = blockIdx.y * blockDim.y + ty;
int col = blockIdx.x * blockDim.x + tx;
Dtype ans = 0;
int i;
for (i = 0; i < K - TW_DEPTH; i += TW_DEPTH)
{
// for (int j = 0; j < TW_DEPTH; j += TW_COL)
// if (tx + j < TW_DEPTH)
tile_a[ty][tx] = a[row * K + i + tx];
tile_a[ty][tx + TW_COL] = a[row * K + i + tx + TW_COL];
if (tx < 2)
tile_a[ty][tx + TW_COL * 2] = a[row * K + i + tx + TW_COL * 2];
// for (int j = 0; j < TW_DEPTH; j += TW_ROW)
// if (ty + j < TW_DEPTH)
tile_b[ty][tx] = b[col * K + i + ty];
__syncthreads();
ans += tile_a_x_tile_b;
__syncthreads();
}
// for (int j = 0; j < TW_DEPTH; j += TW_COL)
// if (tx + j < TW_DEPTH)
tile_a[ty][tx] = (i + tx < K) ? a[row * K + i + tx] : 0;
tile_a[ty][tx + TW_COL] = (i + tx + TW_COL < K) ? a[row * K + i + tx + TW_COL] : 0;
if (tx < 2)
tile_a[ty][tx + TW_COL * 2] = (i + tx + TW_COL * 2 < K) ? a[row * K + i + tx + TW_COL * 2] : 0;
// for (int j = 0; j < TW_DEPTH; j += TW_ROW)
// if (ty + j < TW_DEPTH)
tile_b[ty][tx] = (i + ty < K) ? b[col * K + i + ty] : 0;
__syncthreads();
ans += tile_a_x_tile_b;
__syncthreads();
ans *= alpha;
if (beta != 0)
c[row * N + col] = c[row * N + col] * beta + ans;
else
c[row * N + col] = ans;
}
template <typename Dtype>
__global__ void kernel_gemm_tt(const int M, const int N, const int K,
const Dtype alpha, const Dtype *a, const Dtype *b, const Dtype beta,
Dtype *c) {
__shared__ Dtype tile_a[TW_ROW][TW_DEPTH];
__shared__ Dtype tile_b[TW_DEPTH][TW_COL];
int ty = threadIdx.y;
int tx = threadIdx.x;
int row = blockIdx.y * blockDim.y + ty;
int col = blockIdx.x * blockDim.x + tx;
Dtype ans = 0;
int i;
for (i = 0; i < K - TW_DEPTH; i += TW_DEPTH)
{
// for (int j = 0; j < TW_DEPTH; j += TW_COL)
// if (tx + j < TW_DEPTH)
tile_a[ty][tx] = a[(i + tx) * M + row];
tile_a[ty][tx + TW_COL] = a[(i + tx + TW_COL) * M + row];
if (tx < 2)
tile_a[ty][tx + TW_COL * 2] = a[(i + tx + TW_COL * 2) * M + row];
// for (int j = 0; j < TW_DEPTH; j += TW_ROW)
// if (ty + j < TW_DEPTH)
tile_b[ty][tx] = b[col * K + i + ty];
__syncthreads();
ans += tile_a_x_tile_b;
__syncthreads();
}
// for (int j = 0; j < TW_DEPTH; j += TW_COL)
// if (tx + j < TW_DEPTH)
tile_a[ty][tx] = (i + tx < K) ? a[(i + tx) * M + row] : 0;
tile_a[ty][tx + TW_COL] = (i + tx + TW_COL < K) ? a[(i + tx + TW_COL) * M + row] : 0;
if (tx < 2)
tile_a[ty][tx + TW_COL * 2] = (i + tx + TW_COL * 2 < K) ? a[(i + tx + TW_COL * 2) * M + row] : 0;
// for (int j = 0; j < TW_DEPTH; j += TW_ROW)
// if (ty + j < TW_DEPTH)
tile_b[ty][tx] = (i + ty < K) ? b[col * K + i + ty] : 0;
__syncthreads();
ans += tile_a_x_tile_b;
__syncthreads();
ans *= alpha;
if (beta != 0)
c[row * N + col] = c[row * N + col] * beta + ans;
else
c[row * N + col] = ans;
}
// gemm kernel tuning notes: value -> measured throughput from the original runs
// (the swept parameter is not recorded here; 16 with manual unrolling was the
// fastest hand-written variant, cublas listed for comparison)
// 4: 1.3
// 7: 2.7
// 8: 4
// 14: 4.15
// 16: 5.08, pragma unroll(4): 5.18, manually unroll: 5.25
// 18: 4.5
// 20: 4.5
// 32: 4.85
// cublas: 10.5
// gemm interface
void caffe_gpu_gemm(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
int grid_rows = M / TW_ROW;
int grid_cols = N / TW_COL;
dim3 gridSize(grid_cols, grid_rows);
dim3 blockSize(TW_COL, TW_ROW);
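  // Note: the kernels never test row < M or col < N, so this launcher assumes M is
  // a multiple of TW_ROW (16) and N a multiple of TW_COL (7).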
if (TransA == CblasNoTrans && TransB == CblasNoTrans)
kernel_gemm_nn<float><<<gridSize, blockSize>>>(M, N, K, alpha, A, B, beta, C);
else if (TransA != CblasNoTrans && TransB == CblasNoTrans)
kernel_gemm_tn<float><<<gridSize, blockSize>>>(M, N, K, alpha, A, B, beta, C);
else if (TransA == CblasNoTrans && TransB != CblasNoTrans)
kernel_gemm_nt<float><<<gridSize, blockSize>>>(M, N, K, alpha, A, B, beta, C);
else
kernel_gemm_tt<float><<<gridSize, blockSize>>>(M, N, K, alpha, A, B, beta, C);
// cudaError_t er1 = cudaPeekAtLastError();
// CUDA_CHECK(er1);
}
}
#endif // _GEMM16x7_CU_ |
5252809822f136c74043f4e7f5560f012f211c68.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_fr1;
int xdim0_update_halo_kernel1_fr1_h = -1;
__constant__ int ydim0_update_halo_kernel1_fr1;
int ydim0_update_halo_kernel1_fr1_h = -1;
__constant__ int xdim1_update_halo_kernel1_fr1;
int xdim1_update_halo_kernel1_fr1_h = -1;
__constant__ int ydim1_update_halo_kernel1_fr1;
int ydim1_update_halo_kernel1_fr1_h = -1;
__constant__ int xdim2_update_halo_kernel1_fr1;
int xdim2_update_halo_kernel1_fr1_h = -1;
__constant__ int ydim2_update_halo_kernel1_fr1;
int ydim2_update_halo_kernel1_fr1_h = -1;
__constant__ int xdim3_update_halo_kernel1_fr1;
int xdim3_update_halo_kernel1_fr1_h = -1;
__constant__ int ydim3_update_halo_kernel1_fr1;
int ydim3_update_halo_kernel1_fr1_h = -1;
__constant__ int xdim4_update_halo_kernel1_fr1;
int xdim4_update_halo_kernel1_fr1_h = -1;
__constant__ int ydim4_update_halo_kernel1_fr1;
int ydim4_update_halo_kernel1_fr1_h = -1;
__constant__ int xdim5_update_halo_kernel1_fr1;
int xdim5_update_halo_kernel1_fr1_h = -1;
__constant__ int ydim5_update_halo_kernel1_fr1;
int ydim5_update_halo_kernel1_fr1_h = -1;
__constant__ int xdim6_update_halo_kernel1_fr1;
int xdim6_update_halo_kernel1_fr1_h = -1;
__constant__ int ydim6_update_halo_kernel1_fr1;
int ydim6_update_halo_kernel1_fr1_h = -1;
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel1_fr1*(y)+xdim0_update_halo_kernel1_fr1*ydim0_update_halo_kernel1_fr1*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel1_fr1*(y)+xdim1_update_halo_kernel1_fr1*ydim1_update_halo_kernel1_fr1*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_update_halo_kernel1_fr1*(y)+xdim2_update_halo_kernel1_fr1*ydim2_update_halo_kernel1_fr1*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_update_halo_kernel1_fr1*(y)+xdim3_update_halo_kernel1_fr1*ydim3_update_halo_kernel1_fr1*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_update_halo_kernel1_fr1*(y)+xdim4_update_halo_kernel1_fr1*ydim4_update_halo_kernel1_fr1*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_update_halo_kernel1_fr1*(y)+xdim5_update_halo_kernel1_fr1*ydim5_update_halo_kernel1_fr1*(z))
#define OPS_ACC6(x,y,z) (x+xdim6_update_halo_kernel1_fr1*(y)+xdim6_update_halo_kernel1_fr1*ydim6_update_halo_kernel1_fr1*(z))
//user function
__device__
inline void update_halo_kernel1_fr1(double *density0, double *density1,
double *energy0, double *energy1,
double *pressure, double *viscosity,
double *soundspeed , const int* fields) {
if(fields[FIELD_DENSITY0] == 1) density0[OPS_ACC0(0,0,0)] = density0[OPS_ACC0(0,0,-1)];
if(fields[FIELD_DENSITY1] == 1) density1[OPS_ACC1(0,0,0)] = density1[OPS_ACC1(0,0,-1)];
if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC2(0,0,0)] = energy0[OPS_ACC2(0,0,-1)];
if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC3(0,0,0)] = energy1[OPS_ACC3(0,0,-1)];
if(fields[FIELD_PRESSURE] == 1) pressure[OPS_ACC4(0,0,0)] = pressure[OPS_ACC4(0,0,-1)];
if(fields[FIELD_VISCOSITY] == 1) viscosity[OPS_ACC5(0,0,0)] = viscosity[OPS_ACC5(0,0,-1)];
if(fields[FIELD_SOUNDSPEED] == 1) soundspeed[OPS_ACC6(0,0,0)] = soundspeed[OPS_ACC6(0,0,-1)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
__global__ void ops_update_halo_kernel1_fr1(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
const int* __restrict arg7,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel1_fr1 + idx_z * 1 * xdim0_update_halo_kernel1_fr1 * ydim0_update_halo_kernel1_fr1;
arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel1_fr1 + idx_z * 1 * xdim1_update_halo_kernel1_fr1 * ydim1_update_halo_kernel1_fr1;
arg2 += idx_x * 1 + idx_y * 1 * xdim2_update_halo_kernel1_fr1 + idx_z * 1 * xdim2_update_halo_kernel1_fr1 * ydim2_update_halo_kernel1_fr1;
arg3 += idx_x * 1 + idx_y * 1 * xdim3_update_halo_kernel1_fr1 + idx_z * 1 * xdim3_update_halo_kernel1_fr1 * ydim3_update_halo_kernel1_fr1;
arg4 += idx_x * 1 + idx_y * 1 * xdim4_update_halo_kernel1_fr1 + idx_z * 1 * xdim4_update_halo_kernel1_fr1 * ydim4_update_halo_kernel1_fr1;
arg5 += idx_x * 1 + idx_y * 1 * xdim5_update_halo_kernel1_fr1 + idx_z * 1 * xdim5_update_halo_kernel1_fr1 * ydim5_update_halo_kernel1_fr1;
arg6 += idx_x * 1 + idx_y * 1 * xdim6_update_halo_kernel1_fr1 + idx_z * 1 * xdim6_update_halo_kernel1_fr1 * ydim6_update_halo_kernel1_fr1;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel1_fr1(arg0, arg1, arg2, arg3,
arg4, arg5, arg6, arg7);
}
}
// host stub function
void ops_par_loop_update_halo_kernel1_fr1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
ops_timing_realloc(52,"update_halo_kernel1_fr1");
OPS_kernels[52].count++;
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif //OPS_MPI
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0]*args[1].dat->dim;
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0]*args[2].dat->dim;
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0]*args[3].dat->dim;
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0]*args[4].dat->dim;
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0]*args[5].dat->dim;
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0]*args[6].dat->dim;
int ydim6 = args[6].dat->size[1];
//Timing
double t1,t2,c1,c2;
ops_timers_core(&c2,&t2);
if (xdim0 != xdim0_update_halo_kernel1_fr1_h || ydim0 != ydim0_update_halo_kernel1_fr1_h || xdim1 != xdim1_update_halo_kernel1_fr1_h || ydim1 != ydim1_update_halo_kernel1_fr1_h || xdim2 != xdim2_update_halo_kernel1_fr1_h || ydim2 != ydim2_update_halo_kernel1_fr1_h || xdim3 != xdim3_update_halo_kernel1_fr1_h || ydim3 != ydim3_update_halo_kernel1_fr1_h || xdim4 != xdim4_update_halo_kernel1_fr1_h || ydim4 != ydim4_update_halo_kernel1_fr1_h || xdim5 != xdim5_update_halo_kernel1_fr1_h || ydim5 != ydim5_update_halo_kernel1_fr1_h || xdim6 != xdim6_update_halo_kernel1_fr1_h || ydim6 != ydim6_update_halo_kernel1_fr1_h) {
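    // The per-dat dimensions live in __constant__ memory; they are re-uploaded only
    // when they differ from the cached host-side copies (*_h) checked above.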
hipMemcpyToSymbol( xdim0_update_halo_kernel1_fr1, &xdim0, sizeof(int) );
xdim0_update_halo_kernel1_fr1_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel1_fr1, &ydim0, sizeof(int) );
ydim0_update_halo_kernel1_fr1_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel1_fr1, &xdim1, sizeof(int) );
xdim1_update_halo_kernel1_fr1_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel1_fr1, &ydim1, sizeof(int) );
ydim1_update_halo_kernel1_fr1_h = ydim1;
hipMemcpyToSymbol( xdim2_update_halo_kernel1_fr1, &xdim2, sizeof(int) );
xdim2_update_halo_kernel1_fr1_h = xdim2;
hipMemcpyToSymbol( ydim2_update_halo_kernel1_fr1, &ydim2, sizeof(int) );
ydim2_update_halo_kernel1_fr1_h = ydim2;
hipMemcpyToSymbol( xdim3_update_halo_kernel1_fr1, &xdim3, sizeof(int) );
xdim3_update_halo_kernel1_fr1_h = xdim3;
hipMemcpyToSymbol( ydim3_update_halo_kernel1_fr1, &ydim3, sizeof(int) );
ydim3_update_halo_kernel1_fr1_h = ydim3;
hipMemcpyToSymbol( xdim4_update_halo_kernel1_fr1, &xdim4, sizeof(int) );
xdim4_update_halo_kernel1_fr1_h = xdim4;
hipMemcpyToSymbol( ydim4_update_halo_kernel1_fr1, &ydim4, sizeof(int) );
ydim4_update_halo_kernel1_fr1_h = ydim4;
hipMemcpyToSymbol( xdim5_update_halo_kernel1_fr1, &xdim5, sizeof(int) );
xdim5_update_halo_kernel1_fr1_h = xdim5;
hipMemcpyToSymbol( ydim5_update_halo_kernel1_fr1, &ydim5, sizeof(int) );
ydim5_update_halo_kernel1_fr1_h = ydim5;
hipMemcpyToSymbol( xdim6_update_halo_kernel1_fr1, &xdim6, sizeof(int) );
xdim6_update_halo_kernel1_fr1_h = xdim6;
hipMemcpyToSymbol( ydim6_update_halo_kernel1_fr1, &ydim6, sizeof(int) );
ydim6_update_halo_kernel1_fr1_h = ydim6;
}
int *arg7h = (int *)arg7.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
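  // arg7 (the fields flag array) has now been staged into the OPS constant buffer
  // on the host and copied to the device for use by the kernel launch below.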
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
int dat5 = args[5].dat->elem_size;
int dat6 = args[6].dat->elem_size;
char *p_a[8];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif //OPS_MPI
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d];
#endif //OPS_MPI
int base2 = dat2 * 1 *
(start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d];
#endif //OPS_MPI
int base3 = dat3 * 1 *
(start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d];
#endif //OPS_MPI
int base4 = dat4 * 1 *
(start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d];
#endif //OPS_MPI
int base5 = dat5 * 1 *
(start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]);
p_a[5] = (char *)args[5].data_d + base5;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d];
#endif //OPS_MPI
int base6 = dat6 * 1 *
(start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]);
p_a[6] = (char *)args[6].data_d + base6;
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
ops_timers_core(&c1,&t1);
OPS_kernels[52].mpi_time += t1-t2;
//call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel1_fr1), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size);
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
}
ops_timers_core(&c2,&t2);
OPS_kernels[52].time += t2-t1;
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
//Update kernel record
OPS_kernels[52].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[52].transfer += ops_compute_transfer(dim, range, &arg1);
OPS_kernels[52].transfer += ops_compute_transfer(dim, range, &arg2);
OPS_kernels[52].transfer += ops_compute_transfer(dim, range, &arg3);
OPS_kernels[52].transfer += ops_compute_transfer(dim, range, &arg4);
OPS_kernels[52].transfer += ops_compute_transfer(dim, range, &arg5);
OPS_kernels[52].transfer += ops_compute_transfer(dim, range, &arg6);
}
| 5252809822f136c74043f4e7f5560f012f211c68.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_fr1;
int xdim0_update_halo_kernel1_fr1_h = -1;
__constant__ int ydim0_update_halo_kernel1_fr1;
int ydim0_update_halo_kernel1_fr1_h = -1;
__constant__ int xdim1_update_halo_kernel1_fr1;
int xdim1_update_halo_kernel1_fr1_h = -1;
__constant__ int ydim1_update_halo_kernel1_fr1;
int ydim1_update_halo_kernel1_fr1_h = -1;
__constant__ int xdim2_update_halo_kernel1_fr1;
int xdim2_update_halo_kernel1_fr1_h = -1;
__constant__ int ydim2_update_halo_kernel1_fr1;
int ydim2_update_halo_kernel1_fr1_h = -1;
__constant__ int xdim3_update_halo_kernel1_fr1;
int xdim3_update_halo_kernel1_fr1_h = -1;
__constant__ int ydim3_update_halo_kernel1_fr1;
int ydim3_update_halo_kernel1_fr1_h = -1;
__constant__ int xdim4_update_halo_kernel1_fr1;
int xdim4_update_halo_kernel1_fr1_h = -1;
__constant__ int ydim4_update_halo_kernel1_fr1;
int ydim4_update_halo_kernel1_fr1_h = -1;
__constant__ int xdim5_update_halo_kernel1_fr1;
int xdim5_update_halo_kernel1_fr1_h = -1;
__constant__ int ydim5_update_halo_kernel1_fr1;
int ydim5_update_halo_kernel1_fr1_h = -1;
__constant__ int xdim6_update_halo_kernel1_fr1;
int xdim6_update_halo_kernel1_fr1_h = -1;
__constant__ int ydim6_update_halo_kernel1_fr1;
int ydim6_update_halo_kernel1_fr1_h = -1;
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel1_fr1*(y)+xdim0_update_halo_kernel1_fr1*ydim0_update_halo_kernel1_fr1*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel1_fr1*(y)+xdim1_update_halo_kernel1_fr1*ydim1_update_halo_kernel1_fr1*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_update_halo_kernel1_fr1*(y)+xdim2_update_halo_kernel1_fr1*ydim2_update_halo_kernel1_fr1*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_update_halo_kernel1_fr1*(y)+xdim3_update_halo_kernel1_fr1*ydim3_update_halo_kernel1_fr1*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_update_halo_kernel1_fr1*(y)+xdim4_update_halo_kernel1_fr1*ydim4_update_halo_kernel1_fr1*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_update_halo_kernel1_fr1*(y)+xdim5_update_halo_kernel1_fr1*ydim5_update_halo_kernel1_fr1*(z))
#define OPS_ACC6(x,y,z) (x+xdim6_update_halo_kernel1_fr1*(y)+xdim6_update_halo_kernel1_fr1*ydim6_update_halo_kernel1_fr1*(z))
//user function
__device__
inline void update_halo_kernel1_fr1(double *density0, double *density1,
double *energy0, double *energy1,
double *pressure, double *viscosity,
double *soundspeed , const int* fields) {
if(fields[FIELD_DENSITY0] == 1) density0[OPS_ACC0(0,0,0)] = density0[OPS_ACC0(0,0,-1)];
if(fields[FIELD_DENSITY1] == 1) density1[OPS_ACC1(0,0,0)] = density1[OPS_ACC1(0,0,-1)];
if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC2(0,0,0)] = energy0[OPS_ACC2(0,0,-1)];
if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC3(0,0,0)] = energy1[OPS_ACC3(0,0,-1)];
if(fields[FIELD_PRESSURE] == 1) pressure[OPS_ACC4(0,0,0)] = pressure[OPS_ACC4(0,0,-1)];
if(fields[FIELD_VISCOSITY] == 1) viscosity[OPS_ACC5(0,0,0)] = viscosity[OPS_ACC5(0,0,-1)];
if(fields[FIELD_SOUNDSPEED] == 1) soundspeed[OPS_ACC6(0,0,0)] = soundspeed[OPS_ACC6(0,0,-1)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
__global__ void ops_update_halo_kernel1_fr1(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
const int* __restrict arg7,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel1_fr1 + idx_z * 1 * xdim0_update_halo_kernel1_fr1 * ydim0_update_halo_kernel1_fr1;
arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel1_fr1 + idx_z * 1 * xdim1_update_halo_kernel1_fr1 * ydim1_update_halo_kernel1_fr1;
arg2 += idx_x * 1 + idx_y * 1 * xdim2_update_halo_kernel1_fr1 + idx_z * 1 * xdim2_update_halo_kernel1_fr1 * ydim2_update_halo_kernel1_fr1;
arg3 += idx_x * 1 + idx_y * 1 * xdim3_update_halo_kernel1_fr1 + idx_z * 1 * xdim3_update_halo_kernel1_fr1 * ydim3_update_halo_kernel1_fr1;
arg4 += idx_x * 1 + idx_y * 1 * xdim4_update_halo_kernel1_fr1 + idx_z * 1 * xdim4_update_halo_kernel1_fr1 * ydim4_update_halo_kernel1_fr1;
arg5 += idx_x * 1 + idx_y * 1 * xdim5_update_halo_kernel1_fr1 + idx_z * 1 * xdim5_update_halo_kernel1_fr1 * ydim5_update_halo_kernel1_fr1;
arg6 += idx_x * 1 + idx_y * 1 * xdim6_update_halo_kernel1_fr1 + idx_z * 1 * xdim6_update_halo_kernel1_fr1 * ydim6_update_halo_kernel1_fr1;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel1_fr1(arg0, arg1, arg2, arg3,
arg4, arg5, arg6, arg7);
}
}
// host stub function
void ops_par_loop_update_halo_kernel1_fr1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
ops_timing_realloc(52,"update_halo_kernel1_fr1");
OPS_kernels[52].count++;
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif //OPS_MPI
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0]*args[1].dat->dim;
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0]*args[2].dat->dim;
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0]*args[3].dat->dim;
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0]*args[4].dat->dim;
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0]*args[5].dat->dim;
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0]*args[6].dat->dim;
int ydim6 = args[6].dat->size[1];
//Timing
double t1,t2,c1,c2;
ops_timers_core(&c2,&t2);
if (xdim0 != xdim0_update_halo_kernel1_fr1_h || ydim0 != ydim0_update_halo_kernel1_fr1_h || xdim1 != xdim1_update_halo_kernel1_fr1_h || ydim1 != ydim1_update_halo_kernel1_fr1_h || xdim2 != xdim2_update_halo_kernel1_fr1_h || ydim2 != ydim2_update_halo_kernel1_fr1_h || xdim3 != xdim3_update_halo_kernel1_fr1_h || ydim3 != ydim3_update_halo_kernel1_fr1_h || xdim4 != xdim4_update_halo_kernel1_fr1_h || ydim4 != ydim4_update_halo_kernel1_fr1_h || xdim5 != xdim5_update_halo_kernel1_fr1_h || ydim5 != ydim5_update_halo_kernel1_fr1_h || xdim6 != xdim6_update_halo_kernel1_fr1_h || ydim6 != ydim6_update_halo_kernel1_fr1_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel1_fr1, &xdim0, sizeof(int) );
xdim0_update_halo_kernel1_fr1_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel1_fr1, &ydim0, sizeof(int) );
ydim0_update_halo_kernel1_fr1_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel1_fr1, &xdim1, sizeof(int) );
xdim1_update_halo_kernel1_fr1_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel1_fr1, &ydim1, sizeof(int) );
ydim1_update_halo_kernel1_fr1_h = ydim1;
cudaMemcpyToSymbol( xdim2_update_halo_kernel1_fr1, &xdim2, sizeof(int) );
xdim2_update_halo_kernel1_fr1_h = xdim2;
cudaMemcpyToSymbol( ydim2_update_halo_kernel1_fr1, &ydim2, sizeof(int) );
ydim2_update_halo_kernel1_fr1_h = ydim2;
cudaMemcpyToSymbol( xdim3_update_halo_kernel1_fr1, &xdim3, sizeof(int) );
xdim3_update_halo_kernel1_fr1_h = xdim3;
cudaMemcpyToSymbol( ydim3_update_halo_kernel1_fr1, &ydim3, sizeof(int) );
ydim3_update_halo_kernel1_fr1_h = ydim3;
cudaMemcpyToSymbol( xdim4_update_halo_kernel1_fr1, &xdim4, sizeof(int) );
xdim4_update_halo_kernel1_fr1_h = xdim4;
cudaMemcpyToSymbol( ydim4_update_halo_kernel1_fr1, &ydim4, sizeof(int) );
ydim4_update_halo_kernel1_fr1_h = ydim4;
cudaMemcpyToSymbol( xdim5_update_halo_kernel1_fr1, &xdim5, sizeof(int) );
xdim5_update_halo_kernel1_fr1_h = xdim5;
cudaMemcpyToSymbol( ydim5_update_halo_kernel1_fr1, &ydim5, sizeof(int) );
ydim5_update_halo_kernel1_fr1_h = ydim5;
cudaMemcpyToSymbol( xdim6_update_halo_kernel1_fr1, &xdim6, sizeof(int) );
xdim6_update_halo_kernel1_fr1_h = xdim6;
cudaMemcpyToSymbol( ydim6_update_halo_kernel1_fr1, &ydim6, sizeof(int) );
ydim6_update_halo_kernel1_fr1_h = ydim6;
}
int *arg7h = (int *)arg7.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
int dat4 = args[4].dat->elem_size;
int dat5 = args[5].dat->elem_size;
int dat6 = args[6].dat->elem_size;
char *p_a[8];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
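  // base0 converts the 3-D start offset (adjusted for the dat's base and halo
  // offsets d_m) into a byte offset into the flattened array; the same pattern
  // repeats below for args 1-6.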
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif //OPS_MPI
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d];
#endif //OPS_MPI
int base2 = dat2 * 1 *
(start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d];
#endif //OPS_MPI
int base3 = dat3 * 1 *
(start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d];
#endif //OPS_MPI
int base4 = dat4 * 1 *
(start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d];
#endif //OPS_MPI
int base5 = dat5 * 1 *
(start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]);
p_a[5] = (char *)args[5].data_d + base5;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d];
#endif //OPS_MPI
int base6 = dat6 * 1 *
(start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]);
p_a[6] = (char *)args[6].data_d + base6;
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
ops_timers_core(&c1,&t1);
OPS_kernels[52].mpi_time += t1-t2;
//call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel1_fr1<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size);
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
}
ops_timers_core(&c2,&t2);
OPS_kernels[52].time += t2-t1;
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
//Update kernel record
OPS_kernels[52].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[52].transfer += ops_compute_transfer(dim, range, &arg1);
OPS_kernels[52].transfer += ops_compute_transfer(dim, range, &arg2);
OPS_kernels[52].transfer += ops_compute_transfer(dim, range, &arg3);
OPS_kernels[52].transfer += ops_compute_transfer(dim, range, &arg4);
OPS_kernels[52].transfer += ops_compute_transfer(dim, range, &arg5);
OPS_kernels[52].transfer += ops_compute_transfer(dim, range, &arg6);
}
|
4c024bf59cc7bf220d7c4470dba96fe0b1e33c93.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <VLI.hpp>
#include <Timer.h>
#include <iostream>
using std::cout;
using std::endl;
__global__ void CheckPrime_Kernel(int A, int B, CVLI C, bool* Check)
{
  // Note: this comment block was carried over from a simpler multiplication
  // tutorial. Each block does use 256 threads, but main() launches a 4096 x 4096
  // grid (not 128 blocks), and this benchmark kernel only derives a unique thread
  // ID and clears *Check; the multiplication code is commented out below.
unsigned int tid = (blockIdx.y * 4096 * 256) + blockIdx.x * 256 + threadIdx.x; // This gives every thread a unique ID.
*Check = false;
// By no coincidence, we'll be using this thread ID to determine which data elements to multiply.
//pResult[tid] = pDataA[tid] * pDataB[tid]; // Each thread only multiplies one data element.
//pResult[tid] = pDataA[tid] * pDataB[tid] / 12.34567;
//pResult[tid] = sqrt(pDataA[tid] * pDataB[tid] / 12.34567);
//pResult[tid] = sqrt(pDataA[tid] * pDataB[tid] / 12.34567) * sin(pDataA[tid]);
}
int main(int argc, char * argv[])
{
unsigned int hTimer;
CUT_DEVICE_INIT(argc, argv);
CUT_SAFE_CALL(cutCreateTimer(&hTimer));
//dim3 blockGridRows(blockGridWidth, blockGridHeight);
//dim3 threadBlockRows(256, 1);
dim3 blockGridRows(4096, 4096, 1);
dim3 threadBlockRows(256, 1, 1);
CUT_SAFE_CALL(cutResetTimer(hTimer));
CUT_SAFE_CALL(cutStartTimer(hTimer));
int A=0, B=0;
bool Check = true;
bool *Checkptr;
CUDA_SAFE_CALL( hipMalloc( (void **)&Checkptr, sizeof(bool)) );
CUDA_SAFE_CALL( hipMemcpy(Checkptr, &Check, sizeof(bool), hipMemcpyHostToDevice) );
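  // Check is a single device-side flag: initialised to true on the host, cleared by
  // every thread inside the kernel, and copied back after the launches complete.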
CVLI C;
__int64 tStart, tEnd;
tStart = GetTimeus64();
for (unsigned int i=0; i<2; i++)
{
hipLaunchKernelGGL(( CheckPrime_Kernel), dim3(blockGridRows), dim3(threadBlockRows), 0, 0, A, B, C, Checkptr);
CUT_CHECK_ERROR("multiplyNumbersGPU() execution failed\n");
CUDA_SAFE_CALL(hipDeviceSynchronize());
}
CUDA_SAFE_CALL( hipMemcpy(&Check, Checkptr, sizeof(bool), hipMemcpyDeviceToHost) );
cout << "Check = " << Check << endl;
tEnd = GetTimeus64();
cout << "Time taken for kernel execution: " << ((double)(tEnd-tStart))/(1000000.) << " seconds." << endl;
CUT_SAFE_CALL(cutStopTimer(hTimer));
double gpuTime = cutGetTimerValue(hTimer);
cout << "GPU time: " << gpuTime*0.001 << " seconds." << endl;
CUT_SAFE_CALL(cutDeleteTimer(hTimer));
CUT_EXIT(argc, argv);
}
| 4c024bf59cc7bf220d7c4470dba96fe0b1e33c93.cu | #include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <VLI.hpp>
#include <Timer.h>
#include <iostream>
using std::cout;
using std::endl;
__global__ void CheckPrime_Kernel(int A, int B, CVLI C, bool* Check)
{
  // Note: this comment block was carried over from a simpler multiplication
  // tutorial. Each block does use 256 threads, but main() launches a 4096 x 4096
  // grid (not 128 blocks), and this benchmark kernel only derives a unique thread
  // ID and clears *Check; the multiplication code is commented out below.
unsigned int tid = (blockIdx.y * 4096 * 256) + blockIdx.x * 256 + threadIdx.x; // This gives every thread a unique ID.
*Check = false;
// By no coincidence, we'll be using this thread ID to determine which data elements to multiply.
//pResult[tid] = pDataA[tid] * pDataB[tid]; // Each thread only multiplies one data element.
//pResult[tid] = pDataA[tid] * pDataB[tid] / 12.34567;
//pResult[tid] = sqrt(pDataA[tid] * pDataB[tid] / 12.34567);
//pResult[tid] = sqrt(pDataA[tid] * pDataB[tid] / 12.34567) * sin(pDataA[tid]);
}
int main(int argc, char * argv[])
{
unsigned int hTimer;
CUT_DEVICE_INIT(argc, argv);
CUT_SAFE_CALL(cutCreateTimer(&hTimer));
//dim3 blockGridRows(blockGridWidth, blockGridHeight);
//dim3 threadBlockRows(256, 1);
dim3 blockGridRows(4096, 4096, 1);
dim3 threadBlockRows(256, 1, 1);
CUT_SAFE_CALL(cutResetTimer(hTimer));
CUT_SAFE_CALL(cutStartTimer(hTimer));
int A=0, B=0;
bool Check = true;
bool *Checkptr;
CUDA_SAFE_CALL( cudaMalloc( (void **)&Checkptr, sizeof(bool)) );
CUDA_SAFE_CALL( cudaMemcpy(Checkptr, &Check, sizeof(bool), cudaMemcpyHostToDevice) );
CVLI C;
__int64 tStart, tEnd;
tStart = GetTimeus64();
for (unsigned int i=0; i<2; i++)
{
CheckPrime_Kernel<<<blockGridRows, threadBlockRows>>>(A, B, C, Checkptr);
CUT_CHECK_ERROR("multiplyNumbersGPU() execution failed\n");
CUDA_SAFE_CALL(cudaThreadSynchronize());
}
CUDA_SAFE_CALL( cudaMemcpy(&Check, Checkptr, sizeof(bool), cudaMemcpyDeviceToHost) );
cout << "Check = " << Check << endl;
tEnd = GetTimeus64();
cout << "Time taken for kernel execution: " << ((double)(tEnd-tStart))/(1000000.) << " seconds." << endl;
CUT_SAFE_CALL(cutStopTimer(hTimer));
double gpuTime = cutGetTimerValue(hTimer);
cout << "GPU time: " << gpuTime*0.001 << " seconds." << endl;
CUT_SAFE_CALL(cutDeleteTimer(hTimer));
CUT_EXIT(argc, argv);
}
|
11c043ab64fe3b765c283f1b904d814e49af8fba.hip | // !!! This is a file automatically generated by hipify!!!
/*
CUDA BarnesHut v2.1: Simulation of the gravitational forces
in a galactic cluster using the Barnes-Hut n-body algorithm
Copyright (c) 2011, Texas State University-San Marcos. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Texas State University-San Marcos nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <hip/hip_runtime.h>
// thread count
#define THREADS1 512 /* must be a power of 2 */
#define THREADS2 1024
#define THREADS3 1024
#define THREADS4 256
#define THREADS5 256
#define THREADS6 512
// block count = factor * #SMs
#define FACTOR1 3
#define FACTOR2 1
#define FACTOR3 1 /* must all be resident at the same time */
#define FACTOR4 1 /* must all be resident at the same time */
#define FACTOR5 5
#define FACTOR6 3
#define WARPSIZE 32
#define MAXDEPTH 32
/******************************************************************************/
// childd is aliased with velxd, velyd, velzd, accxd, accyd, acczd, and sortd but they never use the same memory locations
__constant__ int nnodesd, nbodiesd;
__constant__ float dtimed, dthfd, epssqd, itolsqd;
__constant__ volatile float *massd, *posxd, *posyd, *poszd, *velxd, *velyd, *velzd, *accxd, *accyd, *acczd;
__constant__ volatile float *maxxd, *maxyd, *maxzd, *minxd, *minyd, *minzd;
__constant__ volatile int *errd, *sortd, *childd, *countd, *startd;
__device__ volatile int stepd, bottomd, maxdepthd, blkcntd;
__device__ volatile float radiusd;
/******************************************************************************/
/*** initialize memory ********************************************************/
/******************************************************************************/
__global__ void InitializationKernel()
{
*errd = 0;
stepd = -1;
maxdepthd = 1;
blkcntd = 0;
}
/******************************************************************************/
/*** compute center and radius ************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS1, FACTOR1)
void BoundingBoxKernel()
{
register int i, j, k, inc;
register float val, minx, maxx, miny, maxy, minz, maxz;
__shared__ volatile float sminx[THREADS1], smaxx[THREADS1], sminy[THREADS1], smaxy[THREADS1], sminz[THREADS1], smaxz[THREADS1];
// initialize with valid data (in case #bodies < #threads)
minx = maxx = posxd[0];
miny = maxy = posyd[0];
minz = maxz = poszd[0];
// scan all bodies
i = threadIdx.x;
inc = THREADS1 * gridDim.x;
for (j = i + blockIdx.x * THREADS1; j < nbodiesd; j += inc) {
val = posxd[j];
minx = min(minx, val);
maxx = max(maxx, val);
val = posyd[j];
miny = min(miny, val);
maxy = max(maxy, val);
val = poszd[j];
minz = min(minz, val);
maxz = max(maxz, val);
}
// reduction in shared memory
sminx[i] = minx;
smaxx[i] = maxx;
sminy[i] = miny;
smaxy[i] = maxy;
sminz[i] = minz;
smaxz[i] = maxz;
for (j = THREADS1 / 2; j > 0; j /= 2) {
__syncthreads();
if (i < j) {
k = i + j;
sminx[i] = minx = min(minx, sminx[k]);
smaxx[i] = maxx = max(maxx, smaxx[k]);
sminy[i] = miny = min(miny, sminy[k]);
smaxy[i] = maxy = max(maxy, smaxy[k]);
sminz[i] = minz = min(minz, sminz[k]);
smaxz[i] = maxz = max(maxz, smaxz[k]);
}
}
// write block result to global memory
if (i == 0) {
k = blockIdx.x;
minxd[k] = minx;
maxxd[k] = maxx;
minyd[k] = miny;
maxyd[k] = maxy;
minzd[k] = minz;
maxzd[k] = maxz;
inc = gridDim.x - 1;
if (inc == atomicInc((unsigned int *)&blkcntd, inc)) {
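      // atomicInc wraps the counter back to 0 at gridDim.x - 1, so exactly one
      // block -- the last one to finish -- reads the old value equal to inc and
      // performs the global reduction and root-node setup below.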
// I'm the last block, so combine all block results
for (j = 0; j <= inc; j++) {
minx = min(minx, minxd[j]);
maxx = max(maxx, maxxd[j]);
miny = min(miny, minyd[j]);
maxy = max(maxy, maxyd[j]);
minz = min(minz, minzd[j]);
maxz = max(maxz, maxzd[j]);
}
// compute 'radius'
val = max(maxx - minx, maxy - miny);
radiusd = max(val, maxz - minz) * 0.5f;
// create root node
k = nnodesd;
bottomd = k;
massd[k] = -1.0f;
startd[k] = 0;
posxd[k] = (minx + maxx) * 0.5f;
posyd[k] = (miny + maxy) * 0.5f;
poszd[k] = (minz + maxz) * 0.5f;
k *= 8;
for (i = 0; i < 8; i++) childd[k + i] = -1;
stepd++;
}
}
}
/******************************************************************************/
/*** build tree ***************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS2, FACTOR2)
void TreeBuildingKernel()
{
register int i, j, k, depth, localmaxdepth, skip, inc;
register float x, y, z, r;
register float px, py, pz;
register int ch, n, cell, locked, patch;
register float radius, rootx, rooty, rootz;
// cache root data
radius = radiusd;
rootx = posxd[nnodesd];
rooty = posyd[nnodesd];
rootz = poszd[nnodesd];
localmaxdepth = 1;
skip = 1;
inc = blockDim.x * gridDim.x;
i = threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all bodies assigned to thread
while (i < nbodiesd) {
if (skip != 0) {
// new body, so start traversing at root
skip = 0;
px = posxd[i];
py = posyd[i];
pz = poszd[i];
n = nnodesd;
depth = 1;
r = radius;
j = 0;
// determine which child to follow
if (rootx < px) j = 1;
if (rooty < py) j += 2;
if (rootz < pz) j += 4;
}
// follow path to leaf cell
ch = childd[n*8+j];
while (ch >= nbodiesd) {
n = ch;
depth++;
r *= 0.5f;
j = 0;
// determine which child to follow
if (posxd[n] < px) j = 1;
if (posyd[n] < py) j += 2;
if (poszd[n] < pz) j += 4;
ch = childd[n*8+j];
}
if (ch != -2) { // skip if child pointer is locked and try again later
locked = n*8+j;
if (ch == atomicCAS((int *)&childd[locked], ch, -2)) { // try to lock
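        // -2 marks the child slot as locked: other threads that read -2 skip their
        // insertion attempt (see the ch != -2 test above) and retry later.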
if (ch == -1) {
// if null, just insert the new body
childd[locked] = i;
} else { // there already is a body in this position
patch = -1;
// create new cell(s) and insert the old and new body
do {
depth++;
cell = atomicSub((int *)&bottomd, 1) - 1;
if (cell <= nbodiesd) {
*errd = 1;
bottomd = nnodesd;
}
patch = max(patch, cell);
x = (j & 1) * r;
y = ((j >> 1) & 1) * r;
z = ((j >> 2) & 1) * r;
r *= 0.5f;
massd[cell] = -1.0f;
startd[cell] = -1;
x = posxd[cell] = posxd[n] - r + x;
y = posyd[cell] = posyd[n] - r + y;
z = poszd[cell] = poszd[n] - r + z;
for (k = 0; k < 8; k++) childd[cell*8+k] = -1;
if (patch != cell) {
childd[n*8+j] = cell;
}
j = 0;
if (x < posxd[ch]) j = 1;
if (y < posyd[ch]) j += 2;
if (z < poszd[ch]) j += 4;
childd[cell*8+j] = ch;
n = cell;
j = 0;
if (x < px) j = 1;
if (y < py) j += 2;
if (z < pz) j += 4;
ch = childd[n*8+j];
// repeat until the two bodies are different children
} while (ch >= 0);
childd[n*8+j] = i;
__threadfence(); // push out subtree
childd[locked] = patch;
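            // The store above links the topmost new cell into the tree and, because
            // the slot previously held the -2 lock value, simultaneously releases it.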
}
localmaxdepth = max(depth, localmaxdepth);
i += inc; // move on to next body
skip = 1;
}
}
__syncthreads(); // throttle
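    // The block-wide barrier above presumably throttles threads whose insertion
    // attempt failed, giving lock holders time to finish before the next retry.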
}
// record maximum tree depth
atomicMax((int *)&maxdepthd, localmaxdepth);
}
/******************************************************************************/
/*** compute center of mass ***************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS3, FACTOR3)
void SummarizationKernel()
{
register int i, j, k, ch, inc, missing, cnt, bottom;
register float m, cm, px, py, pz;
__shared__ volatile int child[THREADS3 * 8];
bottom = bottomd;
inc = blockDim.x * gridDim.x;
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += inc;
missing = 0;
// iterate over all cells assigned to thread
while (k <= nnodesd) {
if (missing == 0) {
// new cell, so initialize
cm = 0.0f;
px = 0.0f;
py = 0.0f;
pz = 0.0f;
cnt = 0;
j = 0;
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
if (ch >= 0) {
if (i != j) {
// move children to front (needed later for speed)
childd[k*8+i] = -1;
childd[k*8+j] = ch;
}
child[missing*THREADS3+threadIdx.x] = ch; // cache missing children
m = massd[ch];
missing++;
if (m >= 0.0f) {
// child is ready
missing--;
if (ch >= nbodiesd) { // count bodies (needed later)
cnt += countd[ch] - 1;
}
// add child's contribution
cm += m;
px += posxd[ch] * m;
py += posyd[ch] * m;
pz += poszd[ch] * m;
}
j++;
}
}
cnt += j;
}
if (missing != 0) {
do {
// poll missing child
ch = child[(missing-1)*THREADS3+threadIdx.x];
m = massd[ch];
if (m >= 0.0f) {
// child is now ready
missing--;
if (ch >= nbodiesd) {
// count bodies (needed later)
cnt += countd[ch] - 1;
}
// add child's contribution
cm += m;
px += posxd[ch] * m;
py += posyd[ch] * m;
pz += poszd[ch] * m;
}
// repeat until we are done or child is not ready
} while ((m >= 0.0f) && (missing != 0));
}
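      // If some children are still unready (mass < 0), k is not advanced, so this
      // cell is simply re-polled on the next pass of the outer while loop.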
if (missing == 0) {
// all children are ready, so store computed information
countd[k] = cnt;
m = 1.0f / cm;
posxd[k] = px * m;
posyd[k] = py * m;
poszd[k] = pz * m;
__threadfence(); // make sure data are visible before setting mass
massd[k] = cm;
k += inc; // move on to next cell
}
}
}
/******************************************************************************/
/*** sort bodies **************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS4, FACTOR4)
void SortKernel()
{
register int i, k, ch, dec, start, bottom;
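  // Cells are walked from the root (nnodesd) down toward bottomd; each cell hands
  // its start index on to its children, so sortd ends up listing bodies grouped by
  // spatial proximity, which the force kernel then traverses in order.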
bottom = bottomd;
dec = blockDim.x * gridDim.x;
k = nnodesd + 1 - dec + threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all cells assigned to thread
while (k >= bottom) {
start = startd[k];
if (start >= 0) {
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
if (ch >= nbodiesd) {
// child is a cell
startd[ch] = start; // set start ID of child
start += countd[ch]; // add #bodies in subtree
} else if (ch >= 0) {
// child is a body
sortd[start] = ch; // record body in 'sorted' array
start++;
}
}
k -= dec; // move on to next cell
}
__syncthreads(); // throttle
}
}
/******************************************************************************/
/*** compute force ************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS5, FACTOR5)
void ForceCalculationKernel()
{
register int i, j, k, n, depth, base, sbase, diff;
register float px, py, pz, ax, ay, az, dx, dy, dz, tmp;
__shared__ volatile int pos[MAXDEPTH * THREADS5/WARPSIZE], node[MAXDEPTH * THREADS5/WARPSIZE];
__shared__ volatile float dq[MAXDEPTH * THREADS5/WARPSIZE];
__shared__ volatile int step, maxdepth;
if (0 == threadIdx.x) {
step = stepd;
maxdepth = maxdepthd;
tmp = radiusd;
// precompute values that depend only on tree level
dq[0] = tmp * tmp * itolsqd;
for (i = 1; i < maxdepth; i++) {
dq[i] = dq[i - 1] * 0.25f;
}
if (maxdepth > MAXDEPTH) {
*errd = maxdepth;
}
}
__syncthreads();
if (maxdepth <= MAXDEPTH) {
// figure out first thread in each warp (lane 0)
base = threadIdx.x / WARPSIZE;
sbase = base * WARPSIZE;
j = base * MAXDEPTH;
diff = threadIdx.x - sbase;
// make multiple copies to avoid index calculations later
if (diff < MAXDEPTH) {
dq[diff+j] = dq[diff];
}
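      // Each warp owns its own MAXDEPTH-deep slice of the node/pos stacks (offset
      // j = warp_id * MAXDEPTH); only the warp's first lane (sbase == threadIdx.x)
      // pushes and pops, and all lanes of the warp descend the tree in lockstep.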
__syncthreads();
// iterate over all bodies assigned to thread
for (k = threadIdx.x + blockIdx.x * blockDim.x; k < nbodiesd; k += blockDim.x * gridDim.x) {
i = sortd[k]; // get permuted/sorted index
// cache position info
px = posxd[i];
py = posyd[i];
pz = poszd[i];
ax = 0.0f;
ay = 0.0f;
az = 0.0f;
// initialize iteration stack, i.e., push root node onto stack
depth = j;
if (sbase == threadIdx.x) {
node[j] = nnodesd;
pos[j] = 0;
}
__threadfence(); // make sure it's visible
while (depth >= j) {
// stack is not empty
while (pos[depth] < 8) {
// node on top of stack has more children to process
n = childd[node[depth]*8+pos[depth]]; // load child pointer
if (sbase == threadIdx.x) {
// I'm the first thread in the warp
pos[depth]++;
}
__threadfence(); // make sure it's visible
if (n >= 0) {
dx = posxd[n] - px;
dy = posyd[n] - py;
dz = poszd[n] - pz;
tmp = dx*dx + (dy*dy + (dz*dz + epssqd)); // compute distance squared (plus softening)
if ((n < nbodiesd) || __all(tmp >= dq[depth])) { // check if all threads agree that cell is far enough away (or is a body)
tmp = rsqrtf(tmp); // compute distance
tmp = massd[n] * tmp * tmp * tmp;
ax += dx * tmp;
ay += dy * tmp;
az += dz * tmp;
} else {
// push cell onto stack
depth++;
if (sbase == threadIdx.x) {
node[depth] = n;
pos[depth] = 0;
}
__threadfence(); // make sure it's visible
}
} else {
depth = max(j, depth - 1); // early out because all remaining children are also zero
}
}
depth--; // done with this level
}
if (step > 0) {
// update velocity
velxd[i] += (ax - accxd[i]) * dthfd;
velyd[i] += (ay - accyd[i]) * dthfd;
velzd[i] += (az - acczd[i]) * dthfd;
}
// save computed acceleration
accxd[i] = ax;
accyd[i] = ay;
acczd[i] = az;
}
}
}
/******************************************************************************/
/*** advance bodies ***********************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS6, FACTOR6)
void IntegrationKernel()
{
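  // Leapfrog (kick-drift-kick) update: velocities advance by a half step (dthfd),
  // positions by a full step (dtimed), then velocities by the remaining half step.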
register int i, inc;
register float dvelx, dvely, dvelz;
register float velhx, velhy, velhz;
// iterate over all bodies assigned to thread
inc = blockDim.x * gridDim.x;
for (i = threadIdx.x + blockIdx.x * blockDim.x; i < nbodiesd; i += inc) {
// integrate
dvelx = accxd[i] * dthfd;
dvely = accyd[i] * dthfd;
dvelz = acczd[i] * dthfd;
velhx = velxd[i] + dvelx;
velhy = velyd[i] + dvely;
velhz = velzd[i] + dvelz;
posxd[i] += velhx * dtimed;
posyd[i] += velhy * dtimed;
poszd[i] += velhz * dtimed;
velxd[i] = velhx + dvelx;
velyd[i] = velhy + dvely;
velzd[i] = velhz + dvelz;
}
}
/******************************************************************************/
static void CudaTest(char *msg)
{
hipError_t e;
hipDeviceSynchronize();
if (hipSuccess != (e = hipGetLastError())) {
fprintf(stderr, "%s: %d\n", msg, e);
fprintf(stderr, "%s\n", hipGetErrorString(e));
exit(-1);
}
}
/******************************************************************************/
// random number generator
#define MULT 1103515245
#define ADD 12345
#define MASK 0x7FFFFFFF
#define TWOTO31 2147483648.0
static int A = 1;
static int B = 0;
static int randx = 1;
static int lastrand;
static void drndset(int seed)
{
A = 1;
B = 0;
randx = (A * seed + B) & MASK;
A = (MULT * A) & MASK;
B = (MULT * B + ADD) & MASK;
}
static double drnd()
{
lastrand = randx;
randx = (A * randx + B) & MASK;
return (double)lastrand / TWOTO31;
}
/******************************************************************************/
int main(int argc, char *argv[])
{
register int i, run, blocks;
register int nnodes, nbodies, step, timesteps;
register int runtime, mintime;
int error;
register float dtime, dthf, epssq, itolsq;
float time, timing[7];
clock_t starttime, endtime;
hipEvent_t start, stop;
float *mass, *posx, *posy, *posz, *velx, *vely, *velz;
int *errl, *sortl, *childl, *countl, *startl;
float *massl;
float *posxl, *posyl, *poszl;
float *velxl, *velyl, *velzl;
float *accxl, *accyl, *acczl;
float *maxxl, *maxyl, *maxzl;
float *minxl, *minyl, *minzl;
register double rsc, vsc, r, v, x, y, z, sq, scale;
// perform some checks
fprintf(stderr, "CUDA BarnesHut v2.1\n");
if (argc != 3) {
fprintf(stderr, "\n");
fprintf(stderr, "arguments: number_of_bodies number_of_timesteps\n");
exit(-1);
}
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "There is no device supporting CUDA\n");
exit(-1);
}
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
if ((deviceProp.major == 9999) && (deviceProp.minor == 9999)) {
fprintf(stderr, "There is no CUDA capable device\n");
exit(-1);
}
if (deviceProp.major < 2) {
fprintf(stderr, "Need at least compute capability 2.0\n");
exit(-1);
}
if (deviceProp.warpSize != WARPSIZE) {
fprintf(stderr, "Warp size must be %d\n", deviceProp.warpSize);
exit(-1);
}
blocks = deviceProp.multiProcessorCount;
fprintf(stderr, "blocks = %d\n", blocks);
  if ((WARPSIZE <= 0) || ((WARPSIZE & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "Warp size must be greater than zero and a power of two\n");
exit(-1);
}
if (MAXDEPTH > WARPSIZE) {
fprintf(stderr, "MAXDEPTH must be less than or equal to WARPSIZE\n");
exit(-1);
}
  if ((THREADS1 <= 0) || ((THREADS1 & (THREADS1-1)) != 0)) {
fprintf(stderr, "THREADS1 must be greater than zero and a power of two\n");
exit(-1);
}
// set L1/shared memory configuration
hipFuncSetCacheConfig(BoundingBoxKernel, hipFuncCachePreferShared);
hipFuncSetCacheConfig(TreeBuildingKernel, hipFuncCachePreferL1);
hipFuncSetCacheConfig(SummarizationKernel, hipFuncCachePreferShared);
hipFuncSetCacheConfig(SortKernel, hipFuncCachePreferL1);
hipFuncSetCacheConfig(ForceCalculationKernel, hipFuncCachePreferL1);
hipFuncSetCacheConfig(IntegrationKernel, hipFuncCachePreferL1);
hipGetLastError(); // reset error value
for (run = 0; run < 3; run++) {
for (i = 0; i < 7; i++) timing[i] = 0.0f;
nbodies = atoi(argv[1]);
if (nbodies < 1) {
fprintf(stderr, "nbodies is too small: %d\n", nbodies);
exit(-1);
}
if (nbodies > (1 << 30)) {
fprintf(stderr, "nbodies is too large: %d\n", nbodies);
exit(-1);
}
nnodes = nbodies * 2;
if (nnodes < 1024*blocks) nnodes = 1024*blocks;
while ((nnodes & (WARPSIZE-1)) != 0) nnodes++;
nnodes--;
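    // tree storage: 2*nbodies cells are reserved (at least 1024 per SM), rounded
    // up to a multiple of the warp size; after the decrement, nnodes is the index
    // of the last slot, which the kernels use as the root cell, and every per-node
    // array is allocated with nnodes+1 entries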
timesteps = atoi(argv[2]);
dtime = 0.025; dthf = dtime * 0.5f;
epssq = 0.05 * 0.05;
itolsq = 1.0f / (0.5 * 0.5);
// allocate memory
if (run == 0) {
fprintf(stderr, "nodes = %d\n", nnodes+1);
fprintf(stderr, "configuration: %d bodies, %d time steps\n", nbodies, timesteps);
mass = (float *)malloc(sizeof(float) * nbodies);
if (mass == NULL) {fprintf(stderr, "cannot allocate mass\n"); exit(-1);}
posx = (float *)malloc(sizeof(float) * nbodies);
if (posx == NULL) {fprintf(stderr, "cannot allocate posx\n"); exit(-1);}
posy = (float *)malloc(sizeof(float) * nbodies);
if (posy == NULL) {fprintf(stderr, "cannot allocate posy\n"); exit(-1);}
posz = (float *)malloc(sizeof(float) * nbodies);
if (posz == NULL) {fprintf(stderr, "cannot allocate posz\n"); exit(-1);}
velx = (float *)malloc(sizeof(float) * nbodies);
if (velx == NULL) {fprintf(stderr, "cannot allocate velx\n"); exit(-1);}
vely = (float *)malloc(sizeof(float) * nbodies);
if (vely == NULL) {fprintf(stderr, "cannot allocate vely\n"); exit(-1);}
velz = (float *)malloc(sizeof(float) * nbodies);
if (velz == NULL) {fprintf(stderr, "cannot allocate velz\n"); exit(-1);}
if (hipSuccess != hipMalloc((void **)&errl, sizeof(int))) fprintf(stderr, "could not allocate errd\n"); CudaTest("couldn't allocate errd");
if (hipSuccess != hipMalloc((void **)&childl, sizeof(int) * (nnodes+1) * 8)) fprintf(stderr, "could not allocate childd\n"); CudaTest("couldn't allocate childd");
if (hipSuccess != hipMalloc((void **)&massl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate massd\n"); CudaTest("couldn't allocate massd");
if (hipSuccess != hipMalloc((void **)&posxl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posxd\n"); CudaTest("couldn't allocate posxd");
if (hipSuccess != hipMalloc((void **)&posyl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posyd\n"); CudaTest("couldn't allocate posyd");
if (hipSuccess != hipMalloc((void **)&poszl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate poszd\n"); CudaTest("couldn't allocate poszd");
if (hipSuccess != hipMalloc((void **)&countl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate countd\n"); CudaTest("couldn't allocate countd");
if (hipSuccess != hipMalloc((void **)&startl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate startd\n"); CudaTest("couldn't allocate startd");
// alias arrays
int inc = (nbodies + WARPSIZE - 1) & (-WARPSIZE);
velxl = (float *)&childl[0*inc];
velyl = (float *)&childl[1*inc];
velzl = (float *)&childl[2*inc];
accxl = (float *)&childl[3*inc];
accyl = (float *)&childl[4*inc];
acczl = (float *)&childl[5*inc];
sortl = (int *)&childl[6*inc];
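      // the child array holds 8*(nnodes+1) ints, comfortably more than the seven
      // body-sized arrays aliased into it (inc is nbodies rounded up to a warp
      // multiple), so velocities, accelerations and the sort buffer reuse that
      // storage; the code relies on these overlapping regions never being live at
      // the same time as the child pointers they share memory with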
if (hipSuccess != hipMalloc((void **)&maxxl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxxd\n"); CudaTest("couldn't allocate maxxd");
if (hipSuccess != hipMalloc((void **)&maxyl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxyd\n"); CudaTest("couldn't allocate maxyd");
if (hipSuccess != hipMalloc((void **)&maxzl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxzd\n"); CudaTest("couldn't allocate maxzd");
if (hipSuccess != hipMalloc((void **)&minxl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minxd\n"); CudaTest("couldn't allocate minxd");
if (hipSuccess != hipMalloc((void **)&minyl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minyd\n"); CudaTest("couldn't allocate minyd");
if (hipSuccess != hipMalloc((void **)&minzl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minzd\n"); CudaTest("couldn't allocate minzd");
if (hipSuccess != hipMemcpyToSymbol(nnodesd, &nnodes, sizeof(int))) fprintf(stderr, "copying of nnodes to device failed\n"); CudaTest("nnode copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(nbodiesd, &nbodies, sizeof(int))) fprintf(stderr, "copying of nbodies to device failed\n"); CudaTest("nbody copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(errd, &errl, sizeof(void*))) fprintf(stderr, "copying of err to device failed\n"); CudaTest("err copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(dtimed, &dtime, sizeof(float))) fprintf(stderr, "copying of dtime to device failed\n"); CudaTest("dtime copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(dthfd, &dthf, sizeof(float))) fprintf(stderr, "copying of dthf to device failed\n"); CudaTest("dthf copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(epssqd, &epssq, sizeof(float))) fprintf(stderr, "copying of epssq to device failed\n"); CudaTest("epssq copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(itolsqd, &itolsq, sizeof(float))) fprintf(stderr, "copying of itolsq to device failed\n"); CudaTest("itolsq copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(sortd, &sortl, sizeof(void*))) fprintf(stderr, "copying of sortl to device failed\n"); CudaTest("sortl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(countd, &countl, sizeof(void*))) fprintf(stderr, "copying of countl to device failed\n"); CudaTest("countl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(startd, &startl, sizeof(void*))) fprintf(stderr, "copying of startl to device failed\n"); CudaTest("startl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(childd, &childl, sizeof(void*))) fprintf(stderr, "copying of childl to device failed\n"); CudaTest("childl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(massd, &massl, sizeof(void*))) fprintf(stderr, "copying of massl to device failed\n"); CudaTest("massl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(posxd, &posxl, sizeof(void*))) fprintf(stderr, "copying of posxl to device failed\n"); CudaTest("posxl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(posyd, &posyl, sizeof(void*))) fprintf(stderr, "copying of posyl to device failed\n"); CudaTest("posyl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(poszd, &poszl, sizeof(void*))) fprintf(stderr, "copying of poszl to device failed\n"); CudaTest("poszl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(velxd, &velxl, sizeof(void*))) fprintf(stderr, "copying of velxl to device failed\n"); CudaTest("velxl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(velyd, &velyl, sizeof(void*))) fprintf(stderr, "copying of velyl to device failed\n"); CudaTest("velyl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(velzd, &velzl, sizeof(void*))) fprintf(stderr, "copying of velzl to device failed\n"); CudaTest("velzl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(accxd, &accxl, sizeof(void*))) fprintf(stderr, "copying of accxl to device failed\n"); CudaTest("accxl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(accyd, &accyl, sizeof(void*))) fprintf(stderr, "copying of accyl to device failed\n"); CudaTest("accyl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(acczd, &acczl, sizeof(void*))) fprintf(stderr, "copying of acczl to device failed\n"); CudaTest("acczl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(maxxd, &maxxl, sizeof(void*))) fprintf(stderr, "copying of maxxl to device failed\n"); CudaTest("maxxl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(maxyd, &maxyl, sizeof(void*))) fprintf(stderr, "copying of maxyl to device failed\n"); CudaTest("maxyl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(maxzd, &maxzl, sizeof(void*))) fprintf(stderr, "copying of maxzl to device failed\n"); CudaTest("maxzl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(minxd, &minxl, sizeof(void*))) fprintf(stderr, "copying of minxl to device failed\n"); CudaTest("minxl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(minyd, &minyl, sizeof(void*))) fprintf(stderr, "copying of minyl to device failed\n"); CudaTest("minyl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(minzd, &minzl, sizeof(void*))) fprintf(stderr, "copying of minzl to device failed\n"); CudaTest("minzl copy to device failed");
}
// generate input
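    // bodies are drawn from a Plummer model (the generator used in Aarseth's
    // classic NBODY codes): equal masses, radii from the Plummer inverse CDF with
    // the 3*pi/16 scale factor, isotropic directions chosen by rejection sampling
    // inside the unit sphere, and speeds accepted against the Plummer velocity
    // distribution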
drndset(7);
rsc = (3 * 3.1415926535897932384626433832795) / 16;
vsc = sqrt(1.0 / rsc);
for (i = 0; i < nbodies; i++) {
mass[i] = 1.0 / nbodies;
r = 1.0 / sqrt(pow(drnd()*0.999, -2.0/3.0) - 1);
do {
x = drnd()*2.0 - 1.0;
y = drnd()*2.0 - 1.0;
z = drnd()*2.0 - 1.0;
sq = x*x + y*y + z*z;
} while (sq > 1.0);
scale = rsc * r / sqrt(sq);
posx[i] = x * scale;
posy[i] = y * scale;
posz[i] = z * scale;
do {
x = drnd();
y = drnd() * 0.1;
} while (y > x*x * pow(1 - x*x, 3.5));
v = x * sqrt(2.0 / sqrt(1 + r*r));
do {
x = drnd()*2.0 - 1.0;
y = drnd()*2.0 - 1.0;
z = drnd()*2.0 - 1.0;
sq = x*x + y*y + z*z;
} while (sq > 1.0);
scale = vsc * v / sqrt(sq);
velx[i] = x * scale;
vely[i] = y * scale;
velz[i] = z * scale;
}
if (hipSuccess != hipMemcpy(massl, mass, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of mass to device failed\n"); CudaTest("mass copy to device failed");
if (hipSuccess != hipMemcpy(posxl, posx, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of posx to device failed\n"); CudaTest("posx copy to device failed");
if (hipSuccess != hipMemcpy(posyl, posy, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of posy to device failed\n"); CudaTest("posy copy to device failed");
if (hipSuccess != hipMemcpy(poszl, posz, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of posz to device failed\n"); CudaTest("posz copy to device failed");
if (hipSuccess != hipMemcpy(velxl, velx, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of velx to device failed\n"); CudaTest("velx copy to device failed");
if (hipSuccess != hipMemcpy(velyl, vely, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of vely to device failed\n"); CudaTest("vely copy to device failed");
if (hipSuccess != hipMemcpy(velzl, velz, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of velz to device failed\n"); CudaTest("velz copy to device failed");
    // run timesteps (launch GPU kernels)
hipEventCreate(&start); hipEventCreate(&stop);
starttime = clock();
hipEventRecord(start, 0);
hipLaunchKernelGGL(( InitializationKernel), dim3(1), dim3(1), 0, 0, );
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[0] += time;
CudaTest("kernel 0 launch failed");
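    // each time step runs the full Barnes-Hut pipeline: bound the bodies, build
    // the octree, summarize cell centers of mass, sort bodies into tree order for
    // locality, compute forces, and integrate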
for (step = 0; step < timesteps; step++) {
hipEventRecord(start, 0);
hipLaunchKernelGGL(( BoundingBoxKernel), dim3(blocks * FACTOR1), dim3(THREADS1), 0, 0, );
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[1] += time;
CudaTest("kernel 1 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( TreeBuildingKernel), dim3(blocks * FACTOR2), dim3(THREADS2), 0, 0, );
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[2] += time;
CudaTest("kernel 2 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( SummarizationKernel), dim3(blocks * FACTOR3), dim3(THREADS3), 0, 0, );
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[3] += time;
CudaTest("kernel 3 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( SortKernel), dim3(blocks * FACTOR4), dim3(THREADS4), 0, 0, );
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[4] += time;
CudaTest("kernel 4 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( ForceCalculationKernel), dim3(blocks * FACTOR5), dim3(THREADS5), 0, 0, );
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[5] += time;
CudaTest("kernel 5 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( IntegrationKernel), dim3(blocks * FACTOR6), dim3(THREADS6), 0, 0, );
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[6] += time;
CudaTest("kernel 6 launch failed");
}
endtime = clock();
CudaTest("kernel launch failed");
hipEventDestroy(start); hipEventDestroy(stop);
// transfer result back to CPU
if (hipSuccess != hipMemcpy(&error, errl, sizeof(int), hipMemcpyDeviceToHost)) fprintf(stderr, "copying of err from device failed\n"); CudaTest("err copy from device failed");
if (hipSuccess != hipMemcpy(posx, posxl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of posx from device failed\n"); CudaTest("posx copy from device failed");
if (hipSuccess != hipMemcpy(posy, posyl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of posy from device failed\n"); CudaTest("posy copy from device failed");
if (hipSuccess != hipMemcpy(posz, poszl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of posz from device failed\n"); CudaTest("posz copy from device failed");
if (hipSuccess != hipMemcpy(velx, velxl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of velx from device failed\n"); CudaTest("velx copy from device failed");
if (hipSuccess != hipMemcpy(vely, velyl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of vely from device failed\n"); CudaTest("vely copy from device failed");
if (hipSuccess != hipMemcpy(velz, velzl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of velz from device failed\n"); CudaTest("velz copy from device failed");
runtime = (int) (1000.0f * (endtime - starttime) / CLOCKS_PER_SEC);
fprintf(stderr, "runtime: %d ms (", runtime);
time = 0;
for (i = 1; i < 7; i++) {
fprintf(stderr, " %.1f ", timing[i]);
time += timing[i];
}
if (error == 0) {
fprintf(stderr, ") = %.1f\n", time);
} else {
fprintf(stderr, ") = %.1f FAILED %d\n", time, error);
}
if ((run == 0) || (mintime > runtime)) mintime = runtime;
}
fprintf(stderr, "mintime: %d ms\n", mintime);
// print output
// for (i = 0; i < nbodies; i++) {
printf("%.2e %.2e %.2e\n", posx[i], posy[i], posz[i]);
// }
free(mass);
free(posx);
free(posy);
free(posz);
free(velx);
free(vely);
free(velz);
hipFree(errl);
hipFree(childl);
hipFree(massl);
hipFree(posxl);
hipFree(posyl);
hipFree(poszl);
hipFree(countl);
hipFree(startl);
hipFree(maxxl);
hipFree(maxyl);
hipFree(maxzl);
hipFree(minxl);
hipFree(minyl);
hipFree(minzl);
return 0;
}
| 11c043ab64fe3b765c283f1b904d814e49af8fba.cu | /*
CUDA BarnesHut v2.1: Simulation of the gravitational forces
in a galactic cluster using the Barnes-Hut n-body algorithm
Copyright (c) 2011, Texas State University-San Marcos. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Texas State University-San Marcos nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
// thread count
#define THREADS1 512 /* must be a power of 2 */
#define THREADS2 1024
#define THREADS3 1024
#define THREADS4 256
#define THREADS5 256
#define THREADS6 512
// block count = factor * #SMs
#define FACTOR1 3
#define FACTOR2 1
#define FACTOR3 1 /* must all be resident at the same time */
#define FACTOR4 1 /* must all be resident at the same time */
#define FACTOR5 5
#define FACTOR6 3
#define WARPSIZE 32
#define MAXDEPTH 32
/******************************************************************************/
// childd is aliased with velxd, velyd, velzd, accxd, accyd, acczd, and sortd but they never use the same memory locations
__constant__ int nnodesd, nbodiesd;
__constant__ float dtimed, dthfd, epssqd, itolsqd;
__constant__ volatile float *massd, *posxd, *posyd, *poszd, *velxd, *velyd, *velzd, *accxd, *accyd, *acczd;
__constant__ volatile float *maxxd, *maxyd, *maxzd, *minxd, *minyd, *minzd;
__constant__ volatile int *errd, *sortd, *childd, *countd, *startd;
__device__ volatile int stepd, bottomd, maxdepthd, blkcntd;
__device__ volatile float radiusd;
/******************************************************************************/
/*** initialize memory ********************************************************/
/******************************************************************************/
__global__ void InitializationKernel()
{
*errd = 0;
stepd = -1;
maxdepthd = 1;
blkcntd = 0;
}
/******************************************************************************/
/*** compute center and radius ************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS1, FACTOR1)
void BoundingBoxKernel()
{
register int i, j, k, inc;
register float val, minx, maxx, miny, maxy, minz, maxz;
__shared__ volatile float sminx[THREADS1], smaxx[THREADS1], sminy[THREADS1], smaxy[THREADS1], sminz[THREADS1], smaxz[THREADS1];
// initialize with valid data (in case #bodies < #threads)
minx = maxx = posxd[0];
miny = maxy = posyd[0];
minz = maxz = poszd[0];
// scan all bodies
i = threadIdx.x;
inc = THREADS1 * gridDim.x;
for (j = i + blockIdx.x * THREADS1; j < nbodiesd; j += inc) {
val = posxd[j];
minx = min(minx, val);
maxx = max(maxx, val);
val = posyd[j];
miny = min(miny, val);
maxy = max(maxy, val);
val = poszd[j];
minz = min(minz, val);
maxz = max(maxz, val);
}
// reduction in shared memory
sminx[i] = minx;
smaxx[i] = maxx;
sminy[i] = miny;
smaxy[i] = maxy;
sminz[i] = minz;
smaxz[i] = maxz;
for (j = THREADS1 / 2; j > 0; j /= 2) {
__syncthreads();
if (i < j) {
k = i + j;
sminx[i] = minx = min(minx, sminx[k]);
smaxx[i] = maxx = max(maxx, smaxx[k]);
sminy[i] = miny = min(miny, sminy[k]);
smaxy[i] = maxy = max(maxy, smaxy[k]);
sminz[i] = minz = min(minz, sminz[k]);
smaxz[i] = maxz = max(maxz, smaxz[k]);
}
}
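  // standard shared-memory reduction: the stride j halves every iteration and the
  // lower half of the threads folds in the upper half's min/max values, with a
  // barrier between strides; afterwards thread 0 holds the per-block result, and
  // the last block to finish (tracked with atomicInc on blkcntd below) combines
  // all block results and creates the root cell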
// write block result to global memory
if (i == 0) {
k = blockIdx.x;
minxd[k] = minx;
maxxd[k] = maxx;
minyd[k] = miny;
maxyd[k] = maxy;
minzd[k] = minz;
maxzd[k] = maxz;
inc = gridDim.x - 1;
if (inc == atomicInc((unsigned int *)&blkcntd, inc)) {
// I'm the last block, so combine all block results
for (j = 0; j <= inc; j++) {
minx = min(minx, minxd[j]);
maxx = max(maxx, maxxd[j]);
miny = min(miny, minyd[j]);
maxy = max(maxy, maxyd[j]);
minz = min(minz, minzd[j]);
maxz = max(maxz, maxzd[j]);
}
// compute 'radius'
val = max(maxx - minx, maxy - miny);
radiusd = max(val, maxz - minz) * 0.5f;
// create root node
k = nnodesd;
bottomd = k;
massd[k] = -1.0f;
startd[k] = 0;
posxd[k] = (minx + maxx) * 0.5f;
posyd[k] = (miny + maxy) * 0.5f;
poszd[k] = (minz + maxz) * 0.5f;
k *= 8;
for (i = 0; i < 8; i++) childd[k + i] = -1;
stepd++;
}
}
}
/******************************************************************************/
/*** build tree ***************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS2, FACTOR2)
void TreeBuildingKernel()
{
register int i, j, k, depth, localmaxdepth, skip, inc;
register float x, y, z, r;
register float px, py, pz;
register int ch, n, cell, locked, patch;
register float radius, rootx, rooty, rootz;
// cache root data
radius = radiusd;
rootx = posxd[nnodesd];
rooty = posyd[nnodesd];
rootz = poszd[nnodesd];
localmaxdepth = 1;
skip = 1;
inc = blockDim.x * gridDim.x;
i = threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all bodies assigned to thread
while (i < nbodiesd) {
if (skip != 0) {
// new body, so start traversing at root
skip = 0;
px = posxd[i];
py = posyd[i];
pz = poszd[i];
n = nnodesd;
depth = 1;
r = radius;
j = 0;
// determine which child to follow
if (rootx < px) j = 1;
if (rooty < py) j += 2;
if (rootz < pz) j += 4;
}
// follow path to leaf cell
ch = childd[n*8+j];
while (ch >= nbodiesd) {
n = ch;
depth++;
r *= 0.5f;
j = 0;
// determine which child to follow
if (posxd[n] < px) j = 1;
if (posyd[n] < py) j += 2;
if (poszd[n] < pz) j += 4;
ch = childd[n*8+j];
}
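    // child pointer encoding: >= 0 is a body or cell index, -1 an empty slot, and
    // -2 a slot another thread has locked; the atomicCAS below claims the slot,
    // the winner inserts (splitting the leaf into new cells if it was occupied)
    // and publishes the finished subtree with __threadfence before unlocking,
    // while losing threads simply retry the same body on a later iteration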
if (ch != -2) { // skip if child pointer is locked and try again later
locked = n*8+j;
if (ch == atomicCAS((int *)&childd[locked], ch, -2)) { // try to lock
if (ch == -1) {
// if null, just insert the new body
childd[locked] = i;
} else { // there already is a body in this position
patch = -1;
// create new cell(s) and insert the old and new body
do {
depth++;
cell = atomicSub((int *)&bottomd, 1) - 1;
if (cell <= nbodiesd) {
*errd = 1;
bottomd = nnodesd;
}
patch = max(patch, cell);
x = (j & 1) * r;
y = ((j >> 1) & 1) * r;
z = ((j >> 2) & 1) * r;
r *= 0.5f;
massd[cell] = -1.0f;
startd[cell] = -1;
x = posxd[cell] = posxd[n] - r + x;
y = posyd[cell] = posyd[n] - r + y;
z = poszd[cell] = poszd[n] - r + z;
for (k = 0; k < 8; k++) childd[cell*8+k] = -1;
if (patch != cell) {
childd[n*8+j] = cell;
}
j = 0;
if (x < posxd[ch]) j = 1;
if (y < posyd[ch]) j += 2;
if (z < poszd[ch]) j += 4;
childd[cell*8+j] = ch;
n = cell;
j = 0;
if (x < px) j = 1;
if (y < py) j += 2;
if (z < pz) j += 4;
ch = childd[n*8+j];
// repeat until the two bodies are different children
} while (ch >= 0);
childd[n*8+j] = i;
__threadfence(); // push out subtree
childd[locked] = patch;
}
localmaxdepth = max(depth, localmaxdepth);
i += inc; // move on to next body
skip = 1;
}
}
__syncthreads(); // throttle
}
// record maximum tree depth
atomicMax((int *)&maxdepthd, localmaxdepth);
}
/******************************************************************************/
/*** compute center of mass ***************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS3, FACTOR3)
void SummarizationKernel()
{
register int i, j, k, ch, inc, missing, cnt, bottom;
register float m, cm, px, py, pz;
__shared__ volatile int child[THREADS3 * 8];
bottom = bottomd;
inc = blockDim.x * gridDim.x;
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += inc;
missing = 0;
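  // cells are created with mass = -1, so a non-negative mass doubles as a "ready"
  // flag: children whose mass is still negative are cached in the child buffer
  // and polled until their centers of mass have been published (the writer issues
  // a __threadfence before storing the mass)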
// iterate over all cells assigned to thread
while (k <= nnodesd) {
if (missing == 0) {
// new cell, so initialize
cm = 0.0f;
px = 0.0f;
py = 0.0f;
pz = 0.0f;
cnt = 0;
j = 0;
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
if (ch >= 0) {
if (i != j) {
// move children to front (needed later for speed)
childd[k*8+i] = -1;
childd[k*8+j] = ch;
}
child[missing*THREADS3+threadIdx.x] = ch; // cache missing children
m = massd[ch];
missing++;
if (m >= 0.0f) {
// child is ready
missing--;
if (ch >= nbodiesd) { // count bodies (needed later)
cnt += countd[ch] - 1;
}
// add child's contribution
cm += m;
px += posxd[ch] * m;
py += posyd[ch] * m;
pz += poszd[ch] * m;
}
j++;
}
}
cnt += j;
}
if (missing != 0) {
do {
// poll missing child
ch = child[(missing-1)*THREADS3+threadIdx.x];
m = massd[ch];
if (m >= 0.0f) {
// child is now ready
missing--;
if (ch >= nbodiesd) {
// count bodies (needed later)
cnt += countd[ch] - 1;
}
// add child's contribution
cm += m;
px += posxd[ch] * m;
py += posyd[ch] * m;
pz += poszd[ch] * m;
}
// repeat until we are done or child is not ready
} while ((m >= 0.0f) && (missing != 0));
}
if (missing == 0) {
// all children are ready, so store computed information
countd[k] = cnt;
m = 1.0f / cm;
posxd[k] = px * m;
posyd[k] = py * m;
poszd[k] = pz * m;
__threadfence(); // make sure data are visible before setting mass
massd[k] = cm;
k += inc; // move on to next cell
}
}
}
/******************************************************************************/
/*** sort bodies **************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS4, FACTOR4)
void SortKernel()
{
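  // walk the cells from the root (highest index) downward, giving each subtree a
  // contiguous range of slots in sortd so that bodies that are close in the octree
  // end up adjacent in memory; the force kernel later processes bodies in this
  // order to keep warps spatially coherent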
register int i, k, ch, dec, start, bottom;
bottom = bottomd;
dec = blockDim.x * gridDim.x;
k = nnodesd + 1 - dec + threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all cells assigned to thread
while (k >= bottom) {
start = startd[k];
if (start >= 0) {
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
if (ch >= nbodiesd) {
// child is a cell
startd[ch] = start; // set start ID of child
start += countd[ch]; // add #bodies in subtree
} else if (ch >= 0) {
// child is a body
sortd[start] = ch; // record body in 'sorted' array
start++;
}
}
k -= dec; // move on to next cell
}
__syncthreads(); // throttle
}
}
/******************************************************************************/
/*** compute force ************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS5, FACTOR5)
void ForceCalculationKernel()
{
register int i, j, k, n, depth, base, sbase, diff;
register float px, py, pz, ax, ay, az, dx, dy, dz, tmp;
__shared__ volatile int pos[MAXDEPTH * THREADS5/WARPSIZE], node[MAXDEPTH * THREADS5/WARPSIZE];
__shared__ volatile float dq[MAXDEPTH * THREADS5/WARPSIZE];
__shared__ volatile int step, maxdepth;
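  // dq[d] is the squared opening threshold for tree level d: it starts at
  // radius^2 / theta^2 (itolsqd = 1/theta^2) and shrinks by 4x per level because
  // cell size halves with depth; node[] and pos[] form one traversal stack per
  // warp, and the warp vote (__all) below keeps all lanes on the same branch so
  // that shared stack stays consistent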
if (0 == threadIdx.x) {
step = stepd;
maxdepth = maxdepthd;
tmp = radiusd;
// precompute values that depend only on tree level
dq[0] = tmp * tmp * itolsqd;
for (i = 1; i < maxdepth; i++) {
dq[i] = dq[i - 1] * 0.25f;
}
if (maxdepth > MAXDEPTH) {
*errd = maxdepth;
}
}
__syncthreads();
if (maxdepth <= MAXDEPTH) {
// figure out first thread in each warp (lane 0)
base = threadIdx.x / WARPSIZE;
sbase = base * WARPSIZE;
j = base * MAXDEPTH;
diff = threadIdx.x - sbase;
// make multiple copies to avoid index calculations later
if (diff < MAXDEPTH) {
dq[diff+j] = dq[diff];
}
__syncthreads();
// iterate over all bodies assigned to thread
for (k = threadIdx.x + blockIdx.x * blockDim.x; k < nbodiesd; k += blockDim.x * gridDim.x) {
i = sortd[k]; // get permuted/sorted index
// cache position info
px = posxd[i];
py = posyd[i];
pz = poszd[i];
ax = 0.0f;
ay = 0.0f;
az = 0.0f;
// initialize iteration stack, i.e., push root node onto stack
depth = j;
if (sbase == threadIdx.x) {
node[j] = nnodesd;
pos[j] = 0;
}
__threadfence(); // make sure it's visible
while (depth >= j) {
// stack is not empty
while (pos[depth] < 8) {
// node on top of stack has more children to process
n = childd[node[depth]*8+pos[depth]]; // load child pointer
if (sbase == threadIdx.x) {
// I'm the first thread in the warp
pos[depth]++;
}
__threadfence(); // make sure it's visible
if (n >= 0) {
dx = posxd[n] - px;
dy = posyd[n] - py;
dz = poszd[n] - pz;
tmp = dx*dx + (dy*dy + (dz*dz + epssqd)); // compute distance squared (plus softening)
if ((n < nbodiesd) || __all(tmp >= dq[depth])) { // check if all threads agree that cell is far enough away (or is a body)
            tmp = rsqrtf(tmp); // reciprocal distance: 1/sqrt(d^2 + softening)
tmp = massd[n] * tmp * tmp * tmp;
ax += dx * tmp;
ay += dy * tmp;
az += dz * tmp;
} else {
// push cell onto stack
depth++;
if (sbase == threadIdx.x) {
node[depth] = n;
pos[depth] = 0;
}
__threadfence(); // make sure it's visible
}
} else {
            depth = max(j, depth - 1); // early out because all remaining children are also null
}
}
depth--; // done with this level
}
if (step > 0) {
// update velocity
velxd[i] += (ax - accxd[i]) * dthfd;
velyd[i] += (ay - accyd[i]) * dthfd;
velzd[i] += (az - acczd[i]) * dthfd;
}
// save computed acceleration
accxd[i] = ax;
accyd[i] = ay;
acczd[i] = az;
}
}
}
/******************************************************************************/
/*** advance bodies ***********************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS6, FACTOR6)
void IntegrationKernel()
{
register int i, inc;
register float dvelx, dvely, dvelz;
register float velhx, velhy, velhz;
// iterate over all bodies assigned to thread
inc = blockDim.x * gridDim.x;
for (i = threadIdx.x + blockIdx.x * blockDim.x; i < nbodiesd; i += inc) {
// integrate
dvelx = accxd[i] * dthfd;
dvely = accyd[i] * dthfd;
dvelz = acczd[i] * dthfd;
velhx = velxd[i] + dvelx;
velhy = velyd[i] + dvely;
velhz = velzd[i] + dvelz;
posxd[i] += velhx * dtimed;
posyd[i] += velhy * dtimed;
poszd[i] += velhz * dtimed;
velxd[i] = velhx + dvelx;
velyd[i] = velhy + dvely;
velzd[i] = velhz + dvelz;
}
}
/******************************************************************************/
static void CudaTest(const char *msg)
{
cudaError_t e;
cudaThreadSynchronize();
if (cudaSuccess != (e = cudaGetLastError())) {
fprintf(stderr, "%s: %d\n", msg, e);
fprintf(stderr, "%s\n", cudaGetErrorString(e));
exit(-1);
}
}
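// a small convenience wrapper in the same spirit as CudaTest(); this is only a
// sketch and is not used by the code below, but it shows how the same check can
// also report the failing file and line at the call site
#define CUDA_CHECK(call) \
  do { \
    cudaError_t _e = (call); \
    if (cudaSuccess != _e) { \
      fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(_e)); \
      exit(-1); \
    } \
  } while (0)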
/******************************************************************************/
// random number generator
#define MULT 1103515245
#define ADD 12345
#define MASK 0x7FFFFFFF
#define TWOTO31 2147483648.0
static int A = 1;
static int B = 0;
static int randx = 1;
static int lastrand;
static void drndset(int seed)
{
A = 1;
B = 0;
randx = (A * seed + B) & MASK;
A = (MULT * A) & MASK;
B = (MULT * B + ADD) & MASK;
}
static double drnd()
{
lastrand = randx;
randx = (A * randx + B) & MASK;
return (double)lastrand / TWOTO31;
}
/******************************************************************************/
int main(int argc, char *argv[])
{
register int i, run, blocks;
register int nnodes, nbodies, step, timesteps;
register int runtime, mintime;
int error;
register float dtime, dthf, epssq, itolsq;
float time, timing[7];
clock_t starttime, endtime;
cudaEvent_t start, stop;
float *mass, *posx, *posy, *posz, *velx, *vely, *velz;
int *errl, *sortl, *childl, *countl, *startl;
float *massl;
float *posxl, *posyl, *poszl;
float *velxl, *velyl, *velzl;
float *accxl, *accyl, *acczl;
float *maxxl, *maxyl, *maxzl;
float *minxl, *minyl, *minzl;
register double rsc, vsc, r, v, x, y, z, sq, scale;
// perform some checks
fprintf(stderr, "CUDA BarnesHut v2.1\n");
if (argc != 3) {
fprintf(stderr, "\n");
fprintf(stderr, "arguments: number_of_bodies number_of_timesteps\n");
exit(-1);
}
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "There is no device supporting CUDA\n");
exit(-1);
}
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
if ((deviceProp.major == 9999) && (deviceProp.minor == 9999)) {
fprintf(stderr, "There is no CUDA capable device\n");
exit(-1);
}
if (deviceProp.major < 2) {
fprintf(stderr, "Need at least compute capability 2.0\n");
exit(-1);
}
if (deviceProp.warpSize != WARPSIZE) {
fprintf(stderr, "Warp size must be %d\n", deviceProp.warpSize);
exit(-1);
}
blocks = deviceProp.multiProcessorCount;
fprintf(stderr, "blocks = %d\n", blocks);
  if ((WARPSIZE <= 0) || ((WARPSIZE & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "Warp size must be greater than zero and a power of two\n");
exit(-1);
}
if (MAXDEPTH > WARPSIZE) {
fprintf(stderr, "MAXDEPTH must be less than or equal to WARPSIZE\n");
exit(-1);
}
  if ((THREADS1 <= 0) || ((THREADS1 & (THREADS1-1)) != 0)) {
fprintf(stderr, "THREADS1 must be greater than zero and a power of two\n");
exit(-1);
}
// set L1/shared memory configuration
cudaFuncSetCacheConfig(BoundingBoxKernel, cudaFuncCachePreferShared);
cudaFuncSetCacheConfig(TreeBuildingKernel, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(SummarizationKernel, cudaFuncCachePreferShared);
cudaFuncSetCacheConfig(SortKernel, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(ForceCalculationKernel, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(IntegrationKernel, cudaFuncCachePreferL1);
cudaGetLastError(); // reset error value
for (run = 0; run < 3; run++) {
for (i = 0; i < 7; i++) timing[i] = 0.0f;
nbodies = atoi(argv[1]);
if (nbodies < 1) {
fprintf(stderr, "nbodies is too small: %d\n", nbodies);
exit(-1);
}
if (nbodies > (1 << 30)) {
fprintf(stderr, "nbodies is too large: %d\n", nbodies);
exit(-1);
}
nnodes = nbodies * 2;
if (nnodes < 1024*blocks) nnodes = 1024*blocks;
while ((nnodes & (WARPSIZE-1)) != 0) nnodes++;
nnodes--;
timesteps = atoi(argv[2]);
dtime = 0.025; dthf = dtime * 0.5f;
epssq = 0.05 * 0.05;
itolsq = 1.0f / (0.5 * 0.5);
// allocate memory
if (run == 0) {
fprintf(stderr, "nodes = %d\n", nnodes+1);
fprintf(stderr, "configuration: %d bodies, %d time steps\n", nbodies, timesteps);
mass = (float *)malloc(sizeof(float) * nbodies);
if (mass == NULL) {fprintf(stderr, "cannot allocate mass\n"); exit(-1);}
posx = (float *)malloc(sizeof(float) * nbodies);
if (posx == NULL) {fprintf(stderr, "cannot allocate posx\n"); exit(-1);}
posy = (float *)malloc(sizeof(float) * nbodies);
if (posy == NULL) {fprintf(stderr, "cannot allocate posy\n"); exit(-1);}
posz = (float *)malloc(sizeof(float) * nbodies);
if (posz == NULL) {fprintf(stderr, "cannot allocate posz\n"); exit(-1);}
velx = (float *)malloc(sizeof(float) * nbodies);
if (velx == NULL) {fprintf(stderr, "cannot allocate velx\n"); exit(-1);}
vely = (float *)malloc(sizeof(float) * nbodies);
if (vely == NULL) {fprintf(stderr, "cannot allocate vely\n"); exit(-1);}
velz = (float *)malloc(sizeof(float) * nbodies);
if (velz == NULL) {fprintf(stderr, "cannot allocate velz\n"); exit(-1);}
if (cudaSuccess != cudaMalloc((void **)&errl, sizeof(int))) fprintf(stderr, "could not allocate errd\n"); CudaTest("couldn't allocate errd");
if (cudaSuccess != cudaMalloc((void **)&childl, sizeof(int) * (nnodes+1) * 8)) fprintf(stderr, "could not allocate childd\n"); CudaTest("couldn't allocate childd");
if (cudaSuccess != cudaMalloc((void **)&massl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate massd\n"); CudaTest("couldn't allocate massd");
if (cudaSuccess != cudaMalloc((void **)&posxl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posxd\n"); CudaTest("couldn't allocate posxd");
if (cudaSuccess != cudaMalloc((void **)&posyl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posyd\n"); CudaTest("couldn't allocate posyd");
if (cudaSuccess != cudaMalloc((void **)&poszl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate poszd\n"); CudaTest("couldn't allocate poszd");
if (cudaSuccess != cudaMalloc((void **)&countl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate countd\n"); CudaTest("couldn't allocate countd");
if (cudaSuccess != cudaMalloc((void **)&startl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate startd\n"); CudaTest("couldn't allocate startd");
// alias arrays
int inc = (nbodies + WARPSIZE - 1) & (-WARPSIZE);
velxl = (float *)&childl[0*inc];
velyl = (float *)&childl[1*inc];
velzl = (float *)&childl[2*inc];
accxl = (float *)&childl[3*inc];
accyl = (float *)&childl[4*inc];
acczl = (float *)&childl[5*inc];
sortl = (int *)&childl[6*inc];
if (cudaSuccess != cudaMalloc((void **)&maxxl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxxd\n"); CudaTest("couldn't allocate maxxd");
if (cudaSuccess != cudaMalloc((void **)&maxyl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxyd\n"); CudaTest("couldn't allocate maxyd");
if (cudaSuccess != cudaMalloc((void **)&maxzl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxzd\n"); CudaTest("couldn't allocate maxzd");
if (cudaSuccess != cudaMalloc((void **)&minxl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minxd\n"); CudaTest("couldn't allocate minxd");
if (cudaSuccess != cudaMalloc((void **)&minyl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minyd\n"); CudaTest("couldn't allocate minyd");
if (cudaSuccess != cudaMalloc((void **)&minzl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minzd\n"); CudaTest("couldn't allocate minzd");
if (cudaSuccess != cudaMemcpyToSymbol(nnodesd, &nnodes, sizeof(int))) fprintf(stderr, "copying of nnodes to device failed\n"); CudaTest("nnode copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(nbodiesd, &nbodies, sizeof(int))) fprintf(stderr, "copying of nbodies to device failed\n"); CudaTest("nbody copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(errd, &errl, sizeof(void*))) fprintf(stderr, "copying of err to device failed\n"); CudaTest("err copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(dtimed, &dtime, sizeof(float))) fprintf(stderr, "copying of dtime to device failed\n"); CudaTest("dtime copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(dthfd, &dthf, sizeof(float))) fprintf(stderr, "copying of dthf to device failed\n"); CudaTest("dthf copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(epssqd, &epssq, sizeof(float))) fprintf(stderr, "copying of epssq to device failed\n"); CudaTest("epssq copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(itolsqd, &itolsq, sizeof(float))) fprintf(stderr, "copying of itolsq to device failed\n"); CudaTest("itolsq copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(sortd, &sortl, sizeof(void*))) fprintf(stderr, "copying of sortl to device failed\n"); CudaTest("sortl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(countd, &countl, sizeof(void*))) fprintf(stderr, "copying of countl to device failed\n"); CudaTest("countl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(startd, &startl, sizeof(void*))) fprintf(stderr, "copying of startl to device failed\n"); CudaTest("startl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(childd, &childl, sizeof(void*))) fprintf(stderr, "copying of childl to device failed\n"); CudaTest("childl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(massd, &massl, sizeof(void*))) fprintf(stderr, "copying of massl to device failed\n"); CudaTest("massl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(posxd, &posxl, sizeof(void*))) fprintf(stderr, "copying of posxl to device failed\n"); CudaTest("posxl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(posyd, &posyl, sizeof(void*))) fprintf(stderr, "copying of posyl to device failed\n"); CudaTest("posyl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(poszd, &poszl, sizeof(void*))) fprintf(stderr, "copying of poszl to device failed\n"); CudaTest("poszl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(velxd, &velxl, sizeof(void*))) fprintf(stderr, "copying of velxl to device failed\n"); CudaTest("velxl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(velyd, &velyl, sizeof(void*))) fprintf(stderr, "copying of velyl to device failed\n"); CudaTest("velyl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(velzd, &velzl, sizeof(void*))) fprintf(stderr, "copying of velzl to device failed\n"); CudaTest("velzl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(accxd, &accxl, sizeof(void*))) fprintf(stderr, "copying of accxl to device failed\n"); CudaTest("accxl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(accyd, &accyl, sizeof(void*))) fprintf(stderr, "copying of accyl to device failed\n"); CudaTest("accyl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(acczd, &acczl, sizeof(void*))) fprintf(stderr, "copying of acczl to device failed\n"); CudaTest("acczl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(maxxd, &maxxl, sizeof(void*))) fprintf(stderr, "copying of maxxl to device failed\n"); CudaTest("maxxl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(maxyd, &maxyl, sizeof(void*))) fprintf(stderr, "copying of maxyl to device failed\n"); CudaTest("maxyl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(maxzd, &maxzl, sizeof(void*))) fprintf(stderr, "copying of maxzl to device failed\n"); CudaTest("maxzl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(minxd, &minxl, sizeof(void*))) fprintf(stderr, "copying of minxl to device failed\n"); CudaTest("minxl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(minyd, &minyl, sizeof(void*))) fprintf(stderr, "copying of minyl to device failed\n"); CudaTest("minyl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(minzd, &minzl, sizeof(void*))) fprintf(stderr, "copying of minzl to device failed\n"); CudaTest("minzl copy to device failed");
}
// generate input
drndset(7);
rsc = (3 * 3.1415926535897932384626433832795) / 16;
vsc = sqrt(1.0 / rsc);
for (i = 0; i < nbodies; i++) {
mass[i] = 1.0 / nbodies;
r = 1.0 / sqrt(pow(drnd()*0.999, -2.0/3.0) - 1);
do {
x = drnd()*2.0 - 1.0;
y = drnd()*2.0 - 1.0;
z = drnd()*2.0 - 1.0;
sq = x*x + y*y + z*z;
} while (sq > 1.0);
scale = rsc * r / sqrt(sq);
posx[i] = x * scale;
posy[i] = y * scale;
posz[i] = z * scale;
do {
x = drnd();
y = drnd() * 0.1;
} while (y > x*x * pow(1 - x*x, 3.5));
v = x * sqrt(2.0 / sqrt(1 + r*r));
do {
x = drnd()*2.0 - 1.0;
y = drnd()*2.0 - 1.0;
z = drnd()*2.0 - 1.0;
sq = x*x + y*y + z*z;
} while (sq > 1.0);
scale = vsc * v / sqrt(sq);
velx[i] = x * scale;
vely[i] = y * scale;
velz[i] = z * scale;
}
if (cudaSuccess != cudaMemcpy(massl, mass, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of mass to device failed\n"); CudaTest("mass copy to device failed");
if (cudaSuccess != cudaMemcpy(posxl, posx, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of posx to device failed\n"); CudaTest("posx copy to device failed");
if (cudaSuccess != cudaMemcpy(posyl, posy, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of posy to device failed\n"); CudaTest("posy copy to device failed");
if (cudaSuccess != cudaMemcpy(poszl, posz, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of posz to device failed\n"); CudaTest("posz copy to device failed");
if (cudaSuccess != cudaMemcpy(velxl, velx, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of velx to device failed\n"); CudaTest("velx copy to device failed");
if (cudaSuccess != cudaMemcpy(velyl, vely, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of vely to device failed\n"); CudaTest("vely copy to device failed");
if (cudaSuccess != cudaMemcpy(velzl, velz, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of velz to device failed\n"); CudaTest("velz copy to device failed");
    // run timesteps (launch GPU kernels)
cudaEventCreate(&start); cudaEventCreate(&stop);
starttime = clock();
cudaEventRecord(start, 0);
InitializationKernel<<<1, 1>>>();
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[0] += time;
CudaTest("kernel 0 launch failed");
for (step = 0; step < timesteps; step++) {
cudaEventRecord(start, 0);
BoundingBoxKernel<<<blocks * FACTOR1, THREADS1>>>();
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[1] += time;
CudaTest("kernel 1 launch failed");
cudaEventRecord(start, 0);
TreeBuildingKernel<<<blocks * FACTOR2, THREADS2>>>();
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[2] += time;
CudaTest("kernel 2 launch failed");
cudaEventRecord(start, 0);
SummarizationKernel<<<blocks * FACTOR3, THREADS3>>>();
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[3] += time;
CudaTest("kernel 3 launch failed");
cudaEventRecord(start, 0);
SortKernel<<<blocks * FACTOR4, THREADS4>>>();
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[4] += time;
CudaTest("kernel 4 launch failed");
cudaEventRecord(start, 0);
ForceCalculationKernel<<<blocks * FACTOR5, THREADS5>>>();
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[5] += time;
CudaTest("kernel 5 launch failed");
cudaEventRecord(start, 0);
IntegrationKernel<<<blocks * FACTOR6, THREADS6>>>();
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[6] += time;
CudaTest("kernel 6 launch failed");
}
endtime = clock();
CudaTest("kernel launch failed");
cudaEventDestroy(start); cudaEventDestroy(stop);
// transfer result back to CPU
if (cudaSuccess != cudaMemcpy(&error, errl, sizeof(int), cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of err from device failed\n"); CudaTest("err copy from device failed");
if (cudaSuccess != cudaMemcpy(posx, posxl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of posx from device failed\n"); CudaTest("posx copy from device failed");
if (cudaSuccess != cudaMemcpy(posy, posyl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of posy from device failed\n"); CudaTest("posy copy from device failed");
if (cudaSuccess != cudaMemcpy(posz, poszl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of posz from device failed\n"); CudaTest("posz copy from device failed");
if (cudaSuccess != cudaMemcpy(velx, velxl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of velx from device failed\n"); CudaTest("velx copy from device failed");
if (cudaSuccess != cudaMemcpy(vely, velyl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of vely from device failed\n"); CudaTest("vely copy from device failed");
if (cudaSuccess != cudaMemcpy(velz, velzl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of velz from device failed\n"); CudaTest("velz copy from device failed");
runtime = (int) (1000.0f * (endtime - starttime) / CLOCKS_PER_SEC);
fprintf(stderr, "runtime: %d ms (", runtime);
time = 0;
for (i = 1; i < 7; i++) {
fprintf(stderr, " %.1f ", timing[i]);
time += timing[i];
}
if (error == 0) {
fprintf(stderr, ") = %.1f\n", time);
} else {
fprintf(stderr, ") = %.1f FAILED %d\n", time, error);
}
if ((run == 0) || (mintime > runtime)) mintime = runtime;
}
fprintf(stderr, "mintime: %d ms\n", mintime);
// print output
// for (i = 0; i < nbodies; i++) {
printf("%.2e %.2e %.2e\n", posx[i], posy[i], posz[i]);
// }
free(mass);
free(posx);
free(posy);
free(posz);
free(velx);
free(vely);
free(velz);
cudaFree(errl);
cudaFree(childl);
cudaFree(massl);
cudaFree(posxl);
cudaFree(posyl);
cudaFree(poszl);
cudaFree(countl);
cudaFree(startl);
cudaFree(maxxl);
cudaFree(maxyl);
cudaFree(maxzl);
cudaFree(minxl);
cudaFree(minyl);
cudaFree(minzl);
return 0;
}
|
5705878548d8bb142936f7c0d153cf2d5a314683.hip | // !!! This is a file automatically generated by hipify!!!
#include "cudacommon.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include "OptionParser.h"
#include "ResultDatabase.h"
#include "Timer.h"
#include "Utility.h"
// ****************************************************************************
// Function: addBenchmarkSpecOptions
//
// Purpose:
// Add benchmark specific options parsing
//
// Arguments:
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: December 15, 2009
//
// Modifications:
//
// ****************************************************************************
void addBenchmarkSpecOptions(OptionParser &op)
{
;
}
// ****************************************************************************
// Function: RunBenchmark
//
// Purpose:
//
// Arguments:
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: December 15, 2009
//
// Modifications:
//
// ****************************************************************************
void RunBenchmark(ResultDatabase &resultDB, OptionParser &op)
{
;
}
| 5705878548d8bb142936f7c0d153cf2d5a314683.cu | #include "cudacommon.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include "OptionParser.h"
#include "ResultDatabase.h"
#include "Timer.h"
#include "Utility.h"
// ****************************************************************************
// Function: addBenchmarkSpecOptions
//
// Purpose:
// Add benchmark specific options parsing
//
// Arguments:
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: December 15, 2009
//
// Modifications:
//
// ****************************************************************************
void addBenchmarkSpecOptions(OptionParser &op)
{
;
}
// ****************************************************************************
// Function: RunBenchmark
//
// Purpose:
//
// Arguments:
//
// Returns: nothing
//
// Programmer: Kyle Spafford
// Creation: December 15, 2009
//
// Modifications:
//
// ****************************************************************************
void RunBenchmark(ResultDatabase &resultDB, OptionParser &op)
{
;
}
|
4908b265407943498a55c37de6911002f03a35d1.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya, Shuai Shao (Shrek)
* @date 2012-2016
* @copyright University of Pennsylvania & STUDENT
*/
#include <cmath>
#include <cstdio>
#include <hip/hip_runtime.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include <thrust/random.h>
#include <util/checkCUDAError.h>
#include <util/tiny_gltf_loader.h>
#include "rasterizeTools.h"
#include "rasterize.h"
#include <glm/gtc/quaternion.hpp>
#include <glm/gtc/matrix_transform.hpp>
/// Features
#define BLOOM 1
#define BLOOM2PASS 0
#define BILINEAR 1
#define USE_TEXTURES 1
/// Constant Settings
#define GAMMA 2.2f
#define EXPOSURE 1.5f
namespace {
typedef unsigned short VertexIndex;
typedef glm::vec3 VertexAttributePosition;
typedef glm::vec3 VertexAttributeNormal;
typedef glm::vec2 VertexAttributeTexcoord;
typedef unsigned char TextureData;
typedef unsigned char BufferByte;
enum PrimitiveType{
Point = 1,
Line = 2,
Triangle = 3
};
struct VertexOut {
glm::vec4 pos;
glm::vec3 eyePos;
glm::vec3 eyeNor;
glm::vec2 texcoord0;
TextureData* dev_diffuseTex = NULL;
int texWidth, texHeight;
};
struct Primitive {
PrimitiveType primitiveType = Triangle; // C++ 11 init
VertexOut v[3];
};
struct Fragment {
glm::vec3 color;
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor;
glm::vec2 texcoord0;
TextureData* dev_diffuseTex;
int texWidth, texHeight;
};
struct PrimitiveDevBufPointers {
int primitiveMode; //from tinygltfloader macro
PrimitiveType primitiveType;
int numPrimitives;
int numIndices;
int numVertices;
// Vertex In, const after loaded
VertexIndex* dev_indices;
VertexAttributePosition* dev_position;
VertexAttributeNormal* dev_normal;
VertexAttributeTexcoord* dev_texcoord0;
// Materials, add more attributes when needed
TextureData* dev_diffuseTex;
int diffuseTexWidth;
int diffuseTexHeight;
// TextureData* dev_specularTex;
// TextureData* dev_normalTex;
// ...
// Vertex Out, vertex used for rasterization, this is changing every frame
VertexOut* dev_verticesOut;
};
}
static std::map<std::string, std::vector<PrimitiveDevBufPointers>> mesh2PrimitivesMap;
static int width = 0;
static int height = 0;
static int totalNumPrimitives = 0;
static Primitive *dev_primitives = NULL;
static Fragment *dev_fragmentBuffer = NULL;
static glm::vec3 *dev_framebuffer = NULL;
#if BLOOM
static glm::vec3 *dev_bloom1 = NULL;
static glm::vec3 *dev_bloom2 = NULL;
#endif
static int * dev_depth = NULL;
static int * dev_fragMutex = NULL;
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__
void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 color;
color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0;
color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0;
color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0;
// Each thread writes one pixel location in the texture (textel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
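// toneMap applies a simple exposure-based operator, col' = 1 - exp(-exposure * col),
// to compress HDR values (e.g. after the additive bloom pass) into [0, 1), and then
// gamma-encodes the result with exponent 1/GAMMA for display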
__global__
void toneMap(const int w, const int h, glm::vec3 *framebuffer, const float gamma, const float exposure) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 col = framebuffer[index];
//col = glm::pow(col, glm::vec3(1.0f / gamma));
col = glm::vec3(1.0f) - glm::exp(-exposure * col);
col = glm::pow(col, glm::vec3(1.0f / gamma));
framebuffer[index] = col;
}
}
__device__ __host__
glm::vec3 bytesToRGB(const TextureData* textureData, const int idx) {
return glm::vec3(textureData[idx] / 255.f, textureData[idx + 1] / 255.f, textureData[idx + 2] / 255.f);
}
// sample a texture: wrap UVs (repeat), optionally bilinear-filter, and convert the result from gamma to linear space
__device__ __host__
glm::vec3 texture2D(const int w, const int h, const TextureData* textureData, const glm::vec2 UV) {
glm::vec2 uv = glm::mod(UV, glm::vec2(1.0f)); // repeat UV
float xf = floor(uv.x * w);
float yf = floor(uv.y * h);
int x = (int)xf;
int y = (int)yf;
glm::vec3 col;
#if BILINEAR
	float xw = uv.x * w - xf;
	float yw = uv.y * h - yf;
	// clamp the +1 neighbors so the last row/column does not read past the texture
	int xn = x + 1 < w ? x + 1 : w - 1;
	int yn = y + 1 < h ? y + 1 : h - 1;
	glm::vec3 col00, col01, col10, col11;
	col00 = bytesToRGB(textureData, 3 * (x + y * w));
	col01 = bytesToRGB(textureData, 3 * (xn + y * w));
	col10 = bytesToRGB(textureData, 3 * (x + yn * w));
	col11 = bytesToRGB(textureData, 3 * (xn + yn * w));
	col = (1.f - yw) * ((1.f - xw) * col00 + xw * col01) + yw * ((1.f - xw) * col10 + xw * col11);
#else
int idx = 3 * (x + y * w);
col = bytesToRGB(textureData, idx);
#endif
// apply gamma correction
col = glm::pow(col, glm::vec3(GAMMA));
return col;
}
#if BLOOM
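// bloom pipeline: bloomHighPass downsamples the framebuffer to half resolution
// while keeping only bright pixels (a smoothstepped intensity threshold), the two
// gather kernels run a separable 9-tap Gaussian blur (weight[] holds the usual
// five coefficients for that kernel) horizontally and then vertically, and
// bloomComposite bilinearly upsamples the result and adds it onto the full-size
// framebuffer; the resulting HDR values are compressed later by toneMap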
// check for color components above 1, transfer to buffer with half res
__global__
void bloomHighPass(int wHalf, int hHalf, const glm::vec3 *framebuffer, glm::vec3 *bloombuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int bloomIdx = x + y * wHalf;
if (x < wHalf && y < hHalf) {
glm::vec3 col = glm::vec3(0);
// get avg of 4 px from framebuffer
for (int yOff = 0; yOff <= 1; yOff++) {
for (int xOff = 0; xOff <= 1; xOff++) {
int x2 = 2 * x + xOff;
int y2 = 2 * y + yOff;
int fbIdx = x2 + y2 * (2 * wHalf);
glm::vec3 fbCol = framebuffer[fbIdx];
float intensity = dot(fbCol, fbCol);
intensity -= 3.f; // threshold
intensity *= 0.5f; // stretch response curve
intensity = intensity < 0.f ? 0.f : intensity; // clamp
intensity = intensity > 1.f ? 1.f : intensity;
intensity = intensity * intensity * (3.f - 2.f * intensity); // smoothstep
col += 0.25f * intensity * fbCol;
}
}
bloombuffer[bloomIdx] = col;
}
}
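// The next two kernels apply a separable blur to the half-res bloom buffer:
// one horizontal and one vertical pass, each sampling the centre texel plus
// four taps on either side (indices clamped at the image border). The
// weights form a small Gaussian kernel (they sum to ~1), which is why the
// blur can be split per axis.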
__global__
void bloomHorizontalGather(int w, int h, const glm::vec3 *bufIn, glm::vec3 *bufOut) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int idx = x + y * w;
if (x < w && y < h) {
float weight[5] = { 0.227027027f, 0.194594595f, 0.121621622f, 0.054054054f, 0.016216216f};
glm::vec3 col = bufIn[idx] * weight[0];
for (int i = 1; i < 5; i++) {
int prev = x - i;
int next = x + i;
prev = prev < 0 ? 0 : prev;
next = next >= w ? w - 1 : next;
col += weight[i] * bufIn[prev + y * w];
col += weight[i] * bufIn[next + y * w];
}
bufOut[idx] = col;
}
}
__global__
void bloomVerticalGather(int w, int h, const glm::vec3 *bufIn, glm::vec3 *bufOut) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int idx = x + y * w;
if (x < w && y < h) {
float weight[5] = { 0.227027027f, 0.194594595f, 0.121621622f, 0.054054054f, 0.016216216f };
glm::vec3 col = bufIn[idx] * weight[0];
for (int i = 1; i < 5; i++) {
int prev = y - i;
int next = y + i;
prev = prev < 0 ? 0 : prev;
next = next >= h ? h - 1 : next;
col += weight[i] * bufIn[x + prev * w];
col += weight[i] * bufIn[x + next * w];
}
bufOut[idx] = col;
}
}
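// Composite the half-res bloom back onto the full-res framebuffer with a 2x
// bilinear upsample: each output pixel blends its own half-res texel with the
// horizontal, vertical and diagonal neighbours its pixel centre leans toward
// (weights 9/16, 3/16, 3/16 and 1/16, clamped at the borders).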
__global__
void bloomComposite(int w, int h, glm::vec3 *framebuffer, const glm::vec3 *bloombuffer) {
// going to bilinear upsample the bloomBuffer to get composite color
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int idx = x + y * w;
if (x < w && y < h) {
// get 4 samples of bloom buffer and interpolate
// if the current px is odd, it's in latter half of x / y of pixel
float wx = x & 1 ? 0.75f : 0.25f;
float wy = y & 1 ? 0.75f : 0.25f;
int wb = w / 2;
int hb = h / 2;
int xb = x / 2;
int yb = y / 2;
// quadrant offset
int x0 = x & 1 ? (xb) : (xb > 0 ? xb - 1 : 0);
int x1 = x & 1 ? (xb >= (wb - 1) ? wb - 1 : xb + 1) : (xb);
int y0 = y & 1 ? (yb) : (yb > 0 ? yb - 1 : 0);
int y1 = y & 1 ? (yb >= (hb - 1) ? hb - 1 : yb + 1) : (yb);
glm::vec3 col00, col01, col10, col11;
col00 = bloombuffer[x0 + y0 * wb];
col01 = bloombuffer[x1 + y0 * wb];
col10 = bloombuffer[x0 + y1 * wb];
col11 = bloombuffer[x1 + y1 * wb];
// add the color, HDR is resolved by tone mapping
framebuffer[idx] += wy * (wx * col00 + (1.f - wx) * col01) + (1.f - wy) * (wx * col10 + (1.f - wx) * col11);
}
}
#endif
/**
* Writes fragment colors to the framebuffer
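* Shading uses a simple Blinn-Phong model: three hard-coded directional
* lights, a diffuse albedo taken from the bound texture (or a constant 0.75
* grey) and a specular term with exponent 32. Fragments whose rasterized
* color is ~zero are treated as background and cleared to black.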
*/
__global__
void render(int w, int h, Fragment *fragmentBuffer, glm::vec3 *framebuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
if (glm::length(fragmentBuffer[index].color) < 0.0001f) {
framebuffer[index] = glm::vec3(0);
return;
}
glm::vec3 lightDir[3] = {
glm::normalize(glm::vec3(1)),
glm::normalize(glm::vec3(-1, -0.1, -0.8)),
glm::normalize(glm::vec3(0, -1, 0))
};
float lightIntensity[3] = {
1.5f, 0.3f, 0.2f
};
glm::vec3 lightCol[3] = {
glm::vec3(1.0f, 0.9f, 0.7f),
glm::vec3(0.8f, 0.9f, 1.0f),
glm::vec3(0.4f, 1.0f, 0.5f)
};
glm::vec3 matDiffuse;
#if USE_TEXTURES
if (fragmentBuffer[index].dev_diffuseTex != NULL) {
matDiffuse = texture2D(fragmentBuffer[index].texWidth, fragmentBuffer[index].texHeight,
fragmentBuffer[index].dev_diffuseTex, fragmentBuffer[index].texcoord0);
matDiffuse = glm::max(matDiffuse, glm::vec3(0.05f));
}
else {
matDiffuse = glm::vec3(0.75f);
}
#else
matDiffuse = glm::vec3(0.75f);
#endif
// simple blinn phong
glm::vec3 col = glm::vec3(0);
glm::vec3 nor = fragmentBuffer[index].eyeNor;
for (int i = 0; i < 3; i++) {
glm::vec3 halfVec = glm::normalize(lightDir[i] - glm::normalize(fragmentBuffer[index].eyePos));
float lambert = glm::dot(nor, lightDir[i]);
lambert = lambert < 0 ? 0 : lambert;
float blinn = pow(glm::dot(halfVec, nor), 32.0f);
blinn = blinn < 0 ? 0 : blinn;
col += lightIntensity[i] * lightCol[i] * (glm::vec3(blinn) + matDiffuse * lambert);
}
framebuffer[index] = col;
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
void rasterizeInit(int w, int h) {
width = w;
height = h;
hipFree(dev_fragmentBuffer);
hipMalloc(&dev_fragmentBuffer, width * height * sizeof(Fragment));
hipMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
hipFree(dev_framebuffer);
hipMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
hipMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
hipFree(dev_depth);
hipMalloc(&dev_depth, width * height * sizeof(int));
hipFree(dev_fragMutex);
hipMalloc(&dev_fragMutex, width * height * sizeof(int));
#if BLOOM
hipFree(dev_bloom1);
hipFree(dev_bloom2);
hipMalloc(&dev_bloom1, width * height / 4 * sizeof(glm::vec3));
hipMalloc(&dev_bloom2, width * height / 4 * sizeof(glm::vec3));
hipMemset(dev_bloom1, 0, width * height / 4 * sizeof(glm::vec3));
hipMemset(dev_bloom2, 0, width * height / 4 * sizeof(glm::vec3));
#endif
checkCUDAError("rasterizeInit");
}
__global__
void initDepth(int w, int h, int * depth)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
depth[index] = INT_MAX;
}
}
__global__
void initMutex(int w, int h, int * mutex) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
mutex[index] = 0;
}
}
/**
* Kernel copy with stride support, used where a plain hipMemcpy cannot handle interleaved (strided) source data
* One thread is responsible for copying one component
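* Example: a tightly packed vec3 POSITION accessor has n = 3 components of
* componentTypeByteSize = 4 (float), so with byteStride == 0 element `count`
* is read starting at byteOffset + count * 12 and written densely to dev_dst.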
*/
__global__
void _deviceBufferCopy(int N, BufferByte* dev_dst, const BufferByte* dev_src, int n, int byteStride, int byteOffset, int componentTypeByteSize) {
// Attribute (vec3 position)
// component (3 * float)
// byte (4 * byte)
// id of component
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
int count = i / n;
int offset = i - count * n; // which component of the attribute
for (int j = 0; j < componentTypeByteSize; j++) {
dev_dst[count * componentTypeByteSize * n
+ offset * componentTypeByteSize
+ j]
=
dev_src[byteOffset
+ count * (byteStride == 0 ? componentTypeByteSize * n : byteStride)
+ offset * componentTypeByteSize
+ j];
}
}
}
__global__
void _nodeMatrixTransform(
int numVertices,
VertexAttributePosition* position,
VertexAttributeNormal* normal,
glm::mat4 MV, glm::mat3 MV_normal) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
position[vid] = glm::vec3(MV * glm::vec4(position[vid], 1.0f));
normal[vid] = glm::normalize(MV_normal * normal[vid]);
}
}
glm::mat4 getMatrixFromNodeMatrixVector(const tinygltf::Node & n) {
glm::mat4 curMatrix(1.0);
const std::vector<double> &m = n.matrix;
if (m.size() > 0) {
// matrix, copy it
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
curMatrix[i][j] = (float)m.at(4 * i + j);
}
}
} else {
// no matrix, use rotation, scale, translation
if (n.translation.size() > 0) {
curMatrix[3][0] = n.translation[0];
curMatrix[3][1] = n.translation[1];
curMatrix[3][2] = n.translation[2];
}
if (n.rotation.size() > 0) {
glm::mat4 R;
glm::quat q;
q[0] = n.rotation[0];
q[1] = n.rotation[1];
q[2] = n.rotation[2];
q[3] = n.rotation[3]; // glTF stores rotation as a full [x, y, z, w] quaternion
R = glm::mat4_cast(q);
curMatrix = curMatrix * R;
}
if (n.scale.size() > 0) {
curMatrix = curMatrix * glm::scale(glm::vec3(n.scale[0], n.scale[1], n.scale[2]));
}
}
return curMatrix;
}
void traverseNode (
std::map<std::string, glm::mat4> & n2m,
const tinygltf::Scene & scene,
const std::string & nodeString,
const glm::mat4 & parentMatrix
)
{
const tinygltf::Node & n = scene.nodes.at(nodeString);
glm::mat4 M = parentMatrix * getMatrixFromNodeMatrixVector(n);
n2m.insert(std::pair<std::string, glm::mat4>(nodeString, M));
auto it = n.children.begin();
auto itEnd = n.children.end();
for (; it != itEnd; ++it) {
traverseNode(n2m, scene, *it, M);
}
}
void rasterizeSetBuffers(const tinygltf::Scene & scene) {
totalNumPrimitives = 0;
std::map<std::string, BufferByte*> bufferViewDevPointers;
// 1. copy all `bufferViews` to device memory
{
std::map<std::string, tinygltf::BufferView>::const_iterator it(
scene.bufferViews.begin());
std::map<std::string, tinygltf::BufferView>::const_iterator itEnd(
scene.bufferViews.end());
for (; it != itEnd; it++) {
const std::string key = it->first;
const tinygltf::BufferView &bufferView = it->second;
if (bufferView.target == 0) {
continue; // Unsupported bufferView.
}
const tinygltf::Buffer &buffer = scene.buffers.at(bufferView.buffer);
BufferByte* dev_bufferView;
hipMalloc(&dev_bufferView, bufferView.byteLength);
hipMemcpy(dev_bufferView, &buffer.data.front() + bufferView.byteOffset, bufferView.byteLength, hipMemcpyHostToDevice);
checkCUDAError("Set BufferView Device Mem");
bufferViewDevPointers.insert(std::make_pair(key, dev_bufferView));
}
}
// 2. for each mesh:
// for each primitive:
// build device buffers for the indices, material, and each attribute
// and store these pointers in a map
{
std::map<std::string, glm::mat4> nodeString2Matrix;
auto rootNodeNamesList = scene.scenes.at(scene.defaultScene);
{
auto it = rootNodeNamesList.begin();
auto itEnd = rootNodeNamesList.end();
for (; it != itEnd; ++it) {
traverseNode(nodeString2Matrix, scene, *it, glm::mat4(1.0f));
}
}
// parse through node to access mesh
auto itNode = nodeString2Matrix.begin();
auto itEndNode = nodeString2Matrix.end();
for (; itNode != itEndNode; ++itNode) {
const tinygltf::Node & N = scene.nodes.at(itNode->first);
const glm::mat4 & matrix = itNode->second;
const glm::mat3 & matrixNormal = glm::transpose(glm::inverse(glm::mat3(matrix)));
auto itMeshName = N.meshes.begin();
auto itEndMeshName = N.meshes.end();
for (; itMeshName != itEndMeshName; ++itMeshName) {
const tinygltf::Mesh & mesh = scene.meshes.at(*itMeshName);
auto res = mesh2PrimitivesMap.insert(std::pair<std::string, std::vector<PrimitiveDevBufPointers>>(mesh.name, std::vector<PrimitiveDevBufPointers>()));
std::vector<PrimitiveDevBufPointers> & primitiveVector = (res.first)->second;
// for each primitive
for (size_t i = 0; i < mesh.primitives.size(); i++) {
const tinygltf::Primitive &primitive = mesh.primitives[i];
if (primitive.indices.empty())
return;
// TODO: add new attributes for your PrimitiveDevBufPointers when you add new attributes
VertexIndex* dev_indices = NULL;
VertexAttributePosition* dev_position = NULL;
VertexAttributeNormal* dev_normal = NULL;
VertexAttributeTexcoord* dev_texcoord0 = NULL;
// ----------Indices-------------
const tinygltf::Accessor &indexAccessor = scene.accessors.at(primitive.indices);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(indexAccessor.bufferView);
BufferByte* dev_bufferView = bufferViewDevPointers.at(indexAccessor.bufferView);
// assume type is SCALAR for indices
int n = 1;
int numIndices = indexAccessor.count;
int componentTypeByteSize = sizeof(VertexIndex);
int byteLength = numIndices * n * componentTypeByteSize;
dim3 numThreadsPerBlock(128);
dim3 numBlocks((numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
hipMalloc(&dev_indices, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
numIndices,
(BufferByte*)dev_indices,
dev_bufferView,
n,
indexAccessor.byteStride,
indexAccessor.byteOffset,
componentTypeByteSize);
checkCUDAError("Set Index Buffer");
// ---------Primitive Info-------
// Warning: LINE_STRIP is not supported in tinygltfloader
int numPrimitives;
PrimitiveType primitiveType;
switch (primitive.mode) {
case TINYGLTF_MODE_TRIANGLES:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices / 3;
break;
case TINYGLTF_MODE_TRIANGLE_STRIP:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_TRIANGLE_FAN:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_LINE:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices / 2;
break;
case TINYGLTF_MODE_LINE_LOOP:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices + 1;
break;
case TINYGLTF_MODE_POINTS:
primitiveType = PrimitiveType::Point;
numPrimitives = numIndices;
break;
default:
// output error
break;
};
// ----------Attributes-------------
auto it(primitive.attributes.begin());
auto itEnd(primitive.attributes.end());
int numVertices = 0;
// for each attribute
for (; it != itEnd; it++) {
const tinygltf::Accessor &accessor = scene.accessors.at(it->second);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(accessor.bufferView);
int n = 1;
if (accessor.type == TINYGLTF_TYPE_SCALAR) {
n = 1;
}
else if (accessor.type == TINYGLTF_TYPE_VEC2) {
n = 2;
}
else if (accessor.type == TINYGLTF_TYPE_VEC3) {
n = 3;
}
else if (accessor.type == TINYGLTF_TYPE_VEC4) {
n = 4;
}
BufferByte * dev_bufferView = bufferViewDevPointers.at(accessor.bufferView);
BufferByte ** dev_attribute = NULL;
numVertices = accessor.count;
int componentTypeByteSize;
// Note: since the type of our attribute array (dev_position) is static (float32)
// We assume the glTF model attribute types are 5126 (FLOAT) here
if (it->first.compare("POSITION") == 0) {
componentTypeByteSize = sizeof(VertexAttributePosition) / n;
dev_attribute = (BufferByte**)&dev_position;
}
else if (it->first.compare("NORMAL") == 0) {
componentTypeByteSize = sizeof(VertexAttributeNormal) / n;
dev_attribute = (BufferByte**)&dev_normal;
}
else if (it->first.compare("TEXCOORD_0") == 0) {
componentTypeByteSize = sizeof(VertexAttributeTexcoord) / n;
dev_attribute = (BufferByte**)&dev_texcoord0;
}
std::cout << accessor.bufferView << " - " << it->second << " - " << it->first << '\n';
dim3 numThreadsPerBlock(128);
dim3 numBlocks((n * numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
int byteLength = numVertices * n * componentTypeByteSize;
hipMalloc(dev_attribute, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
n * numVertices,
*dev_attribute,
dev_bufferView,
n,
accessor.byteStride,
accessor.byteOffset,
componentTypeByteSize);
std::string msg = "Set Attribute Buffer: " + it->first;
checkCUDAError(msg.c_str());
}
// malloc for VertexOut
VertexOut* dev_vertexOut;
hipMalloc(&dev_vertexOut, numVertices * sizeof(VertexOut));
checkCUDAError("Malloc VertexOut Buffer");
// ----------Materials-------------
// You can only worry about this part once you started to
// implement textures for your rasterizer
TextureData* dev_diffuseTex = NULL;
int diffuseTexWidth = 0;
int diffuseTexHeight = 0;
if (!primitive.material.empty()) {
const tinygltf::Material &mat = scene.materials.at(primitive.material);
printf("material.name = %s\n", mat.name.c_str());
if (mat.values.find("diffuse") != mat.values.end()) {
std::string diffuseTexName = mat.values.at("diffuse").string_value;
if (scene.textures.find(diffuseTexName) != scene.textures.end()) {
const tinygltf::Texture &tex = scene.textures.at(diffuseTexName);
if (scene.images.find(tex.source) != scene.images.end()) {
const tinygltf::Image &image = scene.images.at(tex.source);
size_t s = image.image.size() * sizeof(TextureData);
hipMalloc(&dev_diffuseTex, s);
hipMemcpy(dev_diffuseTex, &image.image.at(0), s, hipMemcpyHostToDevice);
diffuseTexWidth = image.width;
diffuseTexHeight = image.height;
checkCUDAError("Set Texture Image data");
}
}
}
// TODO: write your code for other materials
// You may have to take a look at tinygltfloader
// You can also use the above code loading diffuse material as a start point
}
// ---------Node hierarchy transform--------
hipDeviceSynchronize();
dim3 numBlocksNodeTransform((numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_nodeMatrixTransform << <numBlocksNodeTransform, numThreadsPerBlock >> > (
numVertices,
dev_position,
dev_normal,
matrix,
matrixNormal);
checkCUDAError("Node hierarchy transformation");
// at the end of the for loop of primitive
// push dev pointers to map
primitiveVector.push_back(PrimitiveDevBufPointers{
primitive.mode,
primitiveType,
numPrimitives,
numIndices,
numVertices,
dev_indices,
dev_position,
dev_normal,
dev_texcoord0,
dev_diffuseTex,
diffuseTexWidth,
diffuseTexHeight,
dev_vertexOut //VertexOut
});
totalNumPrimitives += numPrimitives;
} // for each primitive
} // for each mesh
} // for each node
}
// 3. Malloc for dev_primitives
{
hipMalloc(&dev_primitives, totalNumPrimitives * sizeof(Primitive));
}
// Finally, hipFree raw dev_bufferViews
{
std::map<std::string, BufferByte*>::const_iterator it(bufferViewDevPointers.begin());
std::map<std::string, BufferByte*>::const_iterator itEnd(bufferViewDevPointers.end());
//bufferViewDevPointers
for (; it != itEnd; it++) {
hipFree(it->second);
}
checkCUDAError("Free BufferView Device Mem");
}
}
__global__
void _vertexTransformAndAssembly(
int numVertices,
PrimitiveDevBufPointers primitive,
glm::mat4 MVP, glm::mat4 MV, glm::mat3 MV_normal,
int width, int height) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
glm::vec4 posIn = glm::vec4(primitive.dev_position[vid], 1.0f);
// Multiply the MVP matrix for each vertex position, this will transform everything into clipping space
glm::vec4 posTransformed = MVP * posIn;
// divide the pos by its w element to transform into NDC space
posTransformed /= posTransformed.w;
// Finally transform x and y to viewport space
posTransformed.x = 0.5f * (posTransformed.x + 1.0f) * width;
posTransformed.y = 0.5f * (-posTransformed.y + 1.0f) * height;
primitive.dev_verticesOut[vid].pos = posTransformed; // screen position
primitive.dev_verticesOut[vid].eyeNor = glm::normalize(MV_normal * primitive.dev_normal[vid]);
primitive.dev_verticesOut[vid].eyePos = glm::vec3(MV * posIn); // view position for lighting
#if USE_TEXTURES
if (primitive.dev_diffuseTex != NULL) {
primitive.dev_verticesOut[vid].dev_diffuseTex = primitive.dev_diffuseTex;
primitive.dev_verticesOut[vid].texcoord0 = primitive.dev_texcoord0[vid];
primitive.dev_verticesOut[vid].texHeight = primitive.diffuseTexHeight;
primitive.dev_verticesOut[vid].texWidth = primitive.diffuseTexWidth;
}
else {
primitive.dev_verticesOut[vid].dev_diffuseTex = NULL;
}
#endif
}
}
static int curPrimitiveBeginId = 0;
__global__
void _primitiveAssembly(int numIndices, int curPrimitiveBeginId, Primitive* dev_primitives, PrimitiveDevBufPointers primitive) {
// index id
int iid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (iid < numIndices) {
int pid; // id for cur primitives vector
if (primitive.primitiveMode == TINYGLTF_MODE_TRIANGLES) {
pid = iid / (int)primitive.primitiveType;
dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType]
= primitive.dev_verticesOut[primitive.dev_indices[iid]];
}
}
}
// parallelize rasterization by triangle
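// One thread per triangle: compute the screen-space bounding box, walk the
// covered pixels, keep those inside the triangle (barycentric test), and
// interpolate normal / eye position / UVs perspective-correctly via zPersp.
// Visibility is resolved with a fixed-point depth compare performed under a
// per-pixel mutex so the depth test and fragment write stay atomic.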
__global__ void _rasterizeTriangle(const int numTris, const Primitive* primitives,
Fragment* frags, int* depthBuffer, const int width, const int height, int * mutex) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= numTris) return;
Primitive pri = primitives[idx];
glm::vec3 tri[3] = { glm::vec3(pri.v[0].pos), glm::vec3(pri.v[1].pos), glm::vec3(pri.v[2].pos) };
glm::vec3 triNor[3] = { glm::vec3(pri.v[0].eyeNor), glm::vec3(pri.v[1].eyeNor), glm::vec3(pri.v[2].eyeNor) };
glm::vec3 triPos[3] = { glm::vec3(pri.v[0].eyePos), glm::vec3(pri.v[1].eyePos), glm::vec3(pri.v[2].eyePos) };
AABB aabb = getAABBForTriangle(tri);
for (int y = (int) aabb.min.y; y <= (int) aabb.max.y; y++) {
if (y < 0 || y >= height) continue;
for (int x = (int) aabb.min.x; x <= (int) aabb.max.x; x++) {
if (x < 0 || x >= width) continue;
glm::vec2 pt = glm::vec2(x, y);
int pxIdx = y * width + x;
glm::vec3 bary = calculateBarycentricCoordinate(tri, pt);
if (!isBarycentricCoordInBounds(bary)) {
//frags[pxIdx].color = glm::vec3(0);
continue;
}
float zPersp = getZAtCoordinatePersp(bary, tri);
glm::vec3 interNor = glm::normalize(getPerspectiveInterpolatedVector(bary, triNor, tri, zPersp));
glm::vec3 interPos = getPerspectiveInterpolatedVector(bary, triPos, tri, zPersp);
int depth = (int)( getZAtCoordinate(bary, tri) * INT_MAX);
bool isSet;
do {
isSet = (atomicCAS(&mutex[pxIdx], 0, 1) == 0);
if (isSet) {
if (depthBuffer[pxIdx] > depth) {
// replaced fragment with this triangle
frags[pxIdx].color = interNor;
frags[pxIdx].eyeNor = interNor;
frags[pxIdx].eyePos = interPos;
depthBuffer[pxIdx] = depth;
#if USE_TEXTURES
if (pri.v[0].dev_diffuseTex != NULL) {
glm::vec3 triUV[3] = {
glm::vec3(pri.v[0].texcoord0, 0.f),
glm::vec3(pri.v[1].texcoord0, 0.f),
glm::vec3(pri.v[2].texcoord0, 0.f)
};
glm::vec2 interUV = glm::vec2(getPerspectiveInterpolatedVector(bary, triUV, tri, zPersp));
frags[pxIdx].dev_diffuseTex = pri.v[0].dev_diffuseTex;
frags[pxIdx].texcoord0 = interUV;
frags[pxIdx].texHeight = pri.v[0].texHeight;
frags[pxIdx].texWidth = pri.v[0].texWidth;
}
else {
frags[pxIdx].dev_diffuseTex = NULL;
}
#endif
}
mutex[pxIdx] = 0;
}
} while (!isSet);
}
}
}
int ech = 0;
/**
* Perform rasterization.
*/
void rasterize(uchar4 *pbo, const glm::mat4 & MVP, const glm::mat4 & MV, const glm::mat3 MV_normal) {
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
// Vertex Process & primitive assembly
{
curPrimitiveBeginId = 0;
dim3 numThreadsPerBlock(128);
auto it = mesh2PrimitivesMap.begin();
auto itEnd = mesh2PrimitivesMap.end();
for (; it != itEnd; ++it) {
auto p = (it->second).begin(); // each primitive
auto pEnd = (it->second).end();
for (; p != pEnd; ++p) {
dim3 numBlocksForVertices((p->numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
dim3 numBlocksForIndices((p->numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_vertexTransformAndAssembly << < numBlocksForVertices, numThreadsPerBlock >> >(p->numVertices, *p, MVP, MV, MV_normal, width, height);
checkCUDAError("Vertex Processing");
hipDeviceSynchronize();
_primitiveAssembly << < numBlocksForIndices, numThreadsPerBlock >> >
(p->numIndices,
curPrimitiveBeginId,
dev_primitives,
*p);
checkCUDAError("Primitive Assembly");
curPrimitiveBeginId += p->numPrimitives;
}
}
checkCUDAError("Vertex Processing and Primitive Assembly");
}
hipMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
initDepth << <blockCount2d, blockSize2d >> >(width, height, dev_depth);
checkCUDAError("init depth");
initMutex << < blockCount2d, blockSize2d >> > (width, height, dev_fragMutex);
checkCUDAError("init mutex");
const int numThreads = 128;
dim3 triBlockCount = (totalNumPrimitives + numThreads - 1) / numThreads;
_rasterizeTriangle << < triBlockCount, numThreads >> > (totalNumPrimitives, dev_primitives, dev_fragmentBuffer,
dev_depth, width, height, dev_fragMutex);
checkCUDAError("rasterize tris");
// Copy depthbuffer colors into framebuffer
render << <blockCount2d, blockSize2d >> >(width, height, dev_fragmentBuffer, dev_framebuffer);
checkCUDAError("fragment shader");
#if BLOOM
// make downsampled high pass
dim3 blockDownsampleCount2d((width / 2 - 1) / blockSize2d.x + 1,
(height / 2 - 1) / blockSize2d.y + 1);
bloomHighPass << < blockDownsampleCount2d, blockSize2d >> > (width / 2, height / 2, dev_framebuffer, dev_bloom1);
// apply gaussian
bloomHorizontalGather << < blockDownsampleCount2d, blockSize2d >> >(width / 2, height / 2, dev_bloom1, dev_bloom2);
bloomVerticalGather << < blockDownsampleCount2d, blockSize2d >> >(width / 2, height / 2, dev_bloom2, dev_bloom1);
#if BLOOM2PASS
bloomHorizontalGather << < blockDownsampleCount2d, blockSize2d >> >(width / 2, height / 2, dev_bloom1, dev_bloom2);
bloomVerticalGather << < blockDownsampleCount2d, blockSize2d >> >(width / 2, height / 2, dev_bloom2, dev_bloom1);
#endif
// upsample and composite
bloomComposite << < blockCount2d, blockSize2d >> > (width, height, dev_framebuffer, dev_bloom1);
#endif
// HDR tonemap
toneMap << <blockCount2d, blockSize2d >> >(width, height, dev_framebuffer, GAMMA, EXPOSURE);
checkCUDAError("fragment shader");
// Copy framebuffer into OpenGL buffer for OpenGL previewing
hipLaunchKernelGGL(( sendImageToPBO), dim3(blockCount2d), dim3(blockSize2d), 0, 0, pbo, width, height, dev_framebuffer);
checkCUDAError("copy render result to pbo");
}
/**
* Called once at the end of the program to free CUDA memory.
*/
void rasterizeFree() {
// deconstruct primitives attribute/indices device buffer
auto it(mesh2PrimitivesMap.begin());
auto itEnd(mesh2PrimitivesMap.end());
for (; it != itEnd; ++it) {
for (auto p = it->second.begin(); p != it->second.end(); ++p) {
hipFree(p->dev_indices);
hipFree(p->dev_position);
hipFree(p->dev_normal);
hipFree(p->dev_texcoord0);
hipFree(p->dev_diffuseTex);
hipFree(p->dev_verticesOut);
//TODO: release other attributes and materials
}
}
////////////
hipFree(dev_primitives);
dev_primitives = NULL;
hipFree(dev_fragmentBuffer);
dev_fragmentBuffer = NULL;
hipFree(dev_framebuffer);
dev_framebuffer = NULL;
hipFree(dev_depth);
dev_depth = NULL;
hipFree(dev_fragMutex);
dev_fragMutex = NULL;
#if BLOOM
hipFree(dev_bloom1);
dev_bloom1 = NULL;
hipFree(dev_bloom2);
dev_bloom2 = NULL;
#endif
checkCUDAError("rasterize Free");
}
| 4908b265407943498a55c37de6911002f03a35d1.cu | /**
* @file rasterize.cu
* @brief CUDA-accelerated rasterization pipeline.
* @authors Skeleton code: Yining Karl Li, Kai Ninomiya, Shuai Shao (Shrek)
* @date 2012-2016
* @copyright University of Pennsylvania & STUDENT
*/
#include <cmath>
#include <cstdio>
#include <cuda.h>
#include <chrono>
#include <cuda_runtime.h>
#include <thrust/random.h>
#include <util/checkCUDAError.h>
#include <util/tiny_gltf_loader.h>
#include "rasterizeTools.h"
#include "rasterize.h"
#include <glm/gtc/quaternion.hpp>
#include <glm/gtc/matrix_transform.hpp>
/// Features
#define BLOOM 1
#define BLOOM2PASS 0
#define BILINEAR 1
#define USE_TEXTURES 1
/// Constant Settings
#define GAMMA 2.2f
#define EXPOSURE 1.5f
namespace {
typedef unsigned short VertexIndex;
typedef glm::vec3 VertexAttributePosition;
typedef glm::vec3 VertexAttributeNormal;
typedef glm::vec2 VertexAttributeTexcoord;
typedef unsigned char TextureData;
typedef unsigned char BufferByte;
enum PrimitiveType{
Point = 1,
Line = 2,
Triangle = 3
};
struct VertexOut {
glm::vec4 pos;
glm::vec3 eyePos;
glm::vec3 eyeNor;
glm::vec2 texcoord0;
TextureData* dev_diffuseTex = NULL;
int texWidth, texHeight;
};
struct Primitive {
PrimitiveType primitiveType = Triangle; // C++ 11 init
VertexOut v[3];
};
struct Fragment {
glm::vec3 color;
glm::vec3 eyePos; // eye space position used for shading
glm::vec3 eyeNor;
glm::vec2 texcoord0;
TextureData* dev_diffuseTex;
int texWidth, texHeight;
};
struct PrimitiveDevBufPointers {
int primitiveMode; //from tinygltfloader macro
PrimitiveType primitiveType;
int numPrimitives;
int numIndices;
int numVertices;
// Vertex In, const after loaded
VertexIndex* dev_indices;
VertexAttributePosition* dev_position;
VertexAttributeNormal* dev_normal;
VertexAttributeTexcoord* dev_texcoord0;
// Materials, add more attributes when needed
TextureData* dev_diffuseTex;
int diffuseTexWidth;
int diffuseTexHeight;
// TextureData* dev_specularTex;
// TextureData* dev_normalTex;
// ...
// Vertex Out, vertex used for rasterization, this is changing every frame
VertexOut* dev_verticesOut;
};
}
static std::map<std::string, std::vector<PrimitiveDevBufPointers>> mesh2PrimitivesMap;
static int width = 0;
static int height = 0;
static int totalNumPrimitives = 0;
static Primitive *dev_primitives = NULL;
static Fragment *dev_fragmentBuffer = NULL;
static glm::vec3 *dev_framebuffer = NULL;
#if BLOOM
static glm::vec3 *dev_bloom1 = NULL;
static glm::vec3 *dev_bloom2 = NULL;
#endif
static int * dev_depth = NULL;
static int * dev_fragMutex = NULL;
/**
* Kernel that writes the image to the OpenGL PBO directly.
*/
__global__
void sendImageToPBO(uchar4 *pbo, int w, int h, glm::vec3 *image) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 color;
color.x = glm::clamp(image[index].x, 0.0f, 1.0f) * 255.0;
color.y = glm::clamp(image[index].y, 0.0f, 1.0f) * 255.0;
color.z = glm::clamp(image[index].z, 0.0f, 1.0f) * 255.0;
// Each thread writes one pixel location in the texture (texel)
pbo[index].w = 0;
pbo[index].x = color.x;
pbo[index].y = color.y;
pbo[index].z = color.z;
}
}
__global__
void toneMap(const int w, const int h, glm::vec3 *framebuffer, const float gamma, const float exposure) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
glm::vec3 col = framebuffer[index];
//col = glm::pow(col, glm::vec3(1.0f / gamma));
col = glm::vec3(1.0f) - glm::exp(-exposure * col);
col = glm::pow(col, glm::vec3(1.0f / gamma));
framebuffer[index] = col;
}
}
__device__ __host__
glm::vec3 bytesToRGB(const TextureData* textureData, const int idx) {
return glm::vec3(textureData[idx] / 255.f, textureData[idx + 1] / 255.f, textureData[idx + 2] / 255.f);
}
// get a texture color,
__device__ __host__
glm::vec3 texture2D(const int w, const int h, const TextureData* textureData, const glm::vec2 UV) {
glm::vec2 uv = glm::mod(UV, glm::vec2(1.0f)); // repeat UV
float xf = floor(uv.x * w);
float yf = floor(uv.y * h);
int x = (int)xf;
int y = (int)yf;
glm::vec3 col;
#if BILINEAR
float xw = uv.x * w - xf;
float yw = uv.y * h - yf;
glm::vec3 col00, col01, col10, col11;
col00 = bytesToRGB(textureData, 3 * (x + y * w));
col01 = bytesToRGB(textureData, 3 * (x + 1 + y * w));
col10 = bytesToRGB(textureData, 3 * (x + (y + 1) * w));
col11 = bytesToRGB(textureData, 3 * (x + 1 + (y + 1) * w));
col = (1.f - yw) * ((1.f - xw) * col00 + xw * col01) + yw * ((1.f - xw) * col10 + xw * col11);
#else
int idx = 3 * (x + y * w);
col = bytesToRGB(textureData, idx);
#endif
// apply gamma correction
col = glm::pow(col, glm::vec3(GAMMA));
return col;
}
#if BLOOM
// check for color components above 1, transfer to buffer with half res
__global__
void bloomHighPass(int wHalf, int hHalf, const glm::vec3 *framebuffer, glm::vec3 *bloombuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int bloomIdx = x + y * wHalf;
if (x < wHalf && y < hHalf) {
glm::vec3 col = glm::vec3(0);
// get avg of 4 px from framebuffer
for (int yOff = 0; yOff <= 1; yOff++) {
for (int xOff = 0; xOff <= 1; xOff++) {
int x2 = 2 * x + xOff;
int y2 = 2 * y + yOff;
int fbIdx = x2 + y2 * (2 * wHalf);
glm::vec3 fbCol = framebuffer[fbIdx];
float intensity = dot(fbCol, fbCol);
intensity -= 3.f; // threshold
intensity *= 0.5f; // stretch response curve
intensity = intensity < 0.f ? 0.f : intensity; // clamp
intensity = intensity > 1.f ? 1.f : intensity;
intensity = intensity * intensity * (3.f - 2.f * intensity); // smoothstep
col += 0.25f * intensity * fbCol;
}
}
bloombuffer[bloomIdx] = col;
}
}
__global__
void bloomHorizontalGather(int w, int h, const glm::vec3 *bufIn, glm::vec3 *bufOut) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int idx = x + y * w;
if (x < w && y < h) {
float weight[5] = { 0.227027027f, 0.194594595f, 0.121621622f, 0.054054054f, 0.016216216f};
glm::vec3 col = bufIn[idx] * weight[0];
for (int i = 1; i < 5; i++) {
int prev = x - i;
int next = x + i;
prev = prev < 0 ? 0 : prev;
next = next >= w ? w - 1 : next;
col += weight[i] * bufIn[prev + y * w];
col += weight[i] * bufIn[next + y * w];
}
bufOut[idx] = col;
}
}
__global__
void bloomVerticalGather(int w, int h, const glm::vec3 *bufIn, glm::vec3 *bufOut) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int idx = x + y * w;
if (x < w && y < h) {
float weight[5] = { 0.227027027f, 0.194594595f, 0.121621622f, 0.054054054f, 0.016216216f };
glm::vec3 col = bufIn[idx] * weight[0];
for (int i = 1; i < 5; i++) {
int prev = y - i;
int next = y + i;
prev = prev < 0 ? 0 : prev;
next = next >= h ? h - 1 : next;
col += weight[i] * bufIn[x + prev * w];
col += weight[i] * bufIn[x + next * w];
}
bufOut[idx] = col;
}
}
__global__
void bloomComposite(int w, int h, glm::vec3 *framebuffer, const glm::vec3 *bloombuffer) {
// going to bilinear upsample the bloomBuffer to get composite color
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int idx = x + y * w;
if (x < w && y < h) {
// get 4 samples of bloom buffer and interpolate
// if the current px is odd, it's in latter half of x / y of pixel
float wx = x & 1 ? 0.75f : 0.25f;
float wy = y & 1 ? 0.75f : 0.25f;
int wb = w / 2;
int hb = h / 2;
int xb = x / 2;
int yb = y / 2;
// quadrant offset
int x0 = x & 1 ? (xb) : (xb > 0 ? xb - 1 : 0);
int x1 = x & 1 ? (xb >= (wb - 1) ? wb - 1 : xb + 1) : (xb);
int y0 = y & 1 ? (yb) : (yb > 0 ? yb - 1 : 0);
int y1 = y & 1 ? (yb >= (hb - 1) ? hb - 1 : yb + 1) : (yb);
glm::vec3 col00, col01, col10, col11;
col00 = bloombuffer[x0 + y0 * wb];
col01 = bloombuffer[x1 + y0 * wb];
col10 = bloombuffer[x0 + y1 * wb];
col11 = bloombuffer[x1 + y1 * wb];
// add the color, HDR is resolved by tone mapping
framebuffer[idx] += wy * (wx * col00 + (1.f - wx) * col01) + (1.f - wy) * (wx * col10 + (1.f - wx) * col11);
}
}
#endif
/**
* Writes fragment colors to the framebuffer
*/
__global__
void render(int w, int h, Fragment *fragmentBuffer, glm::vec3 *framebuffer) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = x + (y * w);
if (x < w && y < h) {
if (glm::length(fragmentBuffer[index].color) < 0.0001f) {
framebuffer[index] = glm::vec3(0);
return;
}
glm::vec3 lightDir[3] = {
glm::normalize(glm::vec3(1)),
glm::normalize(glm::vec3(-1, -0.1, -0.8)),
glm::normalize(glm::vec3(0, -1, 0))
};
float lightIntensity[3] = {
1.5f, 0.3f, 0.2f
};
glm::vec3 lightCol[3] = {
glm::vec3(1.0f, 0.9f, 0.7f),
glm::vec3(0.8f, 0.9f, 1.0f),
glm::vec3(0.4f, 1.0f, 0.5f)
};
glm::vec3 matDiffuse;
#if USE_TEXTURES
if (fragmentBuffer[index].dev_diffuseTex != NULL) {
matDiffuse = texture2D(fragmentBuffer[index].texWidth, fragmentBuffer[index].texHeight,
fragmentBuffer[index].dev_diffuseTex, fragmentBuffer[index].texcoord0);
matDiffuse = glm::max(matDiffuse, glm::vec3(0.05f));
}
else {
matDiffuse = glm::vec3(0.75f);
}
#else
matDiffuse = glm::vec3(0.75f);
#endif
// simple blinn phong
glm::vec3 col = glm::vec3(0);
glm::vec3 nor = fragmentBuffer[index].eyeNor;
for (int i = 0; i < 3; i++) {
glm::vec3 halfVec = glm::normalize(lightDir[i] - glm::normalize(fragmentBuffer[index].eyePos));
float lambert = glm::dot(nor, lightDir[i]);
lambert = lambert < 0 ? 0 : lambert;
float blinn = pow(glm::dot(halfVec, nor), 32.0f);
blinn = blinn < 0 ? 0 : blinn;
col += lightIntensity[i] * lightCol[i] * (glm::vec3(blinn) + matDiffuse * lambert);
}
framebuffer[index] = col;
}
}
/**
* Called once at the beginning of the program to allocate memory.
*/
void rasterizeInit(int w, int h) {
width = w;
height = h;
cudaFree(dev_fragmentBuffer);
cudaMalloc(&dev_fragmentBuffer, width * height * sizeof(Fragment));
cudaMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
cudaFree(dev_framebuffer);
cudaMalloc(&dev_framebuffer, width * height * sizeof(glm::vec3));
cudaMemset(dev_framebuffer, 0, width * height * sizeof(glm::vec3));
cudaFree(dev_depth);
cudaMalloc(&dev_depth, width * height * sizeof(int));
cudaFree(dev_fragMutex);
cudaMalloc(&dev_fragMutex, width * height * sizeof(int));
#if BLOOM
cudaFree(dev_bloom1);
cudaFree(dev_bloom2);
cudaMalloc(&dev_bloom1, width * height / 4 * sizeof(glm::vec3));
cudaMalloc(&dev_bloom2, width * height / 4 * sizeof(glm::vec3));
cudaMemset(dev_bloom1, 0, width * height / 4 * sizeof(glm::vec3));
cudaMemset(dev_bloom2, 0, width * height / 4 * sizeof(glm::vec3));
#endif
checkCUDAError("rasterizeInit");
}
__global__
void initDepth(int w, int h, int * depth)
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
depth[index] = INT_MAX;
}
}
__global__
void initMutex(int w, int h, int * mutex) {
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x < w && y < h)
{
int index = x + (y * w);
mutex[index] = 0;
}
}
/**
* Kernel copy with stride support, used where a plain cudaMemcpy cannot handle interleaved (strided) source data
* One thread is responsible for copying one component
*/
__global__
void _deviceBufferCopy(int N, BufferByte* dev_dst, const BufferByte* dev_src, int n, int byteStride, int byteOffset, int componentTypeByteSize) {
// Attribute (vec3 position)
// component (3 * float)
// byte (4 * byte)
// id of component
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
if (i < N) {
int count = i / n;
int offset = i - count * n; // which component of the attribute
for (int j = 0; j < componentTypeByteSize; j++) {
dev_dst[count * componentTypeByteSize * n
+ offset * componentTypeByteSize
+ j]
=
dev_src[byteOffset
+ count * (byteStride == 0 ? componentTypeByteSize * n : byteStride)
+ offset * componentTypeByteSize
+ j];
}
}
}
__global__
void _nodeMatrixTransform(
int numVertices,
VertexAttributePosition* position,
VertexAttributeNormal* normal,
glm::mat4 MV, glm::mat3 MV_normal) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
position[vid] = glm::vec3(MV * glm::vec4(position[vid], 1.0f));
normal[vid] = glm::normalize(MV_normal * normal[vid]);
}
}
glm::mat4 getMatrixFromNodeMatrixVector(const tinygltf::Node & n) {
glm::mat4 curMatrix(1.0);
const std::vector<double> &m = n.matrix;
if (m.size() > 0) {
// matrix, copy it
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
curMatrix[i][j] = (float)m.at(4 * i + j);
}
}
} else {
// no matrix, use rotation, scale, translation
if (n.translation.size() > 0) {
curMatrix[3][0] = n.translation[0];
curMatrix[3][1] = n.translation[1];
curMatrix[3][2] = n.translation[2];
}
if (n.rotation.size() > 0) {
glm::mat4 R;
glm::quat q;
q[0] = n.rotation[0];
q[1] = n.rotation[1];
q[2] = n.rotation[2];
q[3] = n.rotation[3]; // glTF stores rotation as a full [x, y, z, w] quaternion
R = glm::mat4_cast(q);
curMatrix = curMatrix * R;
}
if (n.scale.size() > 0) {
curMatrix = curMatrix * glm::scale(glm::vec3(n.scale[0], n.scale[1], n.scale[2]));
}
}
return curMatrix;
}
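// Depth-first walk of the scene graph: stores parentMatrix * localMatrix for
// every node in n2m, so each mesh can later be transformed by its accumulated
// node-hierarchy matrix.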
void traverseNode (
std::map<std::string, glm::mat4> & n2m,
const tinygltf::Scene & scene,
const std::string & nodeString,
const glm::mat4 & parentMatrix
)
{
const tinygltf::Node & n = scene.nodes.at(nodeString);
glm::mat4 M = parentMatrix * getMatrixFromNodeMatrixVector(n);
n2m.insert(std::pair<std::string, glm::mat4>(nodeString, M));
auto it = n.children.begin();
auto itEnd = n.children.end();
for (; it != itEnd; ++it) {
traverseNode(n2m, scene, *it, M);
}
}
void rasterizeSetBuffers(const tinygltf::Scene & scene) {
totalNumPrimitives = 0;
std::map<std::string, BufferByte*> bufferViewDevPointers;
// 1. copy all `bufferViews` to device memory
{
std::map<std::string, tinygltf::BufferView>::const_iterator it(
scene.bufferViews.begin());
std::map<std::string, tinygltf::BufferView>::const_iterator itEnd(
scene.bufferViews.end());
for (; it != itEnd; it++) {
const std::string key = it->first;
const tinygltf::BufferView &bufferView = it->second;
if (bufferView.target == 0) {
continue; // Unsupported bufferView.
}
const tinygltf::Buffer &buffer = scene.buffers.at(bufferView.buffer);
BufferByte* dev_bufferView;
cudaMalloc(&dev_bufferView, bufferView.byteLength);
cudaMemcpy(dev_bufferView, &buffer.data.front() + bufferView.byteOffset, bufferView.byteLength, cudaMemcpyHostToDevice);
checkCUDAError("Set BufferView Device Mem");
bufferViewDevPointers.insert(std::make_pair(key, dev_bufferView));
}
}
// 2. for each mesh:
// for each primitive:
// build device buffers for the indices, material, and each attribute
// and store these pointers in a map
{
std::map<std::string, glm::mat4> nodeString2Matrix;
auto rootNodeNamesList = scene.scenes.at(scene.defaultScene);
{
auto it = rootNodeNamesList.begin();
auto itEnd = rootNodeNamesList.end();
for (; it != itEnd; ++it) {
traverseNode(nodeString2Matrix, scene, *it, glm::mat4(1.0f));
}
}
// parse through node to access mesh
auto itNode = nodeString2Matrix.begin();
auto itEndNode = nodeString2Matrix.end();
for (; itNode != itEndNode; ++itNode) {
const tinygltf::Node & N = scene.nodes.at(itNode->first);
const glm::mat4 & matrix = itNode->second;
const glm::mat3 & matrixNormal = glm::transpose(glm::inverse(glm::mat3(matrix)));
auto itMeshName = N.meshes.begin();
auto itEndMeshName = N.meshes.end();
for (; itMeshName != itEndMeshName; ++itMeshName) {
const tinygltf::Mesh & mesh = scene.meshes.at(*itMeshName);
auto res = mesh2PrimitivesMap.insert(std::pair<std::string, std::vector<PrimitiveDevBufPointers>>(mesh.name, std::vector<PrimitiveDevBufPointers>()));
std::vector<PrimitiveDevBufPointers> & primitiveVector = (res.first)->second;
// for each primitive
for (size_t i = 0; i < mesh.primitives.size(); i++) {
const tinygltf::Primitive &primitive = mesh.primitives[i];
if (primitive.indices.empty())
return;
// TODO: add new attributes for your PrimitiveDevBufPointers when you add new attributes
VertexIndex* dev_indices = NULL;
VertexAttributePosition* dev_position = NULL;
VertexAttributeNormal* dev_normal = NULL;
VertexAttributeTexcoord* dev_texcoord0 = NULL;
// ----------Indices-------------
const tinygltf::Accessor &indexAccessor = scene.accessors.at(primitive.indices);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(indexAccessor.bufferView);
BufferByte* dev_bufferView = bufferViewDevPointers.at(indexAccessor.bufferView);
// assume type is SCALAR for indices
int n = 1;
int numIndices = indexAccessor.count;
int componentTypeByteSize = sizeof(VertexIndex);
int byteLength = numIndices * n * componentTypeByteSize;
dim3 numThreadsPerBlock(128);
dim3 numBlocks((numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
cudaMalloc(&dev_indices, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
numIndices,
(BufferByte*)dev_indices,
dev_bufferView,
n,
indexAccessor.byteStride,
indexAccessor.byteOffset,
componentTypeByteSize);
checkCUDAError("Set Index Buffer");
// ---------Primitive Info-------
// Warning: LINE_STRIP is not supported in tinygltfloader
int numPrimitives;
PrimitiveType primitiveType;
switch (primitive.mode) {
case TINYGLTF_MODE_TRIANGLES:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices / 3;
break;
case TINYGLTF_MODE_TRIANGLE_STRIP:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_TRIANGLE_FAN:
primitiveType = PrimitiveType::Triangle;
numPrimitives = numIndices - 2;
break;
case TINYGLTF_MODE_LINE:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices / 2;
break;
case TINYGLTF_MODE_LINE_LOOP:
primitiveType = PrimitiveType::Line;
numPrimitives = numIndices + 1;
break;
case TINYGLTF_MODE_POINTS:
primitiveType = PrimitiveType::Point;
numPrimitives = numIndices;
break;
default:
// output error
break;
};
// ----------Attributes-------------
auto it(primitive.attributes.begin());
auto itEnd(primitive.attributes.end());
int numVertices = 0;
// for each attribute
for (; it != itEnd; it++) {
const tinygltf::Accessor &accessor = scene.accessors.at(it->second);
const tinygltf::BufferView &bufferView = scene.bufferViews.at(accessor.bufferView);
int n = 1;
if (accessor.type == TINYGLTF_TYPE_SCALAR) {
n = 1;
}
else if (accessor.type == TINYGLTF_TYPE_VEC2) {
n = 2;
}
else if (accessor.type == TINYGLTF_TYPE_VEC3) {
n = 3;
}
else if (accessor.type == TINYGLTF_TYPE_VEC4) {
n = 4;
}
BufferByte * dev_bufferView = bufferViewDevPointers.at(accessor.bufferView);
BufferByte ** dev_attribute = NULL;
numVertices = accessor.count;
int componentTypeByteSize;
// Note: since the type of our attribute array (dev_position) is static (float32)
// We assume the glTF model attribute types are 5126 (FLOAT) here
if (it->first.compare("POSITION") == 0) {
componentTypeByteSize = sizeof(VertexAttributePosition) / n;
dev_attribute = (BufferByte**)&dev_position;
}
else if (it->first.compare("NORMAL") == 0) {
componentTypeByteSize = sizeof(VertexAttributeNormal) / n;
dev_attribute = (BufferByte**)&dev_normal;
}
else if (it->first.compare("TEXCOORD_0") == 0) {
componentTypeByteSize = sizeof(VertexAttributeTexcoord) / n;
dev_attribute = (BufferByte**)&dev_texcoord0;
}
std::cout << accessor.bufferView << " - " << it->second << " - " << it->first << '\n';
dim3 numThreadsPerBlock(128);
dim3 numBlocks((n * numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
int byteLength = numVertices * n * componentTypeByteSize;
cudaMalloc(dev_attribute, byteLength);
_deviceBufferCopy << <numBlocks, numThreadsPerBlock >> > (
n * numVertices,
*dev_attribute,
dev_bufferView,
n,
accessor.byteStride,
accessor.byteOffset,
componentTypeByteSize);
std::string msg = "Set Attribute Buffer: " + it->first;
checkCUDAError(msg.c_str());
}
// malloc for VertexOut
VertexOut* dev_vertexOut;
cudaMalloc(&dev_vertexOut, numVertices * sizeof(VertexOut));
checkCUDAError("Malloc VertexOut Buffer");
// ----------Materials-------------
// You can only worry about this part once you started to
// implement textures for your rasterizer
TextureData* dev_diffuseTex = NULL;
int diffuseTexWidth = 0;
int diffuseTexHeight = 0;
if (!primitive.material.empty()) {
const tinygltf::Material &mat = scene.materials.at(primitive.material);
printf("material.name = %s\n", mat.name.c_str());
if (mat.values.find("diffuse") != mat.values.end()) {
std::string diffuseTexName = mat.values.at("diffuse").string_value;
if (scene.textures.find(diffuseTexName) != scene.textures.end()) {
const tinygltf::Texture &tex = scene.textures.at(diffuseTexName);
if (scene.images.find(tex.source) != scene.images.end()) {
const tinygltf::Image &image = scene.images.at(tex.source);
size_t s = image.image.size() * sizeof(TextureData);
cudaMalloc(&dev_diffuseTex, s);
cudaMemcpy(dev_diffuseTex, &image.image.at(0), s, cudaMemcpyHostToDevice);
diffuseTexWidth = image.width;
diffuseTexHeight = image.height;
checkCUDAError("Set Texture Image data");
}
}
}
// TODO: write your code for other materials
// You may have to take a look at tinygltfloader
// You can also use the above code loading diffuse material as a start point
}
// ---------Node hierarchy transform--------
cudaDeviceSynchronize();
dim3 numBlocksNodeTransform((numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_nodeMatrixTransform << <numBlocksNodeTransform, numThreadsPerBlock >> > (
numVertices,
dev_position,
dev_normal,
matrix,
matrixNormal);
checkCUDAError("Node hierarchy transformation");
// at the end of the for loop of primitive
// push dev pointers to map
primitiveVector.push_back(PrimitiveDevBufPointers{
primitive.mode,
primitiveType,
numPrimitives,
numIndices,
numVertices,
dev_indices,
dev_position,
dev_normal,
dev_texcoord0,
dev_diffuseTex,
diffuseTexWidth,
diffuseTexHeight,
dev_vertexOut //VertexOut
});
totalNumPrimitives += numPrimitives;
} // for each primitive
} // for each mesh
} // for each node
}
// 3. Malloc for dev_primitives
{
cudaMalloc(&dev_primitives, totalNumPrimitives * sizeof(Primitive));
}
// Finally, cudaFree raw dev_bufferViews
{
std::map<std::string, BufferByte*>::const_iterator it(bufferViewDevPointers.begin());
std::map<std::string, BufferByte*>::const_iterator itEnd(bufferViewDevPointers.end());
//bufferViewDevPointers
for (; it != itEnd; it++) {
cudaFree(it->second);
}
checkCUDAError("Free BufferView Device Mem");
}
}
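// Vertex shader stage: transforms each vertex to clip space with MVP, does
// the perspective divide to NDC, then remaps x/y to pixel coordinates
// (y flipped so +y points down the screen). Eye-space position and normal
// (via MV / MV_normal) plus, when USE_TEXTURES is enabled, the diffuse
// texture pointer, UVs and texture size are written to VertexOut for the
// later shading stage.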
__global__
void _vertexTransformAndAssembly(
int numVertices,
PrimitiveDevBufPointers primitive,
glm::mat4 MVP, glm::mat4 MV, glm::mat3 MV_normal,
int width, int height) {
// vertex id
int vid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (vid < numVertices) {
glm::vec4 posIn = glm::vec4(primitive.dev_position[vid], 1.0f);
// Multiply the MVP matrix for each vertex position, this will transform everything into clipping space
glm::vec4 posTransformed = MVP * posIn;
// divide the pos by its w element to transform into NDC space
posTransformed /= posTransformed.w;
// Finally transform x and y to viewport space
posTransformed.x = 0.5f * (posTransformed.x + 1.0f) * width;
posTransformed.y = 0.5f * (-posTransformed.y + 1.0f) * height;
primitive.dev_verticesOut[vid].pos = posTransformed; // screen position
primitive.dev_verticesOut[vid].eyeNor = glm::normalize(MV_normal * primitive.dev_normal[vid]);
primitive.dev_verticesOut[vid].eyePos = glm::vec3(MV * posIn); // view position for lighting
#if USE_TEXTURES
if (primitive.dev_diffuseTex != NULL) {
primitive.dev_verticesOut[vid].dev_diffuseTex = primitive.dev_diffuseTex;
primitive.dev_verticesOut[vid].texcoord0 = primitive.dev_texcoord0[vid];
primitive.dev_verticesOut[vid].texHeight = primitive.diffuseTexHeight;
primitive.dev_verticesOut[vid].texWidth = primitive.diffuseTexWidth;
}
else {
primitive.dev_verticesOut[vid].dev_diffuseTex = NULL;
}
#endif
}
}
static int curPrimitiveBeginId = 0;
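// Primitive assembly: one thread per index. Every primitiveType consecutive
// indices (3 for triangles) form one Primitive, written at
// curPrimitiveBeginId + iid / 3 so primitives from successive meshes end up
// contiguous in the global dev_primitives array. Only TINYGLTF_MODE_TRIANGLES
// is handled here.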
__global__
void _primitiveAssembly(int numIndices, int curPrimitiveBeginId, Primitive* dev_primitives, PrimitiveDevBufPointers primitive) {
// index id
int iid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (iid < numIndices) {
int pid; // id for cur primitives vector
if (primitive.primitiveMode == TINYGLTF_MODE_TRIANGLES) {
pid = iid / (int)primitive.primitiveType;
dev_primitives[pid + curPrimitiveBeginId].v[iid % (int)primitive.primitiveType]
= primitive.dev_verticesOut[primitive.dev_indices[iid]];
}
}
}
// parallelize rasterization by triangle
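// Per-pixel synchronization: atomicCAS(&mutex[pxIdx], 0, 1) acquires a spin
// lock, the depth compare and fragment write happen while it is held, and
// writing 0 releases it. Acquiring and releasing inside the same loop
// iteration (rather than spinning in a bare while) helps avoid the classic
// intra-warp deadlock of naive GPU spin locks.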
__global__ void _rasterizeTriangle(const int numTris, const Primitive* primitives,
Fragment* frags, int* depthBuffer, const int width, const int height, int * mutex) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= numTris) return;
Primitive pri = primitives[idx];
glm::vec3 tri[3] = { glm::vec3(pri.v[0].pos), glm::vec3(pri.v[1].pos), glm::vec3(pri.v[2].pos) };
glm::vec3 triNor[3] = { glm::vec3(pri.v[0].eyeNor), glm::vec3(pri.v[1].eyeNor), glm::vec3(pri.v[2].eyeNor) };
glm::vec3 triPos[3] = { glm::vec3(pri.v[0].eyePos), glm::vec3(pri.v[1].eyePos), glm::vec3(pri.v[2].eyePos) };
AABB aabb = getAABBForTriangle(tri);
for (int y = (int) aabb.min.y; y <= (int) aabb.max.y; y++) {
if (y < 0 || y >= height) continue;
for (int x = (int) aabb.min.x; x <= (int) aabb.max.x; x++) {
if (x < 0 || x >= width) continue;
glm::vec2 pt = glm::vec2(x, y);
int pxIdx = y * width + x;
glm::vec3 bary = calculateBarycentricCoordinate(tri, pt);
if (!isBarycentricCoordInBounds(bary)) {
//frags[pxIdx].color = glm::vec3(0);
continue;
}
float zPersp = getZAtCoordinatePersp(bary, tri);
glm::vec3 interNor = glm::normalize(getPerspectiveInterpolatedVector(bary, triNor, tri, zPersp));
glm::vec3 interPos = getPerspectiveInterpolatedVector(bary, triPos, tri, zPersp);
int depth = (int)( getZAtCoordinate(bary, tri) * INT_MAX);
bool isSet;
do {
isSet = (atomicCAS(&mutex[pxIdx], 0, 1) == 0);
if (isSet) {
if (depthBuffer[pxIdx] > depth) {
// replaced fragment with this triangle
frags[pxIdx].color = interNor;
frags[pxIdx].eyeNor = interNor;
frags[pxIdx].eyePos = interPos;
depthBuffer[pxIdx] = depth;
#if USE_TEXTURES
if (pri.v[0].dev_diffuseTex != NULL) {
glm::vec3 triUV[3] = {
glm::vec3(pri.v[0].texcoord0, 0.f),
glm::vec3(pri.v[1].texcoord0, 0.f),
glm::vec3(pri.v[2].texcoord0, 0.f)
};
glm::vec2 interUV = glm::vec2(getPerspectiveInterpolatedVector(bary, triUV, tri, zPersp));
frags[pxIdx].dev_diffuseTex = pri.v[0].dev_diffuseTex;
frags[pxIdx].texcoord0 = interUV;
frags[pxIdx].texHeight = pri.v[0].texHeight;
frags[pxIdx].texWidth = pri.v[0].texWidth;
}
else {
frags[pxIdx].dev_diffuseTex = NULL;
}
#endif
}
mutex[pxIdx] = 0;
}
} while (!isSet);
}
}
}
int ech = 0;
/**
* Perform rasterization.
*/
void rasterize(uchar4 *pbo, const glm::mat4 & MVP, const glm::mat4 & MV, const glm::mat3 MV_normal) {
int sideLength2d = 8;
dim3 blockSize2d(sideLength2d, sideLength2d);
dim3 blockCount2d((width - 1) / blockSize2d.x + 1,
(height - 1) / blockSize2d.y + 1);
// Vertex Process & primitive assembly
{
curPrimitiveBeginId = 0;
dim3 numThreadsPerBlock(128);
auto it = mesh2PrimitivesMap.begin();
auto itEnd = mesh2PrimitivesMap.end();
for (; it != itEnd; ++it) {
auto p = (it->second).begin(); // each primitive
auto pEnd = (it->second).end();
for (; p != pEnd; ++p) {
dim3 numBlocksForVertices((p->numVertices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
dim3 numBlocksForIndices((p->numIndices + numThreadsPerBlock.x - 1) / numThreadsPerBlock.x);
_vertexTransformAndAssembly << < numBlocksForVertices, numThreadsPerBlock >> >(p->numVertices, *p, MVP, MV, MV_normal, width, height);
checkCUDAError("Vertex Processing");
cudaDeviceSynchronize();
_primitiveAssembly << < numBlocksForIndices, numThreadsPerBlock >> >
(p->numIndices,
curPrimitiveBeginId,
dev_primitives,
*p);
checkCUDAError("Primitive Assembly");
curPrimitiveBeginId += p->numPrimitives;
}
}
checkCUDAError("Vertex Processing and Primitive Assembly");
}
cudaMemset(dev_fragmentBuffer, 0, width * height * sizeof(Fragment));
initDepth << <blockCount2d, blockSize2d >> >(width, height, dev_depth);
checkCUDAError("init depth");
initMutex << < blockCount2d, blockSize2d >> > (width, height, dev_fragMutex);
checkCUDAError("init mutex");
const int numThreads = 128;
dim3 triBlockCount = (totalNumPrimitives + numThreads - 1) / numThreads;
_rasterizeTriangle << < triBlockCount, numThreads >> > (totalNumPrimitives, dev_primitives, dev_fragmentBuffer,
dev_depth, width, height, dev_fragMutex);
checkCUDAError("rasterize tris");
// Copy depthbuffer colors into framebuffer
render << <blockCount2d, blockSize2d >> >(width, height, dev_fragmentBuffer, dev_framebuffer);
checkCUDAError("fragment shader");
#if BLOOM
// make downsampled high pass
dim3 blockDownsampleCount2d((width / 2 - 1) / blockSize2d.x + 1,
(height / 2 - 1) / blockSize2d.y + 1);
bloomHighPass << < blockDownsampleCount2d, blockSize2d >> > (width / 2, height / 2, dev_framebuffer, dev_bloom1);
// apply gaussian
bloomHorizontalGather << < blockDownsampleCount2d, blockSize2d >> >(width / 2, height / 2, dev_bloom1, dev_bloom2);
bloomVerticalGather << < blockDownsampleCount2d, blockSize2d >> >(width / 2, height / 2, dev_bloom2, dev_bloom1);
#if BLOOM2PASS
bloomHorizontalGather << < blockDownsampleCount2d, blockSize2d >> >(width / 2, height / 2, dev_bloom1, dev_bloom2);
bloomVerticalGather << < blockDownsampleCount2d, blockSize2d >> >(width / 2, height / 2, dev_bloom2, dev_bloom1);
#endif
// upsample and composite
bloomComposite << < blockCount2d, blockSize2d >> > (width, height, dev_framebuffer, dev_bloom1);
#endif
// HDR tonemap
toneMap << <blockCount2d, blockSize2d >> >(width, height, dev_framebuffer, GAMMA, EXPOSURE);
checkCUDAError("fragment shader");
// Copy framebuffer into OpenGL buffer for OpenGL previewing
sendImageToPBO<<<blockCount2d, blockSize2d>>>(pbo, width, height, dev_framebuffer);
checkCUDAError("copy render result to pbo");
}
/**
* Called once at the end of the program to free CUDA memory.
*/
void rasterizeFree() {
// deconstruct primitives attribute/indices device buffer
auto it(mesh2PrimitivesMap.begin());
auto itEnd(mesh2PrimitivesMap.end());
for (; it != itEnd; ++it) {
for (auto p = it->second.begin(); p != it->second.end(); ++p) {
cudaFree(p->dev_indices);
cudaFree(p->dev_position);
cudaFree(p->dev_normal);
cudaFree(p->dev_texcoord0);
cudaFree(p->dev_diffuseTex);
cudaFree(p->dev_verticesOut);
//TODO: release other attributes and materials
}
}
////////////
cudaFree(dev_primitives);
dev_primitives = NULL;
cudaFree(dev_fragmentBuffer);
dev_fragmentBuffer = NULL;
cudaFree(dev_framebuffer);
dev_framebuffer = NULL;
cudaFree(dev_depth);
dev_depth = NULL;
cudaFree(dev_fragMutex);
dev_fragMutex = NULL;
#if BLOOM
cudaFree(dev_bloom1);
dev_bloom1 = NULL;
cudaFree(dev_bloom2);
dev_bloom2 = NULL;
#endif
checkCUDAError("rasterize Free");
}
|
2bfaca4d2410676d8bdca7ac8afb531e79a7edc0.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
extern "C" {
#include "CUDA_header.h"
}
#define EPS 1E-3f
/* Divides 'dividend' by 'divisor', rounding up.
*/
static inline
int divisionCeil(int dividend, int divisor){
return (dividend + divisor - 1) / divisor;
}
// Returns the first power of 2 that is >= 'base'.
static inline
int higherEqualPow2(int base){
int result = 1;
while(result < base) result <<= 1;
return result;
}
/* Multi-block reduce.
* Accepts only vectors that are power of 2.
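* Each block tree-reduces blockDim.x elements in shared memory and writes its
* partial sum to result[blockIdx.x]; the launch must use a power-of-two block
* size and blockDim.x * sizeof(int) bytes of dynamic shared memory.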
*/
__global__ static
void reduce(int *vec, int *result){
extern __shared__ int sdata[];
int tid = threadIdx.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
sdata[tid] = vec[idx];
__syncthreads();
// Reduce
for(int stride = blockDim.x >> 1; stride > 0; stride >>= 1){
if(threadIdx.x < stride)
sdata[threadIdx.x] += sdata[threadIdx.x+stride];
__syncthreads();
}
result[blockIdx.x] = sdata[0];
}
/*
* Collision Count procedure implemented in CUDA.
*
* This procedure parallelizes the sequential algorithm:
* for i in 0:N-2
* for j in i+1:N-1
* contacts += (bead[i] == bead[j])
* by performing just the outer 'for' in parallel.
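*
* Each thread takes one bead and compares it against the following 'star'
* beads (wrapping around the coordinate vector). Coordinates are staged in
* shared memory in blocks of 1024 and refreshed as the sliding window
* advances; the per-thread contact counts are then tree-reduced per block
* into result[blockIdx.x].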
*/
__global__
void count_contacts_cu(float3 *coords, int *result, int nCoords, int star){
int baseIdx = blockIdx.x * 1024;
int horizontalId = threadIdx.x + blockIdx.x * blockDim.x;
// We read our element in a register (surplus threads will read anything)
float3 buf = coords[horizontalId % nCoords];
// Read first 2 blocks into shared memory
extern __shared__ float3 sCoords[];
sCoords[threadIdx.x] = coords[ (baseIdx + threadIdx.x) % nCoords ];
sCoords[threadIdx.x + 1024] = coords[ (baseIdx + threadIdx.x + 1024) % nCoords ];
__syncthreads();
// Move our base index
baseIdx = baseIdx + 2048; // We could use modulus here, but doesn't seem necessary
// Count contacts
int iterations = 0;
int contacts = 0;
int offset = 1;
while(iterations < star){
// Do 1024 iterations, or maybe less
int limit = min(iterations + 1024, star);
for(; iterations < limit; iterations++){
// We want to check if sqrt( (Vx - Vy)(Vx - Vy) ) <= 1 Vx and Vy are float3 vectors
// Which is the same as (Vx - Vy)(Vx - Vy) <= 1 and the product is an inner product
// So we begin by taking the difference
float3 diff = make_float3(
buf.x - sCoords[threadIdx.x + offset].x,
buf.y - sCoords[threadIdx.x + offset].y,
buf.z - sCoords[threadIdx.x + offset].z
);
// horizontalId + iterations + 1 is the element we are comparing to
if(diff.x*diff.x + diff.y*diff.y + diff.z*diff.z == 1){
contacts += 1;
}
offset++;
}
// If offset == 1025, this means beads in shared memory need to be replaced
if(offset == 1025){
// Change blocks in shared memory when needed
// Unfortunately we need to synchronize threads here
__syncthreads();
// Overwrite the older block in shared memory with the newer one
sCoords[threadIdx.x] = sCoords[threadIdx.x + 1024];
// Read new block
sCoords[threadIdx.x + 1024] = coords[ (baseIdx + threadIdx.x) % nCoords ];
// We also have to sync here
__syncthreads();
// Move base index
baseIdx += 1024;
offset = 1;
}
}
// If the vector has an even number of elements, the S* iterations above do
// not cover every pair, so half of the threads must execute one more iteration.
// Notice that the way the 'for...loop' above was implemented, when the
// code reaches this point, the shared memory has valid elements for one
// more iteration, so we don't need to verify it again.
// Do one more iteration:
if(horizontalId < nCoords/2 && nCoords%2 == 0){
float3 diff = make_float3(
buf.x - sCoords[threadIdx.x + offset].x,
buf.y - sCoords[threadIdx.x + offset].y,
buf.z - sCoords[threadIdx.x + offset].z
);
// horizontalId + iterations + 1 is the element we are comparing to
if(diff.x*diff.x + diff.y*diff.y + diff.z*diff.z == 1){
contacts += 1;
}
offset++;
iterations++;
}
// Sync before reducing contacts on shared memory
__syncthreads();
// Fill shared memory with contacts
// We ignore contact from surplus threads
extern __shared__ int sdata[];
sdata[threadIdx.x] = contacts * (horizontalId < nCoords);
__syncthreads();
// Reduce 1024 elements
for(int stride = 512; stride > 0; stride >>= 1){
if(threadIdx.x < stride)
sdata[threadIdx.x] += sdata[threadIdx.x+stride];
__syncthreads();
}
// Export result
if(threadIdx.x == 0){
result[blockIdx.x] = sdata[0];
}
}
/* Gets the next cuda stream in the circular list of streams.
*/
static
hipStream_t get_next_stream(){
const int nStreams = 8;
static hipStream_t streams[nStreams];
static unsigned int launches = 0;
// Allocate cuda streams in the first execution
static int streamInit = 0;
if(streamInit == 0){
streamInit = 1;
for(int i = 0; i < nStreams; i++){
hipStreamCreate(&streams[i]);
}
}
launches++;
return streams[launches%nStreams];
}
/* Given a vector with 3D coordinates of points in the space,
* this function calculates the number of contacts among
* points, using a CUDA-enabled GPU.
*
* This function just launches the kernel, returning a
* structure that can later be used to fetch the result
* back from the device memory.
*/
extern "C" struct CollisionCountPromise
count_contacts_launch(ElfFloat3d *vector, int size){
if(size == 0){
CollisionCountPromise retval = { NULL, NULL };
return retval;
}
float3 *d_vector;
int *d_result;
hipStream_t stream = get_next_stream();
// Allocate cuda vector for the 3D coordinates
hipMalloc(&d_vector, sizeof(float3) * size);
hipMemcpyAsync(d_vector, vector, sizeof(float3) * size, hipMemcpyHostToDevice, stream);
// Prepare kernel launch parameters
const int elemInShmem = 2048; // 2048 because we need 2 blocks of 1024 elements in shmem.
int nThreads = 1024; // We allocate maximum number of threads per block.
int nBlocks = divisionCeil(size, nThreads);
int nShMem = elemInShmem * sizeof(float3); // Shared memory required
// Calculate the number of iterations S* (S star)
// It is the number of iterations where --all-- threads execute work
int star;
if(size%2 == 0)
star = (size - 2)/2;
else star = (size - 1)/2;
// Allocate cuda memory for the number of contacts
// This will also be used as a working vector for reducing among blocks
int resultSize = higherEqualPow2(nBlocks);
hipMalloc(&d_result, sizeof(int) * resultSize);
hipMemsetAsync(d_result, 0, sizeof(int) * resultSize, stream); // Reset is needed due to size overestimation
// Finally launch kernels
hipLaunchKernelGGL(( count_contacts_cu), dim3(nBlocks), dim3(nThreads), nShMem, stream, d_vector, d_result, size, star);
// Reduce the result vector
nBlocks = resultSize/1024;
int workSize = resultSize;
int *d_toReduce = d_result;
int *d_reduced = (int *) d_vector;
while(true){
if(nBlocks == 0){
hipLaunchKernelGGL(( reduce), dim3(1), dim3(workSize), sizeof(int) * workSize, stream, d_toReduce, d_reduced);
break;
}
hipLaunchKernelGGL(( reduce), dim3(nBlocks), dim3(1024), sizeof(int) * 1024, stream, d_toReduce, d_reduced);
// For the next run, vectors should be swapped
int *aux = d_reduced;
d_reduced = d_toReduce;
d_toReduce = aux;
// For the next run, the workSize and nBlocks are lower
workSize = nBlocks;
nBlocks = workSize/1024;
}
const struct CollisionCountPromise ret = { d_toReduce, d_reduced };
return ret;
}
/* This procedure fetches the result from the call to the
* _launch correspondent.
* The pointers within the promise structure are freed, so
* it shouldn't be used anywhere after a call to this function.
*/
extern "C" int count_contacts_fetch(struct CollisionCountPromise promise){
if(promise.d_toReduce == NULL && promise.d_reduced == NULL)
return 0;
const int n = 1;
int result[n];
hipMemcpy(&result, promise.d_reduced, sizeof(int) * n, hipMemcpyDeviceToHost);
hipFree(promise.d_toReduce);
hipFree(promise.d_reduced);
return result[0];
}
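/* Illustrative usage sketch (not part of the original source): launch the
 * count asynchronously, overlap other host work, then fetch. ElfFloat3d and
 * CollisionCountPromise come from CUDA_header.h; 'coords' and 'nCoords' are
 * placeholders.
 */
/*
	ElfFloat3d *coords = ...;   // nCoords bead coordinates
	int nCoords = ...;
	struct CollisionCountPromise p = count_contacts_launch(coords, nCoords);
	// ... other host work overlaps with the GPU here ...
	int contacts = count_contacts_fetch(p);   // blocks, then frees the device buffers
*/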
| 2bfaca4d2410676d8bdca7ac8afb531e79a7edc0.cu | #include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
extern "C" {
#include "CUDA_header.h"
}
#define EPS 1E-3f
/* Divides 'dividend' by 'divisor', rounding up.
*/
static inline
int divisionCeil(int dividend, int divisor){
return (dividend + divisor - 1) / divisor;
}
// Returns the first power of 2 that is >= 'base'.
static inline
int higherEqualPow2(int base){
int result = 1;
while(result < base) result <<= 1;
return result;
}
/* Multi-block reduce.
* Accepts only vectors that are power of 2.
*/
__global__ static
void reduce(int *vec, int *result){
extern __shared__ int sdata[];
int tid = threadIdx.x;
int idx = blockIdx.x * blockDim.x + threadIdx.x;
sdata[tid] = vec[idx];
__syncthreads();
// Reduce
for(int stride = blockDim.x >> 1; stride > 0; stride >>= 1){
if(threadIdx.x < stride)
sdata[threadIdx.x] += sdata[threadIdx.x+stride];
__syncthreads();
}
result[blockIdx.x] = sdata[0];
}
/*
* Collision Count procedure implemented in CUDA.
*
* This procedure parallelizes the sequential algorithm:
* for i in 0:N-2
* for j in i+1:N-1
* contacts += (bead[i] == bead[j])
* by performing just the outer 'for' in parallel.
*/
__global__
void count_contacts_cu(float3 *coords, int *result, int nCoords, int star){
int baseIdx = blockIdx.x * 1024;
int horizontalId = threadIdx.x + blockIdx.x * blockDim.x;
// We read our element in a register (surplus threads will read anything)
float3 buf = coords[horizontalId % nCoords];
// Read first 2 blocks into shared memory
extern __shared__ float3 sCoords[];
sCoords[threadIdx.x] = coords[ (baseIdx + threadIdx.x) % nCoords ];
sCoords[threadIdx.x + 1024] = coords[ (baseIdx + threadIdx.x + 1024) % nCoords ];
__syncthreads();
// Move our base index
baseIdx = baseIdx + 2048; // We could use modulus here, but doesn't seem necessary
// Count contacts
int iterations = 0;
int contacts = 0;
int offset = 1;
while(iterations < star){
// Do 1024 iterations, or maybe less
int limit = min(iterations + 1024, star);
for(; iterations < limit; iterations++){
// We want to check if sqrt( (Vx - Vy)(Vx - Vy) ) <= 1 Vx and Vy are float3 vectors
// Which is the same as (Vx - Vy)(Vx - Vy) <= 1 and the product is an inner product
// So we begin by taking the difference
float3 diff = make_float3(
buf.x - sCoords[threadIdx.x + offset].x,
buf.y - sCoords[threadIdx.x + offset].y,
buf.z - sCoords[threadIdx.x + offset].z
);
// horizontalId + iterations + 1 is the element we are comparing to
if(diff.x*diff.x + diff.y*diff.y + diff.z*diff.z == 1){
contacts += 1;
}
offset++;
}
// If offset == 1025, this means beads in shared memory need to be replaced
if(offset == 1025){
// Change blocks in shared memory when needed
// Unfortunately we need to synchronize threads here
__syncthreads();
// Overwrite the older block in shared memory with the newer one
sCoords[threadIdx.x] = sCoords[threadIdx.x + 1024];
// Read new block
sCoords[threadIdx.x + 1024] = coords[ (baseIdx + threadIdx.x) % nCoords ];
// We also have to sync here
__syncthreads();
// Move base index
baseIdx += 1024;
offset = 1;
}
}
// If the vector has an even number of elements, the S* iterations above do
// not cover every pair, so half of the threads must execute one more iteration.
// Notice that the way the 'for...loop' above was implemented, when the
// code reaches this point, the shared memory has valid elements for one
// more iteration, so we don't need to verify it again.
// Do one more iteration:
if(horizontalId < nCoords/2 && nCoords%2 == 0){
float3 diff = make_float3(
buf.x - sCoords[threadIdx.x + offset].x,
buf.y - sCoords[threadIdx.x + offset].y,
buf.z - sCoords[threadIdx.x + offset].z
);
// horizontalId + iterations + 1 is the element we are comparing to
if(diff.x*diff.x + diff.y*diff.y + diff.z*diff.z == 1){
contacts += 1;
}
offset++;
iterations++;
}
// Sync before reducing contacts on shared memory
__syncthreads();
// Fill shared memory with contacts
// We ignore contact from surplus threads
extern __shared__ int sdata[];
sdata[threadIdx.x] = contacts * (horizontalId < nCoords);
__syncthreads();
// Reduce 1024 elements
for(int stride = 512; stride > 0; stride >>= 1){
if(threadIdx.x < stride)
sdata[threadIdx.x] += sdata[threadIdx.x+stride];
__syncthreads();
}
// Export result
if(threadIdx.x == 0){
result[blockIdx.x] = sdata[0];
}
}
/* Gets the next cuda stream in the circular list of streams.
*/
static
cudaStream_t get_next_stream(){
const int nStreams = 8;
static cudaStream_t streams[nStreams];
static unsigned int launches = 0;
// Allocate cuda streams in the first execution
static int streamInit = 0;
if(streamInit == 0){
streamInit = 1;
for(int i = 0; i < nStreams; i++){
cudaStreamCreate(&streams[i]);
}
}
launches++;
return streams[launches%nStreams];
}
/* Given a vector with 3D coordinates of points in the space,
* this function calculates the number of contacts among
* points, using a CUDA-enabled GPU.
*
* This function just launches the kernel, returning a
* structure that can later be used to fetch the result
* back from the device memory.
*/
extern "C" struct CollisionCountPromise
count_contacts_launch(ElfFloat3d *vector, int size){
if(size == 0){
CollisionCountPromise retval = { NULL, NULL };
return retval;
}
float3 *d_vector;
int *d_result;
cudaStream_t stream = get_next_stream();
// Allocate cuda vector for the 3D coordinates
cudaMalloc(&d_vector, sizeof(float3) * size);
cudaMemcpyAsync(d_vector, vector, sizeof(float3) * size, cudaMemcpyHostToDevice, stream);
// Prepare kernel launch parameters
const int elemInShmem = 2048; // 2048 because we need 2 blocks of 1024 elements in shmem.
int nThreads = 1024; // We allocate maximum number of threads per block.
int nBlocks = divisionCeil(size, nThreads);
int nShMem = elemInShmem * sizeof(float3); // Shared memory required
// Calculate the number of iterations S* (S star)
// It is the number of iterations where --all-- threads execute work
int star;
if(size%2 == 0)
star = (size - 2)/2;
else star = (size - 1)/2;
// Allocate cuda memory for the number of contacts
// This will also be used as a working vector for reducing among blocks
int resultSize = higherEqualPow2(nBlocks);
cudaMalloc(&d_result, sizeof(int) * resultSize);
cudaMemsetAsync(d_result, 0, sizeof(int) * resultSize, stream); // Reset is needed due to size overestimation
// Finally launch kernels
count_contacts_cu<<<nBlocks, nThreads, nShMem, stream>>>(d_vector, d_result, size, star);
// Reduce the result vector
nBlocks = resultSize/1024;
int workSize = resultSize;
int *d_toReduce = d_result;
int *d_reduced = (int *) d_vector;
while(true){
if(nBlocks == 0){
reduce<<<1, workSize, sizeof(int) * workSize, stream>>>(d_toReduce, d_reduced);
break;
}
reduce<<<nBlocks, 1024, sizeof(int) * 1024, stream>>>(d_toReduce, d_reduced);
// For the next run, vectors should be swapped
int *aux = d_reduced;
d_reduced = d_toReduce;
d_toReduce = aux;
// For the next run, the workSize and nBlocks are lower
workSize = nBlocks;
nBlocks = workSize/1024;
}
const struct CollisionCountPromise ret = { d_toReduce, d_reduced };
return ret;
}
/* This procedure fetches the result from the call to the
* _launch correspondent.
* The pointers within the promise structure are freed, so
* it shouldn't be used anywhere after a call to this function.
*/
extern "C" int count_contacts_fetch(struct CollisionCountPromise promise){
if(promise.d_toReduce == NULL && promise.d_reduced == NULL)
return 0;
const int n = 1;
int result[n];
cudaMemcpy(&result, promise.d_reduced, sizeof(int) * n, cudaMemcpyDeviceToHost);
cudaFree(promise.d_toReduce);
cudaFree(promise.d_reduced);
return result[0];
}
|
7b600e09a5dc87699be048ed0917c23b9346bc79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "julia.cu.h"
namespace {
struct hipComplex
{
real r;
real i;
__device__ hipComplex(real rr = 0.0, real ii = 0.0) : r{rr}, i{ii} { ; }
__device__ real magnitude2() const
{
return r*r + i*i;
}
__device__ hipComplex operator+(const hipComplex& other) const
{
return hipComplex{r + other.r, i + other.i};
}
__device__ hipComplex operator*(const hipComplex& other) const
{
return hipComplex{r*other.r - i*other.i, i*other.r + r*other.i};
}
};
__device__ int julia(real seedR, real seedI, real x, real y, size_t maxIter)
{
hipComplex c{seedR, seedI}; // -0.8; 0.156
hipComplex a{x, y};
int i=0;
for (; i<maxIter; ++i)
{
a = a*a + c;
if (a.magnitude2() > 1000)
{
break;
}
}
return i;
}
__global__ void juliaKernel(uchar4* ptr,
real seedR, real seedI,
int w, int h,
real x0, real x1, real y0, real y1,
const uchar4* gradient, size_t gradientSize)
{
const int px = threadIdx.x + blockIdx.x * blockDim.x;
const int py = threadIdx.y + blockIdx.y * blockDim.y;
const int offset = px + py*w;
const real x = x0 + (x1 - x0)*static_cast<real>(px)/w;
const real y = y0 + (y1 - y0)*static_cast<real>(py)/h;
if (px >= w || py >= h)
{
return;
}
uchar4& pixel = ptr[offset];
const size_t maxIter = gradientSize;
const int juliaValue = julia(seedR, seedI, x, y, maxIter);
const real c = static_cast<real>(juliaValue) / static_cast<real>(maxIter);
pixel = gradient[ static_cast<int>(static_cast<real>(gradientSize - 1) * c) ];
}
} // namespace
void renderJuliaSet(void* devPtr,
real seedR, real seedI,
int w, int h,
real x0, real x1, real y0, real y1,
const void* gradient, size_t gradientSize)
{
const unsigned int blockSize = 32;
dim3 grid{(w + blockSize-1)/blockSize, (h + blockSize - 1)/blockSize};
dim3 block{blockSize, blockSize};
hipLaunchKernelGGL(( juliaKernel), dim3(grid), dim3(block), 0, 0,
static_cast<uchar4*>(devPtr),
seedR, seedI,
w, h,
x0, x1, y0, y1,
static_cast<const uchar4*>(gradient), gradientSize);
}
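/* Illustrative usage sketch (not part of the original source). It assumes
 * 'real' comes from julia.cu.h, 'devImage' is a device uchar4 framebuffer of
 * w*h pixels and 'devGradient' is a device uchar4 palette of paletteSize
 * entries; the seed -0.8, 0.156 is the one mentioned in the kernel comment.
 */
/*
	renderJuliaSet(devImage, -0.8, 0.156, w, h,
	               -1.5, 1.5, -1.5, 1.5,
	               devGradient, paletteSize);
*/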
| 7b600e09a5dc87699be048ed0917c23b9346bc79.cu | #include "julia.cu.h"
namespace {
struct cuComplex
{
real r;
real i;
__device__ cuComplex(real rr = 0.0, real ii = 0.0) : r{rr}, i{ii} { ; }
__device__ real magnitude2() const
{
return r*r + i*i;
}
__device__ cuComplex operator+(const cuComplex& other) const
{
return cuComplex{r + other.r, i + other.i};
}
__device__ cuComplex operator*(const cuComplex& other) const
{
return cuComplex{r*other.r - i*other.i, i*other.r + r*other.i};
}
};
__device__ int julia(real seedR, real seedI, real x, real y, size_t maxIter)
{
cuComplex c{seedR, seedI}; // -0.8; 0.156
cuComplex a{x, y};
int i=0;
for (; i<maxIter; ++i)
{
a = a*a + c;
if (a.magnitude2() > 1000)
{
break;
}
}
return i;
}
__global__ void juliaKernel(uchar4* ptr,
real seedR, real seedI,
int w, int h,
real x0, real x1, real y0, real y1,
const uchar4* gradient, size_t gradientSize)
{
const int px = threadIdx.x + blockIdx.x * blockDim.x;
const int py = threadIdx.y + blockIdx.y * blockDim.y;
const int offset = px + py*w;
const real x = x0 + (x1 - x0)*static_cast<real>(px)/w;
const real y = y0 + (y1 - y0)*static_cast<real>(py)/h;
if (px >= w || py >= h)
{
return;
}
uchar4& pixel = ptr[offset];
const size_t maxIter = gradientSize;
const int juliaValue = julia(seedR, seedI, x, y, maxIter);
const real c = static_cast<real>(juliaValue) / static_cast<real>(maxIter);
pixel = gradient[ static_cast<int>(static_cast<real>(gradientSize - 1) * c) ];
}
} // namespace
void renderJuliaSet(void* devPtr,
real seedR, real seedI,
int w, int h,
real x0, real x1, real y0, real y1,
const void* gradient, size_t gradientSize)
{
const unsigned int blockSize = 32;
dim3 grid{(w + blockSize-1)/blockSize, (h + blockSize - 1)/blockSize};
dim3 block{blockSize, blockSize};
juliaKernel<<<grid, block>>>
(static_cast<uchar4*>(devPtr),
seedR, seedI,
w, h,
x0, x1, y0, y1,
static_cast<const uchar4*>(gradient), gradientSize);
}
|
30c4ece9690e71513e4d05fa9df36654210ba770.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <cfloat>
#include <chrono>
#include <fstream>
#include <iostream>
#include <random>
#include <sstream>
#include <vector>
#include <boost/tokenizer.hpp>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <string>
#include <cstring>
#include <cctype>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <bitset>
#include <random>
#include "timer.h"
using namespace std;
std::vector < std::vector< std::string > > parse_csv(const char* filepath)
{
std::vector< std::vector< std::string > > cells;
std::string line;
std::ifstream ifs(filepath);
// scan the csv
while (std::getline(ifs, line)) {
std::vector< std::string > data;
// scan a single line
boost::tokenizer< boost::escaped_list_separator< char > > tokens(line);
for (const std::string& token : tokens) {
data.push_back(token);
}
// store the result of reading one line
cells.push_back(data);
}
return cells;
}
int main(int argc, const char* argv[])
{
int N = atoi(argv[2]);
int counter = 0;
int ngpus = 4;
const size_t iBytes = N * sizeof(float);
static int RATIO = 1000000;
unsigned int t, travdirtime;
float **d_A = (float **)malloc(sizeof(float *) * ngpus);
float **d_B = (float **)malloc(sizeof(float *) * ngpus);
// float **d_C = (float **)malloc(sizeof(float *) * ngpus);
float **h_A = (float **)malloc(sizeof(float *) * ngpus);
float **h_B = (float **)malloc(sizeof(float *) * ngpus);
hipStream_t *stream = (hipStream_t *)malloc(sizeof(hipStream_t) * ngpus);
for (int i = 0; i < ngpus; i++)
{
hipSetDevice(i);
hipMalloc((void **) &d_A[i], iBytes);
hipMalloc((void **) &d_B[i], iBytes);
// hipMalloc((void **) &d_C[i], iBytes);
hipHostMalloc((void **) &h_A[i], iBytes);
hipHostMalloc((void **) &h_B[i], iBytes);
hipStreamCreate(&stream[i]);
}
/*
thrust::host_vector<float> h_x(N);
thrust::host_vector<float> h_y(N);
*/
const auto cells = parse_csv(argv[1]);
hipSetDevice(0);
counter = 0;
for (const auto& rows : cells) {
h_A[0][counter] = std::stof(rows[0]);
if(counter % RATIO == 0)
{
cout << counter / RATIO << "stored... " << endl;
}
counter = counter + 1;
}
hipDeviceEnablePeerAccess(1, 0); // peer device 1, flags must be 0
// printf("> GPU%d enabled direct access to GPU%d\n", , j);
cout << "host to device" << endl;
start_timer(&t);
hipMemcpy(d_A[0], h_A[0], iBytes, hipMemcpyHostToDevice);
travdirtime = stop_timer(&t);
print_timer(travdirtime);
cout << "device to device" << endl;
start_timer(&t);
hipMemcpy(d_A[1], d_A[0], iBytes, hipMemcpyDeviceToDevice);
travdirtime = stop_timer(&t);
print_timer(travdirtime);
/*
const auto cells2 = parse_csv(argv[2]);
counter = 0;
for (const auto& rows : cells2) {
h_y[counter] = std::stof(rows[1]);
counter = counter + 1;
}
*/
}
| 30c4ece9690e71513e4d05fa9df36654210ba770.cu | #include <algorithm>
#include <cfloat>
#include <chrono>
#include <fstream>
#include <iostream>
#include <random>
#include <sstream>
#include <vector>
#include <boost/tokenizer.hpp>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <string>
#include <cstring>
#include <cctype>
#include <cstdlib>
#include <cstdio>
#include <iostream>
#include <fstream>
#include <bitset>
#include <random>
#include "timer.h"
using namespace std;
std::vector < std::vector< std::string > > parse_csv(const char* filepath)
{
std::vector< std::vector< std::string > > cells;
std::string line;
std::ifstream ifs(filepath);
// scan the csv
while (std::getline(ifs, line)) {
std::vector< std::string > data;
// scan a single line
boost::tokenizer< boost::escaped_list_separator< char > > tokens(line);
for (const std::string& token : tokens) {
data.push_back(token);
}
// store the result of reading one line
cells.push_back(data);
}
return cells;
}
int main(int argc, const char* argv[])
{
int N = atoi(argv[2]);
int counter = 0;
int ngpus = 4;
const size_t iBytes = N * sizeof(float);
static int RATIO = 1000000;
unsigned int t, travdirtime;
float **d_A = (float **)malloc(sizeof(float *) * ngpus);
float **d_B = (float **)malloc(sizeof(float *) * ngpus);
// float **d_C = (float **)malloc(sizeof(float *) * ngpus);
float **h_A = (float **)malloc(sizeof(float *) * ngpus);
float **h_B = (float **)malloc(sizeof(float *) * ngpus);
cudaStream_t *stream = (cudaStream_t *)malloc(sizeof(cudaStream_t) * ngpus);
for (int i = 0; i < ngpus; i++)
{
cudaSetDevice(i);
cudaMalloc((void **) &d_A[i], iBytes);
cudaMalloc((void **) &d_B[i], iBytes);
// cudaMalloc((void **) &d_C[i], iBytes);
cudaMallocHost((void **) &h_A[i], iBytes);
cudaMallocHost((void **) &h_B[i], iBytes);
cudaStreamCreate(&stream[i]);
}
/*
thrust::host_vector<float> h_x(N);
thrust::host_vector<float> h_y(N);
*/
const auto cells = parse_csv(argv[1]);
cudaSetDevice(0);
counter = 0;
for (const auto& rows : cells) {
h_A[0][counter] = std::stof(rows[0]);
if(counter % RATIO == 0)
{
cout << counter / RATIO << "stored... " << endl;
}
counter = counter + 1;
}
cudaDeviceEnablePeerAccess(1, 0); // peer device 1, flags must be 0
// printf("> GPU%d enabled direct access to GPU%d\n", , j);
cout << "host to device" << endl;
start_timer(&t);
cudaMemcpy(d_A[0], h_A[0], iBytes, cudaMemcpyHostToDevice);
travdirtime = stop_timer(&t);
print_timer(travdirtime);
cout << "device to device" << endl;
start_timer(&t);
cudaMemcpy(d_A[1], d_A[0], iBytes, cudaMemcpyDeviceToDevice);
travdirtime = stop_timer(&t);
print_timer(travdirtime);
/*
const auto cells2 = parse_csv(argv[2]);
counter = 0;
for (const auto& rows : cells2) {
h_y[counter] = std::stof(rows[1]);
counter = counter + 1;
}
*/
}
|
048ea6de02a84f79ab9bc246532bf7268486c14b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <gmp.h>
#include <cassert>
#include "cgbn/cgbn.h"
#include "utility/support.h"
#include "mnt.h"
#define TPI 32
#define BITS 768
#define TPB 128 // the number of threads per block to launch (must be divisible by 32)
struct cubex_result {
std::vector<uint8_t*>* coeff0;
std::vector<uint8_t*>* coeff1;
std::vector<uint8_t*>* coeff2;
};
typedef struct {
cgbn_mem_t<BITS> x;
cgbn_mem_t<BITS> y;
cgbn_mem_t<BITS> m;
cgbn_mem_t<BITS> mul_lo;
cgbn_mem_t<BITS> mul_hi;
} my_instance_t;
typedef struct {
cgbn_mem_t<BITS> x;
cgbn_mem_t<BITS> y;
cgbn_mem_t<BITS> m;
cgbn_mem_t<BITS> result;
} add_instance_t;
typedef cgbn_context_t<TPI> context_t;
typedef cgbn_env_t<context_t, 768> env1024_t;
const uint64_t MNT4_INV = 0xf2044cfbe45e7fff;
const uint64_t MNT6_INV = 0xc90776e23fffffff;
void freeMem(std::vector<uint8_t*>* bigint_vector) {
for (int i = 0; i < bigint_vector->size(); i ++) {
free(bigint_vector->at(i));
}
}
// A linear algorithm that runs on the device. TODO: Pretty sad to not make use of the shuffle
// num is of size 2*n. modulus is of size n
// result is of size n.
__device__
void reduce_wide_device(uint32_t* result, uint32_t* num, uint32_t* modulus, uint32_t inv_32, int n) {
uint32_t *res = num;
// mp_limb_t res[2*n];
// mpn_mul_n(res, this->mont_repr.data, other.data, n);
/*
The Montgomery reduction here is based on Algorithm 14.32 in
Handbook of Applied Cryptography
<http://cacr.uwaterloo.ca/hac/about/chap14.pdf>.
*/
for (size_t i = 0; i < n; ++i)
{
uint32_t k = inv_32 * res[i];
/* calculate res = res + k * mod * b^i */
mp_limb_t carryout = mpn_addmul_1(res+i, modulus, n, k);
carryout = mpn_add_1(res+n+i, res+n+i, n-i, carryout);
assert(carryout == 0);
}
if (mpn_cmp(res+n, modulus, n) >= 0)
{
const mp_limb_t borrow = mpn_sub(res+n, res+n, n, modulus, n);
assert(borrow == 0);
}
mpn_copyi(result, res+n, n);
}
// num is of size 2*n. modulus is of size n
// result is of size n.
void reduce_wide(mp_limb_t* result, mp_limb_t* num, mp_limb_t* modulus, uint64_t inv, int n) {
mp_limb_t *res = num;
// mp_limb_t res[2*n];
// mpn_mul_n(res, this->mont_repr.data, other.data, n);
/*
The Montgomery reduction here is based on Algorithm 14.32 in
Handbook of Applied Cryptography
<http://cacr.uwaterloo.ca/hac/about/chap14.pdf>.
*/
for (size_t i = 0; i < n; ++i)
{
mp_limb_t k = inv * res[i];
/* calculate res = res + k * mod * b^i */
mp_limb_t carryout = mpn_addmul_1(res+i, modulus, n, k);
carryout = mpn_add_1(res+n+i, res+n+i, n-i, carryout);
assert(carryout == 0);
}
if (mpn_cmp(res+n, modulus, n) >= 0)
{
const mp_limb_t borrow = mpn_sub(res+n, res+n, n, modulus, n);
assert(borrow == 0);
}
mpn_copyi(result, res+n, n);
}
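/* Sketch (not part of the original source): with b = 2^64 and R = b^n, and
 * assuming inv == -modulus^(-1) mod b as HAC 14.32 requires, reduce_wide
 * computes result == num * R^(-1) mod modulus. One way to check it on the
 * host with GMP's mpz layer (reduce_wide overwrites 'num', so verify against
 * a copy, here 'num_copy'):
 */
/*
	mpz_t zn, zm, zr, zrinv, zexp;
	mpz_inits(zn, zm, zr, zrinv, zexp, NULL);
	mpz_import(zn, 2*n, -1, sizeof(mp_limb_t), 0, 0, num_copy);
	mpz_import(zm, n, -1, sizeof(mp_limb_t), 0, 0, modulus);
	mpz_ui_pow_ui(zr, 2, 64 * n);      // R = b^n
	mpz_invert(zrinv, zr, zm);         // R^(-1) mod m
	mpz_mul(zexp, zn, zrinv);
	mpz_mod(zexp, zexp, zm);           // expected = num * R^(-1) mod m
	// compare zexp with the limbs written into 'result'
*/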
__device__
void store_np0(env1024_t::cgbn_t& l, uint32_t np0) {
#if defined(__CUDA_ARCH__)
#warning "including limbs code"
l._limbs[10] = np0;
l._limbs[11] = 0xe45e7fffu;
printf("one %x, np-0 = %x\n", l._limbs[10], l._limbs[11]);
#endif
}
__global__ void mul_by11_kernel(add_instance_t *problem_instances, uint32_t instance_count) {
context_t bn_context; // create a CGBN context
env1024_t bn1024_env(bn_context); // construct a bn environment for 1024 bit math
env1024_t::cgbn_t a, acc_r, acc_r1, acc_r2, m; // three 1024-bit values (spread across a warp)
env1024_t::cgbn_t res, res1;
// uint32_t np0;
int32_t my_instance=(blockIdx.x*blockDim.x + threadIdx.x)/TPI; // determine my instance number
if(my_instance>=instance_count) return; // return if my_instance is not valid
cgbn_load(bn1024_env, a, &(problem_instances[my_instance]).x);
cgbn_load(bn1024_env, m, &(problem_instances[my_instance]).m);
cgbn_set(bn1024_env, acc_r, a);
for (int i = 0; i < 10; i ++) {
cgbn_add(bn1024_env, acc_r1, acc_r, a);
if (cgbn_compare(bn1024_env, acc_r1, m) >= 0) {
cgbn_sub(bn1024_env, acc_r2, acc_r1, m);
cgbn_set(bn1024_env, acc_r, acc_r2);
} else {
cgbn_set(bn1024_env, acc_r, acc_r1);
}
}
cgbn_store(bn1024_env, &(problem_instances[my_instance].result), acc_r);
}
__global__ void mul_by13_kernel(add_instance_t *problem_instances, uint32_t instance_count) {
context_t bn_context; // create a CGBN context
env1024_t bn1024_env(bn_context); // construct a bn environment for 1024 bit math
env1024_t::cgbn_t a, acc_r, acc_r1, acc_r2, m; // three 1024-bit values (spread across a warp)
env1024_t::cgbn_t res, res1;
// uint32_t np0;
int32_t my_instance=(blockIdx.x*blockDim.x + threadIdx.x)/TPI; // determine my instance number
if(my_instance>=instance_count) return; // return if my_instance is not valid
cgbn_load(bn1024_env, a, &(problem_instances[my_instance]).x);
cgbn_load(bn1024_env, m, &(problem_instances[my_instance]).m);
cgbn_set(bn1024_env, acc_r, a);
for (int i = 0; i < 12; i ++) {
cgbn_add(bn1024_env, acc_r1, acc_r, a);
if (cgbn_compare(bn1024_env, acc_r1, m) >= 0) {
cgbn_sub(bn1024_env, acc_r2, acc_r1, m);
cgbn_set(bn1024_env, acc_r, acc_r2);
} else {
cgbn_set(bn1024_env, acc_r, acc_r1);
}
}
cgbn_store(bn1024_env, &(problem_instances[my_instance].result), acc_r);
}
__global__ void add_kernel(add_instance_t *problem_instances, uint32_t instance_count) {
context_t bn_context; // create a CGBN context
env1024_t bn1024_env(bn_context); // construct a bn environment for 1024 bit math
env1024_t::cgbn_t a, b, m; // three 1024-bit values (spread across a warp)
env1024_t::cgbn_t res, res1;
// uint32_t np0;
int32_t my_instance=(blockIdx.x*blockDim.x + threadIdx.x)/TPI; // determine my instance number
if(my_instance>=instance_count) return; // return if my_instance is not valid
cgbn_load(bn1024_env, a, &(problem_instances[my_instance]).x);
cgbn_load(bn1024_env, b, &(problem_instances[my_instance]).y);
cgbn_load(bn1024_env, m, &(problem_instances[my_instance]).m);
cgbn_add(bn1024_env, res1, a, b);
if (cgbn_compare(bn1024_env, res1, m) >= 0) {
cgbn_sub(bn1024_env, res, res1, m);
} else {
cgbn_set(bn1024_env, res, res1);
}
cgbn_store(bn1024_env, &(problem_instances[my_instance].result), res);
}
__global__ void my_kernel(my_instance_t *problem_instances, uint32_t instance_count) {
context_t bn_context; // create a CGBN context
env1024_t bn1024_env(bn_context); // construct a bn environment for 1024 bit math
env1024_t::cgbn_t a, b, m; // three 1024-bit values (spread across a warp)
env1024_t::cgbn_wide_t mul_wide;
// uint32_t np0;
int32_t my_instance=(blockIdx.x*blockDim.x + threadIdx.x)/TPI; // determine my instance number
if(my_instance>=instance_count) return; // return if my_instance is not valid
cgbn_load(bn1024_env, a, &(problem_instances[my_instance]).x);
cgbn_load(bn1024_env, b, &(problem_instances[my_instance]).y);
cgbn_load(bn1024_env, m, &(problem_instances[my_instance]).m);
// np0 = -cgbn_binary_inverse_ui32(bn1024_env, cgbn_get_ui32(bn1024_env, m));
cgbn_mul_wide(bn1024_env, mul_wide, a, b);
cgbn_store(bn1024_env, &(problem_instances[my_instance].mul_lo), mul_wide._low);
cgbn_store(bn1024_env, &(problem_instances[my_instance].mul_hi), mul_wide._high);
}
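// Note (added for clarity): my_kernel only computes the double-width product
// with cgbn_mul_wide; the Montgomery reduction of that product back to BITS
// bits is done on the host by reduce_wide() inside compute_newcuda below.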
void set_literal(cgbn_mem_t<BITS>& h, uint32_t literal, int num) {
for (int i = 1; i < num; i ++ ) {
h._limbs[i] = 0;
}
h._limbs[0] = literal;
}
void set_literal_limbs(cgbn_mem_t<BITS>& h, uint32_t literal, int num, int size) {
for (int i = 0; i < num; i ++ ) {
h._limbs[i] = literal;
}
for (int i = num; i < size; i ++ ) {
h._limbs[i] = 0;
}
}
void print_uint8_array(uint8_t* array, int size) {
for (int i = 0; i < size; i ++) {
printf("%02x", array[i]);
}
printf("\n");
}
std::vector<uint8_t*>* compute_mul_by11_cuda(std::vector<uint8_t*> a, uint8_t* input_m_base, int num_bytes) {
int num_elements = a.size();
add_instance_t *gpuInstances;
add_instance_t* instance_array = (add_instance_t*) malloc(sizeof(add_instance_t) * num_elements);
cgbn_error_report_t *report;
// create a cgbn_error_report for CGBN to report back errors
NEW_CUDA_CHECK(cgbn_error_report_alloc(&report));
for (int i = 0; i < num_elements; i ++) {
std::memcpy((void*)instance_array[i].x._limbs, (const void*) a[i], num_bytes);
std::memcpy((void*)instance_array[i].m._limbs, (const void*) input_m_base, num_bytes);
}
// printf("Copying instances to the GPU ...\n");
NEW_CUDA_CHECK(hipSetDevice(0));
NEW_CUDA_CHECK(hipMalloc((void **)&gpuInstances, sizeof(add_instance_t)*num_elements));
NEW_CUDA_CHECK(hipMemcpy(gpuInstances, instance_array, sizeof(add_instance_t)*num_elements, hipMemcpyHostToDevice));
int tpb = TPB;
// printf("\n Threads per block =%d", tpb);
int IPB = TPB/TPI;
int tpi = TPI;
// printf("\n Threads per instance = %d", tpi);
// printf("\n Instances per block = %d", IPB);
uint32_t num_blocks = (num_elements+IPB-1)/IPB;
// printf("\n Number of blocks = %d", num_blocks);
hipLaunchKernelGGL(( mul_by11_kernel), dim3(num_blocks), dim3(TPB), 0, 0, gpuInstances, num_elements);
NEW_CUDA_CHECK(hipDeviceSynchronize());
CGBN_CHECK(report);
// copy the instances back from gpuMemory
// printf("Copying results back to CPU ...\n");
NEW_CUDA_CHECK(hipMemcpy(instance_array, gpuInstances, sizeof(add_instance_t)*num_elements, hipMemcpyDeviceToHost));
std::vector<uint8_t*>* res_vector = new std::vector<uint8_t*>();
for (int i = 0; i < num_elements; i ++) {
uint8_t* result = (uint8_t*) malloc(num_bytes * sizeof(uint8_t));
std::memcpy((void*)result, (const void*)instance_array[i].result._limbs, num_bytes);
res_vector->emplace_back(result);
}
free(instance_array);
hipFree(gpuInstances);
return res_vector;
}
std::vector<uint8_t*>* compute_mul_by13_cuda(std::vector<uint8_t*> a, uint8_t* input_m_base, int num_bytes) {
int num_elements = a.size();
add_instance_t *gpuInstances;
add_instance_t* instance_array = (add_instance_t*) malloc(sizeof(add_instance_t) * num_elements);
cgbn_error_report_t *report;
// create a cgbn_error_report for CGBN to report back errors
NEW_CUDA_CHECK(cgbn_error_report_alloc(&report));
for (int i = 0; i < num_elements; i ++) {
std::memcpy((void*)instance_array[i].x._limbs, (const void*) a[i], num_bytes);
std::memcpy((void*)instance_array[i].m._limbs, (const void*) input_m_base, num_bytes);
}
// printf("Copying instances to the GPU ...\n");
NEW_CUDA_CHECK(hipSetDevice(0));
NEW_CUDA_CHECK(hipMalloc((void **)&gpuInstances, sizeof(add_instance_t)*num_elements));
NEW_CUDA_CHECK(hipMemcpy(gpuInstances, instance_array, sizeof(add_instance_t)*num_elements, hipMemcpyHostToDevice));
int tpb = TPB;
// printf("\n Threads per block =%d", tpb);
int IPB = TPB/TPI;
int tpi = TPI;
// printf("\n Threads per instance = %d", tpi);
// printf("\n Instances per block = %d", IPB);
uint32_t num_blocks = (num_elements+IPB-1)/IPB;
// printf("\n Number of blocks = %d", num_blocks);
hipLaunchKernelGGL(( mul_by13_kernel), dim3(num_blocks), dim3(TPB), 0, 0, gpuInstances, num_elements);
NEW_CUDA_CHECK(hipDeviceSynchronize());
CGBN_CHECK(report);
// copy the instances back from gpuMemory
// printf("Copying results back to CPU ...\n");
NEW_CUDA_CHECK(hipMemcpy(instance_array, gpuInstances, sizeof(add_instance_t)*num_elements, hipMemcpyDeviceToHost));
std::vector<uint8_t*>* res_vector = new std::vector<uint8_t*>();
for (int i = 0; i < num_elements; i ++) {
uint8_t* result = (uint8_t*) malloc(num_bytes * sizeof(uint8_t));
std::memcpy((void*)result, (const void*)instance_array[i].result._limbs, num_bytes);
res_vector->emplace_back(result);
}
free(instance_array);
hipFree(gpuInstances);
return res_vector;
}
std::vector<uint8_t*>* compute_addcuda(std::vector<uint8_t*> a, std::vector<uint8_t*> b, uint8_t* input_m_base, int num_bytes) {
int num_elements = a.size();
add_instance_t *gpuInstances;
add_instance_t* instance_array = (add_instance_t*) malloc(sizeof(add_instance_t) * num_elements);
cgbn_error_report_t *report;
// create a cgbn_error_report for CGBN to report back errors
NEW_CUDA_CHECK(cgbn_error_report_alloc(&report));
for (int i = 0; i < num_elements; i ++) {
std::memcpy((void*)instance_array[i].x._limbs, (const void*) a[i], num_bytes);
std::memcpy((void*)instance_array[i].y._limbs, (const void*) b[i], num_bytes);
std::memcpy((void*)instance_array[i].m._limbs, (const void*) input_m_base, num_bytes);
}
NEW_CUDA_CHECK(hipSetDevice(0));
NEW_CUDA_CHECK(hipMalloc((void **)&gpuInstances, sizeof(add_instance_t)*num_elements));
NEW_CUDA_CHECK(hipMemcpy(gpuInstances, instance_array, sizeof(add_instance_t)*num_elements, hipMemcpyHostToDevice));
int tpb = TPB;
// printf("\n Threads per block =%d", tpb);
int IPB = TPB/TPI;
int tpi = TPI;
// printf("\n Threads per instance = %d", tpi);
// printf("\n Instances per block = %d", IPB);
uint32_t num_blocks = (num_elements+IPB-1)/IPB;
// printf("\n Number of blocks = %d", num_blocks);
hipLaunchKernelGGL(( add_kernel), dim3(num_blocks), dim3(TPB), 0, 0, gpuInstances, num_elements);
NEW_CUDA_CHECK(hipDeviceSynchronize());
CGBN_CHECK(report);
// copy the instances back from gpuMemory
NEW_CUDA_CHECK(hipMemcpy(instance_array, gpuInstances, sizeof(add_instance_t)*num_elements, hipMemcpyDeviceToHost));
std::vector<uint8_t*>* res_vector = new std::vector<uint8_t*>();
for (int i = 0; i < num_elements; i ++) {
uint8_t* result = (uint8_t*) malloc(num_bytes * sizeof(uint8_t));
std::memcpy((void*)result, (const void*)instance_array[i].result._limbs, num_bytes);
res_vector->emplace_back(result);
}
free(instance_array);
hipFree(gpuInstances);
return res_vector;
}
std::vector<uint8_t*>* compute_newcuda(std::vector<uint8_t*> a, std::vector<uint8_t*> b, uint8_t* input_m_base, int num_bytes, uint64_t inv) {
int num_elements = a.size();
my_instance_t *gpuInstances;
my_instance_t* instance_array = (my_instance_t*) malloc(sizeof(my_instance_t) * num_elements);
cgbn_error_report_t *report;
// create a cgbn_error_report for CGBN to report back errors
NEW_CUDA_CHECK(cgbn_error_report_alloc(&report));
for (int i = 0; i < num_elements; i ++) {
std::memcpy((void*)instance_array[i].x._limbs, (const void*) a[i], num_bytes);
std::memcpy((void*)instance_array[i].y._limbs, (const void*) b[i], num_bytes);
std::memcpy((void*)instance_array[i].m._limbs, (const void*) input_m_base, num_bytes);
}
printf("\nCopying instances to the GPU ...\n");
NEW_CUDA_CHECK(hipSetDevice(0));
NEW_CUDA_CHECK(hipMalloc((void **)&gpuInstances, sizeof(my_instance_t)*num_elements));
NEW_CUDA_CHECK(hipMemcpy(gpuInstances, instance_array, sizeof(my_instance_t)*num_elements, hipMemcpyHostToDevice));
int tpb = TPB;
printf("\n Threads per block =%d", tpb);
int IPB = TPB/TPI;
int tpi = TPI;
printf("\n Threads per instance = %d", tpi);
printf("\n Instances per block = %d", IPB);
uint32_t num_blocks = (num_elements+IPB-1)/IPB;
printf("\n Number of blocks = %d", num_blocks);
hipLaunchKernelGGL(( my_kernel), dim3(num_blocks), dim3(TPB), 0, 0, gpuInstances, num_elements);
NEW_CUDA_CHECK(hipDeviceSynchronize());
CGBN_CHECK(report);
// copy the instances back from gpuMemory
printf("\nCopying results back to CPU ...\n");
NEW_CUDA_CHECK(hipMemcpy(instance_array, gpuInstances, sizeof(my_instance_t)*num_elements, hipMemcpyDeviceToHost));
int num_limbs = num_bytes / 8;
printf("\n Setting num 64 limbs = %d", num_limbs);
mp_limb_t* num = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs * 2);
mp_limb_t* modulus = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs);
std::memcpy((void*) modulus, (const void*) instance_array->m._limbs, num_bytes);
//printf("\n Dumping modulus:");
//gmp_printf("%Nx\n", modulus, num_limbs);
std::vector<uint8_t*>* res_vector = new std::vector<uint8_t*>();
for (int i = 0; i < num_elements; i ++) {
// Reduce
std::memcpy((void*)num, (const void*)instance_array[i].mul_lo._limbs, num_bytes);
std::memcpy((void*) (num + num_limbs), (const void*)instance_array[i].mul_hi._limbs, num_bytes);
mp_limb_t* fresult = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs);
// printf("\n Dumping 64 byte limb wide num [%d]:", i);
// gmp_printf("%Nx\n", num, num_limbs * 2);
reduce_wide(fresult, num, modulus, inv, num_limbs);
// store the result.
res_vector->emplace_back((uint8_t*)fresult);
}
free(num);
free(modulus);
free(instance_array);
hipFree(gpuInstances);
return res_vector;
}
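/* Illustrative usage sketch (not part of the original source): Montgomery
 * multiplication of two vectors of 768-bit field elements. Each uint8_t* is
 * assumed to hold num_bytes of little-endian limb data already in Montgomery
 * form; the returned vector and its elements are owned by the caller
 * (see freeMem above).
 */
/*
	std::vector<uint8_t*> a = ..., b = ...;   // operands
	uint8_t *modulus = ...;                   // num_bytes of modulus limbs
	int num_bytes = 96;                       // 768 bits
	std::vector<uint8_t*> *prod = compute_newcuda(a, b, modulus, num_bytes, MNT4_INV);
	// ... use prod ...
	freeMem(prod); delete prod;
*/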
std::pair<std::vector<uint8_t*>, std::vector<uint8_t*> >
compute_quadex_cuda(std::vector<uint8_t*> x0_a0,
std::vector<uint8_t*> x0_a1,
std::vector<uint8_t*> y0_a0,
std::vector<uint8_t*> y0_a1,
uint8_t* input_m_base, int num_bytes, uint64_t inv) {
int num_elements = x0_a0.size();
std::vector<uint8_t*>* x0_y0;
std::vector<uint8_t*>* x0_y1;
std::vector<uint8_t*>* x1_y0;
std::vector<uint8_t*>* x1_y1;
std::vector<uint8_t*>* res_a0;
std::vector<uint8_t*>* res_a1;
// Logic:
// var x0_y0 = fq_mul(x.a0, y.a0);
// var x1_y1 = fq_mul(x.a1, y.a1);
// var x1_y0 = fq_mul(x.a1, y.a0);
// var x0_y1 = fq_mul(x.a0, y.a1);
// return {
// a0: fq_add(x0_y0, fq_mul(x1_y1, alpha)),
// a1: fq_add(x1_y0, x0_y1)
// };
//
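// Equivalently, with u^2 = alpha (alpha = 13 here, via compute_mul_by13_cuda):
//   (x0 + x1*u)*(y0 + y1*u) = (x0*y0 + alpha*x1*y1) + (x0*y1 + x1*y0)*u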
x0_y0 = compute_newcuda(x0_a0, y0_a0, input_m_base, num_bytes, inv);
x0_y1 = compute_newcuda(x0_a0, y0_a1, input_m_base, num_bytes, inv);
x1_y0 = compute_newcuda(x0_a1, y0_a0, input_m_base, num_bytes, inv);
x1_y1 = compute_newcuda(x0_a1, y0_a1, input_m_base, num_bytes, inv);
res_a1 = compute_addcuda(*x1_y0, *x0_y1, input_m_base, num_bytes);
res_a0 = compute_mul_by13_cuda(*x1_y1, input_m_base, num_bytes);
res_a0 = compute_addcuda(*x0_y0, *res_a0, input_m_base, num_bytes);
std::pair<std::vector<uint8_t*>, std::vector<uint8_t*> > res = std::make_pair(*res_a0, *res_a1);
return res;
}
struct cubex_result
compute_cubex_cuda(std::vector<uint8_t*> x0_a0,
std::vector<uint8_t*> x0_a1,
std::vector<uint8_t*> x0_a2,
std::vector<uint8_t*> y0_a0,
std::vector<uint8_t*> y0_a1,
std::vector<uint8_t*> y0_a2,
uint8_t* input_m_base, int num_bytes, uint64_t inv) {
int num_elements = x0_a0.size();
std::vector<uint8_t*>* x0_y0;
std::vector<uint8_t*>* x0_y1;
std::vector<uint8_t*>* x0_y2;
std::vector<uint8_t*>* x1_y0;
std::vector<uint8_t*>* x1_y1;
std::vector<uint8_t*>* x1_y2;
std::vector<uint8_t*>* x2_y0;
std::vector<uint8_t*>* x2_y1;
std::vector<uint8_t*>* x2_y2;
// Logic:
// var alpha = fq(11);
// var fq3_mul = (x, y) => {
// var x0_y0 = fq_mul(x.a0, y.a0);
// var x0_y1 = fq_mul(x.a0, y.a1);
// var x0_y2 = fq_mul(x.a0, y.a2);
//
// var x1_y0 = fq_mul(x.a1, y.a0);
// var x1_y1 = fq_mul(x.a1, y.a1);
// var x1_y2 = fq_mul(x.a1, y.a2);
//
// var x2_y0 = fq_mul(x.a2, y.a0);
// var x2_y1 = fq_mul(x.a2, y.a1);
// var x2_y2 = fq_mul(x.a2, y.a2);
//
// return {
// a0: fq_add(x0_y0, fq_mul(alpha, fq_add(x1_y2, x2_y1))),
// a1: fq_add(x0_y1, fq_add(x1_y0, fq_mul(alpha, x2_y2))),
// a2: fq_add(x0_y2, fq_add(x1_y1, x2_y0))
// };
// };
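// Equivalently, with v^3 = alpha (alpha = 11 here, via compute_mul_by11_cuda):
//   (x0 + x1*v + x2*v^2)*(y0 + y1*v + y2*v^2) =
//     (x0*y0 + alpha*(x1*y2 + x2*y1))
//   + (x0*y1 + x1*y0 + alpha*x2*y2)*v
//   + (x0*y2 + x1*y1 + x2*y0)*v^2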
x0_y0 = compute_newcuda(x0_a0, y0_a0, input_m_base, num_bytes, inv);
x0_y1 = compute_newcuda(x0_a0, y0_a1, input_m_base, num_bytes, inv);
x0_y2 = compute_newcuda(x0_a0, y0_a2, input_m_base, num_bytes, inv);
x1_y0 = compute_newcuda(x0_a1, y0_a0, input_m_base, num_bytes, inv);
x1_y1 = compute_newcuda(x0_a1, y0_a1, input_m_base, num_bytes, inv);
x1_y2 = compute_newcuda(x0_a1, y0_a2, input_m_base, num_bytes, inv);
x2_y0 = compute_newcuda(x0_a2, y0_a0, input_m_base, num_bytes, inv);
x2_y1 = compute_newcuda(x0_a2, y0_a1, input_m_base, num_bytes, inv);
x2_y2 = compute_newcuda(x0_a2, y0_a2, input_m_base, num_bytes, inv);
std::vector<uint8_t*>* res_a0_tmp1;
std::vector<uint8_t*>* res_a0_tmp2;
struct cubex_result res;
res_a0_tmp1 = compute_addcuda(*x1_y2, *x2_y1, input_m_base, num_bytes);
res_a0_tmp2 = compute_mul_by11_cuda(*res_a0_tmp1, input_m_base, num_bytes);
res.coeff0 = compute_addcuda(*x0_y0, *res_a0_tmp2, input_m_base, num_bytes);
std::vector<uint8_t*>* res_a1_tmp1;
std::vector<uint8_t*>* res_a1_tmp2;
res_a1_tmp1 = compute_mul_by11_cuda(*x2_y2, input_m_base, num_bytes);
res_a1_tmp2 = compute_addcuda(*x1_y0, *res_a1_tmp1, input_m_base, num_bytes);
res.coeff1 = compute_addcuda(*x0_y1, *res_a1_tmp2, input_m_base, num_bytes);
std::vector<uint8_t*>* res_a2_tmp1;
res_a2_tmp1 = compute_addcuda(*x1_y1, *x2_y0, input_m_base, num_bytes);
res.coeff2 = compute_addcuda(*x0_y2, *res_a2_tmp1, input_m_base, num_bytes);
freeMem(x0_y0);
free(x0_y0);
freeMem(x0_y1);
free(x0_y1);
freeMem(x0_y2);
free(x0_y2);
freeMem(x1_y0);
free(x1_y0);
freeMem(x1_y1);
free(x1_y1);
freeMem(x1_y2);
free(x1_y2);
freeMem(x2_y0);
free(x2_y0);
freeMem(x2_y1);
free(x2_y1);
freeMem(x2_y2);
free(x2_y2);
freeMem(res_a0_tmp1);
free(res_a0_tmp1);
freeMem(res_a0_tmp2);
free(res_a0_tmp2);
freeMem(res_a1_tmp1);
free(res_a1_tmp1);
freeMem(res_a1_tmp2);
free(res_a1_tmp2);
freeMem(res_a2_tmp1);
free(res_a2_tmp1);
return res;
}
// TODO: Reduce the unnecessary copy here.
// TODO: Fix modulus copy operation.
std::vector<uint8_t*>* compute_array_sum_g1_gpu(std::vector<uint8_t*> x, std::vector<uint8_t*> y,
uint8_t* input_m_base, int num_bytes, uint64_t inv) {
int num_elements = x.size();
my_instance_t *gpuInstances;
my_instance_t* instance_array = (my_instance_t*) malloc(sizeof(my_instance_t) * num_elements);
cgbn_error_report_t *report;
// create a cgbn_error_report for CGBN to report back errors
NEW_CUDA_CHECK(cgbn_error_report_alloc(&report));
for (int i = 0; i < num_elements; i ++) {
std::memcpy((void*)instance_array[i].x._limbs, (const void*) x[i], num_bytes);
std::memcpy((void*)instance_array[i].y._limbs, (const void*) y[i], num_bytes);
std::memcpy((void*)instance_array[i].m._limbs, (const void*) input_m_base, num_bytes);
}
printf("\nCopying instances to the GPU ...\n");
NEW_CUDA_CHECK(hipSetDevice(0));
NEW_CUDA_CHECK(hipMalloc((void **)&gpuInstances, sizeof(my_instance_t)*num_elements));
NEW_CUDA_CHECK(hipMemcpy(gpuInstances, instance_array, sizeof(my_instance_t)*num_elements, hipMemcpyHostToDevice));
int tpb = TPB;
printf("\n Threads per block =%d", tpb);
int IPB = TPB/TPI;
int tpi = TPI;
printf("\n Threads per instance = %d", tpi);
printf("\n Instances per block = %d", IPB);
uint32_t num_blocks = (num_elements+IPB-1)/IPB;
printf("\n Number of blocks = %d", num_blocks);
hipLaunchKernelGGL(( my_kernel), dim3(num_blocks), dim3(TPB), 0, 0, gpuInstances, num_elements);
NEW_CUDA_CHECK(hipDeviceSynchronize());
CGBN_CHECK(report);
// copy the instances back from gpuMemory
printf("\nCopying results back to CPU ...\n");
NEW_CUDA_CHECK(hipMemcpy(instance_array, gpuInstances, sizeof(my_instance_t)*num_elements, hipMemcpyDeviceToHost));
int num_limbs = num_bytes / 8;
printf("\n Setting num 64 limbs = %d", num_limbs);
mp_limb_t* num = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs * 2);
mp_limb_t* modulus = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs);
std::memcpy((void*) modulus, (const void*) instance_array->m._limbs, num_bytes);
//printf("\n Dumping modulus:");
//gmp_printf("%Nx\n", modulus, num_limbs);
std::vector<uint8_t*>* res_vector = new std::vector<uint8_t*>();
for (int i = 0; i < num_elements; i ++) {
// Reduce
std::memcpy((void*)num, (const void*)instance_array[i].mul_lo._limbs, num_bytes);
std::memcpy((void*) (num + num_limbs), (const void*)instance_array[i].mul_hi._limbs, num_bytes);
mp_limb_t* fresult = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs);
// printf("\n Dumping 64 byte limb wide num [%d]:", i);
// gmp_printf("%Nx\n", num, num_limbs * 2);
reduce_wide(fresult, num, modulus, inv, num_limbs);
// store the result.
res_vector->emplace_back((uint8_t*)fresult);
}
free(num);
free(modulus);
free(instance_array);
hipFree(gpuInstances);
return res_vector;
}
G1Point* compute_array_sum_g1(std::vector<uint8_t*> x, std::vector<uint8_t*> y,
uint8_t* input_m_base, int num_bytes, uint64_t inv) {
//
int num_elements = x.size();
int skip = 1;
// parallel add.
while (skip < num_elements) {
compute_cuda_sum_g1_internal(x, y, skip, input_m_base, num_bytes, inv);
skip = skip * 2;
}
// result is in x[0], y[0]
// TODO: return the accumulated point; NULL is returned for now so callers compile.
return NULL;
}
| 048ea6de02a84f79ab9bc246532bf7268486c14b.cu | #include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <cuda.h>
#include <gmp.h>
#include <cassert>
#include "cgbn/cgbn.h"
#include "utility/support.h"
#include "mnt.h"
#define TPI 32
#define BITS 768
#define TPB 128 // the number of threads per block to launch (must be divisible by 32)
struct cubex_result {
std::vector<uint8_t*>* coeff0;
std::vector<uint8_t*>* coeff1;
std::vector<uint8_t*>* coeff2;
};
typedef struct {
cgbn_mem_t<BITS> x;
cgbn_mem_t<BITS> y;
cgbn_mem_t<BITS> m;
cgbn_mem_t<BITS> mul_lo;
cgbn_mem_t<BITS> mul_hi;
} my_instance_t;
typedef struct {
cgbn_mem_t<BITS> x;
cgbn_mem_t<BITS> y;
cgbn_mem_t<BITS> m;
cgbn_mem_t<BITS> result;
} add_instance_t;
typedef cgbn_context_t<TPI> context_t;
typedef cgbn_env_t<context_t, 768> env1024_t;
const uint64_t MNT4_INV = 0xf2044cfbe45e7fff;
const uint64_t MNT6_INV = 0xc90776e23fffffff;
void freeMem(std::vector<uint8_t*>* bigint_vector) {
for (int i = 0; i < bigint_vector->size(); i ++) {
free(bigint_vector->at(i));
}
}
// A linear algorithm that runs on the device. TODO: Pretty sad to not make use of the shuffle
// num is of size 2*n. modulus is of size n
// result is of size n.
__device__
void reduce_wide_device(uint32_t* result, uint32_t* num, uint32_t* modulus, uint32_t inv_32, int n) {
uint32_t *res = num;
// mp_limb_t res[2*n];
// mpn_mul_n(res, this->mont_repr.data, other.data, n);
/*
The Montgomery reduction here is based on Algorithm 14.32 in
Handbook of Applied Cryptography
<http://cacr.uwaterloo.ca/hac/about/chap14.pdf>.
*/
for (size_t i = 0; i < n; ++i)
{
uint32_t k = inv_32 * res[i];
/* calculate res = res + k * mod * b^i */
mp_limb_t carryout = mpn_addmul_1(res+i, modulus, n, k);
carryout = mpn_add_1(res+n+i, res+n+i, n-i, carryout);
assert(carryout == 0);
}
if (mpn_cmp(res+n, modulus, n) >= 0)
{
const mp_limb_t borrow = mpn_sub(res+n, res+n, n, modulus, n);
assert(borrow == 0);
}
mpn_copyi(result, res+n, n);
}
// num is of size 2*n. modulus is of size n
// result is of size n.
void reduce_wide(mp_limb_t* result, mp_limb_t* num, mp_limb_t* modulus, uint64_t inv, int n) {
mp_limb_t *res = num;
// mp_limb_t res[2*n];
// mpn_mul_n(res, this->mont_repr.data, other.data, n);
/*
The Montgomery reduction here is based on Algorithm 14.32 in
Handbook of Applied Cryptography
<http://cacr.uwaterloo.ca/hac/about/chap14.pdf>.
*/
for (size_t i = 0; i < n; ++i)
{
mp_limb_t k = inv * res[i];
/* calculate res = res + k * mod * b^i */
mp_limb_t carryout = mpn_addmul_1(res+i, modulus, n, k);
carryout = mpn_add_1(res+n+i, res+n+i, n-i, carryout);
assert(carryout == 0);
}
if (mpn_cmp(res+n, modulus, n) >= 0)
{
const mp_limb_t borrow = mpn_sub(res+n, res+n, n, modulus, n);
assert(borrow == 0);
}
mpn_copyi(result, res+n, n);
}
__device__
void store_np0(env1024_t::cgbn_t& l, uint32_t np0) {
#if defined(__CUDA_ARCH__)
#warning "including limbs code"
l._limbs[10] = np0;
l._limbs[11] = 0xe45e7fffu;
printf("one %x, np-0 = %x\n", l._limbs[10], l._limbs[11]);
#endif
}
__global__ void mul_by11_kernel(add_instance_t *problem_instances, uint32_t instance_count) {
context_t bn_context; // create a CGBN context
env1024_t bn1024_env(bn_context); // construct a bn environment for 1024 bit math
env1024_t::cgbn_t a, acc_r, acc_r1, acc_r2, m; // three 1024-bit values (spread across a warp)
env1024_t::cgbn_t res, res1;
// uint32_t np0;
int32_t my_instance=(blockIdx.x*blockDim.x + threadIdx.x)/TPI; // determine my instance number
if(my_instance>=instance_count) return; // return if my_instance is not valid
cgbn_load(bn1024_env, a, &(problem_instances[my_instance]).x);
cgbn_load(bn1024_env, m, &(problem_instances[my_instance]).m);
cgbn_set(bn1024_env, acc_r, a);
for (int i = 0; i < 10; i ++) {
cgbn_add(bn1024_env, acc_r1, acc_r, a);
if (cgbn_compare(bn1024_env, acc_r1, m) >= 0) {
cgbn_sub(bn1024_env, acc_r2, acc_r1, m);
cgbn_set(bn1024_env, acc_r, acc_r2);
} else {
cgbn_set(bn1024_env, acc_r, acc_r1);
}
}
cgbn_store(bn1024_env, &(problem_instances[my_instance].result), acc_r);
}
__global__ void mul_by13_kernel(add_instance_t *problem_instances, uint32_t instance_count) {
context_t bn_context; // create a CGBN context
env1024_t bn1024_env(bn_context); // construct a bn environment for 1024 bit math
env1024_t::cgbn_t a, acc_r, acc_r1, acc_r2, m; // three 1024-bit values (spread across a warp)
env1024_t::cgbn_t res, res1;
// uint32_t np0;
int32_t my_instance=(blockIdx.x*blockDim.x + threadIdx.x)/TPI; // determine my instance number
if(my_instance>=instance_count) return; // return if my_instance is not valid
cgbn_load(bn1024_env, a, &(problem_instances[my_instance]).x);
cgbn_load(bn1024_env, m, &(problem_instances[my_instance]).m);
cgbn_set(bn1024_env, acc_r, a);
for (int i = 0; i < 12; i ++) {
cgbn_add(bn1024_env, acc_r1, acc_r, a);
if (cgbn_compare(bn1024_env, acc_r1, m) >= 0) {
cgbn_sub(bn1024_env, acc_r2, acc_r1, m);
cgbn_set(bn1024_env, acc_r, acc_r2);
} else {
cgbn_set(bn1024_env, acc_r, acc_r1);
}
}
cgbn_store(bn1024_env, &(problem_instances[my_instance].result), acc_r);
}
__global__ void add_kernel(add_instance_t *problem_instances, uint32_t instance_count) {
context_t bn_context; // create a CGBN context
env1024_t bn1024_env(bn_context); // construct a bn environment for 1024 bit math
env1024_t::cgbn_t a, b, m; // three 1024-bit values (spread across a warp)
env1024_t::cgbn_t res, res1;
// uint32_t np0;
int32_t my_instance=(blockIdx.x*blockDim.x + threadIdx.x)/TPI; // determine my instance number
if(my_instance>=instance_count) return; // return if my_instance is not valid
cgbn_load(bn1024_env, a, &(problem_instances[my_instance]).x);
cgbn_load(bn1024_env, b, &(problem_instances[my_instance]).y);
cgbn_load(bn1024_env, m, &(problem_instances[my_instance]).m);
cgbn_add(bn1024_env, res1, a, b);
if (cgbn_compare(bn1024_env, res1, m) >= 0) {
cgbn_sub(bn1024_env, res, res1, m);
} else {
cgbn_set(bn1024_env, res, res1);
}
cgbn_store(bn1024_env, &(problem_instances[my_instance].result), res);
}
__global__ void my_kernel(my_instance_t *problem_instances, uint32_t instance_count) {
context_t bn_context; // create a CGBN context
env1024_t bn1024_env(bn_context); // construct a bn environment for 1024 bit math
env1024_t::cgbn_t a, b, m; // three 1024-bit values (spread across a warp)
env1024_t::cgbn_wide_t mul_wide;
// uint32_t np0;
int32_t my_instance=(blockIdx.x*blockDim.x + threadIdx.x)/TPI; // determine my instance number
if(my_instance>=instance_count) return; // return if my_instance is not valid
cgbn_load(bn1024_env, a, &(problem_instances[my_instance]).x);
cgbn_load(bn1024_env, b, &(problem_instances[my_instance]).y);
cgbn_load(bn1024_env, m, &(problem_instances[my_instance]).m);
// np0 = -cgbn_binary_inverse_ui32(bn1024_env, cgbn_get_ui32(bn1024_env, m));
cgbn_mul_wide(bn1024_env, mul_wide, a, b);
cgbn_store(bn1024_env, &(problem_instances[my_instance].mul_lo), mul_wide._low);
cgbn_store(bn1024_env, &(problem_instances[my_instance].mul_hi), mul_wide._high);
}
void set_literal(cgbn_mem_t<BITS>& h, uint32_t literal, int num) {
for (int i = 1; i < num; i ++ ) {
h._limbs[i] = 0;
}
h._limbs[0] = literal;
}
void set_literal_limbs(cgbn_mem_t<BITS>& h, uint32_t literal, int num, int size) {
for (int i = 0; i < num; i ++ ) {
h._limbs[i] = literal;
}
for (int i = num; i < size; i ++ ) {
h._limbs[i] = 0;
}
}
void print_uint8_array(uint8_t* array, int size) {
for (int i = 0; i < size; i ++) {
printf("%02x", array[i]);
}
printf("\n");
}
std::vector<uint8_t*>* compute_mul_by11_cuda(std::vector<uint8_t*> a, uint8_t* input_m_base, int num_bytes) {
int num_elements = a.size();
add_instance_t *gpuInstances;
add_instance_t* instance_array = (add_instance_t*) malloc(sizeof(add_instance_t) * num_elements);
cgbn_error_report_t *report;
// create a cgbn_error_report for CGBN to report back errors
NEW_CUDA_CHECK(cgbn_error_report_alloc(&report));
for (int i = 0; i < num_elements; i ++) {
std::memcpy((void*)instance_array[i].x._limbs, (const void*) a[i], num_bytes);
std::memcpy((void*)instance_array[i].m._limbs, (const void*) input_m_base, num_bytes);
}
// printf("Copying instances to the GPU ...\n");
NEW_CUDA_CHECK(cudaSetDevice(0));
NEW_CUDA_CHECK(cudaMalloc((void **)&gpuInstances, sizeof(add_instance_t)*num_elements));
NEW_CUDA_CHECK(cudaMemcpy(gpuInstances, instance_array, sizeof(add_instance_t)*num_elements, cudaMemcpyHostToDevice));
int tpb = TPB;
// printf("\n Threads per block =%d", tpb);
int IPB = TPB/TPI;
int tpi = TPI;
// printf("\n Threads per instance = %d", tpi);
// printf("\n Instances per block = %d", IPB);
uint32_t num_blocks = (num_elements+IPB-1)/IPB;
// printf("\n Number of blocks = %d", num_blocks);
mul_by11_kernel<<<num_blocks, TPB>>>(gpuInstances, num_elements);
NEW_CUDA_CHECK(cudaDeviceSynchronize());
CGBN_CHECK(report);
// copy the instances back from gpuMemory
// printf("Copying results back to CPU ...\n");
NEW_CUDA_CHECK(cudaMemcpy(instance_array, gpuInstances, sizeof(add_instance_t)*num_elements, cudaMemcpyDeviceToHost));
std::vector<uint8_t*>* res_vector = new std::vector<uint8_t*>();
for (int i = 0; i < num_elements; i ++) {
uint8_t* result = (uint8_t*) malloc(num_bytes * sizeof(uint8_t));
std::memcpy((void*)result, (const void*)instance_array[i].result._limbs, num_bytes);
res_vector->emplace_back(result);
}
free(instance_array);
cudaFree(gpuInstances);
return res_vector;
}
std::vector<uint8_t*>* compute_mul_by13_cuda(std::vector<uint8_t*> a, uint8_t* input_m_base, int num_bytes) {
int num_elements = a.size();
add_instance_t *gpuInstances;
add_instance_t* instance_array = (add_instance_t*) malloc(sizeof(add_instance_t) * num_elements);
cgbn_error_report_t *report;
// create a cgbn_error_report for CGBN to report back errors
NEW_CUDA_CHECK(cgbn_error_report_alloc(&report));
for (int i = 0; i < num_elements; i ++) {
std::memcpy((void*)instance_array[i].x._limbs, (const void*) a[i], num_bytes);
std::memcpy((void*)instance_array[i].m._limbs, (const void*) input_m_base, num_bytes);
}
// printf("Copying instances to the GPU ...\n");
NEW_CUDA_CHECK(cudaSetDevice(0));
NEW_CUDA_CHECK(cudaMalloc((void **)&gpuInstances, sizeof(add_instance_t)*num_elements));
NEW_CUDA_CHECK(cudaMemcpy(gpuInstances, instance_array, sizeof(add_instance_t)*num_elements, cudaMemcpyHostToDevice));
int tpb = TPB;
// printf("\n Threads per block =%d", tpb);
int IPB = TPB/TPI;
int tpi = TPI;
// printf("\n Threads per instance = %d", tpi);
// printf("\n Instances per block = %d", IPB);
uint32_t num_blocks = (num_elements+IPB-1)/IPB;
// printf("\n Number of blocks = %d", num_blocks);
mul_by13_kernel<<<num_blocks, TPB>>>(gpuInstances, num_elements);
NEW_CUDA_CHECK(cudaDeviceSynchronize());
CGBN_CHECK(report);
// copy the instances back from gpuMemory
// printf("Copying results back to CPU ...\n");
NEW_CUDA_CHECK(cudaMemcpy(instance_array, gpuInstances, sizeof(add_instance_t)*num_elements, cudaMemcpyDeviceToHost));
std::vector<uint8_t*>* res_vector = new std::vector<uint8_t*>();
for (int i = 0; i < num_elements; i ++) {
uint8_t* result = (uint8_t*) malloc(num_bytes * sizeof(uint8_t));
std::memcpy((void*)result, (const void*)instance_array[i].result._limbs, num_bytes);
res_vector->emplace_back(result);
}
free(instance_array);
cudaFree(gpuInstances);
return res_vector;
}
std::vector<uint8_t*>* compute_addcuda(std::vector<uint8_t*> a, std::vector<uint8_t*> b, uint8_t* input_m_base, int num_bytes) {
int num_elements = a.size();
add_instance_t *gpuInstances;
add_instance_t* instance_array = (add_instance_t*) malloc(sizeof(add_instance_t) * num_elements);
cgbn_error_report_t *report;
// create a cgbn_error_report for CGBN to report back errors
NEW_CUDA_CHECK(cgbn_error_report_alloc(&report));
for (int i = 0; i < num_elements; i ++) {
std::memcpy((void*)instance_array[i].x._limbs, (const void*) a[i], num_bytes);
std::memcpy((void*)instance_array[i].y._limbs, (const void*) b[i], num_bytes);
std::memcpy((void*)instance_array[i].m._limbs, (const void*) input_m_base, num_bytes);
}
NEW_CUDA_CHECK(cudaSetDevice(0));
NEW_CUDA_CHECK(cudaMalloc((void **)&gpuInstances, sizeof(add_instance_t)*num_elements));
NEW_CUDA_CHECK(cudaMemcpy(gpuInstances, instance_array, sizeof(add_instance_t)*num_elements, cudaMemcpyHostToDevice));
int tpb = TPB;
// printf("\n Threads per block =%d", tpb);
int IPB = TPB/TPI;
int tpi = TPI;
// printf("\n Threads per instance = %d", tpi);
// printf("\n Instances per block = %d", IPB);
uint32_t num_blocks = (num_elements+IPB-1)/IPB;
// printf("\n Number of blocks = %d", num_blocks);
add_kernel<<<num_blocks, TPB>>>(gpuInstances, num_elements);
NEW_CUDA_CHECK(cudaDeviceSynchronize());
CGBN_CHECK(report);
// copy the instances back from gpuMemory
NEW_CUDA_CHECK(cudaMemcpy(instance_array, gpuInstances, sizeof(add_instance_t)*num_elements, cudaMemcpyDeviceToHost));
std::vector<uint8_t*>* res_vector = new std::vector<uint8_t*>();
for (int i = 0; i < num_elements; i ++) {
uint8_t* result = (uint8_t*) malloc(num_bytes * sizeof(uint8_t));
std::memcpy((void*)result, (const void*)instance_array[i].result._limbs, num_bytes);
res_vector->emplace_back(result);
}
free(instance_array);
cudaFree(gpuInstances);
return res_vector;
}
std::vector<uint8_t*>* compute_newcuda(std::vector<uint8_t*> a, std::vector<uint8_t*> b, uint8_t* input_m_base, int num_bytes, uint64_t inv) {
int num_elements = a.size();
my_instance_t *gpuInstances;
my_instance_t* instance_array = (my_instance_t*) malloc(sizeof(my_instance_t) * num_elements);
cgbn_error_report_t *report;
// create a cgbn_error_report for CGBN to report back errors
NEW_CUDA_CHECK(cgbn_error_report_alloc(&report));
for (int i = 0; i < num_elements; i ++) {
std::memcpy((void*)instance_array[i].x._limbs, (const void*) a[i], num_bytes);
std::memcpy((void*)instance_array[i].y._limbs, (const void*) b[i], num_bytes);
std::memcpy((void*)instance_array[i].m._limbs, (const void*) input_m_base, num_bytes);
}
printf("\nCopying instances to the GPU ...\n");
NEW_CUDA_CHECK(cudaSetDevice(0));
NEW_CUDA_CHECK(cudaMalloc((void **)&gpuInstances, sizeof(my_instance_t)*num_elements));
NEW_CUDA_CHECK(cudaMemcpy(gpuInstances, instance_array, sizeof(my_instance_t)*num_elements, cudaMemcpyHostToDevice));
int tpb = TPB;
printf("\n Threads per block =%d", tpb);
int IPB = TPB/TPI;
int tpi = TPI;
printf("\n Threads per instance = %d", tpi);
printf("\n Instances per block = %d", IPB);
uint32_t num_blocks = (num_elements+IPB-1)/IPB;
printf("\n Number of blocks = %d", num_blocks);
my_kernel<<<num_blocks, TPB>>>(gpuInstances, num_elements);
NEW_CUDA_CHECK(cudaDeviceSynchronize());
CGBN_CHECK(report);
// copy the instances back from gpuMemory
printf("\nCopying results back to CPU ...\n");
NEW_CUDA_CHECK(cudaMemcpy(instance_array, gpuInstances, sizeof(my_instance_t)*num_elements, cudaMemcpyDeviceToHost));
int num_limbs = num_bytes / 8;
printf("\n Setting num 64 limbs = %d", num_limbs);
mp_limb_t* num = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs * 2);
mp_limb_t* modulus = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs);
std::memcpy((void*) modulus, (const void*) instance_array->m._limbs, num_bytes);
//printf("\n Dumping modulus:");
//gmp_printf("%Nx\n", modulus, num_limbs);
std::vector<uint8_t*>* res_vector = new std::vector<uint8_t*>();
for (int i = 0; i < num_elements; i ++) {
// Reduce
std::memcpy((void*)num, (const void*)instance_array[i].mul_lo._limbs, num_bytes);
std::memcpy((void*) (num + num_limbs), (const void*)instance_array[i].mul_hi._limbs, num_bytes);
mp_limb_t* fresult = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs);
// printf("\n Dumping 64 byte limb wide num [%d]:", i);
// gmp_printf("%Nx\n", num, num_limbs * 2);
reduce_wide(fresult, num, modulus, inv, num_limbs);
// store the result.
res_vector->emplace_back((uint8_t*)fresult);
}
free(num);
free(modulus);
free(instance_array);
cudaFree(gpuInstances);
return res_vector;
}
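// Note: compute_newcuda splits the modular multiplication across devices -- the CGBN kernel
// produces the full double-width product (mul_lo, mul_hi) on the GPU, and the host then folds
// it back below the modulus with reduce_wide(). Judging by the commented-out np0 line in the
// kernel, `inv` is presumably the Montgomery constant -m^{-1} mod 2^64 that reduce_wide
// expects; treat that as an assumption rather than a documented fact.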
std::pair<std::vector<uint8_t*>, std::vector<uint8_t*> >
compute_quadex_cuda(std::vector<uint8_t*> x0_a0,
std::vector<uint8_t*> x0_a1,
std::vector<uint8_t*> y0_a0,
std::vector<uint8_t*> y0_a1,
uint8_t* input_m_base, int num_bytes, uint64_t inv) {
int num_elements = x0_a0.size();
std::vector<uint8_t*>* x0_y0;
std::vector<uint8_t*>* x0_y1;
std::vector<uint8_t*>* x1_y0;
std::vector<uint8_t*>* x1_y1;
std::vector<uint8_t*>* res_a0;
std::vector<uint8_t*>* res_a1;
// Logic:
// var x0_y0 = fq_mul(x.a0, y.a0);
// var x1_y1 = fq_mul(x.a1, y.a1);
// var x1_y0 = fq_mul(x.a1, y.a0);
// var x0_y1 = fq_mul(x.a0, y.a1);
// return {
// a0: fq_add(a0_b0, fq_mul(a1_b1, alpha)),
// a1: fq_add(a1_b0, a0_b1)
// };
//
  x0_y0 = compute_newcuda(x0_a0, y0_a0, input_m_base, num_bytes, inv);
  x0_y1 = compute_newcuda(x0_a0, y0_a1, input_m_base, num_bytes, inv);
  x1_y0 = compute_newcuda(x0_a1, y0_a0, input_m_base, num_bytes, inv);
  x1_y1 = compute_newcuda(x0_a1, y0_a1, input_m_base, num_bytes, inv);
  res_a1 = compute_addcuda(*x1_y0, *x0_y1, input_m_base, num_bytes);
  std::vector<uint8_t*>* res_a0_tmp = compute_mul_by13_cuda(*x1_y1, input_m_base, num_bytes);
  res_a0 = compute_addcuda(*x0_y0, *res_a0_tmp, input_m_base, num_bytes);
  std::pair<std::vector<uint8_t*>, std::vector<uint8_t*> > res = std::make_pair(*res_a0, *res_a1);
  // Release the intermediate products (same cleanup pattern as compute_cubex_cuda below).
  freeMem(x0_y0);      free(x0_y0);
  freeMem(x0_y1);      free(x0_y1);
  freeMem(x1_y0);      free(x1_y0);
  freeMem(x1_y1);      free(x1_y1);
  freeMem(res_a0_tmp); free(res_a0_tmp);
  return res;
}
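// Note: compute_quadex_cuda is a schoolbook Fq2 product. Writing u^2 = beta, the expansion is
//   (x0 + x1*u) * (y0 + y1*u) = (x0*y0 + beta*x1*y1) + (x1*y0 + x0*y1)*u,
// which matches res_a0 and res_a1 above. The call to compute_mul_by13_cuda suggests beta = 13
// for this field, but that value is inferred from the helper name, not stated in this file.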
struct cubex_result
compute_cubex_cuda(std::vector<uint8_t*> x0_a0,
std::vector<uint8_t*> x0_a1,
std::vector<uint8_t*> x0_a2,
std::vector<uint8_t*> y0_a0,
std::vector<uint8_t*> y0_a1,
std::vector<uint8_t*> y0_a2,
uint8_t* input_m_base, int num_bytes, uint64_t inv) {
int num_elements = x0_a0.size();
std::vector<uint8_t*>* x0_y0;
std::vector<uint8_t*>* x0_y1;
std::vector<uint8_t*>* x0_y2;
std::vector<uint8_t*>* x1_y0;
std::vector<uint8_t*>* x1_y1;
std::vector<uint8_t*>* x1_y2;
std::vector<uint8_t*>* x2_y0;
std::vector<uint8_t*>* x2_y1;
std::vector<uint8_t*>* x2_y2;
// Logic:
// var alpha = fq(11);
// var fq3_mul = (x, y) => {
// var x0_y0 = fq_mul(x.a0, y.a0);
// var x0_y1 = fq_mul(x.a0, y.a1);
// var x0_y2 = fq_mul(x.a0, y.a2);
//
// var x1_y0 = fq_mul(x.a1, y.a0);
// var x1_y1 = fq_mul(x.a1, y.a1);
// var x1_y2 = fq_mul(x.a1, y.a2);
//
// var x2_y0 = fq_mul(x.a2, y.a0);
// var x2_y1 = fq_mul(x.a2, y.a1);
// var x2_y2 = fq_mul(x.a2, y.a2);
//
// return {
// a0: fq_add(x0_y0, fq_mul(alpha, fq_add(x1_y2, x2_y1))),
// a1: fq_add(x0_y1, fq_add(x1_y0, fq_mul(alpha, x2_y2))),
// a2: fq_add(x0_y2, fq_add(x1_y1, x2_y0))
// };
// };
x0_y0 = compute_newcuda(x0_a0, y0_a0, input_m_base, num_bytes, inv);
x0_y1 = compute_newcuda(x0_a0, y0_a1, input_m_base, num_bytes, inv);
x0_y2 = compute_newcuda(x0_a0, y0_a2, input_m_base, num_bytes, inv);
x1_y0 = compute_newcuda(x0_a1, y0_a0, input_m_base, num_bytes, inv);
x1_y1 = compute_newcuda(x0_a1, y0_a1, input_m_base, num_bytes, inv);
x1_y2 = compute_newcuda(x0_a1, y0_a2, input_m_base, num_bytes, inv);
x2_y0 = compute_newcuda(x0_a2, y0_a0, input_m_base, num_bytes, inv);
x2_y1 = compute_newcuda(x0_a2, y0_a1, input_m_base, num_bytes, inv);
x2_y2 = compute_newcuda(x0_a2, y0_a2, input_m_base, num_bytes, inv);
std::vector<uint8_t*>* res_a0_tmp1;
std::vector<uint8_t*>* res_a0_tmp2;
struct cubex_result res;
res_a0_tmp1 = compute_addcuda(*x1_y2, *x2_y1, input_m_base, num_bytes);
res_a0_tmp2 = compute_mul_by11_cuda(*res_a0_tmp1, input_m_base, num_bytes);
res.coeff0 = compute_addcuda(*x0_y0, *res_a0_tmp2, input_m_base, num_bytes);
std::vector<uint8_t*>* res_a1_tmp1;
std::vector<uint8_t*>* res_a1_tmp2;
res_a1_tmp1 = compute_mul_by11_cuda(*x2_y2, input_m_base, num_bytes);
res_a1_tmp2 = compute_addcuda(*x1_y0, *res_a1_tmp1, input_m_base, num_bytes);
res.coeff1 = compute_addcuda(*x0_y1, *res_a1_tmp2, input_m_base, num_bytes);
std::vector<uint8_t*>* res_a2_tmp1;
res_a2_tmp1 = compute_addcuda(*x1_y1, *x2_y0, input_m_base, num_bytes);
res.coeff2 = compute_addcuda(*x0_y2, *res_a2_tmp1, input_m_base, num_bytes);
freeMem(x0_y0);
free(x0_y0);
freeMem(x0_y1);
free(x0_y1);
freeMem(x0_y2);
free(x0_y2);
freeMem(x1_y0);
free(x1_y0);
freeMem(x1_y1);
free(x1_y1);
freeMem(x1_y2);
free(x1_y2);
freeMem(x2_y0);
free(x2_y0);
freeMem(x2_y1);
free(x2_y1);
freeMem(x2_y2);
free(x2_y2);
freeMem(res_a0_tmp1);
free(res_a0_tmp1);
freeMem(res_a0_tmp2);
free(res_a0_tmp2);
freeMem(res_a1_tmp1);
free(res_a1_tmp1);
freeMem(res_a1_tmp2);
free(res_a1_tmp2);
freeMem(res_a2_tmp1);
free(res_a2_tmp1);
return res;
}
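// Note: each compute_* helper above returns a freshly heap-allocated std::vector whose elements
// are malloc'd buffers, so ownership passes to the caller. The freeMem(ptr); free(ptr); pairs
// above are the cleanup convention for intermediates; only the buffers referenced by the
// returned cubex_result stay alive.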
// TODO: Reduce the unnecessary copying here.
// TODO: Fix modulus copy operation.
std::vector<uint8_t*>* compute_array_sum_g1_gpu(std::vector<uint8_t*> x, std::vector<uint8_t*> y,
uint8_t* input_m_base, int num_bytes, uint64_t inv) {
int num_elements = x.size();
my_instance_t *gpuInstances;
my_instance_t* instance_array = (my_instance_t*) malloc(sizeof(my_instance_t) * num_elements);
cgbn_error_report_t *report;
// create a cgbn_error_report for CGBN to report back errors
NEW_CUDA_CHECK(cgbn_error_report_alloc(&report));
for (int i = 0; i < num_elements; i ++) {
    std::memcpy((void*)instance_array[i].x._limbs, (const void*) x[i], num_bytes);
    std::memcpy((void*)instance_array[i].y._limbs, (const void*) y[i], num_bytes);
std::memcpy((void*)instance_array[i].m._limbs, (const void*) input_m_base, num_bytes);
}
printf("\nCopying instances to the GPU ...\n");
NEW_CUDA_CHECK(cudaSetDevice(0));
NEW_CUDA_CHECK(cudaMalloc((void **)&gpuInstances, sizeof(my_instance_t)*num_elements));
NEW_CUDA_CHECK(cudaMemcpy(gpuInstances, instance_array, sizeof(my_instance_t)*num_elements, cudaMemcpyHostToDevice));
int tpb = TPB;
printf("\n Threads per block =%d", tpb);
int IPB = TPB/TPI;
int tpi = TPI;
printf("\n Threads per instance = %d", tpi);
printf("\n Instances per block = %d", IPB);
uint32_t num_blocks = (num_elements+IPB-1)/IPB;
printf("\n Number of blocks = %d", num_blocks);
my_kernel<<<num_blocks, TPB>>>(gpuInstances, num_elements);
NEW_CUDA_CHECK(cudaDeviceSynchronize());
CGBN_CHECK(report);
// copy the instances back from gpuMemory
printf("\nCopying results back to CPU ...\n");
NEW_CUDA_CHECK(cudaMemcpy(instance_array, gpuInstances, sizeof(my_instance_t)*num_elements, cudaMemcpyDeviceToHost));
int num_limbs = num_bytes / 8;
printf("\n Setting num 64 limbs = %d", num_limbs);
mp_limb_t* num = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs * 2);
mp_limb_t* modulus = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs);
std::memcpy((void*) modulus, (const void*) instance_array->m._limbs, num_bytes);
//printf("\n Dumping modulus:");
//gmp_printf("%Nx\n", modulus, num_limbs);
std::vector<uint8_t*>* res_vector = new std::vector<uint8_t*>();
for (int i = 0; i < num_elements; i ++) {
// Reduce
std::memcpy((void*)num, (const void*)instance_array[i].mul_lo._limbs, num_bytes);
std::memcpy((void*) (num + num_limbs), (const void*)instance_array[i].mul_hi._limbs, num_bytes);
mp_limb_t* fresult = (mp_limb_t*)malloc(sizeof(mp_limb_t) * num_limbs);
// printf("\n Dumping 64 byte limb wide num [%d]:", i);
// gmp_printf("%Nx\n", num, num_limbs * 2);
reduce_wide(fresult, num, modulus, inv, num_limbs);
// store the result.
res_vector->emplace_back((uint8_t*)fresult);
}
free(num);
free(modulus);
free(instance_array);
cudaFree(gpuInstances);
return res_vector;
}
G1Point* compute_array_sum_g1(std::vector<uint8_t*> x, std::vector<uint8_t*> y,
uint8_t* input_m_base, int num_bytes, uint64_t inv) {
//
  int num_elements = x.size();
int skip = 1;
// parallel add.
while (skip < num_elements) {
compute_cuda_sum_g1_internal(x, y, skip, input_m_base, num_bytes, inv);
skip = skip * 2;
}
  // result is in x[0], y[0]
  // TODO: wrap x[0], y[0] into a G1Point and return it.
  return NULL;
}
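// Note: the skip-doubling loop above is a binary-tree reduction over the point arrays, leaving
// the running sum in x[0], y[0] after about log2(num_elements) passes. Assuming
// compute_cuda_sum_g1_internal (defined elsewhere) combines element i with element i+skip for
// every i that is a multiple of 2*skip, one pass is roughly equivalent to:
//   for (int i = 0; i + skip < num_elements; i += 2 * skip) point[i] = point[i] + point[i + skip];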
|
0e9c84d5f3bcd6d52ae3396ed1c728f7669c5517.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// CUDA Kernel function to add the elements of two arrays on the GPU
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
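// Note: the kernel above uses the grid-stride loop idiom -- each thread starts at its global
// index and advances by the total thread count (blockDim.x * gridDim.x), so any grid size
// covers all n elements; the (N + blockSize - 1) / blockSize launch below is just one choice.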
int main(void)
{
int N = 1e8;
// Allocate host mem
float *x = new float[N];
float *y = new float[N];
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Prepare GPU mem
float *dev_a, *dev_b;
hipMalloc((void**)&dev_a, N*sizeof(float));
hipMalloc((void**)&dev_b, N*sizeof(float));
hipMemcpy(dev_a, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dev_b, y, N*sizeof(float), hipMemcpyHostToDevice);
  // Run the kernel over all N elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, dev_a, dev_b);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Get the results back
hipMemcpy(y, dev_b, N*sizeof(float), hipMemcpyDeviceToHost);
// Free GPU mem
hipFree(dev_a);
hipFree(dev_b);
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
delete [] x;
delete [] y;
return 0;
}
| 0e9c84d5f3bcd6d52ae3396ed1c728f7669c5517.cu | #include <iostream>
#include <math.h>
// CUDA Kernel function to add the elements of two arrays on the GPU
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1e8;
// Allocate host mem
float *x = new float[N];
float *y = new float[N];
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Prepare GPU mem
float *dev_a, *dev_b;
cudaMalloc((void**)&dev_a, N*sizeof(float));
cudaMalloc((void**)&dev_b, N*sizeof(float));
cudaMemcpy(dev_a, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, y, N*sizeof(float), cudaMemcpyHostToDevice);
  // Run the kernel over all N elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
add<<<numBlocks, blockSize>>>(N, dev_a, dev_b);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Get the results back
cudaMemcpy(y, dev_b, N*sizeof(float), cudaMemcpyDeviceToHost);
// Free GPU mem
cudaFree(dev_a);
cudaFree(dev_b);
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
delete [] x;
delete [] y;
return 0;
}
|
13f4e4b3e5f1cff8f376f414b44d8c054f842bb1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "pnpolyGPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *vertex = NULL;
hipMalloc(&vertex, XSIZE*YSIZE);
float testx = 1;
float testy = 1;
int *results = NULL;
hipMalloc(&results, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
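// Note: the two while loops above round iXSIZE/iYSIZE up to the next multiple of BLOCKX/BLOCKY,
// so the grid below is effectively ceil(XSIZE/BLOCKX) x ceil(YSIZE/BLOCKY) blocks.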
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
pnpolyGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, vertex,testx,testy,results);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
pnpolyGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, vertex,testx,testy,results);
}
hipDeviceSynchronize();  // drain the warm-up launches before starting the timer
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
pnpolyGPU), dim3(gridBlock),dim3(threadBlock), 0, 0, vertex,testx,testy,results);
}
hipDeviceSynchronize();  // ensure the timed kernels have finished, so we measure execution rather than launch overhead
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 13f4e4b3e5f1cff8f376f414b44d8c054f842bb1.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "pnpolyGPU.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *vertex = NULL;
cudaMalloc(&vertex, XSIZE*YSIZE);
float testx = 1;
float testy = 1;
int *results = NULL;
cudaMalloc(&results, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
pnpolyGPU<<<gridBlock,threadBlock>>>(vertex,testx,testy,results);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
pnpolyGPU<<<gridBlock,threadBlock>>>(vertex,testx,testy,results);
}
cudaDeviceSynchronize();  // drain the warm-up launches before starting the timer
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
pnpolyGPU<<<gridBlock,threadBlock>>>(vertex,testx,testy,results);
}
cudaDeviceSynchronize();  // ensure the timed kernels have finished, so we measure execution rather than launch overhead
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
4dc92d1660c43aa74af98d518669053346c5be5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/nb14/dihedral_14_lj_atom_energy_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
__global__ void Dihedral14LJAtomEnergyKernel(const int dihedral_14_numbers, const UINT_VECTOR_LJ_TYPE *uint_crd,
const VECTOR *boxlength, const int *a_14, const int *b_14,
const float *lj_scale_factor, const float *LJ_type_A,
const float *LJ_type_B, float *ene) {
int dihedral_14_i = blockDim.x * blockIdx.x + threadIdx.x;
if (dihedral_14_i < dihedral_14_numbers) {
int atom_i = a_14[dihedral_14_i];
int atom_j = b_14[dihedral_14_i];
UINT_VECTOR_LJ_TYPE r1 = uint_crd[atom_i];
UINT_VECTOR_LJ_TYPE r2 = uint_crd[atom_j];
int int_x;
int int_y;
int int_z;
VECTOR dr;
float dr2;
float dr_2;
float dr_4;
float dr_6;
float dr_12;
float ene_lin = 0.;
int x, y;
int atom_pair_LJ_type;
int_x = r2.uint_x - r1.uint_x;
int_y = r2.uint_y - r1.uint_y;
int_z = r2.uint_z - r1.uint_z;
dr.x = boxlength[0].x * int_x;
dr.y = boxlength[0].y * int_y;
dr.z = boxlength[0].z * int_z;
dr2 = dr.x * dr.x + dr.y * dr.y + dr.z * dr.z;
dr_2 = 1. / dr2;
dr_4 = dr_2 * dr_2;
dr_6 = dr_4 * dr_2;
dr_12 = dr_6 * dr_6;
y = (r2.LJ_type - r1.LJ_type);
x = y >> 31;
y = (y ^ x) - x;
x = r2.LJ_type + r1.LJ_type;
r2.LJ_type = (x + y) >> 1;
x = (x - y) >> 1;
atom_pair_LJ_type = (r2.LJ_type * (r2.LJ_type + 1) >> 1) + x;
ene_lin = 0.08333333 * LJ_type_A[atom_pair_LJ_type] * dr_12 -
              0.1666666 * LJ_type_B[atom_pair_LJ_type] * dr_6;  // the LJ A,B coefficients are stored pre-multiplied by 12 and 6, so scale back by 1/12 and 1/6
ene_lin *= lj_scale_factor[dihedral_14_i];
atomicAdd(&ene[atom_i], ene_lin);
}
}
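// Note: the 1-4 Lennard-Jones energy above is E = A/r^12 - B/r^6, scaled by lj_scale_factor.
// Because the stored A and B coefficients are pre-multiplied by 12 and 6, the kernel multiplies
// by 1/12 (~0.08333333) and 1/6 (~0.1666666) to recover the plain energy; dr_12 and dr_6 are
// 1/r^12 and 1/r^6 built from the squared pair distance dr2.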
void Dihedral14LJAtomEnergy(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f,
const int *LJtype, const float *charge, const float *boxlength_f, const int *a_14,
const int *b_14, const float *lj_scale_factor, const float *LJ_type_A,
const float *LJ_type_B, float *ene, hipStream_t stream) {
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(atom_numbers) / 128);
UINT_VECTOR_LJ_TYPE *uint_crd_with_LJ = NULL;
Cuda_Malloc_Safely(reinterpret_cast<void **>(&uint_crd_with_LJ), sizeof(UINT_VECTOR_LJ_TYPE) * atom_numbers);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
hipLaunchKernelGGL(( Copy_Crd_To_New_Crd_Start), dim3(ceilf(static_cast<float>(atom_numbers) / 32)), dim3(32), 0, stream,
atom_numbers, uint_crd, uint_crd_with_LJ, LJtype, charge);
VECTOR *boxlength = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(boxlength_f));
hipLaunchKernelGGL(( Reset_List), dim3(ceilf(static_cast<float>(3. * atom_numbers) / 128)), dim3(128), 0, stream, atom_numbers, ene, 0.);
hipLaunchKernelGGL(( Dihedral14LJAtomEnergyKernel), dim3(block_per_grid), dim3(thread_per_block), 0, stream,
dihedral_14_numbers, uint_crd_with_LJ, boxlength, a_14, b_14, lj_scale_factor, LJ_type_A, LJ_type_B, ene);
hipStreamSynchronize(stream);
return;
}
void Dihedral14LJAtomEnergy(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f,
const int *LJtype, const float *charge, const float *boxlength_f, const int *a_14,
const int *b_14, const float *lj_scale_factor, const float *LJ_type_A,
const float *LJ_type_B, float *ene, hipStream_t stream);
| 4dc92d1660c43aa74af98d518669053346c5be5d.cu | /**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/nb14/dihedral_14_lj_atom_energy_impl.cuh"
#include "backend/kernel_compiler/gpu/cuda_impl/sponge/common_sponge.cuh"
__global__ void Dihedral14LJAtomEnergyKernel(const int dihedral_14_numbers, const UINT_VECTOR_LJ_TYPE *uint_crd,
const VECTOR *boxlength, const int *a_14, const int *b_14,
const float *lj_scale_factor, const float *LJ_type_A,
const float *LJ_type_B, float *ene) {
int dihedral_14_i = blockDim.x * blockIdx.x + threadIdx.x;
if (dihedral_14_i < dihedral_14_numbers) {
int atom_i = a_14[dihedral_14_i];
int atom_j = b_14[dihedral_14_i];
UINT_VECTOR_LJ_TYPE r1 = uint_crd[atom_i];
UINT_VECTOR_LJ_TYPE r2 = uint_crd[atom_j];
int int_x;
int int_y;
int int_z;
VECTOR dr;
float dr2;
float dr_2;
float dr_4;
float dr_6;
float dr_12;
float ene_lin = 0.;
int x, y;
int atom_pair_LJ_type;
int_x = r2.uint_x - r1.uint_x;
int_y = r2.uint_y - r1.uint_y;
int_z = r2.uint_z - r1.uint_z;
dr.x = boxlength[0].x * int_x;
dr.y = boxlength[0].y * int_y;
dr.z = boxlength[0].z * int_z;
dr2 = dr.x * dr.x + dr.y * dr.y + dr.z * dr.z;
dr_2 = 1. / dr2;
dr_4 = dr_2 * dr_2;
dr_6 = dr_4 * dr_2;
dr_12 = dr_6 * dr_6;
y = (r2.LJ_type - r1.LJ_type);
x = y >> 31;
y = (y ^ x) - x;
x = r2.LJ_type + r1.LJ_type;
r2.LJ_type = (x + y) >> 1;
x = (x - y) >> 1;
atom_pair_LJ_type = (r2.LJ_type * (r2.LJ_type + 1) >> 1) + x;
ene_lin = 0.08333333 * LJ_type_A[atom_pair_LJ_type] * dr_12 -
0.1666666 * LJ_type_B[atom_pair_LJ_type] * dr_6; // LJ的A,B系数已经乘以12和6因此要反乘
ene_lin *= lj_scale_factor[dihedral_14_i];
atomicAdd(&ene[atom_i], ene_lin);
}
}
void Dihedral14LJAtomEnergy(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f,
const int *LJtype, const float *charge, const float *boxlength_f, const int *a_14,
const int *b_14, const float *lj_scale_factor, const float *LJ_type_A,
const float *LJ_type_B, float *ene, cudaStream_t stream) {
size_t thread_per_block = 128;
size_t block_per_grid = ceilf(static_cast<float>(atom_numbers) / 128);
UINT_VECTOR_LJ_TYPE *uint_crd_with_LJ = NULL;
Cuda_Malloc_Safely(reinterpret_cast<void **>(&uint_crd_with_LJ), sizeof(UINT_VECTOR_LJ_TYPE) * atom_numbers);
UNSIGNED_INT_VECTOR *uint_crd =
const_cast<UNSIGNED_INT_VECTOR *>(reinterpret_cast<const UNSIGNED_INT_VECTOR *>(uint_crd_f));
Copy_Crd_To_New_Crd_Start<<<ceilf(static_cast<float>(atom_numbers) / 32), 32, 0, stream>>>(
atom_numbers, uint_crd, uint_crd_with_LJ, LJtype, charge);
VECTOR *boxlength = const_cast<VECTOR *>(reinterpret_cast<const VECTOR *>(boxlength_f));
Reset_List<<<ceilf(static_cast<float>(3. * atom_numbers) / 128), 128, 0, stream>>>(atom_numbers, ene, 0.);
Dihedral14LJAtomEnergyKernel<<<block_per_grid, thread_per_block, 0, stream>>>(
dihedral_14_numbers, uint_crd_with_LJ, boxlength, a_14, b_14, lj_scale_factor, LJ_type_A, LJ_type_B, ene);
cudaStreamSynchronize(stream);
return;
}
void Dihedral14LJAtomEnergy(const int dihedral_14_numbers, const int atom_numbers, const int *uint_crd_f,
const int *LJtype, const float *charge, const float *boxlength_f, const int *a_14,
const int *b_14, const float *lj_scale_factor, const float *LJ_type_A,
const float *LJ_type_B, float *ene, cudaStream_t stream);
|
e8ab03d6607faab7dbed0b0369d3234826bb08ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "DCNLayer.hpp"
#include <common/json.hpp>
#include <rocblas.h>
using namespace Plugin;
typedef TRTInfer::halfloat halfloat;
#define cublasCheck(op) \
do { \
auto ret = (op); \
if (ret != HIPBLAS_STATUS_SUCCESS) { \
INFO("%s fail, %d != %d", #op, ret, HIPBLAS_STATUS_SUCCESS); \
abort(); \
} \
} while (0);
template<typename _T>
static __global__ void sigmoidKernel(_T* input, _T* output, int edge);
template<>
__global__ void sigmoidKernel(float* input, float* output, int edge) {
KERNEL_POSITION;
output[position] = 1 / (1 + exp(-input[position]));
}
template<>
__global__ void sigmoidKernel(halfloat* input, halfloat* output, int edge) {
KERNEL_POSITION;
halfloat one = 1.0f;
output[position] = one / (one + hexp(-input[position]));
}
static __device__ float dmcnIm2colBilinearFP32(const float *bottom_data, const int data_width,
const int height, const int width, float h, float w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
float lh = h - h_low;
float lw = w - w_low;
float hh = 1 - lh, hw = 1 - lw;
float v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
float v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
float v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
float v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
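// Note: dmcnIm2colBilinearFP32/FP16 are plain bilinear interpolation at the fractional sample
// point (h, w). With lh = h - floor(h) and lw = w - floor(w), the result is
//   (1-lh)*(1-lw)*v1 + (1-lh)*lw*v2 + lh*(1-lw)*v3 + lh*lw*v4,
// where v1..v4 are the four neighbouring pixels and out-of-range neighbours contribute zero.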
static __device__ halfloat dmcnIm2colBilinearFP16(const halfloat *bottom_data, const int data_width,
const int height, const int width, const halfloat& h, const halfloat& w)
{
int h_low = hfloor(h);
int w_low = hfloor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
halfloat one = 1.0f;
halfloat h_low_hf = h_low;
halfloat w_low_hf = w_low;
halfloat lh = h - h_low_hf;
halfloat lw = w - w_low_hf;
halfloat hh = one - lh, hw = one - lw;
halfloat zero = 0.0f;
halfloat v1 = zero;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
halfloat v2 = zero;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
halfloat v3 = zero;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
halfloat v4 = zero;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
halfloat w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
return (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
}
template<typename _T>
static __global__ void DCNIm2colKernel(
const _T *data_input, const _T *data_offset, const _T *data_mask,
const int height_input, const int width_input, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_output, const int width_output,
_T *data_output, int edge);
template<>
__global__ void DCNIm2colKernel(
const float *data_input, const float *data_offset, const float *data_mask,
const int height_input, const int width_input, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_output, const int width_output,
float *data_output, int edge)
{
KERNEL_POSITION;
const int f_area_input = width_input * height_input;
const int f_area_output = width_output * height_output;
// index index of output matrix
const int w_output = position % width_output;
const int h_output = (position / width_output) % height_output;
const int c_input = (position / width_output / height_output) % num_channels;
const int c_output = c_input * kernel_h * kernel_w;
const int deformable_group_index = c_input / channel_per_deformable_group;
const int h_input = h_output * stride_h - pad_h;
const int w_input = w_output * stride_w - pad_w;
int data_output_offset = c_input * kernel_h * kernel_w * f_area_output + h_output * width_output + w_output;
float *data_output_ptr = data_output + data_output_offset;
const float *data_input_ptr = data_input + c_input * f_area_input;
const float *data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * f_area_output;
const float *data_mask_ptr = data_mask + deformable_group_index * kernel_h * kernel_w * f_area_output;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int row = i + h_input;
const int col = j + w_input;
const int kernel_index = i * kernel_w + j;
const int offset_h_offset = 2 * kernel_index * f_area_output + h_output * width_output + w_output;
const int offset_w_offset = (2 * kernel_index + 1) * f_area_output + h_output * width_output + w_output;
const int mask_offset = kernel_index * f_area_output + h_output * width_output + w_output;
const float offset_h = data_offset_ptr[offset_h_offset];
const float offset_w = data_offset_ptr[offset_w_offset];
const float mask = data_mask_ptr[mask_offset];
float val = 0;
const float h_im = h_input + i * dilation_h + offset_h;
const float w_im = w_input + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height_input && w_im < width_input)
{
val = dmcnIm2colBilinearFP32(data_input_ptr, width_input, height_input, width_input, h_im, w_im);
}
*data_output_ptr = val * mask;
data_output_ptr += f_area_output;
}
}
}
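// Note: per deformable group, data_offset is laid out as 2*kernel_h*kernel_w feature maps (an
// interleaved offset_h / offset_w pair for every kernel position) and data_mask as
// kernel_h*kernel_w modulation maps -- this is the layout the offset_h_offset, offset_w_offset
// and mask_offset index arithmetic above assumes.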
template<>
__global__ void DCNIm2colKernel(
const halfloat *data_input, const halfloat *data_offset, const halfloat *data_mask,
const int height_input, const int width_input, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_output, const int width_output,
halfloat *data_output, int edge)
{
KERNEL_POSITION;
const int f_area_input = width_input * height_input;
const int f_area_output = width_output * height_output;
// index index of output matrix
const int w_output = position % width_output;
const int h_output = (position / width_output) % height_output;
const int c_input = (position / width_output / height_output) % num_channels;
const int c_output = c_input * kernel_h * kernel_w;
const int deformable_group_index = c_input / channel_per_deformable_group;
const int h_input = h_output * stride_h - pad_h;
const int w_input = w_output * stride_w - pad_w;
halfloat width_input_hf = __float2half(width_input);
halfloat height_input_hf = __float2half(height_input);
halfloat h_input_hf = __float2half(h_input);
halfloat w_input_hf = __float2half(w_input);
halfloat dilation_h_hf = __float2half(dilation_h);
halfloat dilation_w_hf = __float2half(dilation_w);
int data_output_offset = c_input * kernel_h * kernel_w * f_area_output + h_output * width_output + w_output;
halfloat *data_output_ptr = data_output + data_output_offset;
const halfloat *data_input_ptr = data_input + c_input * f_area_input;
const halfloat *data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * f_area_output;
const halfloat *data_mask_ptr = data_mask + deformable_group_index * kernel_h * kernel_w * f_area_output;
halfloat n_one = -1.0f;
halfloat zero = 0.0f;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
halfloat i_hf = __float2half(i);
halfloat j_hf = __float2half(j);
const int row = i + h_input;
const int col = j + w_input;
const int kernel_index = i * kernel_w + j;
const int offset_h_offset = 2 * kernel_index * f_area_output + h_output * width_output + w_output;
const int offset_w_offset = (2 * kernel_index + 1) * f_area_output + h_output * width_output + w_output;
const int mask_offset = kernel_index * f_area_output + h_output * width_output + w_output;
const halfloat offset_h = data_offset_ptr[offset_h_offset];
const halfloat offset_w = data_offset_ptr[offset_w_offset];
const halfloat mask = data_mask_ptr[mask_offset];
halfloat val = zero;
halfloat h_im = h_input_hf + i_hf * dilation_h_hf + offset_h;
halfloat w_im = w_input_hf + j_hf * dilation_w_hf + offset_w;
if (h_im > n_one && w_im > n_one && h_im < height_input_hf && w_im < width_input_hf)
{
val = dmcnIm2colBilinearFP16(data_input_ptr, width_input_hf, height_input_hf, width_input_hf, h_im, w_im);
}
*data_output_ptr = val * mask;
data_output_ptr += f_area_output;
}
}
}
template<typename _T>
static __global__ void biasKernel(_T* data_input, const _T* bias, const int f_area, int edge) {
KERNEL_POSITION;
int bias_index = position / f_area;
data_input[position] += bias[bias_index];
}
namespace Plugin {
DCNLayer::DCNLayer() {
cublasCheck(hipblasCreate(&cublasHandle_));
}
DCNLayer::~DCNLayer() {
cublasCheck(hipblasDestroy(cublasHandle_));
}
template<typename _T>
inline void segemm_native(hipblasHandle_t handle,
hipblasOperation_t transa,
hipblasOperation_t transb,
int m,
int n,
int k,
float alpha, /* host or device pointer */
const _T *A,
int lda,
const _T *B,
int ldb,
float beta, /* host or device pointer */
_T *C,
int ldc);
template<>
inline void segemm_native<float>(hipblasHandle_t handle,
hipblasOperation_t transa,
hipblasOperation_t transb,
int m,
int n,
int k,
float alpha, /* host or device pointer */
const float *A,
int lda,
const float *B,
int ldb,
float beta, /* host or device pointer */
float *C,
int ldc) {
cublasCheck(hipblasSgemm(handle, transa, transb, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc));
//cublasCheck(hipblasGemmEx(handle, transa, transb, m, n, k, &alpha, A, HIP_R_32F, lda, B, HIP_R_32F, ldb, &beta, C, HIP_R_32F, ldc, HIP_R_32F, CUBLAS_GEMM_DFALT));
}
template<>
inline void segemm_native<TRTInfer::halfloat>(hipblasHandle_t handle,
hipblasOperation_t transa,
hipblasOperation_t transb,
int m,
int n,
int k,
float alpha,
const TRTInfer::halfloat *A,
int lda,
const TRTInfer::halfloat *B,
int ldb,
float beta,
TRTInfer::halfloat *C,
int ldc) {
auto halpha = TRTInfer::halfloat(alpha);
auto hbeta = TRTInfer::halfloat(beta);
//cublasCheck(hipblasHgemm(handle, transa, transb, m, n, k, &halpha, A, lda, B, ldb, &hbeta, C, ldc));
cublasCheck(hipblasGemmEx(handle, transa, transb, m, n, k, &halpha, A, HIP_R_16F, lda, B, HIP_R_16F, ldb, &hbeta, C, HIP_R_16F, ldc, HIP_R_16F, CUBLAS_GEMM_DFALT));
}
template<typename _T>
void enqueue_native(hipblasHandle_t handle, const std::vector<GTensor>& inputs, std::vector<GTensor>& outputs, const std::vector<GTensor>& weights, void* workspace, hipStream_t stream) {
auto& data = inputs[0];
auto& om = inputs[1];
auto& out = outputs[0];
int kernel_size = 3;
int deformable_group = 1;
size_t maskSize = (size_t)data.height_ * data.width_ * kernel_size * kernel_size * deformable_group;
size_t im2colSize = (size_t)data.channel_ * kernel_size * kernel_size * out.height_ * out.width_;
const int m = out.channel_;
const int n = out.count(2);
const int k = data.channel_ * kernel_size * kernel_size;
float alpha = 1.0;
float beta = 0.0;
cublasCheck(hipblasSetStream(handle, stream));
for (int ibatch = 0; ibatch < data.num_; ++ibatch) {
_T* maskWorkspacePtr = (_T*)workspace + (maskSize + im2colSize) * ibatch;
_T* im2colWorkspacePtr = (_T*)workspace + (maskSize + im2colSize) * ibatch + maskSize;
_T* inputMask = om.ptr<_T>(ibatch, om.channel_ / 3 * 2);
ExecuteKernel(maskSize, sigmoidKernel, stream)(inputMask, maskWorkspacePtr, maskSize);
_T* datainput = data.ptr<_T>(ibatch);
_T* offset = om.ptr<_T>(ibatch);
ExecuteKernel(im2colSize, DCNIm2colKernel, stream)(
datainput, offset, maskWorkspacePtr, data.height_, data.width_, kernel_size, kernel_size, 1, 1, 1, 1, 1, 1, data.channel_, data.num_, data.channel_, deformable_group,
out.height_, out.width_, im2colWorkspacePtr, im2colSize);
_T* weightKernel = weights[0].ptr<_T>();
segemm_native(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m, k, alpha, im2colWorkspacePtr, n, weightKernel, k, beta, out.ptr<_T>(ibatch), n);
if (weights.size() > 1) {
_T* weightBias = weights[1].ptr<_T>();
size_t edge = out.count(1);
size_t area = out.count(2);
ExecuteKernel(edge, biasKernel, stream)(out.ptr<_T>(ibatch), weightBias, area, edge);
}
}
}
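	// Note: after im2col the convolution reduces to a GEMM with m = out.channel_,
	// k = data.channel_ * kernel_size * kernel_size and n = out.height_ * out.width_, i.e.
	// output(m x n) = weight(m x k) * columns(k x n). Since cuBLAS/hipBLAS is column-major while
	// these buffers are row-major, segemm_native is called with the operands swapped (columns
	// first, weights second) so the column-major result occupies exactly the row-major output
	// buffer -- the usual row-major GEMM trick.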
int DCNLayer::enqueue(const std::vector<GTensor>& inputs, std::vector<GTensor>& outputs, const std::vector<GTensor>& weights, void* workspace, hipStream_t stream) {
if (config_->configDataType_ == TRTInfer::DataType::dtFloat) {
enqueue_native<float>(cublasHandle_, inputs, outputs, weights, workspace, stream);
}
else if (config_->configDataType_ == TRTInfer::DataType::dtHalfloat) {
enqueue_native<TRTInfer::halfloat>(cublasHandle_, inputs, outputs, weights, workspace, stream);
}
return 0;
}
nvinfer1::Dims DCNLayer::outputDims(int index, const nvinfer1::Dims* inputDims, int nbInputDims) {
return nvinfer1::Dims3(config_->weights_[0]->num(), inputDims[0].d[1], inputDims[0].d[2]);
}
size_t DCNLayer::getWorkspaceSize(int maxBatchSize) const {
		// size of the im2col workspace
int kernel_size = 3;
int deformable_group = 1;
//inputChannel * k * k * outputHeight * outputWidth
size_t im2colSize = (size_t)config_->input[0].d[0] * kernel_size * kernel_size * config_->output[0].d[1] * config_->output[0].d[2];
size_t maskSize = (size_t)config_->input[0].d[1] * config_->input[0].d[2] * kernel_size * kernel_size * deformable_group;
config_->workspaceSize_ = (im2colSize + maskSize) * maxBatchSize * TRTInfer::dataTypeSize(config_->configDataType_);
return config_->workspaceSize_;
}
std::shared_ptr<LayerConfig> DCNLayer::config(const std::string& layerName) {
auto cfg = TRTPlugin::config(layerName);
if (this->phase_ == CompilePhase) {
int p = layerName.find('@');
Assert(p != -1);
auto param = layerName.substr(p + 1);
Json::Value pjson;
if (!Json::Reader().parse(param, pjson)) {
LOG(LFATAL) << "can not parse param: " << param.c_str();
}
//cfg->supportDataType_ = {nvinfer1::DataType::kHALF, nvinfer1::DataType::kFLOAT};
//cfg->supportDataType_ = {nvinfer1::DataType::kHALF};
		// avoid mixing half and float here, because the data type is chosen by timing the computation
cfg->supportDataType_ = {nvinfer1::DataType::kFLOAT};
auto& weights = pjson["weights"];
cfg->weights_.resize(weights.size());
for (int i = 0; i < weights.size(); ++i) {
auto& weightShape = weights[i];
std::vector<int> dims;
for (int j = 0; j < weightShape.size(); ++j) {
dims.push_back(weightShape[j].asInt());
}
cfg->weights_[i].reset(new TRTInfer::Tensor(dims.size(), dims.data(), TRTInfer::DataType::dtFloat));
}
}
return cfg;
}
}
RegisterPlugin(DCNLayer); | e8ab03d6607faab7dbed0b0369d3234826bb08ed.cu |
#include "DCNLayer.hpp"
#include <common/json.hpp>
#include <cublas_v2.h>
using namespace Plugin;
typedef TRTInfer::halfloat halfloat;
#define cublasCheck(op) \
do { \
auto ret = (op); \
if (ret != CUBLAS_STATUS_SUCCESS) { \
INFO("%s fail, %d != %d", #op, ret, CUBLAS_STATUS_SUCCESS); \
abort(); \
} \
} while (0);
template<typename _T>
static __global__ void sigmoidKernel(_T* input, _T* output, int edge);
template<>
__global__ void sigmoidKernel(float* input, float* output, int edge) {
KERNEL_POSITION;
output[position] = 1 / (1 + exp(-input[position]));
}
template<>
__global__ void sigmoidKernel(halfloat* input, halfloat* output, int edge) {
KERNEL_POSITION;
halfloat one = 1.0f;
output[position] = one / (one + hexp(-input[position]));
}
static __device__ float dmcnIm2colBilinearFP32(const float *bottom_data, const int data_width,
const int height, const int width, float h, float w)
{
int h_low = floor(h);
int w_low = floor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
float lh = h - h_low;
float lw = w - w_low;
float hh = 1 - lh, hw = 1 - lw;
float v1 = 0;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
float v2 = 0;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
float v3 = 0;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
float v4 = 0;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
float w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
static __device__ halfloat dmcnIm2colBilinearFP16(const halfloat *bottom_data, const int data_width,
const int height, const int width, const halfloat& h, const halfloat& w)
{
int h_low = hfloor(h);
int w_low = hfloor(w);
int h_high = h_low + 1;
int w_high = w_low + 1;
halfloat one = 1.0f;
halfloat h_low_hf = h_low;
halfloat w_low_hf = w_low;
halfloat lh = h - h_low_hf;
halfloat lw = w - w_low_hf;
halfloat hh = one - lh, hw = one - lw;
halfloat zero = 0.0f;
halfloat v1 = zero;
if (h_low >= 0 && w_low >= 0)
v1 = bottom_data[h_low * data_width + w_low];
halfloat v2 = zero;
if (h_low >= 0 && w_high <= width - 1)
v2 = bottom_data[h_low * data_width + w_high];
halfloat v3 = zero;
if (h_high <= height - 1 && w_low >= 0)
v3 = bottom_data[h_high * data_width + w_low];
halfloat v4 = zero;
if (h_high <= height - 1 && w_high <= width - 1)
v4 = bottom_data[h_high * data_width + w_high];
halfloat w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;
return (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
}
template<typename _T>
static __global__ void DCNIm2colKernel(
const _T *data_input, const _T *data_offset, const _T *data_mask,
const int height_input, const int width_input, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_output, const int width_output,
_T *data_output, int edge);
template<>
__global__ void DCNIm2colKernel(
const float *data_input, const float *data_offset, const float *data_mask,
const int height_input, const int width_input, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_output, const int width_output,
float *data_output, int edge)
{
KERNEL_POSITION;
const int f_area_input = width_input * height_input;
const int f_area_output = width_output * height_output;
// index index of output matrix
const int w_output = position % width_output;
const int h_output = (position / width_output) % height_output;
const int c_input = (position / width_output / height_output) % num_channels;
const int c_output = c_input * kernel_h * kernel_w;
const int deformable_group_index = c_input / channel_per_deformable_group;
const int h_input = h_output * stride_h - pad_h;
const int w_input = w_output * stride_w - pad_w;
int data_output_offset = c_input * kernel_h * kernel_w * f_area_output + h_output * width_output + w_output;
float *data_output_ptr = data_output + data_output_offset;
const float *data_input_ptr = data_input + c_input * f_area_input;
const float *data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * f_area_output;
const float *data_mask_ptr = data_mask + deformable_group_index * kernel_h * kernel_w * f_area_output;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
const int row = i + h_input;
const int col = j + w_input;
const int kernel_index = i * kernel_w + j;
const int offset_h_offset = 2 * kernel_index * f_area_output + h_output * width_output + w_output;
const int offset_w_offset = (2 * kernel_index + 1) * f_area_output + h_output * width_output + w_output;
const int mask_offset = kernel_index * f_area_output + h_output * width_output + w_output;
const float offset_h = data_offset_ptr[offset_h_offset];
const float offset_w = data_offset_ptr[offset_w_offset];
const float mask = data_mask_ptr[mask_offset];
float val = 0;
const float h_im = h_input + i * dilation_h + offset_h;
const float w_im = w_input + j * dilation_w + offset_w;
if (h_im > -1 && w_im > -1 && h_im < height_input && w_im < width_input)
{
val = dmcnIm2colBilinearFP32(data_input_ptr, width_input, height_input, width_input, h_im, w_im);
}
*data_output_ptr = val * mask;
data_output_ptr += f_area_output;
}
}
}
template<>
__global__ void DCNIm2colKernel(
const halfloat *data_input, const halfloat *data_offset, const halfloat *data_mask,
const int height_input, const int width_input, const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int channel_per_deformable_group,
const int batch_size, const int num_channels, const int deformable_group,
const int height_output, const int width_output,
halfloat *data_output, int edge)
{
KERNEL_POSITION;
const int f_area_input = width_input * height_input;
const int f_area_output = width_output * height_output;
// index index of output matrix
const int w_output = position % width_output;
const int h_output = (position / width_output) % height_output;
const int c_input = (position / width_output / height_output) % num_channels;
const int c_output = c_input * kernel_h * kernel_w;
const int deformable_group_index = c_input / channel_per_deformable_group;
const int h_input = h_output * stride_h - pad_h;
const int w_input = w_output * stride_w - pad_w;
halfloat width_input_hf = __float2half(width_input);
halfloat height_input_hf = __float2half(height_input);
halfloat h_input_hf = __float2half(h_input);
halfloat w_input_hf = __float2half(w_input);
halfloat dilation_h_hf = __float2half(dilation_h);
halfloat dilation_w_hf = __float2half(dilation_w);
int data_output_offset = c_input * kernel_h * kernel_w * f_area_output + h_output * width_output + w_output;
halfloat *data_output_ptr = data_output + data_output_offset;
const halfloat *data_input_ptr = data_input + c_input * f_area_input;
const halfloat *data_offset_ptr = data_offset + deformable_group_index * 2 * kernel_h * kernel_w * f_area_output;
const halfloat *data_mask_ptr = data_mask + deformable_group_index * kernel_h * kernel_w * f_area_output;
halfloat n_one = -1.0f;
halfloat zero = 0.0f;
for (int i = 0; i < kernel_h; ++i)
{
for (int j = 0; j < kernel_w; ++j)
{
halfloat i_hf = __float2half(i);
halfloat j_hf = __float2half(j);
const int row = i + h_input;
const int col = j + w_input;
const int kernel_index = i * kernel_w + j;
const int offset_h_offset = 2 * kernel_index * f_area_output + h_output * width_output + w_output;
const int offset_w_offset = (2 * kernel_index + 1) * f_area_output + h_output * width_output + w_output;
const int mask_offset = kernel_index * f_area_output + h_output * width_output + w_output;
const halfloat offset_h = data_offset_ptr[offset_h_offset];
const halfloat offset_w = data_offset_ptr[offset_w_offset];
const halfloat mask = data_mask_ptr[mask_offset];
halfloat val = zero;
halfloat h_im = h_input_hf + i_hf * dilation_h_hf + offset_h;
halfloat w_im = w_input_hf + j_hf * dilation_w_hf + offset_w;
if (h_im > n_one && w_im > n_one && h_im < height_input_hf && w_im < width_input_hf)
{
val = dmcnIm2colBilinearFP16(data_input_ptr, width_input_hf, height_input_hf, width_input_hf, h_im, w_im);
}
*data_output_ptr = val * mask;
data_output_ptr += f_area_output;
}
}
}
template<typename _T>
static __global__ void biasKernel(_T* data_input, const _T* bias, const int f_area, int edge) {
KERNEL_POSITION;
int bias_index = position / f_area;
data_input[position] += bias[bias_index];
}
namespace Plugin {
DCNLayer::DCNLayer() {
cublasCheck(cublasCreate(&cublasHandle_));
}
DCNLayer::~DCNLayer() {
cublasCheck(cublasDestroy(cublasHandle_));
}
template<typename _T>
inline void segemm_native(cublasHandle_t handle,
cublasOperation_t transa,
cublasOperation_t transb,
int m,
int n,
int k,
float alpha, /* host or device pointer */
const _T *A,
int lda,
const _T *B,
int ldb,
float beta, /* host or device pointer */
_T *C,
int ldc);
template<>
inline void segemm_native<float>(cublasHandle_t handle,
cublasOperation_t transa,
cublasOperation_t transb,
int m,
int n,
int k,
float alpha, /* host or device pointer */
const float *A,
int lda,
const float *B,
int ldb,
float beta, /* host or device pointer */
float *C,
int ldc) {
cublasCheck(cublasSgemm(handle, transa, transb, m, n, k, &alpha, A, lda, B, ldb, &beta, C, ldc));
//cublasCheck(cublasGemmEx(handle, transa, transb, m, n, k, &alpha, A, CUDA_R_32F, lda, B, CUDA_R_32F, ldb, &beta, C, CUDA_R_32F, ldc, CUDA_R_32F, CUBLAS_GEMM_DFALT));
}
template<>
inline void segemm_native<TRTInfer::halfloat>(cublasHandle_t handle,
cublasOperation_t transa,
cublasOperation_t transb,
int m,
int n,
int k,
float alpha,
const TRTInfer::halfloat *A,
int lda,
const TRTInfer::halfloat *B,
int ldb,
float beta,
TRTInfer::halfloat *C,
int ldc) {
auto halpha = TRTInfer::halfloat(alpha);
auto hbeta = TRTInfer::halfloat(beta);
//cublasCheck(cublasHgemm(handle, transa, transb, m, n, k, &halpha, A, lda, B, ldb, &hbeta, C, ldc));
cublasCheck(cublasGemmEx(handle, transa, transb, m, n, k, &halpha, A, CUDA_R_16F, lda, B, CUDA_R_16F, ldb, &hbeta, C, CUDA_R_16F, ldc, CUDA_R_16F, CUBLAS_GEMM_DFALT));
}
template<typename _T>
void enqueue_native(cublasHandle_t handle, const std::vector<GTensor>& inputs, std::vector<GTensor>& outputs, const std::vector<GTensor>& weights, void* workspace, cudaStream_t stream) {
auto& data = inputs[0];
auto& om = inputs[1];
auto& out = outputs[0];
int kernel_size = 3;
int deformable_group = 1;
size_t maskSize = (size_t)data.height_ * data.width_ * kernel_size * kernel_size * deformable_group;
size_t im2colSize = (size_t)data.channel_ * kernel_size * kernel_size * out.height_ * out.width_;
const int m = out.channel_;
const int n = out.count(2);
const int k = data.channel_ * kernel_size * kernel_size;
float alpha = 1.0;
float beta = 0.0;
cublasCheck(cublasSetStream(handle, stream));
for (int ibatch = 0; ibatch < data.num_; ++ibatch) {
_T* maskWorkspacePtr = (_T*)workspace + (maskSize + im2colSize) * ibatch;
_T* im2colWorkspacePtr = (_T*)workspace + (maskSize + im2colSize) * ibatch + maskSize;
_T* inputMask = om.ptr<_T>(ibatch, om.channel_ / 3 * 2);
ExecuteKernel(maskSize, sigmoidKernel, stream)(inputMask, maskWorkspacePtr, maskSize);
_T* datainput = data.ptr<_T>(ibatch);
_T* offset = om.ptr<_T>(ibatch);
ExecuteKernel(im2colSize, DCNIm2colKernel, stream)(
datainput, offset, maskWorkspacePtr, data.height_, data.width_, kernel_size, kernel_size, 1, 1, 1, 1, 1, 1, data.channel_, data.num_, data.channel_, deformable_group,
out.height_, out.width_, im2colWorkspacePtr, im2colSize);
_T* weightKernel = weights[0].ptr<_T>();
segemm_native(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m, k, alpha, im2colWorkspacePtr, n, weightKernel, k, beta, out.ptr<_T>(ibatch), n);
if (weights.size() > 1) {
_T* weightBias = weights[1].ptr<_T>();
size_t edge = out.count(1);
size_t area = out.count(2);
ExecuteKernel(edge, biasKernel, stream)(out.ptr<_T>(ibatch), weightBias, area, edge);
}
}
}
int DCNLayer::enqueue(const std::vector<GTensor>& inputs, std::vector<GTensor>& outputs, const std::vector<GTensor>& weights, void* workspace, cudaStream_t stream) {
if (config_->configDataType_ == TRTInfer::DataType::dtFloat) {
enqueue_native<float>(cublasHandle_, inputs, outputs, weights, workspace, stream);
}
else if (config_->configDataType_ == TRTInfer::DataType::dtHalfloat) {
enqueue_native<TRTInfer::halfloat>(cublasHandle_, inputs, outputs, weights, workspace, stream);
}
return 0;
}
nvinfer1::Dims DCNLayer::outputDims(int index, const nvinfer1::Dims* inputDims, int nbInputDims) {
return nvinfer1::Dims3(config_->weights_[0]->num(), inputDims[0].d[1], inputDims[0].d[2]);
}
size_t DCNLayer::getWorkspaceSize(int maxBatchSize) const {
		// size of the im2col workspace
int kernel_size = 3;
int deformable_group = 1;
//inputChannel * k * k * outputHeight * outputWidth
size_t im2colSize = (size_t)config_->input[0].d[0] * kernel_size * kernel_size * config_->output[0].d[1] * config_->output[0].d[2];
size_t maskSize = (size_t)config_->input[0].d[1] * config_->input[0].d[2] * kernel_size * kernel_size * deformable_group;
config_->workspaceSize_ = (im2colSize + maskSize) * maxBatchSize * TRTInfer::dataTypeSize(config_->configDataType_);
return config_->workspaceSize_;
}
std::shared_ptr<LayerConfig> DCNLayer::config(const std::string& layerName) {
auto cfg = TRTPlugin::config(layerName);
if (this->phase_ == CompilePhase) {
int p = layerName.find('@');
Assert(p != -1);
auto param = layerName.substr(p + 1);
Json::Value pjson;
if (!Json::Reader().parse(param, pjson)) {
LOG(LFATAL) << "can not parse param: " << param.c_str();
}
//cfg->supportDataType_ = {nvinfer1::DataType::kHALF, nvinfer1::DataType::kFLOAT};
//cfg->supportDataType_ = {nvinfer1::DataType::kHALF};
		// avoid mixing half and float here, because the data type is chosen by timing the computation
cfg->supportDataType_ = {nvinfer1::DataType::kFLOAT};
auto& weights = pjson["weights"];
cfg->weights_.resize(weights.size());
for (int i = 0; i < weights.size(); ++i) {
auto& weightShape = weights[i];
std::vector<int> dims;
for (int j = 0; j < weightShape.size(); ++j) {
dims.push_back(weightShape[j].asInt());
}
cfg->weights_[i].reset(new TRTInfer::Tensor(dims.size(), dims.data(), TRTInfer::DataType::dtFloat));
}
}
return cfg;
}
}
RegisterPlugin(DCNLayer); |
1ee945c564294b5bf6b9ac92a475c0e1da9282d5.hip | // !!! This is a file automatically generated by hipify!!!
//#pragma once
//#pragma comment(lib, "cudart.lib")
//#include <iostream>
//
//
//#include <minmax.h>
//#include "../../nclgl/OGLRenderer.h"
//#include <cuda_gl_interop.h>
//#include <hip/hip_runtime.h>
//
//#include <SOIL.h>
//
//#include "../../nclgl/Mesh.h"
//#include "../../nclgl/Shader.h"
//#include "../../nclgl/GameTimer.h"
//
//using namespace std;
//
//GLuint waterTexture;
//
//class VBOWaterResource : public Mesh
//{
//public:
// VBOWaterResource();
// ~VBOWaterResource();
// void initVBO(MeshBuffer type, float* data, int comps, int num, unsigned int mode);
// void initIBO(unsigned int* data, int num, unsigned int mode);
// void draw() const;
//
// void update();
//private:
// void generateGrid();
//
// unsigned int restart_index;
// int width, height;
// struct cudaGraphicsResource* cudaVBO;
//};
#include "Water.h"
VBOWaterResource::VBOWaterResource(): width(100), height(100) {
width = max(2, width);
height = max(2, height);
time = 0;
generateGrid();
//hipGLSetGLDevice(0);
hipError_t t = hipGraphicsGLRegisterBuffer(&cudaVBO, bufferObject[VERTEX_BUFFER]/*vbo[VBO_VERTEX]*/, hipGraphicsMapFlagsNone);
if (/*hipGraphicsGLRegisterBuffer(&cudaVBO, bufferObject[VERTEX_BUFFER]/*vbo[VBO_VERTEX], hipGraphicsMapFlagsNone) */t != hipSuccess)
{
printf("Failed with error: %s\n\n\n\n", hipGetErrorString(t));
}
}
VBOWaterResource::~VBOWaterResource() {
if (hipGraphicsUnregisterResource(cudaVBO) != hipSuccess)
{
printf("Failed\n");
}
}
void VBOWaterResource::generateGrid() {
int loop_size = 2*height + 1;
numVertices = width*height;
numIndices = (width - 1)*loop_size;
vertices = new Vector3[numVertices];
normals = new Vector3[numVertices];
textureCoords = new Vector2[numVertices];
indices = new unsigned int [numIndices];
type = GL_TRIANGLE_STRIP;
for (int x = 0; x < width; x++) {
int loops = x*loop_size;
for (int y = 0; y < height; y++) {
int offset = y*width + x;
if (x != width - 1)
indices[loops + 2*y + 1] = offset;
if (x != 0)
indices[loops - loop_size + 2*y] = offset;
//vertices[3*offset + 0].x = 2*(x*1.0f/(width-1)) - 1;
//vertices[3*offset + 1] = 0;
//vertices[3*offset + 2] = 2*(y*1.0f/(height-1)) - 1;
vertices[offset] = Vector3(2*(x*1.0f/(width-1)) - 1, 0, 2*(y*1.0f/(height-1)) - 1);
//normals[3*offset + 0] = 0;
//normals[3*offset + 1] = 1;
//normals[3*offset + 2] = 0;
normals[offset] = Vector3(0,1,0);
// textureCoords[2*offset + 0] = x*1.0f/(width-1);
//textureCoords[2*offset + 1] = y*1.0f/(height-1);
textureCoords[offset] = Vector2(x*1.0f/(width-1),y*1.0f/(height-1) );
}
if (x != width - 1)
indices[loops + loop_size - 1] = width*height;
}
restart_index = width*height;
// glBindVertexArray(vao[0]);
///*initVBO(VBO_VERTEX*/bufferObject((), (float*)verts, 3, num_verts, GL_DYNAMIC_DRAW);
///*initVBO*/bufferObject(VBO_NORMAL, (float*)norms, 3, num_verts, GL_DYNAMIC_DRAW);
//initVBO(VBO_TEXCOORD, (float*)texcoords, 2, num_verts, GL_DYNAMIC_DRAW);
//initIBO(indices, num_indices, GL_DYNAMIC_DRAW);
BufferData();
glBindVertexArray(0);
// delete[] vertices;
// delete[] normals;
// delete[] textureCoords;
// delete[] indices;
}
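// Animates the water surface: one thread per grid vertex. Each thread rewrites the
// y (height) component of its interleaved xyz vertex (3 floats per vertex) as the sum
// of a distance-based radial sine wave and two axis-aligned sine waves.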
__global__ void vboTestResource_update(float* ptr, int width, int height, float time) {
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int offset = y*width + x;
if (x >= width || y >= height) return;
float period = 10; // smaller number = fewer waves
float rate = 1.0; //smaller number = slower waves
float cx = x*0.5f/width - 0.5f;//affects origin of waves ... probably
float cy = y*0.5f/height - 0.5f;//affects origin of waves ... probably
float wave = sin(sqrt(cx*cx + cy*cy)*period - rate*time);
int sign = wave>0?1:-1;
wave = sign*sqrt(sign*wave);
ptr[3*offset + 1] = wave/20; //smaller number, more wavey waves
period *= 3;
rate *= -9;
ptr[3*offset + 1] += (sin(x*period/(width - 1) + rate*time) + sin(y*period/(height - 1) + rate*time))/60;//bigger number, more wavey waves
}
void VBOWaterResource::update(float msec) {
time += msec * 0.0009;//GameTimer().GetTimedMS();
float* devBuff;
size_t size;
dim3 threadsPerBlock(8, 8);
dim3 numBlocks((width - 1)/threadsPerBlock.x + 1, (height - 1)/threadsPerBlock.y + 1);
if (hipGraphicsMapResources(1, &cudaVBO, 0) != hipSuccess)
{
printf("Failed\n");
}
hipGraphicsResourceGetMappedPointer((void**)&devBuff, &size, cudaVBO);
hipLaunchKernelGGL(( vboTestResource_update), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, devBuff, width, height, time);
if (hipGraphicsUnmapResources(1, &cudaVBO, 0) != hipSuccess)
{
printf("Failed\n");
}
}
void VBOWaterResource::initVBO(MeshBuffer type, float* data, int comps, int num, unsigned int mode) {
glBindVertexArray(arrayObject);
glGenBuffers(1, &bufferObject[type]);
glBindBuffer(GL_ARRAY_BUFFER, bufferObject[type]);
glBufferData(GL_ARRAY_BUFFER, num*comps*sizeof(GLfloat), (GLvoid*)data, mode);
glVertexAttribPointer((GLuint)type, comps, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray((GLuint)type);
glBindVertexArray(0);
}
void VBOWaterResource::initIBO(unsigned int* data, int num, unsigned int mode) {
glBindVertexArray(arrayObject);
numIndices = num;
glGenBuffers(1, &bufferObject[INDEX_BUFFER]);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bufferObject[INDEX_BUFFER]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, num * sizeof(GLuint), (GLvoid*)data, mode);
glBindVertexArray(0);
}
void VBOWaterResource::draw() const {
//glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
glPrimitiveRestartIndex(restart_index);
glEnable(GL_PRIMITIVE_RESTART);
{
glBindVertexArray(arrayObject);
glDrawElements(type, numIndices, GL_UNSIGNED_INT, 0);
glBindVertexArray(0);
}
glDisable(GL_PRIMITIVE_RESTART);
//glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
} | 1ee945c564294b5bf6b9ac92a475c0e1da9282d5.cu | //#pragma once
//#pragma comment(lib, "cudart.lib")
//#include <iostream>
//
//
//#include <minmax.h>
//#include "../../nclgl/OGLRenderer.h"
//#include <cuda_gl_interop.h>
//#include <cuda_runtime.h>
//
//#include <SOIL.h>
//
//#include "../../nclgl/Mesh.h"
//#include "../../nclgl/Shader.h"
//#include "../../nclgl/GameTimer.h"
//
//using namespace std;
//
//GLuint waterTexture;
//
//class VBOWaterResource : public Mesh
//{
//public:
// VBOWaterResource();
// ~VBOWaterResource();
// void initVBO(MeshBuffer type, float* data, int comps, int num, unsigned int mode);
// void initIBO(unsigned int* data, int num, unsigned int mode);
// void draw() const;
//
// void update();
//private:
// void generateGrid();
//
// unsigned int restart_index;
// int width, height;
// struct cudaGraphicsResource* cudaVBO;
//};
#include "Water.h"
VBOWaterResource::VBOWaterResource(): width(100), height(100) {
width = max(2, width);
height = max(2, height);
time = 0;
generateGrid();
//cudaGLSetGLDevice(0);
cudaError t = cudaGraphicsGLRegisterBuffer(&cudaVBO, bufferObject[VERTEX_BUFFER]/*vbo[VBO_VERTEX]*/, cudaGraphicsMapFlagsNone);
if (/*cudaGraphicsGLRegisterBuffer(&cudaVBO, bufferObject[VERTEX_BUFFER]/*vbo[VBO_VERTEX], cudaGraphicsMapFlagsNone) */t != cudaSuccess)
{
printf("Failed with error: %s\n\n\n\n", cudaGetErrorString(t));
}
}
VBOWaterResource::~VBOWaterResource() {
if (cudaGraphicsUnregisterResource(cudaVBO) != cudaSuccess)
{
printf("Failed\n");
}
}
void VBOWaterResource::generateGrid() {
int loop_size = 2*height + 1;
numVertices = width*height;
numIndices = (width - 1)*loop_size;
vertices = new Vector3[numVertices];
normals = new Vector3[numVertices];
textureCoords = new Vector2[numVertices];
indices = new unsigned int [numIndices];
type = GL_TRIANGLE_STRIP;
for (int x = 0; x < width; x++) {
int loops = x*loop_size;
for (int y = 0; y < height; y++) {
int offset = y*width + x;
if (x != width - 1)
indices[loops + 2*y + 1] = offset;
if (x != 0)
indices[loops - loop_size + 2*y] = offset;
//vertices[3*offset + 0].x = 2*(x*1.0f/(width-1)) - 1;
//vertices[3*offset + 1] = 0;
//vertices[3*offset + 2] = 2*(y*1.0f/(height-1)) - 1;
vertices[offset] = Vector3(2*(x*1.0f/(width-1)) - 1, 0, 2*(y*1.0f/(height-1)) - 1);
//normals[3*offset + 0] = 0;
//normals[3*offset + 1] = 1;
//normals[3*offset + 2] = 0;
normals[offset] = Vector3(0,1,0);
// textureCoords[2*offset + 0] = x*1.0f/(width-1);
//textureCoords[2*offset + 1] = y*1.0f/(height-1);
textureCoords[offset] = Vector2(x*1.0f/(width-1),y*1.0f/(height-1) );
}
if (x != width - 1)
indices[loops + loop_size - 1] = width*height;
}
restart_index = width*height;
// glBindVertexArray(vao[0]);
///*initVBO(VBO_VERTEX*/bufferObject((), (float*)verts, 3, num_verts, GL_DYNAMIC_DRAW);
///*initVBO*/bufferObject(VBO_NORMAL, (float*)norms, 3, num_verts, GL_DYNAMIC_DRAW);
//initVBO(VBO_TEXCOORD, (float*)texcoords, 2, num_verts, GL_DYNAMIC_DRAW);
//initIBO(indices, num_indices, GL_DYNAMIC_DRAW);
BufferData();
glBindVertexArray(0);
// delete[] vertices;
// delete[] normals;
// delete[] textureCoords;
// delete[] indices;
}
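// Animates the water surface: one thread per grid vertex. Each thread rewrites the
// y (height) component of its interleaved xyz vertex (3 floats per vertex) as the sum
// of a distance-based radial sine wave and two axis-aligned sine waves.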
__global__ void vboTestResource_update(float* ptr, int width, int height, float time) {
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int offset = y*width + x;
if (x >= width || y >= height) return;
float period = 10; // smaller number = fewer waves
float rate = 1.0; //smaller number = slower waves
float cx = x*0.5f/width - 0.5f;//affects origin of waves ... probably
float cy = y*0.5f/height - 0.5f;//affects origin of waves ... probably
float wave = sin(sqrt(cx*cx + cy*cy)*period - rate*time);
int sign = wave>0?1:-1;
wave = sign*sqrt(sign*wave);
ptr[3*offset + 1] = wave/20; //smaller number, more wavey waves
period *= 3;
rate *= -9;
ptr[3*offset + 1] += (sin(x*period/(width - 1) + rate*time) + sin(y*period/(height - 1) + rate*time))/60;//bigger number, more wavey waves
}
void VBOWaterResource::update(float msec) {
time += msec * 0.0009;//GameTimer().GetTimedMS();
float* devBuff;
size_t size;
dim3 threadsPerBlock(8, 8);
dim3 numBlocks((width - 1)/threadsPerBlock.x + 1, (height - 1)/threadsPerBlock.y + 1);
if (cudaGraphicsMapResources(1, &cudaVBO, 0) != cudaSuccess)
{
printf("Failed\n");
}
cudaGraphicsResourceGetMappedPointer((void**)&devBuff, &size, cudaVBO);
vboTestResource_update<<<numBlocks, threadsPerBlock>>>(devBuff, width, height, time);
if (cudaGraphicsUnmapResources(1, &cudaVBO, 0) != cudaSuccess)
{
printf("Failed\n");
}
}
void VBOWaterResource::initVBO(MeshBuffer type, float* data, int comps, int num, unsigned int mode) {
glBindVertexArray(arrayObject);
glGenBuffers(1, &bufferObject[type]);
glBindBuffer(GL_ARRAY_BUFFER, bufferObject[type]);
glBufferData(GL_ARRAY_BUFFER, num*comps*sizeof(GLfloat), (GLvoid*)data, mode);
glVertexAttribPointer((GLuint)type, comps, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray((GLuint)type);
glBindVertexArray(0);
}
void VBOWaterResource::initIBO(unsigned int* data, int num, unsigned int mode) {
glBindVertexArray(arrayObject);
numIndices = num;
glGenBuffers(1, &bufferObject[INDEX_BUFFER]);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bufferObject[INDEX_BUFFER]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, num * sizeof(GLuint), (GLvoid*)data, mode);
glBindVertexArray(0);
}
void VBOWaterResource::draw() const {
//glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
glPrimitiveRestartIndex(restart_index);
glEnable(GL_PRIMITIVE_RESTART);
{
glBindVertexArray(arrayObject);
glDrawElements(type, numIndices, GL_UNSIGNED_INT, 0);
glBindVertexArray(0);
}
glDisable(GL_PRIMITIVE_RESTART);
//glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
} |
4bec3e78f99845fe1f6576df7a0ba94e0e50f707.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/kernels/affine_grid_grad_kernel.h"
#include "glog/logging.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_device_function.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/affine_grid_utils.h"
namespace phi {
template <typename T>
__global__ void LinspaceKernel(T start, T step, int64_t size, T* out) {
CUDA_KERNEL_LOOP(index, size) { out[index] = start + step * index; }
}
template <typename T>
struct Linspace<phi::GPUContext, T> {
void operator()(T start,
T end,
int count,
bool align_corners,
DenseTensor* numbers,
const phi::GPUContext& dev_ctx) {
numbers->Resize(phi::make_ddim({count}));
T* number_data = dev_ctx.template Alloc<T>(numbers);
T slice = (end - start) / (T)(count - 1);
if (!align_corners) {
slice = (end - start) / (T)count;
start *= (T)(count - 1) / (T)count;
}
auto stream = dev_ctx.stream();
int block = 512;
int grid = (count + block - 1) / block;
hipLaunchKernelGGL(( LinspaceKernel<T>)
, dim3(grid), dim3(block), 0, stream, start, slice, count, number_data);
}
};
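// Gradient kernels: each thread handles one sampling location of the output grid,
// reconstructs its normalized coordinates from (start, step), and atomically
// accumulates out_grad * [w_coor, h_coor(, d_coor), 1] into the per-sample theta
// gradient (N x 2 x 3 for the 4-D case, N x 3 x 4 for the 5-D case).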
template <typename T>
__global__ void affine_grid_grad_kernel_4d(const int count,
int n,
int out_h,
int out_w,
T h_start,
T w_start,
T h_step,
T w_step,
const T* out_grad, // N, H, W, 2
T* theta_grad) { // N, 2, 3
CUDA_KERNEL_LOOP(index, count) {
int w = index % out_w;
int h = (index / out_w) % out_h;
int n = index / (out_w * out_h);
T h_coor = h_step * static_cast<T>(h) + static_cast<T>(h_start);
T w_coor = w_step * static_cast<T>(w) + static_cast<T>(w_start);
int theta_offset = n * 6; // 2 * 3;
T out_grad_x = out_grad[index * 2];
phi::CudaAtomicAdd(theta_grad + theta_offset, out_grad_x * w_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 1, out_grad_x * h_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 2, out_grad_x);
T out_grad_y = out_grad[index * 2 + 1];
phi::CudaAtomicAdd(theta_grad + theta_offset + 3, out_grad_y * w_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 4, out_grad_y * h_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 5, out_grad_y);
}
}
template <typename T>
__global__ void affine_grid_grad_kernel_5d(const int count,
int n,
int out_d,
int out_h,
int out_w,
T d_start,
T h_start,
T w_start,
T d_step,
T h_step,
T w_step,
const T* out_grad, // N, D, H, W, 3
T* theta_grad) { // N, 3, 4
CUDA_KERNEL_LOOP(index, count) {
int w = index % out_w;
int h = (index / out_w) % out_h;
int d = (index / (out_w * out_h)) % out_d;
int n = index / (out_w * out_h * out_d);
T d_coor = d_step * static_cast<T>(d) + static_cast<T>(d_start);
T h_coor = h_step * static_cast<T>(h) + static_cast<T>(h_start);
T w_coor = w_step * static_cast<T>(w) + static_cast<T>(w_start);
int theta_offset = n * 12; // 3 * 4;
T out_grad_x = out_grad[index * 3];
phi::CudaAtomicAdd(theta_grad + theta_offset, out_grad_x * w_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 1, out_grad_x * h_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 2, out_grad_x * d_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 3, out_grad_x);
T out_grad_y = out_grad[index * 3 + 1];
phi::CudaAtomicAdd(theta_grad + theta_offset + 4, out_grad_y * w_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 5, out_grad_y * h_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 6, out_grad_y * d_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 7, out_grad_y);
T out_grad_z = out_grad[index * 3 + 2];
phi::CudaAtomicAdd(theta_grad + theta_offset + 8, out_grad_z * w_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 9, out_grad_z * h_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 10, out_grad_z * d_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 11, out_grad_z);
}
}
template <typename T, typename Context>
void AffineGridGrad4DCUDAKernel(const Context& dev_ctx,
const DenseTensor& output_grad,
const IntArray& outputShape,
bool align_corners,
DenseTensor* input_grad) {
auto& theta_grad = input_grad;
int n = output_grad.dims()[0];
auto& size_attr = outputShape.GetData();
int h = 0;
int w = 0;
h = size_attr[2];
w = size_attr[3];
theta_grad->Resize(phi::make_ddim({n, 2, 3}));
T* theta_grad_data = dev_ctx.template Alloc<T>(theta_grad);
phi::funcs::SetConstant<phi::GPUContext, T>()(
dev_ctx, theta_grad, static_cast<T>(0));
T h_step;
T w_step;
T h_start = -1;
T w_start = -1;
if (align_corners) {
h_step = static_cast<T>(2) / static_cast<T>(h - 1);
w_step = static_cast<T>(2) / static_cast<T>(w - 1);
} else {
h_step = static_cast<T>(2) / static_cast<T>(h);
w_step = static_cast<T>(2) / static_cast<T>(w);
h_start *= static_cast<T>(h - 1) / static_cast<T>(h);
w_start *= static_cast<T>(w - 1) / static_cast<T>(w);
}
const int count = n * h * w;
VLOG(3) << "count: " << count << "; h_step: " << h_step
<< "; w_step: " << w_step << "; h_start: " << h_start
<< "; w_start: " << w_start;
int block = 512;
int grid = (count + block - 1) / block;
auto cu_stream = dev_ctx.stream();
hipLaunchKernelGGL(( affine_grid_grad_kernel_4d), dim3(grid), dim3(block), 0, cu_stream,
count,
n,
h,
w,
h_start,
w_start,
h_step,
w_step,
output_grad.data<T>(),
theta_grad_data);
}
template <typename T, typename Context>
void AffineGridGrad5DCUDAKernel(const Context& dev_ctx,
const DenseTensor& output_grad,
const IntArray& outputShape,
bool align_corners,
DenseTensor* input_grad) {
// VLOG(0) << "in affine grid backward 5D";
auto& theta_grad = input_grad;
int n = output_grad.dims()[0];
auto& size_attr = outputShape.GetData();
int d = 0;
int h = 0;
int w = 0;
d = size_attr[2];
h = size_attr[3];
w = size_attr[4];
theta_grad->Resize(phi::make_ddim({n, 3, 4}));
T* theta_grad_data = dev_ctx.template Alloc<T>(theta_grad);
phi::funcs::SetConstant<phi::GPUContext, T>()(
dev_ctx, theta_grad, static_cast<T>(0));
T d_step;
T h_step;
T w_step;
T d_start = -1;
T h_start = -1;
T w_start = -1;
if (align_corners) {
d_step = static_cast<T>(2) / static_cast<T>(d - 1);
h_step = static_cast<T>(2) / static_cast<T>(h - 1);
w_step = static_cast<T>(2) / static_cast<T>(w - 1);
} else {
d_step = static_cast<T>(2) / static_cast<T>(d);
h_step = static_cast<T>(2) / static_cast<T>(h);
w_step = static_cast<T>(2) / static_cast<T>(w);
d_start *= static_cast<T>(d - 1) / static_cast<T>(d);
h_start *= static_cast<T>(h - 1) / static_cast<T>(h);
w_start *= static_cast<T>(w - 1) / static_cast<T>(w);
}
const int count = n * d * h * w;
int block = 512;
int grid = (count + block - 1) / block;
auto cu_stream = dev_ctx.stream();
hipLaunchKernelGGL(( affine_grid_grad_kernel_5d), dim3(grid), dim3(block), 0, cu_stream,
count,
n,
d,
h,
w,
d_start,
h_start,
w_start,
d_step,
h_step,
w_step,
output_grad.data<T>(),
theta_grad_data);
}
template <typename T, typename Context>
void AffineGridGradCUDAKernel(const Context& dev_ctx,
const DenseTensor& input,
const IntArray& outputShape,
bool align_corners,
DenseTensor* output) {
auto* theta = &input;
auto theta_size = theta->dims().size();
if (theta_size == 4) {
AffineGridGrad4DCUDAKernel<T, Context>(
dev_ctx, input, outputShape, align_corners, output);
} else {
AffineGridGrad5DCUDAKernel<T, Context>(
dev_ctx, input, outputShape, align_corners, output);
}
}
} // namespace phi
PD_REGISTER_KERNEL(affine_grid_grad,
GPU,
ALL_LAYOUT,
phi::AffineGridGradCUDAKernel,
float,
double){};
| 4bec3e78f99845fe1f6576df7a0ba94e0e50f707.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/kernels/affine_grid_grad_kernel.h"
#include "glog/logging.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/backends/gpu/gpu_device_function.h"
#include "paddle/phi/backends/gpu/gpu_primitives.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/affine_grid_utils.h"
namespace phi {
template <typename T>
__global__ void LinspaceKernel(T start, T step, int64_t size, T* out) {
CUDA_KERNEL_LOOP(index, size) { out[index] = start + step * index; }
}
template <typename T>
struct Linspace<phi::GPUContext, T> {
void operator()(T start,
T end,
int count,
bool align_corners,
DenseTensor* numbers,
const phi::GPUContext& dev_ctx) {
numbers->Resize(phi::make_ddim({count}));
T* number_data = dev_ctx.template Alloc<T>(numbers);
T slice = (end - start) / (T)(count - 1);
if (!align_corners) {
slice = (end - start) / (T)count;
start *= (T)(count - 1) / (T)count;
}
auto stream = dev_ctx.stream();
int block = 512;
int grid = (count + block - 1) / block;
LinspaceKernel<T>
<<<grid, block, 0, stream>>>(start, slice, count, number_data);
}
};
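// Gradient kernels: each thread handles one sampling location of the output grid,
// reconstructs its normalized coordinates from (start, step), and atomically
// accumulates out_grad * [w_coor, h_coor(, d_coor), 1] into the per-sample theta
// gradient (N x 2 x 3 for the 4-D case, N x 3 x 4 for the 5-D case).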
template <typename T>
__global__ void affine_grid_grad_kernel_4d(const int count,
int n,
int out_h,
int out_w,
T h_start,
T w_start,
T h_step,
T w_step,
const T* out_grad, // N, H, W, 2
T* theta_grad) { // N, 2, 3
CUDA_KERNEL_LOOP(index, count) {
int w = index % out_w;
int h = (index / out_w) % out_h;
int n = index / (out_w * out_h);
T h_coor = h_step * static_cast<T>(h) + static_cast<T>(h_start);
T w_coor = w_step * static_cast<T>(w) + static_cast<T>(w_start);
int theta_offset = n * 6; // 2 * 3;
T out_grad_x = out_grad[index * 2];
phi::CudaAtomicAdd(theta_grad + theta_offset, out_grad_x * w_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 1, out_grad_x * h_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 2, out_grad_x);
T out_grad_y = out_grad[index * 2 + 1];
phi::CudaAtomicAdd(theta_grad + theta_offset + 3, out_grad_y * w_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 4, out_grad_y * h_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 5, out_grad_y);
}
}
template <typename T>
__global__ void affine_grid_grad_kernel_5d(const int count,
int n,
int out_d,
int out_h,
int out_w,
T d_start,
T h_start,
T w_start,
T d_step,
T h_step,
T w_step,
const T* out_grad, // N, D, H, W, 3
T* theta_grad) { // N, 3, 4
CUDA_KERNEL_LOOP(index, count) {
int w = index % out_w;
int h = (index / out_w) % out_h;
int d = (index / (out_w * out_h)) % out_d;
int n = index / (out_w * out_h * out_d);
T d_coor = d_step * static_cast<T>(d) + static_cast<T>(d_start);
T h_coor = h_step * static_cast<T>(h) + static_cast<T>(h_start);
T w_coor = w_step * static_cast<T>(w) + static_cast<T>(w_start);
int theta_offset = n * 12; // 3 * 4;
T out_grad_x = out_grad[index * 3];
phi::CudaAtomicAdd(theta_grad + theta_offset, out_grad_x * w_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 1, out_grad_x * h_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 2, out_grad_x * d_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 3, out_grad_x);
T out_grad_y = out_grad[index * 3 + 1];
phi::CudaAtomicAdd(theta_grad + theta_offset + 4, out_grad_y * w_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 5, out_grad_y * h_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 6, out_grad_y * d_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 7, out_grad_y);
T out_grad_z = out_grad[index * 3 + 2];
phi::CudaAtomicAdd(theta_grad + theta_offset + 8, out_grad_z * w_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 9, out_grad_z * h_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 10, out_grad_z * d_coor);
phi::CudaAtomicAdd(theta_grad + theta_offset + 11, out_grad_z);
}
}
template <typename T, typename Context>
void AffineGridGrad4DCUDAKernel(const Context& dev_ctx,
const DenseTensor& output_grad,
const IntArray& outputShape,
bool align_corners,
DenseTensor* input_grad) {
auto& theta_grad = input_grad;
int n = output_grad.dims()[0];
auto& size_attr = outputShape.GetData();
int h = 0;
int w = 0;
h = size_attr[2];
w = size_attr[3];
theta_grad->Resize(phi::make_ddim({n, 2, 3}));
T* theta_grad_data = dev_ctx.template Alloc<T>(theta_grad);
phi::funcs::SetConstant<phi::GPUContext, T>()(
dev_ctx, theta_grad, static_cast<T>(0));
T h_step;
T w_step;
T h_start = -1;
T w_start = -1;
if (align_corners) {
h_step = static_cast<T>(2) / static_cast<T>(h - 1);
w_step = static_cast<T>(2) / static_cast<T>(w - 1);
} else {
h_step = static_cast<T>(2) / static_cast<T>(h);
w_step = static_cast<T>(2) / static_cast<T>(w);
h_start *= static_cast<T>(h - 1) / static_cast<T>(h);
w_start *= static_cast<T>(w - 1) / static_cast<T>(w);
}
const int count = n * h * w;
VLOG(3) << "count: " << count << "; h_step: " << h_step
<< "; w_step: " << w_step << "; h_start: " << h_start
<< "; w_start: " << w_start;
int block = 512;
int grid = (count + block - 1) / block;
auto cu_stream = dev_ctx.stream();
affine_grid_grad_kernel_4d<<<grid, block, 0, cu_stream>>>(
count,
n,
h,
w,
h_start,
w_start,
h_step,
w_step,
output_grad.data<T>(),
theta_grad_data);
}
template <typename T, typename Context>
void AffineGridGrad5DCUDAKernel(const Context& dev_ctx,
const DenseTensor& output_grad,
const IntArray& outputShape,
bool align_corners,
DenseTensor* input_grad) {
// VLOG(0) << "in affine grid backward 5D";
auto& theta_grad = input_grad;
int n = output_grad.dims()[0];
auto& size_attr = outputShape.GetData();
int d = 0;
int h = 0;
int w = 0;
d = size_attr[2];
h = size_attr[3];
w = size_attr[4];
theta_grad->Resize(phi::make_ddim({n, 3, 4}));
T* theta_grad_data = dev_ctx.template Alloc<T>(theta_grad);
phi::funcs::SetConstant<phi::GPUContext, T>()(
dev_ctx, theta_grad, static_cast<T>(0));
T d_step;
T h_step;
T w_step;
T d_start = -1;
T h_start = -1;
T w_start = -1;
if (align_corners) {
d_step = static_cast<T>(2) / static_cast<T>(d - 1);
h_step = static_cast<T>(2) / static_cast<T>(h - 1);
w_step = static_cast<T>(2) / static_cast<T>(w - 1);
} else {
d_step = static_cast<T>(2) / static_cast<T>(d);
h_step = static_cast<T>(2) / static_cast<T>(h);
w_step = static_cast<T>(2) / static_cast<T>(w);
d_start *= static_cast<T>(d - 1) / static_cast<T>(d);
h_start *= static_cast<T>(h - 1) / static_cast<T>(h);
w_start *= static_cast<T>(w - 1) / static_cast<T>(w);
}
const int count = n * d * h * w;
int block = 512;
int grid = (count + block - 1) / block;
auto cu_stream = dev_ctx.stream();
affine_grid_grad_kernel_5d<<<grid, block, 0, cu_stream>>>(
count,
n,
d,
h,
w,
d_start,
h_start,
w_start,
d_step,
h_step,
w_step,
output_grad.data<T>(),
theta_grad_data);
}
template <typename T, typename Context>
void AffineGridGradCUDAKernel(const Context& dev_ctx,
const DenseTensor& input,
const IntArray& outputShape,
bool align_corners,
DenseTensor* output) {
auto* theta = &input;
auto theta_size = theta->dims().size();
if (theta_size == 4) {
AffineGridGrad4DCUDAKernel<T, Context>(
dev_ctx, input, outputShape, align_corners, output);
} else {
AffineGridGrad5DCUDAKernel<T, Context>(
dev_ctx, input, outputShape, align_corners, output);
}
}
} // namespace phi
PD_REGISTER_KERNEL(affine_grid_grad,
GPU,
ALL_LAYOUT,
phi::AffineGridGradCUDAKernel,
float,
double){};
|
a7cdb38bb40417e20183a9042a7051b53fc3dee2.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include "caffe/layers/silence_layer.hpp"
#include "caffe/util/math_functions.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
template<typename Dtype>
void SilenceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
// Do nothing.
}
template<typename Dtype>
void SilenceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
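  // The silence layer produces no top gradients, so backward simply zeroes the diff
  // of every bottom blob that requests backpropagation.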
for (int_tp i = 0; i < bottom.size(); ++i) {
if (propagate_down[i]) {
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_ROCM
caffe_gpu_set(bottom[i]->count(), Dtype(0),
bottom[i]->mutable_gpu_diff());
#endif // USE_ROCM
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
viennacl::ocl::kernel &oclk_gpu_set = program.get_kernel(
CL_KERNEL_SELECT("gpu_set"));
viennacl::ocl::enqueue(
oclk_gpu_set(
bottom[i]->count(), Dtype(0),
WrapHandle((cl_mem) bottom[i]->mutable_gpu_diff(), &ctx)),
ctx.get_queue());
ctx.get_queue().finish();
#endif
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SilenceLayer);
} // namespace caffe
| a7cdb38bb40417e20183a9042a7051b53fc3dee2.cu | #include <vector>
#include "caffe/layers/silence_layer.hpp"
#include "caffe/util/math_functions.hpp"
#ifdef USE_GREENTEA
#include "caffe/greentea/greentea.hpp"
#include "caffe/greentea/greentea_math_functions.hpp"
#endif
namespace caffe {
template<typename Dtype>
void SilenceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
// Do nothing.
}
template<typename Dtype>
void SilenceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
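  // The silence layer produces no top gradients, so backward simply zeroes the diff
  // of every bottom blob that requests backpropagation.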
for (int_tp i = 0; i < bottom.size(); ++i) {
if (propagate_down[i]) {
if (this->device_->backend() == BACKEND_CUDA) {
#ifdef USE_CUDA
caffe_gpu_set(bottom[i]->count(), Dtype(0),
bottom[i]->mutable_gpu_diff());
#endif // USE_CUDA
} else {
#ifdef USE_GREENTEA
viennacl::ocl::context &ctx = viennacl::ocl::get_context(
this->device_->id());
viennacl::ocl::program &program = this->device_->program();
viennacl::ocl::kernel &oclk_gpu_set = program.get_kernel(
CL_KERNEL_SELECT("gpu_set"));
viennacl::ocl::enqueue(
oclk_gpu_set(
bottom[i]->count(), Dtype(0),
WrapHandle((cl_mem) bottom[i]->mutable_gpu_diff(), &ctx)),
ctx.get_queue());
ctx.get_queue().finish();
#endif
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SilenceLayer);
} // namespace caffe
|
a58943f5acc5d3f1d7940d0229514ee36ee5d9b8.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <iostream>
#include "common.h"
// GPU kernel to perform Vector Addition
__global__ void vector_subractionKernel(float* ad, float* bd, float* cd, int size)
{
	// Compute the global thread id
	int th_id = threadIdx.x + blockIdx.x * blockDim.x;
	// Perform vector subtraction with a grid-stride loop
	while(th_id<size) {
		cd[th_id] = ad[th_id] - bd[th_id];
		th_id += blockDim.x * gridDim.x;
}
}
bool subtractVectorGPU( float* a, float* b, float* c, int size )
{
// Error return value
hipError_t status;
// Number of bytes in a vector
int bytes = size * sizeof(float);
float *ad, *bd, *cd;
	// Device pointers to the pinned host memory
hipHostGetDevicePointer( (void**)&ad, a, 0 );
hipHostGetDevicePointer( (void**)&bd, b, 0 );
hipHostGetDevicePointer( (void**)&cd, c, 0 );
// Specify the size of the grid and the size of the block
	dim3 dimBlock(1024); // 1024 threads per block
	dim3 dimGrid((size+1023)/1024); // enough blocks to cover all size elements
// Launch the kernel on a size-by-size block of threads
hipLaunchKernelGGL(( vector_subractionKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd, cd, size);
hipDeviceSynchronize();// Sync threads
// Check for errors
status = hipGetLastError();
if (status != hipSuccess) {
std::cout << "Kernel failed: " << hipGetErrorString(status) << std::endl;
return false;
}
// Success
return true;
}
| a58943f5acc5d3f1d7940d0229514ee36ee5d9b8.cu | #include <cuda.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <iostream>
#include "common.h"
// GPU kernel to perform Vector Addition
__global__ void vector_subractionKernel(float* ad, float* bd, float* cd, int size)
{
	// Compute the global thread id
	int th_id = threadIdx.x + blockIdx.x * blockDim.x;
	// Perform vector subtraction with a grid-stride loop
	while(th_id<size) {
		cd[th_id] = ad[th_id] - bd[th_id];
		th_id += blockDim.x * gridDim.x;
}
}
bool subtractVectorGPU( float* a, float* b, float* c, int size )
{
// Error return value
cudaError_t status;
// Number of bytes in a vector
int bytes = size * sizeof(float);
float *ad, *bd, *cd;
	// Device pointers to the pinned host memory
cudaHostGetDevicePointer( (void**)&ad, a, 0 );
cudaHostGetDevicePointer( (void**)&bd, b, 0 );
cudaHostGetDevicePointer( (void**)&cd, c, 0 );
// Specify the size of the grid and the size of the block
	dim3 dimBlock(1024); // 1024 threads per block
	dim3 dimGrid((size+1023)/1024); // enough blocks to cover all size elements
// Launch the kernel on a size-by-size block of threads
vector_subractionKernel<<<dimGrid, dimBlock>>>(ad, bd, cd, size);
cudaThreadSynchronize();// Sync threads
// Check for errors
status = cudaGetLastError();
if (status != cudaSuccess) {
std::cout << "Kernel failed: " << cudaGetErrorString(status) << std::endl;
return false;
}
// Success
return true;
}
|
c7352c7841351ce6cc77abfa7371f85e046b3849.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<hparams.hpp>
#include<data_types.hpp>
#include<decoder.hpp>
#include<iostream>
#include<vector>
#include<string>
#include<fstream>
#include<cassert>
#include<algorithm>
#include<utility>
#include<queue>
#include<limits>
using namespace s2t::decodernet;
using namespace s2t::sys;
using namespace s2t::common;
using namespace std;
// kernel for decoder computations
__global__ void decoder_concat(size_t in1_sz, size_t in2_sz, float* in1, const float* in2)
{
// concat in2 into in1
size_t index = blockIdx.x*blockDim.x + threadIdx.x;
if(index < in2_sz)
{
in1[in1_sz+index] = in2[index];
}
}
// min_first methods
bool min_first::operator()(pair<float, long long> const& pair1, pair<float, long long> const& pair2)
{
// pair with minimum value of first will be at the top of priority queue
return pair1.first > pair2.first;
}
// TrieNode methods
TrieNode::TrieNode()
{
isCompleteWord = false;
for (int i = 0; i < letters; ++i)
{
children[i] = NULL;
}
}
TrieNode::~TrieNode()
{
}
// Trie methods
Trie::Trie()
{
root = new TrieNode();
all_trie_nodes.push_back(root);
}
bool Trie::insert_and_check(vector<size_t>& word)
{
/* returns true if word already exists;
else returns false and inserts word */
auto current = root;
for(int i = 0; i < word.size(); ++i)
{
int index = word[i];
if(!current->children[index])
{
current->children[index] = new TrieNode();
all_trie_nodes.push_back(current->children[index]);
}
current = current->children[index];
}
if(current->isCompleteWord)
return true;
current->isCompleteWord = true;
return false;
}
Trie::~Trie()
{
for(int i=0; i<all_trie_nodes.size(); ++i)
{
delete all_trie_nodes[i];
}
}
// is_prefix : checks if sids_1 is a proper prefix of sids_2
bool is_prefix(vector<size_t>& sids_1, vector<size_t>& sids_2)
{
if(sids_1.size() >= sids_2.size())
{
return false;
}
for(int i=0; i<sids_1.size(); ++i)
{
if(sids_1[i]!=sids_2[i])
return false;
}
return true;
}
// sorting comparator function for boosting phase
bool compareSIDsLengths(pair<int, int>& pair1, pair<int, int>& pair2)
{
return (pair1.second < pair2.second);
}
// log sum exp function for a pair of float values
float logsumexp(float x, float y)
{
float maxval = max(x, y);
return log(exp(x - maxval) + exp(y - maxval)) + maxval;
}
// decoder methods
decoder::decoder(size_t p_vocab_size, size_t p_blank_index)
{
vocab_size = p_vocab_size;
blank_index = p_blank_index;
// Read the subword file
{
string subword;
ifstream subwords(hparams::subword_file);
if(subwords.is_open())
{
while(getline(subwords, subword))
{
subword_map.push_back(subword);
}
subwords.close();
}
else
{
cout << "Couldn't open vocabulary file!" << endl;
}
subword_map.push_back(""); // appending blank symbol at last
assert(vocab_size==subword_map.size() && "Number of subwords in file and vocab_size do not match!");
assert(vocab_size==hparams::joint_net_logit_size && "hparams::joint_net_logit_size and vocab_size do not match!");
}
	// initialise prednet and jointnet
{
checkCUDNN(cudnnCreate(&cudnn));
prednet1.init(cudnn, "");
jointnet1.init(cudnn, "");
}
// initialise the gpu variables
{
prednet_out.init(hparams::max_input_size, hparams::pred_net_logit_size);
		enc_pred_concated.init(hparams::max_input_size, hparams::enc_net_logit_size+hparams::pred_net_logit_size); // first 700 encoder, next 700 decoder
jointnet_out.init(hparams::max_input_size, hparams::joint_net_logit_size);
}
// initialise the cpu variables
{
log_probs = (float*) malloc(hparams::joint_net_logit_size * sizeof(float));
boost_phase = hparams::boost_phase;
}
}
void decoder::boost_prob(data_tuple& final, data_tuple& prefix)
{
float boost_log_prob = prefix.log_prob;
size_t input_symbol = prefix.last_decoded_sid;
int output_state_idx, input_state_idx;
for(int i=prefix.beam_sids.size(); i<final.beam_sids.size(); ++i)
{
size_t output_symbol = final.beam_sids[i];
// compute log_prob
float log_prob;
{
if(i==prefix.beam_sids.size())
{
input_state_idx = prefix.hidden_idx;
output_state_idx = -1;
}
else if(i==prefix.beam_sids.size()+1)
{
input_state_idx = output_state_idx;
output_state_idx = -1;
}
else
{
swap(input_state_idx, output_state_idx);
}
// calls to jointnet and prednet
int return_state_idx = prednet1(cudnn, input_symbol, prednet_out, input_state_idx, output_state_idx);
if(i<=prefix.beam_sids.size()+1)
{
output_state_idx = return_state_idx;
prednet1.reuse_state(output_state_idx);
}
else
{
assert(output_state_idx==return_state_idx && "output state index doesn't match return state index!");
}
hipLaunchKernelGGL(( decoder_concat), dim3(1), dim3(1024), 0, 0, 700, 700, enc_pred_concated.ptr, prednet_out.ptr);
jointnet1(cudnn, enc_pred_concated, jointnet_out);
// loading log_probs in float array
size_t log_probs_N = jointnet_out.data_at_host(&log_probs);
log_prob = log_probs[output_symbol];
}
boost_log_prob += log_prob;
input_symbol = output_symbol;
}
if(prefix.beam_sids.size()+1==final.beam_sids.size())
{
prednet1.free_state(output_state_idx);
}
else
{
prednet1.free_state(input_state_idx);
prednet1.free_state(output_state_idx);
}
final.log_prob = logsumexp(final.log_prob, boost_log_prob);
}
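// Beam search over the encoder features: for every acoustic frame, the beams finished
// at the previous frame (b_heap) are re-ranked (optionally boosting beams that extend a
// shorter beam already in the set) and moved to a_heap, then repeatedly extended by
// running the prediction network and joint network to obtain per-subword log-probs.
// A blank transition sends a hypothesis back to b_heap; non-blank transitions push the
// extended hypothesis onto a_heap, pruned against the beamsize-th best finished beam.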
void decoder::operator() (const string& encoder_features_file, size_t beamsize, vector<pair<string, float>>& beams_and_logprobs_out)
{
// resetting state buffer for LSTM
prednet1.reset_state_buffer();
auto encoder_features = cnpy::npy_load(encoder_features_file);
size_t acoustic_time_steps = encoder_features.shape[0]; // T * 700 file
// b_heap related data structures
vector<data_tuple> data_b;
priority_queue<pair<float, int>, vector<pair<float, int>>, min_first> b_heap;
// a_heap realted data structures
vector<data_tuple> data_a;
priority_queue<pair<float, int>, vector<pair<float, int>>, min_first> a_heap;
// initialse b_heap related data structures before t=0
int zeroed_dlsm_state_idx = prednet1.get_zerod_state();
data_tuple init_data_tuple = {"", 0.f, blank_index, zeroed_dlsm_state_idx /* hidden index */, {blank_index}};
prednet1.reuse_state(zeroed_dlsm_state_idx);
data_b.push_back(init_data_tuple);
b_heap.push(make_pair(0.f, 0));
for(int t=0; t<acoustic_time_steps; ++t)
{
enc_pred_concated.copy(encoder_features.data<float_t>() + hparams::enc_net_logit_size*t, hparams::enc_net_logit_size);
// delete all for a_heap;
{
for(int i=0; i<data_a.size(); ++i)
{
prednet1.free_state(data_a[i].hidden_idx);
}
data_a.clear();
while(a_heap.size()) // reset it
{
a_heap.pop();
}
}
// put all data from b_heap in to a_heap and initialise empty b_heap;
{
// boost the probabilities in b_heap and push to a_heap;
{
vector<pair<int, int>> data_b_idx_sids_len_vector;
while(b_heap.size())
{
pair<float, int> log_prob_data_idx_pair = b_heap.top();
data_b_idx_sids_len_vector.push_back(make_pair(log_prob_data_idx_pair.second, data_b[log_prob_data_idx_pair.second].beam_sids.size()));
b_heap.pop();
}
sort(data_b_idx_sids_len_vector.begin(), data_b_idx_sids_len_vector.end(), compareSIDsLengths);
for(int i=0; i<data_b_idx_sids_len_vector.size(); ++i)
{
for(int j=i-1; boost_phase && j>=0; --j)
{
// if data_b object at index j is a prefix of data_b object at index i;
if(is_prefix(data_b[data_b_idx_sids_len_vector[j].first].beam_sids, data_b[data_b_idx_sids_len_vector[i].first].beam_sids))
{
boost_prob(data_b[data_b_idx_sids_len_vector[i].first], data_b[data_b_idx_sids_len_vector[j].first]);
break;
}
}
// data_b object at index i is boosted so push to a_heap;
a_heap.push(make_pair(-data_b[data_b_idx_sids_len_vector[i].first].log_prob, data_b_idx_sids_len_vector[i].first));
}
}
data_a = data_b;
data_b.clear();
}
// choose the most probable for a_heap and iterate
pair<float, int> top_log_prob_data_idx_pair = a_heap.top();
a_heap.pop();
size_t top_id_data_a = top_log_prob_data_idx_pair.second;
float top_log_prob_a = data_a[top_id_data_a].log_prob;
float bmszth_top_log_prob_b = -numeric_limits<float>::infinity();
Trie trie;
while(top_log_prob_a!=-numeric_limits<float>::infinity() && bmszth_top_log_prob_b<top_log_prob_a)
{
			// compute next set of log probabilities by calling lm and joint net
size_t input_symbol = data_a[top_id_data_a].last_decoded_sid;
// calls to jointnet and prednet
int output_state_idx = prednet1(cudnn, input_symbol, prednet_out, data_a[top_id_data_a].hidden_idx);
hipLaunchKernelGGL(( decoder_concat), dim3(1), dim3(1024), 0, 0, 700, 700, enc_pred_concated.ptr, prednet_out.ptr);
jointnet1(cudnn, enc_pred_concated, jointnet_out);
// loading log_probs in float array
size_t log_probs_N = jointnet_out.data_at_host(&log_probs);
// add blank transition to B
if(top_log_prob_a+log_probs[blank_index] > bmszth_top_log_prob_b && !trie.insert_and_check(data_a[top_id_data_a].beam_sids)) // and not already in trie:
{
data_tuple next_data_tuple = {data_a[top_id_data_a].beam_string, top_log_prob_a + log_probs[blank_index], data_a[top_id_data_a].last_decoded_sid, data_a[top_id_data_a].hidden_idx, data_a[top_id_data_a].beam_sids};
prednet1.reuse_state(data_a[top_id_data_a].hidden_idx);
b_heap.push(make_pair(next_data_tuple.log_prob, data_b.size()));
data_b.push_back(next_data_tuple);
if(b_heap.size()==beamsize+1)
{
b_heap.pop();
}
if(b_heap.size()==beamsize)
{
pair<float, int> log_prob_data_idx_pair = b_heap.top();
bmszth_top_log_prob_b = data_b[log_prob_data_idx_pair.second].log_prob;
}
}
// add non-blank transition to A
for(int i=0; i<vocab_size; i++)
{
if(i == blank_index || top_log_prob_a+log_probs[i] <= bmszth_top_log_prob_b || log_probs[i] < hparams::prune_log_prob)
continue;
data_tuple next_data_tuple = {data_a[top_id_data_a].beam_string + subword_map[i], top_log_prob_a + log_probs[i], size_t(i), output_state_idx, data_a[top_id_data_a].beam_sids};
prednet1.reuse_state(output_state_idx);
next_data_tuple.beam_sids.push_back(i);
a_heap.push(make_pair(-next_data_tuple.log_prob, data_a.size()));
data_a.push_back(next_data_tuple);
}
// update top_id_data_a and top_log_prob_a
top_log_prob_a = -numeric_limits<float>::infinity();
if(a_heap.size())
{
top_log_prob_data_idx_pair = a_heap.top();
a_heap.pop();
top_id_data_a = top_log_prob_data_idx_pair.second;
top_log_prob_a = data_a[top_id_data_a].log_prob;
}
}
}
// dealloc all hiddens floats
for(int i=0; i<data_a.size(); ++i)
prednet1.free_state(data_a[i].hidden_idx);
for(int i=0; i<data_b.size(); ++i)
prednet1.free_state(data_b[i].hidden_idx);
// write to beams_and_logprobs_out
while(b_heap.size())
{
pair<float, int> log_prob_data_idx_pair = b_heap.top(); b_heap.pop();
int data_b_idx = log_prob_data_idx_pair.second;
beams_and_logprobs_out.push_back(make_pair(data_b[data_b_idx].beam_string, data_b[data_b_idx].log_prob));
}
}
decoder::~decoder()
{
// de-initialise the cpu variables
{
free(log_probs);
}
} | c7352c7841351ce6cc77abfa7371f85e046b3849.cu | #include<hparams.hpp>
#include<data_types.hpp>
#include<decoder.hpp>
#include<iostream>
#include<vector>
#include<string>
#include<fstream>
#include<cassert>
#include<algorithm>
#include<utility>
#include<queue>
#include<limits>
using namespace s2t::decodernet;
using namespace s2t::sys;
using namespace s2t::common;
using namespace std;
// kernel for decoder computations
__global__ void decoder_concat(size_t in1_sz, size_t in2_sz, float* in1, const float* in2)
{
// concat in2 into in1
size_t index = blockIdx.x*blockDim.x + threadIdx.x;
if(index < in2_sz)
{
in1[in1_sz+index] = in2[index];
}
}
// min_first methods
bool min_first::operator()(pair<float, long long> const& pair1, pair<float, long long> const& pair2)
{
// pair with minimum value of first will be at the top of priority queue
return pair1.first > pair2.first;
}
// TrieNode methods
TrieNode::TrieNode()
{
isCompleteWord = false;
for (int i = 0; i < letters; ++i)
{
children[i] = NULL;
}
}
TrieNode::~TrieNode()
{
}
// Trie methods
Trie::Trie()
{
root = new TrieNode();
all_trie_nodes.push_back(root);
}
bool Trie::insert_and_check(vector<size_t>& word)
{
/* returns true if word already exists;
else returns false and inserts word */
auto current = root;
for(int i = 0; i < word.size(); ++i)
{
int index = word[i];
if(!current->children[index])
{
current->children[index] = new TrieNode();
all_trie_nodes.push_back(current->children[index]);
}
current = current->children[index];
}
if(current->isCompleteWord)
return true;
current->isCompleteWord = true;
return false;
}
Trie::~Trie()
{
for(int i=0; i<all_trie_nodes.size(); ++i)
{
delete all_trie_nodes[i];
}
}
// is_prefix : checks if sids_1 is a proper prefix of sids_2
bool is_prefix(vector<size_t>& sids_1, vector<size_t>& sids_2)
{
if(sids_1.size() >= sids_2.size())
{
return false;
}
for(int i=0; i<sids_1.size(); ++i)
{
if(sids_1[i]!=sids_2[i])
return false;
}
return true;
}
// sorting comparator function for boosting phase
bool compareSIDsLengths(pair<int, int>& pair1, pair<int, int>& pair2)
{
return (pair1.second < pair2.second);
}
// log sum exp function for a pair of float values
float logsumexp(float x, float y)
{
float maxval = max(x, y);
return log(exp(x - maxval) + exp(y - maxval)) + maxval;
}
// decoder methods
decoder::decoder(size_t p_vocab_size, size_t p_blank_index)
{
vocab_size = p_vocab_size;
blank_index = p_blank_index;
// Read the subword file
{
string subword;
ifstream subwords(hparams::subword_file);
if(subwords.is_open())
{
while(getline(subwords, subword))
{
subword_map.push_back(subword);
}
subwords.close();
}
else
{
cout << "Couldn't open vocabulary file!" << endl;
}
subword_map.push_back(""); // appending blank symbol at last
assert(vocab_size==subword_map.size() && "Number of subwords in file and vocab_size do not match!");
assert(vocab_size==hparams::joint_net_logit_size && "hparams::joint_net_logit_size and vocab_size do not match!");
}
	// initialise prednet and jointnet
{
checkCUDNN(cudnnCreate(&cudnn));
prednet1.init(cudnn, "");
jointnet1.init(cudnn, "");
}
// initialise the gpu variables
{
prednet_out.init(hparams::max_input_size, hparams::pred_net_logit_size);
		enc_pred_concated.init(hparams::max_input_size, hparams::enc_net_logit_size+hparams::pred_net_logit_size); // first 700 encoder, next 700 decoder
jointnet_out.init(hparams::max_input_size, hparams::joint_net_logit_size);
}
// initialise the cpu variables
{
log_probs = (float*) malloc(hparams::joint_net_logit_size * sizeof(float));
boost_phase = hparams::boost_phase;
}
}
void decoder::boost_prob(data_tuple& final, data_tuple& prefix)
{
float boost_log_prob = prefix.log_prob;
size_t input_symbol = prefix.last_decoded_sid;
int output_state_idx, input_state_idx;
for(int i=prefix.beam_sids.size(); i<final.beam_sids.size(); ++i)
{
size_t output_symbol = final.beam_sids[i];
// compute log_prob
float log_prob;
{
if(i==prefix.beam_sids.size())
{
input_state_idx = prefix.hidden_idx;
output_state_idx = -1;
}
else if(i==prefix.beam_sids.size()+1)
{
input_state_idx = output_state_idx;
output_state_idx = -1;
}
else
{
swap(input_state_idx, output_state_idx);
}
// calls to jointnet and prednet
int return_state_idx = prednet1(cudnn, input_symbol, prednet_out, input_state_idx, output_state_idx);
if(i<=prefix.beam_sids.size()+1)
{
output_state_idx = return_state_idx;
prednet1.reuse_state(output_state_idx);
}
else
{
assert(output_state_idx==return_state_idx && "output state index doesn't match return state index!");
}
decoder_concat<<<1, 1024>>>(700, 700, enc_pred_concated.ptr, prednet_out.ptr);
jointnet1(cudnn, enc_pred_concated, jointnet_out);
// loading log_probs in float array
size_t log_probs_N = jointnet_out.data_at_host(&log_probs);
log_prob = log_probs[output_symbol];
}
boost_log_prob += log_prob;
input_symbol = output_symbol;
}
if(prefix.beam_sids.size()+1==final.beam_sids.size())
{
prednet1.free_state(output_state_idx);
}
else
{
prednet1.free_state(input_state_idx);
prednet1.free_state(output_state_idx);
}
final.log_prob = logsumexp(final.log_prob, boost_log_prob);
}
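// Beam search over the encoder features: for every acoustic frame, the beams finished
// at the previous frame (b_heap) are re-ranked (optionally boosting beams that extend a
// shorter beam already in the set) and moved to a_heap, then repeatedly extended by
// running the prediction network and joint network to obtain per-subword log-probs.
// A blank transition sends a hypothesis back to b_heap; non-blank transitions push the
// extended hypothesis onto a_heap, pruned against the beamsize-th best finished beam.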
void decoder::operator() (const string& encoder_features_file, size_t beamsize, vector<pair<string, float>>& beams_and_logprobs_out)
{
// resetting state buffer for LSTM
prednet1.reset_state_buffer();
auto encoder_features = cnpy::npy_load(encoder_features_file);
size_t acoustic_time_steps = encoder_features.shape[0]; // T * 700 file
// b_heap related data structures
vector<data_tuple> data_b;
priority_queue<pair<float, int>, vector<pair<float, int>>, min_first> b_heap;
// a_heap realted data structures
vector<data_tuple> data_a;
priority_queue<pair<float, int>, vector<pair<float, int>>, min_first> a_heap;
// initialse b_heap related data structures before t=0
int zeroed_dlsm_state_idx = prednet1.get_zerod_state();
data_tuple init_data_tuple = {"", 0.f, blank_index, zeroed_dlsm_state_idx /* hidden index */, {blank_index}};
prednet1.reuse_state(zeroed_dlsm_state_idx);
data_b.push_back(init_data_tuple);
b_heap.push(make_pair(0.f, 0));
for(int t=0; t<acoustic_time_steps; ++t)
{
enc_pred_concated.copy(encoder_features.data<float_t>() + hparams::enc_net_logit_size*t, hparams::enc_net_logit_size);
// delete all for a_heap;
{
for(int i=0; i<data_a.size(); ++i)
{
prednet1.free_state(data_a[i].hidden_idx);
}
data_a.clear();
while(a_heap.size()) // reset it
{
a_heap.pop();
}
}
// put all data from b_heap in to a_heap and initialise empty b_heap;
{
// boost the probabilities in b_heap and push to a_heap;
{
vector<pair<int, int>> data_b_idx_sids_len_vector;
while(b_heap.size())
{
pair<float, int> log_prob_data_idx_pair = b_heap.top();
data_b_idx_sids_len_vector.push_back(make_pair(log_prob_data_idx_pair.second, data_b[log_prob_data_idx_pair.second].beam_sids.size()));
b_heap.pop();
}
sort(data_b_idx_sids_len_vector.begin(), data_b_idx_sids_len_vector.end(), compareSIDsLengths);
for(int i=0; i<data_b_idx_sids_len_vector.size(); ++i)
{
for(int j=i-1; boost_phase && j>=0; --j)
{
// if data_b object at index j is a prefix of data_b object at index i;
if(is_prefix(data_b[data_b_idx_sids_len_vector[j].first].beam_sids, data_b[data_b_idx_sids_len_vector[i].first].beam_sids))
{
boost_prob(data_b[data_b_idx_sids_len_vector[i].first], data_b[data_b_idx_sids_len_vector[j].first]);
break;
}
}
// data_b object at index i is boosted so push to a_heap;
a_heap.push(make_pair(-data_b[data_b_idx_sids_len_vector[i].first].log_prob, data_b_idx_sids_len_vector[i].first));
}
}
data_a = data_b;
data_b.clear();
}
// choose the most probable for a_heap and iterate
pair<float, int> top_log_prob_data_idx_pair = a_heap.top();
a_heap.pop();
size_t top_id_data_a = top_log_prob_data_idx_pair.second;
float top_log_prob_a = data_a[top_id_data_a].log_prob;
float bmszth_top_log_prob_b = -numeric_limits<float>::infinity();
Trie trie;
while(top_log_prob_a!=-numeric_limits<float>::infinity() && bmszth_top_log_prob_b<top_log_prob_a)
{
			// compute next set of log probabilities by calling lm and joint net
size_t input_symbol = data_a[top_id_data_a].last_decoded_sid;
// calls to jointnet and prednet
int output_state_idx = prednet1(cudnn, input_symbol, prednet_out, data_a[top_id_data_a].hidden_idx);
decoder_concat<<<1, 1024>>>(700, 700, enc_pred_concated.ptr, prednet_out.ptr);
jointnet1(cudnn, enc_pred_concated, jointnet_out);
// loading log_probs in float array
size_t log_probs_N = jointnet_out.data_at_host(&log_probs);
// add blank transition to B
if(top_log_prob_a+log_probs[blank_index] > bmszth_top_log_prob_b && !trie.insert_and_check(data_a[top_id_data_a].beam_sids)) // and not already in trie:
{
data_tuple next_data_tuple = {data_a[top_id_data_a].beam_string, top_log_prob_a + log_probs[blank_index], data_a[top_id_data_a].last_decoded_sid, data_a[top_id_data_a].hidden_idx, data_a[top_id_data_a].beam_sids};
prednet1.reuse_state(data_a[top_id_data_a].hidden_idx);
b_heap.push(make_pair(next_data_tuple.log_prob, data_b.size()));
data_b.push_back(next_data_tuple);
if(b_heap.size()==beamsize+1)
{
b_heap.pop();
}
if(b_heap.size()==beamsize)
{
pair<float, int> log_prob_data_idx_pair = b_heap.top();
bmszth_top_log_prob_b = data_b[log_prob_data_idx_pair.second].log_prob;
}
}
// add non-blank transition to A
for(int i=0; i<vocab_size; i++)
{
if(i == blank_index || top_log_prob_a+log_probs[i] <= bmszth_top_log_prob_b || log_probs[i] < hparams::prune_log_prob)
continue;
data_tuple next_data_tuple = {data_a[top_id_data_a].beam_string + subword_map[i], top_log_prob_a + log_probs[i], size_t(i), output_state_idx, data_a[top_id_data_a].beam_sids};
prednet1.reuse_state(output_state_idx);
next_data_tuple.beam_sids.push_back(i);
a_heap.push(make_pair(-next_data_tuple.log_prob, data_a.size()));
data_a.push_back(next_data_tuple);
}
// update top_id_data_a and top_log_prob_a
top_log_prob_a = -numeric_limits<float>::infinity();
if(a_heap.size())
{
top_log_prob_data_idx_pair = a_heap.top();
a_heap.pop();
top_id_data_a = top_log_prob_data_idx_pair.second;
top_log_prob_a = data_a[top_id_data_a].log_prob;
}
}
}
// dealloc all hiddens floats
for(int i=0; i<data_a.size(); ++i)
prednet1.free_state(data_a[i].hidden_idx);
for(int i=0; i<data_b.size(); ++i)
prednet1.free_state(data_b[i].hidden_idx);
// write to beams_and_logprobs_out
while(b_heap.size())
{
pair<float, int> log_prob_data_idx_pair = b_heap.top(); b_heap.pop();
int data_b_idx = log_prob_data_idx_pair.second;
beams_and_logprobs_out.push_back(make_pair(data_b[data_b_idx].beam_string, data_b[data_b_idx].log_prob));
}
}
decoder::~decoder()
{
// de-initialise the cpu variables
{
free(log_probs);
}
} |
5ad248a2d2a071e44f3cee8cf1c2422c83d87d4f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define IDX(i,j,ld) (((i)*(ld))+(j))
texture<real, 2> in_tex;
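// One thread per output pixel: sample the bound 2D input texture at the scaled
// coordinates (j / out_cols, i / out_rows) and write the result to (i, j) of the
// out_rows x out_cols output.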
__global__ void resize(real *out, const uint out_rows, const uint out_cols)
{
const uint i = blockIdx.y*blockDim.y + threadIdx.y;
const uint j = blockIdx.x*blockDim.x + threadIdx.x;
if (i > out_rows-1 || j > out_cols-1)
return;
out[IDX(i,j,out_cols)] = tex2D(in_tex,
((float)j) / ((float)out_cols),
((float)i) / ((float)out_rows));
}
| 5ad248a2d2a071e44f3cee8cf1c2422c83d87d4f.cu | #define IDX(i,j,ld) (((i)*(ld))+(j))
texture<real, 2> in_tex;
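// One thread per output pixel: sample the bound 2D input texture at the scaled
// coordinates (j / out_cols, i / out_rows) and write the result to (i, j) of the
// out_rows x out_cols output.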
__global__ void resize(real *out, const uint out_rows, const uint out_cols)
{
const uint i = blockIdx.y*blockDim.y + threadIdx.y;
const uint j = blockIdx.x*blockDim.x + threadIdx.x;
if (i > out_rows-1 || j > out_cols-1)
return;
out[IDX(i,j,out_cols)] = tex2D(in_tex,
((float)j) / ((float)out_cols),
((float)i) / ((float)out_rows));
}
|
b9ffc9ef702c1ee1403f6f828dc688793e4bdac2.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "case.cuh"
#include "dist.cuh"
#include "finders.cuh"
#include "random_hip.cuh"
#include "type.cuh"
#include "utils.cuh"
// extern "C" int GetBest(IPTR pop, int size);
IPTR caseBase;
int *sIndex, *hamDist;
void TourPrint(FILE *fp, IPTR pj, char *name);
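/* Case injection: each call replaces up to nseeds = popsize * injectFraction of the
   worst individuals in the population with the cases from the case base that are
   closest (under the configured distance metric) to the individual picked by
   GetIndexIndividual. */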
void LoadCases(IPTR pop, int gen, float frac, Population *p, Functions *f) {
int nseeds, newSeeds, i, index;
int *rank;
IPTR seedCases;
if (p->nCases <= 0) return;
if (p->injectFraction <= 0.0) return;
nseeds = (int) ((float) p->popsize * (float) p->injectFraction);
if (p->nCases < nseeds) { nseeds = p->nCases; }
rank = (int *) malloc(sizeof(int) * p->popsize);
if (rank == NULL) { perror("error in malloc (rank)\n"); }
seedCases = AllocateIndividuals(nseeds, p->chromLength);
/* (IPTR) malloc (sizeof(INDIVIDUAL) * nseeds);*/
if (seedCases == NULL) { perror("error in malloc (seedCases)\n"); }
index = f->GetIndexIndividual(pop, p->popsize);
/* printf("Before GetCases\n");*/
newSeeds = GetCases(&pop[index], seedCases, nseeds, p, f);
/* newSeeds may be less than nseeds */
/* printf("after GetCases\n");*/
#ifdef DEBUG
PrintCases(debugFName, seedCases, nseeds, pop, best);
#endif
for (i = 0; i < p->popsize; i++) { rank[i] = i; }
FindNWorst(pop, rank, p->popsize, newSeeds);
/* printf("after FindNworst %d\n", nseeds); */
for (i = 0; i < newSeeds; i++) {
IndividualCopy(&seedCases[i], &pop[rank[i]]);
/* pop[rank[i]].fitness = eval_org(&pop[rank[i]]); */
/* Done in GetCases */
}
free(rank);
free(seedCases);
}
int GetBest(IPTR pop, int size) {
int i;
int max = 0;
for (i = 1; i < size; i++) {
if (pop[i].fitness > pop[max].fitness) { max = i; }
}
return max;
}
int GetWorst(IPTR pop, int size) {
int i;
int min = 0;
for (i = 1; i < size; i++) {
if (pop[i].fitness < pop[min].fitness) { min = i; }
}
return min;
}
void SaveCase(FILE *fp, IPTR pj, int gen, Population *p) {
int i;
if (p->saveCases) {
fprintf(fp, "%5d %i ", gen, pj->chromLen);
for (i = 0; i < pj->chromLen; i++) { fprintf(fp, "%i ", pj->chrom[i]); }
fprintf(fp, " %f %f\n", pj->fitness, pj->scaledFitness);
p->nCurrentCases++;
}
return;
}
// temporarily disabled in the switch to CUDA?
int GetCases(IPTR pj, IPTR iCases, int howmany, Population *p, Functions *f) {
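  // Rank every case in the case base by its distance to individual pj, then copy up to 'howmany'
  // non-identical (distance != 0) cases into iCases for injection.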
int i, ncopied = 0;
int *sIndex;
double *dist;
if (p->nCases <= 0) return 0;
sIndex = (int *) malloc(sizeof(int) * p->nCases);
if (sIndex == NULL) { perror("error in malloc (sIndex)\n"); }
dist = (double *) malloc(sizeof(double) * p->nCases);
if (dist == NULL) { perror("error in malloc (dist)\n"); }
for (i = 0; i < p->nCases; i++) {
dist[i] = f->DistanceMetric(pj->chrom, (&(caseBase[i]))->chrom, pj->chromLen,
(&(caseBase[i]))->chromLen, &(caseBase[i].backup));
/* This is the address of backup so I can change what backup points to
Needed in sequential representations to deal with varying size
cases.
*/
}
for (i = 0; i < p->nCases; i++) { sIndex[i] = i; }
f->ApplyMetric(dist, sIndex, p->nCases, howmany);
ncopied = 0;
for (i = 0; (i < p->nCases) && (ncopied < howmany) && (ncopied < p->nCases); i++) {
if (dist[sIndex[i]] != 0.0) {
IndividualCopy(&caseBase[sIndex[i]], &iCases[ncopied]);
if (p->xType > 0 && p->dMetric == LCSD) {
FixCopiedIndividual(&iCases[ncopied], p);
// TourPrint(stdout, &iCases[ncopied], "GetCases");
} /* to copy backup to chrom before evaluation*/
iCases[ncopied].fitness =
1.0; // Eval(&(iCases[ncopied])); //disabled in switch to cuda
printf("CASES Was reached\n");
/* how do I parallelize this? */
iCases[ncopied].dx = dist[sIndex[i]];
ncopied++;
}
}
/*************************************************************
ncopied = 0;
for(i = 0; i < howmany; i++) {
IndividualCopy(&caseBase[sIndex[i]], &iCases[i]);
iCases[i].fitness = Eval(&(iCases[i]));
iCases[i].dx = dist[sIndex[i]];
ncopied++;
}
*************************************************************/
free(sIndex);
free(dist);
return ncopied;
}
void ReadCase(FILE *fp, IPTR pj, Population *p) {
int t, i, len;
fscanf(fp, "%i %i", &t, &len);
if (p->xType < 1) {
for (i = 0; i < len; i++) {
fscanf(fp, "%i", &(pj->chrom[i]));
// All I/O should be abstracted out
}
pj->chromLen = p->chromLength;
} else { // sequential (TSP) representation
GetSetSeqChrom(fp, pj, p, len);
// TourPrint(stdout, pj, "ReadCase");
}
fscanf(fp, "%lf %lf", &(pj->fitness), &(pj->scaledFitness));
}
void GetSetSeqChrom(FILE *fp, IPTR pj, Population *p,
int len) { /* This frees up the old chromosome, and replaces it with
allocated space for a chromosome of length len, then reads
the new chromosome from FILE *fp */
int i;
free(pj->chrom);
free(pj->backup);
pj->chrom = (ChromType *) calloc((size_t) len, sizeof(ChromType));
pj->backup = (ChromType *) calloc((size_t) len, sizeof(ChromType));
pj->chromLen = len;
for (i = 0; i < len; i++) {
fscanf(fp, "%i", &(pj->chrom[i]));
// All I/O should be abstracted out
}
}
int FindNCases(char *ncfile) {
FILE *fp;
int tmp;
if ((fp = fopen(ncfile, "r")) == NULL) {
fprintf(stdout, "no cases in case base\n");
return 0;
} else {
fscanf(fp, "%d", &tmp);
fclose(fp);
return tmp;
}
}
void StoreNcases(char *ncfile, int ncases, int nCurrentCases) {
FILE *fp;
if ((fp = fopen(ncfile, "w")) == NULL) {
fprintf(stdout, "problem in opening %s \n", ncfile);
exit(1);
} else {
fprintf(fp, "%d\n", ncases + nCurrentCases);
fclose(fp);
}
}
void InitLoadCases(char *caseFile, IPTR pop, int gen, int perc, Population *p) {
FILE *fp;
int i;
p->nCases = FindNCases(p->nCFile);
if (p->nCases <= 0) return;
caseBase = AllocateIndividuals(p->nCases, p->chromLength);
if (caseBase == NULL) {
perror("Malloc failure for caseBase\n");
exit(1);
}
if ((fp = fopen(caseFile, "r")) == NULL) {
fprintf(stderr, "InitLoadCases: Cannot open %s for reading\n", caseFile);
exit(1);
}
for (i = 0; i < p->nCases; i++) { ReadCase(fp, &caseBase[i], p); }
fclose(fp);
}
| b9ffc9ef702c1ee1403f6f828dc688793e4bdac2.cu | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "case.cuh"
#include "dist.cuh"
#include "finders.cuh"
#include "random.cuh"
#include "type.cuh"
#include "utils.cuh"
// extern "C" int GetBest(IPTR pop, int size);
IPTR caseBase;
int *sIndex, *hamDist;
void TourPrint(FILE *fp, IPTR pj, char *name);
void LoadCases(IPTR pop, int gen, float frac, Population *p, Functions *f) {
int nseeds, newSeeds, i, index;
int *rank;
IPTR seedCases;
if (p->nCases <= 0) return;
if (p->injectFraction <= 0.0) return;
nseeds = (int) ((float) p->popsize * (float) p->injectFraction);
if (p->nCases < nseeds) { nseeds = p->nCases; }
rank = (int *) malloc(sizeof(int) * p->popsize);
if (rank == NULL) { perror("error in malloc (rank)\n"); }
seedCases = AllocateIndividuals(nseeds, p->chromLength);
/* (IPTR) malloc (sizeof(INDIVIDUAL) * nseeds);*/
if (seedCases == NULL) { perror("error in malloc (seedCases)\n"); }
index = f->GetIndexIndividual(pop, p->popsize);
/* printf("Before GetCases\n");*/
newSeeds = GetCases(&pop[index], seedCases, nseeds, p, f);
/* newSeeds may be less than nseeds */
/* printf("after GetCases\n");*/
#ifdef DEBUG
PrintCases(debugFName, seedCases, nseeds, pop, best);
#endif
for (i = 0; i < p->popsize; i++) { rank[i] = i; }
FindNWorst(pop, rank, p->popsize, newSeeds);
/* printf("after FindNworst %d\n", nseeds); */
for (i = 0; i < newSeeds; i++) {
IndividualCopy(&seedCases[i], &pop[rank[i]]);
/* pop[rank[i]].fitness = eval_org(&pop[rank[i]]); */
/* Done in GetCases */
}
free(rank);
free(seedCases);
}
int GetBest(IPTR pop, int size) {
int i;
int max = 0;
for (i = 1; i < size; i++) {
if (pop[i].fitness > pop[max].fitness) { max = i; }
}
return max;
}
int GetWorst(IPTR pop, int size) {
int i;
int min = 0;
for (i = 1; i < size; i++) {
if (pop[i].fitness < pop[min].fitness) { min = i; }
}
return min;
}
void SaveCase(FILE *fp, IPTR pj, int gen, Population *p) {
int i;
if (p->saveCases) {
fprintf(fp, "%5d %i ", gen, pj->chromLen);
for (i = 0; i < pj->chromLen; i++) { fprintf(fp, "%i ", pj->chrom[i]); }
fprintf(fp, " %f %f\n", pj->fitness, pj->scaledFitness);
p->nCurrentCases++;
}
return;
}
// temporarily disabled in the switch to CUDA?
int GetCases(IPTR pj, IPTR iCases, int howmany, Population *p, Functions *f) {
int i, ncopied = 0;
int *sIndex;
double *dist;
if (p->nCases <= 0) return 0;
sIndex = (int *) malloc(sizeof(int) * p->nCases);
if (sIndex == NULL) { perror("error in malloc (sIndex)\n"); }
dist = (double *) malloc(sizeof(double) * p->nCases);
if (dist == NULL) { perror("error in malloc (dist)\n"); }
for (i = 0; i < p->nCases; i++) {
dist[i] = f->DistanceMetric(pj->chrom, (&(caseBase[i]))->chrom, pj->chromLen,
(&(caseBase[i]))->chromLen, &(caseBase[i].backup));
/* This is the address of backup so I can change what backup points to
Needed in sequential representations to deal with varying size
cases.
*/
}
for (i = 0; i < p->nCases; i++) { sIndex[i] = i; }
f->ApplyMetric(dist, sIndex, p->nCases, howmany);
ncopied = 0;
for (i = 0; (i < p->nCases) && (ncopied < howmany) && (ncopied < p->nCases); i++) {
if (dist[sIndex[i]] != 0.0) {
IndividualCopy(&caseBase[sIndex[i]], &iCases[ncopied]);
if (p->xType > 0 && p->dMetric == LCSD) {
FixCopiedIndividual(&iCases[ncopied], p);
// TourPrint(stdout, &iCases[ncopied], "GetCases");
} /* to copy backup to chrom before evaluation*/
iCases[ncopied].fitness =
1.0; // Eval(&(iCases[ncopied])); //disabled in switch to cuda
printf("CASES Was reached\n");
/* how do I parallelize this? */
iCases[ncopied].dx = dist[sIndex[i]];
ncopied++;
}
}
/*************************************************************
ncopied = 0;
for(i = 0; i < howmany; i++) {
IndividualCopy(&caseBase[sIndex[i]], &iCases[i]);
iCases[i].fitness = Eval(&(iCases[i]));
iCases[i].dx = dist[sIndex[i]];
ncopied++;
}
*************************************************************/
free(sIndex);
free(dist);
return ncopied;
}
void ReadCase(FILE *fp, IPTR pj, Population *p) {
int t, i, len;
fscanf(fp, "%i %i", &t, &len);
if (p->xType < 1) {
for (i = 0; i < len; i++) {
fscanf(fp, "%i", &(pj->chrom[i]));
// All I/O should be abstracted out
}
pj->chromLen = p->chromLength;
} else { // sequential (TSP) representation
GetSetSeqChrom(fp, pj, p, len);
// TourPrint(stdout, pj, "ReadCase");
}
fscanf(fp, "%lf %lf", &(pj->fitness), &(pj->scaledFitness));
}
void GetSetSeqChrom(FILE *fp, IPTR pj, Population *p,
int len) { /* This frees up the old chromosome, and replaces it with
allocated space for a chromosome of length len, then reads
the new chromosome from FILE *fp */
int i;
free(pj->chrom);
free(pj->backup);
pj->chrom = (ChromType *) calloc((size_t) len, sizeof(ChromType));
pj->backup = (ChromType *) calloc((size_t) len, sizeof(ChromType));
pj->chromLen = len;
for (i = 0; i < len; i++) {
fscanf(fp, "%i", &(pj->chrom[i]));
// All I/O should be abstracted out
}
}
int FindNCases(char *ncfile) {
FILE *fp;
int tmp;
if ((fp = fopen(ncfile, "r")) == NULL) {
fprintf(stdout, "no cases in case base\n");
return 0;
} else {
fscanf(fp, "%d", &tmp);
fclose(fp);
return tmp;
}
}
void StoreNcases(char *ncfile, int ncases, int nCurrentCases) {
FILE *fp;
if ((fp = fopen(ncfile, "w")) == NULL) {
fprintf(stdout, "problem in opening %s \n", ncfile);
exit(1);
} else {
fprintf(fp, "%d\n", ncases + nCurrentCases);
fclose(fp);
}
}
void InitLoadCases(char *caseFile, IPTR pop, int gen, int perc, Population *p) {
FILE *fp;
int i;
p->nCases = FindNCases(p->nCFile);
if (p->nCases <= 0) return;
caseBase = AllocateIndividuals(p->nCases, p->chromLength);
if (caseBase == NULL) {
perror("Malloc failure for caseBase\n");
exit(1);
}
if ((fp = fopen(caseFile, "r")) == NULL) {
fprintf(stderr, "InitLoadCases: Cannot open %s for reading\n", caseFile);
exit(1);
}
for (i = 0; i < p->nCases; i++) { ReadCase(fp, &caseBase[i], p); }
fclose(fp);
}
|
25e875e4a1406fb9ae0708dfd5ba5ea59d6e7135.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void helloFromGPU(void) { // The qualifier __global__ tells the compiler
// that the function will be called from the CPU and executed on the GPU.
printf("Hello World from GPU from thread %d\n", threadIdx.x);
}
int main(void) {
printf("Hello World from CPU!\n");
// Triple angle brackets mark a call from the host thread to the code on the device side. A kernel is
// executed by an array of threads and all threads run the same code. The parameters within the triple
// angle brackets are the execution configuration, which specifies how many threads will execute the
// kernel. In this example, you will run 10 GPU threads.
hipLaunchKernelGGL(( helloFromGPU) , dim3(1), dim3(10), 0, 0, );
hipDeviceReset(); // The function hipDeviceReset() will explicitly destroy and
// clean up all resources associated with the current device in the current process.
return 0;
}
| 25e875e4a1406fb9ae0708dfd5ba5ea59d6e7135.cu | #include <stdio.h>
__global__ void helloFromGPU(void) { // The qualifier __global__ tells the compiler
// that the function will be called from the CPU and executed on the GPU.
printf("Hello World from GPU from thread %d\n", threadIdx.x);
}
int main(void) {
printf("Hello World from CPU!\n");
// Triple angle brackets mark a call from the host thread to the code on the device side. A kernel is
// executed by an array of threads and all threads run the same code. The parameters within the triple
// angle brackets are the execution configuration, which specifies how many threads will execute the
// kernel. In this example, you will run 10 GPU threads.
helloFromGPU <<<1, 10>>>();
cudaDeviceReset(); // The function cudaDeviceReset() will explicitly destroy and
// clean up all resources associated with the current device in the current process.
return 0;
}
|
aad0bfca82f393af78c631fe79b3dcdca789f2b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/cvm_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
template <typename T>
__global__ void CvmComputeKernel(const bool use_cvm, const int64_t item_width,
const T* X, T* Y, int64_t numel) {
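  // When use_cvm is set, the first two columns of each row (presumably the CVM show/click statistics)
  // are log-transformed in place; otherwise they are dropped and only the remaining columns are copied to Y.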
CUDA_KERNEL_LOOP(i, numel) {
if (use_cvm) {
if (i % item_width == 0) {
Y[i] = log(X[i] + 1);
} else if (i % item_width == 1) {
Y[i] = log(X[i] + 1) - log(X[i - 1] + 1);
} else {
Y[i] = X[i];
}
} else {
Y[i] = X[i / (item_width - 2) * item_width + i % (item_width - 2) + 2];
}
}
}
template <typename T>
__global__ void CvmGradComputeKernel(const bool use_cvm,
const int64_t item_width, const T* CVM,
const T* DY, T* DX, bool has_lod,
const size_t* lod, size_t lod_size,
int64_t numel) {
CUDA_KERNEL_LOOP(i, numel) {
int offset = i % item_width;
if (offset <= 1) {
int cvm_id = i / item_width;
if (has_lod) {
int low = 1;
int high = lod_size - 1;
while (low < high) {
int mid = (low + high) / 2;
if (cvm_id < lod[mid])
high = mid;
else
low = mid + 1;
}
cvm_id = low - 1;
}
DX[i] = CVM[2 * cvm_id + offset];
} else {
if (use_cvm) {
DX[i] = DY[i];
} else {
DX[i] = DY[i / item_width * (item_width - 2) + i % item_width - 2];
}
}
}
}
template <typename T>
class CVMCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
const auto* x = context.Input<LoDTensor>("X");
const T* x_data = x->data<T>();
auto batch_size = x->dims()[0];
auto numel = x->numel();
auto item_size = numel / batch_size;
auto use_cvm = context.Attr<bool>("use_cvm");
auto* y = context.Output<LoDTensor>("Y");
T* y_data = y->mutable_data<T>(context.GetPlace());
// for Input X do not have Lod Information.
auto stream =
context.template device_context<platform::CUDADeviceContext>().stream();
if (x->NumLevels() == 0) {
hipLaunchKernelGGL(( CvmComputeKernel), (numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
use_cvm, item_size, x_data, y_data, y->numel());
} else {
auto lod = x->lod()[0];
PADDLE_ENFORCE_EQ(
batch_size, lod[lod.size() - 1],
platform::errors::PreconditionNotMet(
"Input(X)'s dim[0] must be equal to last element of lod"));
hipLaunchKernelGGL(( CvmComputeKernel), (numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
use_cvm, item_size, x_data, y_data, y->numel());
}
}
};
template <typename T>
class CVMGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* dx = context.Output<LoDTensor>(framework::GradVarName("X"));
T* dx_data = dx->mutable_data<T>(context.GetPlace());
const Tensor* cvm = context.Input<Tensor>("CVM");
const T* cvm_data = cvm->data<T>();
const auto* dOut =
context.Input<framework::LoDTensor>(framework::GradVarName("Y"));
const T* dout_data = dOut->data<T>();
auto use_cvm = context.Attr<bool>("use_cvm");
auto offset = 2;
auto batch_size = dx->dims()[0];
auto dx_numel = dx->numel();
auto item_size = dx_numel / batch_size;
// for Input X do not have Lod Information.
auto stream =
context.template device_context<platform::CUDADeviceContext>().stream();
if (dx->NumLevels() == 0) {
hipLaunchKernelGGL(( CvmGradComputeKernel), (dx_numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
use_cvm, item_size, cvm_data, dout_data, dx_data, false, NULL, 0,
dx_numel);
} else {
auto lod = dx->lod()[0];
PADDLE_ENFORCE_EQ(
batch_size, lod[lod.size() - 1],
platform::errors::PreconditionNotMet(
"Output(X@GRAD)'s dim[0] must be equal to last element of lod"));
hipLaunchKernelGGL(( CvmGradComputeKernel), (dx_numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
dim3(PADDLE_CUDA_NUM_THREADS), 0, stream,
use_cvm, item_size, cvm_data, dout_data, dx_data, true,
lod.CUDAData(context.GetPlace()), lod.size(), dx_numel);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(cvm, ops::CVMCUDAKernel<float>,
ops::CVMCUDAKernel<double>);
REGISTER_OP_CUDA_KERNEL(cvm_grad, ops::CVMGradCUDAKernel<float>,
ops::CVMGradCUDAKernel<double>);
| aad0bfca82f393af78c631fe79b3dcdca789f2b9.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/cvm_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
using platform::PADDLE_CUDA_NUM_THREADS;
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
template <typename T>
__global__ void CvmComputeKernel(const bool use_cvm, const int64_t item_width,
const T* X, T* Y, int64_t numel) {
CUDA_KERNEL_LOOP(i, numel) {
if (use_cvm) {
if (i % item_width == 0) {
Y[i] = log(X[i] + 1);
} else if (i % item_width == 1) {
Y[i] = log(X[i] + 1) - log(X[i - 1] + 1);
} else {
Y[i] = X[i];
}
} else {
Y[i] = X[i / (item_width - 2) * item_width + i % (item_width - 2) + 2];
}
}
}
template <typename T>
__global__ void CvmGradComputeKernel(const bool use_cvm,
const int64_t item_width, const T* CVM,
const T* DY, T* DX, bool has_lod,
const size_t* lod, size_t lod_size,
int64_t numel) {
CUDA_KERNEL_LOOP(i, numel) {
int offset = i % item_width;
if (offset <= 1) {
int cvm_id = i / item_width;
if (has_lod) {
int low = 1;
int high = lod_size - 1;
while (low < high) {
int mid = (low + high) / 2;
if (cvm_id < lod[mid])
high = mid;
else
low = mid + 1;
}
cvm_id = low - 1;
}
DX[i] = CVM[2 * cvm_id + offset];
} else {
if (use_cvm) {
DX[i] = DY[i];
} else {
DX[i] = DY[i / item_width * (item_width - 2) + i % item_width - 2];
}
}
}
}
template <typename T>
class CVMCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
const auto* x = context.Input<LoDTensor>("X");
const T* x_data = x->data<T>();
auto batch_size = x->dims()[0];
auto numel = x->numel();
auto item_size = numel / batch_size;
auto use_cvm = context.Attr<bool>("use_cvm");
auto* y = context.Output<LoDTensor>("Y");
T* y_data = y->mutable_data<T>(context.GetPlace());
// for Input X do not have Lod Information.
auto stream =
context.template device_context<platform::CUDADeviceContext>().stream();
if (x->NumLevels() == 0) {
CvmComputeKernel<<<(numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
use_cvm, item_size, x_data, y_data, y->numel());
} else {
auto lod = x->lod()[0];
PADDLE_ENFORCE_EQ(
batch_size, lod[lod.size() - 1],
platform::errors::PreconditionNotMet(
"Input(X)'s dim[0] must be equal to last element of lod"));
CvmComputeKernel<<<(numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
use_cvm, item_size, x_data, y_data, y->numel());
}
}
};
template <typename T>
class CVMGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* dx = context.Output<LoDTensor>(framework::GradVarName("X"));
T* dx_data = dx->mutable_data<T>(context.GetPlace());
const Tensor* cvm = context.Input<Tensor>("CVM");
const T* cvm_data = cvm->data<T>();
const auto* dOut =
context.Input<framework::LoDTensor>(framework::GradVarName("Y"));
const T* dout_data = dOut->data<T>();
auto use_cvm = context.Attr<bool>("use_cvm");
auto offset = 2;
auto batch_size = dx->dims()[0];
auto dx_numel = dx->numel();
auto item_size = dx_numel / batch_size;
// for Input X do not have Lod Information.
auto stream =
context.template device_context<platform::CUDADeviceContext>().stream();
if (dx->NumLevels() == 0) {
CvmGradComputeKernel<<<(dx_numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
use_cvm, item_size, cvm_data, dout_data, dx_data, false, NULL, 0,
dx_numel);
} else {
auto lod = dx->lod()[0];
PADDLE_ENFORCE_EQ(
batch_size, lod[lod.size() - 1],
platform::errors::PreconditionNotMet(
"Output(X@GRAD)'s dim[0] must be equal to last element of lod"));
CvmGradComputeKernel<<<(dx_numel + PADDLE_CUDA_NUM_THREADS - 1) /
PADDLE_CUDA_NUM_THREADS,
PADDLE_CUDA_NUM_THREADS, 0, stream>>>(
use_cvm, item_size, cvm_data, dout_data, dx_data, true,
lod.CUDAData(context.GetPlace()), lod.size(), dx_numel);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(cvm, ops::CVMCUDAKernel<float>,
ops::CVMCUDAKernel<double>);
REGISTER_OP_CUDA_KERNEL(cvm_grad, ops::CVMGradCUDAKernel<float>,
ops::CVMGradCUDAKernel<double>);
|
5983310a85644ec42d758c0cd8642ca104c4ef6f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Based on CSC materials from:
*
* https://github.com/csc-training/openacc/tree/master/exercises/heat
*
*/
#include <algorithm>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "pngwriter.h"
#define BLOCK_SIZE_X 16
#define BLOCK_SIZE_Y 16
/* Convert 2D index layout to unrolled 1D layout
*
* \param[in] i Row index
* \param[in] j Column index
* \param[in] width The width of the area
*
* \returns An index in the unrolled 1D array.
*/
int __host__ __device__ getIndex(const int i, const int j, const int width)
{
return i*width + j;
}
__global__ void evolve_kernel(const float* Un, float* Unp1, const int nx, const int ny, const float dx2, const float dy2, const float aTimesDt)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i > 0 && i < nx - 1)
{
int j = threadIdx.y + blockIdx.y*blockDim.y;
if (j > 0 && j < ny - 1)
{
const int index = getIndex(i, j, ny);
float uij = Un[index];
float uim1j = Un[getIndex(i-1, j, ny)];
float uijm1 = Un[getIndex(i, j-1, ny)];
float uip1j = Un[getIndex(i+1, j, ny)];
float uijp1 = Un[getIndex(i, j+1, ny)];
// Explicit scheme
Unp1[index] = uij + aTimesDt * ( (uim1j - 2.0*uij + uip1j)/dx2 + (uijm1 - 2.0*uij + uijp1)/dy2 );
}
}
}
int main()
{
const int nx = 200; // Width of the area
const int ny = 200; // Height of the area
const float a = 0.5; // Diffusion constant
const float dx = 0.01; // Horizontal grid spacing
const float dy = 0.01; // Vertical grid spacing
const float dx2 = dx*dx;
const float dy2 = dy*dy;
const float dt = dx2 * dy2 / (2.0 * a * (dx2 + dy2)); // Largest stable time step
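 // (equivalently, the explicit-scheme stability limit a*dt*(1/dx2 + 1/dy2) <= 1/2 for 2D diffusion)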
const int numSteps = 5000; // Number of time steps
const int outputEvery = 1000; // How frequently to write output image
int numElements = nx*ny;
// Allocate two sets of data for current and next timesteps
float* h_Un = (float*)calloc(numElements, sizeof(float));
 // Initializing the data with a centered disk of radius 1/6 of the width
float radius2 = (nx/6.0) * (nx/6.0);
for (int i = 0; i < nx; i++)
{
for (int j = 0; j < ny; j++)
{
int index = getIndex(i, j, ny);
// Distance of point i, j from the origin
float ds2 = (i - nx/2) * (i - nx/2) + (j - ny/2)*(j - ny/2);
if (ds2 < radius2)
{
h_Un[index] = 65.0;
}
else
{
h_Un[index] = 5.0;
}
}
}
float* d_Un;
float* d_Unp1;
hipMalloc((void**)&d_Un, numElements*sizeof(float));
hipMalloc((void**)&d_Unp1, numElements*sizeof(float));
hipMemcpy(d_Un, h_Un, numElements*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_Unp1, h_Un, numElements*sizeof(float), hipMemcpyHostToDevice);
dim3 numBlocks(nx/BLOCK_SIZE_X + 1, ny/BLOCK_SIZE_Y + 1);
dim3 threadsPerBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
// Timing
clock_t start = clock();
// Main loop
for (int n = 0; n <= numSteps; n++)
{
hipLaunchKernelGGL(( evolve_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_Un, d_Unp1, nx, ny, dx2, dy2, a*dt);
// Write the output if needed
if (n % outputEvery == 0)
{
hipMemcpy(h_Un, d_Un, numElements*sizeof(float), hipMemcpyDeviceToHost);
hipError_t errorCode = hipGetLastError();
if (errorCode != hipSuccess)
{
printf("Cuda error %d: %s\n", errorCode, hipGetErrorString(errorCode));
exit(0);
}
char filename[64];
sprintf(filename, "heat_%04d.png", n);
save_png(h_Un, nx, ny, filename, 'c');
}
std::swap(d_Un, d_Unp1);
}
// Timing
clock_t finish = clock();
printf("It took %f seconds\n", (double)(finish - start) / CLOCKS_PER_SEC);
// Release the memory
free(h_Un);
hipFree(d_Un);
hipFree(d_Unp1);
return 0;
}
| 5983310a85644ec42d758c0cd8642ca104c4ef6f.cu | /*
* Based on CSC materials from:
*
* https://github.com/csc-training/openacc/tree/master/exercises/heat
*
*/
#include <algorithm>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "pngwriter.h"
#define BLOCK_SIZE_X 16
#define BLOCK_SIZE_Y 16
/* Convert 2D index layout to unrolled 1D layout
*
* \param[in] i Row index
* \param[in] j Column index
* \param[in] width The width of the area
*
* \returns An index in the unrolled 1D array.
*/
int __host__ __device__ getIndex(const int i, const int j, const int width)
{
return i*width + j;
}
__global__ void evolve_kernel(const float* Un, float* Unp1, const int nx, const int ny, const float dx2, const float dy2, const float aTimesDt)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i > 0 && i < nx - 1)
{
int j = threadIdx.y + blockIdx.y*blockDim.y;
if (j > 0 && j < ny - 1)
{
const int index = getIndex(i, j, ny);
float uij = Un[index];
float uim1j = Un[getIndex(i-1, j, ny)];
float uijm1 = Un[getIndex(i, j-1, ny)];
float uip1j = Un[getIndex(i+1, j, ny)];
float uijp1 = Un[getIndex(i, j+1, ny)];
// Explicit scheme
Unp1[index] = uij + aTimesDt * ( (uim1j - 2.0*uij + uip1j)/dx2 + (uijm1 - 2.0*uij + uijp1)/dy2 );
}
}
}
int main()
{
const int nx = 200; // Width of the area
const int ny = 200; // Height of the area
const float a = 0.5; // Diffusion constant
const float dx = 0.01; // Horizontal grid spacing
const float dy = 0.01; // Vertical grid spacing
const float dx2 = dx*dx;
const float dy2 = dy*dy;
const float dt = dx2 * dy2 / (2.0 * a * (dx2 + dy2)); // Largest stable time step
const int numSteps = 5000; // Number of time steps
const int outputEvery = 1000; // How frequently to write output image
int numElements = nx*ny;
// Allocate two sets of data for current and next timesteps
float* h_Un = (float*)calloc(numElements, sizeof(float));
 // Initializing the data with a centered disk of radius 1/6 of the width
float radius2 = (nx/6.0) * (nx/6.0);
for (int i = 0; i < nx; i++)
{
for (int j = 0; j < ny; j++)
{
int index = getIndex(i, j, ny);
// Distance of point i, j from the origin
float ds2 = (i - nx/2) * (i - nx/2) + (j - ny/2)*(j - ny/2);
if (ds2 < radius2)
{
h_Un[index] = 65.0;
}
else
{
h_Un[index] = 5.0;
}
}
}
float* d_Un;
float* d_Unp1;
cudaMalloc((void**)&d_Un, numElements*sizeof(float));
cudaMalloc((void**)&d_Unp1, numElements*sizeof(float));
cudaMemcpy(d_Un, h_Un, numElements*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_Unp1, h_Un, numElements*sizeof(float), cudaMemcpyHostToDevice);
dim3 numBlocks(nx/BLOCK_SIZE_X + 1, ny/BLOCK_SIZE_Y + 1);
dim3 threadsPerBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
// Timing
clock_t start = clock();
// Main loop
for (int n = 0; n <= numSteps; n++)
{
evolve_kernel<<<numBlocks, threadsPerBlock>>>(d_Un, d_Unp1, nx, ny, dx2, dy2, a*dt);
// Write the output if needed
if (n % outputEvery == 0)
{
cudaMemcpy(h_Un, d_Un, numElements*sizeof(float), cudaMemcpyDeviceToHost);
cudaError_t errorCode = cudaGetLastError();
if (errorCode != cudaSuccess)
{
printf("Cuda error %d: %s\n", errorCode, cudaGetErrorString(errorCode));
exit(0);
}
char filename[64];
sprintf(filename, "heat_%04d.png", n);
save_png(h_Un, nx, ny, filename, 'c');
}
std::swap(d_Un, d_Unp1);
}
// Timing
clock_t finish = clock();
printf("It took %f seconds\n", (double)(finish - start) / CLOCKS_PER_SEC);
// Release the memory
free(h_Un);
cudaFree(d_Un);
cudaFree(d_Unp1);
return 0;
}
|
c54fcd21f730f7b9f1a450ec3aee4dc36c8bed6f.hip | // !!! This is a file automatically generated by hipify!!!
/*
This is the central piece of code. This file implements a class
(interface in GPUUtil.hh) that takes data in on the cpu side, copies
it to the gpu, and exposes functions (increment and retreive) that let
you perform actions with the GPU
This class will get translated into python via swig
*/
#include <kernel.cu>
#include <manager.hh>
#include <assert.h>
#include <iostream>
using namespace std;
GPUUtil::GPUUtil (int* array_host_, int length_) {
// array_host = array_host_;
// length = length_;
// int size = length * sizeof(int);
// hipError_t err = hipMalloc((void**) &array_device, size);
// assert(err == 0);
// err = hipMemcpy(array_device, array_host, size, hipMemcpyHostToDevice);
// assert(err == 0);
}
void GPUUtil::increment() {
// kernel_add_one<<<64, 64>>>(array_device, length);
// hipError_t err = hipGetLastError();
// assert(err == 0);
}
void GPUUtil::retreive() {
// int size = length * sizeof(int);
// hipMemcpy(array_host, array_device, size, hipMemcpyDeviceToHost);
// hipError_t err = hipGetLastError();
// if(err != 0) { cout << err << endl; assert(0); }
}
void GPUUtil::retreive_to (int* array_host_, int length_) {
// assert(length == length_);
// int size = length * sizeof(int);
// hipMemcpy(array_host_, array_device, size, hipMemcpyDeviceToHost);
// hipError_t err = hipGetLastError();
// assert(err == 0);
}
GPUUtil::~GPUUtil() {
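  // note: with the constructor body commented out above, array_device is never allocated before this hipFree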
hipFree(array_device);
}
| c54fcd21f730f7b9f1a450ec3aee4dc36c8bed6f.cu | /*
This is the central piece of code. This file implements a class
(interface in GPUUtil.hh) that takes data in on the cpu side, copies
it to the gpu, and exposes functions (increment and retreive) that let
you perform actions with the GPU
This class will get translated into python via swig
*/
#include <kernel.cu>
#include <manager.hh>
#include <assert.h>
#include <iostream>
using namespace std;
GPUUtil::GPUUtil (int* array_host_, int length_) {
// array_host = array_host_;
// length = length_;
// int size = length * sizeof(int);
// cudaError_t err = cudaMalloc((void**) &array_device, size);
// assert(err == 0);
// err = cudaMemcpy(array_device, array_host, size, cudaMemcpyHostToDevice);
// assert(err == 0);
}
void GPUUtil::increment() {
// kernel_add_one<<<64, 64>>>(array_device, length);
// cudaError_t err = cudaGetLastError();
// assert(err == 0);
}
void GPUUtil::retreive() {
// int size = length * sizeof(int);
// cudaMemcpy(array_host, array_device, size, cudaMemcpyDeviceToHost);
// cudaError_t err = cudaGetLastError();
// if(err != 0) { cout << err << endl; assert(0); }
}
void GPUUtil::retreive_to (int* array_host_, int length_) {
// assert(length == length_);
// int size = length * sizeof(int);
// cudaMemcpy(array_host_, array_device, size, cudaMemcpyDeviceToHost);
// cudaError_t err = cudaGetLastError();
// assert(err == 0);
}
GPUUtil::~GPUUtil() {
cudaFree(array_device);
}
|
ac25a03fe061a99dba28346616e870cacd19571a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
const int N=64;
__global__ void VecAdd( float *A, float *B, float *C, int Ntot)
{
int i=threadIdx.x;
C[i]=A[i]+B[i];
}
int main()
{
/* pointers to host memory */
float *a, *b, *c;
/* pointers to device memory */
float *a_d, *b_d, *c_d;
int i;
/* Allocate arrays a, b and c on host*/
a = (float*) malloc(N*sizeof(float));
b = (float*) malloc(N*sizeof(float));
c = (float*) malloc(N*sizeof(float));
/* Allocate arrays a_d, b_d and c_d on device*/
hipMalloc ((void **) &a_d, sizeof(float)*N);
hipMalloc ((void **) &b_d, sizeof(float)*N);
hipMalloc ((void **) &c_d, sizeof(float)*N);
/* Initialize arrays a and b */
for (i=0; i<N;i++)
{
a[i]= (float) i;
b[i]= -(float) i;
}
/* Copy data from host memory to device memory */
hipMemcpy(a_d, a, sizeof(float)*N, hipMemcpyHostToDevice);
hipMemcpy(b_d, b, sizeof(float)*N, hipMemcpyHostToDevice);
/* Add arrays a and b, store result in c */
hipLaunchKernelGGL((VecAdd), dim3(1), dim3(N), 0, 0, a_d, b_d, c_d, N);
/* Copy data from device memory to host memory */
hipMemcpy(c, c_d, sizeof(float)*N, hipMemcpyDeviceToHost);
/* Print c */
for (i=0; i<N;i++)
printf(" c[%d]=%f\n",i,c[i]);
/* Free the memory */
free(a); free(b); free(c);
hipFree(a_d); hipFree(b_d);hipFree(c_d);
}
| ac25a03fe061a99dba28346616e870cacd19571a.cu | #include "stdio.h"
const int N=64;
__global__ void VecAdd( float *A, float *B, float *C, int Ntot)
{
int i=threadIdx.x;
C[i]=A[i]+B[i];
}
int main()
{
/* pointers to host memory */
float *a, *b, *c;
/* pointers to device memory */
float *a_d, *b_d, *c_d;
int i;
/* Allocate arrays a, b and c on host*/
a = (float*) malloc(N*sizeof(float));
b = (float*) malloc(N*sizeof(float));
c = (float*) malloc(N*sizeof(float));
/* Allocate arrays a_d, b_d and c_d on device*/
cudaMalloc ((void **) &a_d, sizeof(float)*N);
cudaMalloc ((void **) &b_d, sizeof(float)*N);
cudaMalloc ((void **) &c_d, sizeof(float)*N);
/* Initialize arrays a and b */
for (i=0; i<N;i++)
{
a[i]= (float) i;
b[i]= -(float) i;
}
/* Copy data from host memory to device memory */
cudaMemcpy(a_d, a, sizeof(float)*N, cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b, sizeof(float)*N, cudaMemcpyHostToDevice);
/* Add arrays a and b, store result in c */
VecAdd<<< 1, N >>>(a_d, b_d, c_d, N);
/* Copy data from device memory to host memory */
cudaMemcpy(c, c_d, sizeof(float)*N, cudaMemcpyDeviceToHost);
/* Print c */
for (i=0; i<N;i++)
printf(" c[%d]=%f\n",i,c[i]);
/* Free the memory */
free(a); free(b); free(c);
cudaFree(a_d); cudaFree(b_d);cudaFree(c_d);
}
|
85ae92be36f2cd8434594d58da4c72717a58e700.hip | // !!! This is a file automatically generated by hipify!!!
#include "book.h"
#include "cpu_bitmap.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
#define DIM 1024
#define PI 3.1415926535897932f
__global__ void kernale(unsigned char *pt)
{
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int offerset=x+y*gridDim.x*blockDim.x;
__shared__ float colormem[16][16];
const float peri=128.;
colormem[threadIdx.x][threadIdx.y]=255*(sinf(x*2*PI/peri)+1.0f)*(sinf(y*2*PI/peri)+1.0f)*4.0f;
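 // wait for all threads in the block to finish writing shared memory before any thread reads a cell written by another thread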
__syncthreads();
pt[offerset*4+0]=0;
pt[offerset*4+1]=colormem[15-threadIdx.x][15-threadIdx.y];
pt[offerset*4+2]=0;
pt[offerset*4+3]=255;
}
int main(void)
{
CPUBitmap bitmap(DIM,DIM);
unsigned char *dec_bitmap;
dim3 grids(DIM/16,DIM/16);
dim3 threads(16,16);
HANDLE_ERROR(hipMalloc((void **)&dec_bitmap,bitmap.image_size()));
hipLaunchKernelGGL(( kernale), dim3(grids),dim3(threads), 0, 0, dec_bitmap);
HANDLE_ERROR(hipMemcpy(bitmap.get_ptr(),dec_bitmap,bitmap.image_size(),hipMemcpyDeviceToHost));
bitmap.display_and_exit();
HANDLE_ERROR(hipFree(dec_bitmap));
if (getchar()==27)
{
exit(0);
}
return 0;
} | 85ae92be36f2cd8434594d58da4c72717a58e700.cu | #include "book.h"
#include "cpu_bitmap.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#define DIM 1024
#define PI 3.1415926535897932f
__global__ void kernale(unsigned char *pt)
{
int x=threadIdx.x+blockIdx.x*blockDim.x;
int y=threadIdx.y+blockIdx.y*blockDim.y;
int offerset=x+y*gridDim.x*blockDim.x;
__shared__ float colormem[16][16];
const float peri=128.;
colormem[threadIdx.x][threadIdx.y]=255*(sinf(x*2*PI/peri)+1.0f)*(sinf(y*2*PI/peri)+1.0f)*4.0f;
__syncthreads();
pt[offerset*4+0]=0;
pt[offerset*4+1]=colormem[15-threadIdx.x][15-threadIdx.y];
pt[offerset*4+2]=0;
pt[offerset*4+3]=255;
}
int main(void)
{
CPUBitmap bitmap(DIM,DIM);
unsigned char *dec_bitmap;
dim3 grids(DIM/16,DIM/16);
dim3 threads(16,16);
HANDLE_ERROR(cudaMalloc((void **)&dec_bitmap,bitmap.image_size()));
kernale<<<grids,threads>>>(dec_bitmap);
HANDLE_ERROR(cudaMemcpy(bitmap.get_ptr(),dec_bitmap,bitmap.image_size(),cudaMemcpyDeviceToHost));
bitmap.display_and_exit();
HANDLE_ERROR(cudaFree(dec_bitmap));
if (getchar()==27)
{
exit(0);
}
return 0;
} |
c45e70e2130201f4b09d0133bad253afa82e665f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <windows.h>
#include <d2d1.h>
#include <d2d1helper.h>
#pragma comment(lib, "d2d1")
//*****double buffering*****
#define SCREEN_WIDTH 1920
#define SCREEN_HEIGHT 1000
D2D1_RECT_U display_area;
ID2D1Bitmap *memkeptarolo = NULL;
unsigned int image_data[SCREEN_WIDTH * SCREEN_HEIGHT];
float zbuffer[SCREEN_WIDTH*SCREEN_HEIGHT];
typedef struct Vec3f {
float x, y, z;
};
//**************************************
ID2D1Factory* pD2DFactory = NULL;
ID2D1HwndRenderTarget* pRT = NULL;
#define HIBA_00 TEXT("Error:Program initialisation process.")
HINSTANCE hInstGlob;
int SajatiCmdShow;
char szClassName[] = "WindowsApp";
HWND Form1; //window handle
LRESULT CALLBACK WndProc0(HWND, UINT, WPARAM, LPARAM);
void D2D_drawing(ID2D1HwndRenderTarget* pRT);
//*****double buffering*****
void create_main_buffer(void);
void cleanup_main_buffer(void);
void CleanUp_Zbuffer(void);
void swap_main_buffer(void);
//**************************************
//*****Drawing algorithms*****
void SetPixel_Zbuffer(int x1, int y1, int z1, int color);
void DrawLine_Zbuffer(int x0, int y0, int z0, int x1, int y1, int z1, int color);
void FillTriangle_Zbuffer(int x1, int y1, int z1, int x2, int y2, int z2, int x3, int y3, int z3, int color);
//**************************************
//*********************************
//The main entry point of our program
//*********************************
int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, PSTR szCmdLine, int iCmdShow)
{
static TCHAR szAppName[] = TEXT("StdWinClassName");
HWND hwnd;
MSG msg;
WNDCLASS wndclass0;
SajatiCmdShow = iCmdShow;
hInstGlob = hInstance;
//*********************************
//Preparing Windows class
//*********************************
wndclass0.style = CS_HREDRAW | CS_VREDRAW;
wndclass0.lpfnWndProc = WndProc0;
wndclass0.cbClsExtra = 0;
wndclass0.cbWndExtra = 0;
wndclass0.hInstance = hInstance;
wndclass0.hIcon = LoadIcon(NULL, IDI_APPLICATION);
wndclass0.hCursor = LoadCursor(NULL, IDC_ARROW);
wndclass0.hbrBackground = (HBRUSH)GetStockObject(LTGRAY_BRUSH);
wndclass0.lpszMenuName = NULL;
wndclass0.lpszClassName = TEXT("WIN0");
//*********************************
//Registering our windows class
//*********************************
if (!RegisterClass(&wndclass0))
{
MessageBox(NULL, HIBA_00, TEXT("Program Start"), MB_ICONERROR);
return 0;
}
//*********************************
//Creating the window
//*********************************
Form1 = CreateWindow(TEXT("WIN0"),
TEXT("CUDA - DIRECT2D"),
(WS_OVERLAPPED | WS_SYSMENU | WS_THICKFRAME | WS_MAXIMIZEBOX | WS_MINIMIZEBOX),
50,
50,
SCREEN_WIDTH,
SCREEN_HEIGHT,
NULL,
NULL,
hInstance,
NULL);
//*********************************
//Displaying the window
//*********************************
ShowWindow(Form1, SajatiCmdShow);
UpdateWindow(Form1);
//*********************************
//Activating the message processing for our window
//*********************************
while (GetMessage(&msg, NULL, 0, 0))
{
TranslateMessage(&msg);
DispatchMessage(&msg);
}
return msg.wParam;
}
//*********************************
//The window's callback function: handling events
//*********************************
LRESULT CALLBACK WndProc0(HWND hwnd, UINT message, WPARAM wParam, LPARAM lParam)
{
HDC hdc;
PAINTSTRUCT ps;
switch (message)
{
//*********************************
//When creating the window
//*********************************
case WM_CREATE:
D2D1CreateFactory(D2D1_FACTORY_TYPE_SINGLE_THREADED, &pD2DFactory);
pD2DFactory->CreateHwndRenderTarget(
D2D1::RenderTargetProperties(),
D2D1::HwndRenderTargetProperties(
hwnd, D2D1::SizeU(SCREEN_WIDTH, SCREEN_HEIGHT)),
&pRT);
create_main_buffer();
return 0;
//*********************************
//to eliminate color flickering
//*********************************
case WM_ERASEBKGND:
return (LRESULT)1;
//*********************************
//Repainting the client area of the window
//*********************************
case WM_PAINT:
hdc = BeginPaint(hwnd, &ps);
EndPaint(hwnd, &ps);
D2D_drawing(pRT);
return 0;
//*********************************
//Closing the window, freeing resources
//*********************************
case WM_CLOSE:
pRT->Release();
pD2DFactory->Release();
DestroyWindow(hwnd);
return 0;
//*********************************
//Destroying the window
//*********************************
case WM_DESTROY:
PostQuitMessage(0);
return 0;
}
return DefWindowProc(hwnd, message, wParam, lParam);
}
void D2D_drawing(ID2D1HwndRenderTarget* pRT)
{
cleanup_main_buffer();
CleanUp_Zbuffer();
SetPixel_Zbuffer(100,100,0,RGB(0,0,0));
FillTriangle_Zbuffer(0, 0, 10, 600, 80, 20, 50, 400, 20, RGB(200, 200, 200));
FillTriangle_Zbuffer(100, 30, 1, 200, 80, 1, 50, 90, 1, RGB(250, 0, 0));
DrawLine_Zbuffer(10,10,10,300,80,10, RGB(0, 0, 0));
swap_main_buffer();
}
void create_main_buffer(void)
{
pRT->CreateBitmap(D2D1::SizeU(SCREEN_WIDTH, SCREEN_HEIGHT),
D2D1::BitmapProperties(D2D1::PixelFormat(DXGI_FORMAT_B8G8R8A8_UNORM,
D2D1_ALPHA_MODE_IGNORE)), &memkeptarolo);
}
void cleanup_main_buffer(void)
{
memset(image_data, 255, SCREEN_HEIGHT*SCREEN_WIDTH * sizeof(unsigned int));
}
void CleanUp_Zbuffer(void)
{
int i, j;
for (i = 0; i < SCREEN_WIDTH; ++i)
for (j = 0; j < SCREEN_HEIGHT; ++j)
{
zbuffer[(j * SCREEN_WIDTH) + i] = 9999999;
}
}
void swap_main_buffer(void)
{
display_area.left = 0;
display_area.top = 0;
display_area.right = SCREEN_WIDTH;
display_area.bottom = SCREEN_HEIGHT;
memkeptarolo->CopyFromMemory(&display_area, image_data, SCREEN_WIDTH * sizeof(unsigned int));
pRT->BeginDraw();
pRT->DrawBitmap(memkeptarolo, D2D1::RectF(0.0f, 0.0f, SCREEN_WIDTH, SCREEN_HEIGHT), 1.0f, D2D1_BITMAP_INTERPOLATION_MODE_NEAREST_NEIGHBOR, NULL);
pRT->EndDraw();
}
void SetPixel_Zbuffer(int x1, int y1, int z1, int color)
{
int offset = (y1 * SCREEN_WIDTH) + x1;
if (zbuffer[offset] > z1)
{
zbuffer[offset] = z1;
image_data[offset] = color;
}
}
void DrawLine_Zbuffer(int x0, int y0, int z0, int x1, int y1, int z1, int color)
{
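 // Bresenham-style line rasterization with linearly interpolated depth tested against the z-buffer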
bool flip = false;
int swap, offset;
float depth_value;
if (y1 < 0 || y0 < 0) return;
if (abs(x0 - x1) < abs(y0 - y1))
{
swap = x0;
x0 = y0;
y0 = swap;
swap = x1;
x1 = y1;
y1 = swap;
flip = true;
}
if (x0 > x1)
{
swap = x0;
x0 = x1;
x1 = swap;
swap = y0;
y0 = y1;
y1 = swap;
}
int dx = x1 - x0;
int dy = y1 - y0;
int marker1 = abs(dy) * 2;
int marker2 = 0;
int y = y0, x;
for (x = x0; x <= x1; ++x)
{
if (z0 == z1) depth_value = z0;
else
{
int s1 = abs(x1 - x0);
int s2 = abs(z0 - z1);
depth_value = (float)z1 + (float)((((float)x - (float)x0) / (float)s1) * (float)s2);
}
if (flip)
{
offset = (x * SCREEN_WIDTH);
if (zbuffer[offset + y] > depth_value)
{
zbuffer[offset + y] = depth_value;
image_data[offset + y] = color;
}
}
else
{
offset = (y * SCREEN_WIDTH);
if (zbuffer[offset + x] > depth_value)
{
zbuffer[offset + x] = depth_value;
image_data[offset + x] = color;
}
}
marker2 += marker1;
if (marker2 > dx)
{
y += (y1 > y0 ? 1 : -1);
marker2 -= dx * 2;
}
}
}
void FillTriangle_Zbuffer(int x1, int y1, int z1, int x2, int y2, int z2, int x3, int y3, int z3, int color)
{
int Px, Py, depth_value, boxminx = SCREEN_WIDTH - 1, boxminy = SCREEN_HEIGHT - 1, boxmaxx = 0, boxmaxy = 0;
int offset;
Vec3f interpolate, helper_vector;
if (y1 == y2 && y1 == y3) return;
if (x1 == x2 && x1 == x3) return;
boxminx = __min(x1, x2); boxminx = __min(boxminx, x3);
boxminy = __min(y1, y2); boxminy = __min(boxminy, y3);
boxmaxx = __max(x1, x2); boxmaxx = __max(boxmaxx, x3);
boxmaxy = __max(y1, y2); boxmaxy = __max(boxmaxy, y3);
for (Px = boxminx; Px <= boxmaxx; ++Px)
{
for (Py = boxminy; Py <= boxmaxy; ++Py)
{
offset = Px + (Py * SCREEN_WIDTH);
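 // barycentric coordinates of (Px, Py) with respect to the triangle; any negative component means the pixel lies outside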
helper_vector.x = (x2 - x1) * (y1 - Py) - (x1 - Px) * (y2 - y1);
helper_vector.y = (x1 - Px) * (y3 - y1) - (x3 - x1) * (y1 - Py);
helper_vector.z = (x3 - x1) * (y2 - y1) - (x2 - x1) * (y3 - y1);
if (abs((int)helper_vector.z) < 1) { interpolate.x = -1; interpolate.y = 0; interpolate.z = 0; }
else
{
interpolate.x = 1.f - (helper_vector.x + helper_vector.y) / helper_vector.z;
interpolate.y = helper_vector.y / helper_vector.z;
interpolate.z = helper_vector.x / helper_vector.z;
}
if (interpolate.x < 0 || interpolate.y < 0 || interpolate.z < 0) continue;
depth_value = (z1*interpolate.x) + (z2*interpolate.y) + (z3*interpolate.z);
if (zbuffer[offset] > depth_value)
{
zbuffer[offset] = depth_value;
image_data[offset] = color;
}
}
}
} | c45e70e2130201f4b09d0133bad253afa82e665f.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <windows.h>
#include <d2d1.h>
#include <d2d1helper.h>
#pragma comment(lib, "d2d1")
//*****double buffering*****
#define SCREEN_WIDTH 1920
#define SCREEN_HEIGHT 1000
D2D1_RECT_U display_area;
ID2D1Bitmap *memkeptarolo = NULL;
unsigned int image_data[SCREEN_WIDTH * SCREEN_HEIGHT];
float zbuffer[SCREEN_WIDTH*SCREEN_HEIGHT];
typedef struct Vec3f {
float x, y, z;
};
//**************************************
ID2D1Factory* pD2DFactory = NULL;
ID2D1HwndRenderTarget* pRT = NULL;
#define HIBA_00 TEXT("Error:Program initialisation process.")
HINSTANCE hInstGlob;
int SajatiCmdShow;
char szClassName[] = "WindowsApp";
HWND Form1; //window handle
LRESULT CALLBACK WndProc0(HWND, UINT, WPARAM, LPARAM);
void D2D_drawing(ID2D1HwndRenderTarget* pRT);
//*****double buffering*****
void create_main_buffer(void);
void cleanup_main_buffer(void);
void CleanUp_Zbuffer(void);
void swap_main_buffer(void);
//**************************************
//*****Drawing algorithms*****
void SetPixel_Zbuffer(int x1, int y1, int z1, int color);
void DrawLine_Zbuffer(int x0, int y0, int z0, int x1, int y1, int z1, int color);
void FillTriangle_Zbuffer(int x1, int y1, int z1, int x2, int y2, int z2, int x3, int y3, int z3, int color);
//**************************************
//*********************************
//The main entry point of our program
//*********************************
int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, PSTR szCmdLine, int iCmdShow)
{
static TCHAR szAppName[] = TEXT("StdWinClassName");
HWND hwnd;
MSG msg;
WNDCLASS wndclass0;
SajatiCmdShow = iCmdShow;
hInstGlob = hInstance;
//*********************************
//Preparing Windows class
//*********************************
wndclass0.style = CS_HREDRAW | CS_VREDRAW;
wndclass0.lpfnWndProc = WndProc0;
wndclass0.cbClsExtra = 0;
wndclass0.cbWndExtra = 0;
wndclass0.hInstance = hInstance;
wndclass0.hIcon = LoadIcon(NULL, IDI_APPLICATION);
wndclass0.hCursor = LoadCursor(NULL, IDC_ARROW);
wndclass0.hbrBackground = (HBRUSH)GetStockObject(LTGRAY_BRUSH);
wndclass0.lpszMenuName = NULL;
wndclass0.lpszClassName = TEXT("WIN0");
//*********************************
//Registering our windows class
//*********************************
if (!RegisterClass(&wndclass0))
{
MessageBox(NULL, HIBA_00, TEXT("Program Start"), MB_ICONERROR);
return 0;
}
//*********************************
//Creating the window
//*********************************
Form1 = CreateWindow(TEXT("WIN0"),
TEXT("CUDA - DIRECT2D"),
(WS_OVERLAPPED | WS_SYSMENU | WS_THICKFRAME | WS_MAXIMIZEBOX | WS_MINIMIZEBOX),
50,
50,
SCREEN_WIDTH,
SCREEN_HEIGHT,
NULL,
NULL,
hInstance,
NULL);
//*********************************
//Displaying the window
//*********************************
ShowWindow(Form1, SajatiCmdShow);
UpdateWindow(Form1);
//*********************************
//Activating the message processing for our window
//*********************************
while (GetMessage(&msg, NULL, 0, 0))
{
TranslateMessage(&msg);
DispatchMessage(&msg);
}
return msg.wParam;
}
//*********************************
//The window's callback function: handling events
//*********************************
LRESULT CALLBACK WndProc0(HWND hwnd, UINT message, WPARAM wParam, LPARAM lParam)
{
HDC hdc;
PAINTSTRUCT ps;
switch (message)
{
//*********************************
//When creating the window
//*********************************
case WM_CREATE:
D2D1CreateFactory(D2D1_FACTORY_TYPE_SINGLE_THREADED, &pD2DFactory);
pD2DFactory->CreateHwndRenderTarget(
D2D1::RenderTargetProperties(),
D2D1::HwndRenderTargetProperties(
hwnd, D2D1::SizeU(SCREEN_WIDTH, SCREEN_HEIGHT)),
&pRT);
create_main_buffer();
return 0;
//*********************************
//to eliminate color flickering
//*********************************
case WM_ERASEBKGND:
return (LRESULT)1;
//*********************************
//Repainting the client area of the window
//*********************************
case WM_PAINT:
hdc = BeginPaint(hwnd, &ps);
EndPaint(hwnd, &ps);
D2D_drawing(pRT);
return 0;
//*********************************
//Closing the window, freeing resources
//*********************************
case WM_CLOSE:
pRT->Release();
pD2DFactory->Release();
DestroyWindow(hwnd);
return 0;
//*********************************
//Destroying the window
//*********************************
case WM_DESTROY:
PostQuitMessage(0);
return 0;
}
return DefWindowProc(hwnd, message, wParam, lParam);
}
void D2D_drawing(ID2D1HwndRenderTarget* pRT)
{
cleanup_main_buffer();
CleanUp_Zbuffer();
SetPixel_Zbuffer(100,100,0,RGB(0,0,0));
FillTriangle_Zbuffer(0, 0, 10, 600, 80, 20, 50, 400, 20, RGB(200, 200, 200));
FillTriangle_Zbuffer(100, 30, 1, 200, 80, 1, 50, 90, 1, RGB(250, 0, 0));
DrawLine_Zbuffer(10,10,10,300,80,10, RGB(0, 0, 0));
swap_main_buffer();
}
void create_main_buffer(void)
{
pRT->CreateBitmap(D2D1::SizeU(SCREEN_WIDTH, SCREEN_HEIGHT),
D2D1::BitmapProperties(D2D1::PixelFormat(DXGI_FORMAT_B8G8R8A8_UNORM,
D2D1_ALPHA_MODE_IGNORE)), &memkeptarolo);
}
void cleanup_main_buffer(void)
{
memset(image_data, 255, SCREEN_HEIGHT*SCREEN_WIDTH * sizeof(unsigned int));
}
void CleanUp_Zbuffer(void)
{
int i, j;
for (i = 0; i < SCREEN_WIDTH; ++i)
for (j = 0; j < SCREEN_HEIGHT; ++j)
{
zbuffer[(j * SCREEN_WIDTH) + i] = 9999999;
}
}
void swap_main_buffer(void)
{
display_area.left = 0;
display_area.top = 0;
display_area.right = SCREEN_WIDTH;
display_area.bottom = SCREEN_HEIGHT;
memkeptarolo->CopyFromMemory(&display_area, image_data, SCREEN_WIDTH * sizeof(unsigned int));
pRT->BeginDraw();
pRT->DrawBitmap(memkeptarolo, D2D1::RectF(0.0f, 0.0f, SCREEN_WIDTH, SCREEN_HEIGHT), 1.0f, D2D1_BITMAP_INTERPOLATION_MODE_NEAREST_NEIGHBOR, NULL);
pRT->EndDraw();
}
void SetPixel_Zbuffer(int x1, int y1, int z1, int color)
{
int offset = (y1 * SCREEN_WIDTH) + x1;
if (zbuffer[offset] > z1)
{
zbuffer[offset] = z1;
image_data[offset] = color;
}
}
void DrawLine_Zbuffer(int x0, int y0, int z0, int x1, int y1, int z1, int color)
{
bool flip = false;
int swap, offset;
float depth_value;
if (y1 < 0 || y0 < 0) return;
if (abs(x0 - x1) < abs(y0 - y1))
{
swap = x0;
x0 = y0;
y0 = swap;
swap = x1;
x1 = y1;
y1 = swap;
flip = true;
}
if (x0 > x1)
{
swap = x0;
x0 = x1;
x1 = swap;
swap = y0;
y0 = y1;
y1 = swap;
}
int dx = x1 - x0;
int dy = y1 - y0;
int marker1 = abs(dy) * 2;
int marker2 = 0;
int y = y0, x;
for (x = x0; x <= x1; ++x)
{
if (z0 == z1) depth_value = z0;
else
{
int s1 = abs(x1 - x0);
int s2 = abs(z0 - z1);
depth_value = (float)z1 + (float)((((float)x - (float)x0) / (float)s1) * (float)s2);
}
if (flip)
{
offset = (x * SCREEN_WIDTH);
if (zbuffer[offset + y] > depth_value)
{
zbuffer[offset + y] = depth_value;
image_data[offset + y] = color;
}
}
else
{
offset = (y * SCREEN_WIDTH);
if (zbuffer[offset + x] > depth_value)
{
zbuffer[offset + x] = depth_value;
image_data[offset + x] = color;
}
}
marker2 += marker1;
if (marker2 > dx)
{
y += (y1 > y0 ? 1 : -1);
marker2 -= dx * 2;
}
}
}
void FillTriangle_Zbuffer(int x1, int y1, int z1, int x2, int y2, int z2, int x3, int y3, int z3, int color)
{
int Px, Py, depth_value, boxminx = SCREEN_WIDTH - 1, boxminy = SCREEN_HEIGHT - 1, boxmaxx = 0, boxmaxy = 0;
int offset;
Vec3f interpolate, helper_vector;
if (y1 == y2 && y1 == y3) return;
if (x1 == x2 && x1 == x3) return;
boxminx = __min(x1, x2); boxminx = __min(boxminx, x3);
boxminy = __min(y1, y2); boxminy = __min(boxminy, y3);
boxmaxx = __max(x1, x2); boxmaxx = __max(boxmaxx, x3);
boxmaxy = __max(y1, y2); boxmaxy = __max(boxmaxy, y3);
for (Px = boxminx; Px <= boxmaxx; ++Px)
{
for (Py = boxminy; Py <= boxmaxy; ++Py)
{
offset = Px + (Py * SCREEN_WIDTH);
helper_vector.x = (x2 - x1) * (y1 - Py) - (x1 - Px) * (y2 - y1);
helper_vector.y = (x1 - Px) * (y3 - y1) - (x3 - x1) * (y1 - Py);
helper_vector.z = (x3 - x1) * (y2 - y1) - (x2 - x1) * (y3 - y1);
if (abs((int)helper_vector.z) < 1) { interpolate.x = -1; interpolate.y = 0; interpolate.z = 0; }
else
{
interpolate.x = 1.f - (helper_vector.x + helper_vector.y) / helper_vector.z;
interpolate.y = helper_vector.y / helper_vector.z;
interpolate.z = helper_vector.x / helper_vector.z;
}
if (interpolate.x < 0 || interpolate.y < 0 || interpolate.z < 0) continue;
depth_value = (z1*interpolate.x) + (z2*interpolate.y) + (z3*interpolate.z);
if (zbuffer[offset] > depth_value)
{
zbuffer[offset] = depth_value;
image_data[offset] = color;
}
}
}
} |
c6996c7dac8c58b0e4fcde6d4bbf8758fa54ec99.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1999-2017 Alibaba Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "xdl/core/lib/unique.h"
#include "xdl/core/framework/op_registry.h"
#include "xdl/core/lib/atomic.h"
#include "xdl/core/lib/binary_search.h"
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <thrust/device_ptr.h>
#include <thrust/pair.h>
#include <thrust/system/hip/execution_policy.h>
#include "xdl/core/framework/gpu/gpu_device.h"
#include "xdl/core/lib/common_defines.h"
#include <chrono>
namespace xdl {
namespace functor {
template <typename T, typename I>
struct UniqueFunctor<GpuDevice, T, I> {
void operator()(GpuDevice* d, const Tensor& in, const Tensor& segment, Tensor* out, Tensor* out_index, Tensor* sample_index, Tensor* sample_segment);
};
template <typename T>
struct Less {
__host__ __device__ bool operator()(const thrust::pair<T, T>& l,
const thrust::pair<T, T>& r) {
return l.first < r.first || (l.first == r.first && l.second < r.second);
}
};
template <typename T>
struct Equal {
__host__ __device__ bool operator()(const thrust::pair<T, T>& l,
const thrust::pair<T, T>& r) {
return l.first == r.first && l.second == r.second;
}
};
template <typename T, typename I>
__global__ void FindIndex(const T* src, size_t sz, const T* uniqs,
size_t uniq_sz, I* out_index, I* sample_segment) {
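  // binary-search each input id in the sorted unique list and count its occurrences with an atomic add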
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= sz) return;
out_index[idx] = static_cast<I>(BinarySearch(uniqs, uniq_sz, src[idx]));
common::gpu_atomic_add<I>(1, sample_segment + out_index[idx]);
}
template <typename T, typename I>
__global__ void FindPairIndex(const T* src, size_t sz, const T* uniqs,
size_t uniq_sz, I* out_index, I* sample_segment) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= sz) return;
out_index[idx] = static_cast<I>(BinarySearch2(uniqs, uniq_sz,
src[2*idx], src[2*idx+1]));
common::gpu_atomic_add<I>(1, sample_segment + out_index[idx]);
}
template <typename I>
__global__ void FindSampleIndex(const I* segment, const I* out_index, size_t sz, size_t uniq_size, size_t segment_size,
I* cur, I* sample_index, I* sample_segment) {
cur[0] = 0;
for (size_t i = 1; i < uniq_size; ++i) {
cur[i] = sample_segment[i-1];
sample_segment[i] += sample_segment[i-1];
}
I segment_idx = 0;
for (I i = 0; i < sz; ++i) {
while (i == *segment) {
if (++segment_idx > segment_size) return;
++segment;
}
sample_index[cur[out_index[i]]] = segment_idx;
cur[out_index[i]]++;
}
}
template <typename T, typename I>
void UniqueFunctor<GpuDevice, T, I>::operator()(GpuDevice* d,
const Tensor& in,
const Tensor& segment,
Tensor* out,
Tensor* out_index,
Tensor* sample_index,
Tensor* sample_segment) {
hipStream_t stream = d->Stream()->GetInternal();
//CUDA_CHECK(hipStreamSynchronize(stream));
//auto t0 = std::chrono::high_resolution_clock::now();
Tensor temp(d, in.Shape(), in.Type());
*out_index = Tensor(d, TensorShape({in.Shape()[0]}), DataTypeToEnum<I>::v());
*sample_index = Tensor(d, TensorShape({in.Shape()[0]}), DataTypeToEnum<I>::v());
T* ptr_in = in.Raw<T>();
T* ptr_temp = temp.Raw<T>();
CUDA_CHECK(hipMemcpyAsync(ptr_temp,
ptr_in,
in.Shape().NumElements() * sizeof(T),
hipMemcpyDeviceToDevice));
size_t id_num = in.Shape()[0];
size_t id_dim = in.Shape().Size() == 1 ? 1 : in.Shape()[1];
size_t segment_size = segment.Shape()[0];
if (id_dim == 1) {
thrust::device_ptr<T> dptr_temp(ptr_temp), dptr_end;
thrust::sort(thrust::hip::par.on(stream),
dptr_temp, dptr_temp + id_num);
dptr_end = thrust::unique(thrust::hip::par.on(stream),
dptr_temp, dptr_temp + id_num);
size_t uniq_size = dptr_end - dptr_temp;
TensorShape out_shape({uniq_size});
*out = Tensor(d, out_shape, in.Type());
TensorShape sseg_shape({uniq_size});
*sample_segment = Tensor(d, sseg_shape, out_index->Type());
Tensor cur(d, sseg_shape, out_index->Type());
CUDA_CHECK(hipMemsetAsync(sample_segment->Raw<I>(), 0, sizeof(I) * uniq_size, stream));
size_t blocks = CUDA_GET_BLOCKS(id_num);
hipLaunchKernelGGL(( FindIndex<T, I>),
dim3(blocks),
dim3(CUDA_GET_THREADS(id_num, blocks)),
0,
stream, ptr_in, id_num, ptr_temp, uniq_size, out_index->Raw<I>(), sample_segment->Raw<I>());
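    // Note: FindSampleIndex below is launched with a single thread (one block, one thread); it
    // serially turns sample_segment into a running prefix sum and scatters each sample's
    // segment index into sample_index.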
hipLaunchKernelGGL(( FindSampleIndex<I>),
dim3(1),
dim3(1),
0,
stream, segment.Raw<I>(), out_index->Raw<I>(), id_num, uniq_size, segment_size,
cur.Raw<I>(), sample_index->Raw<I>(), sample_segment->Raw<I>());
CUDA_CHECK(hipMemcpyAsync(out->Raw<T>(),
ptr_temp,
out_shape.NumElements() * sizeof(T),
hipMemcpyDeviceToDevice));
} else if (id_dim == 2) {
thrust::pair<T, T>* ptr_pair = reinterpret_cast<thrust::pair<T, T>*>(ptr_temp);
thrust::device_ptr<thrust::pair<T, T>> dptr_temp(ptr_pair), dptr_end;
thrust::sort(thrust::hip::par.on(stream), dptr_temp, dptr_temp + id_num,
Less<T>());
dptr_end = thrust::unique(thrust::hip::par.on(stream), dptr_temp, dptr_temp + id_num,
Equal<T>());
size_t uniq_size = dptr_end - dptr_temp;
TensorShape out_shape({uniq_size, 2});
*out = Tensor(d, out_shape, in.Type());
TensorShape sseg_shape({uniq_size});
*sample_segment = Tensor(d, sseg_shape, out_index->Type());
Tensor cur(d, sseg_shape, out_index->Type());
CUDA_CHECK(hipMemsetAsync(sample_segment->Raw<I>(), 0, sizeof(I) * uniq_size, stream));
size_t blocks = CUDA_GET_BLOCKS(id_num);
hipLaunchKernelGGL(( FindPairIndex<T, I>),
dim3(blocks),
dim3(CUDA_GET_THREADS(id_num, blocks)),
0,
stream, ptr_in, id_num, ptr_temp, uniq_size, out_index->Raw<I>(), sample_segment->Raw<I>());
hipLaunchKernelGGL(( FindSampleIndex<I>),
dim3(1),
dim3(1),
0,
stream, segment.Raw<I>(), out_index->Raw<I>(), id_num, uniq_size, segment_size,
cur.Raw<I>(), sample_index->Raw<I>(), sample_segment->Raw<I>());
CUDA_CHECK(hipMemcpyAsync(out->Raw<T>(),
ptr_temp,
out_shape.NumElements() * sizeof(T),
hipMemcpyDeviceToDevice));
}
//CUDA_CHECK(hipStreamSynchronize(stream));
//auto t1 = std::chrono::high_resolution_clock::now();
//std::chrono::duration<double, std::milli> diff = t1 - t0;
//LOG(INFO) << "unique op time:" << diff.count() << "ms, size=" << id_num;
}
template struct UniqueFunctor<GpuDevice, int64_t, int64_t>;
template struct UniqueFunctor<GpuDevice, int32_t, int32_t>;
template struct UniqueFunctor<GpuDevice, int64_t, int32_t>;
template struct UniqueFunctor<GpuDevice, int32_t, int64_t>;
} // namespace functor
template <typename T, typename I>
class UniqueGpuOp : public GpuOpKernel {
public:
Status LaunchKernel(OpKernelContext* ctx, CudaStream* stream) override;
};
template <typename T, typename I>
Status UniqueGpuOp<T, I>::LaunchKernel(OpKernelContext* ctx, CudaStream* stream) {
Tensor input, segment, output, out_index, sample_index, sample_segment;
XDL_CHECK_STATUS(ctx->GetInput(0, &input));
XDL_CHECK_STATUS(ctx->GetInput(1, &segment));
XDL_CHECK_COND(2 >= input.Shape().Size(),
Status::ArgumentError("input dim can't be greater than 2"));
TensorShape index_shape({input.Shape()[0]});
GpuDevice* device = dynamic_cast<GpuDevice*>(ctx->GetDevice());
auto fn = functor::UniqueFunctor<GpuDevice, T, I>();
fn(device, input, segment, &output, &out_index, &sample_index, &sample_segment);
ctx->SetOutput(0, output);
ctx->SetOutput(1, out_index);
ctx->SetOutput(2, sample_index);
ctx->SetOutput(3, sample_segment);
return Status::Ok();
}
#define REGISTER_GPU_KERNEL(T, I) \
XDL_REGISTER_KERNEL(Unique, UniqueGpuOp<T, I>) \
.Device("GPU") \
.AttrDataType<T>("dtype") \
.AttrDataType<I>("itype")
REGISTER_GPU_KERNEL(int64_t, int64_t);
REGISTER_GPU_KERNEL(int32_t, int32_t);
REGISTER_GPU_KERNEL(int64_t, int32_t);
REGISTER_GPU_KERNEL(int32_t, int64_t);
#undef REGISTER_GPU_KERNEL
} // namespace xdl
| c6996c7dac8c58b0e4fcde6d4bbf8758fa54ec99.cu | /*
* Copyright 1999-2017 Alibaba Group.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "xdl/core/lib/unique.h"
#include "xdl/core/framework/op_registry.h"
#include "xdl/core/lib/atomic.h"
#include "xdl/core/lib/binary_search.h"
#include <thrust/sort.h>
#include <thrust/unique.h>
#include <thrust/device_ptr.h>
#include <thrust/pair.h>
#include <thrust/system/cuda/execution_policy.h>
#include "xdl/core/framework/gpu/gpu_device.h"
#include "xdl/core/lib/common_defines.h"
#include <chrono>
namespace xdl {
namespace functor {
template <typename T, typename I>
struct UniqueFunctor<GpuDevice, T, I> {
void operator()(GpuDevice* d, const Tensor& in, const Tensor& segment, Tensor* out, Tensor* out_index, Tensor* sample_index, Tensor* sample_segment);
};
template <typename T>
struct Less {
__host__ __device__ bool operator()(const thrust::pair<T, T>& l,
const thrust::pair<T, T>& r) {
return l.first < r.first || (l.first == r.first && l.second < r.second);
}
};
template <typename T>
struct Equal {
__host__ __device__ bool operator()(const thrust::pair<T, T>& l,
const thrust::pair<T, T>& r) {
return l.first == r.first && l.second == r.second;
}
};
template <typename T, typename I>
__global__ void FindIndex(const T* src, size_t sz, const T* uniqs,
size_t uniq_sz, I* out_index, I* sample_segment) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= sz) return;
out_index[idx] = static_cast<I>(BinarySearch(uniqs, uniq_sz, src[idx]));
common::gpu_atomic_add<I>(1, sample_segment + out_index[idx]);
}
template <typename T, typename I>
__global__ void FindPairIndex(const T* src, size_t sz, const T* uniqs,
size_t uniq_sz, I* out_index, I* sample_segment) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= sz) return;
out_index[idx] = static_cast<I>(BinarySearch2(uniqs, uniq_sz,
src[2*idx], src[2*idx+1]));
common::gpu_atomic_add<I>(1, sample_segment + out_index[idx]);
}
template <typename I>
__global__ void FindSampleIndex(const I* segment, const I* out_index, size_t sz, size_t uniq_size, size_t segment_size,
I* cur, I* sample_index, I* sample_segment) {
cur[0] = 0;
for (size_t i = 1; i < uniq_size; ++i) {
cur[i] = sample_segment[i-1];
sample_segment[i] += sample_segment[i-1];
}
I segment_idx = 0;
for (I i = 0; i < sz; ++i) {
while (i == *segment) {
if (++segment_idx > segment_size) return;
++segment;
}
sample_index[cur[out_index[i]]] = segment_idx;
cur[out_index[i]]++;
}
}
template <typename T, typename I>
void UniqueFunctor<GpuDevice, T, I>::operator()(GpuDevice* d,
const Tensor& in,
const Tensor& segment,
Tensor* out,
Tensor* out_index,
Tensor* sample_index,
Tensor* sample_segment) {
cudaStream_t stream = d->Stream()->GetInternal();
//CUDA_CHECK(cudaStreamSynchronize(stream));
//auto t0 = std::chrono::high_resolution_clock::now();
Tensor temp(d, in.Shape(), in.Type());
*out_index = Tensor(d, TensorShape({in.Shape()[0]}), DataTypeToEnum<I>::v());
*sample_index = Tensor(d, TensorShape({in.Shape()[0]}), DataTypeToEnum<I>::v());
T* ptr_in = in.Raw<T>();
T* ptr_temp = temp.Raw<T>();
CUDA_CHECK(cudaMemcpyAsync(ptr_temp,
ptr_in,
in.Shape().NumElements() * sizeof(T),
cudaMemcpyDeviceToDevice));
size_t id_num = in.Shape()[0];
size_t id_dim = in.Shape().Size() == 1 ? 1 : in.Shape()[1];
size_t segment_size = segment.Shape()[0];
if (id_dim == 1) {
thrust::device_ptr<T> dptr_temp(ptr_temp), dptr_end;
thrust::sort(thrust::cuda::par.on(stream),
dptr_temp, dptr_temp + id_num);
dptr_end = thrust::unique(thrust::cuda::par.on(stream),
dptr_temp, dptr_temp + id_num);
size_t uniq_size = dptr_end - dptr_temp;
TensorShape out_shape({uniq_size});
*out = Tensor(d, out_shape, in.Type());
TensorShape sseg_shape({uniq_size});
*sample_segment = Tensor(d, sseg_shape, out_index->Type());
Tensor cur(d, sseg_shape, out_index->Type());
CUDA_CHECK(cudaMemsetAsync(sample_segment->Raw<I>(), 0, sizeof(I) * uniq_size, stream));
size_t blocks = CUDA_GET_BLOCKS(id_num);
FindIndex<T, I><<<
blocks,
CUDA_GET_THREADS(id_num, blocks),
0,
stream>>>(ptr_in, id_num, ptr_temp, uniq_size, out_index->Raw<I>(), sample_segment->Raw<I>());
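    // Note: FindSampleIndex below is launched with a single thread (one block, one thread); it
    // serially turns sample_segment into a running prefix sum and scatters each sample's
    // segment index into sample_index.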
FindSampleIndex<I><<<
1,
1,
0,
stream>>>(segment.Raw<I>(), out_index->Raw<I>(), id_num, uniq_size, segment_size,
cur.Raw<I>(), sample_index->Raw<I>(), sample_segment->Raw<I>());
CUDA_CHECK(cudaMemcpyAsync(out->Raw<T>(),
ptr_temp,
out_shape.NumElements() * sizeof(T),
cudaMemcpyDeviceToDevice));
} else if (id_dim == 2) {
thrust::pair<T, T>* ptr_pair = reinterpret_cast<thrust::pair<T, T>*>(ptr_temp);
thrust::device_ptr<thrust::pair<T, T>> dptr_temp(ptr_pair), dptr_end;
thrust::sort(thrust::cuda::par.on(stream), dptr_temp, dptr_temp + id_num,
Less<T>());
dptr_end = thrust::unique(thrust::cuda::par.on(stream), dptr_temp, dptr_temp + id_num,
Equal<T>());
size_t uniq_size = dptr_end - dptr_temp;
TensorShape out_shape({uniq_size, 2});
*out = Tensor(d, out_shape, in.Type());
TensorShape sseg_shape({uniq_size});
*sample_segment = Tensor(d, sseg_shape, out_index->Type());
Tensor cur(d, sseg_shape, out_index->Type());
CUDA_CHECK(cudaMemsetAsync(sample_segment->Raw<I>(), 0, sizeof(I) * uniq_size, stream));
size_t blocks = CUDA_GET_BLOCKS(id_num);
FindPairIndex<T, I><<<
blocks,
CUDA_GET_THREADS(id_num, blocks),
0,
stream>>>(ptr_in, id_num, ptr_temp, uniq_size, out_index->Raw<I>(), sample_segment->Raw<I>());
FindSampleIndex<I><<<
1,
1,
0,
stream>>>(segment.Raw<I>(), out_index->Raw<I>(), id_num, uniq_size, segment_size,
cur.Raw<I>(), sample_index->Raw<I>(), sample_segment->Raw<I>());
CUDA_CHECK(cudaMemcpyAsync(out->Raw<T>(),
ptr_temp,
out_shape.NumElements() * sizeof(T),
cudaMemcpyDeviceToDevice));
}
//CUDA_CHECK(cudaStreamSynchronize(stream));
//auto t1 = std::chrono::high_resolution_clock::now();
//std::chrono::duration<double, std::milli> diff = t1 - t0;
//LOG(INFO) << "unique op time:" << diff.count() << "ms, size=" << id_num;
}
template struct UniqueFunctor<GpuDevice, int64_t, int64_t>;
template struct UniqueFunctor<GpuDevice, int32_t, int32_t>;
template struct UniqueFunctor<GpuDevice, int64_t, int32_t>;
template struct UniqueFunctor<GpuDevice, int32_t, int64_t>;
} // namespace functor
template <typename T, typename I>
class UniqueGpuOp : public GpuOpKernel {
public:
Status LaunchKernel(OpKernelContext* ctx, CudaStream* stream) override;
};
template <typename T, typename I>
Status UniqueGpuOp<T, I>::LaunchKernel(OpKernelContext* ctx, CudaStream* stream) {
Tensor input, segment, output, out_index, sample_index, sample_segment;
XDL_CHECK_STATUS(ctx->GetInput(0, &input));
XDL_CHECK_STATUS(ctx->GetInput(1, &segment));
XDL_CHECK_COND(2 >= input.Shape().Size(),
Status::ArgumentError("input dim can't be greater than 2"));
TensorShape index_shape({input.Shape()[0]});
GpuDevice* device = dynamic_cast<GpuDevice*>(ctx->GetDevice());
auto fn = functor::UniqueFunctor<GpuDevice, T, I>();
fn(device, input, segment, &output, &out_index, &sample_index, &sample_segment);
ctx->SetOutput(0, output);
ctx->SetOutput(1, out_index);
ctx->SetOutput(2, sample_index);
ctx->SetOutput(3, sample_segment);
return Status::Ok();
}
#define REGISTER_GPU_KERNEL(T, I) \
XDL_REGISTER_KERNEL(Unique, UniqueGpuOp<T, I>) \
.Device("GPU") \
.AttrDataType<T>("dtype") \
.AttrDataType<I>("itype")
REGISTER_GPU_KERNEL(int64_t, int64_t);
REGISTER_GPU_KERNEL(int32_t, int32_t);
REGISTER_GPU_KERNEL(int64_t, int32_t);
REGISTER_GPU_KERNEL(int32_t, int64_t);
#undef REGISTER_GPU_KERNEL
} // namespace xdl
|
54f216471d008bbef6af783bb83e24a01caa4601.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ----------------------------------
// Copyright (c) 2011, Brown University
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// (1) Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// (3) Neither the name of Brown University nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY BROWN UNIVERSITY "AS IS" WITH NO
// WARRANTIES OR REPRESENTATIONS OF ANY KIND WHATSOEVER EITHER EXPRESS OR
// IMPLIED, INCLUDING WITHOUT LIMITATION ANY WARRANTY OF DESIGN OR
// MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, EACH OF WHICH ARE
// SPECIFICALLY DISCLAIMED, NOR ANY WARRANTY OR REPRESENTATIONS THAT THE
// SOFTWARE IS ERROR FREE OR THAT THE SOFTWARE WILL NOT INFRINGE ANY
// PATENT, COPYRIGHT, TRADEMARK, OR OTHER THIRD PARTY PROPRIETARY RIGHTS.
// IN NO EVENT SHALL BROWN UNIVERSITY BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY OR CAUSE OF ACTION, WHETHER IN CONTRACT,
// STRICT LIABILITY, TORT, NEGLIGENCE OR OTHERWISE, ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
// SUCH DAMAGE. ANY RECIPIENT OR USER OF THIS SOFTWARE ACKNOWLEDGES THE
// FOREGOING, AND ACCEPTS ALL RISKS AND LIABILITIES THAT MAY ARISE FROM
// THEIR USE OF THE SOFTWARE.
// ----------------------------------
/// \file Merger_kernels.cu
/// \author Benjamin Knorlein
#include "Merger_kernels.h"
// Define the cuda compositiing kernel
__global__
void merge_kernel(float* src1,
float* src2,
float* dest,
size_t width,
size_t height);
namespace xromm {
namespace gpu {
void merge(float* src1,
float* src2,
float* dest,
size_t width,
size_t height)
{
// Calculate the block and grid sizes.
dim3 blockDim(16, 16);
dim3 gridDim((width+blockDim.x-1)/blockDim.x,
(height+blockDim.y-1)/blockDim.y);
// Call the kernel
hipLaunchKernelGGL(( merge_kernel), dim3(gridDim), dim3(blockDim), 0, 0, src1,src2,dest,width,height);
}
} // namespace gpu
} // namespace xromm
__global__
void merge_kernel(float* src1,
float* src2,
float* dest,
size_t width,
size_t height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x > width - 1 || y > height - 1) {
return;
}
const unsigned int xy = y*width + x;
// src1 maps to orange and src2 to blue
dest[xy] = min(src1[xy] + src2[xy], 1.0);
}
| 54f216471d008bbef6af783bb83e24a01caa4601.cu | // ----------------------------------
// Copyright (c) 2011, Brown University
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// (1) Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// (2) Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// (3) Neither the name of Brown University nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY BROWN UNIVERSITY “AS IS” WITH NO
// WARRANTIES OR REPRESENTATIONS OF ANY KIND WHATSOEVER EITHER EXPRESS OR
// IMPLIED, INCLUDING WITHOUT LIMITATION ANY WARRANTY OF DESIGN OR
// MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, EACH OF WHICH ARE
// SPECIFICALLY DISCLAIMED, NOR ANY WARRANTY OR REPRESENTATIONS THAT THE
// SOFTWARE IS ERROR FREE OR THAT THE SOFTWARE WILL NOT INFRINGE ANY
// PATENT, COPYRIGHT, TRADEMARK, OR OTHER THIRD PARTY PROPRIETARY RIGHTS.
// IN NO EVENT SHALL BROWN UNIVERSITY BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
// OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY OR CAUSE OF ACTION, WHETHER IN CONTRACT,
// STRICT LIABILITY, TORT, NEGLIGENCE OR OTHERWISE, ARISING IN ANY WAY
// OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
// SUCH DAMAGE. ANY RECIPIENT OR USER OF THIS SOFTWARE ACKNOWLEDGES THE
// FOREGOING, AND ACCEPTS ALL RISKS AND LIABILITIES THAT MAY ARISE FROM
// THEIR USE OF THE SOFTWARE.
// ----------------------------------
/// \file Merger_kernels.cu
/// \author Benjamin Knorlein
#include "Merger_kernels.h"
// Define the cuda compositiing kernel
__global__
void merge_kernel(float* src1,
float* src2,
float* dest,
size_t width,
size_t height);
namespace xromm {
namespace gpu {
void merge(float* src1,
float* src2,
float* dest,
size_t width,
size_t height)
{
// Calculate the block and grid sizes.
dim3 blockDim(16, 16);
dim3 gridDim((width+blockDim.x-1)/blockDim.x,
(height+blockDim.y-1)/blockDim.y);
// Call the kernel
merge_kernel<<<gridDim, blockDim>>>(src1,src2,dest,width,height);
}
} // namespace gpu
} // namespace xromm
__global__
void merge_kernel(float* src1,
float* src2,
float* dest,
size_t width,
size_t height)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x > width - 1 || y > height - 1) {
return;
}
const unsigned int xy = y*width + x;
// src1 maps to orange and src2 to blue
dest[xy] = min(src1[xy] + src2[xy], 1.0);
}
|
cf594a058a5a695b66f22e3ee4eeed526c5d73f7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
/** Parallel communication patterns
 * 0. Map: each thread or task reads a single input and produces a single output (one-to-one).
 * 1. Gather: each thread or task reads several inputs and produces a single output (many-to-one).
 * 2. Scatter: each thread or task reads a single input and writes several outputs to memory (one-to-many).
 * 3. Stencil: each thread reads a fixed-shaped neighbourhood of elements, e.g. the kernel in an image convolution.
 * 4. Transpose: the input matrix is row-major and the output matrix is required in column-major order.
 *
 * These are the patterns CUDA programs follow; the sample applications provided on the official site are a useful guide to the corresponding syntax.
 */
// Note the kernel launch configuration: the number of blocks and the number of threads per block must not exceed the limits of the GPU hardware itself.
// Choose a reasonable number of blocks and an appropriate number of threads per block.
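// Illustrative sketch (not part of the original example): a minimal Gather-pattern kernel in the
// same style as the Map example below. Each thread reads three neighbouring inputs and produces
// one output; the 3-point average and the name gpu_gather_average are assumptions for illustration.
__global__ void gpu_gather_average(const double *device_in, double *device_out, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid > 0 && tid < n - 1)
    {
        // Gather three inputs (many-to-one), write a single output.
        device_out[tid] = (device_in[tid - 1] + device_in[tid] + device_in[tid + 1]) / 3.0;
    }
}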
// Defining number of elements in vector.
#define N 100
// Defining kernel function for squaring number.
__global__ void gpu_vector_square(double *device_in, double *device_out)
{
// Getting thread index for current kernel.
int tid = threadIdx.x; // handle the data at this index.
double temp = device_in[tid];
device_out[tid] = temp * temp;
}
int main(int argc, char **argv)
{
// Defining vector for host.
double host_in[N], host_out[N];
double *device_in, *device_out;
// Allocate the memory on the device GPU.
hipMalloc((void**)&device_in, N * sizeof(double));
hipMalloc((void**)&device_out, N * sizeof(double));
// Initializing vector.
for (unsigned int i = 0; i < N; ++i)
{
host_in[i] = i;
}
// Copy vector from host to device.
hipMemcpy(device_in, host_in, N * sizeof(double), hipMemcpyHostToDevice);
// Calling kernel with one block and N threads per block.
gpu_vector_square << <1, N>> > (device_in, device_out);
    // Copying result back to host from device memory.
hipMemcpy(host_out, device_out, N * sizeof(double), hipMemcpyDeviceToHost);
// Printing result on console.
std::cout << "Square of number on GPU." << std::endl;
for (unsigned int i = 0; i < N; ++i)
{
printf("The square of %f is %f\n", host_in[i], host_out[i]);
}
// Free up memory.
hipFree(device_in);
hipFree(device_out);
return 0;
} | cf594a058a5a695b66f22e3ee4eeed526c5d73f7.cu | #include <stdio.h>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
/** Parallel communication patterns
 * 0. Map: each thread or task reads a single input and produces a single output (one-to-one).
 * 1. Gather: each thread or task reads several inputs and produces a single output (many-to-one).
 * 2. Scatter: each thread or task reads a single input and writes several outputs to memory (one-to-many).
 * 3. Stencil: each thread reads a fixed-shaped neighbourhood of elements, e.g. the kernel in an image convolution.
 * 4. Transpose: the input matrix is row-major and the output matrix is required in column-major order.
 *
 * These are the patterns CUDA programs follow; the sample applications provided on the official site are a useful guide to the corresponding syntax.
 */
// Note the kernel launch configuration: the number of blocks and the number of threads per block must not exceed the limits of the GPU hardware itself.
// Choose a reasonable number of blocks and an appropriate number of threads per block.
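// Illustrative sketch (not part of the original example): a minimal Gather-pattern kernel in the
// same style as the Map example below. Each thread reads three neighbouring inputs and produces
// one output; the 3-point average and the name gpu_gather_average are assumptions for illustration.
__global__ void gpu_gather_average(const double *device_in, double *device_out, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid > 0 && tid < n - 1)
    {
        // Gather three inputs (many-to-one), write a single output.
        device_out[tid] = (device_in[tid - 1] + device_in[tid] + device_in[tid + 1]) / 3.0;
    }
}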
// Defining number of elements in vector.
#define N 100
// Defining kernel function for squaring number.
__global__ void gpu_vector_square(double *device_in, double *device_out)
{
// Getting thread index for current kernel.
int tid = threadIdx.x; // handle the data at this index.
double temp = device_in[tid];
device_out[tid] = temp * temp;
}
int main(int argc, char **argv)
{
// Defining vector for host.
double host_in[N], host_out[N];
double *device_in, *device_out;
// Allocate the memory on the device GPU.
cudaMalloc((void**)&device_in, N * sizeof(double));
cudaMalloc((void**)&device_out, N * sizeof(double));
// Initializing vector.
for (unsigned int i = 0; i < N; ++i)
{
host_in[i] = i;
}
// Copy vector from host to device.
cudaMemcpy(device_in, host_in, N * sizeof(double), cudaMemcpyHostToDevice);
// Calling kernel with one block and N threads per block.
gpu_vector_square << <1, N>> > (device_in, device_out);
    // Copying result back to host from device memory.
cudaMemcpy(host_out, device_out, N * sizeof(double), cudaMemcpyDeviceToHost);
// Printing result on console.
std::cout << "Square of number on GPU." << std::endl;
for (unsigned int i = 0; i < N; ++i)
{
printf("The square of %f is %f\n", host_in[i], host_out[i]);
}
// Free up memory.
cudaFree(device_in);
cudaFree(device_out);
return 0;
} |
8e3ab24e5d33f88983d7bebc6d10c50562c5224c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "forward_bias.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *X = NULL;
hipMalloc(&X, XSIZE*YSIZE*sizeof(float));
float *b = NULL;
hipMalloc(&b, XSIZE*YSIZE*sizeof(float));
int N = XSIZE*YSIZE;
int ch_in = 1;
int h_in = 1;
int w_in = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(forward_bias, dim3(gridBlock), dim3(threadBlock), 0, 0, X, b, N, ch_in, h_in, w_in);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
    hipLaunchKernelGGL(forward_bias, dim3(gridBlock), dim3(threadBlock), 0, 0, X, b, N, ch_in, h_in, w_in);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    hipLaunchKernelGGL(forward_bias, dim3(gridBlock), dim3(threadBlock), 0, 0, X, b, N, ch_in, h_in, w_in);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 8e3ab24e5d33f88983d7bebc6d10c50562c5224c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "forward_bias.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *X = NULL;
cudaMalloc(&X, XSIZE*YSIZE*sizeof(float));
float *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE*sizeof(float));
int N = XSIZE*YSIZE;
int ch_in = 1;
int h_in = 1;
int w_in = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
forward_bias<<<gridBlock,threadBlock>>>(X,b,N,ch_in,h_in,w_in);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
forward_bias<<<gridBlock,threadBlock>>>(X,b,N,ch_in,h_in,w_in);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
forward_bias<<<gridBlock,threadBlock>>>(X,b,N,ch_in,h_in,w_in);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
1879655b82c5f3778e675306c5f68fefa591c2e8.hip | // !!! This is a file automatically generated by hipify!!!
/********************************************************
* *
* Licensed under the Academic Free License version 3.0 *
* *
********************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <time.h>
#include <hip/hip_runtime.h>
extern "C" {
#include "beam_common.h"
#include "form_beam.h"
#include "mycomplex.h"
}
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
/* Wrapper function for GPU/CUDA error handling. Every CUDA call goes through
this function. It will return a message giving your the error string,
file name and line of the error. Aborts on error. */
if (code != 0)
{
fprintf(stderr, "GPUAssert:: %s - %s (%d)\n", hipGetErrorString(code), file, line);
if (abort)
{
exit(code);
}
}
}
// define a macro for accessing gpuAssert
#define gpuErrchk(ans) {gpuAssert((ans), __FILE__, __LINE__, true);}
// define constants to be used in the kernel
#define NSTATION 128
#define NPOL 2
#define NSTOKES 4
// maximum number of pointings (currently)
#define NPOINTING 4
__global__ void invj_the_data( uint8_t *data,
ComplexDouble *J,
ComplexDouble *W,
ComplexDouble *JDx,
ComplexDouble *JDy,
float *Ia,
int incoh )
/* Layout for input arrays:
* data [nsamples] [nchan] [NPFB] [NREC] [NINC] -- see docs
* J [NSTATION] [nchan] [NPOL] [NPOL] -- jones matrix
 * incoh -- true if outputting an incoherent beam
* Layout for output arrays:
* JDx [nsamples] [nchan] [NPFB] [NREC] [NINC]
* JDy [nsamples] [nchan] [NPFB] [NREC] [NINC]
*/
{
    // Translate GPU block/thread numbers into meaningful names
int c = blockIdx.x; /* The (c)hannel number */
int nc = gridDim.x; /* The (n)umber of (c)hannels (=128) */
int s = blockIdx.y; /* The (s)ample number */
int ant = threadIdx.x; /* The (ant)enna number */
ComplexDouble Dx, Dy;
// Convert input data to complex float
Dx = UCMPLX4_TO_CMPLX_FLT(data[D_IDX(s,c,ant,0,nc)]);
Dy = UCMPLX4_TO_CMPLX_FLT(data[D_IDX(s,c,ant,1,nc)]);
// If tile is flagged in the calibration, flag it in the incoherent beam
if (incoh)
{
if (CReald(W[W_IDX(0,ant,c,0,nc)]) == 0.0 &&
CImagd(W[W_IDX(0,ant,c,0,nc)]) == 0.0 &&
CReald(W[W_IDX(0,ant,c,1,nc)]) == 0.0 &&
CImagd(W[W_IDX(0,ant,c,1,nc)]) == 0.0)
Ia[JD_IDX(s,c,ant,nc)] = 0.0;
else
Ia[JD_IDX(s,c,ant,nc)] = DETECT(Dx) + DETECT(Dy);
}
// Calculate the first step (J*D) of the coherent beam (B = J*W*D)
// Nick: by my math the order should be:
// JDx = Jxx*Dx + Jxy*Dy
// JDy = Jyx*Dx + Jyy*Dy
// This is what I have implemented (as it produces higher signal-to-noise
// ratio detections). The original code (the single-pixel beamformer)
// switched the yx and xy terms but we get similar SNs
JDx[JD_IDX(s,c,ant,nc)] = CAddd( CMuld( J[J_IDX(ant,c,0,0,nc)], Dx ),
CMuld( J[J_IDX(ant,c,0,1,nc)], Dy ) );
JDy[JD_IDX(s,c,ant,nc)] = CAddd( CMuld( J[J_IDX(ant,c,1,0,nc)], Dx ),
CMuld( J[J_IDX(ant,c,1,1,nc)], Dy ) );
}
__global__ void beamform_kernel( ComplexDouble *JDx,
ComplexDouble *JDy,
ComplexDouble *W,
float *Iin,
double invw,
int p,
int coh_pol,
int incoh,
int soffset,
int nchunk,
ComplexDouble *Bd,
float *C,
float *I )
/* Layout for input arrays:
* JDx [nsamples] [nchan] [NPFB] [NREC] [NINC] -- calibrated voltages
* JDy [nsamples] [nchan] [NPFB] [NREC] [NINC]
* W [NSTATION] [nchan] [NPOL] -- weights array
* Iin [nsamples] [nchan] [nant] -- detected incoh
 * invw -- the reciprocal of the sum of the antenna weights
 * Layout of input options
 * p -- pointing number
 * coh_pol -- coherent polarisation number
 * incoh -- true if outputting an incoherent beam
* soffset -- sample offset (10000/nchunk)
* nchunk -- number of chunks each second is split into
* Layout for output arrays:
* Bd [nsamples] [nchan] [NPOL] -- detected beam
* C [nsamples] [NSTOKES] [nchan] -- coherent full stokes
* I [nsamples] [nchan] -- incoherent
*/
{
    // Translate GPU block/thread numbers into meaningful names
int c = blockIdx.x; /* The (c)hannel number */
int nc = gridDim.x; /* The (n)umber of (c)hannels (=128) */
int s = blockIdx.y; /* The (s)ample number */
int ns = gridDim.y*nchunk; /* The (n)umber of (s)amples (=10000)*/
int ant = threadIdx.x; /* The (ant)enna number */
    int nant = blockDim.x; /* The (n)umber of (ant)ennas */
/*// GPU profiling
clock_t start, stop;
double setup_t, detect_t, sum_t, stokes_t;
if ((p == 0) && (ant == 0) && (c == 0) && (s == 0)) start = clock();*/
// Calculate the beam and the noise floor
__shared__ double Ia[NSTATION];
__shared__ ComplexDouble Bx[NSTATION], By[NSTATION];
__shared__ ComplexDouble Nxx[NSTATION], Nxy[NSTATION],
Nyy[NSTATION];//Nyx[NSTATION]
/* Fix from Maceij regarding NaNs in output when running on Athena, 13 April 2018.
Apparently the different compilers and architectures are treating what were
       uninitialised variables very differently */
Bx[ant] = CMaked( 0.0, 0.0 );
By[ant] = CMaked( 0.0, 0.0 );
Nxx[ant] = CMaked( 0.0, 0.0 );
Nxy[ant] = CMaked( 0.0, 0.0 );
//Nyx[ant] = CMaked( 0.0, 0.0 );
Nyy[ant] = CMaked( 0.0, 0.0 );
if ((p == 0) && (incoh)) Ia[ant] = Iin[JD_IDX(s,c,ant,nc)];
/*if ((p == 0) && (ant == 0) && (c == 0) && (s == 0))
{
stop = clock();
setup_t = (double)(stop - start) / CLOCKS_PER_SEC * NPOINTING * NANT;
start = clock();
}*/
// Calculate beamform products for each antenna, and then add them together
// Calculate the coherent beam (B = J*W*D)
Bx[ant] = CMuld( W[W_IDX(p,ant,c,0,nc)], JDx[JD_IDX(s,c,ant,nc)] );
By[ant] = CMuld( W[W_IDX(p,ant,c,1,nc)], JDy[JD_IDX(s,c,ant,nc)] );
Nxx[ant] = CMuld( Bx[ant], CConjd(Bx[ant]) );
Nxy[ant] = CMuld( Bx[ant], CConjd(By[ant]) );
//Nyx[ant] = CMuld( By[ant], CConjd(Bx[ant]) );
Nyy[ant] = CMuld( By[ant], CConjd(By[ant]) );
/*if ((p == 0) && (ant == 0) && (c == 0) && (s == 0))
{
stop = clock();
detect_t = (double)(stop - start) / CLOCKS_PER_SEC * NPOINTING * NANT;
start = clock();
}*/
// Detect the coherent beam
    // A summation over an array is faster on a GPU if you add one half of the array
    // to the other half, since that can be done in parallel. This is then repeated
    // with half of the previous array until the array is down to 1 element.
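    // For example, with nant = 8 the passes are: h_ant = 4 (add antennas 4-7 into 0-3),
    // h_ant = 2 (add 2-3 into 0-1), h_ant = 1 (add 1 into 0), leaving the full sum in
    // element 0 after log2(nant) = 3 synchronised passes.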
__syncthreads();
for ( int h_ant = nant / 2; h_ant > 0; h_ant = h_ant / 2 )
{
if (ant < h_ant)
{
if ( (p == 0) && (incoh)) Ia[ant] += Ia[ant+h_ant];
Bx[ant] = CAddd( Bx[ant], Bx[ant + h_ant] );
By[ant] = CAddd( By[ant], By[ant + h_ant] );
Nxx[ant] = CAddd( Nxx[ant], Nxx[ant + h_ant] );
Nxy[ant] = CAddd( Nxy[ant], Nxy[ant + h_ant] );
//Nyx[ant]=CAddd( Nyx[ant], Nyx[ant + h_ant] );
Nyy[ant] = CAddd( Nyy[ant], Nyy[ant + h_ant] );
}
// below makes no difference so removed
// else return;
__syncthreads();
}
/*if ((p == 0) && (ant == 0) && (c == 0) && (s == 0))
{
stop = clock();
sum_t = (double)(stop - start) / CLOCKS_PER_SEC * NPOINTING * NANT;
start = clock();
}*/
// Form the stokes parameters for the coherent beam
// Only doing it for ant 0 so that it only prints once
if ( ant == 0 )
{
float bnXX = DETECT(Bx[0]) - CReald(Nxx[0]);
float bnYY = DETECT(By[0]) - CReald(Nyy[0]);
ComplexDouble bnXY = CSubd( CMuld( Bx[0], CConjd( By[0] ) ),
Nxy[0] );
// The incoherent beam
if ( (p == 0) && (incoh)) I[I_IDX(s+soffset,c,nc)] = Ia[0];
// Stokes I, Q, U, V:
C[C_IDX(p,s+soffset,0,c,ns,coh_pol,nc)] = invw*(bnXX + bnYY);
if ( coh_pol == 4 )
{
C[C_IDX(p,s+soffset,1,c,ns,coh_pol,nc)] = invw*(bnXX - bnYY);
C[C_IDX(p,s+soffset,2,c,ns,coh_pol,nc)] = 2.0*invw*CReald( bnXY );
C[C_IDX(p,s+soffset,3,c,ns,coh_pol,nc)] = -2.0*invw*CImagd( bnXY );
}
// The beamformed products
Bd[B_IDX(p,s+soffset,c,0,ns,nc)] = Bx[0];
Bd[B_IDX(p,s+soffset,c,1,ns,nc)] = By[0];
}
/*if ((p == 0) && (ant == 0) && (c == 0) && (s == 0))
{
stop = clock();
stokes_t = (double)(stop - start) / CLOCKS_PER_SEC * NPOINTING * NANT;
printf("Time: setup: % f detect: %f sum: %f stokes: %f\n", setup_t, detect_t, sum_t, stokes_t);
}*/
}
__global__ void flatten_bandpass_I_kernel( float *I,
int nstep )
{
// For just doing stokes I
// One block
// 128 threads each thread will do one channel
// (we have already summed over all ant)
// For doing the C array (I,Q,U,V)
// ... figure it out later.
// Translate GPU block/thread numbers into meaningful names
int chan = threadIdx.x; /* The (c)hannel number */
int nchan = blockDim.x; /* The total number of channels */
float band;
int new_var = 32; /* magic number */
int i;
float *data_ptr;
// initialise the band 'array'
band = 0.0;
// accumulate abs(data) over all time samples and save into band
    for (i=0;i<nstep;i++) { // time steps
        data_ptr = I + I_IDX(i,chan,nchan);
        band += fabsf(*data_ptr);
    }
    // now normalise the incoherent beam
    for (i=0;i<nstep;i++) { // time steps
        data_ptr = I + I_IDX(i,chan,nchan);
        *data_ptr = (*data_ptr)/( (band/nstep)/new_var );
    }
}
__global__ void flatten_bandpass_C_kernel( float *C, int nstep )
{
// For just doing stokes I
// One block
// 128 threads each thread will do one channel
// (we have already summed over all ant)
// For doing the C array (I,Q,U,V)
// ... figure it out later.
// Translate GPU block/thread numbers into meaningful names
int chan = threadIdx.x; /* The (c)hannel number */
int nchan = blockDim.x; /* The (n)umber of (c)hannels */
int stokes = threadIdx.y; /* The (stokes) number */
int nstokes = blockDim.y; /* The (n)umber of (stokes) */
int p = blockIdx.x; /* The (p)ointing number */
float band;
int new_var = 32; /* magic number */
int i;
float *data_ptr;
// initialise the band 'array'
band = 0.0;
// accumulate abs(data) over all time samples and save into band
//data_ptr = C + C_IDX(0,stokes,chan,nchan);
for (i=0;i<nstep;i++) { // time steps
data_ptr = C + C_IDX(p,i,stokes,chan,nstep,nstokes,nchan);
band += fabsf(*data_ptr);
}
// now normalise the coherent beam
//data_ptr = C + C_IDX(0,stokes,chan,nchan);
for (i=0;i<nstep;i++) { // time steps
data_ptr = C + C_IDX(p,i,stokes,chan,nstep,nstokes,nchan);
*data_ptr = (*data_ptr)/( (band/nstep)/new_var );
}
}
void cu_form_beam( uint8_t *data, struct make_beam_opts *opts,
ComplexDouble ****complex_weights_array,
ComplexDouble ****invJi, int file_no,
int npointing, int nstation, int nchan,
int npol, int outpol_coh, double invw,
struct gpu_formbeam_arrays *g,
ComplexDouble ****detected_beam, float *coh, float *incoh,
hipStream_t *streams, int incoh_check, int nchunk )
/* The GPU implementation of the beamforming operations, with the work
 * spread across GPU threads and one stream per pointing.
*
* Inputs:
* data = array of 4bit+4bit complex numbers. For data order, refer to the
* documentation.
* opts = passed option parameters, containing meta information about the
* obs and the data
* W = complex weights array. [npointing][nstation][nchan][npol]
* J = inverse Jones matrix. [nstation][nchan][npol][npol]
* file_no = number of file we are processing, starting at 0.
* nstation = 128
* nchan = 128
* npol = 2 (X,Y)
* outpol_coh = 4 (I,Q,U,V)
* invw = the reciprocal of the sum of the antenna weights
* g = struct containing pointers to various arrays on
* both host and device
*
* Outputs:
* detected_beam = result of beamforming operation, summed over antennas
* [2*nsamples][nchan][npol]
* coh = result in Stokes parameters (minus noise floor)
* [nsamples][nstokes][nchan]
* incoh = result (just Stokes I)
* [nsamples][nchan]
*
* Assumes "coh" and "incoh" contain only zeros.
*/
{
// Setup input values (= populate W and J)
int p, ant, ch, pol, pol2;
int Wi, Ji;
for (p = 0; p < npointing; p++ )
for (ant = 0; ant < nstation ; ant++)
for (ch = 0; ch < nchan ; ch++ )
for (pol = 0; pol < npol ; pol++)
{
Wi = p * (npol*nchan*nstation) +
ant * (npol*nchan) +
ch * (npol) +
pol;
g->W[Wi] = complex_weights_array[p][ant][ch][pol];
if ( p == 0 )
for (pol2 = 0; pol2 < npol; pol2++)
{
Ji = ant * (npol*npol*nchan) +
ch * (npol*npol) +
pol * (npol) +
pol2;
g->J[Ji] = invJi[ant][ch][pol][pol2];
}
}
// Copy the data to the device
gpuErrchk(hipMemcpyAsync( g->d_W, g->W, g->W_size, hipMemcpyHostToDevice ));
gpuErrchk(hipMemcpyAsync( g->d_J, g->J, g->J_size, hipMemcpyHostToDevice ));
// Divide the gpu calculation into multiple time chunks so there is enough room on the GPU
for (int ichunk = 0; ichunk < nchunk; ichunk++)
{
//int dataoffset = ichunk * g->data_size / sizeof(uint8_t);
gpuErrchk(hipMemcpyAsync( g->d_data,
data + ichunk * g->data_size / sizeof(uint8_t),
g->data_size, hipMemcpyHostToDevice ));
// Call the kernels
// samples_chan(index=blockIdx.x size=gridDim.x,
// index=blockIdx.y size=gridDim.y)
// stat_point (index=threadIdx.x size=blockDim.x,
// index=threadIdx.y size=blockDim.y)
//dim3 samples_chan(opts->sample_rate, nchan);
dim3 chan_samples( nchan, opts->sample_rate / nchunk );
dim3 stat( NSTATION );
// convert the data and multiply it by J
hipLaunchKernelGGL(( invj_the_data), dim3(chan_samples), dim3(stat), 0, 0, g->d_data, g->d_J, g->d_W, g->d_JDx, g->d_JDy,
g->d_Ia, incoh_check );
        // Send off a parallel cuda stream for each pointing
for ( int p = 0; p < npointing; p++ )
{
hipLaunchKernelGGL(( beamform_kernel), dim3(chan_samples), dim3(stat), 0, streams[p], g->d_JDx, g->d_JDy,
g->d_W, g->d_Ia, invw,
p, outpol_coh , incoh_check, ichunk*opts->sample_rate/nchunk, nchunk,
g->d_Bd, g->d_coh, g->d_incoh );
gpuErrchk( hipPeekAtLastError() );
}
}
gpuErrchk( hipDeviceSynchronize() );
// Flatten the bandpass
if ( incoh_check )
{
hipLaunchKernelGGL(( flatten_bandpass_I_kernel), dim3(1), dim3(nchan), 0, streams[0], g->d_incoh,
opts->sample_rate );
gpuErrchk( hipPeekAtLastError() );
}
// Now do the same for the coherent beam
dim3 chan_stokes(nchan, outpol_coh);
hipLaunchKernelGGL(( flatten_bandpass_C_kernel), dim3(npointing), dim3(chan_stokes), 0, streams[0], g->d_coh,
opts->sample_rate );
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
// Copy the results back into host memory
gpuErrchk(hipMemcpyAsync( g->Bd, g->d_Bd, g->Bd_size, hipMemcpyDeviceToHost ));
gpuErrchk(hipMemcpyAsync( incoh, g->d_incoh, g->incoh_size, hipMemcpyDeviceToHost ));
gpuErrchk(hipMemcpyAsync( coh, g->d_coh, g->coh_size, hipMemcpyDeviceToHost ));
// Copy the data back from Bd back into the detected_beam array
// Make sure we put it back into the correct half of the array, depending
// on whether this is an even or odd second.
int offset, i;
offset = file_no % 2 * opts->sample_rate;
for ( int p = 0; p < npointing ; p++ )
for ( int s = 0; s < opts->sample_rate; s++ )
for ( int ch = 0; ch < nchan ; ch++ )
for ( int pol = 0; pol < npol ; pol++)
{
i = p * (npol*nchan*opts->sample_rate) +
s * (npol*nchan) +
ch * (npol) +
pol;
detected_beam[p][s+offset][ch][pol] = g->Bd[i];
}
}
void malloc_formbeam( struct gpu_formbeam_arrays *g, unsigned int sample_rate,
int nstation, int nchan, int npol, int nchunk, int outpol_coh,
int outpol_incoh, int npointing, double time )
{
// Calculate array sizes for host and device
g->coh_size = npointing * sample_rate * outpol_coh * nchan * sizeof(float);
g->incoh_size = sample_rate * outpol_incoh * nchan * sizeof(float);
g->data_size = sample_rate * nstation * nchan * npol / nchunk * sizeof(uint8_t);
g->Bd_size = npointing * sample_rate * nchan * npol * sizeof(ComplexDouble);
g->W_size = npointing * nstation * nchan * npol * sizeof(ComplexDouble);
g->J_size = nstation * nchan * npol * npol * sizeof(ComplexDouble);
g->JD_size = sample_rate * nstation * nchan / nchunk * sizeof(ComplexDouble);
// Allocate host memory
//g->W = (ComplexDouble *)malloc( g->W_size );
//g->J = (ComplexDouble *)malloc( g->J_size );
//g->Bd = (ComplexDouble *)malloc( g->Bd_size );
hipHostMalloc( &g->W, g->W_size );
cudaCheckErrors("hipHostMalloc W fail");
hipHostMalloc( &g->J, g->J_size );
cudaCheckErrors("hipHostMalloc J fail");
hipHostMalloc( &g->Bd, g->Bd_size );
cudaCheckErrors("hipHostMalloc Bd fail");
fprintf( stderr, "[%f] coh_size %d MB GPU mem\n", time, g->coh_size /1000000 );
fprintf( stderr, "[%f] incoh_size %d MB GPU mem\n", time, g->incoh_size/1000000 );
fprintf( stderr, "[%f] data_size %d MB GPU mem\n", time, g->data_size /1000000 );
fprintf( stderr, "[%f] Bd_size %d MB GPU mem\n", time, g->Bd_size /1000000 );
fprintf( stderr, "[%f] W_size %d MB GPU mem\n", time, g->W_size /1000000 );
fprintf( stderr, "[%f] J_size %d MB GPU mem\n", time, g->J_size /1000000 );
fprintf( stderr, "[%f] JD_size %d MB GPU mem\n", time, g->JD_size*3 /1000000 );
int GPU_mem = (g->W_size + g->J_size + g->Bd_size + g->data_size +
g->coh_size + g->incoh_size + 3*g->JD_size) /1000000000;
fprintf( stderr, "[%f] %d GB GPU memory allocated\n", time, GPU_mem );
// Allocate device memory
gpuErrchk(hipMalloc( (void **)&g->d_W, g->W_size ));
gpuErrchk(hipMalloc( (void **)&g->d_J, g->J_size ));
gpuErrchk(hipMalloc( (void **)&g->d_JDx, g->JD_size ));
gpuErrchk(hipMalloc( (void **)&g->d_JDy, g->JD_size ));
gpuErrchk(hipMalloc( (void **)&g->d_Ia, g->JD_size ));
gpuErrchk(hipMalloc( (void **)&g->d_Bd, g->Bd_size ));
gpuErrchk(hipMalloc( (void **)&g->d_data, g->data_size ));
gpuErrchk(hipMalloc( (void **)&g->d_coh, g->coh_size ));
gpuErrchk(hipMalloc( (void **)&g->d_incoh, g->incoh_size ));
}
void free_formbeam( struct gpu_formbeam_arrays *g )
{
// Free memory on host and device
hipHostFree( g->W );
hipHostFree( g->J );
hipHostFree( g->Bd );
hipFree( g->d_W );
hipFree( g->d_J );
hipFree( g->d_Bd );
hipFree( g->d_data );
hipFree( g->d_coh );
hipFree( g->d_incoh );
}
float *create_pinned_data_buffer_psrfits( size_t size )
{
float *ptr;
hipHostMalloc( &ptr, size * sizeof(float) );
//hipError_t status = hipHostRegister((void**)&ptr, size * sizeof(float),
// hipHostRegisterPortable );
cudaCheckErrors("hipHostMalloc data_buffer_psrfits fail");
return ptr;
}
float *create_pinned_data_buffer_vdif( size_t size )
{
float *ptr;
hipHostMalloc( &ptr, size * sizeof(float) );
//hipError_t status = hipHostRegister((void**)&ptr, size * sizeof(float),
// hipHostRegisterPortable );
cudaCheckErrors("hipHostMalloc data_buffer_vdif fail");
return ptr;
}
void populate_weights_johnes( struct gpu_formbeam_arrays *g,
ComplexDouble ****complex_weights_array,
ComplexDouble *****invJi,
int npointing, int nstation, int nchan, int npol )
{
// Setup input values (= populate W and J)
int p, ant, ch, pol, pol2;
int Wi, Ji;
for (p = 0; p < npointing; p++ )
for (ant = 0; ant < nstation ; ant++)
for (ch = 0; ch < nchan ; ch++ )
for (pol = 0; pol < npol ; pol++)
{
Wi = p * (npol*nchan*nstation) +
ant * (npol*nchan) +
ch * (npol) +
pol;
g->W[Wi] = complex_weights_array[p][ant][ch][pol];
for (pol2 = 0; pol2 < npol; pol2++)
{
Ji = Wi*npol + pol2;
g->J[Ji] = invJi[p][ant][ch][pol][pol2];
}
}
// Copy the data to the device
gpuErrchk(hipMemcpy( g->d_W, g->W, g->W_size, hipMemcpyHostToDevice ));
gpuErrchk(hipMemcpy( g->d_J, g->J, g->J_size, hipMemcpyHostToDevice ));
}
| 1879655b82c5f3778e675306c5f68fefa591c2e8.cu | /********************************************************
* *
* Licensed under the Academic Free License version 3.0 *
* *
********************************************************/
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <time.h>
#include <cuda_runtime.h>
extern "C" {
#include "beam_common.h"
#include "form_beam.h"
#include "mycomplex.h"
}
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
/* Wrapper function for GPU/CUDA error handling. Every CUDA call goes through
this function. It will return a message giving your the error string,
file name and line of the error. Aborts on error. */
if (code != 0)
{
fprintf(stderr, "GPUAssert:: %s - %s (%d)\n", cudaGetErrorString(code), file, line);
if (abort)
{
exit(code);
}
}
}
// define a macro for accessing gpuAssert
#define gpuErrchk(ans) {gpuAssert((ans), __FILE__, __LINE__, true);}
// define constants to be used in the kernel
#define NSTATION 128
#define NPOL 2
#define NSTOKES 4
// maximum number of pointings (currently)
#define NPOINTING 4
__global__ void invj_the_data( uint8_t *data,
ComplexDouble *J,
ComplexDouble *W,
ComplexDouble *JDx,
ComplexDouble *JDy,
float *Ia,
int incoh )
/* Layout for input arrays:
* data [nsamples] [nchan] [NPFB] [NREC] [NINC] -- see docs
* J [NSTATION] [nchan] [NPOL] [NPOL] -- jones matrix
 * incoh -- true if outputting an incoherent beam
* Layout for output arrays:
* JDx [nsamples] [nchan] [NPFB] [NREC] [NINC]
* JDy [nsamples] [nchan] [NPFB] [NREC] [NINC]
*/
{
    // Translate GPU block/thread numbers into meaningful names
int c = blockIdx.x; /* The (c)hannel number */
int nc = gridDim.x; /* The (n)umber of (c)hannels (=128) */
int s = blockIdx.y; /* The (s)ample number */
int ant = threadIdx.x; /* The (ant)enna number */
ComplexDouble Dx, Dy;
// Convert input data to complex float
Dx = UCMPLX4_TO_CMPLX_FLT(data[D_IDX(s,c,ant,0,nc)]);
Dy = UCMPLX4_TO_CMPLX_FLT(data[D_IDX(s,c,ant,1,nc)]);
// If tile is flagged in the calibration, flag it in the incoherent beam
if (incoh)
{
if (CReald(W[W_IDX(0,ant,c,0,nc)]) == 0.0 &&
CImagd(W[W_IDX(0,ant,c,0,nc)]) == 0.0 &&
CReald(W[W_IDX(0,ant,c,1,nc)]) == 0.0 &&
CImagd(W[W_IDX(0,ant,c,1,nc)]) == 0.0)
Ia[JD_IDX(s,c,ant,nc)] = 0.0;
else
Ia[JD_IDX(s,c,ant,nc)] = DETECT(Dx) + DETECT(Dy);
}
// Calculate the first step (J*D) of the coherent beam (B = J*W*D)
// Nick: by my math the order should be:
// JDx = Jxx*Dx + Jxy*Dy
// JDy = Jyx*Dx + Jyy*Dy
// This is what I have implemented (as it produces higher signal-to-noise
// ratio detections). The original code (the single-pixel beamformer)
// switched the yx and xy terms but we get similar SNs
JDx[JD_IDX(s,c,ant,nc)] = CAddd( CMuld( J[J_IDX(ant,c,0,0,nc)], Dx ),
CMuld( J[J_IDX(ant,c,0,1,nc)], Dy ) );
JDy[JD_IDX(s,c,ant,nc)] = CAddd( CMuld( J[J_IDX(ant,c,1,0,nc)], Dx ),
CMuld( J[J_IDX(ant,c,1,1,nc)], Dy ) );
}
__global__ void beamform_kernel( ComplexDouble *JDx,
ComplexDouble *JDy,
ComplexDouble *W,
float *Iin,
double invw,
int p,
int coh_pol,
int incoh,
int soffset,
int nchunk,
ComplexDouble *Bd,
float *C,
float *I )
/* Layout for input arrays:
* JDx [nsamples] [nchan] [NPFB] [NREC] [NINC] -- calibrated voltages
* JDy [nsamples] [nchan] [NPFB] [NREC] [NINC]
* W [NSTATION] [nchan] [NPOL] -- weights array
* Iin [nsamples] [nchan] [nant] -- detected incoh
 * invw -- the reciprocal of the sum of the antenna weights
 * Layout of input options
 * p -- pointing number
 * coh_pol -- coherent polarisation number
 * incoh -- true if outputting an incoherent beam
* soffset -- sample offset (10000/nchunk)
* nchunk -- number of chunks each second is split into
* Layout for output arrays:
* Bd [nsamples] [nchan] [NPOL] -- detected beam
* C [nsamples] [NSTOKES] [nchan] -- coherent full stokes
* I [nsamples] [nchan] -- incoherent
*/
{
    // Translate GPU block/thread numbers into meaningful names
int c = blockIdx.x; /* The (c)hannel number */
int nc = gridDim.x; /* The (n)umber of (c)hannels (=128) */
int s = blockIdx.y; /* The (s)ample number */
int ns = gridDim.y*nchunk; /* The (n)umber of (s)amples (=10000)*/
int ant = threadIdx.x; /* The (ant)enna number */
    int nant = blockDim.x; /* The (n)umber of (ant)ennas */
/*// GPU profiling
clock_t start, stop;
double setup_t, detect_t, sum_t, stokes_t;
if ((p == 0) && (ant == 0) && (c == 0) && (s == 0)) start = clock();*/
// Calculate the beam and the noise floor
__shared__ double Ia[NSTATION];
__shared__ ComplexDouble Bx[NSTATION], By[NSTATION];
__shared__ ComplexDouble Nxx[NSTATION], Nxy[NSTATION],
Nyy[NSTATION];//Nyx[NSTATION]
/* Fix from Maceij regarding NaNs in output when running on Athena, 13 April 2018.
Apparently the different compilers and architectures are treating what were
       uninitialised variables very differently */
Bx[ant] = CMaked( 0.0, 0.0 );
By[ant] = CMaked( 0.0, 0.0 );
Nxx[ant] = CMaked( 0.0, 0.0 );
Nxy[ant] = CMaked( 0.0, 0.0 );
//Nyx[ant] = CMaked( 0.0, 0.0 );
Nyy[ant] = CMaked( 0.0, 0.0 );
if ((p == 0) && (incoh)) Ia[ant] = Iin[JD_IDX(s,c,ant,nc)];
/*if ((p == 0) && (ant == 0) && (c == 0) && (s == 0))
{
stop = clock();
setup_t = (double)(stop - start) / CLOCKS_PER_SEC * NPOINTING * NANT;
start = clock();
}*/
// Calculate beamform products for each antenna, and then add them together
// Calculate the coherent beam (B = J*W*D)
Bx[ant] = CMuld( W[W_IDX(p,ant,c,0,nc)], JDx[JD_IDX(s,c,ant,nc)] );
By[ant] = CMuld( W[W_IDX(p,ant,c,1,nc)], JDy[JD_IDX(s,c,ant,nc)] );
Nxx[ant] = CMuld( Bx[ant], CConjd(Bx[ant]) );
Nxy[ant] = CMuld( Bx[ant], CConjd(By[ant]) );
//Nyx[ant] = CMuld( By[ant], CConjd(Bx[ant]) );
Nyy[ant] = CMuld( By[ant], CConjd(By[ant]) );
/*if ((p == 0) && (ant == 0) && (c == 0) && (s == 0))
{
stop = clock();
detect_t = (double)(stop - start) / CLOCKS_PER_SEC * NPOINTING * NANT;
start = clock();
}*/
// Detect the coherent beam
    // A summation over an array is faster on a GPU if you add one half of the array
    // to the other half, since that can be done in parallel. This is then repeated
    // with half of the previous array until the array is down to 1 element.
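    // For example, with nant = 8 the passes are: h_ant = 4 (add antennas 4-7 into 0-3),
    // h_ant = 2 (add 2-3 into 0-1), h_ant = 1 (add 1 into 0), leaving the full sum in
    // element 0 after log2(nant) = 3 synchronised passes.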
__syncthreads();
for ( int h_ant = nant / 2; h_ant > 0; h_ant = h_ant / 2 )
{
if (ant < h_ant)
{
if ( (p == 0) && (incoh)) Ia[ant] += Ia[ant+h_ant];
Bx[ant] = CAddd( Bx[ant], Bx[ant + h_ant] );
By[ant] = CAddd( By[ant], By[ant + h_ant] );
Nxx[ant] = CAddd( Nxx[ant], Nxx[ant + h_ant] );
Nxy[ant] = CAddd( Nxy[ant], Nxy[ant + h_ant] );
//Nyx[ant]=CAddd( Nyx[ant], Nyx[ant + h_ant] );
Nyy[ant] = CAddd( Nyy[ant], Nyy[ant + h_ant] );
}
// below makes no difference so removed
// else return;
__syncthreads();
}
/*if ((p == 0) && (ant == 0) && (c == 0) && (s == 0))
{
stop = clock();
sum_t = (double)(stop - start) / CLOCKS_PER_SEC * NPOINTING * NANT;
start = clock();
}*/
// Form the stokes parameters for the coherent beam
// Only doing it for ant 0 so that it only prints once
if ( ant == 0 )
{
float bnXX = DETECT(Bx[0]) - CReald(Nxx[0]);
float bnYY = DETECT(By[0]) - CReald(Nyy[0]);
ComplexDouble bnXY = CSubd( CMuld( Bx[0], CConjd( By[0] ) ),
Nxy[0] );
// The incoherent beam
if ( (p == 0) && (incoh)) I[I_IDX(s+soffset,c,nc)] = Ia[0];
// Stokes I, Q, U, V:
C[C_IDX(p,s+soffset,0,c,ns,coh_pol,nc)] = invw*(bnXX + bnYY);
if ( coh_pol == 4 )
{
C[C_IDX(p,s+soffset,1,c,ns,coh_pol,nc)] = invw*(bnXX - bnYY);
C[C_IDX(p,s+soffset,2,c,ns,coh_pol,nc)] = 2.0*invw*CReald( bnXY );
C[C_IDX(p,s+soffset,3,c,ns,coh_pol,nc)] = -2.0*invw*CImagd( bnXY );
}
// The beamformed products
Bd[B_IDX(p,s+soffset,c,0,ns,nc)] = Bx[0];
Bd[B_IDX(p,s+soffset,c,1,ns,nc)] = By[0];
}
/*if ((p == 0) && (ant == 0) && (c == 0) && (s == 0))
{
stop = clock();
stokes_t = (double)(stop - start) / CLOCKS_PER_SEC * NPOINTING * NANT;
printf("Time: setup: % f detect: %f sum: %f stokes: %f\n", setup_t, detect_t, sum_t, stokes_t);
}*/
}
__global__ void flatten_bandpass_I_kernel( float *I,
int nstep )
{
// For just doing stokes I
// One block
// 128 threads each thread will do one channel
// (we have already summed over all ant)
// For doing the C array (I,Q,U,V)
// ... figure it out later.
// Translate GPU block/thread numbers into meaningful names
int chan = threadIdx.x; /* The (c)hannel number */
int nchan = blockDim.x; /* The total number of channels */
float band;
int new_var = 32; /* magic number */
int i;
float *data_ptr;
// initialise the band 'array'
band = 0.0;
// accumulate abs(data) over all time samples and save into band
    for (i=0;i<nstep;i++) { // time steps
        data_ptr = I + I_IDX(i,chan,nchan);
        band += fabsf(*data_ptr);
    }
    // now normalise the incoherent beam
    for (i=0;i<nstep;i++) { // time steps
        data_ptr = I + I_IDX(i,chan,nchan);
        *data_ptr = (*data_ptr)/( (band/nstep)/new_var );
    }
}
__global__ void flatten_bandpass_C_kernel( float *C, int nstep )
{
// For just doing stokes I
// One block
// 128 threads each thread will do one channel
// (we have already summed over all ant)
// For doing the C array (I,Q,U,V)
// ... figure it out later.
// Translate GPU block/thread numbers into meaningful names
int chan = threadIdx.x; /* The (c)hannel number */
int nchan = blockDim.x; /* The (n)umber of (c)hannels */
int stokes = threadIdx.y; /* The (stokes) number */
int nstokes = blockDim.y; /* The (n)umber of (stokes) */
int p = blockIdx.x; /* The (p)ointing number */
float band;
int new_var = 32; /* magic number */
int i;
float *data_ptr;
// initialise the band 'array'
band = 0.0;
// accumulate abs(data) over all time samples and save into band
//data_ptr = C + C_IDX(0,stokes,chan,nchan);
for (i=0;i<nstep;i++) { // time steps
data_ptr = C + C_IDX(p,i,stokes,chan,nstep,nstokes,nchan);
band += fabsf(*data_ptr);
}
// now normalise the coherent beam
//data_ptr = C + C_IDX(0,stokes,chan,nchan);
for (i=0;i<nstep;i++) { // time steps
data_ptr = C + C_IDX(p,i,stokes,chan,nstep,nstokes,nchan);
*data_ptr = (*data_ptr)/( (band/nstep)/new_var );
}
}
void cu_form_beam( uint8_t *data, struct make_beam_opts *opts,
ComplexDouble ****complex_weights_array,
ComplexDouble ****invJi, int file_no,
int npointing, int nstation, int nchan,
int npol, int outpol_coh, double invw,
struct gpu_formbeam_arrays *g,
ComplexDouble ****detected_beam, float *coh, float *incoh,
cudaStream_t *streams, int incoh_check, int nchunk )
/* The GPU implementation of the beamforming operations, with the work
 * spread across GPU threads and one stream per pointing.
*
* Inputs:
* data = array of 4bit+4bit complex numbers. For data order, refer to the
* documentation.
* opts = passed option parameters, containing meta information about the
* obs and the data
* W = complex weights array. [npointing][nstation][nchan][npol]
* J = inverse Jones matrix. [nstation][nchan][npol][npol]
* file_no = number of file we are processing, starting at 0.
* nstation = 128
* nchan = 128
* npol = 2 (X,Y)
* outpol_coh = 4 (I,Q,U,V)
* invw = the reciprocal of the sum of the antenna weights
* g = struct containing pointers to various arrays on
* both host and device
*
* Outputs:
* detected_beam = result of beamforming operation, summed over antennas
* [2*nsamples][nchan][npol]
* coh = result in Stokes parameters (minus noise floor)
* [nsamples][nstokes][nchan]
* incoh = result (just Stokes I)
* [nsamples][nchan]
*
* Assumes "coh" and "incoh" contain only zeros.
*/
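/* Index layout sketch (illustrative, using the example sizes quoted above:
 * nstation = 128, nchan = 128, npol = 2). The weights are flattened as
 *     Wi = p*(npol*nchan*nstation) + ant*(npol*nchan) + ch*npol + pol
 *        = p*32768 + ant*256 + ch*2 + pol,
 * and the inverse Jones terms as
 *     Ji = ant*(npol*npol*nchan) + ch*(npol*npol) + pol*npol + pol2,
 * matching the loops in the function body below.
 */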
{
// Setup input values (= populate W and J)
int p, ant, ch, pol, pol2;
int Wi, Ji;
for (p = 0; p < npointing; p++ )
for (ant = 0; ant < nstation ; ant++)
for (ch = 0; ch < nchan ; ch++ )
for (pol = 0; pol < npol ; pol++)
{
Wi = p * (npol*nchan*nstation) +
ant * (npol*nchan) +
ch * (npol) +
pol;
g->W[Wi] = complex_weights_array[p][ant][ch][pol];
if ( p == 0 )
for (pol2 = 0; pol2 < npol; pol2++)
{
Ji = ant * (npol*npol*nchan) +
ch * (npol*npol) +
pol * (npol) +
pol2;
g->J[Ji] = invJi[ant][ch][pol][pol2];
}
}
// Copy the data to the device
gpuErrchk(cudaMemcpyAsync( g->d_W, g->W, g->W_size, cudaMemcpyHostToDevice ));
gpuErrchk(cudaMemcpyAsync( g->d_J, g->J, g->J_size, cudaMemcpyHostToDevice ));
// Divide the gpu calculation into multiple time chunks so there is enough room on the GPU
for (int ichunk = 0; ichunk < nchunk; ichunk++)
{
//int dataoffset = ichunk * g->data_size / sizeof(uint8_t);
gpuErrchk(cudaMemcpyAsync( g->d_data,
data + ichunk * g->data_size / sizeof(uint8_t),
g->data_size, cudaMemcpyHostToDevice ));
// Call the kernels
// samples_chan(index=blockIdx.x size=gridDim.x,
// index=blockIdx.y size=gridDim.y)
// stat_point (index=threadIdx.x size=blockDim.x,
// index=threadIdx.y size=blockDim.y)
//dim3 samples_chan(opts->sample_rate, nchan);
dim3 chan_samples( nchan, opts->sample_rate / nchunk );
dim3 stat( NSTATION );
// convert the data and multiply it by J
invj_the_data<<<chan_samples, stat>>>( g->d_data, g->d_J, g->d_W, g->d_JDx, g->d_JDy,
g->d_Ia, incoh_check );
        // Send off a parallel CUDA stream for each pointing
for ( int p = 0; p < npointing; p++ )
{
beamform_kernel<<<chan_samples, stat, 0, streams[p]>>>( g->d_JDx, g->d_JDy,
g->d_W, g->d_Ia, invw,
p, outpol_coh , incoh_check, ichunk*opts->sample_rate/nchunk, nchunk,
g->d_Bd, g->d_coh, g->d_incoh );
gpuErrchk( cudaPeekAtLastError() );
}
}
gpuErrchk( cudaDeviceSynchronize() );
// Flatten the bandpass
if ( incoh_check )
{
flatten_bandpass_I_kernel<<<1, nchan, 0, streams[0]>>>( g->d_incoh,
opts->sample_rate );
gpuErrchk( cudaPeekAtLastError() );
}
// Now do the same for the coherent beam
dim3 chan_stokes(nchan, outpol_coh);
flatten_bandpass_C_kernel<<<npointing, chan_stokes, 0, streams[0]>>>( g->d_coh,
opts->sample_rate );
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
// Copy the results back into host memory
gpuErrchk(cudaMemcpyAsync( g->Bd, g->d_Bd, g->Bd_size, cudaMemcpyDeviceToHost ));
gpuErrchk(cudaMemcpyAsync( incoh, g->d_incoh, g->incoh_size, cudaMemcpyDeviceToHost ));
gpuErrchk(cudaMemcpyAsync( coh, g->d_coh, g->coh_size, cudaMemcpyDeviceToHost ));
// Copy the data back from Bd back into the detected_beam array
// Make sure we put it back into the correct half of the array, depending
// on whether this is an even or odd second.
int offset, i;
offset = file_no % 2 * opts->sample_rate;
for ( int p = 0; p < npointing ; p++ )
for ( int s = 0; s < opts->sample_rate; s++ )
for ( int ch = 0; ch < nchan ; ch++ )
for ( int pol = 0; pol < npol ; pol++)
{
i = p * (npol*nchan*opts->sample_rate) +
s * (npol*nchan) +
ch * (npol) +
pol;
detected_beam[p][s+offset][ch][pol] = g->Bd[i];
}
}
void malloc_formbeam( struct gpu_formbeam_arrays *g, unsigned int sample_rate,
int nstation, int nchan, int npol, int nchunk, int outpol_coh,
int outpol_incoh, int npointing, double time )
{
// Calculate array sizes for host and device
g->coh_size = npointing * sample_rate * outpol_coh * nchan * sizeof(float);
g->incoh_size = sample_rate * outpol_incoh * nchan * sizeof(float);
g->data_size = sample_rate * nstation * nchan * npol / nchunk * sizeof(uint8_t);
g->Bd_size = npointing * sample_rate * nchan * npol * sizeof(ComplexDouble);
g->W_size = npointing * nstation * nchan * npol * sizeof(ComplexDouble);
g->J_size = nstation * nchan * npol * npol * sizeof(ComplexDouble);
g->JD_size = sample_rate * nstation * nchan / nchunk * sizeof(ComplexDouble);
// Allocate host memory
//g->W = (ComplexDouble *)malloc( g->W_size );
//g->J = (ComplexDouble *)malloc( g->J_size );
//g->Bd = (ComplexDouble *)malloc( g->Bd_size );
cudaMallocHost( &g->W, g->W_size );
cudaCheckErrors("cudaMallocHost W fail");
cudaMallocHost( &g->J, g->J_size );
cudaCheckErrors("cudaMallocHost J fail");
cudaMallocHost( &g->Bd, g->Bd_size );
cudaCheckErrors("cudaMallocHost Bd fail");
fprintf( stderr, "[%f] coh_size %d MB GPU mem\n", time, g->coh_size /1000000 );
fprintf( stderr, "[%f] incoh_size %d MB GPU mem\n", time, g->incoh_size/1000000 );
fprintf( stderr, "[%f] data_size %d MB GPU mem\n", time, g->data_size /1000000 );
fprintf( stderr, "[%f] Bd_size %d MB GPU mem\n", time, g->Bd_size /1000000 );
fprintf( stderr, "[%f] W_size %d MB GPU mem\n", time, g->W_size /1000000 );
fprintf( stderr, "[%f] J_size %d MB GPU mem\n", time, g->J_size /1000000 );
fprintf( stderr, "[%f] JD_size %d MB GPU mem\n", time, g->JD_size*3 /1000000 );
int GPU_mem = (g->W_size + g->J_size + g->Bd_size + g->data_size +
g->coh_size + g->incoh_size + 3*g->JD_size) /1000000000;
fprintf( stderr, "[%f] %d GB GPU memory allocated\n", time, GPU_mem );
// Allocate device memory
gpuErrchk(cudaMalloc( (void **)&g->d_W, g->W_size ));
gpuErrchk(cudaMalloc( (void **)&g->d_J, g->J_size ));
gpuErrchk(cudaMalloc( (void **)&g->d_JDx, g->JD_size ));
gpuErrchk(cudaMalloc( (void **)&g->d_JDy, g->JD_size ));
gpuErrchk(cudaMalloc( (void **)&g->d_Ia, g->JD_size ));
gpuErrchk(cudaMalloc( (void **)&g->d_Bd, g->Bd_size ));
gpuErrchk(cudaMalloc( (void **)&g->d_data, g->data_size ));
gpuErrchk(cudaMalloc( (void **)&g->d_coh, g->coh_size ));
gpuErrchk(cudaMalloc( (void **)&g->d_incoh, g->incoh_size ));
}
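/* Rough sizing sketch (assumed example values, not taken from this code): with
 * npointing = 1, nstation = 128, nchan = 128, npol = 2, outpol_coh = 4, an
 * assumed sample_rate of 10000 and sizeof(ComplexDouble) = 16 bytes:
 *   W_size   = 1*128*128*2*16        ~  0.5 MB
 *   J_size   = 128*128*2*2*16        ~  1.0 MB
 *   Bd_size  = 1*10000*128*2*16      ~ 41   MB
 *   coh_size = 1*10000*4*128*4       ~ 20   MB
 *   JD_size  = 10000*128*128*16/nchunk ~ 2.6 GB / nchunk (allocated 3 times),
 * which is why the data is processed in nchunk time chunks.
 */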
void free_formbeam( struct gpu_formbeam_arrays *g )
{
// Free memory on host and device
cudaFreeHost( g->W );
cudaFreeHost( g->J );
cudaFreeHost( g->Bd );
cudaFree( g->d_W );
cudaFree( g->d_J );
cudaFree( g->d_Bd );
cudaFree( g->d_data );
cudaFree( g->d_coh );
cudaFree( g->d_incoh );
}
float *create_pinned_data_buffer_psrfits( size_t size )
{
float *ptr;
cudaMallocHost( &ptr, size * sizeof(float) );
//cudaError_t status = cudaHostRegister((void**)&ptr, size * sizeof(float),
// cudaHostRegisterPortable );
cudaCheckErrors("cudaMallocHost data_buffer_psrfits fail");
return ptr;
}
float *create_pinned_data_buffer_vdif( size_t size )
{
float *ptr;
cudaMallocHost( &ptr, size * sizeof(float) );
//cudaError_t status = cudaHostRegister((void**)&ptr, size * sizeof(float),
// cudaHostRegisterPortable );
cudaCheckErrors("cudaMallocHost data_buffer_vdif fail");
return ptr;
}
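/* Usage sketch (illustrative; the element count and the later free are
 * assumptions based on the code above, not taken from the callers):
 *
 *     float *buf = create_pinned_data_buffer_vdif( nsamples * nchan * npol );
 *     ... fill buf and stream it to the GPU ...
 *     cudaFreeHost( buf );
 *
 * Note that "size" is a number of floats, not bytes; the byte count passed to
 * cudaMallocHost is size*sizeof(float).
 */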
void populate_weights_johnes( struct gpu_formbeam_arrays *g,
ComplexDouble ****complex_weights_array,
ComplexDouble *****invJi,
int npointing, int nstation, int nchan, int npol )
{
// Setup input values (= populate W and J)
int p, ant, ch, pol, pol2;
int Wi, Ji;
for (p = 0; p < npointing; p++ )
for (ant = 0; ant < nstation ; ant++)
for (ch = 0; ch < nchan ; ch++ )
for (pol = 0; pol < npol ; pol++)
{
Wi = p * (npol*nchan*nstation) +
ant * (npol*nchan) +
ch * (npol) +
pol;
g->W[Wi] = complex_weights_array[p][ant][ch][pol];
for (pol2 = 0; pol2 < npol; pol2++)
{
Ji = Wi*npol + pol2;
g->J[Ji] = invJi[p][ant][ch][pol][pol2];
}
}
// Copy the data to the device
gpuErrchk(cudaMemcpy( g->d_W, g->W, g->W_size, cudaMemcpyHostToDevice ));
gpuErrchk(cudaMemcpy( g->d_J, g->J, g->J_size, cudaMemcpyHostToDevice ));
}
|
359d031c8c88ea3e06e37bc96cbe5539f5dfb2c4.hip | // !!! This is a file automatically generated by hipify!!!
// System includes
#include <stdio.h>
#include <assert.h>
#include <float.h>
// CUDA runtime
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <EasyBMP.h>
#include <EasyBMP.cpp>
#define BLOCK_DIM 16
typedef struct {
float avgerages[9];
float dispersions[9];
} Pair;
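/* Sizing note (illustrative, image size assumed): each pixel stores nine
 * (average, dispersion) candidates, so sizeof(Pair) = 18 floats = 72 bytes.
 * For an assumed 512x512 image that is 512*512*72 ~ 18.9 MB per colour
 * channel, or ~75 MB for the four R,G,B,A temporaries allocated in init().
 */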
/* The device kernel, takes as input the noisy image
* and outputs the filtered image
*/
template<int BLOCK_SIZE> __global__ void rotatingMaskCUDA(Pair * filtered_r, Pair * filtered_g, Pair * filtered_b, Pair * filtered_a,
unsigned char * img_r, unsigned char * img_g, unsigned char * img_b, unsigned char * img_a,
int rows, int cols) {
__shared__ unsigned char input_img_r[BLOCK_SIZE][BLOCK_SIZE];
__shared__ unsigned char input_img_g[BLOCK_SIZE][BLOCK_SIZE];
__shared__ unsigned char input_img_b[BLOCK_SIZE][BLOCK_SIZE];
__shared__ unsigned char input_img_a[BLOCK_SIZE][BLOCK_SIZE];
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * BLOCK_SIZE + ty;
int col = bx * BLOCK_SIZE + tx;
	/* Overlap neighbouring tiles by two pixels so every 3x3 mask fits entirely within one tile */
row -= 2 * by;
col -= 2 * bx;
if (row < rows && col < cols) {
input_img_r[ty][tx] = img_r[cols * row + col];
input_img_g[ty][tx] = img_g[cols * row + col];
input_img_b[ty][tx] = img_b[cols * row + col];
input_img_a[ty][tx] = img_a[cols * row + col];
}
__syncthreads();
if (row < rows && col < cols) {
float tmp_c = cols;
float tmp_r = rows;
int numberOfBlocksx = (int) ceil(tmp_c / (BLOCK_SIZE - 2));
int numberOfBlocksy = (int) ceil(tmp_r / (BLOCK_SIZE - 2));
// Check if this pixel should compute the average and the dispersion
if ((bx < numberOfBlocksx - 1
|| (bx == numberOfBlocksx - 1
&& (tx < cols - bx * (BLOCK_SIZE - 2) - 2)))
&& (by < numberOfBlocksy - 1
|| (by == numberOfBlocksy - 1
&& (ty < rows - by * (BLOCK_SIZE - 2) - 2)))) {
if (tx < BLOCK_SIZE - 2 && ty < BLOCK_SIZE - 2) {
//---------------------RED CHANNEL---------------------
/* Calculate the average for the mask
* with the current pixel positioned at
* the upper-left corner */
float sum = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = tx + j;
int tmp_row = ty + i;
sum += input_img_r[tmp_row][tmp_col];
}
}
float average = sum / 9;
/* Calculate the dispersion for the mask
* with the current pixel positioned at
* the upper-left corner */
float dispersion = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = tx + j;
int tmp_row = ty + i;
dispersion += (input_img_r[tmp_row][tmp_col] - average)
* (input_img_r[tmp_row][tmp_col] - average);
}
}
// dispersion /= 9;
/* Assign the value of the calculated mask to each pixel
* i.e. the current mask will be added to index 0
* of the Upper left pixel, and index 1 of the
* Upper left-but-one pixel, and so on.
*/
int index = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = col + j;
int tmp_row = row + i;
filtered_r[tmp_col + tmp_row * cols].avgerages[index] =
average;
filtered_r[tmp_col + tmp_row * cols].dispersions[index] =
dispersion;
index++;
}
}
//---------------------GREEN CHANNEL---------------------
/* Calculate the average for the mask
* with the current pixel positioned at
* the upper-left corner */
sum = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = tx + j;
int tmp_row = ty + i;
sum += input_img_g[tmp_row][tmp_col];
}
}
average = sum / 9;
/* Calculate the dispersion for the mask
* with the current pixel positioned at
* the upper-left corner */
dispersion = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = tx + j;
int tmp_row = ty + i;
dispersion += (input_img_g[tmp_row][tmp_col] - average)
* (input_img_g[tmp_row][tmp_col] - average);
}
}
/* Assign the value of the calculated mask to each pixel
* i.e. the current mask will be added to index 0
* of the Upper left pixel, and index 1 of the
* Upper left-but-one pixel, and so on.
*/
index = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = col + j;
int tmp_row = row + i;
filtered_g[tmp_col + tmp_row * cols].avgerages[index] =
average;
filtered_g[tmp_col + tmp_row * cols].dispersions[index] =
dispersion;
index++;
}
}
//---------------------BLUE CHANNEL---------------------
/* Calculate the average for the mask
* with the current pixel positioned at
* the upper-left corner */
sum = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = tx + j;
int tmp_row = ty + i;
sum += input_img_b[tmp_row][tmp_col];
}
}
average = sum / 9;
/* Calculate the dispersion for the mask
* with the current pixel positioned at
* the upper-left corner */
dispersion = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = tx + j;
int tmp_row = ty + i;
dispersion += (input_img_b[tmp_row][tmp_col] - average)
* (input_img_b[tmp_row][tmp_col] - average);
}
}
/* Assign the value of the calculated mask to each pixel
* i.e. the current mask will be added to index 0
* of the Upper left pixel, and index 1 of the
* Upper left-but-one pixel, and so on.
*/
index = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = col + j;
int tmp_row = row + i;
filtered_b[tmp_col + tmp_row * cols].avgerages[index] =
average;
filtered_b[tmp_col + tmp_row * cols].dispersions[index] =
dispersion;
index++;
}
}
//---------------------Alpha CHANNEL---------------------
/* Calculate the average for the mask
* with the current pixel positioned at
* the upper-left corner */
sum = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = tx + j;
int tmp_row = ty + i;
sum += input_img_a[tmp_row][tmp_col];
}
}
average = sum / 9;
/* Calculate the dispersion for the mask
* with the current pixel positioned at
* the upper-left corner */
dispersion = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = tx + j;
int tmp_row = ty + i;
dispersion += (input_img_a[tmp_row][tmp_col] - average)
* (input_img_a[tmp_row][tmp_col] - average);
}
}
/* Assign the value of the calculated mask to each pixel
* i.e. the current mask will be added to index 0
* of the Upper left pixel, and index 1 of the
* Upper left-but-one pixel, and so on.
*/
index = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = col + j;
int tmp_row = row + i;
filtered_a[tmp_col + tmp_row * cols].avgerages[index] =
average;
filtered_a[tmp_col + tmp_row * cols].dispersions[index] =
dispersion;
index++;
}
}
}
}
}
}
template<int BLOCK_SIZE> __global__ void getArrayMin(unsigned char * output_img_r, unsigned char * output_img_g, unsigned char * output_img_b, unsigned char * output_img_a,
Pair * input_img_r, Pair * input_img_g, Pair * input_img_b, Pair * input_img_a, int rows, int cols) {
/* Calculate the index of the 2d array */
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * BLOCK_SIZE + ty;
	int col = bx * BLOCK_SIZE + tx;
	/* Guard threads that fall outside the image when rows/cols are not
	 * multiples of BLOCK_SIZE (the launch grid is rounded up with ceil) */
	if (row >= rows || col >= cols)
		return;
float min = FLT_MAX;
int min_index = 0;
float *dispersions = input_img_r[col + row * cols].dispersions;
for (int i = 0; i < 9; i++) {
float tmp = dispersions[i];
if (tmp < min && tmp >= 0) {
min = tmp;
min_index = i;
}
}
output_img_r[col + row * cols] = input_img_r[col + row * cols].avgerages[min_index];
min = FLT_MAX;
min_index = 0;
dispersions = input_img_g[col + row * cols].dispersions;
for (int i = 0; i < 9; i++) {
float tmp = dispersions[i];
if (tmp < min && tmp >= 0) {
min = tmp;
min_index = i;
}
}
output_img_g[col + row * cols] = input_img_g[col + row * cols].avgerages[min_index];
min = FLT_MAX;
min_index = 0;
dispersions = input_img_b[col + row * cols].dispersions;
for (int i = 0; i < 9; i++) {
float tmp = dispersions[i];
if (tmp < min && tmp >= 0) {
min = tmp;
min_index = i;
}
}
output_img_b[col + row * cols] = input_img_b[col + row * cols].avgerages[min_index];
min = FLT_MAX;
min_index = 0;
dispersions = input_img_a[col + row * cols].dispersions;
for (int i = 0; i < 9; i++) {
float tmp = dispersions[i];
if (tmp < min && tmp >= 0) {
min = tmp;
min_index = i;
}
}
output_img_a[col + row * cols] = input_img_a[col + row * cols].avgerages[min_index];
}
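/* Illustrative single-channel CPU sketch of the rotating-mask rule implemented
 * by the two kernels above (the function name, argument types and the fact that
 * it is never called are assumptions of this sketch, not part of the original
 * filter): for one pixel it evaluates every 3x3 mask that contains the pixel
 * and returns the average of the mask with the smallest dispersion.
 */
static float rotatingMaskReferencePixel(const unsigned char *img, int rows,
                                        int cols, int row, int col) {
	float best_avg = 0.0f;
	float best_disp = FLT_MAX;
	/* (dr, dc) is the offset of the mask's upper-left corner from the pixel */
	for (int dr = -2; dr <= 0; dr++) {
		for (int dc = -2; dc <= 0; dc++) {
			int r0 = row + dr;
			int c0 = col + dc;
			/* skip masks that fall outside the image */
			if (r0 < 0 || c0 < 0 || r0 + 2 >= rows || c0 + 2 >= cols)
				continue;
			float sum = 0.0f;
			for (int i = 0; i < 3; i++)
				for (int j = 0; j < 3; j++)
					sum += img[(r0 + i) * cols + (c0 + j)];
			float average = sum / 9;
			float dispersion = 0.0f;
			for (int i = 0; i < 3; i++)
				for (int j = 0; j < 3; j++) {
					float d = img[(r0 + i) * cols + (c0 + j)] - average;
					dispersion += d * d;
				}
			if (dispersion < best_disp) {
				best_disp = dispersion;
				best_avg = average;
			}
		}
	}
	return best_avg;
}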
void init(BMP* imgOut, unsigned char * img_r, unsigned char * img_g, unsigned char * img_b, unsigned char * img_a, int rows, int cols) {
// Device input image and filtered image
unsigned char *d_img_r, *d_img_g, *d_img_b, *d_img_a;
unsigned char *d_filtered_r, *d_filtered_g, *d_filtered_b, *d_filtered_a;
unsigned char *filtered_r, *filtered_g, *filtered_b, *filtered_a;
filtered_r = (unsigned char *) malloc(sizeof(unsigned char) * cols * rows);
filtered_g = (unsigned char *) malloc(sizeof(unsigned char) * cols * rows);
filtered_b = (unsigned char *) malloc(sizeof(unsigned char) * cols * rows);
filtered_a = (unsigned char *) malloc(sizeof(unsigned char) * cols * rows);
// The temporary matrix holding the averages
// and dispersions for all 9 mask positions
Pair *d_tmp_r, *d_tmp_g, *d_tmp_b, *d_tmp_a;
// Allocate and copy input image to device
int size = rows * cols * sizeof(unsigned char);
hipMalloc((void**) &d_img_r, size);
hipMemcpy(d_img_r, img_r, size, hipMemcpyHostToDevice);
hipMalloc((void**) &d_img_g, size);
hipMemcpy(d_img_g, img_g, size, hipMemcpyHostToDevice);
hipMalloc((void**) &d_img_b, size);
hipMemcpy(d_img_b, img_b, size, hipMemcpyHostToDevice);
hipMalloc((void**) &d_img_a, size);
hipMemcpy(d_img_a, img_a, size, hipMemcpyHostToDevice);
// Allocate memory for output image
hipMalloc((void**) &d_filtered_r, size);
hipMalloc((void**) &d_filtered_g, size);
hipMalloc((void**) &d_filtered_b, size);
hipMalloc((void**) &d_filtered_a, size);
// Allocate memory for tmp matrix
int size_pair = rows * cols * sizeof(Pair);
hipMalloc((void**) &d_tmp_r, size_pair);
hipMalloc((void**) &d_tmp_g, size_pair);
hipMalloc((void**) &d_tmp_b, size_pair);
hipMalloc((void**) &d_tmp_a, size_pair);
// Define grid and block dimensions
dim3 block(BLOCK_DIM, BLOCK_DIM, 1);
dim3 grid((int) ceil((cols * 1.0) / (BLOCK_DIM - 2)),
(int) ceil((rows * 1.0) / (BLOCK_DIM - 2)), 1);
// Kernel invocations
hipLaunchKernelGGL(( rotatingMaskCUDA<BLOCK_DIM>) , dim3(grid), dim3(block), 0, 0, d_tmp_r, d_tmp_g, d_tmp_b, d_tmp_a,
d_img_r, d_img_g, d_img_b, d_img_a,
rows, cols);
dim3 grid2((int) ceil((cols * 1.0) / BLOCK_DIM),
(int) ceil((rows * 1.0) / BLOCK_DIM), 1);
hipLaunchKernelGGL(( getArrayMin<BLOCK_DIM>) , dim3(grid2), dim3(block), 0, 0, d_filtered_r, d_filtered_g, d_filtered_b, d_filtered_a,
d_tmp_r, d_tmp_g, d_tmp_b, d_tmp_a,
rows, cols);
// Copy the filtered image to the host memory
hipMemcpy(filtered_r, d_filtered_r, size, hipMemcpyDeviceToHost);
hipMemcpy(filtered_g, d_filtered_g, size, hipMemcpyDeviceToHost);
hipMemcpy(filtered_b, d_filtered_b, size, hipMemcpyDeviceToHost);
hipMemcpy(filtered_a, d_filtered_a, size, hipMemcpyDeviceToHost);
// Free allocated memory
hipFree(d_img_r); hipFree(d_img_g); hipFree(d_img_b); hipFree(d_img_a);
hipFree(d_tmp_r); hipFree(d_tmp_g); hipFree(d_tmp_b); hipFree(d_tmp_a);
hipFree(d_filtered_r); hipFree(d_filtered_g); hipFree(d_filtered_b); hipFree(d_filtered_a);
(*imgOut).fromPixelArrays(filtered_r, filtered_g, filtered_b, filtered_a,
cols, rows);
}
void checkCuda(int argc, char **argv) {
printf(
"[Rotating mask technique for image filtering Using CUDA] - Starting...\n");
// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
int devID = 0;
if (checkCmdLineFlag(argc, (const char **) argv, "device")) {
devID = getCmdLineArgumentInt(argc, (const char **) argv, "device");
hipSetDevice(devID);
}
hipError_t error;
hipDeviceProp_t deviceProp;
error = hipGetDevice(&devID);
if (error != hipSuccess) {
printf("hipGetDevice returned error code %d, line(%d)\n", error,
__LINE__);
}
error = hipGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == hipComputeModeProhibited) {
fprintf(stderr,
"Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != hipSuccess) {
printf("hipGetDeviceProperties returned error code %d, line(%d)\n",
error, __LINE__);
} else {
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID,
deviceProp.name, deviceProp.major, deviceProp.minor);
}
}
int main(int argc, char **argv) {
/* Check if CUDA is available */
checkCuda(argc, argv);
BMP imgIn, imgOut;
imgIn.ReadFromFile("../../test images/lena_noise.bmp");
int width = imgIn.TellWidth();
int height = imgIn.TellHeight();
unsigned char *pixelsIn_r, *pixelsIn_g, *pixelsIn_b, *pixelsIn_a;
// read the 4 channels R, G, B and A from the BMP object
pixelsIn_r = imgIn.getPixelArray(Red);
pixelsIn_g = imgIn.getPixelArray(Green);
pixelsIn_b = imgIn.getPixelArray(Blue);
pixelsIn_a = imgIn.getPixelArray(Alpha);
/************************************** Timing **************************************/
// hipError_t error;
// // Allocate CUDA events that we'll use for timing
// hipEvent_t start;
// error = hipEventCreate(&start);
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// hipEvent_t stop;
// error = hipEventCreate(&stop);
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// // Record the start event
// error = hipEventRecord(start, NULL);
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
/************************************** Timing **************************************/
// compute the corresponding 4 channels after performing filtering
init(&imgOut, pixelsIn_r, pixelsIn_g, pixelsIn_b, pixelsIn_a, height, width);
/************************************** Timing **************************************/
// // Record the stop event
// error = hipEventRecord(stop, NULL);
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// // Wait for the stop event to complete
// error = hipEventSynchronize(stop);
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// float msecTotal = 0.0f;
// error = hipEventElapsedTime(&msecTotal, start, stop);
// if (error != hipSuccess)
// {
// fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
// exit(EXIT_FAILURE);
// }
/************************************** Timing **************************************/
// write the computed channels to a bmp image file
imgOut.WriteToFile("../../output images/lena_noise_filtered.bmp");
return 0;
} | 359d031c8c88ea3e06e37bc96cbe5539f5dfb2c4.cu | // System includes
#include <stdio.h>
#include <assert.h>
#include <float.h>
// CUDA runtime
#include <cuda_runtime.h>
#include <helper_functions.h>
#include <EasyBMP.h>
#include <EasyBMP.cpp>
#define BLOCK_DIM 16
typedef struct {
float avgerages[9];
float dispersions[9];
} Pair;
/* The device kernel, takes as input the noisy image
* and outputs the filtered image
*/
template<int BLOCK_SIZE> __global__ void rotatingMaskCUDA(Pair * filtered_r, Pair * filtered_g, Pair * filtered_b, Pair * filtered_a,
unsigned char * img_r, unsigned char * img_g, unsigned char * img_b, unsigned char * img_a,
int rows, int cols) {
__shared__ unsigned char input_img_r[BLOCK_SIZE][BLOCK_SIZE];
__shared__ unsigned char input_img_g[BLOCK_SIZE][BLOCK_SIZE];
__shared__ unsigned char input_img_b[BLOCK_SIZE][BLOCK_SIZE];
__shared__ unsigned char input_img_a[BLOCK_SIZE][BLOCK_SIZE];
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * BLOCK_SIZE + ty;
int col = bx * BLOCK_SIZE + tx;
/* Overlapping the tiles */
row -= 2 * by;
col -= 2 * bx;
if (row < rows && col < cols) {
input_img_r[ty][tx] = img_r[cols * row + col];
input_img_g[ty][tx] = img_g[cols * row + col];
input_img_b[ty][tx] = img_b[cols * row + col];
input_img_a[ty][tx] = img_a[cols * row + col];
}
__syncthreads();
if (row < rows && col < cols) {
float tmp_c = cols;
float tmp_r = rows;
int numberOfBlocksx = (int) ceil(tmp_c / (BLOCK_SIZE - 2));
int numberOfBlocksy = (int) ceil(tmp_r / (BLOCK_SIZE - 2));
// Check if this pixel should compute the average and the dispersion
if ((bx < numberOfBlocksx - 1
|| (bx == numberOfBlocksx - 1
&& (tx < cols - bx * (BLOCK_SIZE - 2) - 2)))
&& (by < numberOfBlocksy - 1
|| (by == numberOfBlocksy - 1
&& (ty < rows - by * (BLOCK_SIZE - 2) - 2)))) {
if (tx < BLOCK_SIZE - 2 && ty < BLOCK_SIZE - 2) {
//---------------------RED CHANNEL---------------------
/* Calculate the average for the mask
* with the current pixel positioned at
* the upper-left corner */
float sum = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = tx + j;
int tmp_row = ty + i;
sum += input_img_r[tmp_row][tmp_col];
}
}
float average = sum / 9;
/* Calculate the dispersion for the mask
* with the current pixel positioned at
* the upper-left corner */
float dispersion = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = tx + j;
int tmp_row = ty + i;
dispersion += (input_img_r[tmp_row][tmp_col] - average)
* (input_img_r[tmp_row][tmp_col] - average);
}
}
// dispersion /= 9;
/* Assign the value of the calculated mask to each pixel
* i.e. the current mask will be added to index 0
* of the Upper left pixel, and index 1 of the
* Upper left-but-one pixel, and so on.
*/
int index = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = col + j;
int tmp_row = row + i;
filtered_r[tmp_col + tmp_row * cols].avgerages[index] =
average;
filtered_r[tmp_col + tmp_row * cols].dispersions[index] =
dispersion;
index++;
}
}
//---------------------GREEN CHANNEL---------------------
/* Calculate the average for the mask
* with the current pixel positioned at
* the upper-left corner */
sum = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = tx + j;
int tmp_row = ty + i;
sum += input_img_g[tmp_row][tmp_col];
}
}
average = sum / 9;
/* Calculate the dispersion for the mask
* with the current pixel positioned at
* the upper-left corner */
dispersion = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = tx + j;
int tmp_row = ty + i;
dispersion += (input_img_g[tmp_row][tmp_col] - average)
* (input_img_g[tmp_row][tmp_col] - average);
}
}
/* Assign the value of the calculated mask to each pixel
* i.e. the current mask will be added to index 0
* of the Upper left pixel, and index 1 of the
* Upper left-but-one pixel, and so on.
*/
index = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = col + j;
int tmp_row = row + i;
filtered_g[tmp_col + tmp_row * cols].avgerages[index] =
average;
filtered_g[tmp_col + tmp_row * cols].dispersions[index] =
dispersion;
index++;
}
}
//---------------------BLUE CHANNEL---------------------
/* Calculate the average for the mask
* with the current pixel positioned at
* the upper-left corner */
sum = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = tx + j;
int tmp_row = ty + i;
sum += input_img_b[tmp_row][tmp_col];
}
}
average = sum / 9;
/* Calculate the dispersion for the mask
* with the current pixel positioned at
* the upper-left corner */
dispersion = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = tx + j;
int tmp_row = ty + i;
dispersion += (input_img_b[tmp_row][tmp_col] - average)
* (input_img_b[tmp_row][tmp_col] - average);
}
}
/* Assign the value of the calculated mask to each pixel
* i.e. the current mask will be added to index 0
* of the Upper left pixel, and index 1 of the
* Upper left-but-one pixel, and so on.
*/
index = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = col + j;
int tmp_row = row + i;
filtered_b[tmp_col + tmp_row * cols].avgerages[index] =
average;
filtered_b[tmp_col + tmp_row * cols].dispersions[index] =
dispersion;
index++;
}
}
//---------------------Alpha CHANNEL---------------------
/* Calculate the average for the mask
* with the current pixel positioned at
* the upper-left corner */
sum = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = tx + j;
int tmp_row = ty + i;
sum += input_img_a[tmp_row][tmp_col];
}
}
average = sum / 9;
/* Calculate the dispersion for the mask
* with the current pixel positioned at
* the upper-left corner */
dispersion = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = tx + j;
int tmp_row = ty + i;
dispersion += (input_img_a[tmp_row][tmp_col] - average)
* (input_img_a[tmp_row][tmp_col] - average);
}
}
/* Assign the value of the calculated mask to each pixel
* i.e. the current mask will be added to index 0
* of the Upper left pixel, and index 1 of the
* Upper left-but-one pixel, and so on.
*/
index = 0;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {
int tmp_col = col + j;
int tmp_row = row + i;
filtered_a[tmp_col + tmp_row * cols].avgerages[index] =
average;
filtered_a[tmp_col + tmp_row * cols].dispersions[index] =
dispersion;
index++;
}
}
}
}
}
}
template<int BLOCK_SIZE> __global__ void getArrayMin(unsigned char * output_img_r, unsigned char * output_img_g, unsigned char * output_img_b, unsigned char * output_img_a,
Pair * input_img_r, Pair * input_img_g, Pair * input_img_b, Pair * input_img_a, int rows, int cols) {
/* Calculate the index of the 2d array */
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * BLOCK_SIZE + ty;
	int col = bx * BLOCK_SIZE + tx;
	/* Guard threads that fall outside the image when rows/cols are not
	 * multiples of BLOCK_SIZE (the launch grid is rounded up with ceil) */
	if (row >= rows || col >= cols)
		return;
float min = FLT_MAX;
int min_index = 0;
float *dispersions = input_img_r[col + row * cols].dispersions;
for (int i = 0; i < 9; i++) {
float tmp = dispersions[i];
if (tmp < min && tmp >= 0) {
min = tmp;
min_index = i;
}
}
output_img_r[col + row * cols] = input_img_r[col + row * cols].avgerages[min_index];
min = FLT_MAX;
min_index = 0;
dispersions = input_img_g[col + row * cols].dispersions;
for (int i = 0; i < 9; i++) {
float tmp = dispersions[i];
if (tmp < min && tmp >= 0) {
min = tmp;
min_index = i;
}
}
output_img_g[col + row * cols] = input_img_g[col + row * cols].avgerages[min_index];
min = FLT_MAX;
min_index = 0;
dispersions = input_img_b[col + row * cols].dispersions;
for (int i = 0; i < 9; i++) {
float tmp = dispersions[i];
if (tmp < min && tmp >= 0) {
min = tmp;
min_index = i;
}
}
output_img_b[col + row * cols] = input_img_b[col + row * cols].avgerages[min_index];
min = FLT_MAX;
min_index = 0;
dispersions = input_img_a[col + row * cols].dispersions;
for (int i = 0; i < 9; i++) {
float tmp = dispersions[i];
if (tmp < min && tmp >= 0) {
min = tmp;
min_index = i;
}
}
output_img_a[col + row * cols] = input_img_a[col + row * cols].avgerages[min_index];
}
void init(BMP* imgOut, unsigned char * img_r, unsigned char * img_g, unsigned char * img_b, unsigned char * img_a, int rows, int cols) {
// Device input image and filtered image
unsigned char *d_img_r, *d_img_g, *d_img_b, *d_img_a;
unsigned char *d_filtered_r, *d_filtered_g, *d_filtered_b, *d_filtered_a;
unsigned char *filtered_r, *filtered_g, *filtered_b, *filtered_a;
filtered_r = (unsigned char *) malloc(sizeof(unsigned char) * cols * rows);
filtered_g = (unsigned char *) malloc(sizeof(unsigned char) * cols * rows);
filtered_b = (unsigned char *) malloc(sizeof(unsigned char) * cols * rows);
filtered_a = (unsigned char *) malloc(sizeof(unsigned char) * cols * rows);
// The temporary matrix holding the averages
// and dispersions for all 9 mask positions
Pair *d_tmp_r, *d_tmp_g, *d_tmp_b, *d_tmp_a;
// Allocate and copy input image to device
int size = rows * cols * sizeof(unsigned char);
cudaMalloc((void**) &d_img_r, size);
cudaMemcpy(d_img_r, img_r, size, cudaMemcpyHostToDevice);
cudaMalloc((void**) &d_img_g, size);
cudaMemcpy(d_img_g, img_g, size, cudaMemcpyHostToDevice);
cudaMalloc((void**) &d_img_b, size);
cudaMemcpy(d_img_b, img_b, size, cudaMemcpyHostToDevice);
cudaMalloc((void**) &d_img_a, size);
cudaMemcpy(d_img_a, img_a, size, cudaMemcpyHostToDevice);
// Allocate memory for output image
cudaMalloc((void**) &d_filtered_r, size);
cudaMalloc((void**) &d_filtered_g, size);
cudaMalloc((void**) &d_filtered_b, size);
cudaMalloc((void**) &d_filtered_a, size);
// Allocate memory for tmp matrix
int size_pair = rows * cols * sizeof(Pair);
cudaMalloc((void**) &d_tmp_r, size_pair);
cudaMalloc((void**) &d_tmp_g, size_pair);
cudaMalloc((void**) &d_tmp_b, size_pair);
cudaMalloc((void**) &d_tmp_a, size_pair);
// Define grid and block dimensions
dim3 block(BLOCK_DIM, BLOCK_DIM, 1);
dim3 grid((int) ceil((cols * 1.0) / (BLOCK_DIM - 2)),
(int) ceil((rows * 1.0) / (BLOCK_DIM - 2)), 1);
// Kernel invocations
rotatingMaskCUDA<BLOCK_DIM> <<<grid, block>>>(d_tmp_r, d_tmp_g, d_tmp_b, d_tmp_a,
d_img_r, d_img_g, d_img_b, d_img_a,
rows, cols);
dim3 grid2((int) ceil((cols * 1.0) / BLOCK_DIM),
(int) ceil((rows * 1.0) / BLOCK_DIM), 1);
getArrayMin<BLOCK_DIM> <<<grid2, block>>>(d_filtered_r, d_filtered_g, d_filtered_b, d_filtered_a,
d_tmp_r, d_tmp_g, d_tmp_b, d_tmp_a,
rows, cols);
// Copy the filtered image to the host memory
cudaMemcpy(filtered_r, d_filtered_r, size, cudaMemcpyDeviceToHost);
cudaMemcpy(filtered_g, d_filtered_g, size, cudaMemcpyDeviceToHost);
cudaMemcpy(filtered_b, d_filtered_b, size, cudaMemcpyDeviceToHost);
cudaMemcpy(filtered_a, d_filtered_a, size, cudaMemcpyDeviceToHost);
// Free allocated memory
cudaFree(d_img_r); cudaFree(d_img_g); cudaFree(d_img_b); cudaFree(d_img_a);
cudaFree(d_tmp_r); cudaFree(d_tmp_g); cudaFree(d_tmp_b); cudaFree(d_tmp_a);
cudaFree(d_filtered_r); cudaFree(d_filtered_g); cudaFree(d_filtered_b); cudaFree(d_filtered_a);
(*imgOut).fromPixelArrays(filtered_r, filtered_g, filtered_b, filtered_a,
cols, rows);
}
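/* Grid-sizing note (illustrative, a 512x512 image assumed): because neighbouring
 * tiles overlap by two pixels, the mask kernel only produces BLOCK_DIM-2 = 14
 * useful rows/columns per block, so it needs ceil(512/14) = 37 blocks per
 * dimension, while getArrayMin works on full 16x16 tiles and only needs
 * ceil(512/16) = 32 blocks per dimension.
 */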
void checkCuda(int argc, char **argv) {
printf(
"[Rotating mask technique for image filtering Using CUDA] - Starting...\n");
// By default, we use device 0, otherwise we override the device ID based on what is provided at the command line
int devID = 0;
if (checkCmdLineFlag(argc, (const char **) argv, "device")) {
devID = getCmdLineArgumentInt(argc, (const char **) argv, "device");
cudaSetDevice(devID);
}
cudaError_t error;
cudaDeviceProp deviceProp;
error = cudaGetDevice(&devID);
if (error != cudaSuccess) {
printf("cudaGetDevice returned error code %d, line(%d)\n", error,
__LINE__);
}
error = cudaGetDeviceProperties(&deviceProp, devID);
if (deviceProp.computeMode == cudaComputeModeProhibited) {
fprintf(stderr,
"Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != cudaSuccess) {
printf("cudaGetDeviceProperties returned error code %d, line(%d)\n",
error, __LINE__);
} else {
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID,
deviceProp.name, deviceProp.major, deviceProp.minor);
}
}
int main(int argc, char **argv) {
/* Check if CUDA is available */
checkCuda(argc, argv);
BMP imgIn, imgOut;
imgIn.ReadFromFile("../../test images/lena_noise.bmp");
int width = imgIn.TellWidth();
int height = imgIn.TellHeight();
unsigned char *pixelsIn_r, *pixelsIn_g, *pixelsIn_b, *pixelsIn_a;
// read the 4 channels R, G, B and A from the BMP object
pixelsIn_r = imgIn.getPixelArray(Red);
pixelsIn_g = imgIn.getPixelArray(Green);
pixelsIn_b = imgIn.getPixelArray(Blue);
pixelsIn_a = imgIn.getPixelArray(Alpha);
/************************************** Timing **************************************/
// cudaError_t error;
// // Allocate CUDA events that we'll use for timing
// cudaEvent_t start;
// error = cudaEventCreate(&start);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// cudaEvent_t stop;
// error = cudaEventCreate(&stop);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// // Record the start event
// error = cudaEventRecord(start, NULL);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
/************************************** Timing **************************************/
// compute the corresponding 4 channels after performing filtering
init(&imgOut, pixelsIn_r, pixelsIn_g, pixelsIn_b, pixelsIn_a, height, width);
/************************************** Timing **************************************/
// // Record the stop event
// error = cudaEventRecord(stop, NULL);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// // Wait for the stop event to complete
// error = cudaEventSynchronize(stop);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
// float msecTotal = 0.0f;
// error = cudaEventElapsedTime(&msecTotal, start, stop);
// if (error != cudaSuccess)
// {
// fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
// exit(EXIT_FAILURE);
// }
/************************************** Timing **************************************/
// write the computed channels to a bmp image file
imgOut.WriteToFile("../../output images/lena_noise_filtered.bmp");
return 0;
} |
40ab73c998aecbe8af72386727e0eec6eea5e4cd.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "uplo_powx.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int unit = 1;
const int bottom = 1;
const REAL *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
const REAL b = 1;
REAL *c = NULL;
hipMalloc(&c, XSIZE*YSIZE);
const int offset_c = 1;
const int ld_c = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
uplo_powx), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,c,offset_c,ld_c);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
uplo_powx), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,c,offset_c,ld_c);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
uplo_powx), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,c,offset_c,ld_c);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 40ab73c998aecbe8af72386727e0eec6eea5e4cd.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "uplo_powx.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int unit = 1;
const int bottom = 1;
const REAL *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
const REAL b = 1;
REAL *c = NULL;
cudaMalloc(&c, XSIZE*YSIZE);
const int offset_c = 1;
const int ld_c = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
uplo_powx<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,c,offset_c,ld_c);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
uplo_powx<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,c,offset_c,ld_c);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
uplo_powx<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,c,offset_c,ld_c);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
6355ba7ade2c8cb917e270a6503d0f8278753d77.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2020,2021 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/affine_grid.hpp>
#include <nbla/cuda/utils/nd_index.cuh>
#include <nbla/variable.hpp>
namespace nbla {
template <typename T, bool align_corners>
__global__ void kernel_generate_target_grid_2d(const Size_t isize, T *grid,
int3 shape, int2 stride, int B) {
NBLA_CUDA_KERNEL_LOOP(idx, isize) {
auto H = shape.x;
auto W = shape.y;
auto nd_index = device_flat_to_3d(idx, stride);
auto h = nd_index.x;
auto w = nd_index.y;
auto v = nd_index.z;
for (auto b = 0; b < B; b++) {
auto bidx = idx + b * isize;
// [-1, 1] <--> [0, S - 1] if align_corner
// [-1, 1] <--> [-0.5, S - 0.5] = [0 - 0.5, S - 1 + 0.5] if not
// align_corner
// Num. of v = 3, corresponding to (x, y, 1)
if (v == 0) {
auto x = T(2.0) * w / (W - 1) - T(1.0);
x = align_corners ? x : x * (T(W - 1) / T(W));
grid[bidx] = x;
} else if (v == 1) {
auto y = T(2.0) * h / (H - 1) - T(1.0);
y = align_corners ? y : y * (T(H - 1) / T(H));
grid[bidx] = y;
} else {
grid[bidx] = T(1);
}
}
}
}
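/* Worked example (illustrative): for W = 4 the x coordinate written above is
 * x = 2*w/(W-1) - 1, i.e. {-1, -1/3, 1/3, 1} for w = 0..3 when align_corners
 * is true (the extreme samples sit exactly on -1 and +1); when align_corners
 * is false each value is scaled by (W-1)/W = 3/4, giving
 * {-0.75, -0.25, 0.25, 0.75}, i.e. the pixel centres of a grid whose outer
 * edges map to -1 and +1.
 */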
template <typename T, bool align_corners>
__global__ void kernel_generate_target_grid_3d(const Size_t isize, T *grid,
int4 shape, int3 stride, int B) {
NBLA_CUDA_KERNEL_LOOP(idx, isize) {
auto D = shape.x;
auto H = shape.y;
auto W = shape.z;
auto nd_index = device_flat_to_4d(idx, stride);
auto d = nd_index.x;
auto h = nd_index.y;
auto w = nd_index.z;
auto v = nd_index.w;
for (auto b = 0; b < B; b++) {
auto bidx = idx + b * isize;
// [-1, 1] <--> [0, S - 1] if align_corner
// [-1, 1] <--> [-0.5, S - 0.5] = [0 - 0.5, S - 1 + 0.5] if not
// align_corner
// Num. of v = 3, corresponding to (x, y, 1)
if (v == 0) {
auto x = T(2.0) * w / (W - 1) - T(1.0);
x = align_corners ? x : x * (T(W - 1) / T(W));
grid[bidx] = x;
} else if (v == 1) {
auto y = T(2.0) * h / (H - 1) - T(1.0);
y = align_corners ? y : y * (T(H - 1) / T(H));
grid[bidx] = y;
} else if (v == 2) {
auto z = T(2.0) * d / (D - 1) - T(1.0);
z = align_corners ? z : z * (T(D - 1) / T(D));
grid[bidx] = z;
} else {
grid[bidx] = T(1);
}
}
}
}
template <typename T>
void AffineGridCuda<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
AffineGrid<T>::setup_impl(inputs, outputs);
cuda_set_device(this->device_);
}
template <typename T>
void AffineGridCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
auto affine = inputs[0];
auto grid_s = outputs[0];
if (this->size_.size() == 2) {
// Target grid (with 1 for the translation)
auto B = affine->shape()[0];
auto H = this->size_[0];
auto W = this->size_[1];
Variable grid_t(Shape_t{B, H, W, 3});
auto isize = H * W * 3;
auto shape = make_int3(H, W, 3);
auto stride = make_int2(W * 3, 3);
auto grid_t_ptr = grid_t.cast_data_and_get_pointer<Tcu>(this->ctx_, true);
auto generate_target_grid =
this->align_corners_ ? kernel_generate_target_grid_2d<Tcu, true>
: kernel_generate_target_grid_2d<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(generate_target_grid, isize, grid_t_ptr,
shape, stride, B);
// Transform: (B, H, W, 3) @ (B, 2, 3) --> (B, H, W, 2)
grid_t.reshape(Shape_t{B, H * W, 3}, false);
grid_s->reshape(Shape_t{B, H * W, 2}, false);
execute(this->batch_matmul_, Variables{&grid_t, affine}, Variables{grid_s});
grid_s->reshape(Shape_t{B, H, W, 2}, false);
} else if (this->size_.size() == 3) {
// Target grid (with 1 for the translation)
auto B = affine->shape()[0];
auto D = this->size_[0];
auto H = this->size_[1];
auto W = this->size_[2];
Variable grid_t(Shape_t{B, D, H, W, 4});
auto isize = D * H * W * 4;
auto shape = make_int4(D, H, W, 4);
auto stride = make_int3(H * W * 4, W * 4, 4);
auto grid_t_ptr = grid_t.cast_data_and_get_pointer<Tcu>(this->ctx_, true);
auto generate_target_grid =
this->align_corners_ ? kernel_generate_target_grid_3d<Tcu, true>
: kernel_generate_target_grid_3d<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(generate_target_grid, isize, grid_t_ptr,
shape, stride, B);
/// Transform: (B, D, H, W, 4) @ (B, 3, 4) --> (B, D, H, W, 3)
grid_t.reshape(Shape_t{B, D * H * W, 4}, false);
grid_s->reshape(Shape_t{B, D * H * W, 3}, false);
execute(this->batch_matmul_, Variables{&grid_t, affine}, Variables{grid_s});
grid_s->reshape(Shape_t{B, D, H, W, 3}, false);
}
}
template <typename T>
void AffineGridCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!(propagate_down[0])) {
return;
}
// Gradient of outputs
auto affine = inputs[0];
auto B = affine->shape()[0];
auto grid_s = outputs[0];
if (this->size_.size() == 2) {
// Target grid with 1 for the translation
auto B = affine->shape()[0];
auto H = this->size_[0];
auto W = this->size_[1];
Variable grid_t(Shape_t{B, H, W, 3});
auto isize = H * W * 3;
auto shape = make_int3(H, W, 3);
auto stride = make_int2(W * 3, 3);
auto grid_t_ptr = grid_t.cast_data_and_get_pointer<Tcu>(this->ctx_, true);
auto generate_target_grid =
this->align_corners_ ? kernel_generate_target_grid_2d<Tcu, true>
: kernel_generate_target_grid_2d<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(generate_target_grid, isize, grid_t_ptr,
shape, stride, B);
// Backward of the transformation: (B, H, W, 2) @ (B, 2, 3) --> (B, H, W, 2)
grid_t.reshape(Shape_t{B, H * W, 3}, false);
grid_s->reshape(Shape_t{B, H * W, 2}, false);
nbla::backward(this->batch_matmul_, Variables{&grid_t, affine},
Variables{grid_s}, vector<bool>{false, propagate_down[0]},
vector<bool>{false, accum[0]});
grid_s->reshape(Shape_t{B, H, W, 2}, false);
} else if (this->size_.size() == 3) {
auto B = affine->shape()[0];
auto D = this->size_[0];
auto H = this->size_[1];
auto W = this->size_[2];
Variable grid_t(Shape_t{B, D, H, W, 4});
auto isize = D * H * W * 4;
auto shape = make_int4(D, H, W, 4);
auto stride = make_int3(H * W * 4, W * 4, 4);
auto grid_t_ptr = grid_t.cast_data_and_get_pointer<Tcu>(this->ctx_, true);
auto generate_target_grid =
this->align_corners_ ? kernel_generate_target_grid_3d<Tcu, true>
: kernel_generate_target_grid_3d<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(generate_target_grid, isize, grid_t_ptr,
shape, stride, B);
// Backward of the transformation: (B, D, H, W, 4) @ (B, 3, 4) --> (B, D, H,
// W, 3)
grid_t.reshape(Shape_t{B, D * H * W, 4}, false);
grid_s->reshape(Shape_t{B, D * H * W, 3}, false);
nbla::backward(this->batch_matmul_, Variables{&grid_t, affine},
Variables{grid_s}, vector<bool>{false, propagate_down[0]},
vector<bool>{false, accum[0]});
grid_s->reshape(Shape_t{B, D, H, W, 3}, false);
}
}
} // namespace nbla
| 6355ba7ade2c8cb917e270a6503d0f8278753d77.cu | // Copyright 2020,2021 Sony Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <nbla/array.hpp>
#include <nbla/cuda/common.hpp>
#include <nbla/cuda/function/affine_grid.hpp>
#include <nbla/cuda/utils/nd_index.cuh>
#include <nbla/variable.hpp>
namespace nbla {
template <typename T, bool align_corners>
__global__ void kernel_generate_target_grid_2d(const Size_t isize, T *grid,
int3 shape, int2 stride, int B) {
NBLA_CUDA_KERNEL_LOOP(idx, isize) {
auto H = shape.x;
auto W = shape.y;
auto nd_index = device_flat_to_3d(idx, stride);
auto h = nd_index.x;
auto w = nd_index.y;
auto v = nd_index.z;
for (auto b = 0; b < B; b++) {
auto bidx = idx + b * isize;
// [-1, 1] <--> [0, S - 1] if align_corner
// [-1, 1] <--> [-0.5, S - 0.5] = [0 - 0.5, S - 1 + 0.5] if not
// align_corner
// Num. of v = 3, corresponding to (x, y, 1)
if (v == 0) {
auto x = T(2.0) * w / (W - 1) - T(1.0);
x = align_corners ? x : x * (T(W - 1) / T(W));
grid[bidx] = x;
} else if (v == 1) {
auto y = T(2.0) * h / (H - 1) - T(1.0);
y = align_corners ? y : y * (T(H - 1) / T(H));
grid[bidx] = y;
} else {
grid[bidx] = T(1);
}
}
}
}
template <typename T, bool align_corners>
__global__ void kernel_generate_target_grid_3d(const Size_t isize, T *grid,
int4 shape, int3 stride, int B) {
NBLA_CUDA_KERNEL_LOOP(idx, isize) {
auto D = shape.x;
auto H = shape.y;
auto W = shape.z;
auto nd_index = device_flat_to_4d(idx, stride);
auto d = nd_index.x;
auto h = nd_index.y;
auto w = nd_index.z;
auto v = nd_index.w;
for (auto b = 0; b < B; b++) {
auto bidx = idx + b * isize;
// [-1, 1] <--> [0, S - 1] if align_corner
// [-1, 1] <--> [-0.5, S - 0.5] = [0 - 0.5, S - 1 + 0.5] if not
// align_corner
// Num. of v = 3, corresponding to (x, y, 1)
if (v == 0) {
auto x = T(2.0) * w / (W - 1) - T(1.0);
x = align_corners ? x : x * (T(W - 1) / T(W));
grid[bidx] = x;
} else if (v == 1) {
auto y = T(2.0) * h / (H - 1) - T(1.0);
y = align_corners ? y : y * (T(H - 1) / T(H));
grid[bidx] = y;
} else if (v == 2) {
auto z = T(2.0) * d / (D - 1) - T(1.0);
z = align_corners ? z : z * (T(D - 1) / T(D));
grid[bidx] = z;
} else {
grid[bidx] = T(1);
}
}
}
}
template <typename T>
void AffineGridCuda<T>::setup_impl(const Variables &inputs,
const Variables &outputs) {
AffineGrid<T>::setup_impl(inputs, outputs);
cuda_set_device(this->device_);
}
template <typename T>
void AffineGridCuda<T>::forward_impl(const Variables &inputs,
const Variables &outputs) {
cuda_set_device(this->device_);
auto affine = inputs[0];
auto grid_s = outputs[0];
if (this->size_.size() == 2) {
// Target grid (with 1 for the translation)
auto B = affine->shape()[0];
auto H = this->size_[0];
auto W = this->size_[1];
Variable grid_t(Shape_t{B, H, W, 3});
auto isize = H * W * 3;
auto shape = make_int3(H, W, 3);
auto stride = make_int2(W * 3, 3);
auto grid_t_ptr = grid_t.cast_data_and_get_pointer<Tcu>(this->ctx_, true);
auto generate_target_grid =
this->align_corners_ ? kernel_generate_target_grid_2d<Tcu, true>
: kernel_generate_target_grid_2d<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(generate_target_grid, isize, grid_t_ptr,
shape, stride, B);
// Transform: (B, H, W, 3) @ (B, 2, 3) --> (B, H, W, 2)
grid_t.reshape(Shape_t{B, H * W, 3}, false);
grid_s->reshape(Shape_t{B, H * W, 2}, false);
execute(this->batch_matmul_, Variables{&grid_t, affine}, Variables{grid_s});
grid_s->reshape(Shape_t{B, H, W, 2}, false);
} else if (this->size_.size() == 3) {
// Target grid (with 1 for the translation)
auto B = affine->shape()[0];
auto D = this->size_[0];
auto H = this->size_[1];
auto W = this->size_[2];
Variable grid_t(Shape_t{B, D, H, W, 4});
auto isize = D * H * W * 4;
auto shape = make_int4(D, H, W, 4);
auto stride = make_int3(H * W * 4, W * 4, 4);
auto grid_t_ptr = grid_t.cast_data_and_get_pointer<Tcu>(this->ctx_, true);
auto generate_target_grid =
this->align_corners_ ? kernel_generate_target_grid_3d<Tcu, true>
: kernel_generate_target_grid_3d<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(generate_target_grid, isize, grid_t_ptr,
shape, stride, B);
/// Transform: (B, D, H, W, 4) @ (B, 3, 4) --> (B, D, H, W, 3)
grid_t.reshape(Shape_t{B, D * H * W, 4}, false);
grid_s->reshape(Shape_t{B, D * H * W, 3}, false);
execute(this->batch_matmul_, Variables{&grid_t, affine}, Variables{grid_s});
grid_s->reshape(Shape_t{B, D, H, W, 3}, false);
}
}
template <typename T>
void AffineGridCuda<T>::backward_impl(const Variables &inputs,
const Variables &outputs,
const vector<bool> &propagate_down,
const vector<bool> &accum) {
if (!(propagate_down[0])) {
return;
}
// Gradient of outputs
auto affine = inputs[0];
auto B = affine->shape()[0];
auto grid_s = outputs[0];
if (this->size_.size() == 2) {
// Target grid with 1 for the translation
auto B = affine->shape()[0];
auto H = this->size_[0];
auto W = this->size_[1];
Variable grid_t(Shape_t{B, H, W, 3});
auto isize = H * W * 3;
auto shape = make_int3(H, W, 3);
auto stride = make_int2(W * 3, 3);
auto grid_t_ptr = grid_t.cast_data_and_get_pointer<Tcu>(this->ctx_, true);
auto generate_target_grid =
this->align_corners_ ? kernel_generate_target_grid_2d<Tcu, true>
: kernel_generate_target_grid_2d<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(generate_target_grid, isize, grid_t_ptr,
shape, stride, B);
// Backward of the transformation: (B, H, W, 2) @ (B, 2, 3) --> (B, H, W, 2)
grid_t.reshape(Shape_t{B, H * W, 3}, false);
grid_s->reshape(Shape_t{B, H * W, 2}, false);
nbla::backward(this->batch_matmul_, Variables{&grid_t, affine},
Variables{grid_s}, vector<bool>{false, propagate_down[0]},
vector<bool>{false, accum[0]});
grid_s->reshape(Shape_t{B, H, W, 2}, false);
} else if (this->size_.size() == 3) {
auto B = affine->shape()[0];
auto D = this->size_[0];
auto H = this->size_[1];
auto W = this->size_[2];
Variable grid_t(Shape_t{B, D, H, W, 4});
auto isize = D * H * W * 4;
auto shape = make_int4(D, H, W, 4);
auto stride = make_int3(H * W * 4, W * 4, 4);
auto grid_t_ptr = grid_t.cast_data_and_get_pointer<Tcu>(this->ctx_, true);
auto generate_target_grid =
this->align_corners_ ? kernel_generate_target_grid_3d<Tcu, true>
: kernel_generate_target_grid_3d<Tcu, false>;
NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(generate_target_grid, isize, grid_t_ptr,
shape, stride, B);
// Backward of the transformation: (B, D, H, W, 4) @ (B, 3, 4) --> (B, D, H,
// W, 3)
grid_t.reshape(Shape_t{B, D * H * W, 4}, false);
grid_s->reshape(Shape_t{B, D * H * W, 3}, false);
nbla::backward(this->batch_matmul_, Variables{&grid_t, affine},
Variables{grid_s}, vector<bool>{false, propagate_down[0]},
vector<bool>{false, accum[0]});
grid_s->reshape(Shape_t{B, D, H, W, 3}, false);
}
}
} // namespace nbla
|
989d1ebc62cccbf51006a532c8e17d55702d6d74.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Please write your name and net ID below
*
* Last name: RIVERA RUIZ
* First name: DANIEL
* Net ID: drr342
*
*/
/*
* This file contains the code for doing the heat distribution problem.
* You do not need to modify anything except starting gpu_heat_dist() at the bottom
* of this file.
* In gpu_heat_dist() you can organize your data structure and the call to your
* kernel(s) that you need to write too.
*
* You compile with:
* cuda2: nvcc -o heatdist -arch=sm_52 heatdist.cu
 * cuda5: nvcc -o heatdist -arch=sm_35 heatdist.cu
*/
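/* Example run (illustrative values): "./heatdist 1000 400 1" solves a 1000x1000
 * playground for 400 iterations on the GPU; a trailing 0 instead of 1 runs the
 * sequential CPU version.
 */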
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include "cudaFunctions.h"
/* To index element (i,j) of a 2D array stored as 1D */
#define index(i, j, N) ((i)*(N)) + (j)
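/* For example, with N = 8 the element at row i = 2, column j = 5 maps to
   index(2, 5, 8) = (2*8) + 5 = 21, i.e. a row-major flattening of the 2D grid. */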
/*****************************************************************/
// Function declarations: Feel free to add any functions you want.
void seq_heat_dist(float *, unsigned int, unsigned int);
void gpu_heat_dist(float *, unsigned int, unsigned int);
float checksum(float * playground, unsigned int N);
int setDimensions(int N, dim3 * grid, dim3 * block, hipDeviceProp_t * prop);
__global__ void calculate(float * dPlayground, float * dTemp, int * dParams);
/*****************************************************************/
/**** Do NOT CHANGE ANYTHING in main() function ******/
int main(int argc, char * argv[])
{
  unsigned int N; /* Dimension of NxN matrix */
int type_of_device = 0; // CPU or GPU
int iterations = 0;
int i;
/* The 2D array of points will be treated as 1D array of NxN elements */
float * playground;
// to measure time taken by a specific part of the code
double time_taken;
clock_t start, end;
if(argc != 4)
{
fprintf(stderr, "usage: heatdist num iterations who\n");
fprintf(stderr, "num = dimension of the square matrix (50 and up)\n");
fprintf(stderr, "iterations = number of iterations till stopping (1 and up)\n");
fprintf(stderr, "who = 0: sequential code on CPU, 1: GPU execution\n");
exit(1);
}
type_of_device = atoi(argv[3]);
N = (unsigned int) atoi(argv[1]);
iterations = (unsigned int) atoi(argv[2]);
/* Dynamically allocate NxN array of floats */
playground = (float *)calloc(N*N, sizeof(float));
if( !playground )
{
fprintf(stderr, " Cannot allocate the %u x %u array\n", N, N);
exit(1);
}
  /* Initialize it: calloc already initialized everything to 0 */
// Edge elements to 70F
for(i = 0; i < N; i++)
playground[index(0,i,N)] = 70;
for(i = 0; i < N; i++)
playground[index(i,0,N)] = 70;
for(i = 0; i < N; i++)
playground[index(i,N-1, N)] = 70;
for(i = 0; i < N; i++)
playground[index(N-1,i,N)] = 70;
// from (0,10) to (0,30) inclusive are 100F
for(i = 10; i <= 30; i++)
playground[index(0,i,N)] = 100;
// from (n-1,10) to (n-1,30) inclusive are 150F
for(i = 10; i <= 30; i++)
playground[index(N-1,i,N)] = 150;
if( !type_of_device ) // The CPU sequential version
{
start = clock();
seq_heat_dist(playground, N, iterations);
end = clock();
}
else // The GPU version
{
start = clock();
gpu_heat_dist(playground, N, iterations);
end = clock();
}
time_taken = ((double)(end - start))/ CLOCKS_PER_SEC;
printf("Checksum: %f\n", checksum(playground, N));
printf("Time taken for %s is %lf\n", type_of_device == 0? "CPU" : "GPU", time_taken);
free(playground);
return 0;
}
/***************** The CPU sequential version (DO NOT CHANGE THAT) **************/
void seq_heat_dist(float * playground, unsigned int N, unsigned int iterations)
{
// Loop indices
int i, j, k;
int upper = N-1;
// number of bytes to be copied between array temp and array playground
unsigned int num_bytes = 0;
float * temp;
/* Dynamically allocate another array for temp values */
/* Dynamically allocate NxN array of floats */
temp = (float *)calloc(N*N, sizeof(float));
if( !temp )
{
fprintf(stderr, " Cannot allocate temp %u x %u array\n", N, N);
exit(1);
}
num_bytes = N*N*sizeof(float);
/* Copy initial array in temp */
memcpy((void *)temp, (void *) playground, num_bytes);
for( k = 0; k < iterations; k++)
{
/* Calculate new values and store them in temp */
for(i = 1; i < upper; i++)
for(j = 1; j < upper; j++)
temp[index(i,j,N)] = (playground[index(i-1,j,N)] +
playground[index(i+1,j,N)] +
playground[index(i,j-1,N)] +
playground[index(i,j+1,N)])/4.0;
/* Move new values into old values */
memcpy((void *)playground, (void *) temp, num_bytes);
}
}
float checksum(float * playground, unsigned int N) {
float sum = 0;
int i, j;
for(i = 0; i < N; i++)
for(j = 0; j < N; j++)
sum += playground[index(i, j, N)];
return sum;
}
/***************** The GPU version: Write your code here *********************/
/* This function can call one or more kernels if you want ********************/
void gpu_heat_dist(float * playground, unsigned int N, unsigned int iterations)
{
float * dPlayground, * dTemp;
int * dParams;
size_t sizeN = sizeof(float) * N * N;
size_t sizeParams = sizeof(int) * 2;
hipMalloc(&dPlayground, sizeN);
hipMalloc(&dTemp, sizeN);
hipMalloc(&dParams, sizeParams);
int device;
hipDeviceProp_t prop;
hipGetDevice(&device);
hipGetDeviceProperties(&prop, device);
dim3 dimGrid, dimBlock;
int split = setDimensions(N - 2, &dimGrid, &dimBlock, &prop);
int params[2] = {N, split};
hipMemcpy(dPlayground, playground, sizeN, hipMemcpyHostToDevice);
hipMemcpy(dTemp, playground, sizeN, hipMemcpyHostToDevice);
hipMemcpy(dParams, params, sizeParams, hipMemcpyHostToDevice);
for (int k = 0; k < iterations; k++) {
hipLaunchKernelGGL(( calculate), dim3(dimGrid), dim3(dimBlock), 0, 0, dPlayground, dTemp, dParams);
hipMemcpy(dPlayground, dTemp, sizeN, hipMemcpyDeviceToDevice);
}
hipMemcpy(playground, dPlayground, sizeN, hipMemcpyDeviceToHost);
hipFree(dPlayground);
hipFree(dTemp);
hipFree(dParams);
}
int setDimensions(int N, dim3 * grid, dim3 * block, hipDeviceProp_t * prop) {
int maxTpSM = maxThreadsPerSM(prop);
int maxBpSM = maxBlocksPerSM(prop);
int TpB = prop->maxThreadsPerBlock / maxBpSM;
// while (maxTpSM % TpB != 0) TpB /= 2;
if (TpB > N) TpB = N;
int maxBpGy = prop->maxGridSize[1];
int BpGy = N;
int BpGx = (int) ceil((float)N / TpB);
while (BpGy > maxBpGy) {
BpGy = (int) ceil(BpGy / 2.0);
BpGx *= 2;
}
grid->x = BpGx;
grid->y = BpGy;
block->x = TpB;
return TpB * (int) ceil((N - 2.0) / TpB);
}
__global__ void calculate(float * dPlayground, float * dTemp, int * dParams) {
int dN = dParams[0];
int dSplit = dParams[1];
int blockId = getBlockId(blockIdx, gridDim);
int threadId = getThreadId(blockId, threadIdx, blockDim);
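  // Map the linear thread id onto grid cells: dSplit thread slots per row
  // (a padded interior width), with +1 offsets to skip boundary row/column 0;
  // out-of-range threads are discarded by the bounds check below.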
int row = threadId / dSplit + 1;
int col = threadId % dSplit + 1;
if (col > dN - 2 || row > dN - 2) return;
dTemp[index(row, col, dN)] =
(dPlayground[index(row - 1, col, dN)] +
dPlayground[index(row + 1, col, dN)] +
dPlayground[index(row, col - 1, dN)] +
dPlayground[index(row, col + 1, dN)]) / 4.0;
}
| 989d1ebc62cccbf51006a532c8e17d55702d6d74.cu | /*
* Please write your name and net ID below
*
* Last name: RIVERA RUIZ
* First name: DANIEL
* Net ID: drr342
*
*/
/*
* This file contains the code for doing the heat distribution problem.
 * You do not need to modify anything except the code starting at gpu_heat_dist()
 * at the bottom of this file.
 * In gpu_heat_dist() you can organize your data structures and the calls to the
 * kernel(s) that you need to write.
*
* You compile with:
* cuda2: nvcc -o heatdist -arch=sm_52 heatdist.cu
 * cuda5: nvcc -o heatdist -arch=sm_35 heatdist.cu
*/
#include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include "cudaFunctions.h"
/* To index element (i,j) of a 2D array stored as 1D */
#define index(i, j, N) ((i)*(N)) + (j)
/*****************************************************************/
// Function declarations: Feel free to add any functions you want.
void seq_heat_dist(float *, unsigned int, unsigned int);
void gpu_heat_dist(float *, unsigned int, unsigned int);
float checksum(float * playground, unsigned int N);
int setDimensions(int N, dim3 * grid, dim3 * block, cudaDeviceProp * prop);
__global__ void calculate(float * dPlayground, float * dTemp, int * dParams);
/*****************************************************************/
/**** Do NOT CHANGE ANYTHING in main() function ******/
int main(int argc, char * argv[])
{
  unsigned int N; /* Dimension of NxN matrix */
int type_of_device = 0; // CPU or GPU
int iterations = 0;
int i;
/* The 2D array of points will be treated as 1D array of NxN elements */
float * playground;
// to measure time taken by a specific part of the code
double time_taken;
clock_t start, end;
if(argc != 4)
{
fprintf(stderr, "usage: heatdist num iterations who\n");
fprintf(stderr, "num = dimension of the square matrix (50 and up)\n");
fprintf(stderr, "iterations = number of iterations till stopping (1 and up)\n");
fprintf(stderr, "who = 0: sequential code on CPU, 1: GPU execution\n");
exit(1);
}
type_of_device = atoi(argv[3]);
N = (unsigned int) atoi(argv[1]);
iterations = (unsigned int) atoi(argv[2]);
/* Dynamically allocate NxN array of floats */
playground = (float *)calloc(N*N, sizeof(float));
if( !playground )
{
fprintf(stderr, " Cannot allocate the %u x %u array\n", N, N);
exit(1);
}
  /* Initialize it: calloc already initialized everything to 0 */
// Edge elements to 70F
for(i = 0; i < N; i++)
playground[index(0,i,N)] = 70;
for(i = 0; i < N; i++)
playground[index(i,0,N)] = 70;
for(i = 0; i < N; i++)
playground[index(i,N-1, N)] = 70;
for(i = 0; i < N; i++)
playground[index(N-1,i,N)] = 70;
// from (0,10) to (0,30) inclusive are 100F
for(i = 10; i <= 30; i++)
playground[index(0,i,N)] = 100;
// from (n-1,10) to (n-1,30) inclusive are 150F
for(i = 10; i <= 30; i++)
playground[index(N-1,i,N)] = 150;
if( !type_of_device ) // The CPU sequential version
{
start = clock();
seq_heat_dist(playground, N, iterations);
end = clock();
}
else // The GPU version
{
start = clock();
gpu_heat_dist(playground, N, iterations);
end = clock();
}
time_taken = ((double)(end - start))/ CLOCKS_PER_SEC;
printf("Checksum: %f\n", checksum(playground, N));
printf("Time taken for %s is %lf\n", type_of_device == 0? "CPU" : "GPU", time_taken);
free(playground);
return 0;
}
/***************** The CPU sequential version (DO NOT CHANGE THAT) **************/
void seq_heat_dist(float * playground, unsigned int N, unsigned int iterations)
{
// Loop indices
int i, j, k;
int upper = N-1;
// number of bytes to be copied between array temp and array playground
unsigned int num_bytes = 0;
float * temp;
/* Dynamically allocate another array for temp values */
/* Dynamically allocate NxN array of floats */
temp = (float *)calloc(N*N, sizeof(float));
if( !temp )
{
fprintf(stderr, " Cannot allocate temp %u x %u array\n", N, N);
exit(1);
}
num_bytes = N*N*sizeof(float);
/* Copy initial array in temp */
memcpy((void *)temp, (void *) playground, num_bytes);
for( k = 0; k < iterations; k++)
{
/* Calculate new values and store them in temp */
for(i = 1; i < upper; i++)
for(j = 1; j < upper; j++)
temp[index(i,j,N)] = (playground[index(i-1,j,N)] +
playground[index(i+1,j,N)] +
playground[index(i,j-1,N)] +
playground[index(i,j+1,N)])/4.0;
/* Move new values into old values */
memcpy((void *)playground, (void *) temp, num_bytes);
}
}
float checksum(float * playground, unsigned int N) {
float sum = 0;
int i, j;
for(i = 0; i < N; i++)
for(j = 0; j < N; j++)
sum += playground[index(i, j, N)];
return sum;
}
/***************** The GPU version: Write your code here *********************/
/* This function can call one or more kernels if you want ********************/
void gpu_heat_dist(float * playground, unsigned int N, unsigned int iterations)
{
float * dPlayground, * dTemp;
int * dParams;
size_t sizeN = sizeof(float) * N * N;
size_t sizeParams = sizeof(int) * 2;
cudaMalloc(&dPlayground, sizeN);
cudaMalloc(&dTemp, sizeN);
cudaMalloc(&dParams, sizeParams);
int device;
cudaDeviceProp prop;
cudaGetDevice(&device);
cudaGetDeviceProperties(&prop, device);
dim3 dimGrid, dimBlock;
int split = setDimensions(N - 2, &dimGrid, &dimBlock, &prop);
int params[2] = {N, split};
cudaMemcpy(dPlayground, playground, sizeN, cudaMemcpyHostToDevice);
cudaMemcpy(dTemp, playground, sizeN, cudaMemcpyHostToDevice);
cudaMemcpy(dParams, params, sizeParams, cudaMemcpyHostToDevice);
for (int k = 0; k < iterations; k++) {
calculate<<<dimGrid, dimBlock>>>(dPlayground, dTemp, dParams);
cudaMemcpy(dPlayground, dTemp, sizeN, cudaMemcpyDeviceToDevice);
}
cudaMemcpy(playground, dPlayground, sizeN, cudaMemcpyDeviceToHost);
cudaFree(dPlayground);
cudaFree(dTemp);
cudaFree(dParams);
}
int setDimensions(int N, dim3 * grid, dim3 * block, cudaDeviceProp * prop) {
int maxTpSM = maxThreadsPerSM(prop);
int maxBpSM = maxBlocksPerSM(prop);
int TpB = prop->maxThreadsPerBlock / maxBpSM;
// while (maxTpSM % TpB != 0) TpB /= 2;
if (TpB > N) TpB = N;
int maxBpGy = prop->maxGridSize[1];
int BpGy = N;
int BpGx = (int) ceil((float)N / TpB);
while (BpGy > maxBpGy) {
BpGy = (int) ceil(BpGy / 2.0);
BpGx *= 2;
}
grid->x = BpGx;
grid->y = BpGy;
block->x = TpB;
return TpB * (int) ceil((N - 2.0) / TpB);
}
__global__ void calculate(float * dPlayground, float * dTemp, int * dParams) {
int dN = dParams[0];
int dSplit = dParams[1];
int blockId = getBlockId(blockIdx, gridDim);
int threadId = getThreadId(blockId, threadIdx, blockDim);
int row = threadId / dSplit + 1;
int col = threadId % dSplit + 1;
if (col > dN - 2 || row > dN - 2) return;
dTemp[index(row, col, dN)] =
(dPlayground[index(row - 1, col, dN)] +
dPlayground[index(row + 1, col, dN)] +
dPlayground[index(row, col - 1, dN)] +
dPlayground[index(row, col + 1, dN)]) / 4.0;
}
|
c0ef48f96c08dc5d9ded34f0315c0ab1061f0bb3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorIndex.cu"
#else
// Check tensor dimensions for index operations, and return the slice size.
// src can be nullptr in case of indexFill: in that case it is ignored.
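// For instance (sizes for illustration only): a dst of size (4, 5, 6) indexed
// along dim == 1 yields a slice size of 4 * 6 = 24, the product of the
// remaining dimensions.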
static ptrdiff_t THCTensor_(getSliceSize)(THCState *state, THCTensor *dst,
int dim,
THCudaLongTensor *index,
THCTensor *src)
{
int dstDims = THCTensor_(_nDimension)(state, dst);
int srcDims = (src == nullptr) ? dstDims : THCTensor_(_nDimension)(state, src);
THArgCheck(THCudaLongTensor__nDimension(state, index) == 1, 4,
"expecting vector of indices");
THArgCheck(dim >= 0 && dim < dstDims, 2, "Indexing dim is out of bounds");
ptrdiff_t dstSliceSize = 1;
for (int d = 0; d < dstDims; d++) {
if (d != dim) {
dstSliceSize *= dst->size[d];
}
}
if (src == nullptr) return dstSliceSize;
THArgCheck(dim < srcDims, 3, "Indexing dim is out of bounds");
THArgCheck(THCudaLongTensor_nElement(state, index) == src->size[dim], 4,
"length of src.size[dim] is not equal to length of indices");
ptrdiff_t srcSliceSize = 1;
bool mismatch = false;
if (dstDims != srcDims) mismatch = true;
for (int d = 0; d < srcDims; d++) {
if (d != dim) {
srcSliceSize *= src->size[d];
if (!mismatch && dst->size[d] != src->size[d]) mismatch = true;
}
}
THArgCheck(dstSliceSize == srcSliceSize, 2,
"Source/destination tensor have different slice sizes (%ld vs %ld)",
dstSliceSize, srcSliceSize);
if (mismatch) {
static bool warningShown = false;
if (!warningShown) {
warningShown = true;
fprintf(stderr,
"Warning: source/destination slices have same size but different "
"shape for an index operation. This behavior is deprecated.\n");
}
}
return dstSliceSize;
}
// Compare the stride between adjacent slices (sliceStride) with strides in the
// other dimensions (i.e., strides *inside* each slice).
//
// - Returns true if some dimension inside the slice has lower stride than
// sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim
// == 0 (that is, each slice is a row).
//
// In this case, we choose the CUDA kernel that processes the data in
// "index-major order". For example, if thread count equals slice size, then
// all threads process slice #0 in lockstep, and then slice #1, and so on.
//
// - Otherwise (i.e., sliceStride has the lowest value), this function returns
// false. The simplest example is a 2-D contiguous tensor with sliceDim == 1
// (each slice is a column).
//
// In this case, we choose the CUDA kernel that processes the data in
// "elementInSlice-major order". For example, each thread can process element
// #0 of every slice, and then element #1 of every slice, and so on.
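//
// A small worked example (sizes for illustration only): a contiguous 2-D tensor
// of size (4, 5) has strides (5, 1). With sliceDim == 0, sliceStride == 5 and the
// inner dimension has stride 1 < 5, so this returns true (index-major order).
// With sliceDim == 1, sliceStride == 1 is already the smallest stride, so this
// returns false (elementInSlice-major order).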
bool THCTensor_(indexShouldBeMajor)(TensorInfo<real, unsigned int> &info,
int sliceDim)
{
// The stride between adjacent slices (e.g., between element #0 of slice #100
// and element #0 of slice #101).
unsigned int sliceStride = info.strides[sliceDim];
for (int i = 0; i < info.dims; ++i) {
if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) {
return true;
}
}
return false;
}
void THCTensor_(indexCopy)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(_nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(_nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor__nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, src);
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
int64_t dstCopyDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
hipStream_t stream = THCState_getCurrentStream(state);
int indContig = THCudaLongTensor_isContiguous(state, indices);
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexCopySmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstCopyDim, srcCopyDim, sliceSize, dstCopyDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
hipLaunchKernelGGL(( indexCopyLargeIndex<TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstCopyDim, srcCopyDim, srcTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndices, \
dstCopyDimSize);
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(srcTotalSize, (ptrdiff_t)128));
if (THCTensor_canUse32BitIndexMath(state, dst) &&
THCTensor_canUse32BitIndexMath(state, src) &&
THCTensor_canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, dst);
int dstCopyDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstCopyDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, src);
int srcCopyDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcCopyDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstCopyDim);
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2, true);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 2, 2, -2, false);
}
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1, true);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, dst);
int dstCopyDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstCopyDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, src);
int srcCopyDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcCopyDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1, true);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(take)(THCState *state, THCTensor *dst, THCTensor *src, THCudaLongTensor *index)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
THArgCheck(THCTensor_(_nDimension)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCTensor_(_nDimension)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCudaLongTensor__nDimension(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(!(THCTensor_(_nDimension)(state, src) == 0 && THCudaLongTensor__nDimension(state, index) != 0), 2,
"tried to take from an empty tensor");
THCTensor_(resizeNd)(state, dst, index->dim(), index->size, NULL);
// dispatchTakePut only handles non-empty tensors;
if (index->_dim() > 0) {
dispatchTakePut<real, TensorTakeOp>(state, src, dst, index);
}
}
static void THCTensor_(sort_indices)(THCState *state, THCudaLongTensor *index, THCTensor *src) {
THCThrustAllocator thrustAlloc(state);
auto index_iter = thrust::device_ptr<int64_t>(THCudaLongTensor_data(state, index));
auto src_iter = thrust::device_ptr<real>(THCTensor_(data)(state, src));
auto numel = THCTensor_(numel)(state, src);
thrust::sort_by_key(
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
index_iter, index_iter + numel,
src_iter, ThrustLTOp<int64_t>());
}
void THCTensor_(put)(THCState *state, THCTensor *dst, THCudaLongTensor *index, THCTensor *src, int accumulate)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
ptrdiff_t dstSize = THCTensor_(nElement)(state, dst);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, index);
THArgCheck(THCTensor_(nElement)(state, src) == numIndices,
3, "src should have the same number of elements as index");
THArgCheck(THCTensor_(_nDimension)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCTensor_(_nDimension)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCudaLongTensor__nDimension(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
if (numIndices == 0) {
return;
}
if (accumulate) {
    // wrap indices so as to replace negative indices
THCudaLongTensor* sorted_index = THCudaLongTensor_new(state);
THCudaLongTensor_resizeAs(state, sorted_index, index);
THC_pointwiseApply2<int64_t, int64_t>(state, sorted_index, index, WrapIndexOp(dstSize));
THCTensor* sorted_src = THCTensor_(newClone)(state, src);
THCTensor_(sort_indices)(state, sorted_index, sorted_src);
dispatchTakePut<real, TensorPutAccumulateOp>(state, dst, sorted_src, sorted_index);
THCTensor_(free)(state, sorted_src);
THCudaLongTensor_free(state, sorted_index);
} else {
dispatchTakePut<real, TensorPutOp>(state, dst, src, index);
}
}
void THCTensor_(indexAdd)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(_nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(_nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor__nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, src);
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
int64_t dstAddDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
hipStream_t stream = THCState_getCurrentStream(state);
int indContig = THCudaLongTensor_isContiguous(state, indices);
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexAddSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstAddDim, srcAddDim, sliceSize, dstAddDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
hipLaunchKernelGGL(( indexAddLargeIndex<TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstAddDim, srcAddDim, srcTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndices, \
dstAddDimSize);
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(srcTotalSize, (ptrdiff_t)128));
if (THCTensor_canUse32BitIndexMath(state, dst) &&
THCTensor_canUse32BitIndexMath(state, src) &&
THCTensor_canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, dst);
int dstAddDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstAddDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, src);
int srcAddDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcAddDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstAddDim);
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2, true);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 2, 2, -2, false);
}
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1, true);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, dst);
int dstAddDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstAddDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, src);
int srcAddDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcAddDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1, true);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, real val)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(_nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor__nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t sliceSize =
THCTensor_(getSliceSize)(state, dst, dim, indices, nullptr);
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t dstFillDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
hipStream_t stream = THCState_getCurrentStream(state);
int indContig = THCudaLongTensor_isContiguous(state, indices);
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexFillSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
dstInfo, indicesInfo, \
dstFillDim, sliceSize, dstFillDimSize, val);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR) \
hipLaunchKernelGGL(( indexFillLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
dstInfo, indicesInfo, \
dstFillDim, sliceSize * numIndices, \
(IDX_IS_MAJOR) ? sliceSize : numIndices, \
dstFillDimSize, val);
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(dstTotalSize, (ptrdiff_t)128));
if (THCTensor_canUse32BitIndexMath(state, dst) &&
THCTensor_canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, -2);
} else if (dstInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, -2);
} else if (dstInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1);
}
} else {
bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstFillDim);
if (dstInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, -2, true);
} else if (dstInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 2, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 2, -2, false);
}
} else if (dstInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 3, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 3, -2, false);
}
} else {
LARGE_INDEX(real, unsigned int, -1, -1, true);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, true);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(indexSelect)(THCState *state, THCTensor *dst, THCTensor *src, int dim, THCudaLongTensor *indices)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, dst, src, indices));
int dims = THCTensor_(_nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(_nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor__nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(_nDimension)(state, src);
hipStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor__nDimension(state, indices) <= 1, 3,
"Index is supposed to be an empty tensor or a vector");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THLongStorage *newSize;
if (numIndices == 0) {
newSize = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(newSize, 0, numIndices);
THCTensor_(resize)(state, dst, newSize, NULL);
THLongStorage_free(newSize);
return;
}
newSize = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(newSize, dim, numIndices);
THCTensor_(resize)(state, dst, newSize, NULL);
THLongStorage_free(newSize);
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t srcSelectDimSize = THCTensor_(size)(state, src, dim);
ptrdiff_t sliceSize = dstTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
hipLaunchKernelGGL(( indexSelectSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \
, dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstSelectDim, srcSelectDim, sliceSize, srcSelectDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
hipLaunchKernelGGL(( indexSelectLargeIndex<TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>) \
, dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \
dstInfo, srcInfo, indicesInfo, \
dstSelectDim, srcSelectDim, dstTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndices, \
srcSelectDimSize);
dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(::min(dstTotalSize, (ptrdiff_t)128));
if (THCTensor_canUse32BitIndexMath(state, dst) &&
THCTensor_canUse32BitIndexMath(state, src) &&
THCTensor_canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstSelectDim);
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2, true);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 2, 2, -2, false);
}
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1, true);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1, true);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
#endif
| c0ef48f96c08dc5d9ded34f0315c0ab1061f0bb3.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorIndex.cu"
#else
// Check tensor dimensions for index operations, and return the slice size.
// src can be nullptr in case of indexFill: in that case it is ignored.
static ptrdiff_t THCTensor_(getSliceSize)(THCState *state, THCTensor *dst,
int dim,
THCudaLongTensor *index,
THCTensor *src)
{
int dstDims = THCTensor_(_nDimension)(state, dst);
int srcDims = (src == nullptr) ? dstDims : THCTensor_(_nDimension)(state, src);
THArgCheck(THCudaLongTensor__nDimension(state, index) == 1, 4,
"expecting vector of indices");
THArgCheck(dim >= 0 && dim < dstDims, 2, "Indexing dim is out of bounds");
ptrdiff_t dstSliceSize = 1;
for (int d = 0; d < dstDims; d++) {
if (d != dim) {
dstSliceSize *= dst->size[d];
}
}
if (src == nullptr) return dstSliceSize;
THArgCheck(dim < srcDims, 3, "Indexing dim is out of bounds");
THArgCheck(THCudaLongTensor_nElement(state, index) == src->size[dim], 4,
"length of src.size[dim] is not equal to length of indices");
ptrdiff_t srcSliceSize = 1;
bool mismatch = false;
if (dstDims != srcDims) mismatch = true;
for (int d = 0; d < srcDims; d++) {
if (d != dim) {
srcSliceSize *= src->size[d];
if (!mismatch && dst->size[d] != src->size[d]) mismatch = true;
}
}
THArgCheck(dstSliceSize == srcSliceSize, 2,
"Source/destination tensor have different slice sizes (%ld vs %ld)",
dstSliceSize, srcSliceSize);
if (mismatch) {
static bool warningShown = false;
if (!warningShown) {
warningShown = true;
fprintf(stderr,
"Warning: source/destination slices have same size but different "
"shape for an index operation. This behavior is deprecated.\n");
}
}
return dstSliceSize;
}
// Compare the stride between adjacent slices (sliceStride) with strides in the
// other dimensions (i.e., strides *inside* each slice).
//
// - Returns true if some dimension inside the slice has lower stride than
// sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim
// == 0 (that is, each slice is a row).
//
// In this case, we choose the CUDA kernel that processes the data in
// "index-major order". For example, if thread count equals slice size, then
// all threads process slice #0 in lockstep, and then slice #1, and so on.
//
// - Otherwise (i.e., sliceStride has the lowest value), this function returns
// false. The simplest example is a 2-D contiguous tensor with sliceDim == 1
// (each slice is a column).
//
// In this case, we choose the CUDA kernel that processes the data in
// "elementInSlice-major order". For example, each thread can process element
// #0 of every slice, and then element #1 of every slice, and so on.
bool THCTensor_(indexShouldBeMajor)(TensorInfo<real, unsigned int> &info,
int sliceDim)
{
// The stride between adjacent slices (e.g., between element #0 of slice #100
// and element #0 of slice #101).
unsigned int sliceStride = info.strides[sliceDim];
for (int i = 0; i < info.dims; ++i) {
if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) {
return true;
}
}
return false;
}
void THCTensor_(indexCopy)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(_nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(_nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor__nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, src);
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
int64_t dstCopyDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
cudaStream_t stream = THCState_getCurrentStream(state);
int indContig = THCudaLongTensor_isContiguous(state, indices);
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexCopySmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstCopyDim, srcCopyDim, sliceSize, dstCopyDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
indexCopyLargeIndex<TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstCopyDim, srcCopyDim, srcTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndices, \
dstCopyDimSize);
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(srcTotalSize, (ptrdiff_t)128));
if (THCTensor_canUse32BitIndexMath(state, dst) &&
THCTensor_canUse32BitIndexMath(state, src) &&
THCTensor_canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, dst);
int dstCopyDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstCopyDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, src);
int srcCopyDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcCopyDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstCopyDim);
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2, true);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 2, 2, -2, false);
}
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1, true);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, dst);
int dstCopyDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstCopyDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, src);
int srcCopyDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcCopyDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1, true);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(take)(THCState *state, THCTensor *dst, THCTensor *src, THCudaLongTensor *index)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
THArgCheck(THCTensor_(_nDimension)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCTensor_(_nDimension)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCudaLongTensor__nDimension(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(!(THCTensor_(_nDimension)(state, src) == 0 && THCudaLongTensor__nDimension(state, index) != 0), 2,
"tried to take from an empty tensor");
THCTensor_(resizeNd)(state, dst, index->dim(), index->size, NULL);
// dispatchTakePut only handles non-empty tensors;
if (index->_dim() > 0) {
dispatchTakePut<real, TensorTakeOp>(state, src, dst, index);
}
}
static void THCTensor_(sort_indices)(THCState *state, THCudaLongTensor *index, THCTensor *src) {
THCThrustAllocator thrustAlloc(state);
auto index_iter = thrust::device_ptr<int64_t>(THCudaLongTensor_data(state, index));
auto src_iter = thrust::device_ptr<real>(THCTensor_(data)(state, src));
auto numel = THCTensor_(numel)(state, src);
thrust::sort_by_key(
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
index_iter, index_iter + numel,
src_iter, ThrustLTOp<int64_t>());
}
void THCTensor_(put)(THCState *state, THCTensor *dst, THCudaLongTensor *index, THCTensor *src, int accumulate)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index));
ptrdiff_t dstSize = THCTensor_(nElement)(state, dst);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, index);
THArgCheck(THCTensor_(nElement)(state, src) == numIndices,
3, "src should have the same number of elements as index");
THArgCheck(THCTensor_(_nDimension)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCTensor_(_nDimension)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
THArgCheck(THCudaLongTensor__nDimension(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
if (numIndices == 0) {
return;
}
if (accumulate) {
    // wrap indices so as to replace negative indices
THCudaLongTensor* sorted_index = THCudaLongTensor_new(state);
THCudaLongTensor_resizeAs(state, sorted_index, index);
THC_pointwiseApply2<int64_t, int64_t>(state, sorted_index, index, WrapIndexOp(dstSize));
THCTensor* sorted_src = THCTensor_(newClone)(state, src);
THCTensor_(sort_indices)(state, sorted_index, sorted_src);
dispatchTakePut<real, TensorPutAccumulateOp>(state, dst, sorted_src, sorted_index);
THCTensor_(free)(state, sorted_src);
THCudaLongTensor_free(state, sorted_index);
} else {
dispatchTakePut<real, TensorPutOp>(state, dst, src, index);
}
}
void THCTensor_(indexAdd)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(_nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(_nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor__nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, src);
ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src);
int64_t dstAddDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
cudaStream_t stream = THCState_getCurrentStream(state);
int indContig = THCudaLongTensor_isContiguous(state, indices);
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexAddSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstAddDim, srcAddDim, sliceSize, dstAddDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
indexAddLargeIndex<TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstAddDim, srcAddDim, srcTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndices, \
dstAddDimSize);
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(srcTotalSize, (ptrdiff_t)128));
if (THCTensor_canUse32BitIndexMath(state, dst) &&
THCTensor_canUse32BitIndexMath(state, src) &&
THCTensor_canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, dst);
int dstAddDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstAddDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, src);
int srcAddDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcAddDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstAddDim);
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2, true);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 2, 2, -2, false);
}
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1, true);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, dst);
int dstAddDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstAddDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, src);
int srcAddDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcAddDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1, true);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, real val)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices));
int dims = THCTensor_(_nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor__nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
ptrdiff_t sliceSize =
THCTensor_(getSliceSize)(state, dst, dim, indices, nullptr);
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t dstFillDimSize = THCTensor_(size)(state, dst, dim);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
cudaStream_t stream = THCState_getCurrentStream(state);
int indContig = THCudaLongTensor_isContiguous(state, indices);
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \
indexFillSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, indicesInfo, \
dstFillDim, sliceSize, dstFillDimSize, val);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR) \
indexFillLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, indicesInfo, \
dstFillDim, sliceSize * numIndices, \
(IDX_IS_MAJOR) ? sliceSize : numIndices, \
dstFillDimSize, val);
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(dstTotalSize, (ptrdiff_t)128));
if (THCTensor_canUse32BitIndexMath(state, dst) &&
THCTensor_canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
// A reasonable choice for when to have each thread iterate over
// indices to choose
if (numIndices <= 16) {
if (dstInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, -2);
} else if (dstInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, -2);
} else if (dstInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1);
}
} else {
bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstFillDim);
if (dstInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, -2, true);
} else if (dstInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 2, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 2, -2, false);
}
} else if (dstInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 3, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 3, -2, false);
}
} else {
LARGE_INDEX(real, unsigned int, -1, -1, true);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, dst);
int dstFillDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstFillDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, true);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
void THCTensor_(indexSelect)(THCState *state, THCTensor *dst, THCTensor *src, int dim, THCudaLongTensor *indices)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, dst, src, indices));
int dims = THCTensor_(_nDimension)(state, dst);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING);
dims = THCTensor_(_nDimension)(state, src);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING);
dims = THCudaLongTensor__nDimension(state, indices);
THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING);
ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices);
int srcDims = THCTensor_(_nDimension)(state, src);
cudaStream_t stream = THCState_getCurrentStream(state);
THArgCheck(THCudaLongTensor__nDimension(state, indices) <= 1, 3,
"Index is supposed to be an empty tensor or a vector");
THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds");
THArgCheck(srcDims > 0, 2, "Source tensor is empty");
THLongStorage *newSize;
if (numIndices == 0) {
newSize = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(newSize, 0, numIndices);
THCTensor_(resize)(state, dst, newSize, NULL);
THLongStorage_free(newSize);
return;
}
newSize = THCTensor_(newSizeOf)(state, src);
THLongStorage_set(newSize, dim, numIndices);
THCTensor_(resize)(state, dst, newSize, NULL);
THLongStorage_free(newSize);
int indContig = THCudaLongTensor_isContiguous(state, indices);
// The `src` is partitioned into two parts:
// -the size of each slice we are indexing, which is the
// total size of the tensor ignoring dimension `dim`;
// -the number of indices we are choosing, which is the total size
// of the tensor `indices`.
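  // Illustrative example (hypothetical shapes, not from this code base):
  // selecting along dim = 1 of an 8 x 10 x 6 `src` with 4 indices resizes
  // `dst` to 8 x 4 x 6, so dstTotalSize = 8 * 4 * 6 = 192 and
  // sliceSize = dstTotalSize / numIndices = 192 / 4 = 48 (= 8 * 6).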
ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst);
int64_t srcSelectDimSize = THCTensor_(size)(state, src, dim);
ptrdiff_t sliceSize = dstTotalSize / numIndices;
int mpc = THCState_getCurrentDeviceProperties(state)->multiProcessorCount;
#define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \
indexSelectSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \
<<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstSelectDim, srcSelectDim, sliceSize, srcSelectDimSize);
#define LARGE_INDEX(TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \
indexSelectLargeIndex<TENSOR_TYPE, TYPE, \
DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR> \
<<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \
dstInfo, srcInfo, indicesInfo, \
dstSelectDim, srcSelectDim, dstTotalSize, \
(IDX_IS_MAJOR) ? sliceSize : numIndices, \
srcSelectDimSize);
dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128));
dim3 largeIndexGrid(std::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8)));
dim3 largeIndexBlock(std::min(dstTotalSize, (ptrdiff_t)128));
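  // Continuing the hypothetical example above with mpc = 80 multiprocessors:
  // smallIndexGrid = min(THCCeilDiv(48, 128), 640) = 1 block of 48 threads,
  // largeIndexGrid = min(THCCeilDiv(192, 128), 640) = 2 blocks of 128 threads.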
if (THCTensor_canUse32BitIndexMath(state, dst) &&
THCTensor_canUse32BitIndexMath(state, src) &&
THCTensor_canUse32BitIndexMath(state, indices)) {
TensorInfo<real, unsigned int> dstInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, unsigned int> srcInfo =
getTensorInfo<real, THCTensor, unsigned int>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, unsigned int> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices);
indicesInfo.collapseDims();
  // A reasonable cutoff for when to have each thread iterate over the
  // indices to choose (small-index kernels) instead of parallelizing across them
if (numIndices <= 16) {
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
SMALL_INDEX(real, unsigned int, 1, 1, -2);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
SMALL_INDEX(real, unsigned int, 2, 2, -2);
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
SMALL_INDEX(real, unsigned int, 3, 3, -2);
} else {
SMALL_INDEX(real, unsigned int, -1, -1, -1);
}
} else {
bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstSelectDim);
if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) {
LARGE_INDEX(real, unsigned int, 1, 1, -2, true);
} else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 2, 2, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 2, 2, -2, false);
}
} else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) {
if (indexIsMajor) {
LARGE_INDEX(real, unsigned int, 3, 3, -2, true);
} else {
LARGE_INDEX(real, unsigned int, 3, 3, -2, false);
}
} else {
LARGE_INDEX(real, unsigned int, -1, -1, -1, true);
}
}
} else {
TensorInfo<real, uint64_t> dstInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, dst);
int dstSelectDim = dstInfo.collapseDims(dim);
dstInfo.reduceDim(dstSelectDim);
TensorInfo<real, uint64_t> srcInfo =
getTensorInfo<real, THCTensor, uint64_t>(state, src);
int srcSelectDim = srcInfo.collapseDims(dim);
srcInfo.reduceDim(srcSelectDim);
TensorInfo<int64_t, uint64_t> indicesInfo =
getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices);
indicesInfo.collapseDims();
LARGE_INDEX(real, uint64_t, -1, -1, -1, true);
}
#undef SMALL_INDEX
#undef LARGE_INDEX
}
#endif
|
76cea9e316a0d945fa4747a971290e5abd4c7e10.hip | // !!! This is a file automatically generated by hipify!!!
/*
Created by Jane/Santaizi 3/19/2016
*/
#include <GL/freeglut.h>
#include <cstdlib>
#include <cstdio>
#include <string.h>
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
#include <helper_functions.h>
#include "thrust/device_ptr.h"
#include "thrust/for_each.h"
#include "thrust/iterator/zip_iterator.h"
#include "thrust/sort.h"
#include <device_launch_parameters.h>
#include "particleSystem_cuda_defines.h"
#include "particleSys_kernel.cuh"
#define DEFAULT_THREADS_PER_BLOCK 256
extern "C"
{
/*-------- here goes the functions ----------*/
void cudaGLInit_c(int argc, char **argv)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
findCudaGLDevice(argc, (const char **)argv);
}
// set the per-block thread count from the selected device's properties
void cudaInit(int devID)
{
hipDeviceProp_t dProp;
hipGetDeviceProperties(&dProp, devID);
int threadsPerBlock =
(dProp.major >= 2 ?
2 * DEFAULT_THREADS_PER_BLOCK : DEFAULT_THREADS_PER_BLOCK);
// here is the trick YOU SHOULD NEVER USE
int *ptr = (int *)(&THREADS_MAX);
*ptr = threadsPerBlock;
}
void cudaGraphicsGLRegisterBuffer_c(cudaGraphicsResource_t *resource, GLuint buffer, unsigned int flags)
{
checkCudaErrors(hipGraphicsGLRegisterBuffer(resource, buffer, flags));
}
void cudaMalloc_c(void **devPtr, size_t size)
{
checkCudaErrors(hipMalloc(devPtr, size));
}
void cudaFree_c(void *devPtr)
{
checkCudaErrors(hipFree(devPtr));
}
void cudaSetParams(SimParam *hostParams)
{
/*
Note: I'm not sure whether the prototype in the CUDA Runtime API documentation would read better as:
template<class T>
hipError_t hipGetSymbolAddress (void **devPtr, const T symbol)
*/
checkCudaErrors(hipMemcpyToSymbol(d_cParam, hostParams, sizeof(SimParam)));
}
void cudaGraphicsUnregisterResource_c(cudaGraphicsResource_t cuda_vbo_resource)
{
checkCudaErrors(hipGraphicsUnregisterResource(cuda_vbo_resource));
}
void cudaGraphicsMapResources_c(int count, cudaGraphicsResource_t *cuda_vbo_resource)
{
checkCudaErrors(hipGraphicsMapResources(count, cuda_vbo_resource, 0));
}
void cudaGraphicsUnmapResources_c(int count, cudaGraphicsResource_t *cuda_vbo_resource)
{
checkCudaErrors(hipGraphicsUnmapResources(count, cuda_vbo_resource, 0));
}
void cudaGraphicsResourceGetMappedPointer_c(void **devPtr, size_t *size, cudaGraphicsResource_t resource)
{
checkCudaErrors(hipGraphicsResourceGetMappedPointer(devPtr, size,
resource));
}
void cudaMemcpy_c(void *dst, const void *src, size_t count, enum hipMemcpyKind kind)
{
checkCudaErrors(hipMemcpy(dst, src, count, kind));
}
/* integrates the system by applying gravity only */
void integrateSystem(float *pos, float *vel, float deltaTime, uint numParticles)
{
thrust::device_ptr<float4> d_pos4((float4 *)pos);
thrust::device_ptr<float4> d_vel4((float4 *)vel);
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(d_pos4, d_vel4)),
thrust::make_zip_iterator(thrust::make_tuple(d_pos4 + numParticles, d_vel4 + numParticles)),
integrate_functor(deltaTime));
}
/*
@return uint2: .x = number of blocks, .y = threads per block
*/
inline uint2 calcGridSize(uint numThreads, uint blockSize)
{
uint2 size;
size.y = (uint)THREADS_MAX < blockSize ? (uint)THREADS_MAX : blockSize;
size.x = (uint)ceilf((float)numThreads / size.y);
return size;
}
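    // Illustrative usage (assumed values, not from this file): with
    // THREADS_MAX == 512, calcGridSize(10000, THREADS_MAX) returns
    // y = 512 threads per block and x = ceil(10000 / 512.0) = 20 blocks.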
void calcHash(uint *gridParticleHash, uint *gridParticleIndex, float *pos, uint numParticles)
{
uint2 gridSize = calcGridSize(numParticles, THREADS_MAX);
hipLaunchKernelGGL(( calcHash_kernel), dim3(gridSize.x),dim3(gridSize.y), 0, 0, gridParticleHash, gridParticleIndex, reinterpret_cast<float4 *>(pos), numParticles);
}
} | 76cea9e316a0d945fa4747a971290e5abd4c7e10.cu | /*
Created by Jane/Santaizi 3/19/2016
*/
#include <GL/freeglut.h>
#include <cstdlib>
#include <cstdio>
#include <string.h>
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
#include <helper_cuda.h>
#include <helper_cuda_gl.h>
#include <helper_functions.h>
#include "thrust/device_ptr.h"
#include "thrust/for_each.h"
#include "thrust/iterator/zip_iterator.h"
#include "thrust/sort.h"
#include <device_launch_parameters.h>
#include "particleSystem_cuda_defines.h"
#include "particleSys_kernel.cuh"
#define DEFAULT_THREADS_PER_BLOCK 256
extern "C"
{
/*-------- here goes the functions ----------*/
void cudaGLInit_c(int argc, char **argv)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
findCudaGLDevice(argc, (const char **)argv);
}
// set the per-block thread count from the selected device's properties
void cudaInit(int devID)
{
cudaDeviceProp dProp;
cudaGetDeviceProperties(&dProp, devID);
int threadsPerBlock =
(dProp.major >= 2 ?
2 * DEFAULT_THREADS_PER_BLOCK : DEFAULT_THREADS_PER_BLOCK);
// here is the trick YOU SHOULD NEVER USE
int *ptr = (int *)(&THREADS_MAX);
*ptr = threadsPerBlock;
}
void cudaGraphicsGLRegisterBuffer_c(cudaGraphicsResource_t *resource, GLuint buffer, unsigned int flags)
{
checkCudaErrors(cudaGraphicsGLRegisterBuffer(resource, buffer, flags));
}
void cudaMalloc_c(void **devPtr, size_t size)
{
checkCudaErrors(cudaMalloc(devPtr, size));
}
void cudaFree_c(void *devPtr)
{
checkCudaErrors(cudaFree(devPtr));
}
void cudaSetParams(SimParam *hostParams)
{
/*
Note: I'm not sure whether the prototype in the CUDA Runtime API documentation would read better as:
template<class T>
cudaError_t cudaGetSymbolAddress (void **devPtr, const T symbol)
*/
checkCudaErrors(cudaMemcpyToSymbol(d_cParam, hostParams, sizeof(SimParam)));
}
void cudaGraphicsUnregisterResource_c(cudaGraphicsResource_t cuda_vbo_resource)
{
checkCudaErrors(cudaGraphicsUnregisterResource(cuda_vbo_resource));
}
void cudaGraphicsMapResources_c(int count, cudaGraphicsResource_t *cuda_vbo_resource)
{
checkCudaErrors(cudaGraphicsMapResources(count, cuda_vbo_resource, 0));
}
void cudaGraphicsUnmapResources_c(int count, cudaGraphicsResource_t *cuda_vbo_resource)
{
checkCudaErrors(cudaGraphicsUnmapResources(count, cuda_vbo_resource, 0));
}
void cudaGraphicsResourceGetMappedPointer_c(void **devPtr, size_t *size, cudaGraphicsResource_t resource)
{
checkCudaErrors(cudaGraphicsResourceGetMappedPointer(devPtr, size,
resource));
}
void cudaMemcpy_c(void *dst, const void *src, size_t count, enum cudaMemcpyKind kind)
{
checkCudaErrors(cudaMemcpy(dst, src, count, kind));
}
/* integrates the system by applying gravity only */
void integrateSystem(float *pos, float *vel, float deltaTime, uint numParticles)
{
thrust::device_ptr<float4> d_pos4((float4 *)pos);
thrust::device_ptr<float4> d_vel4((float4 *)vel);
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(d_pos4, d_vel4)),
thrust::make_zip_iterator(thrust::make_tuple(d_pos4 + numParticles, d_vel4 + numParticles)),
integrate_functor(deltaTime));
}
/*
@return uint2: .x = number of blocks, .y = threads per block
*/
inline uint2 calcGridSize(uint numThreads, uint blockSize)
{
uint2 size;
size.y = (uint)THREADS_MAX < blockSize ? (uint)THREADS_MAX : blockSize;
size.x = (uint)ceilf((float)numThreads / size.y);
return size;
}
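    // Illustrative usage (assumed values, not from this file): with
    // THREADS_MAX == 512, calcGridSize(10000, THREADS_MAX) returns
    // y = 512 threads per block and x = ceil(10000 / 512.0) = 20 blocks.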
void calcHash(uint *gridParticleHash, uint *gridParticleIndex, float *pos, uint numParticles)
{
uint2 gridSize = calcGridSize(numParticles, THREADS_MAX);
calcHash_kernel<<<gridSize.x,gridSize.y>>>(gridParticleHash, gridParticleIndex, reinterpret_cast<float4 *>(pos), numParticles);
}
} |
324548e3cb857c05f632edd5739ac5d0c561ea9f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ConvDepthWiseExecution.hpp"
#include "core/ConvolutionCommon.hpp"
#include "Raster.cuh"
#include <float.h>
#include "MNNCUDADefine.hpp"
#include "MNNCUDAFunction.cuh"
namespace MNN {
namespace CUDA {
template<typename T>
__global__ void CONV_DW(const T* input,
const half* kernel,
const half* bias,
T *output,
const constBuffer* uConstant,
DivModFast d_oc,
DivModFast d_ow,
DivModFast d_oh
) {
float maxV = uConstant->maxValue;
float minV = uConstant->minValue;
int iw = uConstant->inputSize[0];
int ih = uConstant->inputSize[1];
int c = uConstant->channel;
int c_p = c * PACK_NUMBER;
int ow = uConstant->outputSize[0];
int oh = uConstant->outputSize[1];
int kw = uConstant->kernelSize[0];
int kh = uConstant->kernelSize[1];
int dw = uConstant->dilate[0];
int dh = uConstant->dilate[1];
int sw = uConstant->stride[0];
int sh = uConstant->stride[1];
int pw = uConstant->pad[0];
int ph = uConstant->pad[1];
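    // Each grid-stride iteration below produces one output pixel for two
    // adjacent packed channels: `index` is decoded with DivModFast as
    // (((ob * oh + oy) * ow + ox) * (c_p / 2)) + oz_2, which mirrors the
    // packed NHWC layout used to build `dst_offset`.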
for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < uConstant->total/2; index += blockDim.x * gridDim.x) {
int oz_2, tmp2, oy, ox, tmp1, ob;
d_oc.divmod(index, tmp1, oz_2);
d_ow.divmod(tmp1, tmp2, ox);
d_oh.divmod(tmp2, ob, oy);
int oz = oz_2 << 1;
int ix = ox * sw - pw;
int iy = oy * sh - ph;
float color0 = bias[oz];
float color1 = bias[oz+1];
int fxSta = max(0, (UP_DIV(-ix, dw)));
int fySta = max(0, (UP_DIV(-iy, dh)));
int fxEnd = min(kw, UP_DIV(iw - ix, dw));
int fyEnd = min(kh, UP_DIV(ih - iy, dh));
int fx, fy, fz;
for (fy=fySta; fy<fyEnd; ++fy) {
int sy = fy*dh + iy;
for (fx=fxSta; fx<fxEnd; ++fx) {
int sx = fx*dw + ix;
int src_offset = ((ob * ih + sy) * iw + sx) * c_p + oz;
float inp0 = input[src_offset];
float inp1 = input[src_offset+1];
float ker0 = kernel[(fy * kw + fx) * c_p + oz];
float ker1 = kernel[(fy * kw + fx) * c_p + oz + 1];
color0 = color0 + inp0 * ker0;
color1 = color1 + inp1 * ker1;
}
}
color0 = max(color0, minV);
color0 = min(color0, maxV);
color1 = max(color1, minV);
color1 = min(color1, maxV);
int dst_offset = ((ob * oh + oy) * ow + ox) * c_p + oz;
output[dst_offset] = color0;
output[dst_offset+1] = color1;
}
}
__global__ void CONV_DW_HALF2_OPT(const half2* input,
const half2* kernel,
const half2* bias,
half2 *output,
const constBuffer* uConstant,
DivModFast d_oc,
DivModFast d_ow,
DivModFast d_oh
) {
float maxV = uConstant->maxValue;
float minV = uConstant->minValue;
int iw = uConstant->inputSize[0];
int ih = uConstant->inputSize[1];
int c = uConstant->channel;
int c_p = c * PACK_NUMBER / 2;
int ow = uConstant->outputSize[0];
int oh = uConstant->outputSize[1];
int kw = uConstant->kernelSize[0];
int kh = uConstant->kernelSize[1];
int sw = uConstant->stride[0];
int sh = uConstant->stride[1];
int pw = uConstant->pad[0];
int ph = uConstant->pad[1];
for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < uConstant->total/2; index += blockDim.x * gridDim.x) {
int oz_2, tmp2, oy, ox, tmp1, ob;
d_oc.divmod(index, tmp1, oz_2);
d_ow.divmod(tmp1, tmp2, ox);
d_oh.divmod(tmp2, ob, oy);
int oz = oz_2;
int ix = ox * sw - pw;
int iy = oy * sh - ph;
half2 color = bias[oz];
int fxSta = max(0, -ix);
int fySta = max(0, -iy);
int fxEnd = min(kw, iw - ix);
int fyEnd = min(kh, ih - iy);
int fx, fy, fz;
for (fy=fySta; fy<fyEnd; ++fy) {
int sy = fy + iy;
for (fx=fxSta; fx<fxEnd; ++fx) {
int sx = fx + ix;
int src_offset = ((ob * ih + sy) * iw + sx) * c_p + oz;
half2 inp = input[src_offset];
half2 ker = kernel[(fy * kw + fx) * c_p + oz];
color = __hfma2(inp, ker, color);
}
}
color.x = max(color.x, minV);
color.x = min(color.x, maxV);
color.y = max(color.y, minV);
color.y = min(color.y, maxV);
int dst_offset = ((ob * oh + oy) * ow + ox) * c_p + oz;
output[dst_offset] = color;
}
}
__global__ void CONV_DW3x3_HALF2_OPT(const half2* input,
const half2* kernel,
const half2* bias,
half2 *output,
const constBuffer* uConstant,
DivModFast d_oc,
DivModFast d_ow,
DivModFast d_oh
) {
float maxV = uConstant->maxValue;
float minV = uConstant->minValue;
int iw = uConstant->inputSize[0];
int ih = uConstant->inputSize[1];
int c = uConstant->channel;
int c_p = c * PACK_NUMBER / 2;
int ow = uConstant->outputSize[0];
int oh = uConstant->outputSize[1];
for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < uConstant->total/4; index += blockDim.x * gridDim.x) {
int oz_2, tmp2, oy, ox_2, tmp1, ob;
d_oc.divmod(index, tmp1, oz_2);
d_ow.divmod(tmp1, tmp2, ox_2);
d_oh.divmod(tmp2, ob, oy);
int oz = oz_2;
int ox = ox_2 << 1;
int ix = ox - 1;
int iy = oy - 1;
half2 color0 = bias[oz];
half2 color1 = color0;
half2 zero;
zero.x = (half)0.0;
zero.y = (half)0.0;
half2 inp[12];
half2 ker[3][3];
for(int j=0; j<3; j++) {
if(iy < 0 && j==0) {
for(int i=0; i<4; i++) {
inp[i] = zero;
}
continue;
}
if(iy+2 > ih-1 && j==2) {
for(int i=0; i<4; i++) {
inp[8+i] = zero;
}
continue;
}
for(int i=0; i<4; i++) {
if(ix < 0 && i==0) {
for(int j=0; j<3; j++) {
inp[4*j+0] = zero;
}
continue;
}
if(ix+3 > iw-1 && i==3) {
for(int j=0; j<3; j++) {
inp[4*j+3] = zero;
}
continue;
}
int src_offset = ((ob * ih + iy+j) * iw + ix+i) * c_p + oz;
inp[4*j+i] = input[src_offset];
}
}
for(int j=0; j<3; j++) {
for(int i=0; i<3; i++) {
ker[j][i] = kernel[(j * 3 + i) * c_p + oz];
}
}
for(int j=0; j<3; j++) {
for(int i=0; i<3; i++) {
color0 = __hfma2(inp[4*j+i], ker[j][i], color0);
color1 = __hfma2(inp[4*j+i+1], ker[j][i], color1);
}
}
color0.x = max(color0.x, minV);
color0.x = min(color0.x, maxV);
color0.y = max(color0.y, minV);
color0.y = min(color0.y, maxV);
color1.x = max(color1.x, minV);
color1.x = min(color1.x, maxV);
color1.y = max(color1.y, minV);
color1.y = min(color1.y, maxV);
int dst_offset = ((ob * oh + oy) * ow + ox) * c_p + oz;
output[dst_offset] = color0;
output[dst_offset+c_p] = color1;
}
}
__global__ void CONV_DW_OPT(const float* input, const half* kernel, const half* bias, float *output, const constBuffer* uConstant,
DivModFast d_oc,
DivModFast d_ow,
DivModFast d_oh
) {
float maxV = uConstant->maxValue;
float minV = uConstant->minValue;
int iw = uConstant->inputSize[0];
int ih = uConstant->inputSize[1];
int ow = uConstant->outputSize[0];
int oh = uConstant->outputSize[1];
int kw = uConstant->kernelSize[0];
int kh = uConstant->kernelSize[1];
int sw = uConstant->stride[0];
int sh = uConstant->stride[1];
int pw = uConstant->pad[0];
int ph = uConstant->pad[1];
int c = uConstant->channel;
int c_p = c * PACK_NUMBER;
for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < uConstant->total / 2; index += blockDim.x * gridDim.x) {
int oz_2, tmp2, oy, ox, tmp1, ob;
d_oc.divmod(index, tmp1, oz_2);
d_ow.divmod(tmp1, tmp2, ox);
d_oh.divmod(tmp2, ob, oy);
int oz = oz_2 << 1;
int ix = ox * sw - pw;
int iy = oy * sh - ph;
float color0 = bias[oz];
float color1 = bias[oz+1];
int fxSta = max(0, -ix);
int fySta = max(0, -iy);
int fxEnd = min(kw, iw - ix);
int fyEnd = min(kh, ih - iy);
int fx, fy, fz;
for (fy=fySta; fy<fyEnd; ++fy) {
int sy = fy + iy;
for (fx=fxSta; fx<fxEnd; ++fx) {
int sx = fx + ix;
int src_offset = ((ob * ih + sy) * iw + sx) * c_p + oz;
float inp0 = input[src_offset];
float inp1 = input[src_offset+1];
float ker0 = kernel[(fy * kw + fx) * c_p + oz];
float ker1 = kernel[(fy * kw + fx) * c_p + oz + 1];
color0 = color0 + inp0 * ker0;
color1 = color1 + inp1 * ker1;
}
}
color0 = max(color0, minV);
color0 = min(color0, maxV);
color1 = max(color1, minV);
color1 = min(color1, maxV);
int dst_offset = ((ob * oh + oy) * ow + ox) * c_p + oz;
output[dst_offset] = color0;
output[dst_offset+1] = color1;
}
}
static std::shared_ptr<ConvDepthWiseExecution::Resource> _makeResource(const Op* op, Backend* bn) {
std::shared_ptr<ConvDepthWiseExecution::Resource> res(new ConvDepthWiseExecution::Resource);
auto pool = static_cast<CUDABackend*>(bn)->getStaticBufferPool();
auto runtime = static_cast<CUDABackend*>(bn)->getCUDARuntime();
auto conv = op->main_as_Convolution2D();
auto convCommon = conv->common();
int kernelX = convCommon->kernelX();
int kernelY = convCommon->kernelY();
int depth = convCommon->outputCount();
int depthC = UP_DIV(depth, PACK_NUMBER);
res->weightTensor.reset(Tensor::createDevice<float>({kernelX * kernelY * depthC * PACK_NUMBER}));
bool success = bn->onAcquireBuffer(res->weightTensor.get(), Backend::STATIC);
if (!success) {
return nullptr;
}
res->mFilter = (void *)res->weightTensor.get()->buffer().device;
FuseRegion reg;
int offset[8 * PACK_NUMBER];
auto regionStorage = static_cast<CUDABackend*>(bn)->getStaticBufferPool()->alloc(sizeof(FuseRegion));
auto offsetGpuStorage = static_cast<CUDABackend*>(bn)->getStaticBufferPool()->alloc(sizeof(offset));
auto offsetGpu = (uint8_t*)offsetGpuStorage.first + offsetGpuStorage.second;
//weight host->device
const float* filterDataPtr = nullptr;
int weightSize = 0;
std::shared_ptr<ConvolutionCommon::Int8Common> quanCommon;
ConvolutionCommon::getConvParameters(&quanCommon, conv, &filterDataPtr, &weightSize);
auto tempWeightStorage = pool->alloc(depthC * PACK_NUMBER * kernelY * kernelX * sizeof(float));
auto tempWeight = (uint8_t*)tempWeightStorage.first + tempWeightStorage.second;
cuda_check(hipMemset(tempWeight, 0, depthC * PACK_NUMBER * kernelY * kernelX * sizeof(float)));
cuda_check(hipMemcpy(tempWeight, filterDataPtr, weightSize*sizeof(float), hipMemcpyHostToDevice));
reg.size[0] = 1;
reg.size[1] = kernelY * kernelX;
reg.size[2] = depthC * PACK_NUMBER;
reg.srcStride[0] = 0;
reg.srcStride[1] = 1;
reg.srcStride[2] = kernelY * kernelX;
reg.dstStride[0] = 0;
reg.dstStride[1] = depthC * PACK_NUMBER;
reg.dstStride[2] = 1;
offset[0] = 1;
offset[1] = kernelY * kernelX;
offset[2] = depth;
offset[3] = 0;
offset[4] = 1;
offset[5] = reg.size[1];
offset[6] = reg.size[2];
offset[7] = 0;
reg.fuseNumber = 1;
runtime->memcpy((uint8_t*)regionStorage.first + regionStorage.second, ®, sizeof(FuseRegion), MNNMemcpyHostToDevice, true);
runtime->memcpy(offsetGpu, offset, 8 * sizeof(int), MNNMemcpyHostToDevice, true);
FuseRasterBlitFloatToHalf((uint8_t*)res->mFilter, (uint8_t*)tempWeight, (FuseRegion*)((uint8_t*)regionStorage.first + regionStorage.second), offsetGpu, runtime);
pool->free(tempWeightStorage);
res->biasTensor.reset(Tensor::createDevice<float>({depthC * PACK_NUMBER}));
success = bn->onAcquireBuffer(res->biasTensor.get(), Backend::STATIC);
res->mBias = (void *)res->biasTensor.get()->buffer().device;
if (!success) {
return nullptr;
}
if(conv->bias() != nullptr) {
auto tempBiasStorage = pool->alloc(depth * sizeof(float));
auto tempBias = (uint8_t*)tempBiasStorage.first + tempBiasStorage.second;
cuda_check(hipMemcpy(tempBias, conv->bias()->data(), conv->bias()->size()*sizeof(float), hipMemcpyHostToDevice));
reg.size[0] = 1;
reg.size[1] = 1;
reg.size[2] = depthC * PACK_NUMBER;
reg.srcStride[0] = 0;
reg.srcStride[1] = 0;
reg.srcStride[2] = 1;
reg.dstStride[0] = 0;
reg.dstStride[1] = 0;
reg.dstStride[2] = 1;
offset[0] = 1;
offset[1] = 1;
offset[2] = conv->bias()->size();
offset[3] = 0;
offset[4] = 1;
offset[5] = 1;
offset[6] = reg.size[2];
offset[7] = 0;
reg.fuseNumber = 1;
runtime->memcpy((uint8_t*)regionStorage.first + regionStorage.second, ®, sizeof(FuseRegion), MNNMemcpyHostToDevice, true);
runtime->memcpy(offsetGpu, offset, 8 * sizeof(int), MNNMemcpyHostToDevice, true);
FuseRasterBlitFloatToHalf((uint8_t*)res->mBias, (uint8_t*)tempBias, (FuseRegion*)((uint8_t*)regionStorage.first + regionStorage.second), offsetGpu, runtime);
pool->free(tempBiasStorage);
}
static_cast<CUDABackend*>(bn)->getStaticBufferPool()->free(regionStorage);
static_cast<CUDABackend*>(bn)->getStaticBufferPool()->free(offsetGpuStorage);
return res;
}
ConvDepthWiseExecution::ConvDepthWiseExecution(const Op* op, Backend* bn, std::shared_ptr<Resource> resource) : Execution(bn) {
mOp = op;
mResource = resource;
auto pool = static_cast<CUDABackend*>(bn)->getStaticBufferPool();
mConstBuffer = pool->alloc(sizeof(constBuffer));
}
ConvDepthWiseExecution::~ ConvDepthWiseExecution() {
auto pool = static_cast<CUDABackend*>(backend())->getStaticBufferPool();
pool->free(mConstBuffer);
}
ErrorCode ConvDepthWiseExecution::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto pad = ConvolutionCommon::convolutionPad(inputs[0], outputs[0], mOp->main_as_Convolution2D()->common());
auto conv = mOp->main_as_Convolution2D();
auto convCommon = mOp->main_as_Convolution2D()->common();
int channel = inputs[0]->channel();
int channelDiv = UP_DIV(channel, PACK_NUMBER);
parameters.pad[0] = pad.first;
parameters.pad[1] = pad.second;
parameters.kernelSize[0] = convCommon->kernelX();
parameters.kernelSize[1] = convCommon->kernelY();
parameters.stride[0] = convCommon->strideX();
parameters.stride[1] = convCommon->strideY();
parameters.dilate[0] = convCommon->dilateX();
parameters.dilate[1] = convCommon->dilateY();
parameters.inputSize[0] = inputs[0]->width();
parameters.inputSize[1] = inputs[0]->height();
parameters.channel = channelDiv;
parameters.outputSize[0] = outputs[0]->width();
parameters.outputSize[1] = outputs[0]->height();
parameters.batch = inputs[0]->batch();
parameters.total = parameters.batch * parameters.outputSize[1] * parameters.outputSize[0] * parameters.channel * PACK_NUMBER;
if (static_cast<CUDABackend*>(backend())->useFp16()) {
// Do nothing
} else {
parameters.minValue = -FLT_MAX;
parameters.maxValue = FLT_MAX;
}
if (convCommon->relu()) {
parameters.minValue = 0.0f;
}
if (convCommon->relu6()) {
parameters.minValue = 0.0f;
parameters.maxValue = 6.0f;
}
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
runtime->memcpy((uint8_t*)mConstBuffer.first + mConstBuffer.second, ¶meters, sizeof(constBuffer), MNNMemcpyHostToDevice);
mTotalCount = parameters.total;
//MNN_PRINT("%d-%d-%d-%d, %d-%d-%d-%d-%d\n", parameters.kernelSize[0], parameters.kernelSize[1], parameters.stride[0], parameters.stride[1], parameters.inputSize[0], parameters.inputSize[1], channel, parameters.outputSize[0], parameters.outputSize[1]);
return NO_ERROR;
}
ErrorCode ConvDepthWiseExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
auto& prop = runtime->prop();
int limitThreads = UP_DIV(mTotalCount, prop.multiProcessorCount);
int threads_num = ALIMIN(prop.maxThreadsPerBlock/2, limitThreads);
int block_num = prop.multiProcessorCount;
auto constPtr = (uint8_t*)mConstBuffer.first + mConstBuffer.second;
DivModFast d_oc(parameters.channel * PACK_NUMBER / 2);
DivModFast d_ow(parameters.outputSize[0]);
DivModFast d_oh(parameters.outputSize[1]);
if (static_cast<CUDABackend*>(backend())->useFp16()) {
if(parameters.kernelSize[0]==3 && parameters.kernelSize[1]==3 && parameters.stride[0]==1 && parameters.stride[1]==1 && parameters.pad[0]==1 && parameters.pad[1]==1 && parameters.outputSize[0] % 2 ==0) {
DivModFast d_ow2(parameters.outputSize[0]/2);
hipLaunchKernelGGL(( CONV_DW3x3_HALF2_OPT), dim3(block_num), dim3(threads_num), 0, 0, (const half2*)inputs[0]->deviceId(), (const half2*)mResource->mFilter,
(const half2*)mResource->mBias, (half2*)outputs[0]->deviceId(), (const constBuffer*)(constPtr),
d_oc, d_ow2, d_oh);
checkKernelErrors;
return NO_ERROR;
}
if(parameters.dilate[0] == 1 && parameters.dilate[1] == 1) {
hipLaunchKernelGGL(( CONV_DW_HALF2_OPT), dim3(block_num), dim3(threads_num), 0, 0, (const half2*)inputs[0]->deviceId(), (const half2*)mResource->mFilter,
(const half2*)mResource->mBias, (half2*)outputs[0]->deviceId(), (const constBuffer*)(constPtr),
d_oc, d_ow, d_oh);//_HALF_OPT
checkKernelErrors;
} else {
hipLaunchKernelGGL(( CONV_DW), dim3(block_num), dim3(threads_num), 0, 0, (const half*)inputs[0]->deviceId(), (const half*)mResource->mFilter,
(const half*)mResource->mBias, (half*)outputs[0]->deviceId(), (const constBuffer*)(constPtr),
d_oc, d_ow, d_oh);
checkKernelErrors;
}
return NO_ERROR;
}
if (inputs.size() == 1) {
// block_num = runtime->blocks_num(mTotalCount);
// threads_num = runtime->threads_num();
if(parameters.dilate[0] == 1 && parameters.dilate[1] == 1) {
hipLaunchKernelGGL(( CONV_DW_OPT), dim3(block_num), dim3(threads_num), 0, 0, (const float*)inputs[0]->deviceId(), (const half*)mResource->mFilter,
(const half*)mResource->mBias, (float*)outputs[0]->deviceId(), (const constBuffer*)(constPtr),
d_oc, d_ow, d_oh);
checkKernelErrors;
} else {
hipLaunchKernelGGL(( CONV_DW), dim3(block_num), dim3(threads_num), 0, 0, (const float*)inputs[0]->deviceId(), (const half*)mResource->mFilter,
(const half*)mResource->mBias, (float*)outputs[0]->deviceId(), (const constBuffer*)(constPtr),
d_oc, d_ow, d_oh);
checkKernelErrors;
}
}
return NO_ERROR;
}
class ConvDepthWiseExecutionCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
if (inputs.size() > 1) {
return nullptr;
}
auto res = _makeResource(op, backend);
if (nullptr == res) {
return nullptr;
}
return new ConvDepthWiseExecution(op, backend, res);
}
};
static CUDACreatorRegister<ConvDepthWiseExecutionCreator> __init(OpType_ConvolutionDepthwise);
}
} | 324548e3cb857c05f632edd5739ac5d0c561ea9f.cu | #include "ConvDepthWiseExecution.hpp"
#include "core/ConvolutionCommon.hpp"
#include "Raster.cuh"
#include <float.h>
#include "MNNCUDADefine.hpp"
#include "MNNCUDAFunction.cuh"
namespace MNN {
namespace CUDA {
template<typename T>
__global__ void CONV_DW(const T* input,
const half* kernel,
const half* bias,
T *output,
const constBuffer* uConstant,
DivModFast d_oc,
DivModFast d_ow,
DivModFast d_oh
) {
float maxV = uConstant->maxValue;
float minV = uConstant->minValue;
int iw = uConstant->inputSize[0];
int ih = uConstant->inputSize[1];
int c = uConstant->channel;
int c_p = c * PACK_NUMBER;
int ow = uConstant->outputSize[0];
int oh = uConstant->outputSize[1];
int kw = uConstant->kernelSize[0];
int kh = uConstant->kernelSize[1];
int dw = uConstant->dilate[0];
int dh = uConstant->dilate[1];
int sw = uConstant->stride[0];
int sh = uConstant->stride[1];
int pw = uConstant->pad[0];
int ph = uConstant->pad[1];
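    // Each grid-stride iteration below produces one output pixel for two
    // adjacent packed channels: `index` is decoded with DivModFast as
    // (((ob * oh + oy) * ow + ox) * (c_p / 2)) + oz_2, which mirrors the
    // packed NHWC layout used to build `dst_offset`.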
for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < uConstant->total/2; index += blockDim.x * gridDim.x) {
int oz_2, tmp2, oy, ox, tmp1, ob;
d_oc.divmod(index, tmp1, oz_2);
d_ow.divmod(tmp1, tmp2, ox);
d_oh.divmod(tmp2, ob, oy);
int oz = oz_2 << 1;
int ix = ox * sw - pw;
int iy = oy * sh - ph;
float color0 = bias[oz];
float color1 = bias[oz+1];
int fxSta = max(0, (UP_DIV(-ix, dw)));
int fySta = max(0, (UP_DIV(-iy, dh)));
int fxEnd = min(kw, UP_DIV(iw - ix, dw));
int fyEnd = min(kh, UP_DIV(ih - iy, dh));
int fx, fy, fz;
for (fy=fySta; fy<fyEnd; ++fy) {
int sy = fy*dh + iy;
for (fx=fxSta; fx<fxEnd; ++fx) {
int sx = fx*dw + ix;
int src_offset = ((ob * ih + sy) * iw + sx) * c_p + oz;
float inp0 = input[src_offset];
float inp1 = input[src_offset+1];
float ker0 = kernel[(fy * kw + fx) * c_p + oz];
float ker1 = kernel[(fy * kw + fx) * c_p + oz + 1];
color0 = color0 + inp0 * ker0;
color1 = color1 + inp1 * ker1;
}
}
color0 = max(color0, minV);
color0 = min(color0, maxV);
color1 = max(color1, minV);
color1 = min(color1, maxV);
int dst_offset = ((ob * oh + oy) * ow + ox) * c_p + oz;
output[dst_offset] = color0;
output[dst_offset+1] = color1;
}
}
__global__ void CONV_DW_HALF2_OPT(const half2* input,
const half2* kernel,
const half2* bias,
half2 *output,
const constBuffer* uConstant,
DivModFast d_oc,
DivModFast d_ow,
DivModFast d_oh
) {
float maxV = uConstant->maxValue;
float minV = uConstant->minValue;
int iw = uConstant->inputSize[0];
int ih = uConstant->inputSize[1];
int c = uConstant->channel;
int c_p = c * PACK_NUMBER / 2;
int ow = uConstant->outputSize[0];
int oh = uConstant->outputSize[1];
int kw = uConstant->kernelSize[0];
int kh = uConstant->kernelSize[1];
int sw = uConstant->stride[0];
int sh = uConstant->stride[1];
int pw = uConstant->pad[0];
int ph = uConstant->pad[1];
for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < uConstant->total/2; index += blockDim.x * gridDim.x) {
int oz_2, tmp2, oy, ox, tmp1, ob;
d_oc.divmod(index, tmp1, oz_2);
d_ow.divmod(tmp1, tmp2, ox);
d_oh.divmod(tmp2, ob, oy);
int oz = oz_2;
int ix = ox * sw - pw;
int iy = oy * sh - ph;
half2 color = bias[oz];
int fxSta = max(0, -ix);
int fySta = max(0, -iy);
int fxEnd = min(kw, iw - ix);
int fyEnd = min(kh, ih - iy);
int fx, fy, fz;
for (fy=fySta; fy<fyEnd; ++fy) {
int sy = fy + iy;
for (fx=fxSta; fx<fxEnd; ++fx) {
int sx = fx + ix;
int src_offset = ((ob * ih + sy) * iw + sx) * c_p + oz;
half2 inp = input[src_offset];
half2 ker = kernel[(fy * kw + fx) * c_p + oz];
color = __hfma2(inp, ker, color);
}
}
color.x = max(color.x, minV);
color.x = min(color.x, maxV);
color.y = max(color.y, minV);
color.y = min(color.y, maxV);
int dst_offset = ((ob * oh + oy) * ow + ox) * c_p + oz;
output[dst_offset] = color;
}
}
__global__ void CONV_DW3x3_HALF2_OPT(const half2* input,
const half2* kernel,
const half2* bias,
half2 *output,
const constBuffer* uConstant,
DivModFast d_oc,
DivModFast d_ow,
DivModFast d_oh
) {
float maxV = uConstant->maxValue;
float minV = uConstant->minValue;
int iw = uConstant->inputSize[0];
int ih = uConstant->inputSize[1];
int c = uConstant->channel;
int c_p = c * PACK_NUMBER / 2;
int ow = uConstant->outputSize[0];
int oh = uConstant->outputSize[1];
for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < uConstant->total/4; index += blockDim.x * gridDim.x) {
int oz_2, tmp2, oy, ox_2, tmp1, ob;
d_oc.divmod(index, tmp1, oz_2);
d_ow.divmod(tmp1, tmp2, ox_2);
d_oh.divmod(tmp2, ob, oy);
int oz = oz_2;
int ox = ox_2 << 1;
int ix = ox - 1;
int iy = oy - 1;
half2 color0 = bias[oz];
half2 color1 = color0;
half2 zero;
zero.x = (half)0.0;
zero.y = (half)0.0;
half2 inp[12];
half2 ker[3][3];
for(int j=0; j<3; j++) {
if(iy < 0 && j==0) {
for(int i=0; i<4; i++) {
inp[i] = zero;
}
continue;
}
if(iy+2 > ih-1 && j==2) {
for(int i=0; i<4; i++) {
inp[8+i] = zero;
}
continue;
}
for(int i=0; i<4; i++) {
if(ix < 0 && i==0) {
for(int j=0; j<3; j++) {
inp[4*j+0] = zero;
}
continue;
}
if(ix+3 > iw-1 && i==3) {
for(int j=0; j<3; j++) {
inp[4*j+3] = zero;
}
continue;
}
int src_offset = ((ob * ih + iy+j) * iw + ix+i) * c_p + oz;
inp[4*j+i] = input[src_offset];
}
}
for(int j=0; j<3; j++) {
for(int i=0; i<3; i++) {
ker[j][i] = kernel[(j * 3 + i) * c_p + oz];
}
}
for(int j=0; j<3; j++) {
for(int i=0; i<3; i++) {
color0 = __hfma2(inp[4*j+i], ker[j][i], color0);
color1 = __hfma2(inp[4*j+i+1], ker[j][i], color1);
}
}
color0.x = max(color0.x, minV);
color0.x = min(color0.x, maxV);
color0.y = max(color0.y, minV);
color0.y = min(color0.y, maxV);
color1.x = max(color1.x, minV);
color1.x = min(color1.x, maxV);
color1.y = max(color1.y, minV);
color1.y = min(color1.y, maxV);
int dst_offset = ((ob * oh + oy) * ow + ox) * c_p + oz;
output[dst_offset] = color0;
output[dst_offset+c_p] = color1;
}
}
__global__ void CONV_DW_OPT(const float* input, const half* kernel, const half* bias, float *output, const constBuffer* uConstant,
DivModFast d_oc,
DivModFast d_ow,
DivModFast d_oh
) {
float maxV = uConstant->maxValue;
float minV = uConstant->minValue;
int iw = uConstant->inputSize[0];
int ih = uConstant->inputSize[1];
int ow = uConstant->outputSize[0];
int oh = uConstant->outputSize[1];
int kw = uConstant->kernelSize[0];
int kh = uConstant->kernelSize[1];
int sw = uConstant->stride[0];
int sh = uConstant->stride[1];
int pw = uConstant->pad[0];
int ph = uConstant->pad[1];
int c = uConstant->channel;
int c_p = c * PACK_NUMBER;
for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < uConstant->total / 2; index += blockDim.x * gridDim.x) {
int oz_2, tmp2, oy, ox, tmp1, ob;
d_oc.divmod(index, tmp1, oz_2);
d_ow.divmod(tmp1, tmp2, ox);
d_oh.divmod(tmp2, ob, oy);
int oz = oz_2 << 1;
int ix = ox * sw - pw;
int iy = oy * sh - ph;
float color0 = bias[oz];
float color1 = bias[oz+1];
int fxSta = max(0, -ix);
int fySta = max(0, -iy);
int fxEnd = min(kw, iw - ix);
int fyEnd = min(kh, ih - iy);
int fx, fy, fz;
for (fy=fySta; fy<fyEnd; ++fy) {
int sy = fy + iy;
for (fx=fxSta; fx<fxEnd; ++fx) {
int sx = fx + ix;
int src_offset = ((ob * ih + sy) * iw + sx) * c_p + oz;
float inp0 = input[src_offset];
float inp1 = input[src_offset+1];
float ker0 = kernel[(fy * kw + fx) * c_p + oz];
float ker1 = kernel[(fy * kw + fx) * c_p + oz + 1];
color0 = color0 + inp0 * ker0;
color1 = color1 + inp1 * ker1;
}
}
color0 = max(color0, minV);
color0 = min(color0, maxV);
color1 = max(color1, minV);
color1 = min(color1, maxV);
int dst_offset = ((ob * oh + oy) * ow + ox) * c_p + oz;
output[dst_offset] = color0;
output[dst_offset+1] = color1;
}
}
static std::shared_ptr<ConvDepthWiseExecution::Resource> _makeResource(const Op* op, Backend* bn) {
std::shared_ptr<ConvDepthWiseExecution::Resource> res(new ConvDepthWiseExecution::Resource);
auto pool = static_cast<CUDABackend*>(bn)->getStaticBufferPool();
auto runtime = static_cast<CUDABackend*>(bn)->getCUDARuntime();
auto conv = op->main_as_Convolution2D();
auto convCommon = conv->common();
int kernelX = convCommon->kernelX();
int kernelY = convCommon->kernelY();
int depth = convCommon->outputCount();
int depthC = UP_DIV(depth, PACK_NUMBER);
res->weightTensor.reset(Tensor::createDevice<float>({kernelX * kernelY * depthC * PACK_NUMBER}));
bool success = bn->onAcquireBuffer(res->weightTensor.get(), Backend::STATIC);
if (!success) {
return nullptr;
}
res->mFilter = (void *)res->weightTensor.get()->buffer().device;
FuseRegion reg;
int offset[8 * PACK_NUMBER];
auto regionStorage = static_cast<CUDABackend*>(bn)->getStaticBufferPool()->alloc(sizeof(FuseRegion));
auto offsetGpuStorage = static_cast<CUDABackend*>(bn)->getStaticBufferPool()->alloc(sizeof(offset));
auto offsetGpu = (uint8_t*)offsetGpuStorage.first + offsetGpuStorage.second;
//weight host->device
const float* filterDataPtr = nullptr;
int weightSize = 0;
std::shared_ptr<ConvolutionCommon::Int8Common> quanCommon;
ConvolutionCommon::getConvParameters(&quanCommon, conv, &filterDataPtr, &weightSize);
auto tempWeightStorage = pool->alloc(depthC * PACK_NUMBER * kernelY * kernelX * sizeof(float));
auto tempWeight = (uint8_t*)tempWeightStorage.first + tempWeightStorage.second;
cuda_check(cudaMemset(tempWeight, 0, depthC * PACK_NUMBER * kernelY * kernelX * sizeof(float)));
cuda_check(cudaMemcpy(tempWeight, filterDataPtr, weightSize*sizeof(float), cudaMemcpyHostToDevice));
reg.size[0] = 1;
reg.size[1] = kernelY * kernelX;
reg.size[2] = depthC * PACK_NUMBER;
reg.srcStride[0] = 0;
reg.srcStride[1] = 1;
reg.srcStride[2] = kernelY * kernelX;
reg.dstStride[0] = 0;
reg.dstStride[1] = depthC * PACK_NUMBER;
reg.dstStride[2] = 1;
offset[0] = 1;
offset[1] = kernelY * kernelX;
offset[2] = depth;
offset[3] = 0;
offset[4] = 1;
offset[5] = reg.size[1];
offset[6] = reg.size[2];
offset[7] = 0;
reg.fuseNumber = 1;
runtime->memcpy((uint8_t*)regionStorage.first + regionStorage.second, ®, sizeof(FuseRegion), MNNMemcpyHostToDevice, true);
runtime->memcpy(offsetGpu, offset, 8 * sizeof(int), MNNMemcpyHostToDevice, true);
FuseRasterBlitFloatToHalf((uint8_t*)res->mFilter, (uint8_t*)tempWeight, (FuseRegion*)((uint8_t*)regionStorage.first + regionStorage.second), offsetGpu, runtime);
pool->free(tempWeightStorage);
res->biasTensor.reset(Tensor::createDevice<float>({depthC * PACK_NUMBER}));
success = bn->onAcquireBuffer(res->biasTensor.get(), Backend::STATIC);
res->mBias = (void *)res->biasTensor.get()->buffer().device;
if (!success) {
return nullptr;
}
if(conv->bias() != nullptr) {
auto tempBiasStorage = pool->alloc(depth * sizeof(float));
auto tempBias = (uint8_t*)tempBiasStorage.first + tempBiasStorage.second;
cuda_check(cudaMemcpy(tempBias, conv->bias()->data(), conv->bias()->size()*sizeof(float), cudaMemcpyHostToDevice));
reg.size[0] = 1;
reg.size[1] = 1;
reg.size[2] = depthC * PACK_NUMBER;
reg.srcStride[0] = 0;
reg.srcStride[1] = 0;
reg.srcStride[2] = 1;
reg.dstStride[0] = 0;
reg.dstStride[1] = 0;
reg.dstStride[2] = 1;
offset[0] = 1;
offset[1] = 1;
offset[2] = conv->bias()->size();
offset[3] = 0;
offset[4] = 1;
offset[5] = 1;
offset[6] = reg.size[2];
offset[7] = 0;
reg.fuseNumber = 1;
runtime->memcpy((uint8_t*)regionStorage.first + regionStorage.second, ®, sizeof(FuseRegion), MNNMemcpyHostToDevice, true);
runtime->memcpy(offsetGpu, offset, 8 * sizeof(int), MNNMemcpyHostToDevice, true);
FuseRasterBlitFloatToHalf((uint8_t*)res->mBias, (uint8_t*)tempBias, (FuseRegion*)((uint8_t*)regionStorage.first + regionStorage.second), offsetGpu, runtime);
pool->free(tempBiasStorage);
}
static_cast<CUDABackend*>(bn)->getStaticBufferPool()->free(regionStorage);
static_cast<CUDABackend*>(bn)->getStaticBufferPool()->free(offsetGpuStorage);
return res;
}
ConvDepthWiseExecution::ConvDepthWiseExecution(const Op* op, Backend* bn, std::shared_ptr<Resource> resource) : Execution(bn) {
mOp = op;
mResource = resource;
auto pool = static_cast<CUDABackend*>(bn)->getStaticBufferPool();
mConstBuffer = pool->alloc(sizeof(constBuffer));
}
ConvDepthWiseExecution::~ ConvDepthWiseExecution() {
auto pool = static_cast<CUDABackend*>(backend())->getStaticBufferPool();
pool->free(mConstBuffer);
}
ErrorCode ConvDepthWiseExecution::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto pad = ConvolutionCommon::convolutionPad(inputs[0], outputs[0], mOp->main_as_Convolution2D()->common());
auto conv = mOp->main_as_Convolution2D();
auto convCommon = mOp->main_as_Convolution2D()->common();
int channel = inputs[0]->channel();
int channelDiv = UP_DIV(channel, PACK_NUMBER);
parameters.pad[0] = pad.first;
parameters.pad[1] = pad.second;
parameters.kernelSize[0] = convCommon->kernelX();
parameters.kernelSize[1] = convCommon->kernelY();
parameters.stride[0] = convCommon->strideX();
parameters.stride[1] = convCommon->strideY();
parameters.dilate[0] = convCommon->dilateX();
parameters.dilate[1] = convCommon->dilateY();
parameters.inputSize[0] = inputs[0]->width();
parameters.inputSize[1] = inputs[0]->height();
parameters.channel = channelDiv;
parameters.outputSize[0] = outputs[0]->width();
parameters.outputSize[1] = outputs[0]->height();
parameters.batch = inputs[0]->batch();
parameters.total = parameters.batch * parameters.outputSize[1] * parameters.outputSize[0] * parameters.channel * PACK_NUMBER;
if (static_cast<CUDABackend*>(backend())->useFp16()) {
// Do nothing
} else {
parameters.minValue = -FLT_MAX;
parameters.maxValue = FLT_MAX;
}
if (convCommon->relu()) {
parameters.minValue = 0.0f;
}
if (convCommon->relu6()) {
parameters.minValue = 0.0f;
parameters.maxValue = 6.0f;
}
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
runtime->memcpy((uint8_t*)mConstBuffer.first + mConstBuffer.second, ¶meters, sizeof(constBuffer), MNNMemcpyHostToDevice);
mTotalCount = parameters.total;
//MNN_PRINT("%d-%d-%d-%d, %d-%d-%d-%d-%d\n", parameters.kernelSize[0], parameters.kernelSize[1], parameters.stride[0], parameters.stride[1], parameters.inputSize[0], parameters.inputSize[1], channel, parameters.outputSize[0], parameters.outputSize[1]);
return NO_ERROR;
}
ErrorCode ConvDepthWiseExecution::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
auto& prop = runtime->prop();
int limitThreads = UP_DIV(mTotalCount, prop.multiProcessorCount);
int threads_num = ALIMIN(prop.maxThreadsPerBlock/2, limitThreads);
int block_num = prop.multiProcessorCount;
auto constPtr = (uint8_t*)mConstBuffer.first + mConstBuffer.second;
DivModFast d_oc(parameters.channel * PACK_NUMBER / 2);
DivModFast d_ow(parameters.outputSize[0]);
DivModFast d_oh(parameters.outputSize[1]);
if (static_cast<CUDABackend*>(backend())->useFp16()) {
if(parameters.kernelSize[0]==3 && parameters.kernelSize[1]==3 && parameters.stride[0]==1 && parameters.stride[1]==1 && parameters.pad[0]==1 && parameters.pad[1]==1 && parameters.outputSize[0] % 2 ==0) {
DivModFast d_ow2(parameters.outputSize[0]/2);
CONV_DW3x3_HALF2_OPT<<<block_num, threads_num>>>((const half2*)inputs[0]->deviceId(), (const half2*)mResource->mFilter,
(const half2*)mResource->mBias, (half2*)outputs[0]->deviceId(), (const constBuffer*)(constPtr),
d_oc, d_ow2, d_oh);
checkKernelErrors;
return NO_ERROR;
}
if(parameters.dilate[0] == 1 && parameters.dilate[1] == 1) {
CONV_DW_HALF2_OPT<<<block_num, threads_num>>>((const half2*)inputs[0]->deviceId(), (const half2*)mResource->mFilter,
(const half2*)mResource->mBias, (half2*)outputs[0]->deviceId(), (const constBuffer*)(constPtr),
d_oc, d_ow, d_oh);//_HALF_OPT
checkKernelErrors;
} else {
CONV_DW<<<block_num, threads_num>>>((const half*)inputs[0]->deviceId(), (const half*)mResource->mFilter,
(const half*)mResource->mBias, (half*)outputs[0]->deviceId(), (const constBuffer*)(constPtr),
d_oc, d_ow, d_oh);
checkKernelErrors;
}
return NO_ERROR;
}
if (inputs.size() == 1) {
// block_num = runtime->blocks_num(mTotalCount);
// threads_num = runtime->threads_num();
if(parameters.dilate[0] == 1 && parameters.dilate[1] == 1) {
CONV_DW_OPT<<<block_num, threads_num>>>((const float*)inputs[0]->deviceId(), (const half*)mResource->mFilter,
(const half*)mResource->mBias, (float*)outputs[0]->deviceId(), (const constBuffer*)(constPtr),
d_oc, d_ow, d_oh);
checkKernelErrors;
} else {
CONV_DW<<<block_num, threads_num>>>((const float*)inputs[0]->deviceId(), (const half*)mResource->mFilter,
(const half*)mResource->mBias, (float*)outputs[0]->deviceId(), (const constBuffer*)(constPtr),
d_oc, d_ow, d_oh);
checkKernelErrors;
}
}
return NO_ERROR;
}
class ConvDepthWiseExecutionCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
if (inputs.size() > 1) {
return nullptr;
}
auto res = _makeResource(op, backend);
if (nullptr == res) {
return nullptr;
}
return new ConvDepthWiseExecution(op, backend, res);
}
};
static CUDACreatorRegister<ConvDepthWiseExecutionCreator> __init(OpType_ConvolutionDepthwise);
}
} |
8cb19c0411ea691ffb8633fea8c9a5a41717051a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <cstdio>
#include <math.h>
// this kernel computes the vector sum c = a + b
// each thread performs one pair-wise addition
__global__ void vector_add(const float *a,
const float *b,
float *c,
const size_t n){
unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
// avoid accessing out of bounds elements
if(i < n)
{
// sum elements
c[i] = a[i] + b[i];
}
}
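// Launch-shape sketch (illustrative numbers only): main() below launches
// ceil(num_elements / 32.0) blocks of 32 threads, e.g. 1000 elements map to
// 32 blocks (1024 threads); threads with i >= n skip the store via the
// bounds check above.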
int main(void)
{
    // read the number of elements from the user
    int num_elements = 0;
    printf("Enter the number of elements to add: ");
scanf("%d", &num_elements);
// compute the size of the arrays in bytes
const int num_bytes = num_elements * sizeof(float);
// points to host & device arrays
float *device_array_a = 0;
float *device_array_b = 0;
float *device_array_c = 0;
float *host_array_a = 0;
float *host_array_b = 0;
float *host_array_c = 0;
// malloc the host arrays
host_array_a = (float*)malloc(num_bytes);
host_array_b = (float*)malloc(num_bytes);
host_array_c = (float*)malloc(num_bytes);
// hipMalloc the device arrays
hipMalloc((void**)&device_array_a, num_bytes);
hipMalloc((void**)&device_array_b, num_bytes);
hipMalloc((void**)&device_array_c, num_bytes);
// initialize host_array_a & host_array_b
for(int i = 0; i < num_elements; ++i)
{
// make array a a linear ramp
host_array_a[i] = (float)i;
// make array b random
host_array_b[i] = (float)rand() / RAND_MAX;
}
// copy arrays a & b to the device memory space
hipMemcpy(device_array_a, host_array_a, num_bytes, hipMemcpyHostToDevice);
hipMemcpy(device_array_b, host_array_b, num_bytes, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( vector_add) , dim3(ceil(num_elements/32.0)), dim3(32), 0, 0, device_array_a, device_array_b, device_array_c, num_elements);
hipMemcpy(host_array_c, device_array_c, num_bytes, hipMemcpyDeviceToHost);
for(int i = 0; i < num_elements; ++i)
{
printf("result %d: %1.1f + %7.1f = %7.1f\n", i, host_array_a[i], host_array_b[i], host_array_c[i]);
}
// deallocate memory
free(host_array_a);
free(host_array_b);
free(host_array_c);
hipFree(device_array_a);
hipFree(device_array_b);
hipFree(device_array_c);
}
| 8cb19c0411ea691ffb8633fea8c9a5a41717051a.cu | #include <stdlib.h>
#include <cstdio>
#include <math.h>
// this kernel computes the vector sum c = a + b
// each thread performs one pair-wise addition
__global__ void vector_add(const float *a,
const float *b,
float *c,
const size_t n){
unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
// avoid accessing out of bounds elements
if(i < n)
{
// sum elements
c[i] = a[i] + b[i];
}
}
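// Launch-shape sketch (illustrative numbers only): main() below launches
// ceil(num_elements / 32.0) blocks of 32 threads, e.g. 1000 elements map to
// 32 blocks (1024 threads); threads with i >= n skip the store via the
// bounds check above.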
int main(void)
{
    // read the number of elements from the user
    int num_elements = 0;
    printf("Enter the number of elements to add: ");
scanf("%d", &num_elements);
// compute the size of the arrays in bytes
const int num_bytes = num_elements * sizeof(float);
// points to host & device arrays
float *device_array_a = 0;
float *device_array_b = 0;
float *device_array_c = 0;
float *host_array_a = 0;
float *host_array_b = 0;
float *host_array_c = 0;
// malloc the host arrays
host_array_a = (float*)malloc(num_bytes);
host_array_b = (float*)malloc(num_bytes);
host_array_c = (float*)malloc(num_bytes);
// cudaMalloc the device arrays
cudaMalloc((void**)&device_array_a, num_bytes);
cudaMalloc((void**)&device_array_b, num_bytes);
cudaMalloc((void**)&device_array_c, num_bytes);
// initialize host_array_a & host_array_b
for(int i = 0; i < num_elements; ++i)
{
// make array a a linear ramp
host_array_a[i] = (float)i;
// make array b random
host_array_b[i] = (float)rand() / RAND_MAX;
}
// copy arrays a & b to the device memory space
cudaMemcpy(device_array_a, host_array_a, num_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(device_array_b, host_array_b, num_bytes, cudaMemcpyHostToDevice);
vector_add <<< ceil(num_elements/32.0), 32>>>(device_array_a, device_array_b, device_array_c, num_elements);
cudaMemcpy(host_array_c, device_array_c, num_bytes, cudaMemcpyDeviceToHost);
for(int i = 0; i < num_elements; ++i)
{
printf("result %d: %1.1f + %7.1f = %7.1f\n", i, host_array_a[i], host_array_b[i], host_array_c[i]);
}
// deallocate memory
free(host_array_a);
free(host_array_b);
free(host_array_c);
cudaFree(device_array_a);
cudaFree(device_array_b);
cudaFree(device_array_c);
}
|
cc6936e5fc1ace1df1556b663e063d684ffee132.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _CUDACONV2_EXPORT
#define _CUDACONV2_EXPORT
#endif
#include <cudaconv2.cuh>
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread.
* blockIdx.y determines 4x4 image region in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
* filters: (numColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numColors, filterPixels, numFilters) otherwise
* targets: (numColors, imgSizeY, imgSizeX, numImages)
*
* Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
*
* Number of filters must be divisible by 16.
* Number of images must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*/
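/*
 * Illustrative launch shape (example values, not taken from this file): with
 * imgsPerThread = 4 each block covers 16*4 = 64 cases and one 4x4 pixel
 * region, so a 32x32 image implies gridDim.y = DIVUP(32,4) * DIVUP(32,4) = 64
 * regions and gridDim.x = DIVUP(numImages, 64) case batches.
 */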
template <int imgsPerThread, int numColors, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_color(const float* hidActs, const float* filters, float* targets,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX,
const int paddingStart, const int moduleStride,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[numColors*16][16 + 1];
__shared__ float shHidActs[16][16*imgsPerThread];
const int blockCaseIdx = blockIdx.x * 16*imgsPerThread;
const int numRegionsX = DIVUP(imgSizeX, 4);
const int blockRegionIdx = blockIdx.y;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = blockRegionIdx / numRegionsX;
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxY * imgSizeX + pxX;
const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX;
const int numModules = numModulesY * numModulesX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeX * imgSizeY;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
hidActs += blockCaseIdx + loadY * numImages * numModules + loadX;
filters += threadIdx.x;
targets += pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[numColors][imgsPerThread];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
    float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;
for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * 16; i += 32) {
if (!checkCaseBounds || blockCaseIdx + i + loadX < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
: &filters[(moduleIdx * numColors * filterPixels + pxIdxInModule) * numFilters + f];
#pragma unroll
for (int c = 0; c < numColors; c++) {
                        shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
}
}
}
}
}
}
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/colorsPerThread
* blockIdx.y determines 4x4 image region in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgSizeY, imgSizeX, numImages)
*
 * Each block reconstructs one 4x4 region of pixels for 16*imgsPerThread cases.
*
* numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
* numImageColors/numGroups must be divisible by colorsPerThread.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are 4-16 color channels.
*/
template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_mediumcolor(const float* hidActs, const float* filters, float* targets,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart,
const int moduleStride, const int numImgColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*16][16 + 1];
__shared__ float shHidActs[16][16*imgsPerThread];
const int numImgBlocks = DIVUP(numImages,16*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int numRegionsX = DIVUP(imgSizeX, 4);
const int blockRegionIdx = blockIdx.y;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = blockRegionIdx / numRegionsX;
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxY * imgSizeX + pxX;
const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX;
    const int numModules = numModulesY * numModulesX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModules + loadX;
filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x;
targets += imgColorIdx * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;
            for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * 16; i += 32) {
if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
}
}
}
}
}
}
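/*
 * Illustrative sketch, added for exposition (not part of the original kernels):
 * how the dispatcher _imgActs further below derives the launch configuration for
 * img_acts_mediumcolor. Assumes a single group (numGroups == 1), so that
 * numFilterColors == numImgColors; the helper name is hypothetical.
 */
static inline void exampleMediumcolorLaunch(int numImages, int numImgColors,
                                            int imgSizeY, int imgSizeX,
                                            dim3& blocks, dim3& threads) {
    threads = dim3(16, 16); // threadIdx.x = case, threadIdx.y = pixel within the 4x4 region
    const int imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
    const int colorsPerThread = numImgColors % 4 == 0 ? 4 : 2;
    // blockIdx.x encodes both the image batch and the color batch;
    // blockIdx.y encodes the 4x4 pixel region of the target image.
    blocks = dim3(DIVUP(numImages, threads.x * imgsPerThread) * (numImgColors / colorsPerThread),
                  DIVUP(imgSizeY, 4) * DIVUP(imgSizeX, 4));
}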
/*
* Block size: B_YxB_X.
* blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
* blockIdx.y determines image pixel in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines color.
*
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgSizeY, imgSizeX, numImages)
*
 * Each block reconstructs B_Y*colorsPerThread colors of one pixel for B_X*imgsPerThread cases.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
* numFiltersPerGroup must be divisible by 16.
*
* B_X * imgsPerThread must be divisible by 32.
* numFilterColors must be divisible by B_Y*colorsPerThread.
* B_X*B_Y must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are >= 16 color channels.
*/
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void conv_img_acts_manycolor(const float* hidActs, const float* filters, float* targets,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
const int numImgColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*B_Y][16 + 1]; // TODO: perhaps reconsider this 16
__shared__ float shHidActs[16][B_X*imgsPerThread];
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockPixelIdx = blockIdx.y;
const int blockPixelIdxX = blockPixelIdx % imgSizeX;
const int blockPixelIdxY = blockPixelIdx / imgSizeX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16;
const int numModules = numModulesY * numModulesX;
hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
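    // Worked example with illustrative numbers: filterSize = 5, moduleStride = 1,
    // paddingStart = -2, blockPixelIdxY = 10 gives startY = 1 + (12 - 5)/1 = 8 and
    // endY = 13 (assuming numModulesY >= 13), i.e. exactly the modules my whose
    // receptive field [paddingStart + my, paddingStart + my + filterSize) covers row 10.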
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * B_X; i += 32) {
if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
#pragma unroll
                        for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*B_X cols, (B_X*B_Y/32) * 32 elements at a time.
shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
                        for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*B_X cols, (B_X*B_Y/32) * 32 elements at a time.
shHidActLoad[j * B_X * imgsPerThread + i] = 0;
}
}
}
const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
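                // Each 16-thread row (indexed by filtersLoadY) copies one row of shFilters
                // (one filter color, 16 filters wide) per iteration below; the divisibility
                // guard only does work when colorsPerThread*B_Y is not a multiple of
                // B_X*B_Y/16, otherwise it is resolved at compile time.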
#pragma unroll
for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/16) {
if ((colorsPerThread*B_Y) % (B_X*B_Y/16) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X];
}
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
}
}
}
}
}
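/*
 * Illustrative sketch, added for exposition (not part of the original kernels):
 * the launch shape that _imgActs below uses for conv_img_acts_manycolor when
 * numFilterColors % 8 == 0. Assumes a single group (numGroups == 1); the helper
 * name is hypothetical.
 */
static inline void exampleManycolorLaunch(int numImages, int numImgColors,
                                          int imgSizeY, int imgSizeX,
                                          dim3& blocks, dim3& threads) {
    threads = dim3(32, 4); // B_X = 32 cases per row, B_Y = 4 color rows
    const int colorsPerThread = numImgColors % 16 == 0 ? 4 : 2;
    const int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
    // blockIdx.x encodes both the image batch and the color batch;
    // blockIdx.y is one pixel of the target image.
    blocks = dim3(DIVUP(numImages, threads.x * imgsPerThread) * (numImgColors / (threads.y * colorsPerThread)),
                  imgSizeY * imgSizeX);
}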
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/colorsPerThread
* blockIdx.y determines 4x4 image region in target image, also sample
* In essence, blockIdx.y.x = 1..numRegions
* blockIdx.y.y = 1..overSample
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* overSample := numFilterColors*numGroups/numImgColors
* ^ this is the number of groups that each color channel is connected to
*
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgSizeY, imgSizeX, numImages)
*
* colorIndices: (numGroups, numFilterColors)
*
 * Each block reconstructs one 4x4 region of pixels for 16*imgsPerThread cases.
*
* numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
* numFilterColors must be divisible by colorsPerThread.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are 4-16 color channels.
*/
template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_mediumcolor_sparse_rand(const float* hidActs, const float* filters, float* targets, int* colorIndices,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
const int numImgColors, const int numFilterColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*16][16 + 1];
__shared__ float shHidActs[16][16*imgsPerThread];
__shared__ int shColors[colorsPerThread]; // not really necessary -- can repurpose the other shmems
const int numImgBlocks = DIVUP(numImages,16*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread;
const int numRegionsX = DIVUP(imgSizeX, 4);
const int numRegions = numRegionsX * numRegionsX;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int overSample = gridDim.y / numRegions;
const int blockSample = blockIdx.y / numRegions;
const int groupsPerSample = numGroups / overSample;
const int blockGroupIdx = imgColorIdx / numFilterColors + blockSample * groupsPerSample;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockRegionIdx = blockIdx.y % numRegions;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = blockRegionIdx / numRegionsX;
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxY * imgSizeX + pxX;
const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX;
    const int numModules = numModulesY * numModulesX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModules + loadX;
filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x;
targets += blockSample * numImgColors * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
if (tidx < colorsPerThread) {
shColors[tidx] = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * numImages;
}
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;
            for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * 16; i += 32) {
if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[shColors[c] + i * 16] = scaleTargets * targets[shColors[c] + i * 16] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[shColors[c] + i * 16] = scaleOutputs * prod[c][i];
}
}
}
}
}
}
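/*
 * Illustrative sketch, added for exposition (not part of the original kernels):
 * the blockIdx.y decomposition that img_acts_mediumcolor_sparse_rand above expects.
 * The (unshown) caller is assumed to launch overSample * numRegions blocks along y,
 * with overSample = numFilterColors*numGroups/numImgColors; note that the kernel
 * computes numRegions = numRegionsX * numRegionsX, i.e. it assumes a square image.
 */
static inline int exampleSparseMediumcolorGridY(int imgSizeX, int numImgColors,
                                                int numFilterColors, int numGroups) {
    const int numRegionsX = DIVUP(imgSizeX, 4);
    const int numRegions = numRegionsX * numRegionsX;
    const int overSample = (numFilterColors * numGroups) / numImgColors;
    return overSample * numRegions; // gridDim.y; blockIdx.y = sample * numRegions + region
}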
/*
* Block size: B_YxB_X.
* blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
* blockIdx.y determines image pixel in target image, sample idx.
* In essence, blockIdx.y.x = 1..imgPixels
* blockIdx.y.y = 1..overSample
*
* threadIdx.x determines case.
* threadIdx.y determines color.
*
* overSample := numFilterColors*numGroups/numImgColors
* ^ this is the number of groups that each color channel is connected to
*
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgSizeY, imgSizeX, numImages)
*
* colorIndices: (numGroups, numFilterColors)
*
 * Each block reconstructs B_Y*colorsPerThread colors of one pixel for B_X*imgsPerThread cases.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
* numFiltersPerGroup must be divisible by 16.
* numFilterColors*numGroups must be divisible by numImgColors.
*
* B_X * imgsPerThread must be divisible by 32.
* numFilterColors must be divisible by B_Y*colorsPerThread.
* B_X*B_Y must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are >= 16 color channels.
*/
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_manycolor_sparse_rand(const float* hidActs, const float* filters, float* targets, int* colorIndices,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
const int numImgColors, const int numFilterColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*B_Y][16 + 1]; // TODO: perhaps reconsider this 16
__shared__ float shHidActs[16][B_X*imgsPerThread];
__shared__ int shColors[colorsPerThread * B_Y]; // not really necessary -- can repurpose the other shmems
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16;
const int numModules = numModulesY * numModulesX;
const int overSample = gridDim.y / imgPixels;
const int blockSample = blockIdx.y / imgPixels;
const int groupsPerSample = numGroups / overSample;
// const int overSample = (numFilterColors * numGroups) / numImgColors;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y * colorsPerThread; // color idx globally
const int blockGroupIdx = imgColorIdx / numFilterColors + blockSample * groupsPerSample;
// const int filterColorsPerSample = numFilterColors / overSample;
const int blockPixelIdx = blockIdx.y % imgPixels;
const int blockPixelIdxX = blockPixelIdx % imgSizeX;
const int blockPixelIdxY = blockPixelIdx / imgSizeX;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
targets += blockSample * numImgColors * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
if (tidx < colorsPerThread * B_Y) {
shColors[tidx] = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * numImages;
}
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * B_X; i += 32) {
if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
#pragma unroll
                        for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*B_X cols, (B_X*B_Y/32) * 32 elements at a time.
shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
                        for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*B_X cols, (B_X*B_Y/32) * 32 elements at a time.
shHidActLoad[j * B_X * imgsPerThread + i] = 0;
}
}
}
const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
#pragma unroll
for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/16) {
if ((colorsPerThread*B_Y) % (B_X*B_Y/16) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X];
}
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[shColors[c * B_Y + threadIdx.y] + i * B_X] = scaleTargets * targets[shColors[c * B_Y + threadIdx.y] + i * B_X] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[shColors[c * B_Y + threadIdx.y] + i * B_X] = scaleOutputs * prod[c][i];
}
}
}
}
}
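/*
 * Illustrative sketch, added for exposition (not part of the original kernels):
 * the blockIdx.y decomposition that img_acts_manycolor_sparse_rand above expects.
 * The (unshown) caller is assumed to launch overSample * imgPixels blocks along y;
 * the kernel then recovers the sample index and the target pixel from blockIdx.y.
 */
static inline int exampleSparseManycolorGridY(int imgSizeY, int imgSizeX,
                                              int numImgColors, int numFilterColors, int numGroups) {
    const int imgPixels = imgSizeY * imgSizeX;
    const int overSample = (numFilterColors * numGroups) / numImgColors;
    return overSample * imgPixels; // gridDim.y; blockIdx.y = sample * imgPixels + pixel
}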
/*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgPixels, numImages)
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
void _imgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
int numFilterColors = numImgColors / numGroups;
int numImages = hidActs.getNumCols();
int numFilters = filters.getNumCols();
int numModules = hidActs.getNumRows() / numFilters;
int filterModuleMult = conv ? 1 : numModules;
int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
    int filterSize = (int)sqrt((double)filterPixels);
int imgPixels = imgSizeY * imgSizeX;
int numModulesX = numModules / numModulesY;
assert(numImgColors % numGroups == 0);
assert(numFilters % (16*numGroups) == 0);
assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
assert(numGroups == 1 || numFilterColors % 4 == 0);
assert(filterPixels == filterSize * filterSize);
assert(hidActs.getNumRows() == numModules * numFilters);
assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
assert(numModules == numModulesY * numModulesX);
assert(hidActs.isContiguous());
assert(filters.isContiguous());
assert(!hidActs.isTrans());
assert(!filters.isTrans());
assert(!targets.isTrans());
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0);
// assert changed into if statement by Ian Goodfellow
if (paddingStart + (numModulesX-1)*moduleStride + filterSize < imgSizeX)
{
printf("imgSizeX: %d\n", imgSizeX);
printf("Bound on image size: %d\n", paddingStart + (numModulesX-1)*moduleStride+filterSize);
printf("paddingStart: %d\n", paddingStart);
printf("numModulesX: %d\n", numModulesX);
printf("moduleStride: %d\n", moduleStride);
printf("filterSize: %d\n", filterSize);
assert(false);
}
assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
assert(moduleStride <= filterSize);
assert(targets.isContiguous()); // no stride support here!
dim3 blocks;
dim3 threads(16,16);
int colorsPerThread;
int imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
if (numFilterColors % 8 == 0) {
threads = dim3(32, 4);
colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2;
imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
assert(numFilterColors % (threads.y * colorsPerThread) == 0);
blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels);
} else if (numFilterColors > 3) {
colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread) * (numImgColors / colorsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
} else {
blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
}
bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
if (scaleTargets == 0) { // do not scale or use targets matrix
targets.resize(numImgColors*imgPixels, numImages);
} else {
assert(targets.getNumRows() == numImgColors * imgPixels);
assert(targets.getNumCols() == numImages);
}
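    // The branches below select a template instantiation: conv_img_acts_manycolor when
    // numFilterColors is divisible by 8, img_acts_mediumcolor when numFilterColors > 3,
    // and img_acts_color for 1-3 colors, each specialized on imgsPerThread,
    // colorsPerThread, scaling and case-bounds checking.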
if (conv) { // convolutional units
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else if (numFilterColors > 3) {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<4, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<4, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<4, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<4, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<4, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<4, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<4, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<4, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<2, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<2, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<2, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<2, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<2, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<2, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<2, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<2, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<4, 1, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 1, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<4, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<4, 3, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 3, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<4, 1, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 1, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<4, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<4, 3, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 3, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<2, 1, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 1, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<2, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<2, 3, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 3, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<2, 1, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 1, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<2, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<2, 3, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 3, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else if (numFilterColors > 3) {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<4, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<4, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<4, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<4, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<4, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<4, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<4, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<4, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<2, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<2, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<2, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<2, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<2, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<2, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<2, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<2, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<4, 1, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 1, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<4, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<4, 3, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 3, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<4, 1, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 1, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<4, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<4, 3, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 3, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<2, 1, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 1, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<2, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<2, 3, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 3, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<2, 1, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 1, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<2, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<2, 3, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 3, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
} else { // local, unshared units
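        // Reading the instantiations in this dispatch (inferred from the surrounding branch
        // conditions, not from the kernel definitions): for conv_img_acts_manycolor the template
        // arguments track <blockDim.y, blockDim.x, imgsPerThread, colorsPerThread, scale,
        // checkCaseBounds, conv>; for img_acts_mediumcolor and img_acts_color they track
        // <imgsPerThread, colorsPerThread (or numColors), scale, checkCaseBounds, conv>.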
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else if (numFilterColors > 3) {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<4, 4, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<4, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<4, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<4, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<4, 4, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<4, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<4, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<4, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<2, 4, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<2, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<2, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<2, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<2, 4, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<2, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<2, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<2, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<4, 1, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 1, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<4, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<4, 3, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 3, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<4, 1, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 1, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<4, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<4, 3, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 3, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<2, 1, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 1, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<2, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<2, 3, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 3, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<2, 1, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 1, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<2, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<2, 3, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 3, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 4, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 2, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( conv_img_acts_manycolor<4, 32, 1, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else if (numFilterColors > 3) {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<8, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<4, 4, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<4, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<4, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<4, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<4, 4, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<4, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<4, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<4, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<2, 4, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<2, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<2, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<2, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor<2, 4, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<2, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor<2, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor<2, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<8, 1, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 1, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<8, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<8, 3, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<8, 3, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<4, 1, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 1, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<4, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<4, 3, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 3, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<4, 1, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 1, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<4, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<4, 3, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<4, 3, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<2, 1, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 1, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<2, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<2, 3, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 3, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
hipFuncSetCacheConfig(img_acts_color<2, 1, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 1, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
hipFuncSetCacheConfig(img_acts_color<2, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
hipFuncSetCacheConfig(img_acts_color<2, 3, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_color<2, 3, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
}
cutilCheckMsg("imgActs: kernel execution failed");
}
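/*
 * Convenience wrappers around _imgActs (above). The shorter overloads default to
 * scaleTargets = 0 (overwrite targets) and scaleOutput = 1; the conv* variants pass
 * conv = true (filters shared across modules), the local* variants pass conv = false
 * (per-module filters, matching the "otherwise" filter layout documented below).
 *
 * Sketch of a typical call (variable names hypothetical):
 *
 *     NVMatrix hidActs, filters, targets;
 *     // ... fill hidActs and filters ...
 *     convImgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY,
 *                 paddingStart, moduleStride, numImgColors, numGroups);
 */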
void convImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
_imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, true);
}
void convImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput) {
_imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, true);
}
void localImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
_imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, false);
}
void localImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput) {
_imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, false);
}
/*
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgSizeY, imgSizeX, numImages)
* colorIndices: (numGroups, numFilterColors)
*
* where overSample := (numFilterColors * numGroups) / numImgColors
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
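/*
 * Worked example (hypothetical sizes): numImgColors = 32, numFilterColors = 16,
 * numGroups = 4 gives overSample = (16 * 4) / 32 = 2, so targets is resized below to
 * overSample * numImgColors * imgPixels = 2 * 32 * imgSizeY * imgSizeX rows by
 * numImages columns.
 */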
void _imgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride,
int numImgColors, int numFilterColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
int numImages = hidActs.getNumCols();
int numFilters = filters.getNumCols();
// int numFiltersPerGroup = numFilters / numGroups;
int numModules = hidActs.getNumRows() / numFilters;
int filterModuleMult = conv ? 1 : numModules;
int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
int filterSize = sqrt((double)filterPixels);
int imgPixels = imgSizeY * imgSizeX;
int numModulesX = numModules / numModulesY;
int overSample = (numFilterColors * numGroups) / numImgColors;
assert(numImgColors % numFilterColors == 0);
assert(numFilters % (16*numGroups) == 0);
assert((numFilterColors * numGroups) % numImgColors == 0);
assert(numGroups > 1);
assert(numFilterColors > 3 && numFilterColors % 2 == 0);
assert(filterPixels == filterSize * filterSize);
assert(hidActs.getNumRows() == numModules * numFilters);
assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
assert(numModules == numModulesY * numModulesX);
assert(hidActs.isContiguous());
assert(filters.isContiguous());
assert(!hidActs.isTrans());
assert(!filters.isTrans());
assert(!targets.isTrans());
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0);
assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
assert(moduleStride <= filterSize);
assert(targets.isContiguous()); // no stride support here!
dim3 blocks;
dim3 threads;
int colorsPerThread;
int imgsPerThread;
if (numFilterColors % 8 == 0) {
threads = dim3(32, 4);
colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2;
imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
assert(numFilterColors % (threads.y * colorsPerThread) == 0);
blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), overSample * imgPixels);
} else if (numFilterColors > 3) {
imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
threads = dim3(16, 16);
colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
blocks = dim3(DIVUP(numImages,16*imgsPerThread) * (numImgColors / colorsPerThread), overSample * DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
}
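// Worked example of the geometry chosen above (hypothetical values, for illustration
// only): with numFilterColors = 16, numImgColors = 48, numGroups = 6 and
// numImages = 128, the first branch gives threads = dim3(32, 4), colorsPerThread = 4
// and imgsPerThread = 4, so each block covers 32*4 = 128 cases and 4*4 = 16 colors:
//
//     blocks.x = DIVUP(128, 32 * 4) * (48 / (4 * 4));   // 1 * 3 = 3
//     blocks.y = overSample * imgPixels;                // 2 * imgSizeY * imgSizeX
//
// and checkCaseBounds below is false because 128 % (32 * 4) == 0.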
bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
if (scaleTargets == 0) { // do not scale or use targets matrix
targets.resize(overSample*numImgColors*imgPixels, numImages);
} else {
assert(targets.getNumRows() == overSample * numImgColors * imgPixels);
assert(targets.getNumCols() == numImages);
}
if (conv) {
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else if (numFilterColors > 3) {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 4, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, false, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 2, false, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 4, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, false, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 2, false, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else if (numFilterColors > 3) {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 4, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, true, true, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 2, true, true, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 4, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, true, false, true>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 2, true, false, true>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
}
} else {
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else if (numFilterColors > 3) {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 4, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, false, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 2, false, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 4, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, false, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 2, false, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else if (numFilterColors > 3) {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<8, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<4, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 4, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, true, true, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 2, true, true, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 4, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
hipFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, true, false, false>, hipFuncCachePreferShared);
hipLaunchKernelGGL(( img_acts_mediumcolor_sparse_rand<2, 2, true, false, false>), dim3(blocks), dim3(threads), 0, 0, hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
}
}
cutilCheckMsg("imgActsSparse: kernel execution failed");
}
void convImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups) {
_imgActsSparse(hidActs, filters, targets, dColorIndices, imgSizeY, imgSizeX, numModulesY, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, 0, 1, true);
}
void convImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups,
float scaleTargets, float scaleOutput) {
_imgActsSparse(hidActs, filters, targets, dColorIndices, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride,
numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput, true);
}
void localImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups) {
_imgActsSparse(hidActs, filters, targets, dColorIndices, imgSizeY, imgSizeX, numModulesY, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, 0, 1, false);
}
void localImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups,
float scaleTargets, float scaleOutput) {
_imgActsSparse(hidActs, filters, targets, dColorIndices, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride,
numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput, false);
}
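/*
 * Minimal usage sketch (assumptions only: the matrices, the dColorIndices buffer and
 * the concrete sizes below are hypothetical and must be set up by the caller):
 *
 *     // hidActs, filters, targets are NVMatrix objects laid out as documented above;
 *     // dColorIndices holds numGroups * numFilterColors device-side color indices.
 *     convImgActsSparse(hidActs, filters, targets, dColorIndices,
 *                       32, 32, numModulesY, -2, 1,
 *                       48, 16, 6);                    // overwrite targets
 *     convImgActsSparse(hidActs, filters, targets, dColorIndices,
 *                       32, 32, numModulesY, -2, 1,
 *                       48, 16, 6, 1, 1);              // accumulate into targets
 */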
| cc6936e5fc1ace1df1556b663e063d684ffee132.cu | /*
* Copyright (c) 2011, Alex Krizhevsky ([email protected])
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _CUDACONV2_EXPORT
#define _CUDACONV2_EXPORT
#endif
#include <cudaconv2.cuh>
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread.
* blockIdx.y determines 4x4 image region in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
* filters: (numColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numColors, filterPixels, numFilters) otherwise
* targets: (numColors, imgSizeY, imgSizeX, numImages)
*
* Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
*
* Number of filters must be divisible by 16.
* Number of images must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*/
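/*
 * Launch-geometry sketch implied by the comment above (assumed values, shown for
 * illustration rather than copied from a caller): with imgsPerThread = 4 and a
 * 32x32 image,
 *
 *     dim3 threads(16, 16);                       // 16 cases x 16 pixels per block
 *     dim3 blocks(DIVUP(numImages, 16 * 4),       // batches of 64 cases
 *                 DIVUP(32, 4) * DIVUP(32, 4));   // 64 4x4 pixel regions
 *
 * Each block then reconstructs one 4x4 pixel region for its 64 cases.
 */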
template <int imgsPerThread, int numColors, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_color(const float* hidActs, const float* filters, float* targets,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX,
const int paddingStart, const int moduleStride,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[numColors*16][16 + 1];
__shared__ float shHidActs[16][16*imgsPerThread];
const int blockCaseIdx = blockIdx.x * 16*imgsPerThread;
const int numRegionsX = DIVUP(imgSizeX, 4);
const int blockRegionIdx = blockIdx.y;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = blockRegionIdx / numRegionsX;
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxY * imgSizeX + pxX;
const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX;
const int numModules = numModulesY * numModulesX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeX * imgSizeY;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
hidActs += blockCaseIdx + loadY * numImages * numModules + loadX;
filters += threadIdx.x;
targets += pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[numColors][imgsPerThread];
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
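// The module range above covers exactly the filter applications that can touch this
// 4x4 region: module my spans image rows paddingStart + my*moduleStride ..
// paddingStart + my*moduleStride + filterSize - 1, so startY is the first my whose
// bottom row reaches blockRegionTop (0 if module 0 already does), and endY is one
// past the last my whose top row is still <= blockRegionTop + 3; startX/endX are the
// same computation in x.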
float* shilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;
for (int f = 0; f < numFilters; f += 16) { // multiply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * 16; i += 32) {
if (!checkCaseBounds || blockCaseIdx + i + loadX < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
: &filters[(moduleIdx * numColors * filterPixels + pxIdxInModule) * numFilters + f];
#pragma unroll
for (int c = 0; c < numColors; c++) {
shilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
}
}
}
}
}
}
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/colorsPerThread
* blockIdx.y determines 4x4 image region in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgSizeY, imgSizeX, numImages)
*
* Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
*
* numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
* numImageColors/numGroups must be divisible by colorsPerThread.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are 4-16 color channels.
*/
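/*
 * Illustrative decoding of blockIdx.x (hypothetical numbers): with numImages = 192,
 * imgsPerThread = 4 and colorsPerThread = 4, numImgBlocks = DIVUP(192, 16*4) = 3, so
 * blockIdx.x = 7 gives blockCaseIdx = (7 % 3) * 64 = 64 and
 * imgColorIdx = (7 / 3) * 4 = 8, i.e. this block writes image colors 8..11 for
 * cases 64..127; the group is then blockGroupIdx = imgColorIdx / numFilterColors.
 */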
template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_mediumcolor(const float* hidActs, const float* filters, float* targets,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart,
const int moduleStride, const int numImgColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*16][16 + 1];
__shared__ float shHidActs[16][16*imgsPerThread];
const int numImgBlocks = DIVUP(numImages,16*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int numRegionsX = DIVUP(imgSizeX, 4);
const int blockRegionIdx = blockIdx.y;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = blockRegionIdx / numRegionsX;
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxY * imgSizeX + pxX;
const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX;
const uint numModules = numModulesY * numModulesX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModules + loadX;
filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x;
targets += imgColorIdx * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * 16; i += 32) {
if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleTargets * targets[c * imgPixels * numImages + i * 16] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * imgPixels * numImages + i * 16] = scaleOutputs * prod[c][i];
}
}
}
}
}
}
/*
* Block size: B_YxB_X.
* blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
* blockIdx.y determines image pixel in target image.
*
* threadIdx.x determines case.
* threadIdx.y determines color.
*
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise
* targets: (numImageColors, imgSizeY, imgSizeX, numImages)
*
* Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
* numFiltersPerGroup must be divisible by 16.
*
* B_X * imgsPerThread must be divisible by 32.
* numFilterColors must be divisible by B_Y*colorsPerThread.
* B_X*B_Y must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are >= 16 color channels.
*/
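/*
 * Illustrative loader indexing for the B_Y = 4, B_X = 32 configuration used by
 * _imgActs below: the 128 threads are re-linearized as tidx = threadIdx.y*32 +
 * threadIdx.x and split two ways -- hidActLoadY = tidx/32, hidActLoadX = tidx%32
 * (4 rows of 32 threads for coalesced 32-wide hidActs loads) and
 * filtersLoadY = tidx/16, filtersLoadX = tidx%16 (8 rows of 16 threads for the
 * 16-filters-at-a-time weight loads into shFilters).
 */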
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void conv_img_acts_manycolor(const float* hidActs, const float* filters, float* targets,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
const int numImgColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*B_Y][16 + 1]; // TODO: perhaps reconsider this 16
__shared__ float shHidActs[16][B_X*imgsPerThread];
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y*colorsPerThread; // color idx globally
const int numFilterColors = numImgColors / numGroups;
const int blockGroupIdx = imgColorIdx / numFilterColors;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockPixelIdx = blockIdx.y;
const int blockPixelIdxX = blockPixelIdx % imgSizeX;
const int blockPixelIdxY = blockPixelIdx / imgSizeX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16;
const int numModules = numModulesY * numModulesX;
hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
targets += (imgColorIdx + threadIdx.y) * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * B_X; i += 32) {
if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*B_X cols, B_X*B_Y/32 rows of 32 elements at a time.
shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*B_X cols, B_X*B_Y/32 rows of 32 elements at a time.
shHidActLoad[j * B_X * imgsPerThread + i] = 0;
}
}
}
const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
#pragma unroll
for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/16) {
if ((colorsPerThread*B_Y) % (B_X*B_Y/16) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X];
}
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleTargets * targets[c * B_Y * imgPixels * numImages + i * B_X] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[c * B_Y * imgPixels * numImages + i * B_X] = scaleOutputs * prod[c][i];
}
}
}
}
}
/*
* Block size: 16x16.
* blockIdx.x determines case in batches of 16*imgsPerThread, also color in batches of colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(16*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/colorsPerThread
* blockIdx.y determines 4x4 image region in target image, also sample
* In essence, blockIdx.y.x = 1..numRegions
* blockIdx.y.y = 1..overSample
*
* threadIdx.x determines case.
* threadIdx.y determines pixel.
*
* overSample := numFilterColors*numGroups/numImgColors
* ^ this is the number of groups that each color channel is connected to
*
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgSizeY, imgSizeX, numImages)
*
* colorIndices: (numGroups, numFilterColors)
*
* Each block reconstructs one 4x4 pixels from 16*imgsPerThread cases.
*
* numImages must be divisible by 16*imgsPerThread if checkCaseBounds is false.
* 16 * imgsPerThread must be divisible by 32.
* numFilterColors must be divisible by colorsPerThread.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are 4-16 color channels.
*/
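/*
 * Illustrative decoding of blockIdx.y (hypothetical numbers): with imgSizeY =
 * imgSizeX = 16 there are numRegions = 4*4 = 16 regions; if the launch uses
 * gridDim.y = 32 then overSample = 32/16 = 2, blockSample = blockIdx.y/16 picks
 * which sample this block writes (targets is offset by
 * blockSample * numImgColors * imgPixels * numImages) and blockIdx.y % 16 picks the
 * 4x4 region, exactly as in the non-sparse kernel above.
 */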
template <int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_mediumcolor_sparse_rand(const float* hidActs, const float* filters, float* targets, int* colorIndices,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
const int numImgColors, const int numFilterColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*16][16 + 1];
__shared__ float shHidActs[16][16*imgsPerThread];
__shared__ int shColors[colorsPerThread]; // not really necessary -- can repurpose the other shmems
const int numImgBlocks = DIVUP(numImages,16*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * 16*imgsPerThread;
const int numRegionsX = DIVUP(imgSizeX, 4);
const int numRegions = numRegionsX * numRegionsX;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * colorsPerThread; // color idx globally
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int overSample = gridDim.y / numRegions;
const int blockSample = blockIdx.y / numRegions;
const int groupsPerSample = numGroups / overSample;
const int blockGroupIdx = imgColorIdx / numFilterColors + blockSample * groupsPerSample;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
const int blockRegionIdx = blockIdx.y % numRegions;
const int blockRegionIdxX = blockRegionIdx % numRegionsX;
const int blockRegionIdxY = blockRegionIdx / numRegionsX;
const int blockRegionLeft = blockRegionIdxX * 4;
const int blockRegionTop = blockRegionIdxY * 4;
const int pxYInRegion = threadIdx.y / 4, pxXInRegion = threadIdx.y % 4;
const int pxY = blockRegionTop + pxYInRegion;
const int pxX = blockRegionLeft + pxXInRegion;
const int pxIdx = pxY * imgSizeX + pxX;
const bool isPxInImg = pxY < imgSizeY && pxX < imgSizeX;
const uint numModules = numModulesY * numModulesX;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int tidx = threadIdx.y * 16 + threadIdx.x;
const int loadY = tidx / 32, loadX = tidx % 32;
hidActs += blockCaseIdx + (blockFilterIdx + loadY) * numImages * numModules + loadX;
filters += blockFilterIdx + filterColorIdx * filterPixels * numFilters + threadIdx.x;
targets += blockSample * numImgColors * imgPixels * numImages + pxIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockRegionTop - paddingStart < filterSize ? 0
: 1 + (blockRegionTop - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockRegionTop + 3 - paddingStart) / moduleStride);
const int startX = blockRegionLeft - paddingStart < filterSize ? 0
: 1 + (blockRegionLeft - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockRegionLeft + 3 - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[threadIdx.y][threadIdx.x];
float* shHidActLoad = &shHidActs[loadY][loadX];
if (tidx < colorsPerThread) {
shColors[tidx] = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * numImages;
}
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInModuleY = pxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInModuleX = pxX - moduleLeft;
const bool isPxInModule = pxInModuleY >= 0 && pxInModuleY < filterSize && pxInModuleX >= 0 && pxInModuleX < filterSize;
const int pxIdxInModule = pxInModuleY * filterSize + pxInModuleX;
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
// Now the threads split up into half-warps, and each half-warp decides if it's interested.
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * 16; i += 32) {
if (!checkCaseBounds || blockCaseIdx + loadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += 8) { // load 16 rows of imgsPerThread*16 cols, 8 * 32 elements at a time.
shHidActLoad[j * 16 * imgsPerThread + i] = 0;
}
}
}
if (isPxInImg && isPxInModule) {
// This half-warp is interested, so it's going to load the weights from this module to its pixel.
// Not fully coalesced read :(
// But taking out this read entirely only reduces the runtime by ~2.8%, so it isn't costing me much.
const float* fLoad = conv ? &filters[pxIdxInModule * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInModule * numFilters + f];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
shFilterLoad[c * 16 * (16 + 1)] = fLoad[c * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
if (isPxInImg && isPxInModule) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[threadIdx.y + c * 16][w] * shHidActs[w][threadIdx.x + i * 16];
}
}
}
}
__syncthreads();
}
}
}
// Not fully coalesced write :(... shmem (and fully coalesced) version is actually slightly slower, though
if (isPxInImg) {
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[shColors[c] + i * 16] = scaleTargets * targets[shColors[c] + i * 16] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * 16 < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[shColors[c] + i * 16] = scaleOutputs * prod[c][i];
}
}
}
}
}
}
/*
* Block size: B_YxB_X.
* blockIdx.x determines case in batches of B_X*imgsPerThread, also color in batches of B_Y*colorsPerThread.
* In essence, blockIdx.x.x = 1..numImages/(B_X*imgsPerThread)
* blockIdx.x.y = 1..numImgColors/(B_Y*colorsPerThread)
* blockIdx.y determines image pixel in target image, sample idx.
* In essence, blockIdx.y.x = 1..imgPixels
* blockIdx.y.y = 1..overSample
*
* threadIdx.x determines case.
* threadIdx.y determines color.
*
* overSample := numFilterColors*numGroups/numImgColors
* ^ this is the number of groups that each color channel is connected to
*
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgSizeY, imgSizeX, numImages)
*
* colorIndices: (numGroups, numFilterColors)
*
* Each block reconstructs one B_Y*colorsPerThread colors from 1 pixel from B_X*imgsPerThread cases.
*
* numImages must be divisible by B_X*imgsPerThread if checkCaseBounds is false.
* numFiltersPerGroup must be divisible by 16.
* numFilterColors*numGroups must be divisible by numImgColors.
*
* B_X * imgsPerThread must be divisible by 32.
* numFilterColors must be divisible by B_Y*colorsPerThread.
* B_X*B_Y must be divisible by 32.
*
* This version loads 32 cases at a time, so it gets full coalescing on that load.
* It only loads 16 weights at a time, so those aren't fully coalesced.
* This version conserves shared memory by loading 16 filters at a time rather than 32.
*
* To be used when there are >= 16 color channels.
*/
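/*
 * Note on the shColors table used below: each of the B_Y*colorsPerThread filter
 * colors handled by this block is mapped through colorIndices to an image color
 * channel, and the lookup is pre-multiplied by imgPixels * numImages so that
 * shColors[c] is directly the row offset of that channel within targets; the final
 * writes are then targets[shColors[c * B_Y + threadIdx.y] + i * B_X].
 */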
template <int B_Y, int B_X, int imgsPerThread, int colorsPerThread, bool scale, bool checkCaseBounds, bool conv>
__global__ void img_acts_manycolor_sparse_rand(const float* hidActs, const float* filters, float* targets, int* colorIndices,
const int numModulesY, const int numModulesX, const int numImages, const int numFilters,
const int filterSize, const int imgSizeY, const int imgSizeX, const int paddingStart, const int moduleStride,
const int numImgColors, const int numFilterColors, const int numGroups,
const float scaleTargets, const float scaleOutputs) {
__shared__ float shFilters[colorsPerThread*B_Y][16 + 1]; // TODO: perhaps reconsider this 16
__shared__ float shHidActs[16][B_X*imgsPerThread];
__shared__ int shColors[colorsPerThread * B_Y]; // not really necessary -- can repurpose the other shmems
const int numImgBlocks = DIVUP(numImages,B_X*imgsPerThread);
const int blockCaseIdx = (blockIdx.x % numImgBlocks) * B_X*imgsPerThread;
const int filterPixels = filterSize * filterSize;
const int imgPixels = imgSizeY * imgSizeX;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int hidActLoadY = tidx / 32, hidActLoadX = tidx % 32;
const int filtersLoadY = tidx / 16, filtersLoadX = tidx % 16;
const int numModules = numModulesY * numModulesX;
const int overSample = gridDim.y / imgPixels;
const int blockSample = blockIdx.y / imgPixels;
const int groupsPerSample = numGroups / overSample;
// const int overSample = (numFilterColors * numGroups) / numImgColors;
const int imgColorIdx = (blockIdx.x / numImgBlocks) * B_Y * colorsPerThread; // color idx globally
const int blockGroupIdx = imgColorIdx / numFilterColors + blockSample * groupsPerSample;
// const int filterColorsPerSample = numFilterColors / overSample;
const int blockPixelIdx = blockIdx.y % imgPixels;
const int blockPixelIdxX = blockPixelIdx % imgSizeX;
const int blockPixelIdxY = blockPixelIdx / imgSizeX;
const int filterColorIdx = imgColorIdx % numFilterColors; // color idx within group
const int numFiltersPerGroup = numFilters / numGroups;
const int blockFilterIdx = blockGroupIdx * numFiltersPerGroup;
hidActs += blockCaseIdx + (blockFilterIdx + hidActLoadY) * numImages * numModules + hidActLoadX;
filters += blockFilterIdx + (filterColorIdx + filtersLoadY) * filterPixels * numFilters + filtersLoadX;
targets += blockSample * numImgColors * imgPixels * numImages + blockPixelIdx * numImages + blockCaseIdx + threadIdx.x;
float prod[colorsPerThread][imgsPerThread];
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] = 0;
}
}
const int startY = blockPixelIdxY - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxY - paddingStart - filterSize) / moduleStride;
const int endY = MIN(numModulesY, 1 + (blockPixelIdxY - paddingStart) / moduleStride);
const int startX = blockPixelIdxX - paddingStart < filterSize ? 0
: 1 + (blockPixelIdxX - paddingStart - filterSize) / moduleStride;
const int endX = MIN(numModulesX, 1 + (blockPixelIdxX - paddingStart) / moduleStride);
float* shFilterLoad = &shFilters[filtersLoadY][filtersLoadX];
float* shHidActLoad = &shHidActs[hidActLoadY][hidActLoadX];
if (tidx < colorsPerThread * B_Y) {
shColors[tidx] = colorIndices[blockGroupIdx * numFilterColors + filterColorIdx + tidx] * imgPixels * numImages;
}
for (int my = startY; my < endY; my++) {
const int moduleTop = paddingStart + my * moduleStride;
const int pxInFilterY = blockPixelIdxY - moduleTop;
for (int mx = startX; mx < endX; mx++) {
const int moduleIdx = my * numModulesX + mx;
const int moduleLeft = paddingStart + mx * moduleStride;
const int pxInFilterX = blockPixelIdxX - moduleLeft;
const int pxIdxInFilter = pxInFilterY * filterSize + pxInFilterX;
for (int f = 0; f < numFiltersPerGroup; f += 16) { // multiply with 16 filters at a time
const float* hLoad = &hidActs[(moduleIdx + f * numModules) * numImages];
#pragma unroll
for (int i = 0; i < imgsPerThread * B_X; i += 32) {
if (!checkCaseBounds || blockCaseIdx + hidActLoadX + i < numImages) {
#pragma unroll
for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*B_X cols, B_X*B_Y/32 rows of 32 elements at a time.
shHidActLoad[j * B_X * imgsPerThread + i] = hLoad[j * numModules * numImages + i];
}
} else {
#pragma unroll
for (int j = 0; j < 16; j += B_X*B_Y/32) { // load 16 rows of imgsPerThread*B_X cols, B_X*B_Y/32 rows of 32 elements at a time.
shHidActLoad[j * B_X * imgsPerThread + i] = 0;
}
}
}
const float* fLoad = conv ? &filters[pxIdxInFilter * numFilters + f]
: &filters[moduleIdx * numFilterColors * filterPixels * numFilters + pxIdxInFilter * numFilters + f];
#pragma unroll
for (int i = 0; i < colorsPerThread*B_Y; i+= B_X*B_Y/16) {
if ((colorsPerThread*B_Y) % (B_X*B_Y/16) == 0 || i + filtersLoadY < colorsPerThread*B_Y) {
shFilterLoad[i * (16 + 1)] = fLoad[i * filterPixels * numFilters];
}
}
__syncthreads();
// Do some actual computation
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
#pragma unroll
for (int w = 0; w < 16; w++) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
prod[c][i] += shFilters[c * B_Y + threadIdx.y][w] * shHidActs[w][threadIdx.x + i * B_X];
}
}
}
__syncthreads();
}
}
}
if (scale) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[shColors[c * B_Y + threadIdx.y] + i * B_X] = scaleTargets * targets[shColors[c * B_Y + threadIdx.y] + i * B_X] + scaleOutputs * prod[c][i];
}
}
}
} else {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkCaseBounds || blockCaseIdx + threadIdx.x + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorsPerThread; c++) {
targets[shColors[c * B_Y + threadIdx.y] + i * B_X] = scaleOutputs * prod[c][i];
}
}
}
}
}
/*
* hidActs: (numFilters, numModules, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModules, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgPixels, numImages)
*
* Note: all of these convolution routines are optimized for the case when
* the number of images (i.e. the minibatch size) is a multiple of 128.
* Other batch sizes will work, but I made no attempt whatsoever
* to make them work fast.
*/
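/*
 * Illustrative shapes (hypothetical layer, not taken from this file): a convolutional
 * layer over 32x32 RGB images with 5x5 filters, stride 1, no padding and 64 filters
 * has numModulesY = numModulesX = 28, so hidActs is (64*28*28, numImages) and
 * filters is (3*5*5, 64); the code below recovers numModules = 784 as
 * hidActs.getNumRows()/numFilters and filterSize = 5 as
 * sqrt(filters.getNumRows()/numFilterColors).
 */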
void _imgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
int numFilterColors = numImgColors / numGroups;
int numImages = hidActs.getNumCols();
int numFilters = filters.getNumCols();
int numModules = hidActs.getNumRows() / numFilters;
int filterModuleMult = conv ? 1 : numModules;
int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
int filterSize = sqrt((double)filterPixels);
int imgPixels = imgSizeY * imgSizeX;
int numModulesX = numModules / numModulesY;
assert(numImgColors % numGroups == 0);
assert(numFilters % (16*numGroups) == 0);
assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
assert(numGroups == 1 || numFilterColors % 4 == 0);
assert(filterPixels == filterSize * filterSize);
assert(hidActs.getNumRows() == numModules * numFilters);
assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
assert(numModules == numModulesY * numModulesX);
assert(hidActs.isContiguous());
assert(filters.isContiguous());
assert(!hidActs.isTrans());
assert(!filters.isTrans());
assert(!targets.isTrans());
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0);
// assert changed into if statement by Ian Goodfellow
if (paddingStart + (numModulesX-1)*moduleStride + filterSize < imgSizeX)
{
printf("imgSizeX: %d\n", imgSizeX);
printf("Bound on image size: %d\n", paddingStart + (numModulesX-1)*moduleStride+filterSize);
printf("paddingStart: %d\n", paddingStart);
printf("numModulesX: %d\n", numModulesX);
printf("moduleStride: %d\n", moduleStride);
printf("filterSize: %d\n", filterSize);
assert(false);
}
assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
assert(moduleStride <= filterSize);
assert(targets.isContiguous()); // no stride support here!
dim3 blocks;
dim3 threads(16,16);
int colorsPerThread;
int imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
if (numFilterColors % 8 == 0) {
threads = dim3(32, 4);
colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2;
imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
assert(numFilterColors % (threads.y * colorsPerThread) == 0);
blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), imgPixels);
} else if (numFilterColors > 3) {
colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread) * (numImgColors / colorsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
} else {
blocks = dim3(DIVUP(numImages,threads.x*imgsPerThread), DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
}
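// Kernel selection above: filter-color counts divisible by 8 go to the many-color
// kernel (32x4 threads, one target pixel per block), other counts above 3 go to the
// medium-color kernel (16x16 threads, one 4x4 region per block), and 1-3 colors go
// to img_acts_color (also 16x16 threads, one 4x4 region per block).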
bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
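// checkCaseBounds selects the slower bounds-checked instantiations; it is only needed
// when numImages is not a multiple of threads.x * imgsPerThread, i.e. when the last
// image block would otherwise run past the end of the minibatch.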
if (scaleTargets == 0) { // do not scale or use targets matrix
targets.resize(numImgColors*imgPixels, numImages);
} else {
assert(targets.getNumRows() == numImgColors * imgPixels);
assert(targets.getNumCols() == numImages);
}
if (conv) { // convolutional units
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, false, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 2, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, false, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 2, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, false, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 2, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, false, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 2, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, false, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 1, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, false, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 1, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, false, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 1, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, false, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 1, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else if (numFilterColors > 3) {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<4, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<4, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<4, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<4, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<2, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<2, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<2, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<2, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, false, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 1, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, false, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 3, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, false, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 1, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, false, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 3, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<4, 1, false, true, true>, cudaFuncCachePreferShared);
img_acts_color<4, 1, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<4, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_color<4, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<4, 3, false, true, true>, cudaFuncCachePreferShared);
img_acts_color<4, 3, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<4, 1, false, false, true>, cudaFuncCachePreferShared);
img_acts_color<4, 1, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<4, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_color<4, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<4, 3, false, false, true>, cudaFuncCachePreferShared);
img_acts_color<4, 3, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<2, 1, false, true, true>, cudaFuncCachePreferShared);
img_acts_color<2, 1, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<2, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_color<2, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<2, 3, false, true, true>, cudaFuncCachePreferShared);
img_acts_color<2, 3, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<2, 1, false, false, true>, cudaFuncCachePreferShared);
img_acts_color<2, 1, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<2, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_color<2, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<2, 3, false, false, true>, cudaFuncCachePreferShared);
img_acts_color<2, 3, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, true, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 2, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, true, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 2, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, true, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 2, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, true, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 2, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, true, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 1, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, true, true, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 1, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, true, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 1, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, true, false, true>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 1, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else if (numFilterColors > 3) {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<4, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<4, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<4, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<4, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<2, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<2, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<2, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor<2, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, true, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 1, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, true, true, true>, cudaFuncCachePreferShared);
img_acts_color<8, 3, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, true, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 1, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, true, false, true>, cudaFuncCachePreferShared);
img_acts_color<8, 3, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<4, 1, true, true, true>, cudaFuncCachePreferShared);
img_acts_color<4, 1, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<4, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_color<4, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<4, 3, true, true, true>, cudaFuncCachePreferShared);
img_acts_color<4, 3, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<4, 1, true, false, true>, cudaFuncCachePreferShared);
img_acts_color<4, 1, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<4, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_color<4, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<4, 3, true, false, true>, cudaFuncCachePreferShared);
img_acts_color<4, 3, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<2, 1, true, true, true>, cudaFuncCachePreferShared);
img_acts_color<2, 1, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<2, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_color<2, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<2, 3, true, true, true>, cudaFuncCachePreferShared);
img_acts_color<2, 3, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<2, 1, true, false, true>, cudaFuncCachePreferShared);
img_acts_color<2, 1, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<2, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_color<2, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<2, 3, true, false, true>, cudaFuncCachePreferShared);
img_acts_color<2, 3, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
} else { // local, unshared units
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, false, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, false, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, false, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 2, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, false, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 2, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, false, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 2, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, false, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 2, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, false, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 1, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, false, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 1, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, false, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 1, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, false, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 1, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else if (numFilterColors > 3) {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, false, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, false, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, false, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<4, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, false, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<4, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, false, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<4, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, false, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<4, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, false, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<2, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, false, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<2, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, false, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<2, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, false, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<2, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, false, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 1, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, false, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, false, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 3, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, false, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 1, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, false, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, false, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 3, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<4, 1, false, true, false>, cudaFuncCachePreferShared);
img_acts_color<4, 1, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<4, 2, false, true, false>, cudaFuncCachePreferShared);
img_acts_color<4, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<4, 3, false, true, false>, cudaFuncCachePreferShared);
img_acts_color<4, 3, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<4, 1, false, false, false>, cudaFuncCachePreferShared);
img_acts_color<4, 1, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<4, 2, false, false, false>, cudaFuncCachePreferShared);
img_acts_color<4, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<4, 3, false, false, false>, cudaFuncCachePreferShared);
img_acts_color<4, 3, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<2, 1, false, true, false>, cudaFuncCachePreferShared);
img_acts_color<2, 1, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<2, 2, false, true, false>, cudaFuncCachePreferShared);
img_acts_color<2, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<2, 3, false, true, false>, cudaFuncCachePreferShared);
img_acts_color<2, 3, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<2, 1, false, false, false>, cudaFuncCachePreferShared);
img_acts_color<2, 1, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<2, 2, false, false, false>, cudaFuncCachePreferShared);
img_acts_color<2, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<2, 3, false, false, false>, cudaFuncCachePreferShared);
img_acts_color<2, 3, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 4, true, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 4, 2, true, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 4, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, true, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 2, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, true, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 2, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 4, true, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 2, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 2, 2, true, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 2, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, true, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 1, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, true, true, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 1, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 4, true, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 1, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(conv_img_acts_manycolor<4, 32, 1, 2, true, false, false>, cudaFuncCachePreferShared);
conv_img_acts_manycolor<4, 32, 1, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else if (numFilterColors > 3) {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 4, true, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<8, 2, true, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<8, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, true, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<4, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, true, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<4, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 4, true, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<4, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<4, 2, true, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<4, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, true, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<2, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, true, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<2, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 4, true, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<2, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor<2, 2, true, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor<2, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, true, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 1, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, true, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, true, true, false>, cudaFuncCachePreferShared);
img_acts_color<8, 3, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<8, 1, true, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 1, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<8, 2, true, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<8, 3, true, false, false>, cudaFuncCachePreferShared);
img_acts_color<8, 3, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<4, 1, true, true, false>, cudaFuncCachePreferShared);
img_acts_color<4, 1, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<4, 2, true, true, false>, cudaFuncCachePreferShared);
img_acts_color<4, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<4, 3, true, true, false>, cudaFuncCachePreferShared);
img_acts_color<4, 3, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<4, 1, true, false, false>, cudaFuncCachePreferShared);
img_acts_color<4, 1, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<4, 2, true, false, false>, cudaFuncCachePreferShared);
img_acts_color<4, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<4, 3, true, false, false>, cudaFuncCachePreferShared);
img_acts_color<4, 3, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<2, 1, true, true, false>, cudaFuncCachePreferShared);
img_acts_color<2, 1, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<2, 2, true, true, false>, cudaFuncCachePreferShared);
img_acts_color<2, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<2, 3, true, true, false>, cudaFuncCachePreferShared);
img_acts_color<2, 3, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors == 1) {
cudaFuncSetCacheConfig(img_acts_color<2, 1, true, false, false>, cudaFuncCachePreferShared);
img_acts_color<2, 1, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 2) {
cudaFuncSetCacheConfig(img_acts_color<2, 2, true, false, false>, cudaFuncCachePreferShared);
img_acts_color<2, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
} else if (numFilterColors == 3) {
cudaFuncSetCacheConfig(img_acts_color<2, 3, true, false, false>, cudaFuncCachePreferShared);
img_acts_color<2, 3, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(),
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, scaleTargets, scaleOutput);
}
}
}
}
}
}
cutilCheckMsg("imgActs: kernel execution failed");
}
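/*
 * Thin public wrappers around _imgActs. The conv* entry points pass conv = true
 * (filter weights shared across all modules), while the local* entry points pass
 * conv = false (a separate filter bank per module). The shorter overloads default
 * to scaleTargets = 0 and scaleOutput = 1, i.e. they overwrite the targets matrix
 * rather than accumulating into it.
 */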
void convImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
_imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, true);
}
void convImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput) {
_imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, true);
}
void localImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups) {
_imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, 0, 1, false);
}
void localImgActs(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups,
float scaleTargets, float scaleOutput) {
_imgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutput, false);
}
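/*
 * Minimal usage sketch for the wrappers above. Illustrative only: the helper name
 * and the geometry values are made up, and the NVMatrix arguments are assumed to
 * already hold data in the layouts documented in this file.
 */
static void exampleImgActsUsage(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets) {
    // Hypothetical geometry: 32x32 RGB images, 5x5 filters, stride 1, no padding,
    // one filter group, so numModulesY = numModulesX = (32 - 5) / 1 + 1 = 28.
    int imgSizeY = 32, imgSizeX = 32;
    int numModulesY = 28;
    int paddingStart = 0, moduleStride = 1;
    int numImgColors = 3, numGroups = 1;

    // Convolutional weights (shared across modules): overwrites targets.
    convImgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY,
                paddingStart, moduleStride, numImgColors, numGroups);

    // Locally-connected weights (unshared): keep the existing targets contents
    // (scaleTargets = 1) and add the new output at full strength (scaleOutput = 1).
    localImgActs(hidActs, filters, targets, imgSizeY, imgSizeX, numModulesY,
                 paddingStart, moduleStride, numImgColors, numGroups, 1, 1);
}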
/*
* hidActs: (numFilters, numModulesY, numModulesX, numImages)
* filters: (numFilterColors, filterPixels, numFilters) if conv
* (numModulesY, numModulesX, numFilterColors, filterPixels, numFilters) otherwise
* targets: (overSample, numImgColors, imgSizeY, imgSizeX, numImages)
* colorIndices: (numGroups, numFilterColors)
*
* where overSample := (numFilterColors * numGroups) / numImgColors
*
 * Note: all of these convolution routines are optimized for the case when
 * the number of images (i.e. the minibatch size) is a multiple of 128.
 * Other batch sizes will work, but I made no attempt whatsoever
 * to make them work fast.
*/
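/*
 * Worked example of the overSample bookkeeping above (hypothetical numbers): with
 * numImgColors = 32, numFilterColors = 16 and numGroups = 4, the groups collectively
 * connect to 16 * 4 = 64 image-color slots, i.e. overSample = 64 / 32 = 2 copies of
 * the image color space. The targets matrix therefore needs
 * overSample * numImgColors * imgSizeY * imgSizeX rows, which is what the resize in
 * _imgActsSparse allocates when scaleTargets == 0.
 */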
void _imgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride,
int numImgColors, int numFilterColors, int numGroups,
float scaleTargets, float scaleOutput, bool conv) {
int numImages = hidActs.getNumCols();
int numFilters = filters.getNumCols();
// int numFiltersPerGroup = numFilters / numGroups;
int numModules = hidActs.getNumRows() / numFilters;
int filterModuleMult = conv ? 1 : numModules;
int filterPixels = filters.getNumRows() / (filterModuleMult * numFilterColors);
int filterSize = sqrt((double)filterPixels);
int imgPixels = imgSizeY * imgSizeX;
int numModulesX = numModules / numModulesY;
int overSample = (numFilterColors * numGroups) / numImgColors;
assert(numImgColors % numFilterColors == 0);
assert(numFilters % (16*numGroups) == 0);
assert((numFilterColors * numGroups) % numImgColors == 0);
assert(numGroups > 1);
assert(numFilterColors > 3 && numFilterColors % 2 == 0);
assert(filterPixels == filterSize * filterSize);
assert(hidActs.getNumRows() == numModules * numFilters);
assert(filters.getNumRows() == filterModuleMult * numFilterColors * filterPixels);
assert(numModules == numModulesY * numModulesX);
assert(hidActs.isContiguous());
assert(filters.isContiguous());
assert(!hidActs.isTrans());
assert(!filters.isTrans());
assert(!targets.isTrans());
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0);
assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
assert(moduleStride <= filterSize);
assert(targets.isContiguous()); // no stride support here!
dim3 blocks;
dim3 threads;
int colorsPerThread;
int imgsPerThread;
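    // Choose a launch configuration based on the number of filter colors per group:
    // the manycolor sparse kernels (numFilterColors divisible by 8) run 32x4 thread
    // blocks with 2 or 4 colors per thread, while the mediumcolor kernels run 16x16
    // blocks. In both regimes more images are assigned per thread when the batch
    // size divides evenly (multiples of 64 or 128 images).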
if (numFilterColors % 8 == 0) {
threads = dim3(32, 4);
colorsPerThread = numFilterColors % 16 == 0 ? 4 : 2;
imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
assert(numFilterColors % (threads.y * colorsPerThread) == 0);
blocks = dim3(DIVUP(numImages, threads.x*imgsPerThread) * (numImgColors/(threads.y*colorsPerThread)), overSample * imgPixels);
} else if (numFilterColors > 3) {
imgsPerThread = numImages % 128 == 0 ? 8 : numImages % 64 == 0 ? 4 : 2;
threads = dim3(16, 16);
colorsPerThread = numFilterColors % 4 == 0 ? 4 : 2;
blocks = dim3(DIVUP(numImages,16*imgsPerThread) * (numImgColors / colorsPerThread), overSample * DIVUP(imgSizeY,4) * DIVUP(imgSizeX,4));
}
bool checkCaseBounds = numImages % (threads.x * imgsPerThread) != 0;
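    // checkCaseBounds is true when numImages is not a multiple of the images handled
    // per block (threads.x * imgsPerThread); the bounds-checking kernel variants then
    // guard their per-image loads and stores against running past the minibatch.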
if (scaleTargets == 0) { // do not scale or use targets matrix
targets.resize(overSample*numImgColors*imgPixels, numImages);
} else {
assert(targets.getNumRows() == overSample * numImgColors * imgPixels);
assert(targets.getNumCols() == numImages);
}
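    // Dispatch: pick the fully specialized kernel for this (conv/local, scale,
    // bounds-check, colors-per-thread, images-per-thread) combination so that all
    // of these values are compile-time constants inside the kernel.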
if (conv) {
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, true, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, false, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, true, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, false, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else if (numFilterColors > 3) {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<4, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<4, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<4, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<4, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<2, 4, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, false, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<2, 2, false, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<2, 4, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, false, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<2, 2, false, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, true, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, false, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, true, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, false, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else if (numFilterColors > 3) {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<4, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<4, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<4, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<4, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<2, 4, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, true, true, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<2, 2, true, true, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<2, 4, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, true, false, true>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<2, 2, true, false, true><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
}
} else {
if (scaleTargets == 0) { // do not scale or use targets matrix
if (numFilterColors % 8 == 0) {
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, true, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, true, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, false, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 2, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, false, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 2, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, true, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, true, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, false, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 1, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, false, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 1, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else if (numFilterColors > 3) {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, false, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, false, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, false, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<4, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, false, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<4, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, false, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<4, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, false, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<4, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, false, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<2, 4, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, false, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<2, 2, false, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, false, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<2, 4, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, false, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<2, 2, false, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
} else { // do scale
if (numFilterColors % 8 == 0) {
if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 4, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 2) {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, true, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, true, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, false, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 2, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, false, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 2, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, true, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, true, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (numFilterColors % 16 == 0) {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, false, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 1, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, false, false>, cudaFuncCachePreferShared);
img_acts_manycolor_sparse_rand<4, 32, 1, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
} else if (numFilterColors > 3) {
if (imgsPerThread == 8) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 4, true, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<8, 2, true, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<8, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else if (imgsPerThread == 4) {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, true, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<4, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, true, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<4, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 4, true, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<4, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<4, 2, true, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<4, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
} else {
if (checkCaseBounds) {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, true, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<2, 4, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, true, true, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<2, 2, true, true, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
} else {
if (colorsPerThread == 4) {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 4, true, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<2, 4, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
} else {
cudaFuncSetCacheConfig(img_acts_mediumcolor_sparse_rand<2, 2, true, false, false>, cudaFuncCachePreferShared);
img_acts_mediumcolor_sparse_rand<2, 2, true, false, false><<<blocks, threads>>>(hidActs.getDevData(), filters.getDevData(), targets.getDevData(), dColorIndices,
numModulesY, numModulesX, numImages, numFilters, filterSize, imgSizeY, imgSizeX, paddingStart, moduleStride, numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput);
}
}
}
}
}
}
cutilCheckMsg("imgActsSparse: kernel execution failed");
}
void convImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups) {
_imgActsSparse(hidActs, filters, targets, dColorIndices, imgSizeY, imgSizeX, numModulesY, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, 0, 1, true);
}
void convImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups,
float scaleTargets, float scaleOutput) {
_imgActsSparse(hidActs, filters, targets, dColorIndices, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride,
numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput, true);
}
void localImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups) {
_imgActsSparse(hidActs, filters, targets, dColorIndices, imgSizeY, imgSizeX, numModulesY, paddingStart,
moduleStride, numImgColors, numFilterColors, numGroups, 0, 1, false);
}
void localImgActsSparse(NVMatrix& hidActs, NVMatrix& filters, NVMatrix& targets, int* dColorIndices,
int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numFilterColors, int numGroups,
float scaleTargets, float scaleOutput) {
_imgActsSparse(hidActs, filters, targets, dColorIndices, imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride,
numImgColors, numFilterColors, numGroups, scaleTargets, scaleOutput, false);
}
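// Usage sketch (hypothetical argument values, added for illustration; not part
// of the original source): reconstructing images from hidden activations with
// 4 groups of 8 randomly-connected filter colors over an 8-color image could
// look like
//   convImgActsSparse(hidActs, filters, targets, dColorIndices,
//                     /*imgSizeY=*/32, /*imgSizeX=*/32, /*numModulesY=*/16,
//                     /*paddingStart=*/0, /*moduleStride=*/2,
//                     /*numImgColors=*/8, /*numFilterColors=*/8, /*numGroups=*/4);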
|
d3e347f1dda7484f5ef2a81f402168c8fcf7dcca.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
#include "THHTensor.hpp"
#include "common.h"
#include "vol2col.h"
#include "TH/THHalf.h"
#include "THHHalfAutoNumerics.cuh"
#include "generic/VolumetricFullDilatedConvolution.cu"
#include "THHGenerateFloatTypes.h"
| d3e347f1dda7484f5ef2a81f402168c8fcf7dcca.cu | #include "THCUNN.h"
#include "THCTensor.hpp"
#include "common.h"
#include "vol2col.h"
#include "TH/THHalf.h"
#include "THCHalfAutoNumerics.cuh"
#include "generic/VolumetricFullDilatedConvolution.cu"
#include "THCGenerateFloatTypes.h"
|
3107b6e6742094c745cb9573e1d725f29c8520f4.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2020-2022 by XGBoost contributors
*/
#include <gtest/gtest.h>
#include "../../../../src/tree/gpu_hist/evaluate_splits.cuh"
#include "../../helpers.h"
#include "../../histogram_helpers.h"
#include "../test_evaluate_splits.h" // TestPartitionBasedSplit
namespace xgboost {
namespace tree {
namespace {
auto ZeroParam() {
auto args = Args{{"min_child_weight", "0"},
{"lambda", "0"}};
TrainParam tparam;
tparam.UpdateAllowUnknown(args);
return tparam;
}
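// Note (added for clarity): zeroing lambda and min_child_weight keeps the
// regularization terms out of the split gain, so the expected winners in the
// tests below can be verified by hand as sums of grad^2 / hess over the
// candidate children.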
} // anonymous namespace
void TestEvaluateSingleSplit(bool is_categorical) {
GradientPairPrecise parent_sum(0.0, 1.0);
TrainParam tparam = ZeroParam();
GPUTrainingParam param{tparam};
thrust::device_vector<bst_feature_t> feature_set =
std::vector<bst_feature_t>{0, 1};
thrust::device_vector<uint32_t> feature_segments =
std::vector<bst_row_t>{0, 2, 4};
thrust::device_vector<float> feature_values =
std::vector<float>{1.0, 2.0, 11.0, 12.0};
thrust::device_vector<float> feature_min_values =
std::vector<float>{0.0, 0.0};
// Setup gradients so that second feature gets higher gain
thrust::device_vector<GradientPair> feature_histogram =
std::vector<GradientPair>{
{-0.5, 0.5}, {0.5, 0.5}, {-1.0, 0.5}, {1.0, 0.5}};
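  // Added for clarity (not part of the original test): with lambda = 0 the gain
  // is proportional to G_L^2/H_L + G_R^2/H_R - G_parent^2/H_parent. Feature 1's
  // bins carry gradients of +/-1.0 versus +/-0.5 for feature 0, so its best
  // split scores about 4.0 against 1.0 and is expected to win.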
thrust::device_vector<int> monotonic_constraints(feature_set.size(), 0);
dh::device_vector<FeatureType> feature_types(feature_set.size(),
FeatureType::kCategorical);
common::Span<FeatureType> d_feature_types;
if (is_categorical) {
d_feature_types = dh::ToSpan(feature_types);
}
EvaluateSplitInputs<GradientPair> input{1,
parent_sum,
param,
dh::ToSpan(feature_set),
d_feature_types,
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
dh::ToSpan(feature_min_values),
dh::ToSpan(feature_histogram)};
GPUHistEvaluator<GradientPair> evaluator{
tparam, static_cast<bst_feature_t>(feature_min_values.size()), 0};
dh::device_vector<common::CatBitField::value_type> out_cats;
DeviceSplitCandidate result =
evaluator.EvaluateSingleSplit(input, 0, ObjInfo{ObjInfo::kRegression}).split;
EXPECT_EQ(result.findex, 1);
EXPECT_EQ(result.fvalue, 11.0);
EXPECT_FLOAT_EQ(result.left_sum.GetGrad() + result.right_sum.GetGrad(),
parent_sum.GetGrad());
EXPECT_FLOAT_EQ(result.left_sum.GetHess() + result.right_sum.GetHess(),
parent_sum.GetHess());
}
TEST(GpuHist, EvaluateSingleSplit) {
TestEvaluateSingleSplit(false);
}
TEST(GpuHist, EvaluateCategoricalSplit) {
TestEvaluateSingleSplit(true);
}
TEST(GpuHist, EvaluateSingleSplitMissing) {
GradientPairPrecise parent_sum(1.0, 1.5);
TrainParam tparam = ZeroParam();
GPUTrainingParam param{tparam};
thrust::device_vector<bst_feature_t> feature_set =
std::vector<bst_feature_t>{0};
thrust::device_vector<uint32_t> feature_segments =
std::vector<bst_row_t>{0, 2};
thrust::device_vector<float> feature_values = std::vector<float>{1.0, 2.0};
thrust::device_vector<float> feature_min_values = std::vector<float>{0.0};
thrust::device_vector<GradientPair> feature_histogram =
std::vector<GradientPair>{{-0.5, 0.5}, {0.5, 0.5}};
thrust::device_vector<int> monotonic_constraints(feature_set.size(), 0);
EvaluateSplitInputs<GradientPair> input{1,
parent_sum,
param,
dh::ToSpan(feature_set),
{},
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
dh::ToSpan(feature_min_values),
dh::ToSpan(feature_histogram)};
GPUHistEvaluator<GradientPair> evaluator(tparam, feature_set.size(), 0);
DeviceSplitCandidate result =
evaluator.EvaluateSingleSplit(input, 0, ObjInfo{ObjInfo::kRegression}).split;
EXPECT_EQ(result.findex, 0);
EXPECT_EQ(result.fvalue, 1.0);
EXPECT_EQ(result.dir, kRightDir);
EXPECT_EQ(result.left_sum, GradientPairPrecise(-0.5, 0.5));
EXPECT_EQ(result.right_sum, GradientPairPrecise(1.5, 1.0));
}
TEST(GpuHist, EvaluateSingleSplitEmpty) {
TrainParam tparam = ZeroParam();
GPUHistEvaluator<GradientPair> evaluator(tparam, 1, 0);
DeviceSplitCandidate result = evaluator
.EvaluateSingleSplit(EvaluateSplitInputs<GradientPair>{}, 0,
ObjInfo{ObjInfo::kRegression})
.split;
EXPECT_EQ(result.findex, -1);
EXPECT_LT(result.loss_chg, 0.0f);
}
// Feature 0 has a better split, but the algorithm must select feature 1
TEST(GpuHist, EvaluateSingleSplitFeatureSampling) {
GradientPairPrecise parent_sum(0.0, 1.0);
TrainParam tparam = ZeroParam();
tparam.UpdateAllowUnknown(Args{});
GPUTrainingParam param{tparam};
thrust::device_vector<bst_feature_t> feature_set =
std::vector<bst_feature_t>{1};
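  // Added for clarity (not part of the original test): only feature 1 is present
  // in the sampled feature set, so the stronger split available in feature 0's
  // histogram below must be ignored by the evaluator.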
thrust::device_vector<uint32_t> feature_segments =
std::vector<bst_row_t>{0, 2, 4};
thrust::device_vector<float> feature_values =
std::vector<float>{1.0, 2.0, 11.0, 12.0};
thrust::device_vector<float> feature_min_values =
std::vector<float>{0.0, 10.0};
thrust::device_vector<GradientPair> feature_histogram =
std::vector<GradientPair>{
{-10.0, 0.5}, {10.0, 0.5}, {-0.5, 0.5}, {0.5, 0.5}};
thrust::device_vector<int> monotonic_constraints(2, 0);
EvaluateSplitInputs<GradientPair> input{1,
parent_sum,
param,
dh::ToSpan(feature_set),
{},
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
dh::ToSpan(feature_min_values),
dh::ToSpan(feature_histogram)};
GPUHistEvaluator<GradientPair> evaluator(tparam, feature_min_values.size(), 0);
DeviceSplitCandidate result =
evaluator.EvaluateSingleSplit(input, 0, ObjInfo{ObjInfo::kRegression}).split;
EXPECT_EQ(result.findex, 1);
EXPECT_EQ(result.fvalue, 11.0);
EXPECT_EQ(result.left_sum, GradientPairPrecise(-0.5, 0.5));
EXPECT_EQ(result.right_sum, GradientPairPrecise(0.5, 0.5));
}
// Features 0 and 1 have identical gain, the algorithm must select 0
TEST(GpuHist, EvaluateSingleSplitBreakTies) {
GradientPairPrecise parent_sum(0.0, 1.0);
TrainParam tparam = ZeroParam();
tparam.UpdateAllowUnknown(Args{});
GPUTrainingParam param{tparam};
thrust::device_vector<bst_feature_t> feature_set =
std::vector<bst_feature_t>{0, 1};
thrust::device_vector<uint32_t> feature_segments =
std::vector<bst_row_t>{0, 2, 4};
thrust::device_vector<float> feature_values =
std::vector<float>{1.0, 2.0, 11.0, 12.0};
thrust::device_vector<float> feature_min_values =
std::vector<float>{0.0, 10.0};
thrust::device_vector<GradientPair> feature_histogram =
std::vector<GradientPair>{
{-0.5, 0.5}, {0.5, 0.5}, {-0.5, 0.5}, {0.5, 0.5}};
thrust::device_vector<int> monotonic_constraints(2, 0);
EvaluateSplitInputs<GradientPair> input{1,
parent_sum,
param,
dh::ToSpan(feature_set),
{},
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
dh::ToSpan(feature_min_values),
dh::ToSpan(feature_histogram)};
GPUHistEvaluator<GradientPair> evaluator(tparam, feature_min_values.size(), 0);
DeviceSplitCandidate result =
evaluator.EvaluateSingleSplit(input, 0, ObjInfo{ObjInfo::kRegression}).split;
EXPECT_EQ(result.findex, 0);
EXPECT_EQ(result.fvalue, 1.0);
}
TEST(GpuHist, EvaluateSplits) {
thrust::device_vector<DeviceSplitCandidate> out_splits(2);
GradientPairPrecise parent_sum(0.0, 1.0);
TrainParam tparam = ZeroParam();
tparam.UpdateAllowUnknown(Args{});
GPUTrainingParam param{tparam};
thrust::device_vector<bst_feature_t> feature_set =
std::vector<bst_feature_t>{0, 1};
thrust::device_vector<uint32_t> feature_segments =
std::vector<bst_row_t>{0, 2, 4};
thrust::device_vector<float> feature_values =
std::vector<float>{1.0, 2.0, 11.0, 12.0};
thrust::device_vector<float> feature_min_values =
std::vector<float>{0.0, 0.0};
thrust::device_vector<GradientPair> feature_histogram_left =
std::vector<GradientPair>{
{-0.5, 0.5}, {0.5, 0.5}, {-1.0, 0.5}, {1.0, 0.5}};
thrust::device_vector<GradientPair> feature_histogram_right =
std::vector<GradientPair>{
{-1.0, 0.5}, {1.0, 0.5}, {-0.5, 0.5}, {0.5, 0.5}};
thrust::device_vector<int> monotonic_constraints(feature_set.size(), 0);
EvaluateSplitInputs<GradientPair> input_left{
1,
parent_sum,
param,
dh::ToSpan(feature_set),
{},
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
dh::ToSpan(feature_min_values),
dh::ToSpan(feature_histogram_left)};
EvaluateSplitInputs<GradientPair> input_right{
2,
parent_sum,
param,
dh::ToSpan(feature_set),
{},
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
dh::ToSpan(feature_min_values),
dh::ToSpan(feature_histogram_right)};
GPUHistEvaluator<GradientPair> evaluator{
tparam, static_cast<bst_feature_t>(feature_min_values.size()), 0};
evaluator.EvaluateSplits(input_left, input_right, ObjInfo{ObjInfo::kRegression},
evaluator.GetEvaluator(), dh::ToSpan(out_splits));
DeviceSplitCandidate result_left = out_splits[0];
EXPECT_EQ(result_left.findex, 1);
EXPECT_EQ(result_left.fvalue, 11.0);
DeviceSplitCandidate result_right = out_splits[1];
EXPECT_EQ(result_right.findex, 0);
EXPECT_EQ(result_right.fvalue, 1.0);
}
TEST_F(TestPartitionBasedSplit, GpuHist) {
dh::device_vector<FeatureType> ft{std::vector<FeatureType>{FeatureType::kCategorical}};
GPUHistEvaluator<GradientPairPrecise> evaluator{param_,
static_cast<bst_feature_t>(info_.num_col_), 0};
cuts_.cut_ptrs_.SetDevice(0);
cuts_.cut_values_.SetDevice(0);
cuts_.min_vals_.SetDevice(0);
ObjInfo task{ObjInfo::kRegression};
evaluator.Reset(cuts_, dh::ToSpan(ft), task, info_.num_col_, param_, 0);
dh::device_vector<GradientPairPrecise> d_hist(hist_[0].size());
auto node_hist = hist_[0];
dh::safe_cuda(hipMemcpy(d_hist.data().get(), node_hist.data(), node_hist.size_bytes(),
hipMemcpyHostToDevice));
dh::device_vector<bst_feature_t> feature_set{std::vector<bst_feature_t>{0}};
EvaluateSplitInputs<GradientPairPrecise> input{0,
total_gpair_,
GPUTrainingParam{param_},
dh::ToSpan(feature_set),
dh::ToSpan(ft),
cuts_.cut_ptrs_.ConstDeviceSpan(),
cuts_.cut_values_.ConstDeviceSpan(),
cuts_.min_vals_.ConstDeviceSpan(),
dh::ToSpan(d_hist)};
auto split = evaluator.EvaluateSingleSplit(input, 0, ObjInfo{ObjInfo::kRegression}).split;
ASSERT_NEAR(split.loss_chg, best_score_, 1e-16);
}
} // namespace tree
} // namespace xgboost
| 3107b6e6742094c745cb9573e1d725f29c8520f4.cu | /*!
* Copyright 2020-2022 by XGBoost contributors
*/
#include <gtest/gtest.h>
#include "../../../../src/tree/gpu_hist/evaluate_splits.cuh"
#include "../../helpers.h"
#include "../../histogram_helpers.h"
#include "../test_evaluate_splits.h" // TestPartitionBasedSplit
namespace xgboost {
namespace tree {
namespace {
auto ZeroParam() {
auto args = Args{{"min_child_weight", "0"},
{"lambda", "0"}};
TrainParam tparam;
tparam.UpdateAllowUnknown(args);
return tparam;
}
} // anonymous namespace
void TestEvaluateSingleSplit(bool is_categorical) {
GradientPairPrecise parent_sum(0.0, 1.0);
TrainParam tparam = ZeroParam();
GPUTrainingParam param{tparam};
thrust::device_vector<bst_feature_t> feature_set =
std::vector<bst_feature_t>{0, 1};
thrust::device_vector<uint32_t> feature_segments =
std::vector<bst_row_t>{0, 2, 4};
thrust::device_vector<float> feature_values =
std::vector<float>{1.0, 2.0, 11.0, 12.0};
thrust::device_vector<float> feature_min_values =
std::vector<float>{0.0, 0.0};
// Setup gradients so that second feature gets higher gain
thrust::device_vector<GradientPair> feature_histogram =
std::vector<GradientPair>{
{-0.5, 0.5}, {0.5, 0.5}, {-1.0, 0.5}, {1.0, 0.5}};
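  // Rough gain check (hedged sketch, assuming the usual G^2/(H + lambda) gain with
  // lambda = 0 and min_child_weight = 0 from ZeroParam()): splitting feature 1 at
  // 11.0 separates the {-1.0, 0.5} and {1.0, 0.5} entries, giving a larger
  // GL^2/HL + GR^2/HR - G^2/H improvement than the +/-0.5 gradients on feature 0,
  // so the assertions below expect findex == 1 and fvalue == 11.0.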
thrust::device_vector<int> monotonic_constraints(feature_set.size(), 0);
dh::device_vector<FeatureType> feature_types(feature_set.size(),
FeatureType::kCategorical);
common::Span<FeatureType> d_feature_types;
if (is_categorical) {
d_feature_types = dh::ToSpan(feature_types);
}
EvaluateSplitInputs<GradientPair> input{1,
parent_sum,
param,
dh::ToSpan(feature_set),
d_feature_types,
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
dh::ToSpan(feature_min_values),
dh::ToSpan(feature_histogram)};
GPUHistEvaluator<GradientPair> evaluator{
tparam, static_cast<bst_feature_t>(feature_min_values.size()), 0};
dh::device_vector<common::CatBitField::value_type> out_cats;
DeviceSplitCandidate result =
evaluator.EvaluateSingleSplit(input, 0, ObjInfo{ObjInfo::kRegression}).split;
EXPECT_EQ(result.findex, 1);
EXPECT_EQ(result.fvalue, 11.0);
EXPECT_FLOAT_EQ(result.left_sum.GetGrad() + result.right_sum.GetGrad(),
parent_sum.GetGrad());
EXPECT_FLOAT_EQ(result.left_sum.GetHess() + result.right_sum.GetHess(),
parent_sum.GetHess());
}
TEST(GpuHist, EvaluateSingleSplit) {
TestEvaluateSingleSplit(false);
}
TEST(GpuHist, EvaluateCategoricalSplit) {
TestEvaluateSingleSplit(true);
}
TEST(GpuHist, EvaluateSingleSplitMissing) {
GradientPairPrecise parent_sum(1.0, 1.5);
TrainParam tparam = ZeroParam();
GPUTrainingParam param{tparam};
thrust::device_vector<bst_feature_t> feature_set =
std::vector<bst_feature_t>{0};
thrust::device_vector<uint32_t> feature_segments =
std::vector<bst_row_t>{0, 2};
thrust::device_vector<float> feature_values = std::vector<float>{1.0, 2.0};
thrust::device_vector<float> feature_min_values = std::vector<float>{0.0};
thrust::device_vector<GradientPair> feature_histogram =
std::vector<GradientPair>{{-0.5, 0.5}, {0.5, 0.5}};
thrust::device_vector<int> monotonic_constraints(feature_set.size(), 0);
EvaluateSplitInputs<GradientPair> input{1,
parent_sum,
param,
dh::ToSpan(feature_set),
{},
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
dh::ToSpan(feature_min_values),
dh::ToSpan(feature_histogram)};
GPUHistEvaluator<GradientPair> evaluator(tparam, feature_set.size(), 0);
DeviceSplitCandidate result =
evaluator.EvaluateSingleSplit(input, 0, ObjInfo{ObjInfo::kRegression}).split;
EXPECT_EQ(result.findex, 0);
EXPECT_EQ(result.fvalue, 1.0);
EXPECT_EQ(result.dir, kRightDir);
EXPECT_EQ(result.left_sum, GradientPairPrecise(-0.5, 0.5));
EXPECT_EQ(result.right_sum, GradientPairPrecise(1.5, 1.0));
}
TEST(GpuHist, EvaluateSingleSplitEmpty) {
TrainParam tparam = ZeroParam();
GPUHistEvaluator<GradientPair> evaluator(tparam, 1, 0);
DeviceSplitCandidate result = evaluator
.EvaluateSingleSplit(EvaluateSplitInputs<GradientPair>{}, 0,
ObjInfo{ObjInfo::kRegression})
.split;
EXPECT_EQ(result.findex, -1);
EXPECT_LT(result.loss_chg, 0.0f);
}
// Feature 0 has a better split, but the algorithm must select feature 1
TEST(GpuHist, EvaluateSingleSplitFeatureSampling) {
GradientPairPrecise parent_sum(0.0, 1.0);
TrainParam tparam = ZeroParam();
tparam.UpdateAllowUnknown(Args{});
GPUTrainingParam param{tparam};
thrust::device_vector<bst_feature_t> feature_set =
std::vector<bst_feature_t>{1};
thrust::device_vector<uint32_t> feature_segments =
std::vector<bst_row_t>{0, 2, 4};
thrust::device_vector<float> feature_values =
std::vector<float>{1.0, 2.0, 11.0, 12.0};
thrust::device_vector<float> feature_min_values =
std::vector<float>{0.0, 10.0};
thrust::device_vector<GradientPair> feature_histogram =
std::vector<GradientPair>{
{-10.0, 0.5}, {10.0, 0.5}, {-0.5, 0.5}, {0.5, 0.5}};
thrust::device_vector<int> monotonic_constraints(2, 0);
EvaluateSplitInputs<GradientPair> input{1,
parent_sum,
param,
dh::ToSpan(feature_set),
{},
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
dh::ToSpan(feature_min_values),
dh::ToSpan(feature_histogram)};
GPUHistEvaluator<GradientPair> evaluator(tparam, feature_min_values.size(), 0);
DeviceSplitCandidate result =
evaluator.EvaluateSingleSplit(input, 0, ObjInfo{ObjInfo::kRegression}).split;
EXPECT_EQ(result.findex, 1);
EXPECT_EQ(result.fvalue, 11.0);
EXPECT_EQ(result.left_sum, GradientPairPrecise(-0.5, 0.5));
EXPECT_EQ(result.right_sum, GradientPairPrecise(0.5, 0.5));
}
// Features 0 and 1 have identical gain; the algorithm must select 0
TEST(GpuHist, EvaluateSingleSplitBreakTies) {
GradientPairPrecise parent_sum(0.0, 1.0);
TrainParam tparam = ZeroParam();
tparam.UpdateAllowUnknown(Args{});
GPUTrainingParam param{tparam};
thrust::device_vector<bst_feature_t> feature_set =
std::vector<bst_feature_t>{0, 1};
thrust::device_vector<uint32_t> feature_segments =
std::vector<bst_row_t>{0, 2, 4};
thrust::device_vector<float> feature_values =
std::vector<float>{1.0, 2.0, 11.0, 12.0};
thrust::device_vector<float> feature_min_values =
std::vector<float>{0.0, 10.0};
thrust::device_vector<GradientPair> feature_histogram =
std::vector<GradientPair>{
{-0.5, 0.5}, {0.5, 0.5}, {-0.5, 0.5}, {0.5, 0.5}};
thrust::device_vector<int> monotonic_constraints(2, 0);
EvaluateSplitInputs<GradientPair> input{1,
parent_sum,
param,
dh::ToSpan(feature_set),
{},
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
dh::ToSpan(feature_min_values),
dh::ToSpan(feature_histogram)};
GPUHistEvaluator<GradientPair> evaluator(tparam, feature_min_values.size(), 0);
DeviceSplitCandidate result =
evaluator.EvaluateSingleSplit(input, 0, ObjInfo{ObjInfo::kRegression}).split;
EXPECT_EQ(result.findex, 0);
EXPECT_EQ(result.fvalue, 1.0);
}
TEST(GpuHist, EvaluateSplits) {
thrust::device_vector<DeviceSplitCandidate> out_splits(2);
GradientPairPrecise parent_sum(0.0, 1.0);
TrainParam tparam = ZeroParam();
tparam.UpdateAllowUnknown(Args{});
GPUTrainingParam param{tparam};
thrust::device_vector<bst_feature_t> feature_set =
std::vector<bst_feature_t>{0, 1};
thrust::device_vector<uint32_t> feature_segments =
std::vector<bst_row_t>{0, 2, 4};
thrust::device_vector<float> feature_values =
std::vector<float>{1.0, 2.0, 11.0, 12.0};
thrust::device_vector<float> feature_min_values =
std::vector<float>{0.0, 0.0};
thrust::device_vector<GradientPair> feature_histogram_left =
std::vector<GradientPair>{
{-0.5, 0.5}, {0.5, 0.5}, {-1.0, 0.5}, {1.0, 0.5}};
thrust::device_vector<GradientPair> feature_histogram_right =
std::vector<GradientPair>{
{-1.0, 0.5}, {1.0, 0.5}, {-0.5, 0.5}, {0.5, 0.5}};
thrust::device_vector<int> monotonic_constraints(feature_set.size(), 0);
EvaluateSplitInputs<GradientPair> input_left{
1,
parent_sum,
param,
dh::ToSpan(feature_set),
{},
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
dh::ToSpan(feature_min_values),
dh::ToSpan(feature_histogram_left)};
EvaluateSplitInputs<GradientPair> input_right{
2,
parent_sum,
param,
dh::ToSpan(feature_set),
{},
dh::ToSpan(feature_segments),
dh::ToSpan(feature_values),
dh::ToSpan(feature_min_values),
dh::ToSpan(feature_histogram_right)};
GPUHistEvaluator<GradientPair> evaluator{
tparam, static_cast<bst_feature_t>(feature_min_values.size()), 0};
evaluator.EvaluateSplits(input_left, input_right, ObjInfo{ObjInfo::kRegression},
evaluator.GetEvaluator(), dh::ToSpan(out_splits));
DeviceSplitCandidate result_left = out_splits[0];
EXPECT_EQ(result_left.findex, 1);
EXPECT_EQ(result_left.fvalue, 11.0);
DeviceSplitCandidate result_right = out_splits[1];
EXPECT_EQ(result_right.findex, 0);
EXPECT_EQ(result_right.fvalue, 1.0);
}
TEST_F(TestPartitionBasedSplit, GpuHist) {
dh::device_vector<FeatureType> ft{std::vector<FeatureType>{FeatureType::kCategorical}};
GPUHistEvaluator<GradientPairPrecise> evaluator{param_,
static_cast<bst_feature_t>(info_.num_col_), 0};
cuts_.cut_ptrs_.SetDevice(0);
cuts_.cut_values_.SetDevice(0);
cuts_.min_vals_.SetDevice(0);
ObjInfo task{ObjInfo::kRegression};
evaluator.Reset(cuts_, dh::ToSpan(ft), task, info_.num_col_, param_, 0);
dh::device_vector<GradientPairPrecise> d_hist(hist_[0].size());
auto node_hist = hist_[0];
dh::safe_cuda(cudaMemcpy(d_hist.data().get(), node_hist.data(), node_hist.size_bytes(),
cudaMemcpyHostToDevice));
dh::device_vector<bst_feature_t> feature_set{std::vector<bst_feature_t>{0}};
EvaluateSplitInputs<GradientPairPrecise> input{0,
total_gpair_,
GPUTrainingParam{param_},
dh::ToSpan(feature_set),
dh::ToSpan(ft),
cuts_.cut_ptrs_.ConstDeviceSpan(),
cuts_.cut_values_.ConstDeviceSpan(),
cuts_.min_vals_.ConstDeviceSpan(),
dh::ToSpan(d_hist)};
auto split = evaluator.EvaluateSingleSplit(input, 0, ObjInfo{ObjInfo::kRegression}).split;
ASSERT_NEAR(split.loss_chg, best_score_, 1e-16);
}
} // namespace tree
} // namespace xgboost
|
31adf919e0dd3fd477d963c9cecfad90243d13be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void gpu_stencil37_hack1_cp_rows(double * dst, double * shared_rows, double *shared_cols,double *shared_slices,int n_rows, int n_cols,int n_slices,int tile_x,int tile_y, int tile_z){
#ifdef CUDA_DARTS_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.x==0)){
printf("copy rows begin\n");
printf("gridDim.x=%d,gridDim.y=%d,gridDim.z=%d\n",gridDim.x,gridDim.y,gridDim.z);
printf("blockDim.x=%d,blockDim.y=%d,blockDim.z=%d\n",blockDim.x,blockDim.y,blockDim.z);
printf("tile_x=%d,tile_y=%d,tile_z=%d\n",tile_x,tile_y,tile_z);
}
#endif
int base_global_slice = tile_z * blockIdx.z;
int base_global_row = tile_y * blockIdx.y;
int base_global_col = blockDim.x*blockIdx.x;
int dst_area = n_rows*n_cols;
int s_area = gridDim.y*n_cols*2;
int base_global_idx = base_global_slice*dst_area + base_global_row * n_cols + base_global_col;
int nextRow = base_global_row+1;
bool legalNextRow = nextRow<n_rows;
int tx = threadIdx.x;
bool legalCurCol = (base_global_col + tx)<n_cols;
for(int tz=0;tz<tile_z;++tz){
bool legalCurSlice = (base_global_slice + tz)<n_slices;
int idx_dst =base_global_idx + tz*dst_area+ tx ;
int idx = (base_global_slice+tz)*s_area + blockIdx.y*n_cols*2+blockIdx.x*blockDim.x+ tx ;
if(legalCurCol && legalCurSlice){
shared_rows[idx] = dst[idx_dst];
}
if(legalCurCol && legalCurSlice && legalNextRow){
shared_rows[idx+n_cols] = dst[idx_dst+n_cols];
}
}
__syncthreads();
#ifdef CUDA_CUDA_DEBUG
if(blockIdx.y==0 && blockIdx.x==0 &&blockIdx.z==0 ){
if((threadIdx.x==0 || threadIdx.x==1 || threadIdx.x==2 ) && threadIdx.y==0){
int addr0 = base_global_idx+0*dst_area+threadIdx.x;
int addr = base_global_slice+blockIdx.x*blockDim.x + threadIdx.x;
int addr1 = s_area*(base_global_slice+1)+n_cols+blockIdx.x*blockDim.x+ threadIdx.x;
int addr2 = s_area*(base_global_slice+2)+n_cols+blockIdx.x*blockDim.x+ threadIdx.x;
printf("blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,dst : z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,0,addr0,dst[addr0]);
printf("blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_rows: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,0,addr,shared_rows[addr]);
printf("blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_rows: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,1,addr1,shared_rows[addr1]);
printf("blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_rows: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,2,addr2,shared_rows[addr2]);
}
if(threadIdx.x==0 && threadIdx.y==0){
int addr = 2*s_area+n_cols+256;
int addr1 = 2*dst_area+n_cols+256;
printf("shared_rows: addr:%d, val:%f\n", addr, shared_rows[addr]);
printf("dst : addr:%d, val:%f\n", addr1, dst[addr1]);
}
}
#endif
#ifdef CUDA_DARTS_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.x==0)){
printf("copy rows end!\n");
}
#endif
} | 31adf919e0dd3fd477d963c9cecfad90243d13be.cu | #include "includes.h"
__global__ void gpu_stencil37_hack1_cp_rows(double * dst, double * shared_rows, double *shared_cols,double *shared_slices,int n_rows, int n_cols,int n_slices,int tile_x,int tile_y, int tile_z){
#ifdef CUDA_DARTS_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.x==0)){
printf("copy rows begin\n");
printf("gridDim.x=%d,gridDim.y=%d,gridDim.z=%d\n",gridDim.x,gridDim.y,gridDim.z);
printf("blockDim.x=%d,blockDim.y=%d,blockDim.z=%d\n",blockDim.x,blockDim.y,blockDim.z);
printf("tile_x=%d,tile_y=%d,tile_z=%d\n",tile_x,tile_y,tile_z);
}
#endif
int base_global_slice = tile_z * blockIdx.z;
int base_global_row = tile_y * blockIdx.y;
int base_global_col = blockDim.x*blockIdx.x;
int dst_area = n_rows*n_cols;
int s_area = gridDim.y*n_cols*2;
int base_global_idx = base_global_slice*dst_area + base_global_row * n_cols + base_global_col;
int nextRow = base_global_row+1;
bool legalNextRow = nextRow<n_rows;
int tx = threadIdx.x;
bool legalCurCol = (base_global_col + tx)<n_cols;
for(int tz=0;tz<tile_z;++tz){
bool legalCurSlice = (base_global_slice + tz)<n_slices;
int idx_dst =base_global_idx + tz*dst_area+ tx ;
int idx = (base_global_slice+tz)*s_area + blockIdx.y*n_cols*2+blockIdx.x*blockDim.x+ tx ;
if(legalCurCol && legalCurSlice){
shared_rows[idx] = dst[idx_dst];
}
if(legalCurCol && legalCurSlice && legalNextRow){
shared_rows[idx+n_cols] = dst[idx_dst+n_cols];
}
}
__syncthreads();
#ifdef CUDA_CUDA_DEBUG
if(blockIdx.y==0 && blockIdx.x==0 &&blockIdx.z==0 ){
if((threadIdx.x==0 || threadIdx.x==1 || threadIdx.x==2 ) && threadIdx.y==0){
int addr0 = base_global_idx+0*dst_area+threadIdx.x;
int addr = base_global_slice+blockIdx.x*blockDim.x + threadIdx.x;
int addr1 = s_area*(base_global_slice+1)+n_cols+blockIdx.x*blockDim.x+ threadIdx.x;
int addr2 = s_area*(base_global_slice+2)+n_cols+blockIdx.x*blockDim.x+ threadIdx.x;
printf("blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,dst : z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,0,addr0,dst[addr0]);
printf("blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_rows: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,0,addr,shared_rows[addr]);
printf("blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_rows: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,1,addr1,shared_rows[addr1]);
printf("blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_rows: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,2,addr2,shared_rows[addr2]);
}
if(threadIdx.x==0 && threadIdx.y==0){
int addr = 2*s_area+n_cols+256;
int addr1 = 2*dst_area+n_cols+256;
printf("shared_rows: addr:%d, val:%f\n", addr, shared_rows[addr]);
printf("dst : addr:%d, val:%f\n", addr1, dst[addr1]);
}
}
#endif
#ifdef CUDA_DARTS_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.x==0)){
printf("copy rows end!\n");
}
#endif
} |
ad54fa0fdb99bb9df34da538563f66a05f141618.hip | // !!! This is a file automatically generated by hipify!!!
//Includes for IntelliSense
#define _SIZE_T_DEFINED
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
#include <math.h>
#include <cfloat>
#include "..\Activation\ActivationFunction.cu"
extern "C"
{
__global__ void FullyConnectedForwardKernel(
ActivationFunctionEnum activationFunction,
float *inputPtr,
float *outputPtr,
float *weightPtr,
float *neuronInputPtr,
float *biasPtr,
float *dropoutMaskPtr,
float dropout,
int prevLayerSize,
int thisLayerSize
)
{
// i: prev. layer neuron id
// j: current layer neuron id
int i;
		int j = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
				+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (j < thisLayerSize)
{
if (dropoutMaskPtr[j])
{
neuronInputPtr[j] = 0;
outputPtr[j] = 0;
}
else
{
float sum = 0.0;
int index = j;
for (i = 0; i < prevLayerSize; i++) {
sum += weightPtr[index] * inputPtr[i];
index += thisLayerSize;
}
// add bias
sum += biasPtr[j];
// sum neuron input
neuronInputPtr[j] = sum;
// set output value
outputPtr[j] = Evaluate(activationFunction, sum) / (1.0f - dropout);
}
}
}
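	// Launch sketch for reference (block size and grid shape here are assumptions,
	// not taken from the host code): the j index above flattens a 2-D grid of 1-D
	// blocks, so a matching launch could look roughly like
	//   dim3 block(256);
	//   int needed = (thisLayerSize + 255) / 256;
	//   dim3 grid(min(needed, 65535), (needed + 65534) / 65535);
	//   hipLaunchKernelGGL(FullyConnectedForwardKernel, grid, block, 0, 0, /* ...args */);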
__global__ void FullyConnectedForwardBatchKernel(
ActivationFunctionEnum activationFunction,
float *outputPtr,
float *neuronInputPtr,
float *biasPtr,
float *dropoutMaskPtr,
float dropout,
int thisLayerSize,
int batchSize
)
{
		int threadId = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
				+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
int neuronId = threadId % thisLayerSize;
if (threadId < thisLayerSize * batchSize)
{
// add bias to neuron input and apply dropout mask
neuronInputPtr[threadId] = !dropoutMaskPtr[neuronId] * (neuronInputPtr[threadId] + biasPtr[neuronId]);
// set output value
outputPtr[threadId] = !dropoutMaskPtr[neuronId] * (Evaluate(activationFunction, neuronInputPtr[threadId]) / (1.0f - dropout));
}
}
__global__ void OneToOneForwardKernel(
ActivationFunctionEnum activationFunction,
float *inputPtr,
float *outputPtr,
int layerSize
)
{
// i: neuron id
		int i = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
				+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (i < layerSize)
outputPtr[i] = Evaluate(activationFunction, inputPtr[i]);
}
__global__ void GaussianForwardSamplingKernel(
ActivationFunctionEnum activationFunction,
float* means,
float* sigmas,
float* noisyInput,
float* outputPtr,
float* randomNormalPtr,
int prevLayerSize,
int thisLayerSize
)
{
// j: current layer neuron id
		int j = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
				+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (j < thisLayerSize)
{
float mu = means[j], sigma = sigmas[j], x = randomNormalPtr[j];
// input means after applying noise
noisyInput[j] = mu + x * powf(sigma, 2);
// squashing function applied to noisy input
outputPtr[j] = Evaluate(activationFunction, noisyInput[j]);
}
}
__global__ void GaussianMinMaxField(float* input, int inputCount, float* mins, float* maxes)
{
		int i = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
				+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (i < inputCount)
{
mins[i] = fminf(mins[i], input[i]);
maxes[i] = fmaxf(maxes[i], input[i]);
}
}
__global__ void GaussianResetPriorStats(int inputCount, float* mins, float* maxes)
{
		int i = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
				+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (i < inputCount)
{
mins[i] = FLT_MAX;
maxes[i] = FLT_MIN;
}
}
__global__ void GaussianSamplePrior(float* input, int inputCount, float* mins, float* maxes, float* randomUniform)
{
		int i = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
				+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (i < inputCount)
{
float diff = maxes[i] - mins[i];
input[i] = randomUniform[i] * diff + mins[i];
}
}
__global__ void NegativeCorrelationForwardResetKernel(
float* outputPtr,
int thisLayerSize
)
{
// j: current layer neuron id
		int j = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
				+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (j < thisLayerSize)
{
outputPtr[j] = 0;
}
}
__global__ void NegativeCorrelationForwardSumKernel(
float* inputPtr,
float* outputPtr,
int thisLayerSize
)
{
// j: current layer neuron id
		int j = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
				+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (j < thisLayerSize)
{
outputPtr[j] += inputPtr[j];
}
}
__global__ void NegativeCorrelationForwardDivideKernel(
float* outputPtr,
int thisLayerSize,
int inputModelCount
)
{
// j: current layer neuron id
		int j = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
				+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (j < thisLayerSize)
{
outputPtr[j] /= (float)inputModelCount;
}
}
}
| ad54fa0fdb99bb9df34da538563f66a05f141618.cu | //Includes for IntelliSense
#define _SIZE_T_DEFINED
#include <cuda.h>
#include <device_launch_parameters.h>
#include <texture_fetch_functions.h>
#include "float.h"
#include <builtin_types.h>
#include <vector_functions.h>
#include <math.h>
#include <cfloat>
#include "..\Activation\ActivationFunction.cu"
extern "C"
{
__global__ void FullyConnectedForwardKernel(
ActivationFunctionEnum activationFunction,
float *inputPtr,
float *outputPtr,
float *weightPtr,
float *neuronInputPtr,
float *biasPtr,
float *dropoutMaskPtr,
float dropout,
int prevLayerSize,
int thisLayerSize
)
{
// i: prev. layer neuron id
// j: current layer neuron id
int i;
		int j = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
				+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (j < thisLayerSize)
{
if (dropoutMaskPtr[j])
{
neuronInputPtr[j] = 0;
outputPtr[j] = 0;
}
else
{
float sum = 0.0;
int index = j;
for (i = 0; i < prevLayerSize; i++) {
sum += weightPtr[index] * inputPtr[i];
index += thisLayerSize;
}
// add bias
sum += biasPtr[j];
// sum neuron input
neuronInputPtr[j] = sum;
// set output value
outputPtr[j] = Evaluate(activationFunction, sum) / (1.0f - dropout);
}
}
}
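	// Launch sketch for reference (block size and grid shape here are assumptions,
	// not taken from the host code): the j index above flattens a 2-D grid of 1-D
	// blocks, so a matching launch could look roughly like
	//   dim3 block(256);
	//   int needed = (thisLayerSize + 255) / 256;
	//   dim3 grid(min(needed, 65535), (needed + 65534) / 65535);
	//   FullyConnectedForwardKernel<<<grid, block>>>(/* ...args */);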
__global__ void FullyConnectedForwardBatchKernel(
ActivationFunctionEnum activationFunction,
float *outputPtr,
float *neuronInputPtr,
float *biasPtr,
float *dropoutMaskPtr,
float dropout,
int thisLayerSize,
int batchSize
)
{
		int threadId = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
				+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
int neuronId = threadId % thisLayerSize;
if (threadId < thisLayerSize * batchSize)
{
// add bias to neuron input and apply dropout mask
neuronInputPtr[threadId] = !dropoutMaskPtr[neuronId] * (neuronInputPtr[threadId] + biasPtr[neuronId]);
// set output value
outputPtr[threadId] = !dropoutMaskPtr[neuronId] * (Evaluate(activationFunction, neuronInputPtr[threadId]) / (1.0f - dropout));
}
}
__global__ void OneToOneForwardKernel(
ActivationFunctionEnum activationFunction,
float *inputPtr,
float *outputPtr,
int layerSize
)
{
// i: neuron id
		int i = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
				+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (i < layerSize)
outputPtr[i] = Evaluate(activationFunction, inputPtr[i]);
}
__global__ void GaussianForwardSamplingKernel(
ActivationFunctionEnum activationFunction,
float* means,
float* sigmas,
float* noisyInput,
float* outputPtr,
float* randomNormalPtr,
int prevLayerSize,
int thisLayerSize
)
{
// j: current layer neuron id
		int j = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
				+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (j < thisLayerSize)
{
float mu = means[j], sigma = sigmas[j], x = randomNormalPtr[j];
// input means after applying noise
noisyInput[j] = mu + x * powf(sigma, 2);
// squashing function applied to noisy input
outputPtr[j] = Evaluate(activationFunction, noisyInput[j]);
}
}
__global__ void GaussianMinMaxField(float* input, int inputCount, float* mins, float* maxes)
{
		int i = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
				+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (i < inputCount)
{
mins[i] = fminf(mins[i], input[i]);
maxes[i] = fmaxf(maxes[i], input[i]);
}
}
__global__ void GaussianResetPriorStats(int inputCount, float* mins, float* maxes)
{
		int i = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
				+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (i < inputCount)
{
mins[i] = FLT_MAX;
maxes[i] = FLT_MIN;
}
}
__global__ void GaussianSamplePrior(float* input, int inputCount, float* mins, float* maxes, float* randomUniform)
{
		int i = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
				+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (i < inputCount)
{
float diff = maxes[i] - mins[i];
input[i] = randomUniform[i] * diff + mins[i];
}
}
__global__ void NegativeCorrelationForwardResetKernel(
float* outputPtr,
int thisLayerSize
)
{
// j: current layer neuron id
		int j = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
				+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (j < thisLayerSize)
{
outputPtr[j] = 0;
}
}
__global__ void NegativeCorrelationForwardSumKernel(
float* inputPtr,
float* outputPtr,
int thisLayerSize
)
{
// j: current layer neuron id
		int j = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
				+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (j < thisLayerSize)
{
outputPtr[j] += inputPtr[j];
}
}
__global__ void NegativeCorrelationForwardDivideKernel(
float* outputPtr,
int thisLayerSize,
int inputModelCount
)
{
// j: current layer neuron id
		int j = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
				+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (j < thisLayerSize)
{
outputPtr[j] /= (float)inputModelCount;
}
}
}
|
ec72b801675b587a974e3e9af32bc4383c017976.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/customized/kernels/nd_index_slice_kernels.h"
#include "oneflow/core/kernel/util/cuda_kernel_util.h"
namespace oneflow {
namespace {
template<typename T, typename I>
__global__ void CudaGatherNd(NdIndexSliceArgs<T, I> args, const I* indices, const T* dense,
T* slices) {
DoGatherNd(args.num_slices * args.slice_size, args.slice_size, args.index_ndims, args.dense_shape,
indices, dense, slices);
}
template<typename T, typename I>
__global__ void CudaScatterNdAdd(NdIndexSliceArgs<T, I> args, const I* indices, const T* slices,
T* dense) {
DoScatterNdAdd<DeviceType::kGPU>(args.num_slices * args.slice_size, args.slice_size,
args.index_ndims, args.dense_shape, indices, slices, dense);
}
template<typename T, typename I>
__global__ void CudaZeroByNdIndex(NdIndexSliceArgs<T, I> args, const I* indices, T* dense) {
DoZeroByNdIndex(args.num_slices * args.slice_size, args.slice_size, args.index_ndims,
args.dense_shape, indices, dense);
}
} // namespace
template<typename T, typename I>
struct GatherNdFunctor<DeviceType::kGPU, T, I> final {
void operator()(DeviceCtx* ctx, const NdIndexSliceArgs<T, I>& args, const I* indices,
const T* dense, T* slices) const {
RUN_CUDA_KERNEL((CudaGatherNd<T, I>), ctx, args.num_slices * args.slice_size, args, indices,
dense, slices);
}
};
template<typename T, typename I>
struct ScatterNdAddFunctor<DeviceType::kGPU, T, I> final {
void operator()(DeviceCtx* ctx, const NdIndexSliceArgs<T, I>& args, const I* indices,
const T* slices, T* dense) const {
RUN_CUDA_KERNEL((CudaScatterNdAdd<T, I>), ctx, args.num_slices * args.slice_size, args, indices,
slices, dense);
}
};
template<typename T, typename I>
struct ZeroByNdIndexFunctor<DeviceType::kGPU, T, I> final {
void operator()(DeviceCtx* ctx, const NdIndexSliceArgs<T, I>& args, const I* indices,
T* dense) const {
RUN_CUDA_KERNEL((CudaZeroByNdIndex<T, I>), ctx, args.num_slices * args.slice_size, args,
indices, dense);
}
};
template<typename T>
struct DeviceAdd<DeviceType::kGPU, T> {
__device__ __forceinline__ static void Invoke(const T* x, T* y) { gpu_atomic_add(y, *x); }
};
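// Note on the atomic add above: a scatter-nd add can accumulate several slice rows
// into the same dense element, so DeviceAdd goes through gpu_atomic_add rather than
// a plain +=. Accordingly, the scatter-add kernels are only registered below for the
// types in GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ (plus half behind the arch/version guard).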
#define GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ \
FLOATING_DATA_TYPE_SEQ \
OF_PP_MAKE_TUPLE_SEQ(int32_t, DataType::kInt32)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_GATHER_ND_FUNCTOR, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_SCATTER_ND_ADD_FUNCTOR, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_ZERO_BY_ND_INDEX_FUNCTOR, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_GATHER_ND_KERNELS, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_SCATTER_ND_KERNELS, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_SCATTER_ND_LIKE_KERNELS, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_TENSOR_GATHER_ND_UPDATE_KERNELS, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_TENSOR_GATHER_ND_ADD_KERNELS, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 && TORCH_HIP_VERSION >= 10000
template<>
struct DeviceAdd<DeviceType::kGPU, float16> {
__device__ __forceinline__ static void Invoke(const float16* x, float16* y) {
gpu_atomic_add(reinterpret_cast<half*>(y), *(reinterpret_cast<const half*>(x)));
}
};
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_ND_INDEX_SLICE_FUNCTORS, (DeviceType::kGPU),
FLOAT16_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_ND_INDEX_SLICE_KERNELS, (DeviceType::kGPU),
FLOAT16_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
#endif
} // namespace oneflow
| ec72b801675b587a974e3e9af32bc4383c017976.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/customized/kernels/nd_index_slice_kernels.h"
#include "oneflow/core/kernel/util/cuda_kernel_util.h"
namespace oneflow {
namespace {
template<typename T, typename I>
__global__ void CudaGatherNd(NdIndexSliceArgs<T, I> args, const I* indices, const T* dense,
T* slices) {
DoGatherNd(args.num_slices * args.slice_size, args.slice_size, args.index_ndims, args.dense_shape,
indices, dense, slices);
}
template<typename T, typename I>
__global__ void CudaScatterNdAdd(NdIndexSliceArgs<T, I> args, const I* indices, const T* slices,
T* dense) {
DoScatterNdAdd<DeviceType::kGPU>(args.num_slices * args.slice_size, args.slice_size,
args.index_ndims, args.dense_shape, indices, slices, dense);
}
template<typename T, typename I>
__global__ void CudaZeroByNdIndex(NdIndexSliceArgs<T, I> args, const I* indices, T* dense) {
DoZeroByNdIndex(args.num_slices * args.slice_size, args.slice_size, args.index_ndims,
args.dense_shape, indices, dense);
}
} // namespace
template<typename T, typename I>
struct GatherNdFunctor<DeviceType::kGPU, T, I> final {
void operator()(DeviceCtx* ctx, const NdIndexSliceArgs<T, I>& args, const I* indices,
const T* dense, T* slices) const {
RUN_CUDA_KERNEL((CudaGatherNd<T, I>), ctx, args.num_slices * args.slice_size, args, indices,
dense, slices);
}
};
template<typename T, typename I>
struct ScatterNdAddFunctor<DeviceType::kGPU, T, I> final {
void operator()(DeviceCtx* ctx, const NdIndexSliceArgs<T, I>& args, const I* indices,
const T* slices, T* dense) const {
RUN_CUDA_KERNEL((CudaScatterNdAdd<T, I>), ctx, args.num_slices * args.slice_size, args, indices,
slices, dense);
}
};
template<typename T, typename I>
struct ZeroByNdIndexFunctor<DeviceType::kGPU, T, I> final {
void operator()(DeviceCtx* ctx, const NdIndexSliceArgs<T, I>& args, const I* indices,
T* dense) const {
RUN_CUDA_KERNEL((CudaZeroByNdIndex<T, I>), ctx, args.num_slices * args.slice_size, args,
indices, dense);
}
};
template<typename T>
struct DeviceAdd<DeviceType::kGPU, T> {
__device__ __forceinline__ static void Invoke(const T* x, T* y) { gpu_atomic_add(y, *x); }
};
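// Note on the atomic add above: a scatter-nd add can accumulate several slice rows
// into the same dense element, so DeviceAdd goes through gpu_atomic_add rather than
// a plain +=. Accordingly, the scatter-add kernels are only registered below for the
// types in GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ (plus half behind the arch/version guard).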
#define GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ \
FLOATING_DATA_TYPE_SEQ \
OF_PP_MAKE_TUPLE_SEQ(int32_t, DataType::kInt32)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_GATHER_ND_FUNCTOR, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_SCATTER_ND_ADD_FUNCTOR, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_ZERO_BY_ND_INDEX_FUNCTOR, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_GATHER_ND_KERNELS, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_SCATTER_ND_KERNELS, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_SCATTER_ND_LIKE_KERNELS, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_TENSOR_GATHER_ND_UPDATE_KERNELS, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_TENSOR_GATHER_ND_ADD_KERNELS, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 && CUDA_VERSION >= 10000
template<>
struct DeviceAdd<DeviceType::kGPU, float16> {
__device__ __forceinline__ static void Invoke(const float16* x, float16* y) {
gpu_atomic_add(reinterpret_cast<half*>(y), *(reinterpret_cast<const half*>(x)));
}
};
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_ND_INDEX_SLICE_FUNCTORS, (DeviceType::kGPU),
FLOAT16_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_ND_INDEX_SLICE_KERNELS, (DeviceType::kGPU),
FLOAT16_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
#endif
} // namespace oneflow
|
434830c94acc31811bc74733cde7e2eea89cf6c1.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2017-2020 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <queue>
#include <utility>
#include <vector>
#include "xgboost/host_device_vector.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/json.h"
#include "../common/io.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/timer.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "updater_gpu_common.cuh"
#include "constraints.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "gpu_hist/histogram.cuh"
#include "gpu_hist/evaluate_splits.cuh"
namespace xgboost {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public XGBoostParameter<GPUHistMakerTrainParam> {
bool single_precision_histogram;
bool deterministic_histogram;
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe(
"Use single precision to build histograms.");
    DMLC_DECLARE_FIELD(deterministic_histogram).set_default(true).describe(
        "Pre-round the gradient to obtain a deterministic gradient histogram.");
    DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
        "Check if all distributed trees are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
struct ExpandEntry {
int nid;
int depth;
DeviceSplitCandidate split;
uint64_t timestamp;
ExpandEntry() = default;
ExpandEntry(int nid, int depth, DeviceSplitCandidate split,
uint64_t timestamp)
: nid(nid), depth(depth), split(std::move(split)), timestamp(timestamp) {}
bool IsValid(const TrainParam& param, int num_leaves) const {
if (split.loss_chg <= kRtEps) return false;
if (split.left_sum.GetHess() == 0 || split.right_sum.GetHess() == 0) {
return false;
}
if (split.loss_chg < param.min_split_loss) { return false; }
if (param.max_depth > 0 && depth == param.max_depth) {return false; }
if (param.max_leaves > 0 && num_leaves == param.max_leaves) { return false; }
return true;
}
static bool ChildIsValid(const TrainParam& param, int depth, int num_leaves) {
if (param.max_depth > 0 && depth >= param.max_depth) return false;
if (param.max_leaves > 0 && num_leaves >= param.max_leaves) return false;
return true;
}
friend std::ostream& operator<<(std::ostream& os, const ExpandEntry& e) {
os << "ExpandEntry: \n";
os << "nidx: " << e.nid << "\n";
os << "depth: " << e.depth << "\n";
os << "loss: " << e.split.loss_chg << "\n";
os << "left_sum: " << e.split.left_sum << "\n";
os << "right_sum: " << e.split.right_sum << "\n";
return os;
}
};
inline static bool DepthWise(const ExpandEntry& lhs, const ExpandEntry& rhs) {
if (lhs.depth == rhs.depth) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.depth > rhs.depth; // favor small depth
}
}
inline static bool LossGuide(const ExpandEntry& lhs, const ExpandEntry& rhs) {
if (lhs.split.loss_chg == rhs.split.loss_chg) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.split.loss_chg < rhs.split.loss_chg; // favor large loss_chg
}
}
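// These comparators order the ExpandEntry priority queue used further down: with
// DepthWise the shallowest node is popped first (ties go to the older timestamp),
// while with LossGuide the node with the largest loss_chg is popped first, matching
// the depth-wise and loss-guided grow policies.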
/**
* \struct DeviceHistogram
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26>
class DeviceHistogram {
private:
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
dh::device_vector<typename GradientSumT::ValueT> data_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2,
"Number of items in gradient type should be 2.");
public:
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
auto d_data = data_.data().get();
dh::LaunchN(device_id_, data_.size(),
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
nidx_map_.clear();
}
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend();
}
int Bins() const {
return n_bins_;
}
size_t HistogramSize() const {
return n_bins_ * kNumItemsInGradientSum;
}
dh::device_vector<typename GradientSumT::ValueT>& Data() {
return data_;
}
void AllocateHistogram(int nidx) {
if (HistogramExists(nidx)) return;
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize();
if (data_.size() >= kStopGrowingSize) {
// Recycle histogram memory
if (new_used_size <= data_.size()) {
// no need to remove old node, just insert the new one.
nidx_map_[nidx] = used_size;
// memset histogram size in bytes
} else {
std::pair<int, size_t> old_entry = *nidx_map_.begin();
nidx_map_.erase(old_entry.first);
nidx_map_[nidx] = old_entry.second;
}
// Zero recycled memory
auto d_data = data_.data().get() + nidx_map_[nidx];
dh::LaunchN(device_id_, n_bins_ * 2,
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
} else {
// Append new node histogram
nidx_map_[nidx] = used_size;
// Check there is enough memory for another histogram node
if (data_.size() < new_used_size + HistogramSize()) {
size_t new_required_memory =
          std::max(data_.size() * 2, HistogramSize());
data_.resize(new_required_memory);
}
}
CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
}
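  // Rough sizing note (illustrative arithmetic): kStopGrowingSize caps data_ at
  // 1 << 26 ValueT entries, i.e. roughly 256 MB of float or 512 MB of double
  // histogram storage; once that cap is reached the buffer stops growing and old
  // node slots are reused (and re-zeroed) instead.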
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
auto ptr = data_.data().get() + nidx_map_[nidx];
return common::Span<GradientSumT>(
reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
};
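// Minimal usage sketch (illustrative only, mirroring how GPUHistMakerDevice below
// drives this class):
//   DeviceHistogram<GradientSumT> hist;
//   hist.Init(device_id, page->Cuts().TotalBins());
//   hist.AllocateHistogram(nidx);               // fresh or recycled, zeroed storage
//   auto d_hist = hist.GetNodeHistogram(nidx);  // span of Bins() GradientSumT entries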
struct CalcWeightTrainParam {
float min_child_weight;
float reg_alpha;
float reg_lambda;
float max_delta_step;
float learning_rate;
XGBOOST_DEVICE explicit CalcWeightTrainParam(const TrainParam& p)
: min_child_weight(p.min_child_weight),
reg_alpha(p.reg_alpha),
reg_lambda(p.reg_lambda),
max_delta_step(p.max_delta_step),
learning_rate(p.learning_rate) {}
};
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
int device_id;
EllpackPageImpl* page;
BatchParam batch_param;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogram<GradientSumT> hist{};
common::Span<GradientPair> gpair;
dh::caching_device_vector<int> monotone_constraints;
dh::caching_device_vector<bst_float> prediction_cache;
/*! \brief Sum gradient for each node. */
std::vector<GradientPair> node_sum_gradients;
TrainParam param;
bool deterministic_histogram;
GradientSumT histogram_rounding;
std::vector<hipStream_t> streams{};
common::Monitor monitor;
std::vector<ValueConstraint> node_value_constraints;
common::ColumnSampler column_sampler;
FeatureInteractionConstraintDevice interaction_constraints;
using ExpandQueue =
std::priority_queue<ExpandEntry, std::vector<ExpandEntry>,
std::function<bool(ExpandEntry, ExpandEntry)>>;
std::unique_ptr<ExpandQueue> qexpand;
std::unique_ptr<GradientBasedSampler> sampler;
GPUHistMakerDevice(int _device_id,
EllpackPageImpl* _page,
bst_uint _n_rows,
TrainParam _param,
uint32_t column_sampler_seed,
uint32_t n_features,
bool deterministic_histogram,
BatchParam _batch_param)
: device_id(_device_id),
page(_page),
param(std::move(_param)),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features),
deterministic_histogram{deterministic_histogram},
batch_param(_batch_param) {
sampler.reset(new GradientBasedSampler(
page, _n_rows, batch_param, param.subsample, param.sampling_method));
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
}
node_sum_gradients.resize(param.MaxNodes());
// Init histogram
hist.Init(device_id, page->Cuts().TotalBins());
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(device_id));
}
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(hipSetDevice(device_id));
for (auto& stream : streams) {
dh::safe_cuda(hipStreamDestroy(stream));
}
}
// Get vector of at least n initialised streams
std::vector<hipStream_t>& GetStreams(int n) {
if (n > streams.size()) {
for (auto& stream : streams) {
dh::safe_cuda(hipStreamDestroy(stream));
}
streams.clear();
streams.resize(n);
for (auto& stream : streams) {
dh::safe_cuda(hipStreamCreate(&stream));
}
}
return streams;
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
if (param.grow_policy == TrainParam::kLossGuide) {
qexpand.reset(new ExpandQueue(LossGuide));
} else {
qexpand.reset(new ExpandQueue(DepthWise));
}
this->column_sampler.Init(num_columns, param.colsample_bynode,
param.colsample_bylevel, param.colsample_bytree);
dh::safe_cuda(hipSetDevice(device_id));
this->interaction_constraints.Reset();
std::fill(node_sum_gradients.begin(), node_sum_gradients.end(),
GradientPair());
auto sample = sampler->Sample(dh_gpair->DeviceSpan(), dmat);
page = sample.page;
gpair = sample.gpair;
if (deterministic_histogram) {
histogram_rounding = CreateRoundingFactor<GradientSumT>(this->gpair);
} else {
histogram_rounding = GradientSumT{0.0, 0.0};
}
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, sample.sample_rows));
hist.Reset();
}
DeviceSplitCandidate EvaluateRootSplit(GradientPair root_sum) {
int nidx = 0;
dh::TemporaryArray<DeviceSplitCandidate> splits_out(1);
GPUTrainingParam gpu_param(param);
auto sampled_features = column_sampler.GetFeatureSet(0);
sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> feature_set =
interaction_constraints.Query(sampled_features->DeviceSpan(), nidx);
auto matrix = page->GetDeviceAccessor(device_id);
EvaluateSplitInputs<GradientSumT> inputs{
nidx,
{root_sum.GetGrad(), root_sum.GetHess()},
gpu_param,
feature_set,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(nidx),
node_value_constraints[nidx],
dh::ToSpan(monotone_constraints)};
EvaluateSingleSplit(dh::ToSpan(splits_out), inputs);
std::vector<DeviceSplitCandidate> result(1);
dh::safe_cuda(hipMemcpy(result.data(), splits_out.data().get(),
sizeof(DeviceSplitCandidate) * splits_out.size(),
hipMemcpyDeviceToHost));
return result.front();
}
std::vector<DeviceSplitCandidate> EvaluateLeftRightSplits(
ExpandEntry candidate, int left_nidx, int right_nidx,
const RegTree& tree) {
dh::TemporaryArray<DeviceSplitCandidate> splits_out(2);
GPUTrainingParam gpu_param(param);
auto left_sampled_features =
column_sampler.GetFeatureSet(tree.GetDepth(left_nidx));
left_sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> left_feature_set =
interaction_constraints.Query(left_sampled_features->DeviceSpan(),
left_nidx);
auto right_sampled_features =
column_sampler.GetFeatureSet(tree.GetDepth(right_nidx));
right_sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> right_feature_set =
interaction_constraints.Query(right_sampled_features->DeviceSpan(),
                                      right_nidx);
auto matrix = page->GetDeviceAccessor(device_id);
EvaluateSplitInputs<GradientSumT> left{left_nidx,
{candidate.split.left_sum.GetGrad(),
candidate.split.left_sum.GetHess()},
gpu_param,
left_feature_set,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(left_nidx),
node_value_constraints[left_nidx],
dh::ToSpan(monotone_constraints)};
EvaluateSplitInputs<GradientSumT> right{
right_nidx,
{candidate.split.right_sum.GetGrad(),
candidate.split.right_sum.GetHess()},
gpu_param,
right_feature_set,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(right_nidx),
node_value_constraints[right_nidx],
dh::ToSpan(monotone_constraints)};
EvaluateSplits(dh::ToSpan(splits_out), left, right);
std::vector<DeviceSplitCandidate> result(2);
dh::safe_cuda(hipMemcpy(result.data(), splits_out.data().get(),
sizeof(DeviceSplitCandidate) * splits_out.size(),
hipMemcpyDeviceToHost));
return result;
}
void BuildHist(int nidx) {
hist.AllocateHistogram(nidx);
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
BuildGradientHistogram(page->GetDeviceAccessor(device_id), gpair, d_ridx, d_node_hist,
histogram_rounding);
}
void SubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(device_id, page->Cuts().TotalBins(), [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
}
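  // Why the subtraction works: every row of the parent lands in exactly one child,
  // so per histogram bin parent[b] = left[b] + right[b], and the missing sibling is
  // recovered as parent[b] - built[b] by the LaunchN lambda above.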
bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
// Make sure histograms are already allocated
hist.AllocateHistogram(nidx_subtraction);
return hist.HistogramExists(nidx_histogram) &&
hist.HistogramExists(nidx_parent);
}
void UpdatePosition(int nidx, RegTree::Node split_node) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->UpdatePosition(
nidx, split_node.LeftChild(), split_node.RightChild(),
[=] __device__(bst_uint ridx) {
// given a row index, returns the node id it belongs to
bst_float cut_value =
d_matrix.GetFvalue(ridx, split_node.SplitIndex());
// Missing value
int new_position = 0;
if (isnan(cut_value)) {
new_position = split_node.DefaultChild();
} else {
if (cut_value <= split_node.SplitCond()) {
new_position = split_node.LeftChild();
} else {
new_position = split_node.RightChild();
}
}
return new_position;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat) {
dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
dh::safe_cuda(hipMemcpy(d_nodes.data().get(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
hipMemcpyHostToDevice));
if (row_partitioner->GetRows().size() != p_fmat->Info().num_row_) {
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, p_fmat->Info().num_row_));
}
if (page->n_rows == p_fmat->Info().num_row_) {
FinalisePositionInPage(page, dh::ToSpan(d_nodes));
} else {
for (auto& batch : p_fmat->GetBatches<EllpackPage>(batch_param)) {
FinalisePositionInPage(batch.Impl(), dh::ToSpan(d_nodes));
}
}
}
void FinalisePositionInPage(EllpackPageImpl* page, const common::Span<RegTree::Node> d_nodes) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->FinalisePosition(
[=] __device__(size_t row_id, int position) {
if (!d_matrix.IsInRange(row_id)) {
return RowPartitioner::kIgnoredTreePosition;
}
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
if (element <= node.SplitCond()) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
return position;
});
}
void UpdatePredictionCache(bst_float* out_preds_d) {
dh::safe_cuda(hipSetDevice(device_id));
auto d_ridx = row_partitioner->GetRows();
if (prediction_cache.size() != d_ridx.size()) {
prediction_cache.resize(d_ridx.size());
dh::safe_cuda(hipMemcpyAsync(prediction_cache.data().get(), out_preds_d,
prediction_cache.size() * sizeof(bst_float),
hipMemcpyDefault));
}
CalcWeightTrainParam param_d(param);
dh::TemporaryArray<GradientPair> device_node_sum_gradients(node_sum_gradients.size());
dh::safe_cuda(
hipMemcpyAsync(device_node_sum_gradients.data().get(), node_sum_gradients.data(),
sizeof(GradientPair) * node_sum_gradients.size(),
hipMemcpyHostToDevice));
auto d_position = row_partitioner->GetPosition();
auto d_node_sum_gradients = device_node_sum_gradients.data().get();
auto d_prediction_cache = prediction_cache.data().get();
dh::LaunchN(
device_id, prediction_cache.size(), [=] __device__(int local_idx) {
int pos = d_position[local_idx];
bst_float weight = CalcWeight(param_d, d_node_sum_gradients[pos]);
d_prediction_cache[d_ridx[local_idx]] +=
weight * param_d.learning_rate;
});
dh::safe_cuda(hipMemcpy(
out_preds_d, prediction_cache.data().get(),
prediction_cache.size() * sizeof(bst_float), hipMemcpyDefault));
row_partitioner.reset();
}
void AllReduceHist(int nidx, dh::AllReducer* reducer) {
monitor.StartCuda("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
reducer->AllReduceSum(
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
page->Cuts().TotalBins() * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
reducer->Synchronize();
monitor.StopCuda("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(const ExpandEntry &candidate, int nidx_left,
int nidx_right, dh::AllReducer* reducer) {
auto build_hist_nidx = nidx_left;
auto subtraction_trick_nidx = nidx_right;
// Decide whether to build the left histogram or right histogram
// Use sum of Hessian as a heuristic to select node with fewest training instances
bool fewer_right = candidate.split.right_sum.GetHess() < candidate.split.left_sum.GetHess();
if (fewer_right) {
std::swap(build_hist_nidx, subtraction_trick_nidx);
}
this->BuildHist(build_hist_nidx);
this->AllReduceHist(build_hist_nidx, reducer);
// Check whether we can use the subtraction trick to calculate the other
bool do_subtraction_trick = this->CanDoSubtractionTrick(
candidate.nid, build_hist_nidx, subtraction_trick_nidx);
if (do_subtraction_trick) {
// Calculate other histogram using subtraction trick
this->SubtractionTrick(candidate.nid, build_hist_nidx,
subtraction_trick_nidx);
} else {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer);
}
}
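  // Example of the heuristic above: if the right child holds only a small fraction
  // of the Hessian mass (roughly, of the rows), its histogram is built directly and
  // the larger left child's histogram is obtained via SubtractionTrick, so the cost
  // per split is about one pass over the smaller child plus one pass over the bins.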
void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
node_value_constraints.resize(tree.GetNodes().size());
auto parent_sum = candidate.split.left_sum + candidate.split.right_sum;
auto base_weight = node_value_constraints[candidate.nid].CalcWeight(
param, parent_sum);
auto left_weight = node_value_constraints[candidate.nid].CalcWeight(
param, candidate.split.left_sum) *
param.learning_rate;
auto right_weight = node_value_constraints[candidate.nid].CalcWeight(
param, candidate.split.right_sum) *
param.learning_rate;
tree.ExpandNode(candidate.nid, candidate.split.findex,
candidate.split.fvalue, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.GetHess(),
candidate.split.left_sum.GetHess(), candidate.split.right_sum.GetHess());
// Set up child constraints
node_value_constraints.resize(tree.GetNodes().size());
node_value_constraints[candidate.nid].SetChild(
param, tree[candidate.nid].SplitIndex(), candidate.split.left_sum,
candidate.split.right_sum,
&node_value_constraints[tree[candidate.nid].LeftChild()],
&node_value_constraints[tree[candidate.nid].RightChild()]);
node_sum_gradients[tree[candidate.nid].LeftChild()] =
candidate.split.left_sum;
node_sum_gradients[tree[candidate.nid].RightChild()] =
candidate.split.right_sum;
interaction_constraints.Split(
candidate.nid, tree[candidate.nid].SplitIndex(),
tree[candidate.nid].LeftChild(),
tree[candidate.nid].RightChild());
}
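// Computes the global root gradient sum (device reduction plus a rabit all-reduce
// over its two float components), builds and all-reduces the root histogram, sets
// the root leaf value and statistics, and pushes the first candidate split.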
void InitRoot(RegTree* p_tree, dh::AllReducer* reducer) {
constexpr bst_node_t kRootNIdx = 0;
dh::XGBCachingDeviceAllocator<char> alloc;
GradientPair root_sum = thrust::reduce(
thrust::hip::par(alloc),
thrust::device_ptr<GradientPair const>(gpair.data()),
thrust::device_ptr<GradientPair const>(gpair.data() + gpair.size()));
rabit::Allreduce<rabit::op::Sum, float>(reinterpret_cast<float*>(&root_sum),
2);
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, reducer);
// Remember root stats
node_sum_gradients[kRootNIdx] = root_sum;
p_tree->Stat(kRootNIdx).sum_hess = root_sum.GetHess();
auto weight = CalcWeight(param, root_sum);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Initialise root constraint
node_value_constraints.resize(p_tree->GetNodes().size());
// Generate first split
auto split = this->EvaluateRootSplit(root_sum);
qexpand->push(
ExpandEntry(kRootNIdx, p_tree->GetDepth(kRootNIdx), split, 0));
}
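// Main expansion loop: pop the best candidate, apply its split, and, while the
// depth/leaf limits allow, update row positions, build the child histograms and
// evaluate splits for both children, pushing them back onto the queue.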
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat,
RegTree* p_tree, dh::AllReducer* reducer) {
auto& tree = *p_tree;
monitor.StartCuda("Reset");
this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
monitor.StopCuda("Reset");
monitor.StartCuda("InitRoot");
this->InitRoot(p_tree, reducer);
monitor.StopCuda("InitRoot");
auto timestamp = qexpand->size();
auto num_leaves = 1;
while (!qexpand->empty()) {
ExpandEntry candidate = qexpand->top();
qexpand->pop();
if (!candidate.IsValid(param, num_leaves)) {
continue;
}
this->ApplySplit(candidate, p_tree);
num_leaves++;
int left_child_nidx = tree[candidate.nid].LeftChild();
int right_child_nidx = tree[candidate.nid].RightChild();
// Only create child entries if needed
if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx),
num_leaves)) {
monitor.StartCuda("UpdatePosition");
this->UpdatePosition(candidate.nid, (*p_tree)[candidate.nid]);
monitor.StopCuda("UpdatePosition");
monitor.StartCuda("BuildHist");
this->BuildHistLeftRight(candidate, left_child_nidx, right_child_nidx, reducer);
monitor.StopCuda("BuildHist");
monitor.StartCuda("EvaluateSplits");
auto splits = this->EvaluateLeftRightSplits(candidate, left_child_nidx,
right_child_nidx,
*p_tree);
monitor.StopCuda("EvaluateSplits");
qexpand->push(ExpandEntry(left_child_nidx,
tree.GetDepth(left_child_nidx), splits.at(0),
timestamp++));
qexpand->push(ExpandEntry(right_child_nidx,
tree.GetDepth(right_child_nidx),
splits.at(1), timestamp++));
}
}
monitor.StartCuda("FinalisePosition");
this->FinalisePosition(p_tree, p_fmat);
monitor.StopCuda("FinalisePosition");
}
};
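// Host-side driver templated on histogram precision: owns a single
// GPUHistMakerDevice, rescales the learning rate by the number of trees per
// update, and can optionally verify that all workers produced identical trees.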
template <typename GradientSumT>
class GPUHistMakerSpecialised {
public:
GPUHistMakerSpecialised() = default;
void Configure(const Args& args, GenericParameter const* generic_param) {
param_.UpdateAllowUnknown(args);
generic_param_ = generic_param;
hist_maker_param_.UpdateAllowUnknown(args);
dh::CheckComputeCapability();
monitor_.Init("updater_gpu_hist");
}
~GPUHistMakerSpecialised() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) {
monitor_.StartCuda("Update");
// rescale the learning rate according to the number of trees in this update
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
ValueConstraint::Init(¶m_, dmat->Info().num_col_);
// build tree
try {
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(gpair, dmat, tree);
if (hist_maker_param_.debug_synchronize) {
this->CheckTreesSynchronized(tree);
}
}
dh::safe_cuda(hipGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.StopCuda("Update");
}
void InitDataOnce(DMatrix* dmat) {
device_ = generic_param_->gpu_id;
CHECK_GE(device_, 0) << "Must have at least one device";
info_ = &dmat->Info();
reducer_.Init({device_}); // NOLINT
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
BatchParam batch_param{
device_,
param_.max_bin,
generic_param_->gpu_page_size
};
auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl();
dh::safe_cuda(hipSetDevice(device_));
maker.reset(new GPUHistMakerDevice<GradientSumT>(device_,
page,
info_->num_row_,
param_,
column_sampling_seed,
info_->num_col_,
hist_maker_param_.deterministic_histogram,
batch_param));
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(DMatrix* dmat) {
if (!initialised_) {
monitor_.StartCuda("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.StopCuda("InitDataOnce");
}
}
// Only call this method for testing
void CheckTreesSynchronized(RegTree* local_tree) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = rabit::GetRank();
if (rank == 0) {
local_tree->Save(&fs);
}
fs.Seek(0);
rabit::Broadcast(&s_model, 0);
RegTree reference_tree {}; // rank 0 tree
reference_tree.Load(&fs);
CHECK(*local_tree == reference_tree);
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree) {
monitor_.StartCuda("InitData");
this->InitData(p_fmat);
monitor_.StopCuda("InitData");
gpair->SetDevice(device_);
maker->UpdateTree(gpair, p_fmat, p_tree, &reducer_);
}
bool UpdatePredictionCache(const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) {
if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.StartCuda("UpdatePredictionCache");
p_out_preds->SetDevice(device_);
maker->UpdatePredictionCache(p_out_preds->DevicePointer());
monitor_.StopCuda("UpdatePredictionCache");
return true;
}
TrainParam param_; // NOLINT
MetaInfo* info_{}; // NOLINT
std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker; // NOLINT
private:
bool initialised_ { false };
GPUHistMakerTrainParam hist_maker_param_;
GenericParameter const* generic_param_;
dh::AllReducer reducer_;
DMatrix* p_last_fmat_ { nullptr };
int device_{-1};
common::Monitor monitor_;
};
class GPUHistMaker : public TreeUpdater {
public:
void Configure(const Args& args) override {
// Used in test to count how many configurations are performed
LOG(DEBUG) << "[GPU Hist]: Configure";
hist_maker_param_.UpdateAllowUnknown(args);
// The passed-in args can be empty; if we simply purged the old maker without
// preserving its parameters, we could not call Update on it afterwards.
TrainParam param;
if (float_maker_) {
param = float_maker_->param_;
} else if (double_maker_) {
param = double_maker_->param_;
}
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
float_maker_->param_ = param;
float_maker_->Configure(args, tparam_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
double_maker_->param_ = param;
double_maker_->Configure(args, tparam_);
}
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
FromJson(config.at("train_param"), &float_maker_->param_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
FromJson(config.at("train_param"), &double_maker_->param_);
}
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["gpu_hist_train_param"] = ToJson(hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
out["train_param"] = ToJson(float_maker_->param_);
} else {
out["train_param"] = ToJson(double_maker_->param_);
}
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
if (hist_maker_param_.single_precision_histogram) {
float_maker_->Update(gpair, dmat, trees);
} else {
double_maker_->Update(gpair, dmat, trees);
}
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override {
if (hist_maker_param_.single_precision_histogram) {
return float_maker_->UpdatePredictionCache(data, p_out_preds);
} else {
return double_maker_->UpdatePredictionCache(data, p_out_preds);
}
}
char const* Name() const override {
return "grow_gpu_hist";
}
private:
GPUHistMakerTrainParam hist_maker_param_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_;
};
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
#endif // !defined(GTEST_TEST)
} // namespace tree
} // namespace xgboost
| 434830c94acc31811bc74733cde7e2eea89cf6c1.cu | /*!
* Copyright 2017-2020 XGBoost contributors
*/
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <xgboost/tree_updater.h>
#include <algorithm>
#include <cmath>
#include <memory>
#include <limits>
#include <queue>
#include <utility>
#include <vector>
#include "xgboost/host_device_vector.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/json.h"
#include "../common/io.h"
#include "../common/device_helpers.cuh"
#include "../common/hist_util.h"
#include "../common/timer.h"
#include "../data/ellpack_page.cuh"
#include "param.h"
#include "updater_gpu_common.cuh"
#include "constraints.cuh"
#include "gpu_hist/gradient_based_sampler.cuh"
#include "gpu_hist/row_partitioner.cuh"
#include "gpu_hist/histogram.cuh"
#include "gpu_hist/evaluate_splits.cuh"
namespace xgboost {
namespace tree {
#if !defined(GTEST_TEST)
DMLC_REGISTRY_FILE_TAG(updater_gpu_hist);
#endif // !defined(GTEST_TEST)
// training parameters specific to this algorithm
struct GPUHistMakerTrainParam
: public XGBoostParameter<GPUHistMakerTrainParam> {
bool single_precision_histogram;
bool deterministic_histogram;
bool debug_synchronize;
// declare parameters
DMLC_DECLARE_PARAMETER(GPUHistMakerTrainParam) {
DMLC_DECLARE_FIELD(single_precision_histogram).set_default(false).describe(
"Use single precision to build histograms.");
DMLC_DECLARE_FIELD(deterministic_histogram).set_default(true).describe(
"Pre-round the gradient for obtaining deterministic gradient histogram.");
DMLC_DECLARE_FIELD(debug_synchronize).set_default(false).describe(
"Check if all distributed tree are identical after tree construction.");
}
};
#if !defined(GTEST_TEST)
DMLC_REGISTER_PARAMETER(GPUHistMakerTrainParam);
#endif // !defined(GTEST_TEST)
struct ExpandEntry {
int nid;
int depth;
DeviceSplitCandidate split;
uint64_t timestamp;
ExpandEntry() = default;
ExpandEntry(int nid, int depth, DeviceSplitCandidate split,
uint64_t timestamp)
: nid(nid), depth(depth), split(std::move(split)), timestamp(timestamp) {}
bool IsValid(const TrainParam& param, int num_leaves) const {
if (split.loss_chg <= kRtEps) return false;
if (split.left_sum.GetHess() == 0 || split.right_sum.GetHess() == 0) {
return false;
}
if (split.loss_chg < param.min_split_loss) { return false; }
if (param.max_depth > 0 && depth == param.max_depth) {return false; }
if (param.max_leaves > 0 && num_leaves == param.max_leaves) { return false; }
return true;
}
static bool ChildIsValid(const TrainParam& param, int depth, int num_leaves) {
if (param.max_depth > 0 && depth >= param.max_depth) return false;
if (param.max_leaves > 0 && num_leaves >= param.max_leaves) return false;
return true;
}
friend std::ostream& operator<<(std::ostream& os, const ExpandEntry& e) {
os << "ExpandEntry: \n";
os << "nidx: " << e.nid << "\n";
os << "depth: " << e.depth << "\n";
os << "loss: " << e.split.loss_chg << "\n";
os << "left_sum: " << e.split.left_sum << "\n";
os << "right_sum: " << e.split.right_sum << "\n";
return os;
}
};
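// Queue orderings: DepthWise favours shallower nodes (ties broken FIFO by
// timestamp), giving level-by-level growth; LossGuide favours the candidate with
// the largest loss reduction, giving leaf-wise growth.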
inline static bool DepthWise(const ExpandEntry& lhs, const ExpandEntry& rhs) {
if (lhs.depth == rhs.depth) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.depth > rhs.depth; // favor small depth
}
}
inline static bool LossGuide(const ExpandEntry& lhs, const ExpandEntry& rhs) {
if (lhs.split.loss_chg == rhs.split.loss_chg) {
return lhs.timestamp > rhs.timestamp; // favor small timestamp
} else {
return lhs.split.loss_chg < rhs.split.loss_chg; // favor large loss_chg
}
}
/**
* \struct DeviceHistogram
*
* \summary Data storage for node histograms on device. Automatically expands.
*
* \tparam GradientSumT histogram entry type.
* \tparam kStopGrowingSize Do not grow beyond this size
*
* \author Rory
* \date 28/07/2018
*/
template <typename GradientSumT, size_t kStopGrowingSize = 1 << 26>
class DeviceHistogram {
private:
/*! \brief Map nidx to starting index of its histogram. */
std::map<int, size_t> nidx_map_;
dh::device_vector<typename GradientSumT::ValueT> data_;
int n_bins_;
int device_id_;
static constexpr size_t kNumItemsInGradientSum =
sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT);
static_assert(kNumItemsInGradientSum == 2,
"Number of items in gradient type should be 2.");
public:
void Init(int device_id, int n_bins) {
this->n_bins_ = n_bins;
this->device_id_ = device_id;
}
void Reset() {
auto d_data = data_.data().get();
dh::LaunchN(device_id_, data_.size(),
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
nidx_map_.clear();
}
bool HistogramExists(int nidx) const {
return nidx_map_.find(nidx) != nidx_map_.cend();
}
int Bins() const {
return n_bins_;
}
size_t HistogramSize() const {
return n_bins_ * kNumItemsInGradientSum;
}
dh::device_vector<typename GradientSumT::ValueT>& Data() {
return data_;
}
void AllocateHistogram(int nidx) {
if (HistogramExists(nidx)) return;
// Number of items currently used in data
const size_t used_size = nidx_map_.size() * HistogramSize();
const size_t new_used_size = used_size + HistogramSize();
if (data_.size() >= kStopGrowingSize) {
// Recycle histogram memory
if (new_used_size <= data_.size()) {
// no need to remove old node, just insert the new one.
nidx_map_[nidx] = used_size;
// memset histogram size in bytes
} else {
std::pair<int, size_t> old_entry = *nidx_map_.begin();
nidx_map_.erase(old_entry.first);
nidx_map_[nidx] = old_entry.second;
}
// Zero recycled memory
auto d_data = data_.data().get() + nidx_map_[nidx];
dh::LaunchN(device_id_, n_bins_ * 2,
[=] __device__(size_t idx) { d_data[idx] = 0.0f; });
} else {
// Append new node histogram
nidx_map_[nidx] = used_size;
// Check there is enough memory for another histogram node
if (data_.size() < new_used_size + HistogramSize()) {
size_t new_required_memory =
std::max(data_.size() * 2, HistogramSize());
data_.resize(new_required_memory);
}
}
CHECK_GE(data_.size(), nidx_map_.size() * HistogramSize());
}
/**
* \summary Return pointer to histogram memory for a given node.
* \param nidx Tree node index.
* \return hist pointer.
*/
common::Span<GradientSumT> GetNodeHistogram(int nidx) {
CHECK(this->HistogramExists(nidx));
auto ptr = data_.data().get() + nidx_map_[nidx];
return common::Span<GradientSumT>(
reinterpret_cast<GradientSumT*>(ptr), n_bins_);
}
};
struct CalcWeightTrainParam {
float min_child_weight;
float reg_alpha;
float reg_lambda;
float max_delta_step;
float learning_rate;
XGBOOST_DEVICE explicit CalcWeightTrainParam(const TrainParam& p)
: min_child_weight(p.min_child_weight),
reg_alpha(p.reg_alpha),
reg_lambda(p.reg_lambda),
max_delta_step(p.max_delta_step),
learning_rate(p.learning_rate) {}
};
// Manage memory for a single GPU
template <typename GradientSumT>
struct GPUHistMakerDevice {
int device_id;
EllpackPageImpl* page;
BatchParam batch_param;
std::unique_ptr<RowPartitioner> row_partitioner;
DeviceHistogram<GradientSumT> hist{};
common::Span<GradientPair> gpair;
dh::caching_device_vector<int> monotone_constraints;
dh::caching_device_vector<bst_float> prediction_cache;
/*! \brief Sum gradient for each node. */
std::vector<GradientPair> node_sum_gradients;
TrainParam param;
bool deterministic_histogram;
GradientSumT histogram_rounding;
std::vector<cudaStream_t> streams{};
common::Monitor monitor;
std::vector<ValueConstraint> node_value_constraints;
common::ColumnSampler column_sampler;
FeatureInteractionConstraintDevice interaction_constraints;
using ExpandQueue =
std::priority_queue<ExpandEntry, std::vector<ExpandEntry>,
std::function<bool(ExpandEntry, ExpandEntry)>>;
std::unique_ptr<ExpandQueue> qexpand;
std::unique_ptr<GradientBasedSampler> sampler;
GPUHistMakerDevice(int _device_id,
EllpackPageImpl* _page,
bst_uint _n_rows,
TrainParam _param,
uint32_t column_sampler_seed,
uint32_t n_features,
bool deterministic_histogram,
BatchParam _batch_param)
: device_id(_device_id),
page(_page),
param(std::move(_param)),
column_sampler(column_sampler_seed),
interaction_constraints(param, n_features),
deterministic_histogram{deterministic_histogram},
batch_param(_batch_param) {
sampler.reset(new GradientBasedSampler(
page, _n_rows, batch_param, param.subsample, param.sampling_method));
if (!param.monotone_constraints.empty()) {
// Copy assigning an empty vector causes an exception in MSVC debug builds
monotone_constraints = param.monotone_constraints;
}
node_sum_gradients.resize(param.MaxNodes());
// Init histogram
hist.Init(device_id, page->Cuts().TotalBins());
monitor.Init(std::string("GPUHistMakerDevice") + std::to_string(device_id));
}
~GPUHistMakerDevice() { // NOLINT
dh::safe_cuda(cudaSetDevice(device_id));
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamDestroy(stream));
}
}
// Get vector of at least n initialised streams
std::vector<cudaStream_t>& GetStreams(int n) {
if (n > streams.size()) {
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamDestroy(stream));
}
streams.clear();
streams.resize(n);
for (auto& stream : streams) {
dh::safe_cuda(cudaStreamCreate(&stream));
}
}
return streams;
}
// Reset values for each update iteration
// Note that the column sampler must be passed by value because it is not
// thread safe
void Reset(HostDeviceVector<GradientPair>* dh_gpair, DMatrix* dmat, int64_t num_columns) {
if (param.grow_policy == TrainParam::kLossGuide) {
qexpand.reset(new ExpandQueue(LossGuide));
} else {
qexpand.reset(new ExpandQueue(DepthWise));
}
this->column_sampler.Init(num_columns, param.colsample_bynode,
param.colsample_bylevel, param.colsample_bytree);
dh::safe_cuda(cudaSetDevice(device_id));
this->interaction_constraints.Reset();
std::fill(node_sum_gradients.begin(), node_sum_gradients.end(),
GradientPair());
auto sample = sampler->Sample(dh_gpair->DeviceSpan(), dmat);
page = sample.page;
gpair = sample.gpair;
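// Pre-rounding the gradients (see deterministic_histogram) makes the atomic
// additions into the histogram insensitive to summation order, so repeated runs
// produce identical histograms; a zero rounding factor disables this.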
if (deterministic_histogram) {
histogram_rounding = CreateRoundingFactor<GradientSumT>(this->gpair);
} else {
histogram_rounding = GradientSumT{0.0, 0.0};
}
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, sample.sample_rows));
hist.Reset();
}
DeviceSplitCandidate EvaluateRootSplit(GradientPair root_sum) {
int nidx = 0;
dh::TemporaryArray<DeviceSplitCandidate> splits_out(1);
GPUTrainingParam gpu_param(param);
auto sampled_features = column_sampler.GetFeatureSet(0);
sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> feature_set =
interaction_constraints.Query(sampled_features->DeviceSpan(), nidx);
auto matrix = page->GetDeviceAccessor(device_id);
EvaluateSplitInputs<GradientSumT> inputs{
nidx,
{root_sum.GetGrad(), root_sum.GetHess()},
gpu_param,
feature_set,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(nidx),
node_value_constraints[nidx],
dh::ToSpan(monotone_constraints)};
EvaluateSingleSplit(dh::ToSpan(splits_out), inputs);
std::vector<DeviceSplitCandidate> result(1);
dh::safe_cuda(cudaMemcpy(result.data(), splits_out.data().get(),
sizeof(DeviceSplitCandidate) * splits_out.size(),
cudaMemcpyDeviceToHost));
return result.front();
}
std::vector<DeviceSplitCandidate> EvaluateLeftRightSplits(
ExpandEntry candidate, int left_nidx, int right_nidx,
const RegTree& tree) {
dh::TemporaryArray<DeviceSplitCandidate> splits_out(2);
GPUTrainingParam gpu_param(param);
auto left_sampled_features =
column_sampler.GetFeatureSet(tree.GetDepth(left_nidx));
left_sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> left_feature_set =
interaction_constraints.Query(left_sampled_features->DeviceSpan(),
left_nidx);
auto right_sampled_features =
column_sampler.GetFeatureSet(tree.GetDepth(right_nidx));
right_sampled_features->SetDevice(device_id);
common::Span<bst_feature_t> right_feature_set =
interaction_constraints.Query(right_sampled_features->DeviceSpan(),
left_nidx);
auto matrix = page->GetDeviceAccessor(device_id);
EvaluateSplitInputs<GradientSumT> left{left_nidx,
{candidate.split.left_sum.GetGrad(),
candidate.split.left_sum.GetHess()},
gpu_param,
left_feature_set,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(left_nidx),
node_value_constraints[left_nidx],
dh::ToSpan(monotone_constraints)};
EvaluateSplitInputs<GradientSumT> right{
right_nidx,
{candidate.split.right_sum.GetGrad(),
candidate.split.right_sum.GetHess()},
gpu_param,
right_feature_set,
matrix.feature_segments,
matrix.gidx_fvalue_map,
matrix.min_fvalue,
hist.GetNodeHistogram(right_nidx),
node_value_constraints[right_nidx],
dh::ToSpan(monotone_constraints)};
EvaluateSplits(dh::ToSpan(splits_out), left, right);
std::vector<DeviceSplitCandidate> result(2);
dh::safe_cuda(cudaMemcpy(result.data(), splits_out.data().get(),
sizeof(DeviceSplitCandidate) * splits_out.size(),
cudaMemcpyDeviceToHost));
return result;
}
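// Accumulates the gradient histogram of one node: every row currently assigned to
// nidx adds its (rounded) gradient pair to the bin of each feature value it
// carries, reading the ELLPACK page through the device accessor.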
void BuildHist(int nidx) {
hist.AllocateHistogram(nidx);
auto d_node_hist = hist.GetNodeHistogram(nidx);
auto d_ridx = row_partitioner->GetRows(nidx);
BuildGradientHistogram(page->GetDeviceAccessor(device_id), gpair, d_ridx, d_node_hist,
histogram_rounding);
}
void SubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
auto d_node_hist_parent = hist.GetNodeHistogram(nidx_parent);
auto d_node_hist_histogram = hist.GetNodeHistogram(nidx_histogram);
auto d_node_hist_subtraction = hist.GetNodeHistogram(nidx_subtraction);
dh::LaunchN(device_id, page->Cuts().TotalBins(), [=] __device__(size_t idx) {
d_node_hist_subtraction[idx] =
d_node_hist_parent[idx] - d_node_hist_histogram[idx];
});
}
bool CanDoSubtractionTrick(int nidx_parent, int nidx_histogram,
int nidx_subtraction) {
// Make sure histograms are already allocated
hist.AllocateHistogram(nidx_subtraction);
return hist.HistogramExists(nidx_histogram) &&
hist.HistogramExists(nidx_parent);
}
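// Re-partitions the parent's rows between its two children by reading the split
// feature's value for each row; missing values (NaN) follow the default direction
// stored in the tree node.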
void UpdatePosition(int nidx, RegTree::Node split_node) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->UpdatePosition(
nidx, split_node.LeftChild(), split_node.RightChild(),
[=] __device__(bst_uint ridx) {
// given a row index, returns the node id it belongs to
bst_float cut_value =
d_matrix.GetFvalue(ridx, split_node.SplitIndex());
// Missing value
int new_position = 0;
if (isnan(cut_value)) {
new_position = split_node.DefaultChild();
} else {
if (cut_value <= split_node.SplitCond()) {
new_position = split_node.LeftChild();
} else {
new_position = split_node.RightChild();
}
}
return new_position;
});
}
// After tree update is finished, update the position of all training
// instances to their final leaf. This information is used later to update the
// prediction cache
void FinalisePosition(RegTree const* p_tree, DMatrix* p_fmat) {
dh::TemporaryArray<RegTree::Node> d_nodes(p_tree->GetNodes().size());
dh::safe_cuda(cudaMemcpy(d_nodes.data().get(), p_tree->GetNodes().data(),
d_nodes.size() * sizeof(RegTree::Node),
cudaMemcpyHostToDevice));
if (row_partitioner->GetRows().size() != p_fmat->Info().num_row_) {
row_partitioner.reset(); // Release the device memory first before reallocating
row_partitioner.reset(new RowPartitioner(device_id, p_fmat->Info().num_row_));
}
if (page->n_rows == p_fmat->Info().num_row_) {
FinalisePositionInPage(page, dh::ToSpan(d_nodes));
} else {
for (auto& batch : p_fmat->GetBatches<EllpackPage>(batch_param)) {
FinalisePositionInPage(batch.Impl(), dh::ToSpan(d_nodes));
}
}
}
void FinalisePositionInPage(EllpackPageImpl* page, const common::Span<RegTree::Node> d_nodes) {
auto d_matrix = page->GetDeviceAccessor(device_id);
row_partitioner->FinalisePosition(
[=] __device__(size_t row_id, int position) {
if (!d_matrix.IsInRange(row_id)) {
return RowPartitioner::kIgnoredTreePosition;
}
auto node = d_nodes[position];
while (!node.IsLeaf()) {
bst_float element = d_matrix.GetFvalue(row_id, node.SplitIndex());
// Missing value
if (isnan(element)) {
position = node.DefaultChild();
} else {
if (element <= node.SplitCond()) {
position = node.LeftChild();
} else {
position = node.RightChild();
}
}
node = d_nodes[position];
}
return position;
});
}
void UpdatePredictionCache(bst_float* out_preds_d) {
dh::safe_cuda(cudaSetDevice(device_id));
auto d_ridx = row_partitioner->GetRows();
if (prediction_cache.size() != d_ridx.size()) {
prediction_cache.resize(d_ridx.size());
dh::safe_cuda(cudaMemcpyAsync(prediction_cache.data().get(), out_preds_d,
prediction_cache.size() * sizeof(bst_float),
cudaMemcpyDefault));
}
CalcWeightTrainParam param_d(param);
dh::TemporaryArray<GradientPair> device_node_sum_gradients(node_sum_gradients.size());
dh::safe_cuda(
cudaMemcpyAsync(device_node_sum_gradients.data().get(), node_sum_gradients.data(),
sizeof(GradientPair) * node_sum_gradients.size(),
cudaMemcpyHostToDevice));
auto d_position = row_partitioner->GetPosition();
auto d_node_sum_gradients = device_node_sum_gradients.data().get();
auto d_prediction_cache = prediction_cache.data().get();
dh::LaunchN(
device_id, prediction_cache.size(), [=] __device__(int local_idx) {
int pos = d_position[local_idx];
bst_float weight = CalcWeight(param_d, d_node_sum_gradients[pos]);
d_prediction_cache[d_ridx[local_idx]] +=
weight * param_d.learning_rate;
});
dh::safe_cuda(cudaMemcpy(
out_preds_d, prediction_cache.data().get(),
prediction_cache.size() * sizeof(bst_float), cudaMemcpyDefault));
row_partitioner.reset();
}
void AllReduceHist(int nidx, dh::AllReducer* reducer) {
monitor.StartCuda("AllReduce");
auto d_node_hist = hist.GetNodeHistogram(nidx).data();
reducer->AllReduceSum(
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
reinterpret_cast<typename GradientSumT::ValueT*>(d_node_hist),
page->Cuts().TotalBins() * (sizeof(GradientSumT) / sizeof(typename GradientSumT::ValueT)));
reducer->Synchronize();
monitor.StopCuda("AllReduce");
}
/**
* \brief Build GPU local histograms for the left and right child of some parent node
*/
void BuildHistLeftRight(const ExpandEntry &candidate, int nidx_left,
int nidx_right, dh::AllReducer* reducer) {
auto build_hist_nidx = nidx_left;
auto subtraction_trick_nidx = nidx_right;
// Decide whether to build the left histogram or right histogram
// Use sum of Hessian as a heuristic to select node with fewest training instances
bool fewer_right = candidate.split.right_sum.GetHess() < candidate.split.left_sum.GetHess();
if (fewer_right) {
std::swap(build_hist_nidx, subtraction_trick_nidx);
}
this->BuildHist(build_hist_nidx);
this->AllReduceHist(build_hist_nidx, reducer);
// Check whether we can use the subtraction trick to calculate the other child's histogram
bool do_subtraction_trick = this->CanDoSubtractionTrick(
candidate.nid, build_hist_nidx, subtraction_trick_nidx);
if (do_subtraction_trick) {
// Calculate other histogram using subtraction trick
this->SubtractionTrick(candidate.nid, build_hist_nidx,
subtraction_trick_nidx);
} else {
// Calculate other histogram manually
this->BuildHist(subtraction_trick_nidx);
this->AllReduceHist(subtraction_trick_nidx, reducer);
}
}
void ApplySplit(const ExpandEntry& candidate, RegTree* p_tree) {
RegTree& tree = *p_tree;
node_value_constraints.resize(tree.GetNodes().size());
auto parent_sum = candidate.split.left_sum + candidate.split.right_sum;
auto base_weight = node_value_constraints[candidate.nid].CalcWeight(
param, parent_sum);
auto left_weight = node_value_constraints[candidate.nid].CalcWeight(
param, candidate.split.left_sum) *
param.learning_rate;
auto right_weight = node_value_constraints[candidate.nid].CalcWeight(
param, candidate.split.right_sum) *
param.learning_rate;
tree.ExpandNode(candidate.nid, candidate.split.findex,
candidate.split.fvalue, candidate.split.dir == kLeftDir,
base_weight, left_weight, right_weight,
candidate.split.loss_chg, parent_sum.GetHess(),
candidate.split.left_sum.GetHess(), candidate.split.right_sum.GetHess());
// Set up child constraints
node_value_constraints.resize(tree.GetNodes().size());
node_value_constraints[candidate.nid].SetChild(
param, tree[candidate.nid].SplitIndex(), candidate.split.left_sum,
candidate.split.right_sum,
&node_value_constraints[tree[candidate.nid].LeftChild()],
&node_value_constraints[tree[candidate.nid].RightChild()]);
node_sum_gradients[tree[candidate.nid].LeftChild()] =
candidate.split.left_sum;
node_sum_gradients[tree[candidate.nid].RightChild()] =
candidate.split.right_sum;
interaction_constraints.Split(
candidate.nid, tree[candidate.nid].SplitIndex(),
tree[candidate.nid].LeftChild(),
tree[candidate.nid].RightChild());
}
void InitRoot(RegTree* p_tree, dh::AllReducer* reducer) {
constexpr bst_node_t kRootNIdx = 0;
dh::XGBCachingDeviceAllocator<char> alloc;
GradientPair root_sum = thrust::reduce(
thrust::cuda::par(alloc),
thrust::device_ptr<GradientPair const>(gpair.data()),
thrust::device_ptr<GradientPair const>(gpair.data() + gpair.size()));
rabit::Allreduce<rabit::op::Sum, float>(reinterpret_cast<float*>(&root_sum),
2);
this->BuildHist(kRootNIdx);
this->AllReduceHist(kRootNIdx, reducer);
// Remember root stats
node_sum_gradients[kRootNIdx] = root_sum;
p_tree->Stat(kRootNIdx).sum_hess = root_sum.GetHess();
auto weight = CalcWeight(param, root_sum);
p_tree->Stat(kRootNIdx).base_weight = weight;
(*p_tree)[kRootNIdx].SetLeaf(param.learning_rate * weight);
// Initialise root constraint
node_value_constraints.resize(p_tree->GetNodes().size());
// Generate first split
auto split = this->EvaluateRootSplit(root_sum);
qexpand->push(
ExpandEntry(kRootNIdx, p_tree->GetDepth(kRootNIdx), split, 0));
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair_all, DMatrix* p_fmat,
RegTree* p_tree, dh::AllReducer* reducer) {
auto& tree = *p_tree;
monitor.StartCuda("Reset");
this->Reset(gpair_all, p_fmat, p_fmat->Info().num_col_);
monitor.StopCuda("Reset");
monitor.StartCuda("InitRoot");
this->InitRoot(p_tree, reducer);
monitor.StopCuda("InitRoot");
auto timestamp = qexpand->size();
auto num_leaves = 1;
while (!qexpand->empty()) {
ExpandEntry candidate = qexpand->top();
qexpand->pop();
if (!candidate.IsValid(param, num_leaves)) {
continue;
}
this->ApplySplit(candidate, p_tree);
num_leaves++;
int left_child_nidx = tree[candidate.nid].LeftChild();
int right_child_nidx = tree[candidate.nid].RightChild();
// Only create child entries if needed
if (ExpandEntry::ChildIsValid(param, tree.GetDepth(left_child_nidx),
num_leaves)) {
monitor.StartCuda("UpdatePosition");
this->UpdatePosition(candidate.nid, (*p_tree)[candidate.nid]);
monitor.StopCuda("UpdatePosition");
monitor.StartCuda("BuildHist");
this->BuildHistLeftRight(candidate, left_child_nidx, right_child_nidx, reducer);
monitor.StopCuda("BuildHist");
monitor.StartCuda("EvaluateSplits");
auto splits = this->EvaluateLeftRightSplits(candidate, left_child_nidx,
right_child_nidx,
*p_tree);
monitor.StopCuda("EvaluateSplits");
qexpand->push(ExpandEntry(left_child_nidx,
tree.GetDepth(left_child_nidx), splits.at(0),
timestamp++));
qexpand->push(ExpandEntry(right_child_nidx,
tree.GetDepth(right_child_nidx),
splits.at(1), timestamp++));
}
}
monitor.StartCuda("FinalisePosition");
this->FinalisePosition(p_tree, p_fmat);
monitor.StopCuda("FinalisePosition");
}
};
template <typename GradientSumT>
class GPUHistMakerSpecialised {
public:
GPUHistMakerSpecialised() = default;
void Configure(const Args& args, GenericParameter const* generic_param) {
param_.UpdateAllowUnknown(args);
generic_param_ = generic_param;
hist_maker_param_.UpdateAllowUnknown(args);
dh::CheckComputeCapability();
monitor_.Init("updater_gpu_hist");
}
~GPUHistMakerSpecialised() { // NOLINT
dh::GlobalMemoryLogger().Log();
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) {
monitor_.StartCuda("Update");
// rescale the learning rate according to the number of trees in this update
float lr = param_.learning_rate;
param_.learning_rate = lr / trees.size();
ValueConstraint::Init(¶m_, dmat->Info().num_col_);
// build tree
try {
for (xgboost::RegTree* tree : trees) {
this->UpdateTree(gpair, dmat, tree);
if (hist_maker_param_.debug_synchronize) {
this->CheckTreesSynchronized(tree);
}
}
dh::safe_cuda(cudaGetLastError());
} catch (const std::exception& e) {
LOG(FATAL) << "Exception in gpu_hist: " << e.what() << std::endl;
}
param_.learning_rate = lr;
monitor_.StopCuda("Update");
}
void InitDataOnce(DMatrix* dmat) {
device_ = generic_param_->gpu_id;
CHECK_GE(device_, 0) << "Must have at least one device";
info_ = &dmat->Info();
reducer_.Init({device_}); // NOLINT
// Synchronise the column sampling seed
uint32_t column_sampling_seed = common::GlobalRandom()();
rabit::Broadcast(&column_sampling_seed, sizeof(column_sampling_seed), 0);
BatchParam batch_param{
device_,
param_.max_bin,
generic_param_->gpu_page_size
};
auto page = (*dmat->GetBatches<EllpackPage>(batch_param).begin()).Impl();
dh::safe_cuda(cudaSetDevice(device_));
maker.reset(new GPUHistMakerDevice<GradientSumT>(device_,
page,
info_->num_row_,
param_,
column_sampling_seed,
info_->num_col_,
hist_maker_param_.deterministic_histogram,
batch_param));
p_last_fmat_ = dmat;
initialised_ = true;
}
void InitData(DMatrix* dmat) {
if (!initialised_) {
monitor_.StartCuda("InitDataOnce");
this->InitDataOnce(dmat);
monitor_.StopCuda("InitDataOnce");
}
}
// Only call this method for testing
void CheckTreesSynchronized(RegTree* local_tree) const {
std::string s_model;
common::MemoryBufferStream fs(&s_model);
int rank = rabit::GetRank();
if (rank == 0) {
local_tree->Save(&fs);
}
fs.Seek(0);
rabit::Broadcast(&s_model, 0);
RegTree reference_tree {}; // rank 0 tree
reference_tree.Load(&fs);
CHECK(*local_tree == reference_tree);
}
void UpdateTree(HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat,
RegTree* p_tree) {
monitor_.StartCuda("InitData");
this->InitData(p_fmat);
monitor_.StopCuda("InitData");
gpair->SetDevice(device_);
maker->UpdateTree(gpair, p_fmat, p_tree, &reducer_);
}
bool UpdatePredictionCache(const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) {
if (maker == nullptr || p_last_fmat_ == nullptr || p_last_fmat_ != data) {
return false;
}
monitor_.StartCuda("UpdatePredictionCache");
p_out_preds->SetDevice(device_);
maker->UpdatePredictionCache(p_out_preds->DevicePointer());
monitor_.StopCuda("UpdatePredictionCache");
return true;
}
TrainParam param_; // NOLINT
MetaInfo* info_{}; // NOLINT
std::unique_ptr<GPUHistMakerDevice<GradientSumT>> maker; // NOLINT
private:
bool initialised_ { false };
GPUHistMakerTrainParam hist_maker_param_;
GenericParameter const* generic_param_;
dh::AllReducer reducer_;
DMatrix* p_last_fmat_ { nullptr };
int device_{-1};
common::Monitor monitor_;
};
class GPUHistMaker : public TreeUpdater {
public:
void Configure(const Args& args) override {
// Used in test to count how many configurations are performed
LOG(DEBUG) << "[GPU Hist]: Configure";
hist_maker_param_.UpdateAllowUnknown(args);
// The passed-in args can be empty; if we simply purged the old maker without
// preserving its parameters, we could not call Update on it afterwards.
TrainParam param;
if (float_maker_) {
param = float_maker_->param_;
} else if (double_maker_) {
param = double_maker_->param_;
}
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
float_maker_->param_ = param;
float_maker_->Configure(args, tparam_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
double_maker_->param_ = param;
double_maker_->Configure(args, tparam_);
}
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
FromJson(config.at("gpu_hist_train_param"), &this->hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
float_maker_.reset(new GPUHistMakerSpecialised<GradientPair>());
FromJson(config.at("train_param"), &float_maker_->param_);
} else {
double_maker_.reset(new GPUHistMakerSpecialised<GradientPairPrecise>());
FromJson(config.at("train_param"), &double_maker_->param_);
}
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["gpu_hist_train_param"] = ToJson(hist_maker_param_);
if (hist_maker_param_.single_precision_histogram) {
out["train_param"] = ToJson(float_maker_->param_);
} else {
out["train_param"] = ToJson(double_maker_->param_);
}
}
void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat,
const std::vector<RegTree*>& trees) override {
if (hist_maker_param_.single_precision_histogram) {
float_maker_->Update(gpair, dmat, trees);
} else {
double_maker_->Update(gpair, dmat, trees);
}
}
bool UpdatePredictionCache(
const DMatrix* data, HostDeviceVector<bst_float>* p_out_preds) override {
if (hist_maker_param_.single_precision_histogram) {
return float_maker_->UpdatePredictionCache(data, p_out_preds);
} else {
return double_maker_->UpdatePredictionCache(data, p_out_preds);
}
}
char const* Name() const override {
return "grow_gpu_hist";
}
private:
GPUHistMakerTrainParam hist_maker_param_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPair>> float_maker_;
std::unique_ptr<GPUHistMakerSpecialised<GradientPairPrecise>> double_maker_;
};
#if !defined(GTEST_TEST)
XGBOOST_REGISTER_TREE_UPDATER(GPUHistMaker, "grow_gpu_hist")
.describe("Grow tree with GPU.")
.set_body([]() { return new GPUHistMaker(); });
#endif // !defined(GTEST_TEST)
} // namespace tree
} // namespace xgboost
|
aa2f05306d14baf63efe6d7c704c530a27d3d9ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/nn/convtranspose.h"
#include "cudakernel/math/math.h"
#include "cudakernel/gemm/gemm.h"
#include "cudakernel/memory/transpose.h"
#include "cudakernel/common/common.h"
#include "ppl/nn/params/onnx/transpose_param.h"
#include "ppl/nn/params/onnx/gemm_param.h"
#include <hip/hip_fp16.h>
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
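// ConvTranspose in this file is GEMM-based: the padded, transposed filter is
// multiplied against the input to form a column buffer of shape
// (C_out*kernel_h*kernel_w) x (in_h*in_w), and this kernel performs col2im: each
// output pixel accumulates every column entry whose kernel window covers it,
// honouring stride, padding and dilation ("hole").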
template <typename T>
__global__ void ppl_col2im_gpu_kernel(
const int n, const T* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int hole_h, const int hole_w,
const int height_col, const int width_col,
const float beta, T* data_im) {
CUDA_KERNEL_LOOP(index, n) {
T val = 0;
const int w_im = index % width + pad_w;
const int h_im = (index / width) % height + pad_h;
const int c_im = index / (width * height);
int kernel_extern_w = (kernel_w-1)*hole_w+1;
int kernel_extern_h = (kernel_h-1)*hole_h+1;
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_extern_w) ? 0 : (w_im - kernel_extern_w) / stride_w + 1;
const int w_col_end =
min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extern_h) ? 0 : (h_im - kernel_extern_h) / stride_h + 1;
const int h_col_end =
min(h_im / stride_h + 1, height_col);
// equivalent implementation
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
if (h_k % hole_h == 0 && w_k % hole_w == 0) {
h_k /= hole_h;
w_k /= hole_w;
int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) * height_col + h_col) * width_col + w_col;
val = Math<T,T,T>::add(val, data_col[data_col_index]);
}
}
}
data_im[index] = Math<T,T,T>::add(val, (beta == 0 ? (T)0 : data_im[index]));
}
}
template <typename T>
void ppl_col2im_gpu(
hipStream_t stream, const T* data_col,
int channels, int height, int width,
int kernel_h, int kernel_w, int pad_h,
int pad_w, int stride_h, int stride_w,
int hole_h, int hole_w, int height_col,
int width_col, const float beta, T* data_im) {
int num_kernels = channels * height * width;
hipLaunchKernelGGL(( ppl_col2im_gpu_kernel<T>), dim3((num_kernels + 1024 - 1) / 1024), dim3(1024), 0, stream,
num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, hole_h, hole_w,
height_col, width_col, beta, data_im);
}
template <typename T>
__global__ void ppl_cukernel_matrix_padding(
const T* input, int inHeight,
int inWidth, T* output,
int out_height, int out_width) {
int w_idx = blockIdx.x * blockDim.x + threadIdx.x;
int h_idx = blockIdx.y * blockDim.y + threadIdx.y;
if (w_idx >= inWidth || h_idx >= inHeight) return;
uint64_t in_index = h_idx * inWidth + w_idx;
uint64_t out_index = h_idx * out_width + w_idx;
output[out_index] = input[in_index];
}
template <typename T>
void cuda_matrix_padding(
hipStream_t stream, const T* input,
int inHeight, int inWidth, T* output,
int out_height, int out_width) {
hipMemset(output, 0, out_height * out_width * sizeof(T));
dim3 blockSize(16, 16, 1);
dim3 gridSize((inWidth + 15) / 16, (inHeight + 15) / 16, 1);
hipLaunchKernelGGL(( ppl_cukernel_matrix_padding<T>), dim3(gridSize), dim3(blockSize), 0, stream,
input, inHeight, inWidth, output, out_height, out_width);
}
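// Broadcast-adds the bias: one value per output channel is added across all
// out_height * out_width positions of that channel (alpha scales the bias, beta
// scales the existing output).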
template<typename T>
void __global__ addVectorToMatrixColumnKernel(
int numRows, int numCols, int stride,
float alpha, const T* bias, float beta, T* out) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numRows * numCols) return;
int row_idx = index / numCols;
int col_idx = index % numCols;
uint64_t out_index = row_idx * stride + col_idx;
uint64_t in_index = index;
out[out_index] = Math<T,T,T>::add(
Math<T,T,T>::mul((T)alpha, bias[row_idx]),
((beta == 0) ? (T)0 : Math<T,T,T>::mul((T)beta, out[in_index])));
}
template<typename T>
void addVectorToMatrixColumn(
hipStream_t stream, int M,
int N, int stride, float alpha,
const T* biasData, float beta, T* outData){
const uint64_t count = M * N;
const int blockSize = 128;
const int gridSize = (count + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( addVectorToMatrixColumnKernel<T>), dim3(gridSize), dim3(blockSize), 0, stream, M, N,
stride, alpha, biasData, beta, outData);
}
uint64_t pplConvTransposeGetFilterBufSizeCudaFp32(
int num_filters, int num_channels, int filter_height, int filter_width) {
int M = num_filters;
M *= filter_height;
M *= filter_width;
size_t K = num_channels;
size_t padM = Align(M, 1);
size_t padK = Align(K, 8);
size_t dst = Align(padM * padK * sizeof(__half), 128);
return dst * 2;// transpose buf
}
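// Workspace convention used below: K and N are padded to multiples of 8 for the
// fp16 GEMM, every sub-buffer is aligned to 128 bytes, and the filter buffer is
// doubled to leave room for its transposed copy.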
template<typename T>
void pplConvTransposeConvertFilter(
hipStream_t stream, const T* filter,
int num_filters, int num_channels, int filter_height,
int filter_width, T* cvt_filter) {
int M = num_filters;
M *= filter_height;
M *= filter_width;
size_t K = num_channels;
size_t padM = Align(M, 1);
size_t padK = Align(K, 8);
// No need to transpose here; just zero-pad the filter from (K, M) to (padK, padM).
cuda_matrix_padding<T>(stream, filter, K, M, cvt_filter, padK, padM);
}
uint64_t pplConvTransposeGetTempBufSizeCudaFp32(
int group, int in_c, int in_h, int in_w,
int out_c, int kernel_h, int kernel_w,
int pad_h, int pad_w, int stride_h,
int stride_w, int hole_h, int hole_w) {
size_t M = out_c * kernel_h * kernel_w;
size_t N = in_w * in_h;
size_t K = in_c;
size_t padN = Align(N, 8);
size_t padK = Align(K, 8);
//for trans buf
return 2 * Align(padN*padK*sizeof(__half), 128) +
Align(M*N*sizeof(__half), 128) + Align(M*padN*sizeof(__half), 128);
}
uint64_t PPLConvTransposeGetBufSizeCuda(
ppl::nn::TensorShape* input_shape,
ppl::nn::TensorShape* output_shape,
const ppl::nn::common::ConvTransposeParam* param)
{
int batch = input_shape->GetDim(0);
int in_c = input_shape->GetDim(1);
int in_h = input_shape->GetDim(2);
int in_w = input_shape->GetDim(3);
int out_c = output_shape->GetDim(1);
int out_h = output_shape->GetDim(2);
int out_w = output_shape->GetDim(3);
int group = param->group;
int kernel_h = param->kernel_shape[0];
int kernel_w = param->kernel_shape[1];
int pad_h = param->pads[0];
int pad_w = param->pads[1];
int stride_h = param->strides[0];
int stride_w = param->strides[1];
int hole_h = param->dilations[0];
int hole_w = param->dilations[1];
uint64_t size = 0;
if (input_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) {
size += pplConvTransposeGetFilterBufSizeCudaFp32(out_c, in_c, kernel_h, kernel_w) +
pplConvTransposeGetTempBufSizeCudaFp32(group, in_c, in_h, in_w, out_c, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, hole_h, hole_w);
} else {
return 0;
}
//NT gemm
int transA = 0;
int M = out_c * kernel_h * kernel_w;
int K = in_c;
int padM = Align(M, 1);
int padK = Align(K, 8 );
ppl::nn::TensorShape a_shape;
a_shape.Reshape({padM, padK});
a_shape.SetDataType(input_shape->GetDataType());
size += PPLGemmCUDAGetBufSize(&a_shape, transA);
return size;
}
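// Compacts the padded M x padN GEMM result into a contiguous M x N buffer so that
// col2im can index it densely; a no-op when N is already a multiple of 8.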
template<typename T>
__global__ void remove_padding(
T *pad_data,
T *data,
const int M,
const int padN,
const int N)
{
int64_t off = blockIdx.x*256 + threadIdx.x;
int m_id = off / N;
int n_id = off % N;
int64_t in_off = (int64_t)m_id*padN + n_id;
if (off < M*N) data[off] = pad_data[in_off];
}
template<typename T>
void RemovePadding(
const hipStream_t &stream,
T *pad_data,
T *data,
const int M,
const int padN,
const int N)
{
if(padN == N) return;
int block_size = 256;
int grid = (M*N+255) / 256;
hipLaunchKernelGGL(( remove_padding<T>), dim3(grid), dim3(block_size), 0, stream, pad_data, data, M, padN, N);
}
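// fp16 forward pipeline, per batch element: zero-pad the input to padK x padN,
// transpose it (the padded filter is transposed once before the loop) so the GEMM
// runs in NT layout, GEMM into an M x padN buffer, strip the padding, col2im back
// into the NCHW output, then add the bias if present.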
ppl::common::RetCode PPLCUDAConvTransposeForward(
hipStream_t stream,
ppl::nn::TensorShape* input_shape,
const void* input,
const void* filter,
const void* bias,
const ppl::nn::common::ConvTransposeParam* param,
void* temp_buffer,
ppl::nn::TensorShape* output_shape,
void* output)
{
int batch = input_shape->GetDim(0);
int in_c = input_shape->GetDim(1);
int in_h = input_shape->GetDim(2);
int in_w = input_shape->GetDim(3);
int out_c = output_shape->GetDim(1);
int out_h = output_shape->GetDim(2);
int out_w = output_shape->GetDim(3);
int kernel_h = param->kernel_shape[0];
int kernel_w = param->kernel_shape[1];
int pad_h = param->pads[0];
int pad_w = param->pads[1];
int stride_h = param->strides[0];
int stride_w = param->strides[1];
int hole_h = param->dilations[0];
int hole_w = param->dilations[1];
if(input_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) {
int num_channels = in_c;
int num_filters = out_c;
int height = in_h;
int width = in_w;
int out_height = out_h;
int out_width = out_w;
int M = out_c * kernel_h * kernel_w;
int N = in_w * in_h;
int K = in_c;
int padM = Align(M, 1);
int padN = Align(N, 8);
int padK = Align(K, 8);
__half* pad_in_data = (__half*)temp_buffer;
__half* pad_out_data = pad_in_data + Align(padN*padK, 128/sizeof(__half));
__half* out_data = pad_out_data + Align(M*padN, 128/sizeof(__half));
// cvt filter
__half* cvt_filter = out_data + Align(M*N, 128/sizeof(__half));
pplConvTransposeConvertFilter<__half>(stream, (__half*)filter, num_filters, num_channels, kernel_h, kernel_w, cvt_filter);
ppl::nn::common::TransposeParam trans_param;
trans_param.perm.push_back(1);
trans_param.perm.push_back(0);
__half *trans_filter = cvt_filter + Align(padK * padM, 128 / sizeof(__half));
__half *trans_in_data = trans_filter + Align(padM * padK, 128 / sizeof(__half));
ppl::nn::TensorShape a_shape, b_shape, c_shape, out_a_shape, out_b_shape;
a_shape.SetDataType(input_shape->GetDataType());
b_shape.SetDataType(input_shape->GetDataType());
c_shape.SetDataType(output_shape->GetDataType());
out_a_shape.SetDataType(input_shape->GetDataType());
out_b_shape.SetDataType(input_shape->GetDataType());
a_shape.Reshape({padK, M});
b_shape.Reshape({padK, padN});
c_shape.Reshape({M, padN});
out_a_shape.Reshape({padM, padK});
out_b_shape.Reshape({padN, padK});
ppl::common::RetCode status = PPLCUDATransposeForwardImp(stream,
trans_param, &a_shape, cvt_filter,
&out_a_shape, trans_filter);
ppl::nn::common::GemmParam gemm_param;
fuse_param_t fuse_param;
gemm_param.bias_term = 0;
gemm_param.transA = 0; gemm_param.transB = 1;
gemm_param.alpha = 1.f; gemm_param.beta = 1.f;
gemm_param.N = padN;
for(int n = 0; n < batch; ++n) {
int offset_in = n * num_channels * height * width;
int offset_out = n * num_filters * out_height * out_width;
cuda_matrix_padding<__half>(stream, ((__half*)input) + offset_in, K, N,
pad_in_data, padK, padN);
PPLCUDATransposeForwardImp(stream,
trans_param, &b_shape, pad_in_data,
&out_b_shape, trans_in_data);
//NT
ppl::nn::TensorShape a_shape, b_shape, c_shape;
//input transpose KxN -> NxK weight transpose KxM -> MxK
int kernel_id = 0;
PPLCUDAGemmForwardImp(stream,
&out_a_shape, trans_filter,
&out_b_shape, trans_in_data,
NULL,
&c_shape, pad_out_data,
gemm_param,
NULL,
fuse_param,
kernel_id);
RemovePadding<__half>(stream, pad_out_data, out_data, M, padN, N);
// col2im must read the unpadded M x N buffer produced by RemovePadding above.
ppl_col2im_gpu<__half>(stream, (const __half*)out_data, num_filters,
out_height, out_width, kernel_h, kernel_w, pad_h, pad_w, stride_h,
stride_w, hole_h, hole_w, height, width, 0.f, ((__half*)output) + offset_out);
if (NULL != bias) {
addVectorToMatrixColumn<__half>(stream, num_filters, out_height * out_width,
out_height * out_width, 1.f, (__half*)bias, 1.f, ((__half*)output) + offset_out);
}
}
return ppl::common::RC_SUCCESS;
} else {
return ppl::common::RC_UNSUPPORTED;
}
}
| aa2f05306d14baf63efe6d7c704c530a27d3d9ba.cu | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#include "cudakernel/nn/convtranspose.h"
#include "cudakernel/math/math.h"
#include "cudakernel/gemm/gemm.h"
#include "cudakernel/memory/transpose.h"
#include "cudakernel/common/common.h"
#include "ppl/nn/params/onnx/transpose_param.h"
#include "ppl/nn/params/onnx/gemm_param.h"
#include <cuda_fp16.h>
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
template <typename T>
__global__ void ppl_col2im_gpu_kernel(
const int n, const T* data_col,
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int hole_h, const int hole_w,
const int height_col, const int width_col,
const float beta, T* data_im) {
CUDA_KERNEL_LOOP(index, n) {
T val = 0;
const int w_im = index % width + pad_w;
const int h_im = (index / width) % height + pad_h;
const int c_im = index / (width * height);
int kernel_extern_w = (kernel_w-1)*hole_w+1;
int kernel_extern_h = (kernel_h-1)*hole_h+1;
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_extern_w) ? 0 : (w_im - kernel_extern_w) / stride_w + 1;
const int w_col_end =
min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extern_h) ? 0 : (h_im - kernel_extern_h) / stride_h + 1;
const int h_col_end =
min(h_im / stride_h + 1, height_col);
// equivalent implementation
for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
if (h_k % hole_h == 0 && w_k % hole_w == 0) {
h_k /= hole_h;
w_k /= hole_w;
int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) * height_col + h_col) * width_col + w_col;
val = Math<T,T,T>::add(val, data_col[data_col_index]);
}
}
}
data_im[index] = Math<T,T,T>::add(val, (beta == 0 ? (T)0 : data_im[index]));
}
}
template <typename T>
void ppl_col2im_gpu(
cudaStream_t stream, const T* data_col,
int channels, int height, int width,
int kernel_h, int kernel_w, int pad_h,
int pad_w, int stride_h, int stride_w,
int hole_h, int hole_w, int height_col,
int width_col, const float beta, T* data_im) {
int num_kernels = channels * height * width;
ppl_col2im_gpu_kernel<T><<<(num_kernels + 1024 - 1) / 1024, 1024, 0, stream>>>(
num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
pad_h, pad_w, stride_h, stride_w, hole_h, hole_w,
height_col, width_col, beta, data_im);
}
template <typename T>
__global__ void ppl_cukernel_matrix_padding(
const T* input, int inHeight,
int inWidth, T* output,
int out_height, int out_width) {
int w_idx = blockIdx.x * blockDim.x + threadIdx.x;
int h_idx = blockIdx.y * blockDim.y + threadIdx.y;
if (w_idx >= inWidth || h_idx >= inHeight) return;
uint64_t in_index = h_idx * inWidth + w_idx;
uint64_t out_index = h_idx * out_width + w_idx;
output[out_index] = input[in_index];
}
template <typename T>
void cuda_matrix_padding(
cudaStream_t stream, const T* input,
int inHeight, int inWidth, T* output,
int out_height, int out_width) {
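// The destination is zero-filled first, so the kernel only writes the valid
// inHeight x inWidth region; the padded rows and columns stay zero and contribute
// nothing to the subsequent GEMM.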
cudaMemset(output, 0, out_height * out_width * sizeof(T));
dim3 blockSize(16, 16, 1);
dim3 gridSize((inWidth + 15) / 16, (inHeight + 15) / 16, 1);
ppl_cukernel_matrix_padding<T><<<gridSize, blockSize, 0, stream>>>(
input, inHeight, inWidth, output, out_height, out_width);
}
template<typename T>
void __global__ addVectorToMatrixColumnKernel(
int numRows, int numCols, int stride,
float alpha, const T* bias, float beta, T* out) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numRows * numCols) return;
int row_idx = index / numCols;
int col_idx = index % numCols;
uint64_t out_index = row_idx * stride + col_idx;
uint64_t in_index = index;
out[out_index] = Math<T,T,T>::add(
Math<T,T,T>::mul((T)alpha, bias[row_idx]),
((beta == 0) ? (T)0 : Math<T,T,T>::mul((T)beta, out[in_index])));
}
template<typename T>
void addVectorToMatrixColumn(
cudaStream_t stream, int M,
int N, int stride, float alpha,
const T* biasData, float beta, T* outData){
const uint64_t count = M * N;
const int blockSize = 128;
const int gridSize = (count + blockSize - 1) / blockSize;
addVectorToMatrixColumnKernel<T><<<gridSize, blockSize, 0, stream>>>(M, N,
stride, alpha, biasData, beta, outData);
}
uint64_t pplConvTransposeGetFilterBufSizeCudaFp32(
int num_filters, int num_channels, int filter_height, int filter_width) {
int M = num_filters;
M *= filter_height;
M *= filter_width;
size_t K = num_channels;
size_t padM = Align(M, 1);
size_t padK = Align(K, 8);
size_t dst = Align(padM * padK * sizeof(__half), 128);
    return dst * 2; // x2: room for the padded filter plus its transposed copy
}
template<typename T>
void pplConvTransposeConvertFilter(
cudaStream_t stream, const T* filter,
int num_filters, int num_channels, int filter_height,
int filter_width, T* cvt_filter) {
int M = num_filters;
M *= filter_height;
M *= filter_width;
size_t K = num_channels;
size_t padM = Align(M, 1);
size_t padK = Align(K, 8);
    //no need to transpose here; just zero-pad the K x M filter to padK x padM
cuda_matrix_padding<T>(stream, filter, K, M, cvt_filter, padK, padM);
}
uint64_t pplConvTransposeGetTempBufSizeCudaFp32(
int group, int in_c, int in_h, int in_w,
int out_c, int kernel_h, int kernel_w,
int pad_h, int pad_w, int stride_h,
int stride_w, int hole_h, int hole_w) {
size_t M = out_c * kernel_h * kernel_w;
size_t N = in_w * in_h;
size_t K = in_c;
size_t padN = Align(N, 8);
size_t padK = Align(K, 8);
//for trans buf
return 2 * Align(padN*padK*sizeof(__half), 128) +
Align(M*N*sizeof(__half), 128) + Align(M*padN*sizeof(__half), 128);
}
uint64_t PPLConvTransposeGetBufSizeCuda(
ppl::nn::TensorShape* input_shape,
ppl::nn::TensorShape* output_shape,
const ppl::nn::common::ConvTransposeParam* param)
{
int batch = input_shape->GetDim(0);
int in_c = input_shape->GetDim(1);
int in_h = input_shape->GetDim(2);
int in_w = input_shape->GetDim(3);
int out_c = output_shape->GetDim(1);
int out_h = output_shape->GetDim(2);
int out_w = output_shape->GetDim(3);
int group = param->group;
int kernel_h = param->kernel_shape[0];
int kernel_w = param->kernel_shape[1];
int pad_h = param->pads[0];
int pad_w = param->pads[1];
int stride_h = param->strides[0];
int stride_w = param->strides[1];
int hole_h = param->dilations[0];
int hole_w = param->dilations[1];
uint64_t size = 0;
if (input_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) {
size += pplConvTransposeGetFilterBufSizeCudaFp32(out_c, in_c, kernel_h, kernel_w) +
pplConvTransposeGetTempBufSizeCudaFp32(group, in_c, in_h, in_w, out_c, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, hole_h, hole_w);
} else {
return 0;
}
//NT gemm
int transA = 0;
int M = out_c * kernel_h * kernel_w;
int K = in_c;
int padM = Align(M, 1);
    int padK = Align(K, 8);
ppl::nn::TensorShape a_shape;
a_shape.Reshape({padM, padK});
a_shape.SetDataType(input_shape->GetDataType());
size += PPLGemmCUDAGetBufSize(&a_shape, transA);
return size;
}
template<typename T>
__global__ void remove_padding(
T *pad_data,
T *data,
const int M,
const int padN,
const int N)
{
int64_t off = blockIdx.x*256 + threadIdx.x;
int m_id = off / N;
int n_id = off % N;
int64_t in_off = (int64_t)m_id*padN + n_id;
if (off < M*N) data[off] = pad_data[in_off];
}
template<typename T>
void RemovePadding(
const cudaStream_t &stream,
T *pad_data,
T *data,
const int M,
const int padN,
const int N)
{
if(padN == N) return;
int block_size = 256;
int grid = (M*N+255) / 256;
remove_padding<T><<<grid, block_size, 0, stream>>>(pad_data, data, M, padN, N);
}
ppl::common::RetCode PPLCUDAConvTransposeForward(
cudaStream_t stream,
ppl::nn::TensorShape* input_shape,
const void* input,
const void* filter,
const void* bias,
const ppl::nn::common::ConvTransposeParam* param,
void* temp_buffer,
ppl::nn::TensorShape* output_shape,
void* output)
{
int batch = input_shape->GetDim(0);
int in_c = input_shape->GetDim(1);
int in_h = input_shape->GetDim(2);
int in_w = input_shape->GetDim(3);
int out_c = output_shape->GetDim(1);
int out_h = output_shape->GetDim(2);
int out_w = output_shape->GetDim(3);
int kernel_h = param->kernel_shape[0];
int kernel_w = param->kernel_shape[1];
int pad_h = param->pads[0];
int pad_w = param->pads[1];
int stride_h = param->strides[0];
int stride_w = param->strides[1];
int hole_h = param->dilations[0];
int hole_w = param->dilations[1];
if(input_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16) {
int num_channels = in_c;
int num_filters = out_c;
int height = in_h;
int width = in_w;
int out_height = out_h;
int out_width = out_w;
int M = out_c * kernel_h * kernel_w;
int N = in_w * in_h;
int K = in_c;
int padM = Align(M, 1);
int padN = Align(N, 8);
int padK = Align(K, 8);
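        // temp_buffer is carved into consecutive 128-byte aligned regions:
        // pad_in_data (padK x padN) | pad_out_data (M x padN) | out_data (M x N) |
        // cvt_filter (padK x padM) | trans_filter (padM x padK) | trans_in_data (padN x padK)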
__half* pad_in_data = (__half*)temp_buffer;
__half* pad_out_data = pad_in_data + Align(padN*padK, 128/sizeof(__half));
__half* out_data = pad_out_data + Align(M*padN, 128/sizeof(__half));
// cvt filter
__half* cvt_filter = out_data + Align(M*N, 128/sizeof(__half));
pplConvTransposeConvertFilter<__half>(stream, (__half*)filter, num_filters, num_channels, kernel_h, kernel_w, cvt_filter);
ppl::nn::common::TransposeParam trans_param;
trans_param.perm.push_back(1);
trans_param.perm.push_back(0);
__half *trans_filter = cvt_filter + Align(padK * padM, 128 / sizeof(__half));
__half *trans_in_data = trans_filter + Align(padM * padK, 128 / sizeof(__half));
ppl::nn::TensorShape a_shape, b_shape, c_shape, out_a_shape, out_b_shape;
a_shape.SetDataType(input_shape->GetDataType());
b_shape.SetDataType(input_shape->GetDataType());
c_shape.SetDataType(output_shape->GetDataType());
out_a_shape.SetDataType(input_shape->GetDataType());
out_b_shape.SetDataType(input_shape->GetDataType());
a_shape.Reshape({padK, M});
b_shape.Reshape({padK, padN});
c_shape.Reshape({M, padN});
out_a_shape.Reshape({padM, padK});
out_b_shape.Reshape({padN, padK});
ppl::common::RetCode status = PPLCUDATransposeForwardImp(stream,
trans_param, &a_shape, cvt_filter,
&out_a_shape, trans_filter);
ppl::nn::common::GemmParam gemm_param;
fuse_param_t fuse_param;
gemm_param.bias_term = 0;
gemm_param.transA = 0; gemm_param.transB = 1;
gemm_param.alpha = 1.f; gemm_param.beta = 1.f;
gemm_param.N = padN;
for(int n = 0; n < batch; ++n) {
int offset_in = n * num_channels * height * width;
int offset_out = n * num_filters * out_height * out_width;
cuda_matrix_padding<__half>(stream, ((__half*)input) + offset_in, K, N,
pad_in_data, padK, padN);
PPLCUDATransposeForwardImp(stream,
trans_param, &b_shape, pad_in_data,
&out_b_shape, trans_in_data);
            //NT gemm, reusing out_a_shape/out_b_shape/c_shape prepared above
            //input transpose KxN -> NxK weight transpose KxM -> MxK
int kernel_id = 0;
PPLCUDAGemmForwardImp(stream,
&out_a_shape, trans_filter,
&out_b_shape, trans_in_data,
NULL,
&c_shape, pad_out_data,
gemm_param,
NULL,
fuse_param,
kernel_id);
RemovePadding<__half>(stream, pad_out_data, out_data, M, padN, N);
            ppl_col2im_gpu<__half>(stream, (const __half*)(padN == N ? pad_out_data : out_data), num_filters,
                out_height, out_width, kernel_h, kernel_w, pad_h, pad_w, stride_h,
                stride_w, hole_h, hole_w, height, width, 0.f, ((__half*)output) + offset_out);
if (NULL != bias) {
addVectorToMatrixColumn<__half>(stream, num_filters, out_height * out_width,
out_height * out_width, 1.f, (__half*)bias, 1.f, ((__half*)output) + offset_out);
}
}
return ppl::common::RC_SUCCESS;
} else {
return ppl::common::RC_UNSUPPORTED;
}
}
//ppl::common::RetCode PPLCUDAConvTransposeModifyWeight(
// const cudaStream_t &stream,
// void *cvt_filter,
// void *filter,
// ppl::nn::TensorShape* filter_shape){
// return ppl::common::RC_UNSUPPORTED;
// //int in_c_pad = filter_shape->GetDim(0);
// //int out_c_pad = filter_shape->GetDim(1);
// //int kernel_h = filter_shape->GetDim(2);
// //int kernel_w = filter_shape->GetDim(3);
// //int n = out_c_pad * kernel_h*kernel_w;
// //if (filter_shape->GetDataType() == ppl::common::DATATYPE_FLOAT16){
// //} else if (filter_shape->GetDataType() == ppl::common::DATATYPE_FLOAT32){
// //}
// //ppl::nn::common::TransposeParam param;
// //param.perm.push_back(3);
// //param.perm.push_back(1);
// //param.perm.push_back(2);
// //param.perm.push_back(0);
// //ppl::nn::TensorShape cvt_shape, input_shape;
// //input_shape.Reshape({in_c_pad, kernel_h, kernel_w, out_c_pad});
// //input_shape.SetDataType(filter_shape->GetDataType());
// //cvt_shape.Reshape({out_c_pad, kernel_h, kernel_w, in_c_pad});
// //cvt_shape.SetDataType(filter_shape->GetDataType());
//
// //cudaMemcpy(cvt_filter, filter, n*in_c_pad*sizeof(__half), cudaMemcpyDeviceToDevice);
// //return 0;
//}
|
6ad19a04ecd9335b5f4fb7ef26924dd9c65416e3.hip | // !!! This is a file automatically generated by hipify!!!
///*
// * LinearSysSolver.cpp
// *
// * Created on: Jul 8, 2013
// * Author: adm85
// */
//
//#include <vector>
//#include <iostream>
//#include <time.h>
//#include "LinearSysSolver.h"
//#include "rocblas.h"
//#include "cula.h"
//
//
//LinearSysSolver::LinearSysSolver()
//{
// // TODO Auto-generated constructor stub
//
//}
//
//LinearSysSolver::~LinearSysSolver()
//{
// // TODO Auto-generated destructor stub
//}
//
///**
// * Solves A*x=B for x. The result is stored in the vector pointed to by B.
// */
//void LinearSysSolver::solveSystem(hipComplex* A, int M_A, int N_A, hipComplex* B, int M_B, int N_B) {
// //Get the LU Factorization
// hipComplex* LUMat = new hipComplex[M_A*N_A];
// int ipivLength = N_A;
// int* ipiv = new int[ipivLength];
// getLUDecomposition(A, M_A, N_A, LUMat, ipiv, ipivLength);
//
// //Calculate P*b
// swapPivotRows(B, M_B, N_B, ipiv, ipivLength);
//
// //Solve the system. The result will be stored in B
// cublasSolveLinearSystem(LUMat, M_A, N_A, B, M_B, N_B);
//
// // DEBUG CODE -------
// //hipComplex* test = multiplyMatrices(xTxInv, N, N, xTx, N, N);
// hipComplex* test = multiplyMatrices(A, M_A, N_A, B, M_B, N_B);
// cout << endl << "X * XInv" << endl;
// columnMajorPrintArray(test, M_A, N_B);
// delete [] test;
// // END DEBUG CODE ---
//
// delete [] LUMat;
// delete [] ipiv;
//}
//
//
///**
// * Uses the CULA library to get the LU decomposition of the matrix.
// */
//void LinearSysSolver::getLUDecomposition(hipComplex* x, int M, int N, hipComplex* LUMat, int* ipiv, int ipivLength) {
//
// culaDeviceFloatComplex* devxTx;
// culaDeviceInt* devIPIV;
//
// hipMalloc(&devxTx, M*N*sizeof(culaDeviceFloatComplex));
// hipMalloc(&devIPIV, ipivLength*sizeof(culaDeviceInt));
// hipMemcpy(devxTx, x, M*N*sizeof(culaDeviceFloatComplex), hipMemcpyHostToDevice);
//
// culaStatus culaStat;
// culaInitialize();
//
// culaStat = culaDeviceCgetrf(M, N, devxTx, M, devIPIV);
// if(culaStat != culaNoError) {
// cout << "Cula Cgetrf failure" << endl;
// }
//
// culaShutdown();
//
// //LUMat = new hipComplex[M*N];
// hipMemcpy(LUMat, devxTx, M*N*sizeof(culaDeviceFloatComplex), hipMemcpyDeviceToHost);
// hipMemcpy(ipiv, devIPIV, ipivLength*sizeof(culaDeviceInt), hipMemcpyDeviceToHost);
//
//// getL(L, LUMat, M, N);
////
// cout << "LUMat Inside:" << endl;
// columnMajorPrintArray(LUMat, M, N);
////
//// getU(U, LUMat, M, N);
//// cout << endl << "U" << endl;
//// columnMajorPrintArray(U, M, N);
//
// hipFree(devxTx);
// hipFree(devIPIV);
//}
//
///**
// * Using the information from the CULA generated IPIF array,
// * this function swaps rows as appropriate.
// */
//void LinearSysSolver::swapPivotRows(hipComplex* x, int M, int N, int* ipiv, int ipivLength) {
// //Temporary row vector
// hipComplex rowVec[N];
//
// //We use index 1 based ordering because this is what CULA returns
// for(int i=1; i <= ipivLength; i++) {
// //Check to see if the row swaps. This happens when element x of the ipif
// //array is not equal to x. When element x is different, it means that row x
// //and the row specified in element x swap places.
// if(ipiv[i-1] != i) {
// int startIndex = i-1;
// //Copy the current row into the temporary row vector
// for(int j = 0; j < N; j++) {
// rowVec[j].x = x[startIndex+j*M].x;
// rowVec[j].y = x[startIndex+j*M].y;
// }
//
// //Copy the specified row into the current row
// int specRowStart = ipiv[i-1]-1;
// for(int j=0; j < N; j++) {
// x[startIndex+j*M].x = x[specRowStart+j*M].x;
// x[startIndex+j*M].y = x[specRowStart+j*M].y;
// }
//
// //Copy the temp row into the specified row
// for(int j=0; j < N; j++) {
// x[specRowStart+j*M].x = rowVec[j].x;
// x[specRowStart+j*M].y = rowVec[j].y;
// }
// }
// }
//
//}
//
//void LinearSysSolver::cublasSolveLinearSystem(hipComplex* A, int M, int N, hipComplex* B, int M_B, int N_B) {
// hipComplex* xInv = new hipComplex[M*N_B];
//
// //Now put L, U, and the I matrix on the GPU
// hipblasStatus_t stat;
// hipblasHandle_t handle;
//
// hipComplex* devA;
// hipComplex* devB;
// hipMalloc(&devA, M*N*sizeof(hipComplex));
// hipMalloc(&devB, M_B*N_B*sizeof(hipComplex));
//
// stat = hipblasCreate(&handle);
// if(stat != HIPBLAS_STATUS_SUCCESS) {
// cout << "Error in solver" << endl;
// }
// stat = hipblasSetMatrix(M, N, sizeof(hipComplex), A, M, devA, M);
// if(stat != HIPBLAS_STATUS_SUCCESS) {
// cout << "Error in solver" << endl;
// }
// stat = hipblasSetMatrix(M_B, N_B, sizeof(hipComplex), B, M_B, devB, M_B);
// if(stat != HIPBLAS_STATUS_SUCCESS) {
// cout << "Error in solver" << endl;
// }
//
// //Set up Alpha
// hipComplex alpha;
// alpha.x = 1;
// alpha.y = 0;
//
// //First solve L*y = P*b
// stat = hipblasCtrsm(handle, HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_LOWER, HIPBLAS_OP_N, HIPBLAS_DIAG_UNIT, M, N, &alpha, devA, M, devB, M_B);
// if(stat != HIPBLAS_STATUS_SUCCESS) {
// cout << "Error solving for y" << endl;
// }
//
// //Then solve U*x = y
// stat = hipblasCtrsm(handle, HIPBLAS_SIDE_LEFT, HIPBLAS_FILL_MODE_UPPER, HIPBLAS_OP_N, HIPBLAS_DIAG_NON_UNIT, M, N, &alpha, devA, M, devB, M_B);
// if(stat != HIPBLAS_STATUS_SUCCESS) {
// cout << "Error solving for x" << endl;
// }
//
// //Get results, and store them in matrix B
// hipMemcpy(B, devB, M*N_B*sizeof(hipComplex), hipMemcpyDeviceToHost);
//
// //Free resources
// hipblasDestroy(handle);
// hipFree(devA);
// hipFree(devB);
//}
//
///**
// * Multiplies two matrices together. Result is stored in B on exit.
// */
//hipComplex* LinearSysSolver::multiplyMatrices(hipComplex* A, int M_A, int N_A, hipComplex* B, int M_B, int N_B) {
// hipError_t cudaStat;
// hipblasStatus_t stat;
// hipblasHandle_t handle;
//
// hipComplex* devA;
// hipComplex* devB;
// hipComplex* devC;
// hipComplex* alpha = new hipComplex;
// hipComplex* beta = new hipComplex;
// hipComplex* hostC = new hipComplex[M_A*N_B];
// alpha->x = 1;
// alpha->y = 0;
// beta->x = 0;
// beta->y = 0;
//
// cudaStat = hipMalloc(&devA, M_A*N_A*sizeof(hipComplex));
// cudaStat = hipMalloc(&devB, M_B*N_B*sizeof(hipComplex));
// cudaStat = hipMalloc(&devC, M_A*N_B*sizeof(hipComplex));
// if(cudaStat != hipSuccess) {
// cout << "Horrible failure!" << endl;
// }
//
// stat = hipblasCreate(&handle);
//
// stat = hipblasSetMatrix(M_A, N_A, sizeof(hipComplex), A, M_A, devA, M_A);
// if (stat != HIPBLAS_STATUS_SUCCESS) {
// cout << "Data download A failed" << endl;
// }
// stat = hipblasSetMatrix(M_B, N_B, sizeof(hipComplex), B, M_B, devB, M_B);
// if (stat != HIPBLAS_STATUS_SUCCESS) {
// cout << "Data download B failed" << endl;
// }
//
// //Perform the multiply.
// stat = hipblasCgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, M_A, N_B, N_A, alpha, devA, M_A, devB, M_B, beta, devC, M_A);
//
// stat = hipblasGetMatrix(M_A, N_B, sizeof(hipComplex), devC, M_A, hostC, M_A);
// if (stat != HIPBLAS_STATUS_SUCCESS) {
// cout << "Failed to get devC to hostC" << endl;
// cout << stat << endl;
// }
//
// hipFree(devA);
// hipFree(devB);
// hipFree(devC);
// hipblasDestroy(handle);
//
// delete alpha;
// delete beta;
// return hostC;
//
//}
//
///**
// * Prints out an array that is stored in column-major order in memory.
// */
//void LinearSysSolver::columnMajorPrintArray(hipComplex* x, int M, int N) {
// int realIndex;
// cout << "------------------------------------------------------" << endl;
// cout << " Printing Column Order Matrix " << endl;
// cout << "------------------------------------------------------" << endl;
// for(int i=0; i < M; i++) {
// cout << "Row: " << (i+1) << " ";
// for(int j=0; j < N; j++) {
// realIndex = (M*j)+i;
// cout << x[realIndex].x;
// if(x[realIndex].y >= 0) {
// cout << "+";
// }
// cout << x[realIndex].y << "i ";
// }
// cout << endl;
// }
//}
| 6ad19a04ecd9335b5f4fb7ef26924dd9c65416e3.cu | ///*
// * LinearSysSolver.cpp
// *
// * Created on: Jul 8, 2013
// * Author: adm85
// */
//
//#include <vector>
//#include <iostream>
//#include <time.h>
//#include "LinearSysSolver.h"
//#include "cublas_v2.h"
//#include "cula.h"
//
//
//LinearSysSolver::LinearSysSolver()
//{
// // TODO Auto-generated constructor stub
//
//}
//
//LinearSysSolver::~LinearSysSolver()
//{
// // TODO Auto-generated destructor stub
//}
//
///**
// * Solves A*x=B for x. The result is stored in the vector pointed to by B.
// */
//void LinearSysSolver::solveSystem(cuComplex* A, int M_A, int N_A, cuComplex* B, int M_B, int N_B) {
// //Get the LU Factorization
// cuComplex* LUMat = new cuComplex[M_A*N_A];
// int ipivLength = N_A;
// int* ipiv = new int[ipivLength];
// getLUDecomposition(A, M_A, N_A, LUMat, ipiv, ipivLength);
//
// //Calculate P*b
// swapPivotRows(B, M_B, N_B, ipiv, ipivLength);
//
// //Solve the system. The result will be stored in B
// cublasSolveLinearSystem(LUMat, M_A, N_A, B, M_B, N_B);
//
// // DEBUG CODE -------
// //cuComplex* test = multiplyMatrices(xTxInv, N, N, xTx, N, N);
// cuComplex* test = multiplyMatrices(A, M_A, N_A, B, M_B, N_B);
// cout << endl << "X * XInv" << endl;
// columnMajorPrintArray(test, M_A, N_B);
// delete [] test;
// // END DEBUG CODE ---
//
// delete [] LUMat;
// delete [] ipiv;
//}
//
//
///**
// * Uses the CULA library to get the LU decomposition of the matrix.
// */
//void LinearSysSolver::getLUDecomposition(cuComplex* x, int M, int N, cuComplex* LUMat, int* ipiv, int ipivLength) {
//
// culaDeviceFloatComplex* devxTx;
// culaDeviceInt* devIPIV;
//
// cudaMalloc(&devxTx, M*N*sizeof(culaDeviceFloatComplex));
// cudaMalloc(&devIPIV, ipivLength*sizeof(culaDeviceInt));
// cudaMemcpy(devxTx, x, M*N*sizeof(culaDeviceFloatComplex), cudaMemcpyHostToDevice);
//
// culaStatus culaStat;
// culaInitialize();
//
// culaStat = culaDeviceCgetrf(M, N, devxTx, M, devIPIV);
// if(culaStat != culaNoError) {
// cout << "Cula Cgetrf failure" << endl;
// }
//
// culaShutdown();
//
// //LUMat = new cuComplex[M*N];
// cudaMemcpy(LUMat, devxTx, M*N*sizeof(culaDeviceFloatComplex), cudaMemcpyDeviceToHost);
// cudaMemcpy(ipiv, devIPIV, ipivLength*sizeof(culaDeviceInt), cudaMemcpyDeviceToHost);
//
//// getL(L, LUMat, M, N);
////
// cout << "LUMat Inside:" << endl;
// columnMajorPrintArray(LUMat, M, N);
////
//// getU(U, LUMat, M, N);
//// cout << endl << "U" << endl;
//// columnMajorPrintArray(U, M, N);
//
// cudaFree(devxTx);
// cudaFree(devIPIV);
//}
//
///**
// * Using the information from the CULA generated IPIF array,
// * this function swaps rows as appropriate.
// */
//void LinearSysSolver::swapPivotRows(cuComplex* x, int M, int N, int* ipiv, int ipivLength) {
// //Temporary row vector
// cuComplex rowVec[N];
//
// //We use index 1 based ordering because this is what CULA returns
// for(int i=1; i <= ipivLength; i++) {
// //Check to see if the row swaps. This happens when element x of the ipif
// //array is not equal to x. When element x is different, it means that row x
// //and the row specified in element x swap places.
// if(ipiv[i-1] != i) {
// int startIndex = i-1;
// //Copy the current row into the temporary row vector
// for(int j = 0; j < N; j++) {
// rowVec[j].x = x[startIndex+j*M].x;
// rowVec[j].y = x[startIndex+j*M].y;
// }
//
// //Copy the specified row into the current row
// int specRowStart = ipiv[i-1]-1;
// for(int j=0; j < N; j++) {
// x[startIndex+j*M].x = x[specRowStart+j*M].x;
// x[startIndex+j*M].y = x[specRowStart+j*M].y;
// }
//
// //Copy the temp row into the specified row
// for(int j=0; j < N; j++) {
// x[specRowStart+j*M].x = rowVec[j].x;
// x[specRowStart+j*M].y = rowVec[j].y;
// }
// }
// }
//
//}
//
//void LinearSysSolver::cublasSolveLinearSystem(cuComplex* A, int M, int N, cuComplex* B, int M_B, int N_B) {
// cuComplex* xInv = new cuComplex[M*N_B];
//
// //Now put L, U, and the I matrix on the GPU
// cublasStatus_t stat;
// cublasHandle_t handle;
//
// cuComplex* devA;
// cuComplex* devB;
// cudaMalloc(&devA, M*N*sizeof(cuComplex));
// cudaMalloc(&devB, M_B*N_B*sizeof(cuComplex));
//
// stat = cublasCreate(&handle);
// if(stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Error in solver" << endl;
// }
// stat = cublasSetMatrix(M, N, sizeof(cuComplex), A, M, devA, M);
// if(stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Error in solver" << endl;
// }
// stat = cublasSetMatrix(M_B, N_B, sizeof(cuComplex), B, M_B, devB, M_B);
// if(stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Error in solver" << endl;
// }
//
// //Set up Alpha
// cuComplex alpha;
// alpha.x = 1;
// alpha.y = 0;
//
// //First solve L*y = P*b
// stat = cublasCtrsm(handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, CUBLAS_DIAG_UNIT, M, N, &alpha, devA, M, devB, M_B);
// if(stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Error solving for y" << endl;
// }
//
// //Then solve U*x = y
// stat = cublasCtrsm(handle, CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER, CUBLAS_OP_N, CUBLAS_DIAG_NON_UNIT, M, N, &alpha, devA, M, devB, M_B);
// if(stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Error solving for x" << endl;
// }
//
// //Get results, and store them in matrix B
// cudaMemcpy(B, devB, M*N_B*sizeof(cuComplex), cudaMemcpyDeviceToHost);
//
// //Free resources
// cublasDestroy(handle);
// cudaFree(devA);
// cudaFree(devB);
//}
//
///**
// * Multiplies two matrices together. Result is stored in B on exit.
// */
//cuComplex* LinearSysSolver::multiplyMatrices(cuComplex* A, int M_A, int N_A, cuComplex* B, int M_B, int N_B) {
// cudaError_t cudaStat;
// cublasStatus_t stat;
// cublasHandle_t handle;
//
// cuComplex* devA;
// cuComplex* devB;
// cuComplex* devC;
// cuComplex* alpha = new cuComplex;
// cuComplex* beta = new cuComplex;
// cuComplex* hostC = new cuComplex[M_A*N_B];
// alpha->x = 1;
// alpha->y = 0;
// beta->x = 0;
// beta->y = 0;
//
// cudaStat = cudaMalloc(&devA, M_A*N_A*sizeof(cuComplex));
// cudaStat = cudaMalloc(&devB, M_B*N_B*sizeof(cuComplex));
// cudaStat = cudaMalloc(&devC, M_A*N_B*sizeof(cuComplex));
// if(cudaStat != cudaSuccess) {
// cout << "Horrible failure!" << endl;
// }
//
// stat = cublasCreate(&handle);
//
// stat = cublasSetMatrix(M_A, N_A, sizeof(cuComplex), A, M_A, devA, M_A);
// if (stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Data download A failed" << endl;
// }
// stat = cublasSetMatrix(M_B, N_B, sizeof(cuComplex), B, M_B, devB, M_B);
// if (stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Data download B failed" << endl;
// }
//
// //Perform the multiply.
// stat = cublasCgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, M_A, N_B, N_A, alpha, devA, M_A, devB, M_B, beta, devC, M_A);
//
// stat = cublasGetMatrix(M_A, N_B, sizeof(cuComplex), devC, M_A, hostC, M_A);
// if (stat != CUBLAS_STATUS_SUCCESS) {
// cout << "Failed to get devC to hostC" << endl;
// cout << stat << endl;
// }
//
// cudaFree(devA);
// cudaFree(devB);
// cudaFree(devC);
// cublasDestroy(handle);
//
// delete alpha;
// delete beta;
// return hostC;
//
//}
//
///**
// * Prints out an array that is stored in column-major order in memory.
// */
//void LinearSysSolver::columnMajorPrintArray(cuComplex* x, int M, int N) {
// int realIndex;
// cout << "------------------------------------------------------" << endl;
// cout << " Printing Column Order Matrix " << endl;
// cout << "------------------------------------------------------" << endl;
// for(int i=0; i < M; i++) {
// cout << "Row: " << (i+1) << " ";
// for(int j=0; j < N; j++) {
// realIndex = (M*j)+i;
// cout << x[realIndex].x;
// if(x[realIndex].y >= 0) {
// cout << "+";
// }
// cout << x[realIndex].y << "i ";
// }
// cout << endl;
// }
//}
|
b4da6c25092e353bb51a4a1f060add3990f13e55.hip | // !!! This is a file automatically generated by hipify!!!
/*-----------------------------------------------------------
** gaussian.cu -- The program is to solve a linear system Ax = b
** by using Gaussian Elimination. The algorithm on page 101
** ("Foundations of Parallel Programming") is used.
** The sequential version is gaussian.c. This parallel
** implementation converts three independent for() loops
** into three Fans. Use the data file ge_3.dat to verify
** the correction of the output.
**
** Written by Andreas Kura, 02/15/95
** Modified by Chong-wei Xu, 04/20/95
** Modified by Chris Gregg for CUDA, 07/20/2009
**-----------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "hip/hip_runtime.h"
#include <string.h>
#include <math.h>
#ifdef RD_WG_SIZE_0_0
#define MAXBLOCKSIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define MAXBLOCKSIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define MAXBLOCKSIZE RD_WG_SIZE
#else
#define MAXBLOCKSIZE 512
#endif
//2D defines. Go from specific to general
#ifdef RD_WG_SIZE_1_0
#define BLOCK_SIZE_XY RD_WG_SIZE_1_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_XY RD_WG_SIZE_1
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_XY RD_WG_SIZE
#else
#define BLOCK_SIZE_XY 4
#endif
int Size;
float *a, *b, *finalVec;
float *m;
FILE *fp;
void InitProblemOnce(char *filename);
void InitPerRun();
void ForwardSub();
void BackSub();
__global__ void Fan1(float *m, float *a, int Size, int t);
__global__ void Fan2(float *m, float *a, float *b,int Size, int j1, int t);
void InitMat(float *ary, int nrow, int ncol);
void InitAry(float *ary, int ary_size);
void PrintMat(float *ary, int nrow, int ncolumn);
void PrintAry(float *ary, int ary_size);
void PrintDeviceProperties();
void checkCUDAError(const char *msg);
unsigned int totalKernelTime = 0;
// create both matrix and right hand side, Ke Wang 2013/08/12 11:51:06
void
create_matrix(float *m, int size){
int i,j;
float lamda = -0.01;
float coe[2*size-1];
float coe_i =0.0;
for (i=0; i < size; i++)
{
coe_i = 10*exp(lamda*i);
j=size-1+i;
coe[j]=coe_i;
j=size-1-i;
coe[j]=coe_i;
}
for (i=0; i < size; i++) {
for (j=0; j < size; j++) {
m[i*size+j]=coe[size-1-i+j];
}
}
}
int main(int argc, char *argv[])
{
printf("WG size of kernel 1 = %d, WG size of kernel 2= %d X %d\n", MAXBLOCKSIZE, BLOCK_SIZE_XY, BLOCK_SIZE_XY);
int verbose = 1;
int i, j;
char flag;
if (argc < 2) {
printf("Usage: gaussian -f filename / -s size [-q]\n\n");
printf("-q (quiet) suppresses printing the matrix and result values.\n");
printf("-f (filename) path of input file\n");
printf("-s (size) size of matrix. Create matrix and rhs in this program \n");
printf("The first line of the file contains the dimension of the matrix, n.");
printf("The second line of the file is a newline.\n");
printf("The next n lines contain n tab separated values for the matrix.");
printf("The next line of the file is a newline.\n");
printf("The next line of the file is a 1xn vector with tab separated values.\n");
printf("The next line of the file is a newline. (optional)\n");
printf("The final line of the file is the pre-computed solution. (optional)\n");
printf("Example: matrix4.txt:\n");
printf("4\n");
printf("\n");
printf("-0.6 -0.5 0.7 0.3\n");
printf("-0.3 -0.9 0.3 0.7\n");
printf("-0.4 -0.5 -0.3 -0.8\n");
printf("0.0 -0.1 0.2 0.9\n");
printf("\n");
printf("-0.85 -0.68 0.24 -0.53\n");
printf("\n");
printf("0.7 0.0 -0.4 -0.5\n");
exit(0);
}
//PrintDeviceProperties();
//char filename[100];
//sprintf(filename,"matrices/matrix%d.txt",size);
for(i=1;i<argc;i++) {
if (argv[i][0]=='-') {// flag
flag = argv[i][1];
switch (flag) {
case 's': // platform
i++;
Size = atoi(argv[i]);
printf("Create matrix internally in parse, size = %d \n", Size);
a = (float *) malloc(Size * Size * sizeof(float));
create_matrix(a, Size);
b = (float *) malloc(Size * sizeof(float));
for (j =0; j< Size; j++)
b[j]=1.0;
m = (float *) malloc(Size * Size * sizeof(float));
break;
case 'f': // platform
i++;
printf("Read file from %s \n", argv[i]);
InitProblemOnce(argv[i]);
break;
case 'q': // quiet
verbose = 0;
break;
}
}
}
//InitProblemOnce(filename);
InitPerRun();
//begin timing
struct timeval time_start;
gettimeofday(&time_start, NULL);
// run kernels
ForwardSub();
//end timing
struct timeval time_end;
gettimeofday(&time_end, NULL);
unsigned int time_total = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
if (verbose) {
printf("Matrix m is: \n");
PrintMat(m, Size, Size);
printf("Matrix a is: \n");
PrintMat(a, Size, Size);
printf("Array b is: \n");
PrintAry(b, Size);
}
BackSub();
if (verbose) {
printf("The final solution is: \n");
PrintAry(finalVec,Size);
}
printf("\nTime total (including memory transfers)\t%f sec\n", time_total * 1e-6);
printf("Time for CUDA kernels:\t%f sec\n",totalKernelTime * 1e-6);
/*printf("%d,%d\n",size,time_total);
fprintf(stderr,"%d,%d\n",size,time_total);*/
free(m);
free(a);
free(b);
}
/*------------------------------------------------------
** PrintDeviceProperties
**-----------------------------------------------------
*/
void PrintDeviceProperties(){
hipDeviceProp_t deviceProp;
int nDevCount = 0;
hipGetDeviceCount( &nDevCount );
printf( "Total Device found: %d", nDevCount );
for (int nDeviceIdx = 0; nDeviceIdx < nDevCount; ++nDeviceIdx )
{
memset( &deviceProp, 0, sizeof(deviceProp));
if( hipSuccess == hipGetDeviceProperties(&deviceProp, nDeviceIdx))
{
printf( "\nDevice Name \t\t - %s ", deviceProp.name );
printf( "\n**************************************");
printf( "\nTotal Global Memory\t\t\t - %lu KB", deviceProp.totalGlobalMem/1024 );
printf( "\nShared memory available per block \t - %lu KB", deviceProp.sharedMemPerBlock/1024 );
printf( "\nNumber of registers per thread block \t - %d", deviceProp.regsPerBlock );
printf( "\nWarp size in threads \t\t\t - %d", deviceProp.warpSize );
printf( "\nMemory Pitch \t\t\t\t - %zu bytes", deviceProp.memPitch );
printf( "\nMaximum threads per block \t\t - %d", deviceProp.maxThreadsPerBlock );
printf( "\nMaximum Thread Dimension (block) \t - %d %d %d", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2] );
printf( "\nMaximum Thread Dimension (grid) \t - %d %d %d", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2] );
printf( "\nTotal constant memory \t\t\t - %zu bytes", deviceProp.totalConstMem );
printf( "\nCUDA ver \t\t\t\t - %d.%d", deviceProp.major, deviceProp.minor );
printf( "\nClock rate \t\t\t\t - %d KHz", deviceProp.clockRate );
printf( "\nTexture Alignment \t\t\t - %zu bytes", deviceProp.textureAlignment );
printf( "\nDevice Overlap \t\t\t\t - %s", deviceProp. deviceOverlap?"Allowed":"Not Allowed" );
printf( "\nNumber of Multi processors \t\t - %d\n\n", deviceProp.multiProcessorCount );
}
else
printf( "\n%s", hipGetErrorString(hipGetLastError()));
}
}
/*------------------------------------------------------
** InitProblemOnce -- Initialize all of matrices and
** vectors by opening a data file specified by the user.
**
** We used dynamic array *a, *b, and *m to allocate
** the memory storages.
**------------------------------------------------------
*/
void InitProblemOnce(char *filename)
{
//char *filename = argv[1];
//printf("Enter the data file name: ");
//scanf("%s", filename);
//printf("The file name is: %s\n", filename);
fp = fopen(filename, "r");
fscanf(fp, "%d", &Size);
a = (float *) malloc(Size * Size * sizeof(float));
InitMat(a, Size, Size);
//printf("The input matrix a is:\n");
//PrintMat(a, Size, Size);
b = (float *) malloc(Size * sizeof(float));
InitAry(b, Size);
//printf("The input array b is:\n");
//PrintAry(b, Size);
m = (float *) malloc(Size * Size * sizeof(float));
}
/*------------------------------------------------------
** InitPerRun() -- Initialize the contents of the
** multiplier matrix **m
**------------------------------------------------------
*/
void InitPerRun()
{
int i;
for (i=0; i<Size*Size; i++)
*(m+i) = 0.0;
}
/*-------------------------------------------------------
** Fan1() -- Calculate multiplier matrix
** Pay attention to the index. Index i gives the range,
** which starts from 0 and goes to range-1. The real values of
** the index should be adjusted and related to the value
** of t, which is defined in ForwardSub().
**-------------------------------------------------------
*/
__global__ void Fan1(float *m_cuda, float *a_cuda, int Size, int t)
{
//if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) printf(".");
//printf("blockIDx.x:%d,threadIdx.x:%d,Size:%d,t:%d,Size-1-t:%d\n",blockIdx.x,threadIdx.x,Size,t,Size-1-t);
if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return;
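	// m[i][t] = a[i][t] / a[t][t] for rows i = t+1 .. Size-1 (row-major flattened indexing)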
*(m_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) = *(a_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) / *(a_cuda+Size*t+t);
}
/*-------------------------------------------------------
** Fan2() -- Modify the matrix A into LUD
**-------------------------------------------------------
*/
__global__ void Fan2(float *m_cuda, float *a_cuda, float *b_cuda,int Size, int j1, int t)
{
if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return;
if(threadIdx.y + blockIdx.y * blockDim.y >= Size-t) return;
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * blockDim.y + threadIdx.y;
//printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
a_cuda[Size*(xidx+1+t)+(yidx+t)] -= m_cuda[Size*(xidx+1+t)+t] * a_cuda[Size*t+(yidx+t)];
//a_cuda[xidx+1+t][yidx+t] -= m_cuda[xidx+1+t][t] * a_cuda[t][yidx+t];
if(yidx == 0){
//printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
//printf("xidx:%d,yidx:%d\n",xidx,yidx);
b_cuda[xidx+1+t] -= m_cuda[Size*(xidx+1+t)+(yidx+t)] * b_cuda[t];
}
}
/*------------------------------------------------------
** ForwardSub() -- Forward substitution of Gaussian
** elimination.
**------------------------------------------------------
*/
void ForwardSub()
{
int t;
float *m_cuda,*a_cuda,*b_cuda;
// allocate memory on GPU
hipMalloc((void **) &m_cuda, Size * Size * sizeof(float));
hipMalloc((void **) &a_cuda, Size * Size * sizeof(float));
hipMalloc((void **) &b_cuda, Size * sizeof(float));
// copy memory to GPU
hipMemcpy(m_cuda, m, Size * Size * sizeof(float),hipMemcpyHostToDevice );
hipMemcpy(a_cuda, a, Size * Size * sizeof(float),hipMemcpyHostToDevice );
hipMemcpy(b_cuda, b, Size * sizeof(float),hipMemcpyHostToDevice );
int block_size,grid_size;
block_size = MAXBLOCKSIZE;
grid_size = (Size/block_size) + (!(Size%block_size)? 0:1);
//printf("1d grid size: %d\n",grid_size);
dim3 dimBlock(block_size);
dim3 dimGrid(grid_size);
//dim3 dimGrid( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1) );
int blockSize2d, gridSize2d;
blockSize2d = BLOCK_SIZE_XY;
	gridSize2d = (Size/blockSize2d) + (!(Size%blockSize2d)? 0:1);
dim3 dimBlockXY(blockSize2d,blockSize2d);
dim3 dimGridXY(gridSize2d,gridSize2d);
// begin timing kernels
struct timeval time_start;
gettimeofday(&time_start, NULL);
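	// For each pivot column t: Fan1 computes the multipliers m[t+1..Size-1][t],
	// then Fan2 uses them to update the trailing submatrix of a and the vector b.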
for (t=0; t<(Size-1); t++) {
hipLaunchKernelGGL(( Fan1), dim3(dimGrid),dim3(dimBlock), 0, 0, m_cuda,a_cuda,Size,t);
hipDeviceSynchronize();
hipLaunchKernelGGL(( Fan2), dim3(dimGridXY),dim3(dimBlockXY), 0, 0, m_cuda,a_cuda,b_cuda,Size,Size-t,t);
hipDeviceSynchronize();
checkCUDAError("Fan2");
}
// end timing kernels
struct timeval time_end;
gettimeofday(&time_end, NULL);
totalKernelTime = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
// copy memory back to CPU
hipMemcpy(m, m_cuda, Size * Size * sizeof(float),hipMemcpyDeviceToHost );
hipMemcpy(a, a_cuda, Size * Size * sizeof(float),hipMemcpyDeviceToHost );
hipMemcpy(b, b_cuda, Size * sizeof(float),hipMemcpyDeviceToHost );
hipFree(m_cuda);
hipFree(a_cuda);
hipFree(b_cuda);
}
/*------------------------------------------------------
** BackSub() -- Backward substitution
**------------------------------------------------------
*/
void BackSub()
{
// create a new vector to hold the final answer
finalVec = (float *) malloc(Size * sizeof(float));
// solve "bottom up"
int i,j;
for(i=0;i<Size;i++){
finalVec[Size-i-1]=b[Size-i-1];
for(j=0;j<i;j++)
{
finalVec[Size-i-1]-=*(a+Size*(Size-i-1)+(Size-j-1)) * finalVec[Size-j-1];
}
finalVec[Size-i-1]=finalVec[Size-i-1]/ *(a+Size*(Size-i-1)+(Size-i-1));
}
}
void InitMat(float *ary, int nrow, int ncol)
{
int i, j;
for (i=0; i<nrow; i++) {
for (j=0; j<ncol; j++) {
fscanf(fp, "%f", ary+Size*i+j);
}
}
}
/*------------------------------------------------------
** PrintMat() -- Print the contents of the matrix
**------------------------------------------------------
*/
void PrintMat(float *ary, int nrow, int ncol)
{
int i, j;
for (i=0; i<nrow; i++) {
for (j=0; j<ncol; j++) {
printf("%8.2f ", *(ary+Size*i+j));
}
printf("\n");
}
printf("\n");
}
/*------------------------------------------------------
** InitAry() -- Initialize the array (vector) by reading
** data from the data file
**------------------------------------------------------
*/
void InitAry(float *ary, int ary_size)
{
int i;
for (i=0; i<ary_size; i++) {
fscanf(fp, "%f", &ary[i]);
}
}
/*------------------------------------------------------
** PrintAry() -- Print the contents of the array (vector)
**------------------------------------------------------
*/
void PrintAry(float *ary, int ary_size)
{
int i;
for (i=0; i<ary_size; i++) {
printf("%.2f ", ary[i]);
}
printf("\n\n");
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
hipGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
| b4da6c25092e353bb51a4a1f060add3990f13e55.cu | /*-----------------------------------------------------------
** gaussian.cu -- The program is to solve a linear system Ax = b
** by using Gaussian Elimination. The algorithm on page 101
** ("Foundations of Parallel Programming") is used.
** The sequential version is gaussian.c. This parallel
** implementation converts three independent for() loops
** into three Fans. Use the data file ge_3.dat to verify
** the correction of the output.
**
** Written by Andreas Kura, 02/15/95
** Modified by Chong-wei Xu, 04/20/95
** Modified by Chris Gregg for CUDA, 07/20/2009
**-----------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "cuda.h"
#include <string.h>
#include <math.h>
#ifdef RD_WG_SIZE_0_0
#define MAXBLOCKSIZE RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define MAXBLOCKSIZE RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define MAXBLOCKSIZE RD_WG_SIZE
#else
#define MAXBLOCKSIZE 512
#endif
//2D defines. Go from specific to general
#ifdef RD_WG_SIZE_1_0
#define BLOCK_SIZE_XY RD_WG_SIZE_1_0
#elif defined(RD_WG_SIZE_1)
#define BLOCK_SIZE_XY RD_WG_SIZE_1
#elif defined(RD_WG_SIZE)
#define BLOCK_SIZE_XY RD_WG_SIZE
#else
#define BLOCK_SIZE_XY 4
#endif
int Size;
float *a, *b, *finalVec;
float *m;
FILE *fp;
void InitProblemOnce(char *filename);
void InitPerRun();
void ForwardSub();
void BackSub();
__global__ void Fan1(float *m, float *a, int Size, int t);
__global__ void Fan2(float *m, float *a, float *b,int Size, int j1, int t);
void InitMat(float *ary, int nrow, int ncol);
void InitAry(float *ary, int ary_size);
void PrintMat(float *ary, int nrow, int ncolumn);
void PrintAry(float *ary, int ary_size);
void PrintDeviceProperties();
void checkCUDAError(const char *msg);
unsigned int totalKernelTime = 0;
// create both matrix and right hand side, Ke Wang 2013/08/12 11:51:06
void
create_matrix(float *m, int size){
int i,j;
float lamda = -0.01;
float coe[2*size-1];
float coe_i =0.0;
for (i=0; i < size; i++)
{
coe_i = 10*exp(lamda*i);
j=size-1+i;
coe[j]=coe_i;
j=size-1-i;
coe[j]=coe_i;
}
for (i=0; i < size; i++) {
for (j=0; j < size; j++) {
m[i*size+j]=coe[size-1-i+j];
}
}
}
int main(int argc, char *argv[])
{
printf("WG size of kernel 1 = %d, WG size of kernel 2= %d X %d\n", MAXBLOCKSIZE, BLOCK_SIZE_XY, BLOCK_SIZE_XY);
int verbose = 1;
int i, j;
char flag;
if (argc < 2) {
printf("Usage: gaussian -f filename / -s size [-q]\n\n");
printf("-q (quiet) suppresses printing the matrix and result values.\n");
printf("-f (filename) path of input file\n");
printf("-s (size) size of matrix. Create matrix and rhs in this program \n");
printf("The first line of the file contains the dimension of the matrix, n.");
printf("The second line of the file is a newline.\n");
printf("The next n lines contain n tab separated values for the matrix.");
printf("The next line of the file is a newline.\n");
printf("The next line of the file is a 1xn vector with tab separated values.\n");
printf("The next line of the file is a newline. (optional)\n");
printf("The final line of the file is the pre-computed solution. (optional)\n");
printf("Example: matrix4.txt:\n");
printf("4\n");
printf("\n");
printf("-0.6 -0.5 0.7 0.3\n");
printf("-0.3 -0.9 0.3 0.7\n");
printf("-0.4 -0.5 -0.3 -0.8\n");
printf("0.0 -0.1 0.2 0.9\n");
printf("\n");
printf("-0.85 -0.68 0.24 -0.53\n");
printf("\n");
printf("0.7 0.0 -0.4 -0.5\n");
exit(0);
}
//PrintDeviceProperties();
//char filename[100];
//sprintf(filename,"matrices/matrix%d.txt",size);
for(i=1;i<argc;i++) {
if (argv[i][0]=='-') {// flag
flag = argv[i][1];
switch (flag) {
case 's': // platform
i++;
Size = atoi(argv[i]);
printf("Create matrix internally in parse, size = %d \n", Size);
a = (float *) malloc(Size * Size * sizeof(float));
create_matrix(a, Size);
b = (float *) malloc(Size * sizeof(float));
for (j =0; j< Size; j++)
b[j]=1.0;
m = (float *) malloc(Size * Size * sizeof(float));
break;
case 'f': // platform
i++;
printf("Read file from %s \n", argv[i]);
InitProblemOnce(argv[i]);
break;
case 'q': // quiet
verbose = 0;
break;
}
}
}
//InitProblemOnce(filename);
InitPerRun();
//begin timing
struct timeval time_start;
gettimeofday(&time_start, NULL);
// run kernels
ForwardSub();
//end timing
struct timeval time_end;
gettimeofday(&time_end, NULL);
unsigned int time_total = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
if (verbose) {
printf("Matrix m is: \n");
PrintMat(m, Size, Size);
printf("Matrix a is: \n");
PrintMat(a, Size, Size);
printf("Array b is: \n");
PrintAry(b, Size);
}
BackSub();
if (verbose) {
printf("The final solution is: \n");
PrintAry(finalVec,Size);
}
printf("\nTime total (including memory transfers)\t%f sec\n", time_total * 1e-6);
printf("Time for CUDA kernels:\t%f sec\n",totalKernelTime * 1e-6);
/*printf("%d,%d\n",size,time_total);
fprintf(stderr,"%d,%d\n",size,time_total);*/
free(m);
free(a);
free(b);
}
/*------------------------------------------------------
** PrintDeviceProperties
**-----------------------------------------------------
*/
void PrintDeviceProperties(){
cudaDeviceProp deviceProp;
int nDevCount = 0;
cudaGetDeviceCount( &nDevCount );
printf( "Total Device found: %d", nDevCount );
for (int nDeviceIdx = 0; nDeviceIdx < nDevCount; ++nDeviceIdx )
{
memset( &deviceProp, 0, sizeof(deviceProp));
if( cudaSuccess == cudaGetDeviceProperties(&deviceProp, nDeviceIdx))
{
printf( "\nDevice Name \t\t - %s ", deviceProp.name );
printf( "\n**************************************");
printf( "\nTotal Global Memory\t\t\t - %lu KB", deviceProp.totalGlobalMem/1024 );
printf( "\nShared memory available per block \t - %lu KB", deviceProp.sharedMemPerBlock/1024 );
printf( "\nNumber of registers per thread block \t - %d", deviceProp.regsPerBlock );
printf( "\nWarp size in threads \t\t\t - %d", deviceProp.warpSize );
printf( "\nMemory Pitch \t\t\t\t - %zu bytes", deviceProp.memPitch );
printf( "\nMaximum threads per block \t\t - %d", deviceProp.maxThreadsPerBlock );
printf( "\nMaximum Thread Dimension (block) \t - %d %d %d", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2] );
printf( "\nMaximum Thread Dimension (grid) \t - %d %d %d", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2] );
printf( "\nTotal constant memory \t\t\t - %zu bytes", deviceProp.totalConstMem );
printf( "\nCUDA ver \t\t\t\t - %d.%d", deviceProp.major, deviceProp.minor );
printf( "\nClock rate \t\t\t\t - %d KHz", deviceProp.clockRate );
printf( "\nTexture Alignment \t\t\t - %zu bytes", deviceProp.textureAlignment );
printf( "\nDevice Overlap \t\t\t\t - %s", deviceProp. deviceOverlap?"Allowed":"Not Allowed" );
printf( "\nNumber of Multi processors \t\t - %d\n\n", deviceProp.multiProcessorCount );
}
else
printf( "\n%s", cudaGetErrorString(cudaGetLastError()));
}
}
/*------------------------------------------------------
** InitProblemOnce -- Initialize all of matrices and
** vectors by opening a data file specified by the user.
**
** We used dynamic array *a, *b, and *m to allocate
** the memory storages.
**------------------------------------------------------
*/
void InitProblemOnce(char *filename)
{
//char *filename = argv[1];
//printf("Enter the data file name: ");
//scanf("%s", filename);
//printf("The file name is: %s\n", filename);
fp = fopen(filename, "r");
fscanf(fp, "%d", &Size);
a = (float *) malloc(Size * Size * sizeof(float));
InitMat(a, Size, Size);
//printf("The input matrix a is:\n");
//PrintMat(a, Size, Size);
b = (float *) malloc(Size * sizeof(float));
InitAry(b, Size);
//printf("The input array b is:\n");
//PrintAry(b, Size);
m = (float *) malloc(Size * Size * sizeof(float));
}
/*------------------------------------------------------
** InitPerRun() -- Initialize the contents of the
** multiplier matrix **m
**------------------------------------------------------
*/
void InitPerRun()
{
int i;
for (i=0; i<Size*Size; i++)
*(m+i) = 0.0;
}
/*-------------------------------------------------------
** Fan1() -- Calculate multiplier matrix
** Pay attention to the index. Index i gives the range,
** which starts from 0 and goes to range-1. The real values of
** the index should be adjusted and related to the value
** of t, which is defined in ForwardSub().
**-------------------------------------------------------
*/
__global__ void Fan1(float *m_cuda, float *a_cuda, int Size, int t)
{
//if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) printf(".");
//printf("blockIDx.x:%d,threadIdx.x:%d,Size:%d,t:%d,Size-1-t:%d\n",blockIdx.x,threadIdx.x,Size,t,Size-1-t);
if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return;
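	// m[i][t] = a[i][t] / a[t][t] for rows i = t+1 .. Size-1 (row-major flattened indexing)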
*(m_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) = *(a_cuda+Size*(blockDim.x*blockIdx.x+threadIdx.x+t+1)+t) / *(a_cuda+Size*t+t);
}
/*-------------------------------------------------------
** Fan2() -- Modify the matrix A into LUD
**-------------------------------------------------------
*/
__global__ void Fan2(float *m_cuda, float *a_cuda, float *b_cuda,int Size, int j1, int t)
{
if(threadIdx.x + blockIdx.x * blockDim.x >= Size-1-t) return;
if(threadIdx.y + blockIdx.y * blockDim.y >= Size-t) return;
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * blockDim.y + threadIdx.y;
//printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
a_cuda[Size*(xidx+1+t)+(yidx+t)] -= m_cuda[Size*(xidx+1+t)+t] * a_cuda[Size*t+(yidx+t)];
//a_cuda[xidx+1+t][yidx+t] -= m_cuda[xidx+1+t][t] * a_cuda[t][yidx+t];
if(yidx == 0){
//printf("blockIdx.x:%d,threadIdx.x:%d,blockIdx.y:%d,threadIdx.y:%d,blockDim.x:%d,blockDim.y:%d\n",blockIdx.x,threadIdx.x,blockIdx.y,threadIdx.y,blockDim.x,blockDim.y);
//printf("xidx:%d,yidx:%d\n",xidx,yidx);
b_cuda[xidx+1+t] -= m_cuda[Size*(xidx+1+t)+(yidx+t)] * b_cuda[t];
}
}
/*------------------------------------------------------
** ForwardSub() -- Forward substitution of Gaussian
** elimination.
**------------------------------------------------------
*/
void ForwardSub()
{
int t;
float *m_cuda,*a_cuda,*b_cuda;
// allocate memory on GPU
cudaMalloc((void **) &m_cuda, Size * Size * sizeof(float));
cudaMalloc((void **) &a_cuda, Size * Size * sizeof(float));
cudaMalloc((void **) &b_cuda, Size * sizeof(float));
// copy memory to GPU
cudaMemcpy(m_cuda, m, Size * Size * sizeof(float),cudaMemcpyHostToDevice );
cudaMemcpy(a_cuda, a, Size * Size * sizeof(float),cudaMemcpyHostToDevice );
cudaMemcpy(b_cuda, b, Size * sizeof(float),cudaMemcpyHostToDevice );
int block_size,grid_size;
block_size = MAXBLOCKSIZE;
grid_size = (Size/block_size) + (!(Size%block_size)? 0:1);
//printf("1d grid size: %d\n",grid_size);
dim3 dimBlock(block_size);
dim3 dimGrid(grid_size);
//dim3 dimGrid( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1) );
int blockSize2d, gridSize2d;
blockSize2d = BLOCK_SIZE_XY;
	gridSize2d = (Size/blockSize2d) + (!(Size%blockSize2d)? 0:1);
dim3 dimBlockXY(blockSize2d,blockSize2d);
dim3 dimGridXY(gridSize2d,gridSize2d);
// begin timing kernels
struct timeval time_start;
gettimeofday(&time_start, NULL);
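	// For each pivot column t: Fan1 computes the multipliers m[t+1..Size-1][t],
	// then Fan2 uses them to update the trailing submatrix of a and the vector b.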
for (t=0; t<(Size-1); t++) {
Fan1<<<dimGrid,dimBlock>>>(m_cuda,a_cuda,Size,t);
cudaThreadSynchronize();
Fan2<<<dimGridXY,dimBlockXY>>>(m_cuda,a_cuda,b_cuda,Size,Size-t,t);
cudaThreadSynchronize();
checkCUDAError("Fan2");
}
// end timing kernels
struct timeval time_end;
gettimeofday(&time_end, NULL);
totalKernelTime = (time_end.tv_sec * 1000000 + time_end.tv_usec) - (time_start.tv_sec * 1000000 + time_start.tv_usec);
// copy memory back to CPU
cudaMemcpy(m, m_cuda, Size * Size * sizeof(float),cudaMemcpyDeviceToHost );
cudaMemcpy(a, a_cuda, Size * Size * sizeof(float),cudaMemcpyDeviceToHost );
cudaMemcpy(b, b_cuda, Size * sizeof(float),cudaMemcpyDeviceToHost );
cudaFree(m_cuda);
cudaFree(a_cuda);
cudaFree(b_cuda);
}
/*------------------------------------------------------
** BackSub() -- Backward substitution
**------------------------------------------------------
*/
void BackSub()
{
// create a new vector to hold the final answer
finalVec = (float *) malloc(Size * sizeof(float));
// solve "bottom up"
int i,j;
for(i=0;i<Size;i++){
finalVec[Size-i-1]=b[Size-i-1];
for(j=0;j<i;j++)
{
finalVec[Size-i-1]-=*(a+Size*(Size-i-1)+(Size-j-1)) * finalVec[Size-j-1];
}
finalVec[Size-i-1]=finalVec[Size-i-1]/ *(a+Size*(Size-i-1)+(Size-i-1));
}
}
void InitMat(float *ary, int nrow, int ncol)
{
int i, j;
for (i=0; i<nrow; i++) {
for (j=0; j<ncol; j++) {
fscanf(fp, "%f", ary+Size*i+j);
}
}
}
/*------------------------------------------------------
** PrintMat() -- Print the contents of the matrix
**------------------------------------------------------
*/
void PrintMat(float *ary, int nrow, int ncol)
{
int i, j;
for (i=0; i<nrow; i++) {
for (j=0; j<ncol; j++) {
printf("%8.2f ", *(ary+Size*i+j));
}
printf("\n");
}
printf("\n");
}
/*------------------------------------------------------
** InitAry() -- Initialize the array (vector) by reading
** data from the data file
**------------------------------------------------------
*/
void InitAry(float *ary, int ary_size)
{
int i;
for (i=0; i<ary_size; i++) {
fscanf(fp, "%f", &ary[i]);
}
}
/*------------------------------------------------------
** PrintAry() -- Print the contents of the array (vector)
**------------------------------------------------------
*/
void PrintAry(float *ary, int ary_size)
{
int i;
for (i=0; i<ary_size; i++) {
printf("%.2f ", ary[i]);
}
printf("\n\n");
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
|
b878c1ca8683ea41e775ac4506ff9099d3370f73.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void kernel_update(float *img1, float *img, int nx, int ny, int nz, float lambda){
int ix = 16 * blockIdx.x + threadIdx.x;
int iy = 16 * blockIdx.y + threadIdx.y;
int iz = 4 * blockIdx.z + threadIdx.z;
if (ix >= nx || iy >= ny || iz >= nz)
return;
int id = ix + iy * nx + iz * nx * ny;
img1[id] -= lambda * img[id];
if (img1[id] < 0.0f)
img1[id] = 0.0f;
if (img1[id] > 2500.0f)
img1[id] = 0.0f;
}
| b878c1ca8683ea41e775ac4506ff9099d3370f73.cu | __global__ void kernel_update(float *img1, float *img, int nx, int ny, int nz, float lambda){
int ix = 16 * blockIdx.x + threadIdx.x;
int iy = 16 * blockIdx.y + threadIdx.y;
int iz = 4 * blockIdx.z + threadIdx.z;
if (ix >= nx || iy >= ny || iz >= nz)
return;
int id = ix + iy * nx + iz * nx * ny;
img1[id] -= lambda * img[id];
if (img1[id] < 0.0f)
img1[id] = 0.0f;
if (img1[id] > 2500.0f)
img1[id] = 0.0f;
}
|
81832e58606ed4467e5ad572de4867e68894b941.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
void init(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
a[i] = i;
}
}
/*
* In the current application, `N` is larger than the grid.
* Refactor this kernel to use a grid-stride loop in order that
* each parallel thread work on more than one element of the array.
*/
__global__
void doubleElements(int *a, int N)
{
int i;
i = blockIdx.x * blockDim.x + threadIdx.x;
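  // the stride equals the total number of threads in the grid, so the loop below
  // covers all N elements even when N exceeds the grid size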
int gridStride = gridDim.x * blockDim.x;
for(int j = i; j < N; j += gridStride){
a[j] *= 2;
}
}
bool checkElementsAreDoubled(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
if (a[i] != i*2) return false;
}
return true;
}
int main()
{
/*
* `N` is greater than the size of the grid (see below).
*/
int N = 10000;
int *a;
size_t size = N * sizeof(int);
hipMallocManaged(&a, size);
init(a, N);
/*
* The size of this grid is 256*32 = 8192.
*/
size_t threads_per_block = 256;
size_t number_of_blocks = 32;
hipLaunchKernelGGL(( doubleElements), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, a, N);
hipDeviceSynchronize();
bool areDoubled = checkElementsAreDoubled(a, N);
printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE");
hipFree(a);
}
| 81832e58606ed4467e5ad572de4867e68894b941.cu | #include <stdio.h>
void init(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
a[i] = i;
}
}
/*
* In the current application, `N` is larger than the grid.
* Refactor this kernel to use a grid-stride loop in order that
* each parallel thread work on more than one element of the array.
*/
__global__
void doubleElements(int *a, int N)
{
int i;
i = blockIdx.x * blockDim.x + threadIdx.x;
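  // the stride equals the total number of threads in the grid, so the loop below
  // covers all N elements even when N exceeds the grid size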
int gridStride = gridDim.x * blockDim.x;
for(int j = i; j < N; j += gridStride){
a[j] *= 2;
}
}
bool checkElementsAreDoubled(int *a, int N)
{
int i;
for (i = 0; i < N; ++i)
{
if (a[i] != i*2) return false;
}
return true;
}
int main()
{
/*
* `N` is greater than the size of the grid (see below).
*/
int N = 10000;
int *a;
size_t size = N * sizeof(int);
cudaMallocManaged(&a, size);
init(a, N);
/*
* The size of this grid is 256*32 = 8192.
*/
size_t threads_per_block = 256;
size_t number_of_blocks = 32;
doubleElements<<<number_of_blocks, threads_per_block>>>(a, N);
cudaDeviceSynchronize();
bool areDoubled = checkElementsAreDoubled(a, N);
printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE");
cudaFree(a);
}
|
2fb7e2c630fbc6b37b2906f35285721f346eab60.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernelSyncBuf(double *A, double *A0)
{
const int i = threadIdx.x;
const int j = threadIdx.y;
const int k = threadIdx.z;
const int N1 = blockDim.x;
const int N2 = blockDim.y;
const int N3 = blockDim.z;
const int iB = blockIdx.x;
const int jB = blockIdx.y;
const int kB = blockIdx.z;
//const int N1B = gridDim.x; //just never used
const int N2B = gridDim.y;
const int N3B = gridDim.z;
const int iG = i + iB * N1;
const int jG = j + jB * N2;
const int kG = k + kB * N3;
//const int N1G = N1 * N1B; //just never used
const int N2G = N2 * N2B;
const int N3G = N3 * N3B;
const int indB = k + N3 * (j + N2 * i);
const int indA = kB + N3B * (jB + N2B * iB);
const int indA0 = kG + N3G * (jG + N2G * iG);
extern __shared__ double B[];
B[indB] = A0[indA0];
__syncthreads();
int numOfElem = N1 * N2 * N3;
int step = 1;
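    // pairwise tree reduction in shared memory; note that this assumes the block
    // volume N1*N2*N3 is a power of two so every partner index stays in range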
while (numOfElem > 1)
{
if (indB % (2*step) == 0)
{
B[indB] = B[indB] + B[indB + step];
}
__syncthreads();
numOfElem /= 2;
step *= 2;
}
if (indB == 0)
{
A[indA] = B[0] / (N1 * N2 * N3);
}
} | 2fb7e2c630fbc6b37b2906f35285721f346eab60.cu | #include "includes.h"
__global__ void kernelSyncBuf(double *A, double *A0)
{
const int i = threadIdx.x;
const int j = threadIdx.y;
const int k = threadIdx.z;
const int N1 = blockDim.x;
const int N2 = blockDim.y;
const int N3 = blockDim.z;
const int iB = blockIdx.x;
const int jB = blockIdx.y;
const int kB = blockIdx.z;
//const int N1B = gridDim.x; //just never used
const int N2B = gridDim.y;
const int N3B = gridDim.z;
const int iG = i + iB * N1;
const int jG = j + jB * N2;
const int kG = k + kB * N3;
//const int N1G = N1 * N1B; //just never used
const int N2G = N2 * N2B;
const int N3G = N3 * N3B;
const int indB = k + N3 * (j + N2 * i);
const int indA = kB + N3B * (jB + N2B * iB);
const int indA0 = kG + N3G * (jG + N2G * iG);
extern __shared__ double B[];
B[indB] = A0[indA0];
__syncthreads();
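// Pairwise tree reduction in shared memory: on each pass, every thread whose
// index is a multiple of 2*step folds in its neighbour, halving the element
// count. This assumes N1*N2*N3 is a power of two; thread 0 then writes the
// block average below.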
int numOfElem = N1 * N2 * N3;
int step = 1;
while (numOfElem > 1)
{
if (indB % (2*step) == 0)
{
B[indB] = B[indB] + B[indB + step];
}
__syncthreads();
numOfElem /= 2;
step *= 2;
}
if (indB == 0)
{
A[indA] = B[0] / (N1 * N2 * N3);
}
} |
8cac1db834d2e63c2a407fb49c4b7fb55968d085.hip | // !!! This is a file automatically generated by hipify!!!
/**
* syr2k.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Will Killian <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <assert.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#include <pthread.h>
#define POLYBENCH_TIME 1
#include "syr2k.cuh"
#include <polybench.h>
#include <polybenchUtilFuncts.h>
// Macros to generate openmp schedule.
#include <macros.h>
// Time measures implementation.
#include <timing.h>
// Offloading support functions.
#include <offload.h>
// define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
#define RUN_ON_CPU
/* GPU pointers now as global to be shared between kernels. */
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
DATA_TYPE *C_gpu;
/* ------------------------------------------------------------- */
/* Arrays initialization. */
void init_arrays(int ni, int nj,
DATA_TYPE *alpha,
DATA_TYPE *beta,
DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(C, NI, NI, ni, ni))
{
int i, j;
*alpha = 32412;
*beta = 2123;
for (i = 0; i < ni; i++) {
for (j = 0; j < nj; j++) {
A[i][j] = ((DATA_TYPE)i * j) / ni;
B[i][j] = ((DATA_TYPE)i * j) / ni;
}
}
for (i = 0; i < ni; i++) {
for (j = 0; j < ni; j++) {
C[i][j] = ((DATA_TYPE)i * j) / ni;
}
}
}
/* ------------------------------------------------------------- */
void copy_array(int ni, DATA_TYPE POLYBENCH_2D(C_source, NI, NI, ni, ni), DATA_TYPE POLYBENCH_2D(C_dest, NI, NI, ni, ni)) {
int i, j;
for (i = 0; i < ni; i++) {
for (j = 0; j < ni; j++) {
C_dest[i][j] = C_source[i][j];
// printf("%4.2f - %4.2f\n", C_dest[i][j], C_source[i][j]);
}
}
}
/* ------------------------------------------------------------- */
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, DATA_TYPE POLYBENCH_2D(C, NI, NI, ni, ni)) {
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < ni; j++) {
fprintf(stderr, DATA_PRINTF_MODIFIER, C[i][j]);
if ((i * ni + j) % 20 == 0) fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
/* ------------------------------------------------------------- */
/* Original Version. */
void syr2kCpu(int ni, int nj,
DATA_TYPE alpha,
DATA_TYPE beta,
DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(C, NI, NI, ni, ni)) {
int i, j, k;
/* C := alpha*A*B' + alpha*B*A' + beta*C */
for (i = 0; i < _PB_NI; i++) {
for (j = 0; j < _PB_NI; j++) {
C[i][j] *= beta;
}
}
for (i = 0; i < _PB_NI; i++) {
for (j = 0; j < _PB_NI; j++) {
for (k = 0; k < _PB_NJ; k++) {
C[i][j] += alpha * A[i][k] * B[j][k];
C[i][j] += alpha * B[i][k] * A[j][k];
}
}
}
}
/* ------------------------------------------------------------- */
void syr2k_original(int ni, int nj,
DATA_TYPE alpha,
DATA_TYPE beta,
DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(C, NI, NI, ni, ni)) {
/* Start timer. */
// polybench_start_instruments;
HOOKOMP_TIMING_SEQ_START;
/* Run kernel. */
syr2kCpu(ni, nj, alpha, beta, A, B, C);
/* Stop and print timer. */
// polybench_stop_instruments;
// // printf("Original CPU Time in seconds:\n");
// polybench_print_instruments;
HOOKOMP_TIMING_SEQ_STOP;
// HOOKOMP_TIMING_SEQ_PRINT;
}
/* ------------------------------------------------------------- */
void GPU_argv_init() {
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n", GPU_DEVICE, deviceProp.name);
hipSetDevice(GPU_DEVICE);
}
/* ------------------------------------------------------------- */
void compareResults(int ni,
DATA_TYPE POLYBENCH_2D(C, NI, NI, ni, ni),
DATA_TYPE POLYBENCH_2D(C_output,
NI, NI, ni, ni)) {
int i, j, fail;
fail = 0;
// Compare C with D
for (i = 0; i < ni; i++) {
for (j = 0; j < ni; j++) {
// printf("%4.2f - %4.2f\n", C[i][j], C_output[i][j]);
if (percentDiff(C[i][j], C_output[i][j]) >
PERCENT_DIFF_ERROR_THRESHOLD) {
fail++;
}
}
}
// print results
fprintf(stderr,
"Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
"Percent: %d\n",
PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
/* ------------------------------------------------------------- */
/* Original kernel of the benchmark: the two loops joined into a single kernel. */
__global__ void syr2k_cuda_kernel(int ni, int nj,
DATA_TYPE alpha,
DATA_TYPE beta,
DATA_TYPE *a,
DATA_TYPE *b,
DATA_TYPE *c) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NI)) {
c[i * NI + j] *= beta;
int k;
for (k = 0; k < NJ; k++) {
c[i * NI + j] += alpha * a[i * NJ + k] * b[j * NJ + k] +
alpha * b[i * NJ + k] * a[j * NJ + k];
}
}
}
/* ------------------------------------------------------------- */
__global__ void syr2k_cuda_kernel_0(int ni, int nj, DATA_TYPE alpha, DATA_TYPE beta,
DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *c) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NI)) {
c[i * NI + j] *= beta;
}
}
/* ------------------------------------------------------------- */
__global__ void syr2k_cuda_kernel_1(int ni, int nj, DATA_TYPE alpha, DATA_TYPE beta,
DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *c) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NI)) {
int k;
for (k = 0; k < NJ; k++) {
c[i * NI + j] += alpha * a[i * NJ + k] * b[j * NJ + k] +
alpha * b[i * NJ + k] * a[j * NJ + k];
}
}
}
/* ------------------------------------------------------------- */
void syr2k_cuda_0(int ni, int nj, DATA_TYPE alpha, DATA_TYPE beta,
DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(C_inputToGpu, NI, NI, ni, ni),
DATA_TYPE POLYBENCH_2D(C_outputFromGpu, NI, NI, ni, ni)) {
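// First half of the offloaded computation: allocate the shared device buffers,
// copy A, B and C to the GPU, and run the beta-scaling kernel. syr2k_cuda_1
// later runs the accumulation kernel, copies C back and frees the buffers.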
fprintf(stderr, "Calling function syr2k_cuda_0.\n");
// GPU initialization.
GPU_argv_init();
// Moved to Global.
// DATA_TYPE *A_gpu;
// DATA_TYPE *B_gpu;
// DATA_TYPE *C_gpu;
hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NJ);
hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NI * NJ);
hipMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * NI * NI);
HOOKOMP_TIMING_DT_H2D_START;
hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NJ, hipMemcpyHostToDevice);
hipMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NI * NJ, hipMemcpyHostToDevice);
hipMemcpy(C_gpu, C_inputToGpu, sizeof(DATA_TYPE) * NI * NI, hipMemcpyHostToDevice);
HOOKOMP_TIMING_DT_H2D_STOP;
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)ceil(((float)NI) / ((float)DIM_THREAD_BLOCK_X)),
(size_t)(ceil(((float)NI) / ((float)DIM_THREAD_BLOCK_Y))));
/* Start timer. */
// polybench_start_instruments;
HOOKOMP_TIMING_DEV_KERNEL1_START;
hipLaunchKernelGGL(( syr2k_cuda_kernel_0), dim3(grid), dim3(block), 0, 0, ni, nj, alpha, beta, A_gpu, B_gpu, C_gpu);
hipDeviceSynchronize();
HOOKOMP_TIMING_DEV_KERNEL1_STOP;
// syr2k_cuda_kernel_2<<<grid, block>>>(ni, nj, alpha, beta, A_gpu, B_gpu, C_gpu);
// hipDeviceSynchronize();
/* Stop and print timer. */
// polybench_stop_instruments;
// printf("GPU Time in seconds:\n");
// polybench_print_instruments;
// hipMemcpy(C_outputFromGpu, C_gpu, sizeof(DATA_TYPE) * NI * NI, hipMemcpyDeviceToHost);
// hipFree(A_gpu);
// hipFree(B_gpu);
// hipFree(C_gpu);
}
/* ------------------------------------------------------------- */
void syr2k_cuda_1(int ni, int nj, DATA_TYPE alpha, DATA_TYPE beta,
DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(C_inputToGpu, NI, NI, ni, ni),
DATA_TYPE POLYBENCH_2D(C_outputFromGpu, NI, NI, ni, ni)) {
fprintf(stderr, "Calling function syr2k_cuda_1.\n");
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)ceil(((float)NI) / ((float)DIM_THREAD_BLOCK_X)),
(size_t)(ceil(((float)NI) / ((float)DIM_THREAD_BLOCK_Y))));
/* Start timer. */
// polybench_start_instruments;
HOOKOMP_TIMING_DEV_KERNEL2_START;
hipLaunchKernelGGL(( syr2k_cuda_kernel_1), dim3(grid), dim3(block), 0, 0, ni, nj, alpha, beta, A_gpu, B_gpu, C_gpu);
hipDeviceSynchronize();
/* Stop and print timer. */
// polybench_stop_instruments;
// printf("GPU Time in seconds:\n");
// polybench_print_instruments;
HOOKOMP_TIMING_DEV_KERNEL2_STOP;
HOOKOMP_TIMING_DT_D2H_START;
hipMemcpy(C_outputFromGpu, C_gpu, sizeof(DATA_TYPE) * NI * NI,
hipMemcpyDeviceToHost);
HOOKOMP_TIMING_DT_D2H_STOP;
hipFree(A_gpu);
hipFree(B_gpu);
hipFree(C_gpu);
}
/* ------------------------------------------------------------- */
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static void syr2k_omp_kernel(int ni, int nj, DATA_TYPE alpha, DATA_TYPE beta,
DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(C, NI, NI, ni, ni)) {
int i, j, k;
#pragma scop
// #pragma omp parallel
#pragma omp parallel num_threads(OPENMP_NUM_THREADS)
{
/* C := alpha*A*B' + alpha*B*A' + beta*C */
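// The globals set below (current_loop_index, q_data_transfer_*, ...) appear to
// feed the hookomp runtime: they describe the data-transfer cost of offloading
// the next loop so it can decide between this OpenMP loop and the CUDA
// alternative registered in the function table.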
current_loop_index = 0;
num_threads_defined = OPENMP_NUM_THREADS;
// Copy to device A, B, C.
q_data_transfer_write = (sizeof(DATA_TYPE) * NI * NJ) + (sizeof(DATA_TYPE) * NI * NJ) + (sizeof(DATA_TYPE) * NI * NI);
// Copy back C.
q_data_transfer_read = (sizeof(DATA_TYPE) * NI * NI);
// 0: MEMORY_ALLOC_DEFAULT, 1: MEMORY_ALLOC_PAGEABLE, 2: MEMORY_ALLOC_PINNED
type_of_data_allocation = MEMORY_ALLOC_PAGEABLE;
#pragma omp for private(j) schedule(OPENMP_SCHEDULE_WITH_CHUNK)
for (i = 0; i < _PB_NI; i++)
for (j = 0; j < _PB_NI; j++){
C[i][j] *= beta;
}
current_loop_index = 1;
num_threads_defined = OPENMP_NUM_THREADS;
// Copy to device A, B, C.
q_data_transfer_write = (sizeof(DATA_TYPE) * NI * NJ) + (sizeof(DATA_TYPE) * NI * NJ) + (sizeof(DATA_TYPE) * NI * NI);
// Copy back C.
q_data_transfer_read = (sizeof(DATA_TYPE) * NI * NI);
// 0: MEMORY_ALLOC_DEFAULT, 1: MEMORY_ALLOC_PAGEABLE, 2: MEMORY_ALLOC_PINNED
type_of_data_allocation = MEMORY_ALLOC_PAGEABLE;
#pragma omp for private(j, k) schedule(OPENMP_SCHEDULE_WITH_CHUNK)
for (i = 0; i < _PB_NI; i++)
for (j = 0; j < _PB_NI; j++)
for (k = 0; k < _PB_NJ; k++) {
C[i][j] += alpha * A[i][k] * B[j][k];
C[i][j] += alpha * B[i][k] * A[j][k];
}
}
#pragma endscop
}
/* ------------------------------------------------------------- */
void syr2k_omp(int ni, int nj, DATA_TYPE alpha, DATA_TYPE beta,
DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(C_outputFromOMP, NI, NI, ni, ni)) {
/* Start timer. */
// polybench_start_instruments;
HOOKOMP_TIMING_OMP_START;
syr2k_omp_kernel(ni, nj, alpha, beta, A, B, C_outputFromOMP);
/* Stop and print timer. */
// polybench_stop_instruments;
// // printf("OpenMP Time in seconds:\n");
// polybench_print_instruments;
HOOKOMP_TIMING_OMP_STOP;
}
/* ------------------------------------------------------------- */
int main(int argc, char *argv[]) {
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
/* Variable declaration/allocation. */
DATA_TYPE alpha;
DATA_TYPE beta;
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NI, NI, ni, ni);
POLYBENCH_2D_ARRAY_DECL(C_outputFromOMP, DATA_TYPE, NI, NI, ni, ni);
POLYBENCH_2D_ARRAY_DECL(C_inputToGpu, DATA_TYPE, NI, NI, ni, ni);
POLYBENCH_2D_ARRAY_DECL(C_outputFromGpu, DATA_TYPE, NI, NI, ni, ni);
fprintf(stderr, "Preparing alternatives functions.\n");
/* Preparing the call to target function.
void syr2k_cuda(int ni, int nj, DATA_TYPE alpha, DATA_TYPE beta,
DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(C_inputToGpu, NI, NI, ni, ni),
DATA_TYPE POLYBENCH_2D(C_outputFromGpu, NI, NI, ni, ni))
*/
// Number of parameters to function.
int n_params = 8;
// loop 0.
Func *ff_0 = (Func *) malloc(sizeof(Func));
// Number of arguments + 1, the lists need to have last element NULL.
ff_0->arg_types = (ffi_type**) malloc ((n_params + 1) * sizeof(ffi_type*));
ff_0->arg_values = (void**) malloc ((n_params + 1) * sizeof(void*));
ff_0->f = &syr2k_cuda_0;
memset(&ff_0->ret_value, 0, sizeof(ff_0->ret_value));
// return type.
ff_0->ret_type = &ffi_type_void;
ff_0->nargs = n_params;
ff_0->arg_values[0] = ∋
ff_0->arg_values[1] = &nj;
ff_0->arg_values[2] = α
ff_0->arg_values[3] = β
ff_0->arg_values[4] = &A;
ff_0->arg_values[5] = &B;
ff_0->arg_values[6] = &C_inputToGpu;
ff_0->arg_values[7] = &C_outputFromGpu;
ff_0->arg_values[8] = NULL;
ff_0->arg_types[0] = &ffi_type_sint32;
ff_0->arg_types[1] = &ffi_type_sint32;
ff_0->arg_types[2] = &ffi_type_double;
ff_0->arg_types[3] = &ffi_type_double;
ff_0->arg_types[4] = &ffi_type_pointer;
ff_0->arg_types[5] = &ffi_type_pointer;
ff_0->arg_types[6] = &ffi_type_pointer;
ff_0->arg_types[7] = &ffi_type_pointer;
ff_0->arg_types[8] = NULL;
// loop 1.
Func *ff_1 = (Func *) malloc(sizeof(Func));
// Number of arguments + 1, the lists need to have last element NULL.
ff_1->arg_types = (ffi_type**) malloc ((n_params + 1) * sizeof(ffi_type*));
ff_1->arg_values = (void**) malloc ((n_params + 1) * sizeof(void*));
ff_1->f = &syr2k_cuda_1;
memset(&ff_1->ret_value, 0, sizeof(ff_1->ret_value));
// return type.
ff_1->ret_type = &ffi_type_void;
ff_1->nargs = n_params;
ff_1->arg_values[0] = ∋
ff_1->arg_values[1] = &nj;
ff_1->arg_values[2] = α
ff_1->arg_values[3] = β
ff_1->arg_values[4] = &A;
ff_1->arg_values[5] = &B;
ff_1->arg_values[6] = &C_inputToGpu;
ff_1->arg_values[7] = &C_outputFromGpu;
ff_1->arg_values[8] = NULL;
ff_1->arg_types[0] = &ffi_type_sint32;
ff_1->arg_types[1] = &ffi_type_sint32;
ff_1->arg_types[2] = &ffi_type_double;
ff_1->arg_types[3] = &ffi_type_double;
ff_1->arg_types[4] = &ffi_type_pointer;
ff_1->arg_types[5] = &ffi_type_pointer;
ff_1->arg_types[6] = &ffi_type_pointer;
ff_1->arg_types[7] = &ffi_type_pointer;
ff_1->arg_types[8] = NULL;
/* device 0
* loop 0 gemm_cuda
* matrix 1 x 1.
*/
fprintf(stderr, "Creating table of target functions.\n");
int nloops = 2;
int ndevices = 2;
if (create_target_functions_table(&table, nloops, ndevices)) {
// Set up the library Functions table.
assert(table != NULL);
fprintf(stderr, "Declaring function in 0,1.\n");
table[0][1][0] = *ff_0;
fprintf(stderr, "Declaring function in 1,1.\n");
table[1][1][0] = *ff_1;
TablePointerFunctions = table;
assert(TablePointerFunctions != NULL);
}
fprintf(stderr, "Calling init_array.\n");
init_arrays(ni, nj, &alpha, &beta, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(C));
/*Copy the original C to C of OMP.*/
fprintf(stderr, "Copying C to C_outputFromOMP.\n");
// memcpy(C_outputFromOMP, C, sizeof(C_outputFromOMP));
copy_array(ni, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(C_outputFromOMP));
// printf("%4.2f - %4.2f\n", *(C[0][0]), *(C_outputFromOMP[0][0]));
// compareResults(ni, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(C_outputFromOMP));
fprintf(stderr, "Copying C to C_outputFromGpu.\n");
// memcpy(C_inputToGpu, C, sizeof(C_inputToGpu));
copy_array(ni, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(C_outputFromGpu));
// compareResults(ni, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(C_outputFromGpu));
fprintf(stderr, "Calling syr2k_original:\n");
syr2k_original(ni, nj, alpha, beta, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C));
fprintf(stderr, "Calling syr2k_omp:\n");
syr2k_omp(ni, nj, alpha, beta, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C_outputFromOMP));
fprintf(stdout, "version = OMP+OFF, num_threads = %d, NI = %d, NJ = %d, NK = %d, ", OPENMP_NUM_THREADS, NI, NJ, 0);
HOOKOMP_PRINT_TIME_RESULTS;
fprintf(stderr, "Calling compareResults(original, omp).\n");
compareResults(ni, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(C_outputFromOMP));
// fprintf(stderr, "Calling CUDA.\n");
// syr2k_cuda(ni, nj, alpha, beta, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C_inputToGpu), POLYBENCH_ARRAY(C_outputFromGpu));
// fprintf(stderr, "Calling gemm_cuda using Table of Pointers.\n");
// call_function_ffi_call(table[0][0]);
fprintf(stderr, "Calling compareResults(original, cuda).\n");
compareResults(ni, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(C_outputFromGpu));
polybench_prevent_dce(print_array(ni, POLYBENCH_ARRAY(C_outputFromGpu)));
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
POLYBENCH_FREE_ARRAY(C);
POLYBENCH_FREE_ARRAY(C_outputFromOMP);
POLYBENCH_FREE_ARRAY(C_outputFromGpu);
return 0;
}
// polybench.c uses OpenMP to parallelize some things. Those calls would be
// intercepted by hookomp, so OpenMP is disabled here before including it.
#undef _OPENMP
#include <polybench.c> | 8cac1db834d2e63c2a407fb49c4b7fb55968d085.cu | /**
* syr2k.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Will Killian <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <assert.h>
#include <cuda.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#include <pthread.h>
#define POLYBENCH_TIME 1
#include "syr2k.cuh"
#include <polybench.h>
#include <polybenchUtilFuncts.h>
// Macros to generate openmp schedule.
#include <macros.h>
// Time measures implementation.
#include <timing.h>
// Offloading support functions.
#include <offload.h>
// define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
#define RUN_ON_CPU
/* GPU pointers now as global to be shared between kernels. */
DATA_TYPE *A_gpu;
DATA_TYPE *B_gpu;
DATA_TYPE *C_gpu;
/* ------------------------------------------------------------- */
/* Arrays initialization. */
void init_arrays(int ni, int nj,
DATA_TYPE *alpha,
DATA_TYPE *beta,
DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(C, NI, NI, ni, ni))
{
int i, j;
*alpha = 32412;
*beta = 2123;
for (i = 0; i < ni; i++) {
for (j = 0; j < nj; j++) {
A[i][j] = ((DATA_TYPE)i * j) / ni;
B[i][j] = ((DATA_TYPE)i * j) / ni;
}
}
for (i = 0; i < ni; i++) {
for (j = 0; j < ni; j++) {
C[i][j] = ((DATA_TYPE)i * j) / ni;
}
}
}
/* ------------------------------------------------------------- */
void copy_array(int ni, DATA_TYPE POLYBENCH_2D(C_source, NI, NI, ni, ni), DATA_TYPE POLYBENCH_2D(C_dest, NI, NI, ni, ni)) {
int i, j;
for (i = 0; i < ni; i++) {
for (j = 0; j < ni; j++) {
C_dest[i][j] = C_source[i][j];
// printf("%4.2f - %4.2f\n", C_dest[i][j], C_source[i][j]);
}
}
}
/* ------------------------------------------------------------- */
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, DATA_TYPE POLYBENCH_2D(C, NI, NI, ni, ni)) {
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < ni; j++) {
fprintf(stderr, DATA_PRINTF_MODIFIER, C[i][j]);
if ((i * ni + j) % 20 == 0) fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
/* ------------------------------------------------------------- */
/* Original Version. */
void syr2kCpu(int ni, int nj,
DATA_TYPE alpha,
DATA_TYPE beta,
DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(C, NI, NI, ni, ni)) {
int i, j, k;
/* C := alpha*A*B' + alpha*B*A' + beta*C */
for (i = 0; i < _PB_NI; i++) {
for (j = 0; j < _PB_NI; j++) {
C[i][j] *= beta;
}
}
for (i = 0; i < _PB_NI; i++) {
for (j = 0; j < _PB_NI; j++) {
for (k = 0; k < _PB_NJ; k++) {
C[i][j] += alpha * A[i][k] * B[j][k];
C[i][j] += alpha * B[i][k] * A[j][k];
}
}
}
}
/* ------------------------------------------------------------- */
void syr2k_original(int ni, int nj,
DATA_TYPE alpha,
DATA_TYPE beta,
DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(C, NI, NI, ni, ni)) {
/* Start timer. */
// polybench_start_instruments;
HOOKOMP_TIMING_SEQ_START;
/* Run kernel. */
syr2kCpu(ni, nj, alpha, beta, A, B, C);
/* Stop and print timer. */
// polybench_stop_instruments;
// // printf("Original CPU Time in seconds:\n");
// polybench_print_instruments;
HOOKOMP_TIMING_SEQ_STOP;
// HOOKOMP_TIMING_SEQ_PRINT;
}
/* ------------------------------------------------------------- */
void GPU_argv_init() {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
printf("setting device %d with name %s\n", GPU_DEVICE, deviceProp.name);
cudaSetDevice(GPU_DEVICE);
}
/* ------------------------------------------------------------- */
void compareResults(int ni,
DATA_TYPE POLYBENCH_2D(C, NI, NI, ni, ni),
DATA_TYPE POLYBENCH_2D(C_output,
NI, NI, ni, ni)) {
int i, j, fail;
fail = 0;
// Compare C with D
for (i = 0; i < ni; i++) {
for (j = 0; j < ni; j++) {
// printf("%4.2f - %4.2f\n", C[i][j], C_output[i][j]);
if (percentDiff(C[i][j], C_output[i][j]) >
PERCENT_DIFF_ERROR_THRESHOLD) {
fail++;
}
}
}
// print results
fprintf(stderr,
"Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
"Percent: %d\n",
PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
/* ------------------------------------------------------------- */
/* Original kernel of the benchmark: the two loops joined into a single kernel. */
__global__ void syr2k_cuda_kernel(int ni, int nj,
DATA_TYPE alpha,
DATA_TYPE beta,
DATA_TYPE *a,
DATA_TYPE *b,
DATA_TYPE *c) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NI)) {
c[i * NI + j] *= beta;
int k;
for (k = 0; k < NJ; k++) {
c[i * NI + j] += alpha * a[i * NJ + k] * b[j * NJ + k] +
alpha * b[i * NJ + k] * a[j * NJ + k];
}
}
}
/* ------------------------------------------------------------- */
__global__ void syr2k_cuda_kernel_0(int ni, int nj, DATA_TYPE alpha, DATA_TYPE beta,
DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *c) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NI)) {
c[i * NI + j] *= beta;
}
}
/* ------------------------------------------------------------- */
__global__ void syr2k_cuda_kernel_1(int ni, int nj, DATA_TYPE alpha, DATA_TYPE beta,
DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *c) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < NI) && (j < NI)) {
int k;
for (k = 0; k < NJ; k++) {
c[i * NI + j] += alpha * a[i * NJ + k] * b[j * NJ + k] +
alpha * b[i * NJ + k] * a[j * NJ + k];
}
}
}
/* ------------------------------------------------------------- */
void syr2k_cuda_0(int ni, int nj, DATA_TYPE alpha, DATA_TYPE beta,
DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(C_inputToGpu, NI, NI, ni, ni),
DATA_TYPE POLYBENCH_2D(C_outputFromGpu, NI, NI, ni, ni)) {
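// First half of the offloaded computation: allocate the shared device buffers,
// copy A, B and C to the GPU, and run the beta-scaling kernel. syr2k_cuda_1
// later runs the accumulation kernel, copies C back and frees the buffers.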
fprintf(stderr, "Calling function syr2k_cuda_0.\n");
// GPU initialization.
GPU_argv_init();
// Moved to Global.
// DATA_TYPE *A_gpu;
// DATA_TYPE *B_gpu;
// DATA_TYPE *C_gpu;
cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NJ);
cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NI * NJ);
cudaMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * NI * NI);
HOOKOMP_TIMING_DT_H2D_START;
cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NJ, cudaMemcpyHostToDevice);
cudaMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NI * NJ, cudaMemcpyHostToDevice);
cudaMemcpy(C_gpu, C_inputToGpu, sizeof(DATA_TYPE) * NI * NI, cudaMemcpyHostToDevice);
HOOKOMP_TIMING_DT_H2D_STOP;
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)ceil(((float)NI) / ((float)DIM_THREAD_BLOCK_X)),
(size_t)(ceil(((float)NI) / ((float)DIM_THREAD_BLOCK_Y))));
/* Start timer. */
// polybench_start_instruments;
HOOKOMP_TIMING_DEV_KERNEL1_START;
syr2k_cuda_kernel_0<<<grid, block>>>(ni, nj, alpha, beta, A_gpu, B_gpu, C_gpu);
cudaThreadSynchronize();
HOOKOMP_TIMING_DEV_KERNEL1_STOP;
// syr2k_cuda_kernel_2<<<grid, block>>>(ni, nj, alpha, beta, A_gpu, B_gpu, C_gpu);
// cudaThreadSynchronize();
/* Stop and print timer. */
// polybench_stop_instruments;
// printf("GPU Time in seconds:\n");
// polybench_print_instruments;
// cudaMemcpy(C_outputFromGpu, C_gpu, sizeof(DATA_TYPE) * NI * NI, cudaMemcpyDeviceToHost);
// cudaFree(A_gpu);
// cudaFree(B_gpu);
// cudaFree(C_gpu);
}
/* ------------------------------------------------------------- */
void syr2k_cuda_1(int ni, int nj, DATA_TYPE alpha, DATA_TYPE beta,
DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(C_inputToGpu, NI, NI, ni, ni),
DATA_TYPE POLYBENCH_2D(C_outputFromGpu, NI, NI, ni, ni)) {
fprintf(stderr, "Calling function syr2k_cuda_1.\n");
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((size_t)ceil(((float)NI) / ((float)DIM_THREAD_BLOCK_X)),
(size_t)(ceil(((float)NI) / ((float)DIM_THREAD_BLOCK_Y))));
/* Start timer. */
// polybench_start_instruments;
HOOKOMP_TIMING_DEV_KERNEL2_START;
syr2k_cuda_kernel_1<<<grid, block>>>(ni, nj, alpha, beta, A_gpu, B_gpu, C_gpu);
cudaThreadSynchronize();
/* Stop and print timer. */
// polybench_stop_instruments;
// printf("GPU Time in seconds:\n");
// polybench_print_instruments;
HOOKOMP_TIMING_DEV_KERNEL2_STOP;
HOOKOMP_TIMING_DT_D2H_START;
cudaMemcpy(C_outputFromGpu, C_gpu, sizeof(DATA_TYPE) * NI * NI,
cudaMemcpyDeviceToHost);
HOOKOMP_TIMING_DT_D2H_STOP;
cudaFree(A_gpu);
cudaFree(B_gpu);
cudaFree(C_gpu);
}
/* ------------------------------------------------------------- */
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static void syr2k_omp_kernel(int ni, int nj, DATA_TYPE alpha, DATA_TYPE beta,
DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(C, NI, NI, ni, ni)) {
int i, j, k;
#pragma scop
// #pragma omp parallel
#pragma omp parallel num_threads(OPENMP_NUM_THREADS)
{
/* C := alpha*A*B' + alpha*B*A' + beta*C */
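// The globals set below (current_loop_index, q_data_transfer_*, ...) appear to
// feed the hookomp runtime: they describe the data-transfer cost of offloading
// the next loop so it can decide between this OpenMP loop and the CUDA
// alternative registered in the function table.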
current_loop_index = 0;
num_threads_defined = OPENMP_NUM_THREADS;
// Copy to device A, B, C.
q_data_transfer_write = (sizeof(DATA_TYPE) * NI * NJ) + (sizeof(DATA_TYPE) * NI * NJ) + (sizeof(DATA_TYPE) * NI * NI);
// Copy back C.
q_data_transfer_read = (sizeof(DATA_TYPE) * NI * NI);
// 0: MEMORY_ALLOC_DEFAULT, 1: MEMORY_ALLOC_PAGEABLE, 2: MEMORY_ALLOC_PINNED
type_of_data_allocation = MEMORY_ALLOC_PAGEABLE;
#pragma omp for private(j) schedule(OPENMP_SCHEDULE_WITH_CHUNK)
for (i = 0; i < _PB_NI; i++)
for (j = 0; j < _PB_NI; j++){
C[i][j] *= beta;
}
current_loop_index = 1;
num_threads_defined = OPENMP_NUM_THREADS;
// Copy to device A, B, C.
q_data_transfer_write = (sizeof(DATA_TYPE) * NI * NJ) + (sizeof(DATA_TYPE) * NI * NJ) + (sizeof(DATA_TYPE) * NI * NI);
// Copy back C.
q_data_transfer_read = (sizeof(DATA_TYPE) * NI * NI);
// 0: MEMORY_ALLOC_DEFAULT, 1: MEMORY_ALLOC_PAGEABLE, 2: MEMORY_ALLOC_PINNED
type_of_data_allocation = MEMORY_ALLOC_PAGEABLE;
#pragma omp for private(j, k) schedule(OPENMP_SCHEDULE_WITH_CHUNK)
for (i = 0; i < _PB_NI; i++)
for (j = 0; j < _PB_NI; j++)
for (k = 0; k < _PB_NJ; k++) {
C[i][j] += alpha * A[i][k] * B[j][k];
C[i][j] += alpha * B[i][k] * A[j][k];
}
}
#pragma endscop
}
/* ------------------------------------------------------------- */
void syr2k_omp(int ni, int nj, DATA_TYPE alpha, DATA_TYPE beta,
DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(C_outputFromOMP, NI, NI, ni, ni)) {
/* Start timer. */
// polybench_start_instruments;
HOOKOMP_TIMING_OMP_START;
syr2k_omp_kernel(ni, nj, alpha, beta, A, B, C_outputFromOMP);
/* Stop and print timer. */
// polybench_stop_instruments;
// // printf("OpenMP Time in seconds:\n");
// polybench_print_instruments;
HOOKOMP_TIMING_OMP_STOP;
}
/* ------------------------------------------------------------- */
int main(int argc, char *argv[]) {
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
/* Variable declaration/allocation. */
DATA_TYPE alpha;
DATA_TYPE beta;
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NI, NI, ni, ni);
POLYBENCH_2D_ARRAY_DECL(C_outputFromOMP, DATA_TYPE, NI, NI, ni, ni);
POLYBENCH_2D_ARRAY_DECL(C_inputToGpu, DATA_TYPE, NI, NI, ni, ni);
POLYBENCH_2D_ARRAY_DECL(C_outputFromGpu, DATA_TYPE, NI, NI, ni, ni);
fprintf(stderr, "Preparing alternatives functions.\n");
/* Preparing the call to target function.
void syr2k_cuda(int ni, int nj, DATA_TYPE alpha, DATA_TYPE beta,
DATA_TYPE POLYBENCH_2D(A, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(B, NI, NJ, ni, nj),
DATA_TYPE POLYBENCH_2D(C_inputToGpu, NI, NI, ni, ni),
DATA_TYPE POLYBENCH_2D(C_outputFromGpu, NI, NI, ni, ni))
*/
// Number of parameters to function.
int n_params = 8;
// loop 0.
Func *ff_0 = (Func *) malloc(sizeof(Func));
// Number of arguments + 1, the lists need to have last element NULL.
ff_0->arg_types = (ffi_type**) malloc ((n_params + 1) * sizeof(ffi_type*));
ff_0->arg_values = (void**) malloc ((n_params + 1) * sizeof(void*));
ff_0->f = &syr2k_cuda_0;
memset(&ff_0->ret_value, 0, sizeof(ff_0->ret_value));
// return type.
ff_0->ret_type = &ffi_type_void;
ff_0->nargs = n_params;
ff_0->arg_values[0] = ∋
ff_0->arg_values[1] = &nj;
ff_0->arg_values[2] = α
ff_0->arg_values[3] = β
ff_0->arg_values[4] = &A;
ff_0->arg_values[5] = &B;
ff_0->arg_values[6] = &C_inputToGpu;
ff_0->arg_values[7] = &C_outputFromGpu;
ff_0->arg_values[8] = NULL;
ff_0->arg_types[0] = &ffi_type_sint32;
ff_0->arg_types[1] = &ffi_type_sint32;
ff_0->arg_types[2] = &ffi_type_double;
ff_0->arg_types[3] = &ffi_type_double;
ff_0->arg_types[4] = &ffi_type_pointer;
ff_0->arg_types[5] = &ffi_type_pointer;
ff_0->arg_types[6] = &ffi_type_pointer;
ff_0->arg_types[7] = &ffi_type_pointer;
ff_0->arg_types[8] = NULL;
// loop 1.
Func *ff_1 = (Func *) malloc(sizeof(Func));
// Number of arguments + 1, the lists need to have last element NULL.
ff_1->arg_types = (ffi_type**) malloc ((n_params + 1) * sizeof(ffi_type*));
ff_1->arg_values = (void**) malloc ((n_params + 1) * sizeof(void*));
ff_1->f = &syr2k_cuda_1;
memset(&ff_1->ret_value, 0, sizeof(ff_1->ret_value));
// return type.
ff_1->ret_type = &ffi_type_void;
ff_1->nargs = n_params;
ff_1->arg_values[0] = ∋
ff_1->arg_values[1] = &nj;
ff_1->arg_values[2] = α
ff_1->arg_values[3] = β
ff_1->arg_values[4] = &A;
ff_1->arg_values[5] = &B;
ff_1->arg_values[6] = &C_inputToGpu;
ff_1->arg_values[7] = &C_outputFromGpu;
ff_1->arg_values[8] = NULL;
ff_1->arg_types[0] = &ffi_type_sint32;
ff_1->arg_types[1] = &ffi_type_sint32;
ff_1->arg_types[2] = &ffi_type_double;
ff_1->arg_types[3] = &ffi_type_double;
ff_1->arg_types[4] = &ffi_type_pointer;
ff_1->arg_types[5] = &ffi_type_pointer;
ff_1->arg_types[6] = &ffi_type_pointer;
ff_1->arg_types[7] = &ffi_type_pointer;
ff_1->arg_types[8] = NULL;
/* device 0
* loop 0 gemm_cuda
* matrix 1 x 1.
*/
fprintf(stderr, "Creating table of target functions.\n");
int nloops = 2;
int ndevices = 2;
if (create_target_functions_table(&table, nloops, ndevices)) {
// Set up the library Functions table.
assert(table != NULL);
fprintf(stderr, "Declaring function in 0,1.\n");
table[0][1][0] = *ff_0;
fprintf(stderr, "Declaring function in 1,1.\n");
table[1][1][0] = *ff_1;
TablePointerFunctions = table;
assert(TablePointerFunctions != NULL);
}
fprintf(stderr, "Calling init_array.\n");
init_arrays(ni, nj, &alpha, &beta, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(C));
/*Copy the original C to C of OMP.*/
fprintf(stderr, "Copying C to C_outputFromOMP.\n");
// memcpy(C_outputFromOMP, C, sizeof(C_outputFromOMP));
copy_array(ni, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(C_outputFromOMP));
// printf("%4.2f - %4.2f\n", *(C[0][0]), *(C_outputFromOMP[0][0]));
// compareResults(ni, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(C_outputFromOMP));
fprintf(stderr, "Copying C to C_outputFromGpu.\n");
// memcpy(C_inputToGpu, C, sizeof(C_inputToGpu));
copy_array(ni, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(C_outputFromGpu));
// compareResults(ni, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(C_outputFromGpu));
fprintf(stderr, "Calling syr2k_original:\n");
syr2k_original(ni, nj, alpha, beta, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C));
fprintf(stderr, "Calling syr2k_omp:\n");
syr2k_omp(ni, nj, alpha, beta, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C_outputFromOMP));
fprintf(stdout, "version = OMP+OFF, num_threads = %d, NI = %d, NJ = %d, NK = %d, ", OPENMP_NUM_THREADS, NI, NJ, 0);
HOOKOMP_PRINT_TIME_RESULTS;
fprintf(stderr, "Calling compareResults(original, omp).\n");
compareResults(ni, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(C_outputFromOMP));
// fprintf(stderr, "Calling CUDA.\n");
// syr2k_cuda(ni, nj, alpha, beta, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C_inputToGpu), POLYBENCH_ARRAY(C_outputFromGpu));
// fprintf(stderr, "Calling gemm_cuda using Table of Pointers.\n");
// call_function_ffi_call(table[0][0]);
fprintf(stderr, "Calling compareResults(original, cuda).\n");
compareResults(ni, POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(C_outputFromGpu));
polybench_prevent_dce(print_array(ni, POLYBENCH_ARRAY(C_outputFromGpu)));
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
POLYBENCH_FREE_ARRAY(C);
POLYBENCH_FREE_ARRAY(C_outputFromOMP);
POLYBENCH_FREE_ARRAY(C_outputFromGpu);
return 0;
}
// polybench.c uses OpenMP to parallelize some things. Those calls would be
// intercepted by hookomp, so OpenMP is disabled here before including it.
#undef _OPENMP
#include <polybench.c> |
2d4626cc6b0c805edbe28635c177594d44a4c488.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include "../Box_ops/box_iou_rotated_utils.h"
using namespace pet;
int const threadsPerBlock = sizeof(unsigned long long) * 8;
template <typename T>
__global__ void nms_rotated_cuda_kernel(
const int n_boxes,
const float iou_threshold,
const T* dev_boxes,
unsigned long long* dev_mask) {
// nms_rotated_cuda_kernel is modified from torchvision's nms_cuda_kernel
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
// Compared to nms_cuda_kernel, where each box is represented with 4 values
// (x1, y1, x2, y2), each rotated box is represented with 5 values
// (x_center, y_center, width, height, angle_degrees) here.
__shared__ T block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const T* cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
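// t collects one bit per box of this column block (threadsPerBlock == 64 bits):
// bit i is set when column box i overlaps cur_box beyond iou_threshold.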
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
// Instead of devIoU used by original horizontal nms, here
// we use the single_box_iou_rotated function from box_iou_rotated_utils.h
if (single_box_iou_rotated<T>(cur_box, block_boxes + i * 5) >
iou_threshold) {
t |= 1ULL << i;
}
}
const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
namespace pet {
at::Tensor nms_rotated_cuda(
const at::Tensor& dets,
const at::Tensor& scores,
float iou_threshold) {
// using scalar_t = float;
AT_ASSERTM(dets.type().is_cuda(), "dets must be a CUDA tensor");
AT_ASSERTM(scores.type().is_cuda(), "scores must be a CUDA tensor");
at::hip::HIPGuardMasqueradingAsCUDA device_guard(dets.device());
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto dets_sorted = dets.index_select(0, order_t);
int dets_num = dets.size(0);
const int col_blocks = at::cuda::ATenCeilDiv(dets_num, threadsPerBlock);
at::Tensor mask =
at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong));
dim3 blocks(col_blocks, col_blocks);
dim3 threads(threadsPerBlock);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
dets_sorted.type(), "nms_rotated_kernel_cuda", [&] {
hipLaunchKernelGGL(( nms_rotated_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream,
dets_num,
iou_threshold,
dets_sorted.data<scalar_t>(),
(unsigned long long*)mask.data<int64_t>());
});
at::Tensor mask_cpu = mask.to(at::kCPU);
unsigned long long* mask_host = (unsigned long long*)mask_cpu.data<int64_t>();
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep =
at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data<int64_t>();
int num_to_keep = 0;
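// Greedy suppression on the host: boxes are visited in descending score order;
// a box is kept only if no previously kept box has flagged it in the overlap
// bitmask, and its own row of the mask is then OR-ed into remv.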
for (int i = 0; i < dets_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long* p = mask_host + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
AT_CUDA_CHECK(hipGetLastError());
return order_t.index(
{keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep)
.to(order_t.device(), keep.scalar_type())});
}
} // namespace pet
| 2d4626cc6b0c805edbe28635c177594d44a4c488.cu | #include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include "../Box_ops/box_iou_rotated_utils.h"
using namespace pet;
int const threadsPerBlock = sizeof(unsigned long long) * 8;
template <typename T>
__global__ void nms_rotated_cuda_kernel(
const int n_boxes,
const float iou_threshold,
const T* dev_boxes,
unsigned long long* dev_mask) {
// nms_rotated_cuda_kernel is modified from torchvision's nms_cuda_kernel
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
// Compared to nms_cuda_kernel, where each box is represented with 4 values
// (x1, y1, x2, y2), each rotated box is represented with 5 values
// (x_center, y_center, width, height, angle_degrees) here.
__shared__ T block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const T* cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
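// t collects one bit per box of this column block (threadsPerBlock == 64 bits):
// bit i is set when column box i overlaps cur_box beyond iou_threshold.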
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
// Instead of devIoU used by original horizontal nms, here
// we use the single_box_iou_rotated function from box_iou_rotated_utils.h
if (single_box_iou_rotated<T>(cur_box, block_boxes + i * 5) >
iou_threshold) {
t |= 1ULL << i;
}
}
const int col_blocks = at::cuda::ATenCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
namespace pet {
at::Tensor nms_rotated_cuda(
const at::Tensor& dets,
const at::Tensor& scores,
float iou_threshold) {
// using scalar_t = float;
AT_ASSERTM(dets.type().is_cuda(), "dets must be a CUDA tensor");
AT_ASSERTM(scores.type().is_cuda(), "scores must be a CUDA tensor");
at::cuda::CUDAGuard device_guard(dets.device());
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto dets_sorted = dets.index_select(0, order_t);
int dets_num = dets.size(0);
const int col_blocks = at::cuda::ATenCeilDiv(dets_num, threadsPerBlock);
at::Tensor mask =
at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong));
dim3 blocks(col_blocks, col_blocks);
dim3 threads(threadsPerBlock);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
dets_sorted.type(), "nms_rotated_kernel_cuda", [&] {
nms_rotated_cuda_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
dets_num,
iou_threshold,
dets_sorted.data<scalar_t>(),
(unsigned long long*)mask.data<int64_t>());
});
at::Tensor mask_cpu = mask.to(at::kCPU);
unsigned long long* mask_host = (unsigned long long*)mask_cpu.data<int64_t>();
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep =
at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data<int64_t>();
int num_to_keep = 0;
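// Greedy suppression on the host: boxes are visited in descending score order;
// a box is kept only if no previously kept box has flagged it in the overlap
// bitmask, and its own row of the mask is then OR-ed into remv.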
for (int i = 0; i < dets_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long* p = mask_host + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
AT_CUDA_CHECK(cudaGetLastError());
return order_t.index(
{keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep)
.to(order_t.device(), keep.scalar_type())});
}
} // namespace pet
|
cdbc720bd04b466abad58b00f6c02cb07b7cecdd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
 * Does an element-wise ceil of matrix A, writing the result into C
 * @param A the input array allocated on the GPU
 * @param C the output array allocated on the GPU
 * @param size total number of elements of the matrix
 */
extern "C"
__global__ void matrix_ceil(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = ceil(A[index]);
}
} | cdbc720bd04b466abad58b00f6c02cb07b7cecdd.cu | #include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
 * Does an element-wise ceil of matrix A, writing the result into C
 * @param A the input array allocated on the GPU
 * @param C the output array allocated on the GPU
 * @param size total number of elements of the matrix
 */
extern "C"
__global__ void matrix_ceil(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = ceil(A[index]);
}
} |
251e1a57132a97fe729a6749ccea1fa3861a80de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* GridTools
*
* Copyright (c) 2014-2021, ETH Zurich
* All rights reserved.
*
* Please, refer to the LICENSE file in the root directory.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "./test_hypercube_iterator.cpp"
#include <gridtools/common/cuda_util.hpp>
static const size_t Size = 2;
GT_FUNCTION int linear_index(gridtools::array<size_t, 2> &index) { return index[0] * Size + index[1]; }
__global__ void test_kernel(int *out_ptr) {
for (size_t i = 0; i < Size * Size; ++i)
out_ptr[i] = -1;
using hypercube_t = gridtools::array<gridtools::array<size_t, 2>, 2>;
for (auto pos : make_hypercube_view(hypercube_t{{{0ul, Size}, {0ul, Size}}})) {
out_ptr[linear_index(pos)] = linear_index(pos);
}
};
TEST(multi_iterator, iterate_on_device) {
int *out;
GT_CUDA_CHECK(hipMalloc(&out, sizeof(int) * Size * Size));
hipLaunchKernelGGL(( test_kernel), dim3(1), dim3(1), 0, 0, out);
int host_out[Size * Size];
GT_CUDA_CHECK(hipMemcpy(&host_out, out, sizeof(int) * Size * Size, hipMemcpyDeviceToHost));
for (size_t i = 0; i < Size * Size; ++i)
ASSERT_EQ(i, host_out[i]) << "at i = " << i;
}
| 251e1a57132a97fe729a6749ccea1fa3861a80de.cu | /*
* GridTools
*
* Copyright (c) 2014-2021, ETH Zurich
* All rights reserved.
*
* Please, refer to the LICENSE file in the root directory.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include "./test_hypercube_iterator.cpp"
#include <gridtools/common/cuda_util.hpp>
static const size_t Size = 2;
GT_FUNCTION int linear_index(gridtools::array<size_t, 2> &index) { return index[0] * Size + index[1]; }
__global__ void test_kernel(int *out_ptr) {
for (size_t i = 0; i < Size * Size; ++i)
out_ptr[i] = -1;
using hypercube_t = gridtools::array<gridtools::array<size_t, 2>, 2>;
for (auto pos : make_hypercube_view(hypercube_t{{{0ul, Size}, {0ul, Size}}})) {
out_ptr[linear_index(pos)] = linear_index(pos);
}
};
TEST(multi_iterator, iterate_on_device) {
int *out;
GT_CUDA_CHECK(cudaMalloc(&out, sizeof(int) * Size * Size));
test_kernel<<<1, 1>>>(out);
int host_out[Size * Size];
GT_CUDA_CHECK(cudaMemcpy(&host_out, out, sizeof(int) * Size * Size, cudaMemcpyDeviceToHost));
for (size_t i = 0; i < Size * Size; ++i)
ASSERT_EQ(i, host_out[i]) << "at i = " << i;
}
|
8aff340f8beb89fff62765192f6b8d7a62e58cf5.hip | // !!! This is a file automatically generated by hipify!!!
//
// Created by zeyi on 1/9/19.
//
#include <fstream>
#include "hip/hip_runtime_api.h"
#include <thundergbm/tree.h>
#include <thundergbm/trainer.h>
#include <thundergbm/metric/metric.h>
#include "thundergbm/util/device_lambda.cuh"
#include "thrust/reduce.h"
#include "time.h"
#include "thundergbm/booster.h"
#include "chrono"
#include <thundergbm/parser.h>
using namespace std;
vector<vector<Tree>> TreeTrainer::train(GBMParam ¶m, const DataSet &dataset) {
if (param.tree_method == "auto")
if (dataset.n_features() > 20000)
param.tree_method = "exact";
else
param.tree_method = "hist";
//correct the number of classes
if(param.objective.find("multi:") != std::string::npos || param.objective.find("binary:") != std::string::npos) {
int num_class = dataset.label.size();
if (param.num_class != num_class) {
LOG(INFO) << "updating number of classes from " << param.num_class << " to " << num_class;
param.num_class = num_class;
}
if(param.num_class > 2)
param.tree_per_rounds = param.num_class;
}
else if(param.objective.find("reg:") != std::string::npos){
param.num_class = 1;
}
vector<vector<Tree>> boosted_model;
Booster booster;
booster.init(dataset, param);
std::chrono::high_resolution_clock timer;
auto start = timer.now();
for (int i = 0; i < param.n_trees; ++i) {
//one iteration may produce multiple trees, depending on objectives
booster.boost(boosted_model);
}
auto stop = timer.now();
std::chrono::duration<float> training_time = stop - start;
LOG(INFO) << "training time = " << training_time.count();
SyncMem::clear_cache();
return boosted_model;
}
| 8aff340f8beb89fff62765192f6b8d7a62e58cf5.cu | //
// Created by zeyi on 1/9/19.
//
#include <fstream>
#include "cuda_runtime_api.h"
#include <thundergbm/tree.h>
#include <thundergbm/trainer.h>
#include <thundergbm/metric/metric.h>
#include "thundergbm/util/device_lambda.cuh"
#include "thrust/reduce.h"
#include "time.h"
#include "thundergbm/booster.h"
#include "chrono"
#include <thundergbm/parser.h>
using namespace std;
vector<vector<Tree>> TreeTrainer::train(GBMParam ¶m, const DataSet &dataset) {
if (param.tree_method == "auto")
if (dataset.n_features() > 20000)
param.tree_method = "exact";
else
param.tree_method = "hist";
//correct the number of classes
if(param.objective.find("multi:") != std::string::npos || param.objective.find("binary:") != std::string::npos) {
int num_class = dataset.label.size();
if (param.num_class != num_class) {
LOG(INFO) << "updating number of classes from " << param.num_class << " to " << num_class;
param.num_class = num_class;
}
if(param.num_class > 2)
param.tree_per_rounds = param.num_class;
}
else if(param.objective.find("reg:") != std::string::npos){
param.num_class = 1;
}
vector<vector<Tree>> boosted_model;
Booster booster;
booster.init(dataset, param);
std::chrono::high_resolution_clock timer;
auto start = timer.now();
for (int i = 0; i < param.n_trees; ++i) {
//one iteration may produce multiple trees, depending on objectives
booster.boost(boosted_model);
}
auto stop = timer.now();
std::chrono::duration<float> training_time = stop - start;
LOG(INFO) << "training time = " << training_time.count();
SyncMem::clear_cache();
return boosted_model;
}
|
ae8dd2bc4e01868147f14792f2a0a1749c341962.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "spirit.cuh"
#define MAX_THREAD 1024
#define MAX_BLOCK_X 65535ll
#define MAX_BLOCK_Y 65535ll
#define MAX_BLOCK_Z 65535ll
Spirit::Spirit(int const &n, InitKernelEnum const &ik)
: resource(0),
nParticle(n),
deviceParticles(nullptr),
deviceRandStates(nullptr),
pShader("shaders/particle.vs", "shaders/particle.fs"){
createVBO();
setCallBacks();
initCuda(ik);
}
Spirit::~Spirit(){
//unmap resource
CUDA_SAFE_CALL( hipGraphicsUnmapResources(1, &resource) );
CUDA_SAFE_CALL( hipGraphicsUnregisterResource(resource) );
//free
CUDA_SAFE_CALL( hipFree(deviceParticles) );
CUDA_SAFE_CALL( hipFree(deviceRandStates) );
}
void Spirit::createVBO(){
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glBindVertexArray(VAO);
//set VBO
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, nParticle*sizeof(Particle), deviceParticles, GL_STATIC_DRAW);
//set VAO
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(Particle), (void*)(0));
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(Particle), (void*)(sizeof(vec2)*1));
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 4, GL_FLOAT, GL_FALSE, sizeof(Particle), (void*)(sizeof(vec2)*2));
//unbind
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
void Spirit::setCallBacks() const{
//glfwSetCursorPosCallback(scene.window, [](GLFWWindow *window, float x, float y){});
}
void Spirit::initCuda(InitKernelEnum const &ik){
deployGrid();
// cuda allocations
auto sz = sizeof(InitKernelEnum);
InitKernelEnum *deviceIK = nullptr;
CUDA_SAFE_CALL( hipMalloc((void**)&deviceIK, sz) );
CUDA_SAFE_CALL( hipMemcpy(deviceIK, &ik, sz, hipMemcpyHostToDevice) );
CUDA_SAFE_CALL( hipMalloc((void**)&deviceParticles, nParticle*sizeof(Particle)) );
CUDA_SAFE_CALL( hipMalloc((void**)&deviceRandStates, nParticle*sizeof(hiprandState_t)) );
//register buffer to cuda
CUDA_SAFE_CALL( hipGraphicsGLRegisterBuffer(&resource, VBO, hipGraphicsRegisterFlagsNone) );
//map dptr to VBO
size_t retSz;
Particle* dp;
CUDA_SAFE_CALL( hipGraphicsMapResources(1, &resource) );
CUDA_SAFE_CALL( hipGraphicsResourceGetMappedPointer((void**)&dp, &retSz, resource) );
//run cuda kernel
hipLaunchKernelGGL(( initKernel), dim3(grid), dim3(block), 0, 0, *deviceIK, dp, deviceRandStates, nParticle);
CUDA_ERROR_CHECKER;
CUDA_SAFE_CALL( hipDeviceSynchronize() );
//free
CUDA_SAFE_CALL( hipFree(deviceIK) );
}
void Spirit::render(UpdateKernelEnum const &uk, Mouse const &mouse){
//set mouse position to device
Mouse* deviceMouse = nullptr;
auto sz = sizeof(Mouse);
CUDA_SAFE_CALL( hipMalloc((void**)&deviceMouse, sz) );
CUDA_SAFE_CALL( hipMemcpy(deviceMouse, &mouse, sz, hipMemcpyHostToDevice) );
//set uk to device
sz = sizeof(UpdateKernelEnum);
UpdateKernelEnum *deviceUK = nullptr;
CUDA_SAFE_CALL( hipMalloc((void**)&deviceUK, sz) );
CUDA_SAFE_CALL( hipMemcpy(deviceUK, &uk, sz, hipMemcpyHostToDevice) );
//map dptr to VBO
size_t retSz;
Particle *dptr = nullptr;
CUDA_SAFE_CALL( hipGraphicsResourceGetMappedPointer((void**)&dptr, &retSz, resource) );
//run cuda kernel
hipLaunchKernelGGL(( renderKernel), dim3(grid), dim3(block), 0, 0, *deviceUK, dptr, nParticle, *deviceMouse);
CUDA_ERROR_CHECKER;
CUDA_SAFE_CALL( hipDeviceSynchronize() );
//draw
pShader.use();
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE);
glBindVertexArray(VAO);
glDrawArrays(GL_POINTS, 0, nParticle);
glBindVertexArray(0);
glDisable(GL_BLEND);
//free
CUDA_SAFE_CALL( hipFree(deviceMouse) );
CUDA_SAFE_CALL( hipFree(deviceUK) );
}
__GLOBAL__ void initKernel(InitKernelEnum const &ik, Particle* dp, hiprandState_t* dr, int n){
int index = getIdx();
if(index > n)
return ;
hiprandState_t *state = &dr[index];
//init hiprand states
hiprand_init(clock64(), index, 0, state);
dp[index].init(ik, state);
}
__GLOBAL__ void renderKernel(
UpdateKernelEnum const &uk, Particle* dp, int n, Mouse const &mouse){
int index = getIdx();
if(index > n)
return ;
dp[index].update(uk, mouse);
}
void Spirit::deployGrid(){
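// One thread per particle: cap the block at MAX_THREAD threads, then spread the
// required number of blocks over the x, y and z grid dimensions as each axis
// limit (65535) is exceeded.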
unsigned int blockX = nParticle>MAX_THREAD? MAX_THREAD: static_cast<unsigned int>(nParticle);
block = {blockX, 1, 1};
float nGrid = static_cast<float>(nParticle)/blockX;
if(nGrid > MAX_BLOCK_X*MAX_BLOCK_Y*MAX_BLOCK_Z)
throw std::runtime_error("Number of particles out of gpu limits.");
else if(nGrid > MAX_BLOCK_X*MAX_BLOCK_Y){
unsigned int z = ::ceil(nGrid/MAX_BLOCK_X/MAX_BLOCK_Y);
grid = {MAX_BLOCK_X, MAX_BLOCK_Y, z};
}
else if(nGrid > MAX_BLOCK_X){
unsigned int y = ::ceil(nGrid/MAX_BLOCK_X);
grid = {MAX_BLOCK_X, y, 1};
}
else if(nGrid > 0){
unsigned int x = ::ceil(nGrid);
grid = {x, 1, 1};
}
else
throw std::runtime_error("No particles in screen.");
}
| ae8dd2bc4e01868147f14792f2a0a1749c341962.cu | #include "spirit.cuh"
#define MAX_THREAD 1024
#define MAX_BLOCK_X 65535ll
#define MAX_BLOCK_Y 65535ll
#define MAX_BLOCK_Z 65535ll
Spirit::Spirit(int const &n, InitKernelEnum const &ik)
: resource(0),
nParticle(n),
deviceParticles(nullptr),
deviceRandStates(nullptr),
pShader("shaders/particle.vs", "shaders/particle.fs"){
createVBO();
setCallBacks();
initCuda(ik);
}
Spirit::~Spirit(){
//unmap resource
CUDA_SAFE_CALL( cudaGraphicsUnmapResources(1, &resource) );
CUDA_SAFE_CALL( cudaGraphicsUnregisterResource(resource) );
//free
CUDA_SAFE_CALL( cudaFree(deviceParticles) );
CUDA_SAFE_CALL( cudaFree(deviceRandStates) );
}
void Spirit::createVBO(){
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glBindVertexArray(VAO);
//set VBO
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, nParticle*sizeof(Particle), deviceParticles, GL_STATIC_DRAW);
//set VAO
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(Particle), (void*)(0));
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(Particle), (void*)(sizeof(vec2)*1));
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 4, GL_FLOAT, GL_FALSE, sizeof(Particle), (void*)(sizeof(vec2)*2));
//unbind
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
void Spirit::setCallBacks() const{
//glfwSetCursorPosCallback(scene.window, [](GLFWWindow *window, float x, float y){});
}
void Spirit::initCuda(InitKernelEnum const &ik){
deployGrid();
// cuda allocations
auto sz = sizeof(InitKernelEnum);
InitKernelEnum *deviceIK = nullptr;
CUDA_SAFE_CALL( cudaMalloc((void**)&deviceIK, sz) );
CUDA_SAFE_CALL( cudaMemcpy(deviceIK, &ik, sz, cudaMemcpyHostToDevice) );
CUDA_SAFE_CALL( cudaMalloc((void**)&deviceParticles, nParticle*sizeof(Particle)) );
CUDA_SAFE_CALL( cudaMalloc((void**)&deviceRandStates, nParticle*sizeof(curandState)) );
//register buffer to cuda
CUDA_SAFE_CALL( cudaGraphicsGLRegisterBuffer(&resource, VBO, cudaGraphicsRegisterFlagsNone) );
//map dptr to VBO
size_t retSz;
Particle* dp;
CUDA_SAFE_CALL( cudaGraphicsMapResources(1, &resource) );
CUDA_SAFE_CALL( cudaGraphicsResourceGetMappedPointer((void**)&dp, &retSz, resource) );
//run cuda kernel
initKernel<<<grid, block>>>(*deviceIK, dp, deviceRandStates, nParticle);
CUDA_ERROR_CHECKER;
CUDA_SAFE_CALL( cudaDeviceSynchronize() );
//free
CUDA_SAFE_CALL( cudaFree(deviceIK) );
}
void Spirit::render(UpdateKernelEnum const &uk, Mouse const &mouse){
//set mouse position to device
Mouse* deviceMouse = nullptr;
auto sz = sizeof(Mouse);
CUDA_SAFE_CALL( cudaMalloc((void**)&deviceMouse, sz) );
CUDA_SAFE_CALL( cudaMemcpy(deviceMouse, &mouse, sz, cudaMemcpyHostToDevice) );
//set uk to device
sz = sizeof(UpdateKernelEnum);
UpdateKernelEnum *deviceUK = nullptr;
CUDA_SAFE_CALL( cudaMalloc((void**)&deviceUK, sz) );
CUDA_SAFE_CALL( cudaMemcpy(deviceUK, &uk, sz, cudaMemcpyHostToDevice) );
//map dptr to VBO
size_t retSz;
Particle *dptr = nullptr;
CUDA_SAFE_CALL( cudaGraphicsResourceGetMappedPointer((void**)&dptr, &retSz, resource) );
//run cuda kernel
renderKernel<<<grid, block>>>(*deviceUK, dptr, nParticle, *deviceMouse);
CUDA_ERROR_CHECKER;
CUDA_SAFE_CALL( cudaDeviceSynchronize() );
//draw
pShader.use();
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE);
glBindVertexArray(VAO);
glDrawArrays(GL_POINTS, 0, nParticle);
glBindVertexArray(0);
glDisable(GL_BLEND);
//free
CUDA_SAFE_CALL( cudaFree(deviceMouse) );
CUDA_SAFE_CALL( cudaFree(deviceUK) );
}
__GLOBAL__ void initKernel(InitKernelEnum const &ik, Particle* dp, curandState* dr, int n){
int index = getIdx();
if(index >= n)
return ;
curandState *state = &dr[index];
//init curand states
curand_init(clock64(), index, 0, state);
dp[index].init(ik, state);
}
__GLOBAL__ void renderKernel(
UpdateKernelEnum const &uk, Particle* dp, int n, Mouse const &mouse){
int index = getIdx();
if(index >= n)
return ;
dp[index].update(uk, mouse);
}
void Spirit::deployGrid(){
unsigned int blockX = nParticle>MAX_THREAD? MAX_THREAD: static_cast<unsigned int>(nParticle);
block = {blockX, 1, 1};
float nGrid = static_cast<float>(nParticle)/blockX;
if(nGrid > MAX_BLOCK_X*MAX_BLOCK_Y*MAX_BLOCK_Z)
throw std::runtime_error("Number of particles out of gpu limits.");
else if(nGrid > MAX_BLOCK_X*MAX_BLOCK_Y){
unsigned int z = std::ceil(nGrid/MAX_BLOCK_X/MAX_BLOCK_Y);
grid = {MAX_BLOCK_X, MAX_BLOCK_Y, z};
}
else if(nGrid > MAX_BLOCK_X){
unsigned int y = std::ceil(nGrid/MAX_BLOCK_X);
grid = {MAX_BLOCK_X, y, 1};
}
else if(nGrid > 0){
unsigned int x = std::ceil(nGrid);
grid = {x, 1, 1};
}
else
throw std::runtime_error("No particles in screen.");
}
|
33326879c9573d8142ad188e1746e85d1d103e14.hip | // !!! This is a file automatically generated by hipify!!!
#include "diffusion3d_cuda_temporal_blocking.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define CUDA_SAFE_CALL(c) \
do { \
hipError_t _e = c; \
if (_e != hipSuccess) { \
fprintf(stderr, "Error: %s\n", hipGetErrorString(_e)); \
} \
} while (0)
namespace diffusion3d {
#if 0
__global__ void diffusion_kernel_temporal_blocking_1st_half(
REAL *f1, REAL *f2,
int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL ct, REAL cb, REAL cc) {
int i = (blockDim.x - 2) * blockIdx.x + threadIdx.x - 1;
i = max(i, 0);
i = min(i, nx-1);
int j = (blockDim.y - 2) * blockIdx.y + threadIdx.y - 1;
j = max(j, 0);
j = min(j, ny-1);
int c = i + j * nx;
int xy = nx * ny;
for (int k = 0; k < nz; ++k) {
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
int b = (k == 0) ? c : c - xy;
int t = (k == nz-1) ? c : c + xy;
f2[c] = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
c += xy;
}
return;
}
__global__ void diffusion_kernel_temporal_blocking_2nd_half(
REAL *f1, REAL *f2,
int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL ct, REAL cb, REAL cc) {
int i = (blockDim.x - 2) * blockIdx.x + min(threadIdx.x, blockDim.x - 3);
i = min(i, nx-1);
int j = (blockDim.y - 2) * blockIdx.y + min(threadIdx.y, blockDim.y - 3);
j = min(j, ny-1);
int c = i + j * nx;
int xy = nx * ny;
for (int k = 0; k < nz; ++k) {
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
int b = (k == 0) ? c : c - xy;
int t = (k == nz-1) ? c : c + xy;
#if 0
if (threadIdx.x > 0 && threadIdx.x < (blockDim.x - 1) &&
threadIdx.y > 0 && threadIdx.y < (blockDim.y - 1)) {
#endif
f2[c] = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
#if 0
}
#endif
c += xy;
}
return;
}
#endif
__global__ void diffusion_kernel_temporal_blocking(
REAL *f1, REAL *f2, int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs, REAL ct, REAL cb, REAL cc) {
int i, j, c, sc;
int i2, j2, c2, sc2;
extern __shared__ REAL sb[];
REAL *sb1 = sb;
REAL *sb2 = sb + blockDim.x * blockDim.y;
REAL *sb3 = sb + blockDim.x * blockDim.y * 2;
i = (blockDim.x - 2) * blockIdx.x + threadIdx.x - 1;
i = max(i, 0);
i = min(i, nx-1);
j = (blockDim.y - 2) * blockIdx.y + threadIdx.y - 1;
j = max(j, 0);
j = min(j, ny-1);
c = i + j * nx;
sc = threadIdx.x + threadIdx.y * blockDim.x;
const int xy = nx * ny;
i2 = (blockDim.x - 2) * blockIdx.x +
min(threadIdx.x, blockDim.x - 3);
i2 = min(i2, nx-1);
j2 = (blockDim.y - 2) * blockIdx.y +
min(threadIdx.y, blockDim.y - 3);
j2 = min(j2, ny-1);
c2 = i2 + j2 * nx;
sc2 = (i2 % (blockDim.x-2)) + 1 + ((j2 % (blockDim.y-2)) + 1) * blockDim.x;
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
int b = c;
int t = c + xy;
float v = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
sb2[sc] = v;
c += xy;
for (int k = 1; k < nz; ++k) {
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
int b = (k == 0) ? c : c - xy;
int t = (k == nz-1) ? c : c + xy;
float v = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
sb3[sc] = v;
c += xy;
__syncthreads();
w = (i2 == 0) ? sc2 : sc2 - 1;
e = (i2 == nx-1) ? sc2 : sc2 + 1;
n = (j2 == 0) ? sc2 : sc2 - blockDim.x;
s = (j2 == ny-1) ? sc2 : sc2 + blockDim.x;
REAL *bv = (k-1 == 0) ? sb2 + sc2 : sb1 + sc2;
REAL *tv = sb3 + sc2;
f2[c2] = cc * sb2[sc2] + cw * sb2[w] + ce * sb2[e] + cs * sb2[s]
+ cn * sb2[n] + cb * (*bv) + ct * (*tv);
c2 += xy;
__syncthreads();
REAL *sb_tmp = sb1;
sb1 = sb2;
sb2 = sb3;
sb3 = sb_tmp;
}
w = (i2 == 0) ? sc2 : sc2 - 1;
e = (i2 == nx-1) ? sc2 : sc2 + 1;
n = (j2 == 0) ? sc2 : sc2 - blockDim.x;
s = (j2 == ny-1) ? sc2 : sc2 + blockDim.x;
REAL *bv = sb1 + sc2;
REAL *tv = sb2 + sc2;
f2[c2] = cc * sb2[sc2] + cw * sb2[w] + ce * sb2[e] + cs * sb2[s]
+ cn * sb2[n] + cb * (*bv) + ct * (*tv);
return;
}
__global__ void diffusion_kernel_temporal_blocking2(
REAL *f1, REAL *f2, int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs, REAL ct, REAL cb, REAL cc) {
int c, sc;
int c2, sc2;
extern __shared__ REAL sb[];
REAL *sb1 = sb;
REAL *sb2 = sb + blockDim.x * blockDim.y;
REAL *sb3 = sb + blockDim.x * blockDim.y * 2;
const int i = min(
nx-1, max(0,
(blockDim.x - 2) * blockIdx.x + threadIdx.x - 1));
const int j =
min(ny-1,
max(0, (blockDim.y - 2) * blockIdx.y + threadIdx.y - 1));
c = i + j * nx;
sc = threadIdx.x + threadIdx.y * blockDim.x;
const int xy = nx * ny;
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
int b = c;
int t = c + xy;
float v = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
sb2[sc] = v;
c += xy;
w += xy;
e += xy;
n += xy;
s += xy;
t += xy;
const int i2 = min(nx-1, (blockDim.x - 2) * blockIdx.x +
min(threadIdx.x, blockDim.x - 3));
const int j2 = min(ny-1, (blockDim.y - 2) * blockIdx.y +
min(threadIdx.y, blockDim.y - 3));
c2 = i2 + j2 * nx;
sc2 = (i2 % (blockDim.x-2)) + 1 + ((j2 % (blockDim.y-2)) + 1) * blockDim.x;
int w2 = (i2 == 0) ? sc2 : sc2 - 1;
int e2 = (i2 == nx-1) ? sc2 : sc2 + 1;
int n2 = (j2 == 0) ? sc2 : sc2 - blockDim.x;
int s2 = (j2 == ny-1) ? sc2 : sc2 + blockDim.x;
{
int k = 1;
float v = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
sb3[sc] = v;
c += xy;
w += xy;
e += xy;
n += xy;
s += xy;
b += xy;
t += xy;
__syncthreads();
f2[c2] = cc * sb2[sc2] + cw * sb2[w2] + ce * sb2[e2] + cs * sb2[s2]
+ cn * sb2[n2] + cb * sb2[sc2] + ct * sb3[sc2];
c2 += xy;
__syncthreads();
REAL *sb_tmp = sb1;
sb1 = sb2;
sb2 = sb3;
sb3 = sb_tmp;
}
for (int k = 2; k < nz-1; ++k) {
float v = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
sb3[sc] = v;
c += xy;
w += xy;
e += xy;
n += xy;
s += xy;
b += xy;
t += xy;
__syncthreads();
f2[c2] = cc * sb2[sc2] + cw * sb2[w2] + ce * sb2[e2] + cs * sb2[s2]
+ cn * sb2[n2] + cb * sb1[sc2] + ct * sb3[sc2];
c2 += xy;
__syncthreads();
REAL *sb_tmp = sb1;
sb1 = sb2;
sb2 = sb3;
sb3 = sb_tmp;
}
sb3[sc] = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[c];
__syncthreads();
f2[c2] = cc * sb2[sc2] + cw * sb2[w2] + ce * sb2[e2] + cs * sb2[s2]
+ cn * sb2[n2] + cb * sb1[sc2] + ct * sb3[sc2];
c2 += xy;
__syncthreads();
REAL *sb_tmp = sb1;
sb1 = sb2;
sb2 = sb3;
sb3 = sb_tmp;
f2[c2] = cc * sb2[sc2] + cw * sb2[w2] + ce * sb2[e2] + cs * sb2[s2]
+ cn * sb2[n2] + cb * sb1[sc2] + ct * sb2[sc2];
return;
}
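// Note on the scheme: the two temporal-blocking kernels above fuse two
// diffusion time steps in a single launch (hence the host loop below
// advances by i += 2). The first stencil application is written into three
// shared-memory planes (sb1/sb2/sb3 holding the previous, current and next
// z-plane results) that are rotated by pointer swap each z-iteration, so
// the second application can read them without going back to global memory.
// Sizing example, assuming REAL is double and 16x16 thread blocks,
// RunKernel() below requests
//   shared_size = sizeof(REAL) * block_dim.x * block_dim.y * 3
//               = 8 * 16 * 16 * 3 = 6144 bytes per block.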
void Diffusion3DCUDATemporalBlocking::RunKernel(int count) {
size_t s = sizeof(REAL) * nx_ * ny_ * nz_;
CUDA_SAFE_CALL(hipMemcpy(f1_d_, f1_, s, hipMemcpyHostToDevice));
assert(block_x_ > 2);
assert(block_y_ > 2);
dim3 block_dim(block_x_, block_y_);
dim3 grid_dim(nx_ / (block_x_ - 2), ny_ / (block_y_ - 2), 1);
if (nx_ % (block_x_ - 2)) ++grid_dim.x;
if (ny_ % (block_y_ - 2)) ++grid_dim.y;
size_t shared_size = sizeof(REAL) * block_dim.x * block_dim.y * 3;
printf("Shared memory size: %ld bytes\n", shared_size);
CUDA_SAFE_CALL(hipEventRecord(ev1_));
for (int i = 0; i < count; i += 2) {
#if 1
hipLaunchKernelGGL(( diffusion_kernel_temporal_blocking2),
dim3(grid_dim), dim3(block_dim), shared_size, 0,
f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
//CUDA_SAFE_CALL(hipGetLastError());
REAL *f_tmp = f1_d_;
f1_d_ = f2_d_;
f2_d_ = f_tmp;
#elif 0
hipLaunchKernelGGL(( diffusion_kernel_temporal_blocking),
dim3(grid_dim), dim3(block_dim), shared_size, 0,
f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
//CUDA_SAFE_CALL(hipGetLastError());
REAL *f_tmp = f1_d_;
f1_d_ = f2_d_;
f2_d_ = f_tmp;
#elif 0
hipLaunchKernelGGL(( diffusion_kernel_temporal_blocking_1st_half), dim3(grid_dim), dim3(block_dim), 0, 0,
f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
hipLaunchKernelGGL(( diffusion_kernel_temporal_blocking_2nd_half), dim3(grid_dim), dim3(block_dim), 0, 0,
f2_d_, f1_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
#elif 0
hipLaunchKernelGGL(( diffusion_kernel_temporal_blocking_1st_half), dim3(grid_dim), dim3(block_dim), 0, 0,
f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
hipLaunchKernelGGL(( diffusion_kernel_temporal_blocking_1st_half), dim3(grid_dim), dim3(block_dim), 0, 0,
f2_d_, f1_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
#else
hipLaunchKernelGGL(( diffusion_kernel_temporal_blocking_2nd_half), dim3(grid_dim), dim3(block_dim), 0, 0,
f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
hipLaunchKernelGGL(( diffusion_kernel_temporal_blocking_2nd_half), dim3(grid_dim), dim3(block_dim), 0, 0,
f2_d_, f1_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
#endif
}
CUDA_SAFE_CALL(hipEventRecord(ev2_));
CUDA_SAFE_CALL(hipMemcpy(f1_, f1_d_, s, hipMemcpyDeviceToHost));
return;
}
}
| 33326879c9573d8142ad188e1746e85d1d103e14.cu | #include "diffusion3d_cuda_temporal_blocking.h"
#include <cuda.h>
#include <cuda_runtime.h>
#define CUDA_SAFE_CALL(c) \
do { \
cudaError_t _e = c; \
if (_e != cudaSuccess) { \
fprintf(stderr, "Error: %s\n", cudaGetErrorString(_e)); \
} \
} while (0)
namespace diffusion3d {
#if 0
__global__ void diffusion_kernel_temporal_blocking_1st_half(
REAL *f1, REAL *f2,
int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL ct, REAL cb, REAL cc) {
int i = (blockDim.x - 2) * blockIdx.x + threadIdx.x - 1;
i = max(i, 0);
i = min(i, nx-1);
int j = (blockDim.y - 2) * blockIdx.y + threadIdx.y - 1;
j = max(j, 0);
j = min(j, ny-1);
int c = i + j * nx;
int xy = nx * ny;
for (int k = 0; k < nz; ++k) {
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
int b = (k == 0) ? c : c - xy;
int t = (k == nz-1) ? c : c + xy;
f2[c] = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
c += xy;
}
return;
}
__global__ void diffusion_kernel_temporal_blocking_2nd_half(
REAL *f1, REAL *f2,
int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs,
REAL ct, REAL cb, REAL cc) {
int i = (blockDim.x - 2) * blockIdx.x + min(threadIdx.x, blockDim.x - 3);
i = min(i, nx-1);
int j = (blockDim.y - 2) * blockIdx.y + min(threadIdx.y, blockDim.y - 3);
j = min(j, ny-1);
int c = i + j * nx;
int xy = nx * ny;
for (int k = 0; k < nz; ++k) {
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
int b = (k == 0) ? c : c - xy;
int t = (k == nz-1) ? c : c + xy;
#if 0
if (threadIdx.x > 0 && threadIdx.x < (blockDim.x - 1) &&
threadIdx.y > 0 && threadIdx.y < (blockDim.y - 1)) {
#endif
f2[c] = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
#if 0
}
#endif
c += xy;
}
return;
}
#endif
__global__ void diffusion_kernel_temporal_blocking(
REAL *f1, REAL *f2, int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs, REAL ct, REAL cb, REAL cc) {
int i, j, c, sc;
int i2, j2, c2, sc2;
extern __shared__ REAL sb[];
REAL *sb1 = sb;
REAL *sb2 = sb + blockDim.x * blockDim.y;
REAL *sb3 = sb + blockDim.x * blockDim.y * 2;
i = (blockDim.x - 2) * blockIdx.x + threadIdx.x - 1;
i = max(i, 0);
i = min(i, nx-1);
j = (blockDim.y - 2) * blockIdx.y + threadIdx.y - 1;
j = max(j, 0);
j = min(j, ny-1);
c = i + j * nx;
sc = threadIdx.x + threadIdx.y * blockDim.x;
const int xy = nx * ny;
i2 = (blockDim.x - 2) * blockIdx.x +
min(threadIdx.x, blockDim.x - 3);
i2 = min(i2, nx-1);
j2 = (blockDim.y - 2) * blockIdx.y +
min(threadIdx.y, blockDim.y - 3);
j2 = min(j2, ny-1);
c2 = i2 + j2 * nx;
sc2 = (i2 % (blockDim.x-2)) + 1 + ((j2 % (blockDim.y-2)) + 1) * blockDim.x;
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
int b = c;
int t = c + xy;
float v = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
sb2[sc] = v;
c += xy;
for (int k = 1; k < nz; ++k) {
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
int b = (k == 0) ? c : c - xy;
int t = (k == nz-1) ? c : c + xy;
float v = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
sb3[sc] = v;
c += xy;
__syncthreads();
w = (i2 == 0) ? sc2 : sc2 - 1;
e = (i2 == nx-1) ? sc2 : sc2 + 1;
n = (j2 == 0) ? sc2 : sc2 - blockDim.x;
s = (j2 == ny-1) ? sc2 : sc2 + blockDim.x;
REAL *bv = (k-1 == 0) ? sb2 + sc2 : sb1 + sc2;
REAL *tv = sb3 + sc2;
f2[c2] = cc * sb2[sc2] + cw * sb2[w] + ce * sb2[e] + cs * sb2[s]
+ cn * sb2[n] + cb * (*bv) + ct * (*tv);
c2 += xy;
__syncthreads();
REAL *sb_tmp = sb1;
sb1 = sb2;
sb2 = sb3;
sb3 = sb_tmp;
}
w = (i2 == 0) ? sc2 : sc2 - 1;
e = (i2 == nx-1) ? sc2 : sc2 + 1;
n = (j2 == 0) ? sc2 : sc2 - blockDim.x;
s = (j2 == ny-1) ? sc2 : sc2 + blockDim.x;
REAL *bv = sb1 + sc2;
REAL *tv = sb2 + sc2;
f2[c2] = cc * sb2[sc2] + cw * sb2[w] + ce * sb2[e] + cs * sb2[s]
+ cn * sb2[n] + cb * (*bv) + ct * (*tv);
return;
}
__global__ void diffusion_kernel_temporal_blocking2(
REAL *f1, REAL *f2, int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs, REAL ct, REAL cb, REAL cc) {
int c, sc;
int c2, sc2;
extern __shared__ REAL sb[];
REAL *sb1 = sb;
REAL *sb2 = sb + blockDim.x * blockDim.y;
REAL *sb3 = sb + blockDim.x * blockDim.y * 2;
const int i = min(
nx-1, max(0,
(blockDim.x - 2) * blockIdx.x + threadIdx.x - 1));
const int j =
min(ny-1,
max(0, (blockDim.y - 2) * blockIdx.y + threadIdx.y - 1));
c = i + j * nx;
sc = threadIdx.x + threadIdx.y * blockDim.x;
const int xy = nx * ny;
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
int b = c;
int t = c + xy;
float v = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
sb2[sc] = v;
c += xy;
w += xy;
e += xy;
n += xy;
s += xy;
t += xy;
const int i2 = min(nx-1, (blockDim.x - 2) * blockIdx.x +
min(threadIdx.x, blockDim.x - 3));
const int j2 = min(ny-1, (blockDim.y - 2) * blockIdx.y +
min(threadIdx.y, blockDim.y - 3));
c2 = i2 + j2 * nx;
sc2 = (i2 % (blockDim.x-2)) + 1 + ((j2 % (blockDim.y-2)) + 1) * blockDim.x;
int w2 = (i2 == 0) ? sc2 : sc2 - 1;
int e2 = (i2 == nx-1) ? sc2 : sc2 + 1;
int n2 = (j2 == 0) ? sc2 : sc2 - blockDim.x;
int s2 = (j2 == ny-1) ? sc2 : sc2 + blockDim.x;
{
int k = 1;
float v = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
sb3[sc] = v;
c += xy;
w += xy;
e += xy;
n += xy;
s += xy;
b += xy;
t += xy;
__syncthreads();
f2[c2] = cc * sb2[sc2] + cw * sb2[w2] + ce * sb2[e2] + cs * sb2[s2]
+ cn * sb2[n2] + cb * sb2[sc2] + ct * sb3[sc2];
c2 += xy;
__syncthreads();
REAL *sb_tmp = sb1;
sb1 = sb2;
sb2 = sb3;
sb3 = sb_tmp;
}
for (int k = 2; k < nz-1; ++k) {
float v = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[t];
sb3[sc] = v;
c += xy;
w += xy;
e += xy;
n += xy;
s += xy;
b += xy;
t += xy;
__syncthreads();
f2[c2] = cc * sb2[sc2] + cw * sb2[w2] + ce * sb2[e2] + cs * sb2[s2]
+ cn * sb2[n2] + cb * sb1[sc2] + ct * sb3[sc2];
c2 += xy;
__syncthreads();
REAL *sb_tmp = sb1;
sb1 = sb2;
sb2 = sb3;
sb3 = sb_tmp;
}
sb3[sc] = cc * f1[c] + cw * f1[w] + ce * f1[e] + cs * f1[s]
+ cn * f1[n] + cb * f1[b] + ct * f1[c];
__syncthreads();
f2[c2] = cc * sb2[sc2] + cw * sb2[w2] + ce * sb2[e2] + cs * sb2[s2]
+ cn * sb2[n2] + cb * sb1[sc2] + ct * sb3[sc2];
c2 += xy;
__syncthreads();
REAL *sb_tmp = sb1;
sb1 = sb2;
sb2 = sb3;
sb3 = sb_tmp;
f2[c2] = cc * sb2[sc2] + cw * sb2[w2] + ce * sb2[e2] + cs * sb2[s2]
+ cn * sb2[n2] + cb * sb1[sc2] + ct * sb2[sc2];
return;
}
void Diffusion3DCUDATemporalBlocking::RunKernel(int count) {
size_t s = sizeof(REAL) * nx_ * ny_ * nz_;
CUDA_SAFE_CALL(cudaMemcpy(f1_d_, f1_, s, cudaMemcpyHostToDevice));
assert(block_x_ > 2);
assert(block_y_ > 2);
dim3 block_dim(block_x_, block_y_);
dim3 grid_dim(nx_ / (block_x_ - 2), ny_ / (block_y_ - 2), 1);
if (nx_ % (block_x_ - 2)) ++grid_dim.x;
if (ny_ % (block_y_ - 2)) ++grid_dim.y;
size_t shared_size = sizeof(REAL) * block_dim.x * block_dim.y * 3;
printf("Shared memory size: %ld bytes\n", shared_size);
CUDA_SAFE_CALL(cudaEventRecord(ev1_));
for (int i = 0; i < count; i += 2) {
#if 1
diffusion_kernel_temporal_blocking2<<<
grid_dim, block_dim, shared_size>>>
(f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
//CUDA_SAFE_CALL(cudaGetLastError());
REAL *f_tmp = f1_d_;
f1_d_ = f2_d_;
f2_d_ = f_tmp;
#elif 0
diffusion_kernel_temporal_blocking<<<
grid_dim, block_dim, shared_size>>>
(f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
//CUDA_SAFE_CALL(cudaGetLastError());
REAL *f_tmp = f1_d_;
f1_d_ = f2_d_;
f2_d_ = f_tmp;
#elif 0
diffusion_kernel_temporal_blocking_1st_half<<<grid_dim, block_dim>>>
(f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
diffusion_kernel_temporal_blocking_2nd_half<<<grid_dim, block_dim>>>
(f2_d_, f1_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
#elif 0
diffusion_kernel_temporal_blocking_1st_half<<<grid_dim, block_dim>>>
(f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
diffusion_kernel_temporal_blocking_1st_half<<<grid_dim, block_dim>>>
(f2_d_, f1_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
#else
diffusion_kernel_temporal_blocking_2nd_half<<<grid_dim, block_dim>>>
(f1_d_, f2_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
diffusion_kernel_temporal_blocking_2nd_half<<<grid_dim, block_dim>>>
(f2_d_, f1_d_, nx_, ny_, nz_, ce_, cw_, cn_, cs_, ct_, cb_, cc_);
#endif
}
CUDA_SAFE_CALL(cudaEventRecord(ev2_));
CUDA_SAFE_CALL(cudaMemcpy(f1_, f1_d_, s, cudaMemcpyDeviceToHost));
return;
}
}
|
64902b3f62b94e61ddf486c836ad1bfd09d55585.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#ifdef __cplusplus
extern "C" {
#endif /*__cplusplus*/
#include <stdio.h> // stdio functions are used since C++ streams aren't necessarily thread safe
#include <stdlib.h>
#include <string.h>
#include "graphio.h"
#include "graph.h"
#ifdef __cplusplus
}
#endif /*__cplusplus*/
#include <string>
//#include <omp.h>
//#define DEBUG
#define NREPS 10 // number of repetations for time calculations
#define THREADS_PER_BLOCK 1024
__global__ void ClosenessCentKernel(int *result, const etype *rowPtr, const vtype *colInd, vtype nov)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
unsigned int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if (index < nov)
result[index] = -1;
}
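// Launch-shape note: CudaClosenessCent() below uses a 1-D grid of
// ceil(nov / THREADS_PER_BLOCK) blocks, each 32 x 32 threads
// (numThreads = sqrt(1024) = 32). With gridDim.y == 1 the index above
// reduces to blockIdx.x * 1024 + threadIdx.y * 32 + threadIdx.x, and the
// `index < nov` guard drops the tail threads of the last block.
// Note also that only nov row-pointer entries are allocated and copied to
// the device while the host reads rowPtr[nov]; a complete CSR row-pointer
// array would need nov + 1 entries if a future kernel dereferences it.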
// Helper function for using CUDA to add vectors in parallel.
hipError_t CudaClosenessCent(int *result, const etype *rowPtr, const vtype *colInd, vtype nov)
{
etype *dev_rowPtr = 0;
vtype *dev_colInd = 0;
int *dev_result = 0;
hipError_t cudaStatus;
int numThreads = (int)sqrt(THREADS_PER_BLOCK);
dim3 dimBlock(numThreads, numThreads, 1);
//===========================================================================================================================
// Allocate GPU buffers for three vectors (two input, one output)
cudaStatus = hipMalloc((void**)&dev_result, nov * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_rowPtr, nov * sizeof(etype));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_colInd, rowPtr[nov] * sizeof(vtype));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
//===========================================================================================================================
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_rowPtr, rowPtr, nov * sizeof(etype), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_colInd, colInd, rowPtr[nov] * sizeof(vtype), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
//===========================================================================================================================
// Launch a kernel on the GPU with one thread for each element, and check for errors.
printf("%d, %d\n", nov, nov / numThreads);
hipLaunchKernelGGL(( ClosenessCentKernel), dim3((nov+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK), dim3(dimBlock), 0, 0, dev_result, dev_rowPtr, dev_colInd, nov);
//===========================================================================================================================
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(result, dev_result, nov * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_result);
hipFree(dev_rowPtr);
hipFree(dev_colInd);
return cudaStatus;
}
| 64902b3f62b94e61ddf486c836ad1bfd09d55585.cu | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#ifdef __cplusplus
extern "C" {
#endif /*__cplusplus*/
#include <stdio.h> // stdio functions are used since C++ streams aren't necessarily thread safe
#include <stdlib.h>
#include <string.h>
#include "graphio.h"
#include "graph.h"
#ifdef __cplusplus
}
#endif /*__cplusplus*/
#include <string>
//#include <omp.h>
//#define DEBUG
#define NREPS 10 // number of repetations for time calculations
#define THREADS_PER_BLOCK 1024
__global__ void ClosenessCentKernel(int *result, const etype *rowPtr, const vtype *colInd, vtype nov)
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
unsigned int index = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if (index < nov)
result[index] = -1;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t CudaClosenessCent(int *result, const etype *rowPtr, const vtype *colInd, vtype nov)
{
etype *dev_rowPtr = 0;
vtype *dev_colInd = 0;
int *dev_result = 0;
cudaError_t cudaStatus;
int numThreads = (int)sqrt(THREADS_PER_BLOCK);
dim3 dimBlock(numThreads, numThreads, 1);
//===========================================================================================================================
// Allocate GPU buffers for three vectors (two input, one output)
cudaStatus = cudaMalloc((void**)&dev_result, nov * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_rowPtr, nov * sizeof(etype));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_colInd, rowPtr[nov] * sizeof(vtype));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
//===========================================================================================================================
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_rowPtr, rowPtr, nov * sizeof(etype), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_colInd, colInd, rowPtr[nov] * sizeof(vtype), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
//===========================================================================================================================
// Launch a kernel on the GPU with one thread for each element, and check for errors.
printf("%d, %d\n", nov, nov / numThreads);
ClosenessCentKernel<<<(nov+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK, dimBlock>>>(dev_result, dev_rowPtr, dev_colInd, nov);
//===========================================================================================================================
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(result, dev_result, nov * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_result);
cudaFree(dev_rowPtr);
cudaFree(dev_colInd);
return cudaStatus;
}
|
84cd305a0066fd5ec8b35cefc67cfcb777598976.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <iostream>
__global__ void kernel( void ) {
}
int main( void ) {
hipLaunchKernelGGL(( kernel), dim3(1),dim3(1), 0, 0, );
int deviceCount;
hipGetDeviceCount(&deviceCount);
printf(" Device Count %i \n",deviceCount);
int device;
for (device = 0; device < deviceCount; ++device) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop,device);
printf("Name : %s\n", prop.name);
printf("Device %d has compute capability %d.%d.\n",
device, prop.major, prop.minor);
printf("totalGlobalMem : %u MB \n" , prop.totalGlobalMem / (1024 * 1024));
printf("sharedMemPerBlock : %u KB \n" , prop.sharedMemPerBlock / 1024 );
printf("regsPerBlock:%d \n", prop.regsPerBlock);
printf("warpSize : %d \n" , prop.warpSize);
printf("memPitch : %u \n", prop.memPitch);
printf("maxThreadPerBlock %d \n" , prop.maxThreadsPerBlock ) ;
printf("maxThreadsDim:x %d, y %d, z %d\n",prop.maxThreadsDim[0],prop.maxThreadsDim[1] , prop.maxThr$
printf("maxGridSize:x %d, y %d, z%d\n", prop.maxGridSize[0],prop.maxGridSize[0] , prop.maxGridSize[$
printf("deviceOverlap:%d \n", prop.deviceOverlap);
printf("totalConstMem:%u\n" , prop.totalConstMem);
printf("major:%d\n",prop.major);
printf("minor:%d\n",prop.minor);
printf("clockRate:%d\n",prop.clockRate);
printf("textureAlignment:%u\n",prop.textureAlignment);
if ( prop.major >= 1 ) {
break;
}
}
return 0;
} | 84cd305a0066fd5ec8b35cefc67cfcb777598976.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <iostream>
__global__ void kernel( void ) {
}
int main( void ) {
kernel<<<1,1>>>();
int deviceCount;
cudaGetDeviceCount(&deviceCount);
printf(" Device Count %i \n",deviceCount);
int device;
for (device = 0; device < deviceCount; ++device) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop,device);
printf("Name : %s\n", prop.name);
printf("Device %d has compute capability %d.%d.\n",
device, prop.major, prop.minor);
printf("totalGlobalMem : %u MB \n" , prop.totalGlobalMem / (1024 * 1024));
printf("sharedMemPerBlock : %u KB \n" , prop.sharedMemPerBlock / 1024 );
printf("regsPerBlock:%d \n", prop.regsPerBlock);
printf("warpSize : %d \n" , prop.warpSize);
printf("memPitch : %u \n", prop.memPitch);
printf("maxThreadPerBlock %d \n" , prop.maxThreadsPerBlock ) ;
printf("maxThreadsDim:x %d, y %d, z %d\n",prop.maxThreadsDim[0],prop.maxThreadsDim[1] , prop.maxThr$
printf("maxGridSize:x %d, y %d, z%d\n", prop.maxGridSize[0],prop.maxGridSize[0] , prop.maxGridSize[$
printf("deviceOverlap:%d \n", prop.deviceOverlap);
printf("totalConstMem:%u\n" , prop.totalConstMem);
printf("major:%d\n",prop.major);
printf("minor:%d\n",prop.minor);
printf("clockRate:%d\n",prop.clockRate);
printf("textureAlignment:%u\n",prop.textureAlignment);
if ( prop.major >= 1 ) {
break;
}
}
return 0;
} |
7e66579cf90bf5341481a2e466fa5b7b0a21c58e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Filters
//
// Includes: system
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdint.h>
#include <errno.h>
#include <assert.h>
#include <string.h>
#include <sys/io.h>
#include <cutil_inline.h>
// Includes: local
#include "bmp.h"
enum {SOBEL_FILTER=1, AVERAGE_FILTER, HIGH_BOOST_FILTER};
#define CLAMP_8bit(x) max(0, min(255, (x)))//1023
char *BMPInFile = "lena.bmp";//lena/dublin
char *BMPOutFile = "output.bmp";
char *Filter = "sobel";
int FilterMode = SOBEL_FILTER;
// create and stat timer as unsigned interger
unsigned int timer_CPU =0;
unsigned int timer_GPU =0;
// Functions
void Cleanup(void);
void ParseArguments(int, char**);
void FilterWrapper(unsigned char* pImageIn, int Width, int Height);
// Kernels
__global__ void SobelFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height);
__global__ void AverageFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height);
__global__ void HighBoostFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height);
/* Device Memory */
unsigned char *d_In;
unsigned char *d_Out;
// Setup for kernel size
const int TILE_WIDTH = 6;
const int TILE_HEIGHT = 6;
const int FILTER_RADIUS = 1;
// const int FILTER_RADIUS = 3;
const int FILTER_DIAMETER = 2 * FILTER_RADIUS + 1;
const int FILTER_AREA = FILTER_DIAMETER * FILTER_DIAMETER;
const int BLOCK_WIDTH = TILE_WIDTH + 2*FILTER_RADIUS;
const int BLOCK_HEIGHT = TILE_HEIGHT + 2*FILTER_RADIUS;
const int EDGE_VALUE_THRESHOLD = 70;
const int HIGH_BOOST_FACTOR = 10;
#include "filter_kernel.hip"
void BitMapRead(char *file, struct bmp_header *bmp, struct dib_header *dib, unsigned char **data, unsigned char **palete)
{
size_t palete_size;
int fd;
if((fd = open(file, O_RDONLY )) < 0)
FATAL("Open Source");
if(read(fd, bmp, BMP_SIZE) != BMP_SIZE)
FATAL("Read BMP Header");
if(read(fd, dib, DIB_SIZE) != DIB_SIZE)
FATAL("Read DIB Header");
assert(dib->bpp == 8);
palete_size = bmp->offset - BMP_SIZE - DIB_SIZE;
if(palete_size > 0) {
*palete = (unsigned char *)malloc(palete_size);
int go = read(fd, *palete, palete_size);
if (go != palete_size) {
FATAL("Read Palete");
}
}
*data = (unsigned char *)malloc(dib->image_size);
if(read(fd, *data, dib->image_size) != dib->image_size)
FATAL("Read Image");
close(fd);
}
void BitMapWrite(char *file, struct bmp_header *bmp, struct dib_header *dib, unsigned char *data, unsigned char *palete)
{
size_t palete_size;
int fd;
palete_size = bmp->offset - BMP_SIZE - DIB_SIZE;
if((fd = open(file, O_WRONLY | O_CREAT | O_TRUNC,
S_IRUSR | S_IWUSR |S_IRGRP)) < 0)
FATAL("Open Destination");
if(write(fd, bmp, BMP_SIZE) != BMP_SIZE)
FATAL("Write BMP Header");
if(write(fd, dib, DIB_SIZE) != DIB_SIZE)
FATAL("Write BMP Header");
if(palete_size != 0) {
if(write(fd, palete, palete_size) != palete_size)
FATAL("Write Palete");
}
if(write(fd, data, dib->image_size) != dib->image_size)
FATAL("Write Image");
close(fd);
}
void CPU_Sobel(unsigned char* imageIn, unsigned char* imageOut, int width, int height)
{
int i, j, rows, cols, startCol, endCol, startRow, endRow;
const float SobelMatrix[9] = {-1,0,1,-2,0,2,-1,0,1};
rows = height;
cols = width;
// Initialize all output pixels to zero
for(i=0; i<rows; i++) {
for(j=0; j<cols; j++) {
imageOut[i*width + j] = 0;
}
}
startCol = 1;
endCol = cols - 1;
startRow = 1;
endRow = rows - 1;
// Go through all inner pizel positions
for(i=startRow; i<endRow; i++) {
for(j=startCol; j<endCol; j++) {
// sum up the 9 values to calculate both the direction x and direction y
float sumX = 0, sumY=0;
for(int dy = -FILTER_RADIUS; dy <= FILTER_RADIUS; dy++) {
for(int dx = -FILTER_RADIUS; dx <= FILTER_RADIUS; dx++) {
float Pixel = (float)(imageIn[i*width + j + (dy * width + dx)]);
sumX += Pixel * SobelMatrix[(dy + FILTER_RADIUS) * FILTER_DIAMETER + (dx+FILTER_RADIUS)];
sumY += Pixel * SobelMatrix[(dx + FILTER_RADIUS) * FILTER_DIAMETER + (dy+FILTER_RADIUS)];
}
}
imageOut[i*width + j] = (abs(sumX) + abs(sumY)) > EDGE_VALUE_THRESHOLD ? 255 : 0;//255: 0/ 1023:0
}
}
}
void CPU_boost(unsigned char* imageIn, unsigned char* imageOut, int width, int height)
{
int i, j, rows, cols, startCol, endCol, startRow, endRow;
// const float SobelMatrix[9] = {-1,0,1,-2,0,2,-1,0,1};
unsigned char centerPixel;
rows = height;
cols = width;
// Initialize all output pixels to zero
for(i=0; i<rows; i++) {
for(j=0; j<cols; j++) {
imageOut[i*width + j] = 0;
}
}
startCol = 1;
endCol = cols - 1;
startRow = 1;
endRow = rows - 1;
// Go through all inner pizel positions
for(i=startRow; i<endRow; i++) {
for(j=startCol; j<endCol; j++) {
centerPixel = imageIn[i*width+j];
// sum up the 9 values to calculate both the direction x and direction y
float sum = 0;
for(int dy = -FILTER_RADIUS; dy <= FILTER_RADIUS; dy++) {
for(int dx = -FILTER_RADIUS; dx <= FILTER_RADIUS; dx++) {
// centerPixel = (float) (imageIn[i*width+j]);
float Pixel = (float)(imageIn[i*width + j + (dy * width + dx)]);
// sumX += Pixel * SobelMatrix[(dy + FILTER_RADIUS) * FILTER_DIAMETER + (dx+FILTER_RADIUS)];
// sumY += Pixel * SobelMatrix[(dx + FILTER_RADIUS) * FILTER_DIAMETER + (dy+FILTER_RADIUS)];
sum += Pixel;
}
}
// imageOut[i*width + j] = (abs(sumX) + abs(sumY)) > EDGE_VALUE_THRESHOLD ? 1023 : 0;//255: 0/ 1023:0
imageOut[i*width + j] = (CLAMP_8bit(((unsigned char)(centerPixel)+HIGH_BOOST_FACTOR*(unsigned char)(centerPixel-sum/FILTER_AREA))));
}
}
}
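// In filter terms: sum / FILTER_AREA is the local box-blur mean, so the
// assignment above is out = CLAMP_8bit(center + A * (center - blur)) with
// A = HIGH_BOOST_FACTOR, i.e. classic high-boost (unsharp-mask) sharpening.
// Note that (center - blur) is cast to unsigned char before scaling, so
// negative differences are not preserved as signed values.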
// Host code
int main(int argc, char** argv)
{
ParseArguments(argc, argv);
struct bmp_header bmp;
struct dib_header dib;
unsigned char *palete = NULL;
unsigned char *data = NULL, *out = NULL;
printf("Running %s filter\n", Filter);
BitMapRead(BMPInFile, &bmp, &dib, &data, &palete);
out = (unsigned char *)malloc(dib.image_size);
printf("Computing the CPU output\n");
printf("Image details: %d by %d = %d , imagesize = %d\n", dib.width, dib.height, dib.width * dib.height,dib.image_size);
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Intialize the timer to zero cycles
cutilCheckError(cutCreateTimer(&timer_CPU));
cutilCheckError(cutCreateTimer(&timer_GPU));
/////////////////////////////// cpu sobel call start here /////////////////////////////////////////////////
// Start the CPU timer////
cutilCheckError(cutStartTimer(timer_CPU));
//CPU_Sobel(data, out, dib.width, dib.height);
CPU_boost(data, out, dib.width, dib.height);
// stop CPU timer ///
cutilCheckError(cutStopTimer(timer_CPU));
////////////// cpu sobel call end here ///////////////////////////////////////////////
BitMapWrite("CPU_boost.bmp", &bmp, &dib, out, palete);
printf("Done with CPU output\n");
printf("Allocating %d bytes for image \n", dib.image_size);
cutilSafeCall( hipMalloc( (void **)&d_In, dib.image_size*sizeof(unsigned char)) );
cutilSafeCall( hipMalloc( (void **)&d_Out, dib.image_size*sizeof(unsigned char)) );
hipMemcpy(d_In, data, dib.image_size*sizeof(unsigned char), hipMemcpyHostToDevice);
/////////////////////// calling kernel here ////////////////////////////////////////////////////
// Start the GPU timer////
cutilCheckError(cutStartTimer(timer_GPU));
FilterWrapper(data, dib.width, dib.height);
// stop GPU timer ///
cutilCheckError(cutStopTimer(timer_GPU));
//////////////////////////////// kernel call end //////////////////////////////////////////////
// Copy image back to host
hipMemcpy(out, d_Out, dib.image_size*sizeof(unsigned char), hipMemcpyDeviceToHost);
// Write output image
BitMapWrite(BMPOutFile, &bmp, &dib, out, palete);
// print timers
printf ("CPU Execution time: %f (ms) \n ", cutGetTimerValue(timer_CPU));
printf ("GPU Execution time: %f (ms) \n ", cutGetTimerValue(timer_GPU));
Cleanup();
}
void Cleanup(void)
{
//Destroy (Free) timer
cutilCheckError(cutDeleteTimer(timer_CPU));
cutilCheckError(cutDeleteTimer(timer_GPU));
cutilSafeCall( hipDeviceReset() );
exit(0);
}
void FilterWrapper(unsigned char* pImageIn, int Width, int Height)
{
// Design grid disection around tile size
int gridWidth = (Width + TILE_WIDTH - 1) / TILE_WIDTH;
int gridHeight = (Height + TILE_HEIGHT - 1) / TILE_HEIGHT;
dim3 dimGrid(gridWidth, gridHeight);
// But actually invoke larger blocks to take care of surrounding shared memory
dim3 dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
switch(FilterMode) {
case SOBEL_FILTER:
printf("Sobel Filter \n");
hipLaunchKernelGGL(( SobelFilter), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_In, d_Out, Width, Height);
cutilCheckMsg("kernel launch failure");
break;
case AVERAGE_FILTER:
printf("Average Filter \n");
hipLaunchKernelGGL(( AverageFilter), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_In, d_Out, Width, Height);
cutilCheckMsg("kernel launch failure");
break;
case HIGH_BOOST_FILTER:
printf("Boost Filter \n");
hipLaunchKernelGGL(( HighBoostFilter), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_In, d_Out, Width, Height);
cutilCheckMsg("kernel launch failure");
break;
}
cutilSafeCall( hipDeviceSynchronize() );
}
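// Tiling note: with TILE_WIDTH = TILE_HEIGHT = 6 and FILTER_RADIUS = 1 the
// launch above uses 8 x 8 = 64 threads per block, of which only the inner
// 6 x 6 are expected to produce output pixels; the outer ring of threads
// stages the one-pixel halo required by the 3 x 3 filters into shared
// memory (the kernels themselves live in the included filter_kernel source).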
// Parse program arguments
void ParseArguments(int argc, char** argv)
{
for (int i = 0; i < argc; ++i) {
if (strcmp(argv[i], "--file") == 0 || strcmp(argv[i], "-file") == 0) {
BMPInFile = argv[i+1];
i = i + 1;
}
if (strcmp(argv[i], "--out") == 0 || strcmp(argv[i], "-out") == 0) {
BMPOutFile = argv[i+1];
i = i + 1;
}
if (strcmp(argv[i], "--filter") == 0 || strcmp(argv[i], "-filter") == 0) {
Filter = argv[i+1];
i = i + 1;
if (strcmp(Filter, "sobel") == 0)
FilterMode = SOBEL_FILTER;
else if (strcmp(Filter, "average") == 0)
FilterMode = AVERAGE_FILTER;
else if (strcmp(Filter, "boost") == 0)
FilterMode = HIGH_BOOST_FILTER;
}
}
}
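// Example invocation (flags as parsed above; binary name illustrative):
//   ./filters --file lena.bmp --out output.bmp --filter boost
// where --filter accepts sobel, average or boost and defaults to sobel.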
| 7e66579cf90bf5341481a2e466fa5b7b0a21c58e.cu | //
// Filters
//
// Includes: system
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdint.h>
#include <errno.h>
#include <assert.h>
#include <string.h>
#include <sys/io.h>
#include <cutil_inline.h>
// Includes: local
#include "bmp.h"
enum {SOBEL_FILTER=1, AVERAGE_FILTER, HIGH_BOOST_FILTER};
#define CLAMP_8bit(x) max(0, min(255, (x)))//1023
char *BMPInFile = "lena.bmp";//lena/dublin
char *BMPOutFile = "output.bmp";
char *Filter = "sobel";
int FilterMode = SOBEL_FILTER;
// create and stat timer as unsigned interger
unsigned int timer_CPU =0;
unsigned int timer_GPU =0;
// Functions
void Cleanup(void);
void ParseArguments(int, char**);
void FilterWrapper(unsigned char* pImageIn, int Width, int Height);
// Kernels
__global__ void SobelFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height);
__global__ void AverageFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height);
__global__ void HighBoostFilter(unsigned char *g_DataIn, unsigned char *g_DataOut, int width, int height);
/* Device Memory */
unsigned char *d_In;
unsigned char *d_Out;
// Setup for kernel size
const int TILE_WIDTH = 6;
const int TILE_HEIGHT = 6;
const int FILTER_RADIUS = 1;
// const int FILTER_RADIUS = 3;
const int FILTER_DIAMETER = 2 * FILTER_RADIUS + 1;
const int FILTER_AREA = FILTER_DIAMETER * FILTER_DIAMETER;
const int BLOCK_WIDTH = TILE_WIDTH + 2*FILTER_RADIUS;
const int BLOCK_HEIGHT = TILE_HEIGHT + 2*FILTER_RADIUS;
const int EDGE_VALUE_THRESHOLD = 70;
const int HIGH_BOOST_FACTOR = 10;
#include "filter_kernel.cu"
void BitMapRead(char *file, struct bmp_header *bmp, struct dib_header *dib, unsigned char **data, unsigned char **palete)
{
size_t palete_size;
int fd;
if((fd = open(file, O_RDONLY )) < 0)
FATAL("Open Source");
if(read(fd, bmp, BMP_SIZE) != BMP_SIZE)
FATAL("Read BMP Header");
if(read(fd, dib, DIB_SIZE) != DIB_SIZE)
FATAL("Read DIB Header");
assert(dib->bpp == 8);
palete_size = bmp->offset - BMP_SIZE - DIB_SIZE;
if(palete_size > 0) {
*palete = (unsigned char *)malloc(palete_size);
int go = read(fd, *palete, palete_size);
if (go != palete_size) {
FATAL("Read Palete");
}
}
*data = (unsigned char *)malloc(dib->image_size);
if(read(fd, *data, dib->image_size) != dib->image_size)
FATAL("Read Image");
close(fd);
}
void BitMapWrite(char *file, struct bmp_header *bmp, struct dib_header *dib, unsigned char *data, unsigned char *palete)
{
size_t palete_size;
int fd;
palete_size = bmp->offset - BMP_SIZE - DIB_SIZE;
if((fd = open(file, O_WRONLY | O_CREAT | O_TRUNC,
S_IRUSR | S_IWUSR |S_IRGRP)) < 0)
FATAL("Open Destination");
if(write(fd, bmp, BMP_SIZE) != BMP_SIZE)
FATAL("Write BMP Header");
if(write(fd, dib, DIB_SIZE) != DIB_SIZE)
FATAL("Write BMP Header");
if(palete_size != 0) {
if(write(fd, palete, palete_size) != palete_size)
FATAL("Write Palete");
}
if(write(fd, data, dib->image_size) != dib->image_size)
FATAL("Write Image");
close(fd);
}
void CPU_Sobel(unsigned char* imageIn, unsigned char* imageOut, int width, int height)
{
int i, j, rows, cols, startCol, endCol, startRow, endRow;
const float SobelMatrix[9] = {-1,0,1,-2,0,2,-1,0,1};
rows = height;
cols = width;
// Initialize all output pixels to zero
for(i=0; i<rows; i++) {
for(j=0; j<cols; j++) {
imageOut[i*width + j] = 0;
}
}
startCol = 1;
endCol = cols - 1;
startRow = 1;
endRow = rows - 1;
// Go through all inner pizel positions
for(i=startRow; i<endRow; i++) {
for(j=startCol; j<endCol; j++) {
// sum up the 9 values to calculate both the direction x and direction y
float sumX = 0, sumY=0;
for(int dy = -FILTER_RADIUS; dy <= FILTER_RADIUS; dy++) {
for(int dx = -FILTER_RADIUS; dx <= FILTER_RADIUS; dx++) {
float Pixel = (float)(imageIn[i*width + j + (dy * width + dx)]);
sumX += Pixel * SobelMatrix[(dy + FILTER_RADIUS) * FILTER_DIAMETER + (dx+FILTER_RADIUS)];
sumY += Pixel * SobelMatrix[(dx + FILTER_RADIUS) * FILTER_DIAMETER + (dy+FILTER_RADIUS)];
}
}
imageOut[i*width + j] = (abs(sumX) + abs(sumY)) > EDGE_VALUE_THRESHOLD ? 255 : 0;//255: 0/ 1023:0
}
}
}
void CPU_boost(unsigned char* imageIn, unsigned char* imageOut, int width, int height)
{
int i, j, rows, cols, startCol, endCol, startRow, endRow;
// const float SobelMatrix[9] = {-1,0,1,-2,0,2,-1,0,1};
unsigned char centerPixel;
rows = height;
cols = width;
// Initialize all output pixels to zero
for(i=0; i<rows; i++) {
for(j=0; j<cols; j++) {
imageOut[i*width + j] = 0;
}
}
startCol = 1;
endCol = cols - 1;
startRow = 1;
endRow = rows - 1;
// Go through all inner pizel positions
for(i=startRow; i<endRow; i++) {
for(j=startCol; j<endCol; j++) {
centerPixel = imageIn[i*width+j];
// sum up the 9 values to calculate both the direction x and direction y
float sum = 0;
for(int dy = -FILTER_RADIUS; dy <= FILTER_RADIUS; dy++) {
for(int dx = -FILTER_RADIUS; dx <= FILTER_RADIUS; dx++) {
// centerPixel = (float) (imageIn[i*width+j]);
float Pixel = (float)(imageIn[i*width + j + (dy * width + dx)]);
// sumX += Pixel * SobelMatrix[(dy + FILTER_RADIUS) * FILTER_DIAMETER + (dx+FILTER_RADIUS)];
// sumY += Pixel * SobelMatrix[(dx + FILTER_RADIUS) * FILTER_DIAMETER + (dy+FILTER_RADIUS)];
sum += Pixel;
}
}
// imageOut[i*width + j] = (abs(sumX) + abs(sumY)) > EDGE_VALUE_THRESHOLD ? 1023 : 0;//255: 0/ 1023:0
imageOut[i*width + j] = (CLAMP_8bit(((unsigned char)(centerPixel)+HIGH_BOOST_FACTOR*(unsigned char)(centerPixel-sum/FILTER_AREA))));
}
}
}
// Host code
int main(int argc, char** argv)
{
ParseArguments(argc, argv);
struct bmp_header bmp;
struct dib_header dib;
unsigned char *palete = NULL;
unsigned char *data = NULL, *out = NULL;
printf("Running %s filter\n", Filter);
BitMapRead(BMPInFile, &bmp, &dib, &data, &palete);
out = (unsigned char *)malloc(dib.image_size);
printf("Computing the CPU output\n");
printf("Image details: %d by %d = %d , imagesize = %d\n", dib.width, dib.height, dib.width * dib.height,dib.image_size);
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Intialize the timer to zero cycles
cutilCheckError(cutCreateTimer(&timer_CPU));
cutilCheckError(cutCreateTimer(&timer_GPU));
/////////////////////////////// cpu sobel call start here /////////////////////////////////////////////////
// Start the CPU timer////
cutilCheckError(cutStartTimer(timer_CPU));
//CPU_Sobel(data, out, dib.width, dib.height);
CPU_boost(data, out, dib.width, dib.height);
// stop CPU timer ///
cutilCheckError(cutStopTimer(timer_CPU));
////////////// cpu sobel call end here ///////////////////////////////////////////////
BitMapWrite("CPU_boost.bmp", &bmp, &dib, out, palete);
printf("Done with CPU output\n");
printf("Allocating %d bytes for image \n", dib.image_size);
cutilSafeCall( cudaMalloc( (void **)&d_In, dib.image_size*sizeof(unsigned char)) );
cutilSafeCall( cudaMalloc( (void **)&d_Out, dib.image_size*sizeof(unsigned char)) );
cudaMemcpy(d_In, data, dib.image_size*sizeof(unsigned char), cudaMemcpyHostToDevice);
/////////////////////// calling kernel here ////////////////////////////////////////////////////
// Start the GPU timer////
cutilCheckError(cutStartTimer(timer_GPU));
FilterWrapper(data, dib.width, dib.height);
// stop GPU timer ///
cutilCheckError(cutStopTimer(timer_GPU));
//////////////////////////////// kernel call end //////////////////////////////////////////////
// Copy image back to host
cudaMemcpy(out, d_Out, dib.image_size*sizeof(unsigned char), cudaMemcpyDeviceToHost);
// Write output image
BitMapWrite(BMPOutFile, &bmp, &dib, out, palete);
// print timers
printf ("CPU Execution time: %f (ms) \n ", cutGetTimerValue(timer_CPU));
printf ("GPU Execution time: %f (ms) \n ", cutGetTimerValue(timer_GPU));
Cleanup();
}
void Cleanup(void)
{
//Destroy (Free) timer
cutilCheckError(cutDeleteTimer(timer_CPU));
cutilCheckError(cutDeleteTimer(timer_GPU));
cutilSafeCall( cudaThreadExit() );
exit(0);
}
void FilterWrapper(unsigned char* pImageIn, int Width, int Height)
{
// Design grid disection around tile size
int gridWidth = (Width + TILE_WIDTH - 1) / TILE_WIDTH;
int gridHeight = (Height + TILE_HEIGHT - 1) / TILE_HEIGHT;
dim3 dimGrid(gridWidth, gridHeight);
// But actually invoke larger blocks to take care of surrounding shared memory
dim3 dimBlock(BLOCK_WIDTH, BLOCK_HEIGHT);
switch(FilterMode) {
case SOBEL_FILTER:
printf("Sobel Filter \n");
SobelFilter<<< dimGrid, dimBlock >>>(d_In, d_Out, Width, Height);
cutilCheckMsg("kernel launch failure");
break;
case AVERAGE_FILTER:
printf("Average Filter \n");
AverageFilter<<< dimGrid, dimBlock >>>(d_In, d_Out, Width, Height);
cutilCheckMsg("kernel launch failure");
break;
case HIGH_BOOST_FILTER:
printf("Boost Filter \n");
HighBoostFilter<<< dimGrid, dimBlock >>>(d_In, d_Out, Width, Height);
cutilCheckMsg("kernel launch failure");
break;
}
cutilSafeCall( cudaThreadSynchronize() );
}
// Parse program arguments
void ParseArguments(int argc, char** argv)
{
for (int i = 0; i < argc; ++i) {
if (strcmp(argv[i], "--file") == 0 || strcmp(argv[i], "-file") == 0) {
BMPInFile = argv[i+1];
i = i + 1;
}
if (strcmp(argv[i], "--out") == 0 || strcmp(argv[i], "-out") == 0) {
BMPOutFile = argv[i+1];
i = i + 1;
}
if (strcmp(argv[i], "--filter") == 0 || strcmp(argv[i], "-filter") == 0) {
Filter = argv[i+1];
i = i + 1;
if (strcmp(Filter, "sobel") == 0)
FilterMode = SOBEL_FILTER;
else if (strcmp(Filter, "average") == 0)
FilterMode = AVERAGE_FILTER;
else if (strcmp(Filter, "boost") == 0)
FilterMode = HIGH_BOOST_FILTER;
}
}
}
|
b4cbf709b8f9628790b043d203627f0d694d0cb3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_advec_mom_kernel1_x_nonvector [5][2];
static int dims_advec_mom_kernel1_x_nonvector_h [5][2] = {0};
//user function
__device__
inline void advec_mom_kernel1_x_nonvector_gpu(const ACC<double> &node_flux,
const ACC<double> &node_mass_pre,
ACC<double> &mom_flux,
const ACC<double> &celldx,
const ACC<double> &vel1) {
double sigma, wind, width;
double vdiffuw, vdiffdw, auw, adw, limiter;
int upwind, donor, downwind, dif;
double advec_vel_temp;
if( (node_flux(0,0,0)) < 0.0) {
upwind = 2;
donor = 1;
downwind = 0;
dif = donor;
}
else {
upwind = -1;
donor = 0;
downwind = 1;
dif = upwind;
}
sigma = fabs(node_flux(0,0,0))/node_mass_pre(donor,0,0);
width = celldx(0,0,0);
vdiffuw = vel1(donor,0,0) - vel1(upwind,0,0);
vdiffdw = vel1(downwind,0,0) - vel1(donor,0,0);
limiter=0.0;
if(vdiffuw*vdiffdw > 0.0) {
auw = fabs(vdiffuw);
adw = fabs(vdiffdw);
wind = 1.0;
if(vdiffdw <= 0.0) wind = -1.0;
limiter=wind*MIN(width*((2.0-sigma)*adw/width+(1.0+sigma)*auw/celldx(dif,0,0))/6.0, MIN(auw, adw));
}
advec_vel_temp = vel1(donor,0,0) + (1.0 - sigma) * limiter;
mom_flux(0,0,0) = advec_vel_temp * node_flux(0,0,0);
}
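// Scheme note: the sign of node_flux selects the upwind/donor/downwind
// cells, sigma is the ratio of the transported nodal mass to the donor
// node mass, and the limiter blends the upwind and downwind velocity
// differences so the interpolated advection velocity stays monotone; this
// is the x-direction momentum-advection limiter from the CloverLeaf 3D
// mini-app (OPS version).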
__global__ void ops_advec_mom_kernel1_x_nonvector(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_x_nonvector[0][0] + idx_z * 1*1 * dims_advec_mom_kernel1_x_nonvector[0][0] * dims_advec_mom_kernel1_x_nonvector[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_x_nonvector[1][0] + idx_z * 1*1 * dims_advec_mom_kernel1_x_nonvector[1][0] * dims_advec_mom_kernel1_x_nonvector[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_x_nonvector[2][0] + idx_z * 1*1 * dims_advec_mom_kernel1_x_nonvector[2][0] * dims_advec_mom_kernel1_x_nonvector[2][1];
arg3 += idx_x * 1*1 + idx_y * 0*1 * dims_advec_mom_kernel1_x_nonvector[3][0] + idx_z * 0*1 * dims_advec_mom_kernel1_x_nonvector[3][0] * dims_advec_mom_kernel1_x_nonvector[3][1];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_x_nonvector[4][0] + idx_z * 1*1 * dims_advec_mom_kernel1_x_nonvector[4][0] * dims_advec_mom_kernel1_x_nonvector[4][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
const ACC<double> argp0(dims_advec_mom_kernel1_x_nonvector[0][0], dims_advec_mom_kernel1_x_nonvector[0][1], arg0);
const ACC<double> argp1(dims_advec_mom_kernel1_x_nonvector[1][0], dims_advec_mom_kernel1_x_nonvector[1][1], arg1);
ACC<double> argp2(dims_advec_mom_kernel1_x_nonvector[2][0], dims_advec_mom_kernel1_x_nonvector[2][1], arg2);
const ACC<double> argp3(dims_advec_mom_kernel1_x_nonvector[3][0], dims_advec_mom_kernel1_x_nonvector[3][1], arg3);
const ACC<double> argp4(dims_advec_mom_kernel1_x_nonvector[4][0], dims_advec_mom_kernel1_x_nonvector[4][1], arg4);
advec_mom_kernel1_x_nonvector_gpu(argp0, argp1, argp2, argp3,
argp4);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_mom_kernel1_x_nonvector(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4) {
#else
void ops_par_loop_advec_mom_kernel1_x_nonvector_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,5,range,129)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(129,"advec_mom_kernel1_x_nonvector");
OPS_kernels[129].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 5,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
if (xdim0 != dims_advec_mom_kernel1_x_nonvector_h[0][0] || ydim0 != dims_advec_mom_kernel1_x_nonvector_h[0][1] || xdim1 != dims_advec_mom_kernel1_x_nonvector_h[1][0] || ydim1 != dims_advec_mom_kernel1_x_nonvector_h[1][1] || xdim2 != dims_advec_mom_kernel1_x_nonvector_h[2][0] || ydim2 != dims_advec_mom_kernel1_x_nonvector_h[2][1] || xdim3 != dims_advec_mom_kernel1_x_nonvector_h[3][0] || ydim3 != dims_advec_mom_kernel1_x_nonvector_h[3][1] || xdim4 != dims_advec_mom_kernel1_x_nonvector_h[4][0] || ydim4 != dims_advec_mom_kernel1_x_nonvector_h[4][1]) {
dims_advec_mom_kernel1_x_nonvector_h[0][0] = xdim0;
dims_advec_mom_kernel1_x_nonvector_h[0][1] = ydim0;
dims_advec_mom_kernel1_x_nonvector_h[1][0] = xdim1;
dims_advec_mom_kernel1_x_nonvector_h[1][1] = ydim1;
dims_advec_mom_kernel1_x_nonvector_h[2][0] = xdim2;
dims_advec_mom_kernel1_x_nonvector_h[2][1] = ydim2;
dims_advec_mom_kernel1_x_nonvector_h[3][0] = xdim3;
dims_advec_mom_kernel1_x_nonvector_h[3][1] = ydim3;
dims_advec_mom_kernel1_x_nonvector_h[4][0] = xdim4;
dims_advec_mom_kernel1_x_nonvector_h[4][1] = ydim4;
cutilSafeCall(hipMemcpyToSymbol( dims_advec_mom_kernel1_x_nonvector, dims_advec_mom_kernel1_x_nonvector_h, sizeof(dims_advec_mom_kernel1_x_nonvector)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
char *p_a[5];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 5);
ops_halo_exchanges(args,5,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[129].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_advec_mom_kernel1_x_nonvector), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4],x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[129].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 5);
ops_set_halo_dirtybit3(&args[2],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[129].mpi_time += t2-t1;
OPS_kernels[129].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[129].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[129].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[129].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[129].transfer += ops_compute_transfer(dim, start, end, &arg4);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_mom_kernel1_x_nonvector(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 129;
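  // djb2-style hash (seed 5381, hash = hash*33 + value) accumulated over the kernel index, iteration range and argument dat indices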
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 129;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 5;
desc->args = (ops_arg*)malloc(5*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->function = ops_par_loop_advec_mom_kernel1_x_nonvector_execute;
if (OPS_diags > 1) {
ops_timing_realloc(129,"advec_mom_kernel1_x_nonvector");
}
ops_enqueue_kernel(desc);
}
#endif
| b4cbf709b8f9628790b043d203627f0d694d0cb3.cu | //
// auto-generated by ops.py
//
__constant__ int dims_advec_mom_kernel1_x_nonvector [5][2];
static int dims_advec_mom_kernel1_x_nonvector_h [5][2] = {0};
//user function
__device__
inline void advec_mom_kernel1_x_nonvector_gpu(const ACC<double> &node_flux,
const ACC<double> &node_mass_pre,
ACC<double> &mom_flux,
const ACC<double> &celldx,
const ACC<double> &vel1) {
double sigma, wind, width;
double vdiffuw, vdiffdw, auw, adw, limiter;
int upwind, donor, downwind, dif;
double advec_vel_temp;
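  // the sign of the node flux selects which x-offsets act as the upwind, donor and downwind cells for the donor-cell update below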
if( (node_flux(0,0,0)) < 0.0) {
upwind = 2;
donor = 1;
downwind = 0;
dif = donor;
}
else {
upwind = -1;
donor = 0;
downwind = 1;
dif = upwind;
}
sigma = fabs(node_flux(0,0,0))/node_mass_pre(donor,0,0);
width = celldx(0,0,0);
vdiffuw = vel1(donor,0,0) - vel1(upwind,0,0);
vdiffdw = vel1(downwind,0,0) - vel1(donor,0,0);
limiter=0.0;
if(vdiffuw*vdiffdw > 0.0) {
auw = fabs(vdiffuw);
adw = fabs(vdiffdw);
wind = 1.0;
if(vdiffdw <= 0.0) wind = -1.0;
limiter=wind*MIN(width*((2.0-sigma)*adw/width+(1.0+sigma)*auw/celldx(dif,0,0))/6.0, MIN(auw, adw));
}
advec_vel_temp = vel1(donor,0,0) + (1.0 - sigma) * limiter;
mom_flux(0,0,0) = advec_vel_temp * node_flux(0,0,0);
}
__global__ void ops_advec_mom_kernel1_x_nonvector(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_x_nonvector[0][0] + idx_z * 1*1 * dims_advec_mom_kernel1_x_nonvector[0][0] * dims_advec_mom_kernel1_x_nonvector[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_x_nonvector[1][0] + idx_z * 1*1 * dims_advec_mom_kernel1_x_nonvector[1][0] * dims_advec_mom_kernel1_x_nonvector[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_x_nonvector[2][0] + idx_z * 1*1 * dims_advec_mom_kernel1_x_nonvector[2][0] * dims_advec_mom_kernel1_x_nonvector[2][1];
arg3 += idx_x * 1*1 + idx_y * 0*1 * dims_advec_mom_kernel1_x_nonvector[3][0] + idx_z * 0*1 * dims_advec_mom_kernel1_x_nonvector[3][0] * dims_advec_mom_kernel1_x_nonvector[3][1];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_advec_mom_kernel1_x_nonvector[4][0] + idx_z * 1*1 * dims_advec_mom_kernel1_x_nonvector[4][0] * dims_advec_mom_kernel1_x_nonvector[4][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
const ACC<double> argp0(dims_advec_mom_kernel1_x_nonvector[0][0], dims_advec_mom_kernel1_x_nonvector[0][1], arg0);
const ACC<double> argp1(dims_advec_mom_kernel1_x_nonvector[1][0], dims_advec_mom_kernel1_x_nonvector[1][1], arg1);
ACC<double> argp2(dims_advec_mom_kernel1_x_nonvector[2][0], dims_advec_mom_kernel1_x_nonvector[2][1], arg2);
const ACC<double> argp3(dims_advec_mom_kernel1_x_nonvector[3][0], dims_advec_mom_kernel1_x_nonvector[3][1], arg3);
const ACC<double> argp4(dims_advec_mom_kernel1_x_nonvector[4][0], dims_advec_mom_kernel1_x_nonvector[4][1], arg4);
advec_mom_kernel1_x_nonvector_gpu(argp0, argp1, argp2, argp3,
argp4);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_mom_kernel1_x_nonvector(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4) {
#else
void ops_par_loop_advec_mom_kernel1_x_nonvector_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,5,range,129)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(129,"advec_mom_kernel1_x_nonvector");
OPS_kernels[129].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 5,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
if (xdim0 != dims_advec_mom_kernel1_x_nonvector_h[0][0] || ydim0 != dims_advec_mom_kernel1_x_nonvector_h[0][1] || xdim1 != dims_advec_mom_kernel1_x_nonvector_h[1][0] || ydim1 != dims_advec_mom_kernel1_x_nonvector_h[1][1] || xdim2 != dims_advec_mom_kernel1_x_nonvector_h[2][0] || ydim2 != dims_advec_mom_kernel1_x_nonvector_h[2][1] || xdim3 != dims_advec_mom_kernel1_x_nonvector_h[3][0] || ydim3 != dims_advec_mom_kernel1_x_nonvector_h[3][1] || xdim4 != dims_advec_mom_kernel1_x_nonvector_h[4][0] || ydim4 != dims_advec_mom_kernel1_x_nonvector_h[4][1]) {
dims_advec_mom_kernel1_x_nonvector_h[0][0] = xdim0;
dims_advec_mom_kernel1_x_nonvector_h[0][1] = ydim0;
dims_advec_mom_kernel1_x_nonvector_h[1][0] = xdim1;
dims_advec_mom_kernel1_x_nonvector_h[1][1] = ydim1;
dims_advec_mom_kernel1_x_nonvector_h[2][0] = xdim2;
dims_advec_mom_kernel1_x_nonvector_h[2][1] = ydim2;
dims_advec_mom_kernel1_x_nonvector_h[3][0] = xdim3;
dims_advec_mom_kernel1_x_nonvector_h[3][1] = ydim3;
dims_advec_mom_kernel1_x_nonvector_h[4][0] = xdim4;
dims_advec_mom_kernel1_x_nonvector_h[4][1] = ydim4;
cutilSafeCall(cudaMemcpyToSymbol( dims_advec_mom_kernel1_x_nonvector, dims_advec_mom_kernel1_x_nonvector_h, sizeof(dims_advec_mom_kernel1_x_nonvector)));
}
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
char *p_a[5];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 5);
ops_halo_exchanges(args,5,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[129].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_advec_mom_kernel1_x_nonvector<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4],x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[129].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 5);
ops_set_halo_dirtybit3(&args[2],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[129].mpi_time += t2-t1;
OPS_kernels[129].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[129].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[129].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[129].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[129].transfer += ops_compute_transfer(dim, start, end, &arg4);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_mom_kernel1_x_nonvector(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 129;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 129;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 5;
desc->args = (ops_arg*)malloc(5*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->function = ops_par_loop_advec_mom_kernel1_x_nonvector_execute;
if (OPS_diags > 1) {
ops_timing_realloc(129,"advec_mom_kernel1_x_nonvector");
}
ops_enqueue_kernel(desc);
}
#endif
|
4afb50111818ca6502d2555deea1b6ae43cd4a81.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define N 5
__global__ void add(int* a, int* b, int* c)
{
int tid;
tid = threadIdx.x;
c[tid] = a[tid] + b[tid];
}
int main(void)
{
int a[N],b[N],c[N];
int size=sizeof(int);
int i;
for(i=0;i<N;i++)
{
a[i] = i;
b[i] = 2*i;
}
int *d_a, *d_b, *d_c;
hipMalloc((void **)&d_a,N*size);
hipMalloc((void **)&d_b,N*size);
hipMalloc((void **)&d_c,N*size);
hipMemcpy(d_a,a,N*size,hipMemcpyHostToDevice);
hipMemcpy(d_b,b,N*size,hipMemcpyHostToDevice);
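  // one block of N threads; thread i computes c[i] = a[i] + b[i]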
hipLaunchKernelGGL(( add), dim3(1),dim3(N), 0, 0, d_a,d_b,d_c);
hipMemcpy(c,d_c,size*N,hipMemcpyDeviceToHost);
for(i=0;i<N;i++)
printf("Sum is %d\n",c[i]);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
} | 4afb50111818ca6502d2555deea1b6ae43cd4a81.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define N 5
__global__ void add(int* a, int* b, int* c)
{
int tid;
tid = threadIdx.x;
c[tid] = a[tid] + b[tid];
}
int main(void)
{
int a[N],b[N],c[N];
int size=sizeof(int);
int i;
for(i=0;i<N;i++)
{
a[i] = i;
b[i] = 2*i;
}
int *d_a, *d_b, *d_c;
cudaMalloc((void **)&d_a,N*size);
cudaMalloc((void **)&d_b,N*size);
cudaMalloc((void **)&d_c,N*size);
cudaMemcpy(d_a,a,N*size,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b,N*size,cudaMemcpyHostToDevice);
add<<<1,N>>>(d_a,d_b,d_c);
cudaMemcpy(c,d_c,size*N,cudaMemcpyDeviceToHost);
for(i=0;i<N;i++)
printf("Sum is %d\n",c[i]);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
} |
f921a30bd931941eae30a7d37fae7153d1e72ca6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <[email protected]>, created on 25.01.2019
//
#include <loops/special_kernels.h>
namespace sd {
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// kernel to swap the values of two NDArrays viewed as linear sequences
// input - theSecondBuffer/Shape from input NDArray
// output - theFirstBuffer/Shape from input NDArray
template <typename T>
static __global__ void swapUnsafeKernel(void* theFirstBuffer, Nd4jLong const* theFirstShape, void* theSecondBuffer, Nd4jLong const* theSecondShape) {
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int totalThreads = gridDim.x * blockDim.x;
__shared__ Nd4jLong resultLength, xEws, yEws;
__shared__ bool sameOffsets, sameOrders;
__shared__ T* input;
__shared__ T* output;
if (0 == threadIdx.x) {
resultLength = shape::length(theFirstShape);
input = reinterpret_cast<T*>(theSecondBuffer);
output = reinterpret_cast<T*>(theFirstBuffer);
sameOffsets = shape::haveSameShapeAndStrides(theFirstShape, theSecondShape);
sameOrders = shape::order(theFirstShape) == shape::order(theSecondShape);
xEws = shape::elementWiseStride(theFirstShape);
yEws = shape::elementWiseStride(theSecondShape);
}
__syncthreads();
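    // grid-stride loop over the flattened arrays; use element-wise strides when possible, otherwise fall back to per-index offsets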
for (int i = tid; i < resultLength; i += totalThreads) {
if(sameOrders && xEws > 0 && yEws > 0) {
sd::math::nd4j_swap(output[i*xEws], input[i*yEws]);
}
else if(sameOffsets) {
const auto offset = shape::getIndexOffset(i, theFirstShape);
sd::math::nd4j_swap(output[offset], input[offset]);
}
else{
const auto xOffset = shape::getIndexOffset(i, theFirstShape);
const auto yOffset = shape::getIndexOffset(i, theSecondShape);
sd::math::nd4j_swap(output[xOffset], input[yOffset]);
}
}
}
BUILD_SINGLE_TEMPLATE(template __global__ void swapUnsafeKernel, (void* theFirstBuffer, Nd4jLong const* theFirstShape, void* theSecondBuffer, Nd4jLong const* theSecondShape), LIBND4J_TYPES);
template <typename T>
void templatedSwapUnsafe(void* theFirstBuffer, Nd4jLong const* theFirstShape, void* theSecondBuffer, Nd4jLong const* theSecondShape, hipStream_t* theStream) {
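  // fixed launch configuration: 256 blocks of 512 threads with 8 KB of dynamic shared memory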
hipLaunchKernelGGL(( swapUnsafeKernel<T>), dim3(256), dim3(512), 8192, *theStream, theFirstBuffer, theFirstShape, theSecondBuffer, theSecondShape);
}
BUILD_SINGLE_TEMPLATE(template void templatedSwapUnsafe, (void* theFirstBuffer, Nd4jLong const* theFirstShape, void* theSecondBuffer, Nd4jLong const* theSecondShape, hipStream_t* theStream), LIBND4J_TYPES);
} | f921a30bd931941eae30a7d37fae7153d1e72ca6.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <[email protected]>, created on 25.01.2019
//
#include <loops/special_kernels.h>
namespace sd {
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// kernel to swap the values of two NDArrays viewed as linear sequences
// input - theSecondBuffer/Shape from input NDArray
// output - theFirstBuffer/Shape from input NDArray
template <typename T>
static __global__ void swapUnsafeKernel(void* theFirstBuffer, Nd4jLong const* theFirstShape, void* theSecondBuffer, Nd4jLong const* theSecondShape) {
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int totalThreads = gridDim.x * blockDim.x;
__shared__ Nd4jLong resultLength, xEws, yEws;
__shared__ bool sameOffsets, sameOrders;
__shared__ T* input;
__shared__ T* output;
if (0 == threadIdx.x) {
resultLength = shape::length(theFirstShape);
input = reinterpret_cast<T*>(theSecondBuffer);
output = reinterpret_cast<T*>(theFirstBuffer);
sameOffsets = shape::haveSameShapeAndStrides(theFirstShape, theSecondShape);
sameOrders = shape::order(theFirstShape) == shape::order(theSecondShape);
xEws = shape::elementWiseStride(theFirstShape);
yEws = shape::elementWiseStride(theSecondShape);
}
__syncthreads();
for (int i = tid; i < resultLength; i += totalThreads) {
if(sameOrders && xEws > 0 && yEws > 0) {
sd::math::nd4j_swap(output[i*xEws], input[i*yEws]);
}
else if(sameOffsets) {
const auto offset = shape::getIndexOffset(i, theFirstShape);
sd::math::nd4j_swap(output[offset], input[offset]);
}
else{
const auto xOffset = shape::getIndexOffset(i, theFirstShape);
const auto yOffset = shape::getIndexOffset(i, theSecondShape);
sd::math::nd4j_swap(output[xOffset], input[yOffset]);
}
}
}
BUILD_SINGLE_TEMPLATE(template __global__ void swapUnsafeKernel, (void* theFirstBuffer, Nd4jLong const* theFirstShape, void* theSecondBuffer, Nd4jLong const* theSecondShape), LIBND4J_TYPES);
template <typename T>
void templatedSwapUnsafe(void* theFirstBuffer, Nd4jLong const* theFirstShape, void* theSecondBuffer, Nd4jLong const* theSecondShape, cudaStream_t* theStream) {
swapUnsafeKernel<T><<<256, 512, 8192, *theStream>>>(theFirstBuffer, theFirstShape, theSecondBuffer, theSecondShape);
}
BUILD_SINGLE_TEMPLATE(template void templatedSwapUnsafe, (void* theFirstBuffer, Nd4jLong const* theFirstShape, void* theSecondBuffer, Nd4jLong const* theSecondShape, cudaStream_t* theStream), LIBND4J_TYPES);
} |
1768b55772567a471069c82d953f25954effa63c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* \file dnn/src/cuda/warp_perspective/forward.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#include "src/cuda/warp_perspective/common.h"
#include "src/cuda/utils.cuh"
#include "src/cuda/warp_perspective/common.cuh"
#include "src/cuda/error_info.cuh"
#include "src/common/rounding_converter.cuh"
#include "megdnn/dtype.h"
#include <cstdio>
#include "src/cuda/integer_subbyte_utils.cuh"
using namespace megdnn;
using namespace cuda;
using namespace warp_perspective;
using namespace integer_subbyte;
namespace {
template <typename ctype>
struct CtypeHelper;
template <>
struct CtypeHelper<float> {
static constexpr int bit_width = 32;
};
template <>
struct CtypeHelper<dt_float16> {
static constexpr int bit_width = 16;
};
template <>
struct CtypeHelper<dt_uint8> {
static constexpr int bit_width = 8;
};
template <>
struct CtypeHelper<dt_int8> {
static constexpr int bit_width = 8;
};
template <>
struct CtypeHelper<dt_qint4> {
static constexpr int bit_width = 4;
};
template <>
struct CtypeHelper<dt_quint4> {
static constexpr int bit_width = 4;
};
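// source visitors: DirectSrcVisitor addresses batch images linearly, while IndexedSrcVisitor remaps the batch through the mat_idx array and raises an async error on out-of-range indices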
template <typename ctype>
struct DirectSrcVisitor {
const void* ptr;
__device__ __forceinline__ const ctype* get(int batch, int im_size) {
return (ctype*)((char*)ptr + static_cast<int64_t>(batch) *
static_cast<int64_t>(im_size) *
CtypeHelper<ctype>::bit_width / 8);
}
void move_batch(size_t batch, size_t im_size) {
ptr = (char*)ptr + batch * im_size * CtypeHelper<ctype>::bit_width / 8;
}
};
template <typename ctype>
struct IndexedSrcVisitor {
const void* ptr;
const int* idx;
int N_SRC;
AsyncErrorInfo* error_info;
void* error_tracker;
__device__ __forceinline__ const ctype* get(int batch, int im_size) {
int orig_batch = batch;
batch = idx[batch];
if (batch < 0 || batch >= N_SRC) {
set_async_error_info(
error_info, error_tracker,
"mat_idx out of bound: mat_idx[%d]=%d src_batch=%d",
orig_batch, batch, N_SRC);
batch = 0;
}
return (ctype*)((char*)ptr + static_cast<int64_t>(batch) *
static_cast<int64_t>(im_size) *
CtypeHelper<ctype>::bit_width / 8);
}
void move_batch(size_t batch, size_t) { idx += batch; }
};
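// generic NCHW kernel: one thread per output pixel; the homography gives (iw, ih) and the four neighbours are blended bilinearly for every channel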
template <typename ctype, typename Getter, typename SrcVisitor,
typename OutputConverter>
__global__ void kern_general(SrcVisitor src, const float* __restrict mat,
ctype* __restrict dst, int C, int IH, int IW,
int OH, int OW) {
Getter getter;
OutputConverter output_converter;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
dst += blockIdx.z * C * OH * OW;
mat += blockIdx.z * 3 * 3;
if (ow < OW && oh < OH) {
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = getter(floor(iw) + 0, IW);
int iw1 = getter(floor(iw) + 1, IW);
int ih0 = getter(floor(ih) + 0, IH);
int ih1 = getter(floor(ih) + 1, IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
for (int c = 0; c < C; ++c) {
dst[oh * OW + ow] =
output_converter(sptr[ih0 * IW + iw0] * nalpha * nbeta +
sptr[ih0 * IW + iw1] * nalpha * pbeta +
sptr[ih1 * IW + iw0] * palpha * nbeta +
sptr[ih1 * IW + iw1] * palpha * pbeta);
sptr += IH * IW;
dst += OH * OW;
}
}
}
template <typename ctype, typename Getter, typename SrcVisitor,
typename OutputConverter>
__global__ void kern_general_nchw4(SrcVisitor src, const float* __restrict mat,
ctype* __restrict dst, int C, int IH, int IW,
int OH, int OW) {
Getter getter;
OutputConverter output_converter;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
dst += blockIdx.z * C * OH * OW;
mat += blockIdx.z * 3 * 3;
if (ow < OW && oh < OH) {
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = getter(floor(iw) + 0, IW);
int iw1 = getter(floor(iw) + 1, IW);
int ih0 = getter(floor(ih) + 0, IH);
int ih1 = getter(floor(ih) + 1, IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
int o_coor = (oh * OW + ow) << 2;
int i_coor_00 = (ih0 * IW + iw0) << 2;
int i_coor_01 = (ih0 * IW + iw1) << 2;
int i_coor_10 = (ih1 * IW + iw0) << 2;
int i_coor_11 = (ih1 * IW + iw1) << 2;
for (int c0 = 0, nr_chan = C / 4; c0 < nr_chan; ++c0) {
#pragma unroll
for (int c1 = 0; c1 < 4; ++c1) {
dst[o_coor + c1] =
output_converter(sptr[i_coor_00 + c1] * nalpha * nbeta +
sptr[i_coor_01 + c1] * nalpha * pbeta +
sptr[i_coor_10 + c1] * palpha * nbeta +
sptr[i_coor_11 + c1] * palpha * pbeta);
}
sptr += IH * IW * 4;
dst += OH * OW * 4;
}
}
}
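// blends four groups of eight unpacked 4-bit values with the bilinear weights and repacks the eight results into a single 32-bit word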
template <bool signedness, typename OutputConverter>
MEGDNN_DEVICE __forceinline__ int pack_output_func(
OutputConverter& output_converter, int (&s00)[8], int (&s01)[8],
int (&s10)[8], int (&s11)[8], float w00, float w01, float w10,
float w11) {
#define warp_perspective_transform(idx) \
static_cast<int>(output_converter(s00[idx] * w00 + s01[idx] * w01 + \
s10[idx] * w10 + s11[idx] * w11) \
.as_storage())
return transform_int8_to_b4x8<signedness>(
warp_perspective_transform(0), warp_perspective_transform(1),
warp_perspective_transform(2), warp_perspective_transform(3),
warp_perspective_transform(4), warp_perspective_transform(5),
warp_perspective_transform(6), warp_perspective_transform(7));
#undef warp_perspective_transform
}
template <typename ctype, typename Getter, typename SrcVisitor,
typename OutputConverter>
__global__ void kern_general_nchw64(SrcVisitor src, const float* __restrict mat,
ctype* __restrict dst, int C, int IH,
int IW, int OH, int OW) {
constexpr bool signedness = std::is_same<ctype, dt_qint4>::value;
Getter getter;
OutputConverter output_converter;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int c1 = ow % 2;
ow = ow / 2;
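    // 64 channels of 4 bits span two 128-bit words per pixel, so two consecutive threads share one output pixel; c1 selects this thread's half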
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
dst += blockIdx.z * C * OH * OW / 2;
mat += blockIdx.z * 3 * 3;
const int4* sptr_int4 = reinterpret_cast<const int4*>(sptr);
int4* dst_int4 = reinterpret_cast<int4*>(dst);
if (ow < OW && oh < OH) {
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = getter(floor(iw) + 0, IW);
int iw1 = getter(floor(iw) + 1, IW);
int ih0 = getter(floor(ih) + 0, IH);
int ih1 = getter(floor(ih) + 1, IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
float w00 = nalpha * nbeta;
float w01 = nalpha * pbeta;
float w10 = palpha * nbeta;
float w11 = palpha * pbeta;
int o_coor = (oh * OW + ow) << 1;
int i_coor_00 = (ih0 * IW + iw0) << 1;
int i_coor_01 = (ih0 * IW + iw1) << 1;
int i_coor_10 = (ih1 * IW + iw0) << 1;
int i_coor_11 = (ih1 * IW + iw1) << 1;
int s00[8], s01[8], s10[8], s11[8];
int4 s[4], d;
for (int c0 = 0, nr_chan = C / 64; c0 < nr_chan; ++c0) {
s[0] = __ldg(sptr_int4 + i_coor_00 + c1);
s[1] = __ldg(sptr_int4 + i_coor_01 + c1);
s[2] = __ldg(sptr_int4 + i_coor_10 + c1);
s[3] = __ldg(sptr_int4 + i_coor_11 + c1);
transform_b4x8_to_int8<signedness>(s00, s[0].x);
transform_b4x8_to_int8<signedness>(s01, s[1].x);
transform_b4x8_to_int8<signedness>(s10, s[2].x);
transform_b4x8_to_int8<signedness>(s11, s[3].x);
d.x = pack_output_func<signedness>(output_converter, s00, s01, s10,
s11, w00, w01, w10, w11);
transform_b4x8_to_int8<signedness>(s00, s[0].y);
transform_b4x8_to_int8<signedness>(s01, s[1].y);
transform_b4x8_to_int8<signedness>(s10, s[2].y);
transform_b4x8_to_int8<signedness>(s11, s[3].y);
d.y = pack_output_func<signedness>(output_converter, s00, s01, s10,
s11, w00, w01, w10, w11);
transform_b4x8_to_int8<signedness>(s00, s[0].z);
transform_b4x8_to_int8<signedness>(s01, s[1].z);
transform_b4x8_to_int8<signedness>(s10, s[2].z);
transform_b4x8_to_int8<signedness>(s11, s[3].z);
d.z = pack_output_func<signedness>(output_converter, s00, s01, s10,
s11, w00, w01, w10, w11);
transform_b4x8_to_int8<signedness>(s00, s[0].w);
transform_b4x8_to_int8<signedness>(s01, s[1].w);
transform_b4x8_to_int8<signedness>(s10, s[2].w);
transform_b4x8_to_int8<signedness>(s11, s[3].w);
d.w = pack_output_func<signedness>(output_converter, s00, s01, s10,
s11, w00, w01, w10, w11);
dst_int4[o_coor + c1] = d;
sptr_int4 += IH * IW * 2;
dst_int4 += OH * OW * 2;
}
}
}
template <typename ctype, typename SrcVisitor, typename OutputConverter>
__global__ void kern_const_border(SrcVisitor src, const float* __restrict mat,
ctype* __restrict dst, int C, int IH, int IW,
int OH, int OW, ctype bval) {
OutputConverter output_converter;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
dst += blockIdx.z * C * OH * OW;
mat += blockIdx.z * 3 * 3;
if (ow < OW && oh < OH) {
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = floor(iw) + 0;
int iw1 = floor(iw) + 1;
int ih0 = floor(ih) + 0;
int ih1 = floor(ih) + 1;
bool okw0 = (iw0 >= 0 && iw0 < IW);
bool okw1 = (iw1 >= 0 && iw1 < IW);
bool okh0 = (ih0 >= 0 && ih0 < IH);
bool okh1 = (ih1 >= 0 && ih1 < IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
for (int c = 0; c < C; ++c) {
ctype v00 = (okh0 && okw0 ? sptr[ih0 * IW + iw0] : bval);
ctype v01 = (okh0 && okw1 ? sptr[ih0 * IW + iw1] : bval);
ctype v10 = (okh1 && okw0 ? sptr[ih1 * IW + iw0] : bval);
ctype v11 = (okh1 && okw1 ? sptr[ih1 * IW + iw1] : bval);
ctype val = output_converter(
v00 * nalpha * nbeta + v01 * nalpha * pbeta +
v10 * palpha * nbeta + v11 * palpha * pbeta);
dst[oh * OW + ow] = val;
sptr += IH * IW;
dst += OH * OW;
}
}
}
template <typename ctype, typename SrcVisitor, typename OutputConverter>
__global__ void kern_const_border_nchw4(SrcVisitor src,
const float* __restrict mat,
ctype* __restrict dst, int C, int IH,
int IW, int OH, int OW, ctype bval) {
OutputConverter output_converter;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
dst += blockIdx.z * C * OH * OW;
mat += blockIdx.z * 3 * 3;
if (ow < OW && oh < OH) {
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = floor(iw) + 0;
int iw1 = floor(iw) + 1;
int ih0 = floor(ih) + 0;
int ih1 = floor(ih) + 1;
bool okw0 = (iw0 >= 0 && iw0 < IW);
bool okw1 = (iw1 >= 0 && iw1 < IW);
bool okh0 = (ih0 >= 0 && ih0 < IH);
bool okh1 = (ih1 >= 0 && ih1 < IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
int i_coor_00 = (ih0 * IW + iw0) << 2;
int i_coor_01 = (ih0 * IW + iw1) << 2;
int i_coor_10 = (ih1 * IW + iw0) << 2;
int i_coor_11 = (ih1 * IW + iw1) << 2;
int o_coor = (oh * OW + ow) << 2;
for (int c0 = 0, nr_chan = C / 4; c0 < nr_chan; ++c0) {
#pragma unroll
for (int c1 = 0; c1 < 4; ++c1) {
ctype v00 = (okh0 && okw0 ? sptr[i_coor_00 + c1] : bval);
ctype v01 = (okh0 && okw1 ? sptr[i_coor_01 + c1] : bval);
ctype v10 = (okh1 && okw0 ? sptr[i_coor_10 + c1] : bval);
ctype v11 = (okh1 && okw1 ? sptr[i_coor_11 + c1] : bval);
ctype val = output_converter(
v00 * nalpha * nbeta + v01 * nalpha * pbeta +
v10 * palpha * nbeta + v11 * palpha * pbeta);
dst[o_coor + c1] = val;
}
sptr += IH * IW * 4;
dst += OH * OW * 4;
}
}
}
template <typename ctype, typename SrcVisitor, typename OutputConverter>
__global__ void kern_const_border_nchw64(SrcVisitor src,
const float* __restrict mat,
ctype* __restrict dst, int C, int IH,
int IW, int OH, int OW, ctype bval) {
constexpr bool signedness = std::is_same<ctype, dt_qint4>::value;
OutputConverter output_converter;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int c1 = ow % 2;
ow = ow / 2;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
dst += blockIdx.z * C * OH * OW / 2;
mat += blockIdx.z * 3 * 3;
const int4* sptr_int4 = reinterpret_cast<const int4*>(sptr);
int4* dst_int4 = reinterpret_cast<int4*>(dst);
if (ow < OW && oh < OH) {
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = floor(iw) + 0;
int iw1 = floor(iw) + 1;
int ih0 = floor(ih) + 0;
int ih1 = floor(ih) + 1;
bool okw0 = (iw0 >= 0 && iw0 < IW);
bool okw1 = (iw1 >= 0 && iw1 < IW);
bool okh0 = (ih0 >= 0 && ih0 < IH);
bool okh1 = (ih1 >= 0 && ih1 < IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
float w00 = nalpha * nbeta;
float w01 = nalpha * pbeta;
float w10 = palpha * nbeta;
float w11 = palpha * pbeta;
int o_coor = (oh * OW + ow) << 1;
int i_coor_00 = (ih0 * IW + iw0) << 1;
int i_coor_01 = (ih0 * IW + iw1) << 1;
int i_coor_10 = (ih1 * IW + iw0) << 1;
int i_coor_11 = (ih1 * IW + iw1) << 1;
bool flag00 = okh0 && okw0, flag01 = okh0 && okw1,
flag10 = okh1 && okw0, flag11 = okh1 && okw1;
int8_t bval_4 = bval.as_storage() & 0xF;
int bval_8 = transform_int8_to_b4x8<signedness>(
bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4);
int4 bval_int4;
bval_int4.x = bval_8;
bval_int4.y = bval_8;
bval_int4.z = bval_8;
bval_int4.w = bval_8;
int s00[8], s01[8], s10[8], s11[8];
int4 s[4], d;
for (int c0 = 0, nr_chan = C / 64; c0 < nr_chan; ++c0) {
if (flag00) {
s[0] = __ldg(sptr_int4 + i_coor_00 + c1);
} else {
s[0] = bval_int4;
}
if (flag01) {
s[1] = __ldg(sptr_int4 + i_coor_01 + c1);
} else {
s[1] = bval_int4;
}
if (flag10) {
s[2] = __ldg(sptr_int4 + i_coor_10 + c1);
} else {
s[2] = bval_int4;
}
if (flag11) {
s[3] = __ldg(sptr_int4 + i_coor_11 + c1);
} else {
s[3] = bval_int4;
}
transform_b4x8_to_int8<signedness>(s00, s[0].x);
transform_b4x8_to_int8<signedness>(s01, s[1].x);
transform_b4x8_to_int8<signedness>(s10, s[2].x);
transform_b4x8_to_int8<signedness>(s11, s[3].x);
d.x = pack_output_func<signedness>(output_converter, s00, s01, s10,
s11, w00, w01, w10, w11);
transform_b4x8_to_int8<signedness>(s00, s[0].y);
transform_b4x8_to_int8<signedness>(s01, s[1].y);
transform_b4x8_to_int8<signedness>(s10, s[2].y);
transform_b4x8_to_int8<signedness>(s11, s[3].y);
d.y = pack_output_func<signedness>(output_converter, s00, s01, s10,
s11, w00, w01, w10, w11);
transform_b4x8_to_int8<signedness>(s00, s[0].z);
transform_b4x8_to_int8<signedness>(s01, s[1].z);
transform_b4x8_to_int8<signedness>(s10, s[2].z);
transform_b4x8_to_int8<signedness>(s11, s[3].z);
d.z = pack_output_func<signedness>(output_converter, s00, s01, s10,
s11, w00, w01, w10, w11);
transform_b4x8_to_int8<signedness>(s00, s[0].w);
transform_b4x8_to_int8<signedness>(s01, s[1].w);
transform_b4x8_to_int8<signedness>(s10, s[2].w);
transform_b4x8_to_int8<signedness>(s11, s[3].w);
d.w = pack_output_func<signedness>(output_converter, s00, s01, s10,
s11, w00, w01, w10, w11);
dst_int4[o_coor + c1] = d;
sptr_int4 += IH * IW * 2;
dst_int4 += OH * OW * 2;
}
}
}
template <typename ctype, typename OutputConverter, int pack_c>
struct KernCoreNHWC {
MEGDNN_DEVICE __forceinline__ static void func(
char* dst_ptr, const char* src_ptr0, const char* src_ptr1,const char* src_ptr2, const char* src_ptr3, const int offset,
float w00, float w01, float w10, float w11,
OutputConverter& output_converter, const bool src0_ok, const bool src1_ok,
const bool src2_ok, const bool src3_ok, const ctype bval) {
static_assert(pack_c == 1, "static_assert pack_c == 1");
ctype v00 = src0_ok ? *(ctype*)(src_ptr0 + offset): bval;
ctype v01 = src1_ok ? *(ctype*)(src_ptr1 + offset): bval;
ctype v10 = src2_ok ? *(ctype*)(src_ptr2 + offset): bval;
ctype v11 = src3_ok ? *(ctype*)(src_ptr3 + offset): bval;
ctype res =
output_converter(v00 * w00+ v01 * w01 +
v10 * w10 + v11 * w11);
*(ctype*)(dst_ptr + offset) = res;
}
};
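// pack_c == 8: eight 4-bit channels are loaded and stored as one 32-bit word per tap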
template <typename ctype, typename OutputConverter>
struct KernCoreNHWC<ctype, OutputConverter, 8> {
MEGDNN_DEVICE __forceinline__ static void func(
char* dst_ptr, const char* src_ptr0, const char* src_ptr1,const char* src_ptr2, const char* src_ptr3, const int offset,
float w00, float w01, float w10, float w11,
OutputConverter& output_converter, const bool src0_ok, const bool src1_ok,
const bool src2_ok, const bool src3_ok, const ctype bval){
static_assert(std::is_same<ctype, dt_quint4>::value ||
std::is_same<ctype, dt_qint4>::value,
"assert qu4 or q4");
constexpr bool signedness = std::is_same<ctype, dt_qint4>::value;
int8_t bval_4 = bval.as_storage() & 0xF;
const int bval_int = transform_int8_to_b4x8<signedness>(
bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4);
int src_ori[4];
src_ori[0] = src0_ok ? *(int*)(src_ptr0 + offset) : bval_int;
src_ori[1] = src1_ok ? *(int*)(src_ptr1 + offset) : bval_int;
src_ori[2] = src2_ok ? *(int*)(src_ptr2 + offset) : bval_int;
src_ori[3] = src3_ok ? *(int*)(src_ptr3 + offset) : bval_int;
int src[4][8];
transform_b4x8_to_int8<signedness>(src[0], src_ori[0]);
transform_b4x8_to_int8<signedness>(src[1], src_ori[1]);
transform_b4x8_to_int8<signedness>(src[2], src_ori[2]);
transform_b4x8_to_int8<signedness>(src[3], src_ori[3]);
int res = pack_output_func<signedness>(output_converter, src[0], src[1],
src[2], src[3], w00, w01, w10,
w11);
*(int*)(dst_ptr + offset) = res;
}
};
template <typename ctype, typename OutputConverter>
struct KernCoreNHWC<ctype, OutputConverter, 16> {
MEGDNN_DEVICE __forceinline__ static void func(
char* dst_ptr, const char* src_ptr0, const char* src_ptr1,
const char* src_ptr2, const char* src_ptr3, const int offset,
float w00, float w01, float w10, float w11,
OutputConverter& output_converter, const bool src0_ok,
const bool src1_ok, const bool src2_ok, const bool src3_ok,
const ctype bval) {
static_assert(std::is_same<ctype, dt_quint4>::value ||
std::is_same<ctype, dt_qint4>::value,
"assert qu4 or q4");
constexpr bool signedness = std::is_same<ctype, dt_qint4>::value;
int8_t bval_4 = bval.as_storage() & 0xF;
const int bval_int_temp = transform_int8_to_b4x8<signedness>(
bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4);
const int2 bval_int{bval_int_temp, bval_int_temp};
int2 src_ori[4];
src_ori[0] = src0_ok ? *(int2*)(src_ptr0 + offset) : bval_int;
src_ori[1] = src1_ok ? *(int2*)(src_ptr1 + offset) : bval_int;
src_ori[2] = src2_ok ? *(int2*)(src_ptr2 + offset) : bval_int;
src_ori[3] = src3_ok ? *(int2*)(src_ptr3 + offset) : bval_int;
int src[8][8];
transform_b4x8_to_int8<signedness>(src[0], src_ori[0].x);
transform_b4x8_to_int8<signedness>(src[1], src_ori[1].x);
transform_b4x8_to_int8<signedness>(src[2], src_ori[2].x);
transform_b4x8_to_int8<signedness>(src[3], src_ori[3].x);
transform_b4x8_to_int8<signedness>(src[4], src_ori[0].y);
transform_b4x8_to_int8<signedness>(src[5], src_ori[1].y);
transform_b4x8_to_int8<signedness>(src[6], src_ori[2].y);
transform_b4x8_to_int8<signedness>(src[7], src_ori[3].y);
int2 res;
res.x = pack_output_func<signedness>(output_converter, src[0], src[1],
src[2], src[3], w00, w01, w10,
w11);
res.y = pack_output_func<signedness>(output_converter, src[4], src[5],
src[6], src[7], w00, w01, w10,
w11);
*(int2*)(dst_ptr + offset) = res;
}
};
template <typename ctype, typename Getter, typename SrcVisitor,
typename OutputConverter, int pack_c>
__global__ void kern_general_nhwc(SrcVisitor src, const float* __restrict mat,
ctype* __restrict dst, int C, int IH, int IW,
int OH, int OW) {
Getter getter;
OutputConverter output_converter;
constexpr int bit_width = CtypeHelper<ctype>::bit_width;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
dst = (ctype*)((char*)dst + blockIdx.z * C * OH * OW * bit_width / 8);
mat += blockIdx.z * 3 * 3;
if (ow < OW && oh < OH) {
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = getter(floor(iw) + 0, IW);
int iw1 = getter(floor(iw) + 1, IW);
int ih0 = getter(floor(ih) + 0, IH);
int ih1 = getter(floor(ih) + 1, IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
float w00 = nalpha * nbeta;
float w01 = nalpha * pbeta;
float w10 = palpha * nbeta;
float w11 = palpha * pbeta;
const char* src_ptr0 =
(char*)sptr + (ih0 * IW + iw0) * C * bit_width / 8;
const char* src_ptr1 =
(char*)sptr + (ih0 * IW + iw1) * C * bit_width / 8;
const char* src_ptr2 =
(char*)sptr + (ih1 * IW + iw0) * C * bit_width / 8;
const char* src_ptr3 =
(char*)sptr + (ih1 * IW + iw1) * C * bit_width / 8;
char* dst_ptr = (char*)dst + (oh * OW + ow) * C * bit_width / 8;
for (int c = 0; c < C; c += pack_c) {
KernCoreNHWC<ctype, OutputConverter, pack_c>::func(
dst_ptr, src_ptr0, src_ptr1, src_ptr2, src_ptr3,
c * bit_width / 8, w00, w01, w10, w11, output_converter,
true, true, true, true, (ctype)0);
}
}
}
template <typename ctype, typename Getter, typename SrcVisitor,
typename OutputConverter, int pack_c>
__global__ void kern_general_nhwc_const(SrcVisitor src, const float* __restrict mat,
ctype* __restrict dst, int C, int IH, int IW,
int OH, int OW, ctype bval) {
Getter getter;
OutputConverter output_converter;
constexpr int bit_width = CtypeHelper<ctype>::bit_width;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
dst = (ctype*)((char*)dst + blockIdx.z * C * OH * OW * bit_width / 8);
mat += blockIdx.z * 3 * 3;
if (ow < OW && oh < OH) {
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = getter(floor(iw) + 0, IW);
int iw1 = getter(floor(iw) + 1, IW);
int ih0 = getter(floor(ih) + 0, IH);
int ih1 = getter(floor(ih) + 1, IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
float w00 = nalpha * nbeta;
float w01 = nalpha * pbeta;
float w10 = palpha * nbeta;
float w11 = palpha * pbeta;
const char* src_ptr0 = (char*)sptr + (ih0 * IW + iw0) * C * bit_width / 8;
const char* src_ptr1 = (char*)sptr + (ih0 * IW + iw1) * C * bit_width / 8;
const char* src_ptr2 = (char*)sptr + (ih1 * IW + iw0) * C * bit_width / 8;
const char* src_ptr3 = (char*)sptr + (ih1 * IW + iw1) * C * bit_width / 8;
char* dst_ptr = (char*)dst + (oh * OW + ow) * C * bit_width / 8;
bool okw0 = (iw0 >= 0 && iw0 < IW);
bool okw1 = (iw1 >= 0 && iw1 < IW);
bool okh0 = (ih0 >= 0 && ih0 < IH);
bool okh1 = (ih1 >= 0 && ih1 < IH);
bool src0_ok = okh0 && okw0;
bool src1_ok = okh0 && okw1;
bool src2_ok = okh1 && okw0;
bool src3_ok = okh1 && okw1;
for (int c = 0; c < C; c += pack_c) {
KernCoreNHWC<ctype, OutputConverter, pack_c>::func(
dst_ptr, src_ptr0, src_ptr1, src_ptr2, src_ptr3, c * bit_width / 8, w00, w01, w10, w11, output_converter, src0_ok, src1_ok,
src2_ok, src3_ok, bval);
}
}
}
template <typename ctype, typename SrcVisitor>
void dispatch_with_visitor(bool is_nhwc, SrcVisitor src, const float* mat,
ctype* dst, int N, int C, int IH, int IW, int OH,
int OW, ctype bval, BorderMode bmode,
hipStream_t stream) {
constexpr int pack_c = 1;
const int BY = 16, BX = 32;
#define DISPATCH(Getter) \
do { \
if (is_nhwc) { \
hipLaunchKernelGGL(( kern_general_nhwc<ctype, Getter, SrcVisitor, \
rounding::RoundingConverter<ctype>, pack_c>) \
, dim3(blocks), dim3(threads), 0, stream, src, mat, dst, C, IH, IW, \
OH, OW); \
} else { \
hipLaunchKernelGGL(( kern_general<ctype, Getter, SrcVisitor, \
rounding::RoundingConverter<ctype>>) \
, dim3(blocks), dim3(threads), 0, stream, src, mat, dst, C, IH, IW, \
OH, OW); \
} \
} while (0)
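    // gridDim.z is capped at 65535, so larger batches are processed in chunks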
const int max_batch_size = 65535;
while (N) {
size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
dim3 threads(BX, BY);
dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
switch (bmode) {
case BORDER_REPLICATE:
DISPATCH(ReplicateGetter);
break;
case BORDER_REFLECT:
DISPATCH(ReflectGetter);
break;
case BORDER_REFLECT_101:
DISPATCH(Reflect101Getter);
break;
case BORDER_WRAP:
DISPATCH(WrapGetter);
break;
#undef DISPATCH
case BORDER_CONSTANT:
if (is_nhwc) {
hipLaunchKernelGGL(( kern_general_nhwc_const<ctype, ConstGetter, SrcVisitor,
rounding::RoundingConverter<ctype>,
pack_c>), dim3(blocks), dim3(threads), 0, stream,
src, mat, dst, C, IH, IW, OH, OW, bval);
} else {
hipLaunchKernelGGL(( kern_const_border<ctype, SrcVisitor,
rounding::RoundingConverter<ctype>>)
, dim3(blocks), dim3(threads), 0, stream,
src, mat, dst, C, IH, IW, OH, OW, bval);
}
break;
default:
break;
}
N -= curr_batch_size;
src.move_batch(curr_batch_size, C * IH * IW);
mat += curr_batch_size * 3 * 3;
dst += curr_batch_size * C * OH * OW;
}
}
template <typename ctype, typename SrcVisitor, int pack_c>
void dispatch_with_visitor_nhwc_bit4(SrcVisitor src, const float* mat,
ctype* dst, int N, int C, int IH, int IW,
int OH, int OW, ctype bval,
BorderMode bmode, hipStream_t stream) {
const int BY = 16, BX = 32;
#define DISPATCH(Getter) \
do { \
hipLaunchKernelGGL(( kern_general_nhwc<ctype, Getter, SrcVisitor, \
rounding::RoundingConverter<ctype>, pack_c>) \
, dim3(blocks), dim3(threads), 0, stream, src, mat, dst, C, IH, IW, OH, \
OW); \
} while (0)
const int max_batch_size = 65535;
while (N) {
size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
dim3 threads(BX, BY);
dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
switch (bmode) {
case BORDER_REPLICATE:
DISPATCH(ReplicateGetter);
break;
case BORDER_REFLECT:
DISPATCH(ReflectGetter);
break;
case BORDER_REFLECT_101:
DISPATCH(Reflect101Getter);
break;
case BORDER_WRAP:
DISPATCH(WrapGetter);
break;
case BORDER_CONSTANT:
{
hipLaunchKernelGGL(( kern_general_nhwc_const<ctype, ConstGetter, SrcVisitor,
rounding::RoundingConverter<ctype>, pack_c>)
, dim3(blocks), dim3(threads), 0, stream, src, mat, dst, C, IH, IW, OH,
OW, bval);
}
break;
default:
break;
}
#undef DISPATCH
N -= curr_batch_size;
src.move_batch(curr_batch_size, C * IH * IW / 2);
mat += curr_batch_size * 3 * 3;
dst += curr_batch_size * C * OH * OW / 2;
}
}
template <typename ctype, typename SrcVisitor>
void dispatch_with_visitor_nchw4(SrcVisitor src, const float* mat, ctype* dst,
int N, int C, int IH, int IW, int OH, int OW,
ctype bval, BorderMode bmode,
hipStream_t stream) {
const int BY = 16, BX = 32;
#define DISPATCH(Getter) \
do { \
hipLaunchKernelGGL(( kern_general_nchw4<ctype, Getter, SrcVisitor, \
rounding::RoundingConverter<ctype>>) \
, dim3(blocks), dim3(threads), 0, stream, src, mat, dst, C, IH, IW, OH, \
OW); \
} while (0)
const int max_batch_size = 65535;
while (N) {
size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
dim3 threads(BX, BY);
dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
switch (bmode) {
case BORDER_REPLICATE:
DISPATCH(ReplicateGetter);
break;
case BORDER_REFLECT:
DISPATCH(ReflectGetter);
break;
case BORDER_REFLECT_101:
DISPATCH(Reflect101Getter);
break;
case BORDER_WRAP:
DISPATCH(WrapGetter);
break;
#undef DISPATCH
case BORDER_CONSTANT:
hipLaunchKernelGGL(( kern_const_border_nchw4<ctype, SrcVisitor,
rounding::RoundingConverter<ctype>>)
, dim3(blocks), dim3(threads), 0, stream, src, mat, dst, C, IH,
IW, OH, OW, bval);
break;
default:
break;
}
N -= curr_batch_size;
src.move_batch(curr_batch_size, C * IH * IW);
mat += curr_batch_size * 3 * 3;
dst += curr_batch_size * C * OH * OW;
}
}
template <typename ctype, typename SrcVisitor>
void dispatch_with_visitor_nchw64(SrcVisitor src, const float* mat, ctype* dst,
int N, int C, int IH, int IW, int OH, int OW,
ctype bval, BorderMode bmode,
hipStream_t stream) {
const int BY = 16, BX = 32;
#define DISPATCH(Getter) \
do { \
hipLaunchKernelGGL(( kern_general_nchw64<ctype, Getter, SrcVisitor, \
rounding::RoundingConverter<ctype>>) \
, dim3(blocks), dim3(threads), 0, stream, src, mat, dst, C, IH, IW, OH, \
OW); \
} while (0)
const int max_batch_size = 65535;
while (N) {
size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
dim3 threads(BX, BY);
dim3 blocks((OW * 2 + BX - 1) / BX, (OH + BY - 1) / BY,
curr_batch_size);
switch (bmode) {
case BORDER_REPLICATE:
DISPATCH(ReplicateGetter);
break;
case BORDER_REFLECT:
DISPATCH(ReflectGetter);
break;
case BORDER_REFLECT_101:
DISPATCH(Reflect101Getter);
break;
case BORDER_WRAP:
DISPATCH(WrapGetter);
break;
#undef DISPATCH
case BORDER_CONSTANT:
hipLaunchKernelGGL(( kern_const_border_nchw64<ctype, SrcVisitor,
rounding::RoundingConverter<ctype>>)
, dim3(blocks), dim3(threads), 0, stream, src, mat, dst, C, IH,
IW, OH, OW, bval);
break;
default:
break;
}
N -= curr_batch_size;
src.move_batch(curr_batch_size, C * IH * IW / 2);
mat += curr_batch_size * 3 * 3;
dst += curr_batch_size * C * OH * OW / 2;
}
}
template <typename SrcType, typename DstType>
struct CudaTypeCvt;
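// CudaTypeCvt converts quantized uint8 source values either to zero-point-shifted int8 or to dequantized float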
template <>
struct CudaTypeCvt<dt_quint8, int8_t> {
CudaDTypeParamImpl<dt_quint8> m_src_param;
CudaTypeCvt(CudaDTypeParamImpl<dt_quint8> src_param) {
m_src_param = src_param;
};
inline __device__ int8_t operator()(uint8_t val) {
return val - m_src_param.zero_point;
}
};
template <>
struct CudaTypeCvt<dt_quint8, float> {
CudaDTypeParamImpl<dt_quint8> m_src_param;
CudaTypeCvt(CudaDTypeParamImpl<dt_quint8> src_param) {
m_src_param = src_param;
};
__device__ __forceinline__ float operator()(uint8_t val) {
return m_src_param.dequantize(dt_quint8(val));
}
};
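// the kernels below sample 1-channel (nhw) or 3-channel (n3hw) quint8 inputs and write NCHW4 output, zero-filling the unused lanes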
#define INST(dst_ctype, vec_dst_type) \
template <typename src_dtype, typename src_ctype, typename Getter, \
typename SrcVisitor> \
__global__ void kern_general_quint8_nhw_nchw4( \
SrcVisitor src, const float* __restrict mat, \
dst_ctype* __restrict dst, int IH, int IW, int OH, int OW, \
CudaTypeCvt<src_dtype, dst_ctype> type_cvt) { \
Getter getter; \
rounding::RoundingConverter<src_ctype> warp_out_converter; \
int ow = blockIdx.x * blockDim.x + threadIdx.x; \
int oh = blockIdx.y * blockDim.y + threadIdx.y; \
const src_ctype* __restrict sptr = src.get(blockIdx.z, IH * IW); \
dst += blockIdx.z * OH * OW * 4; \
mat += blockIdx.z * 3 * 3; \
if (ow < OW && oh < OH) { \
float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \
int iw0 = getter(floor(iw) + 0, IW); \
int iw1 = getter(floor(iw) + 1, IW); \
int ih0 = getter(floor(ih) + 0, IH); \
int ih1 = getter(floor(ih) + 1, IH); \
float palpha = ih - floor(ih); \
float pbeta = iw - floor(iw); \
float nalpha = 1.0f - palpha; \
float nbeta = 1.0f - pbeta; \
vec_dst_type result; \
src_ctype val_x = \
warp_out_converter(sptr[ih0 * IW + iw0] * nalpha * nbeta + \
sptr[ih0 * IW + iw1] * nalpha * pbeta + \
sptr[ih1 * IW + iw0] * palpha * nbeta + \
sptr[ih1 * IW + iw1] * palpha * pbeta); \
result.x = type_cvt(val_x); \
result.y = result.z = result.w = 0; \
*((vec_dst_type*)dst + oh * OW + ow) = result; \
} \
}
INST(int8_t, char4)
#undef INST
#define INST(dst_ctype, vec_dst_type) \
template <typename src_dtype, typename src_ctype, typename SrcVisitor> \
__global__ void kern_const_border_quint8_nhw_nchw4( \
SrcVisitor src, const float* __restrict mat, \
dst_ctype* __restrict dst, int IH, int IW, int OH, int OW, \
src_ctype bval, CudaTypeCvt<src_dtype, dst_ctype> type_cvt) { \
rounding::RoundingConverter<src_ctype> warp_out_converter; \
int ow = blockIdx.x * blockDim.x + threadIdx.x; \
int oh = blockIdx.y * blockDim.y + threadIdx.y; \
const src_ctype* __restrict sptr = src.get(blockIdx.z, IH * IW); \
dst += blockIdx.z * OH * OW * 4; \
mat += blockIdx.z * 3 * 3; \
if (ow < OW && oh < OH) { \
float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \
int iw0 = floor(iw) + 0; \
int iw1 = floor(iw) + 1; \
int ih0 = floor(ih) + 0; \
int ih1 = floor(ih) + 1; \
bool okw0 = (iw0 >= 0 && iw0 < IW); \
bool okw1 = (iw1 >= 0 && iw1 < IW); \
bool okh0 = (ih0 >= 0 && ih0 < IH); \
bool okh1 = (ih1 >= 0 && ih1 < IH); \
float palpha = ih - floor(ih); \
float pbeta = iw - floor(iw); \
float nalpha = 1.0f - palpha; \
float nbeta = 1.0f - pbeta; \
vec_dst_type result; \
src_ctype v00 = (okh0 && okw0 ? sptr[ih0 * IW + iw0] : bval); \
src_ctype v01 = (okh0 && okw1 ? sptr[ih0 * IW + iw1] : bval); \
src_ctype v10 = (okh1 && okw0 ? sptr[ih1 * IW + iw0] : bval); \
src_ctype v11 = (okh1 && okw1 ? sptr[ih1 * IW + iw1] : bval); \
src_ctype val_x = warp_out_converter( \
v00 * nalpha * nbeta + v01 * nalpha * pbeta + \
v10 * palpha * nbeta + v11 * palpha * pbeta); \
result.x = type_cvt(val_x); \
result.y = result.z = result.w = 0; \
*((vec_dst_type*)dst + oh * OW + ow) = result; \
} \
}
INST(int8_t, char4)
#undef INST
#define INST(dst_ctype, vec_dst_type) \
template <typename src_dtype, typename src_ctype, typename Getter, \
typename SrcVisitor> \
__global__ void kern_general_quint8_n3hw_nchw4( \
SrcVisitor src, const float* __restrict mat, \
dst_ctype* __restrict dst, int IH, int IW, int OH, int OW, \
CudaTypeCvt<src_dtype, dst_ctype> type_cvt) { \
Getter getter; \
rounding::RoundingConverter<src_ctype> warp_out_converter; \
int ow = blockIdx.x * blockDim.x + threadIdx.x; \
int oh = blockIdx.y * blockDim.y + threadIdx.y; \
const src_ctype* __restrict sptr = src.get(blockIdx.z, 3 * IH * IW); \
dst += blockIdx.z * OH * OW * 4; \
mat += blockIdx.z * 3 * 3; \
if (ow < OW && oh < OH) { \
float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \
int iw0 = getter(floor(iw) + 0, IW); \
int iw1 = getter(floor(iw) + 1, IW); \
int ih0 = getter(floor(ih) + 0, IH); \
int ih1 = getter(floor(ih) + 1, IH); \
float palpha = ih - floor(ih); \
float pbeta = iw - floor(iw); \
float nalpha = 1.0f - palpha; \
float nbeta = 1.0f - pbeta; \
vec_dst_type result; \
src_ctype val_x = \
warp_out_converter(sptr[ih0 * IW + iw0] * nalpha * nbeta + \
sptr[ih0 * IW + iw1] * nalpha * pbeta + \
sptr[ih1 * IW + iw0] * palpha * nbeta + \
sptr[ih1 * IW + iw1] * palpha * pbeta); \
src_ctype val_y = warp_out_converter( \
sptr[IW * IH + ih0 * IW + iw0] * nalpha * nbeta + \
sptr[IW * IH + ih0 * IW + iw1] * nalpha * pbeta + \
sptr[IW * IH + ih1 * IW + iw0] * palpha * nbeta + \
sptr[IW * IH + ih1 * IW + iw1] * palpha * pbeta); \
src_ctype val_z = warp_out_converter( \
sptr[2 * IW * IH + ih0 * IW + iw0] * nalpha * nbeta + \
sptr[2 * IW * IH + ih0 * IW + iw1] * nalpha * pbeta + \
sptr[2 * IW * IH + ih1 * IW + iw0] * palpha * nbeta + \
sptr[2 * IW * IH + ih1 * IW + iw1] * palpha * pbeta); \
result.x = type_cvt(val_x); \
result.y = type_cvt(val_y); \
result.z = type_cvt(val_z); \
result.w = 0; \
*((vec_dst_type*)dst + oh * OW + ow) = result; \
} \
}
INST(int8_t, char4)
#undef INST
#define INST(dst_ctype, vec_dst_type) \
template <typename src_dtype, typename src_ctype, typename SrcVisitor> \
__global__ void kern_const_border_quint8_n3hw_nchw4( \
SrcVisitor src, const float* __restrict mat, \
dst_ctype* __restrict dst, int IH, int IW, int OH, int OW, \
src_ctype bval, CudaTypeCvt<src_dtype, dst_ctype> type_cvt) { \
rounding::RoundingConverter<src_ctype> warp_out_converter; \
int ow = blockIdx.x * blockDim.x + threadIdx.x; \
int oh = blockIdx.y * blockDim.y + threadIdx.y; \
const src_ctype* __restrict sptr = src.get(blockIdx.z, 3 * IH * IW); \
dst += blockIdx.z * OH * OW * 4; \
mat += blockIdx.z * 3 * 3; \
if (ow < OW && oh < OH) { \
float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \
int iw0 = floor(iw) + 0; \
int iw1 = floor(iw) + 1; \
int ih0 = floor(ih) + 0; \
int ih1 = floor(ih) + 1; \
bool okw0 = (iw0 >= 0 && iw0 < IW); \
bool okw1 = (iw1 >= 0 && iw1 < IW); \
bool okh0 = (ih0 >= 0 && ih0 < IH); \
bool okh1 = (ih1 >= 0 && ih1 < IH); \
float palpha = ih - floor(ih); \
float pbeta = iw - floor(iw); \
float nalpha = 1.0f - palpha; \
float nbeta = 1.0f - pbeta; \
vec_dst_type result; \
src_ctype v00, v01, v10, v11; \
v00 = (okh0 && okw0 ? sptr[ih0 * IW + iw0] : bval); \
v01 = (okh0 && okw1 ? sptr[ih0 * IW + iw1] : bval); \
v10 = (okh1 && okw0 ? sptr[ih1 * IW + iw0] : bval); \
v11 = (okh1 && okw1 ? sptr[ih1 * IW + iw1] : bval); \
src_ctype val_x = warp_out_converter( \
v00 * nalpha * nbeta + v01 * nalpha * pbeta + \
v10 * palpha * nbeta + v11 * palpha * pbeta); \
v00 = (okh0 && okw0 ? sptr[IH * IW + ih0 * IW + iw0] : bval); \
v01 = (okh0 && okw1 ? sptr[IH * IW + ih0 * IW + iw1] : bval); \
v10 = (okh1 && okw0 ? sptr[IH * IW + ih1 * IW + iw0] : bval); \
v11 = (okh1 && okw1 ? sptr[IH * IW + ih1 * IW + iw1] : bval); \
src_ctype val_y = warp_out_converter( \
v00 * nalpha * nbeta + v01 * nalpha * pbeta + \
v10 * palpha * nbeta + v11 * palpha * pbeta); \
v00 = (okh0 && okw0 ? sptr[2 * IH * IW + ih0 * IW + iw0] : bval); \
v01 = (okh0 && okw1 ? sptr[2 * IH * IW + ih0 * IW + iw1] : bval); \
v10 = (okh1 && okw0 ? sptr[2 * IH * IW + ih1 * IW + iw0] : bval); \
v11 = (okh1 && okw1 ? sptr[2 * IH * IW + ih1 * IW + iw1] : bval); \
src_ctype val_z = warp_out_converter( \
v00 * nalpha * nbeta + v01 * nalpha * pbeta + \
v10 * palpha * nbeta + v11 * palpha * pbeta); \
result.x = type_cvt(val_x); \
result.y = type_cvt(val_y); \
result.z = type_cvt(val_z); \
result.w = 0; \
*((vec_dst_type*)dst + oh * OW + ow) = result; \
} \
}
INST(int8_t, char4)
#undef INST
#define INST(dst_ctype, vec_dst_type) \
template <typename src_dtype, typename src_ctype, typename Getter, \
typename SrcVisitor> \
__global__ void kern_general_quint8_nhw3_nchw4( \
SrcVisitor src, const float* __restrict mat, \
dst_ctype* __restrict dst, int IH, int IW, int OH, int OW, \
CudaTypeCvt<src_dtype, dst_ctype> type_cvt) { \
Getter getter; \
rounding::RoundingConverter<src_ctype> warp_out_converter; \
int ow = blockIdx.x * blockDim.x + threadIdx.x; \
int oh = blockIdx.y * blockDim.y + threadIdx.y; \
const src_ctype* __restrict sptr = src.get(blockIdx.z, 3 * IH * IW); \
dst += blockIdx.z * OH * OW * 4; \
mat += blockIdx.z * 3 * 3; \
if (ow < OW && oh < OH) { \
float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \
int iw0 = getter(floor(iw) + 0, IW); \
int iw1 = getter(floor(iw) + 1, IW); \
int ih0 = getter(floor(ih) + 0, IH); \
int ih1 = getter(floor(ih) + 1, IH); \
float palpha = ih - floor(ih); \
float pbeta = iw - floor(iw); \
float nalpha = 1.0f - palpha; \
float nbeta = 1.0f - pbeta; \
vec_dst_type result; \
src_ctype val_x = warp_out_converter( \
sptr[(ih0 * IW + iw0) * 3] * nalpha * nbeta + \
sptr[(ih0 * IW + iw1) * 3] * nalpha * pbeta + \
sptr[(ih1 * IW + iw0) * 3] * palpha * nbeta + \
sptr[(ih1 * IW + iw1) * 3] * palpha * pbeta); \
src_ctype val_y = warp_out_converter( \
sptr[(ih0 * IW + iw0) * 3 + 1] * nalpha * nbeta + \
sptr[(ih0 * IW + iw1) * 3 + 1] * nalpha * pbeta + \
sptr[(ih1 * IW + iw0) * 3 + 1] * palpha * nbeta + \
sptr[(ih1 * IW + iw1) * 3 + 1] * palpha * pbeta); \
src_ctype val_z = warp_out_converter( \
sptr[(ih0 * IW + iw0) * 3 + 2] * nalpha * nbeta + \
sptr[(ih0 * IW + iw1) * 3 + 2] * nalpha * pbeta + \
sptr[(ih1 * IW + iw0) * 3 + 2] * palpha * nbeta + \
sptr[(ih1 * IW + iw1) * 3 + 2] * palpha * pbeta); \
result.x = type_cvt(val_x); \
result.y = type_cvt(val_y); \
result.z = type_cvt(val_z); \
result.w = 0; \
*((vec_dst_type*)dst + oh * OW + ow) = result; \
} \
}
INST(int8_t, char4)
#undef INST
#define INST(dst_ctype, vec_dst_type) \
template <typename src_dtype, typename src_ctype, typename SrcVisitor> \
__global__ void kern_const_border_quint8_nhw3_nchw4( \
SrcVisitor src, const float* __restrict mat, \
dst_ctype* __restrict dst, int IH, int IW, int OH, int OW, \
src_ctype bval, CudaTypeCvt<src_dtype, dst_ctype> type_cvt) { \
rounding::RoundingConverter<src_ctype> warp_out_converter; \
int ow = blockIdx.x * blockDim.x + threadIdx.x; \
int oh = blockIdx.y * blockDim.y + threadIdx.y; \
const src_ctype* __restrict sptr = src.get(blockIdx.z, 3 * IH * IW); \
dst += blockIdx.z * OH * OW * 4; \
mat += blockIdx.z * 3 * 3; \
if (ow < OW && oh < OH) { \
float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \
int iw0 = floor(iw) + 0; \
int iw1 = floor(iw) + 1; \
int ih0 = floor(ih) + 0; \
int ih1 = floor(ih) + 1; \
bool okw0 = (iw0 >= 0 && iw0 < IW); \
bool okw1 = (iw1 >= 0 && iw1 < IW); \
bool okh0 = (ih0 >= 0 && ih0 < IH); \
bool okh1 = (ih1 >= 0 && ih1 < IH); \
float palpha = ih - floor(ih); \
float pbeta = iw - floor(iw); \
float nalpha = 1.0f - palpha; \
float nbeta = 1.0f - pbeta; \
vec_dst_type result; \
src_ctype v00, v01, v10, v11; \
v00 = (okh0 && okw0 ? sptr[(ih0 * IW + iw0) * 3] : bval); \
v01 = (okh0 && okw1 ? sptr[(ih0 * IW + iw1) * 3] : bval); \
v10 = (okh1 && okw0 ? sptr[(ih1 * IW + iw0) * 3] : bval); \
v11 = (okh1 && okw1 ? sptr[(ih1 * IW + iw1) * 3] : bval); \
src_ctype val_x = warp_out_converter( \
v00 * nalpha * nbeta + v01 * nalpha * pbeta + \
v10 * palpha * nbeta + v11 * palpha * pbeta); \
v00 = (okh0 && okw0 ? sptr[(ih0 * IW + iw0) * 3 + 1] : bval); \
v01 = (okh0 && okw1 ? sptr[(ih0 * IW + iw1) * 3 + 1] : bval); \
v10 = (okh1 && okw0 ? sptr[(ih1 * IW + iw0) * 3 + 1] : bval); \
v11 = (okh1 && okw1 ? sptr[(ih1 * IW + iw1) * 3 + 1] : bval); \
src_ctype val_y = warp_out_converter( \
v00 * nalpha * nbeta + v01 * nalpha * pbeta + \
v10 * palpha * nbeta + v11 * palpha * pbeta); \
v00 = (okh0 && okw0 ? sptr[(ih0 * IW + iw0) * 3 + 2] : bval); \
v01 = (okh0 && okw1 ? sptr[(ih0 * IW + iw1) * 3 + 2] : bval); \
v10 = (okh1 && okw0 ? sptr[(ih1 * IW + iw0) * 3 + 2] : bval); \
v11 = (okh1 && okw1 ? sptr[(ih1 * IW + iw1) * 3 + 2] : bval); \
src_ctype val_z = warp_out_converter( \
v00 * nalpha * nbeta + v01 * nalpha * pbeta + \
v10 * palpha * nbeta + v11 * palpha * pbeta); \
result.x = type_cvt(val_x); \
result.y = type_cvt(val_y); \
result.z = type_cvt(val_z); \
result.w = 0; \
*((vec_dst_type*)dst + oh * OW + ow) = result; \
} \
}
INST(int8_t, char4)
#undef INST
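// Host-side dispatcher for the quint8 dimshuffle + typecvt NCHW4 kernels.
// It selects the kernel by layout (C == 1, interleaved NHWC, or planar) and
// border mode, and launches the batch in chunks of at most 65535 images,
// presumably to stay within the CUDA grid.z limit.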
template <typename src_dtype, typename src_ctype, typename dst_ctype,
typename SrcVisitor>
void dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw4(
bool is_nhwc, SrcVisitor src, const float* mat, dst_ctype* dst, int N,
int C, int IH, int IW, int OH, int OW, src_ctype bval,
CudaDTypeParamImpl<src_dtype> param, BorderMode bmode,
hipStream_t stream) {
const int BY = 16, BX = 32;
CudaTypeCvt<src_dtype, dst_ctype> type_cvt(param);
#define DISPATCH(Getter) \
do { \
if (C == 1) { \
hipLaunchKernelGGL(( kern_general_quint8_nhw_nchw4<src_dtype, src_ctype, Getter, \
SrcVisitor>) \
, dim3(blocks), dim3(threads), 0, stream, src, mat, dst, IH, IW, \
OH, OW, type_cvt); \
} else if (is_nhwc) { \
hipLaunchKernelGGL(( kern_general_quint8_nhw3_nchw4<src_dtype, src_ctype, Getter, \
SrcVisitor>) \
, dim3(blocks), dim3(threads), 0, stream, src, mat, dst, IH, IW, \
OH, OW, type_cvt); \
} else { \
hipLaunchKernelGGL(( kern_general_quint8_n3hw_nchw4<src_dtype, src_ctype, Getter, \
SrcVisitor>) \
, dim3(blocks), dim3(threads), 0, stream, src, mat, dst, IH, IW, \
OH, OW, type_cvt); \
} \
} while (0)
const int max_batch_size = 65535;
while (N) {
size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
dim3 threads(BX, BY);
dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
switch (bmode) {
case BORDER_REPLICATE:
DISPATCH(ReplicateGetter);
break;
case BORDER_REFLECT:
DISPATCH(ReflectGetter);
break;
case BORDER_REFLECT_101:
DISPATCH(Reflect101Getter);
break;
case BORDER_WRAP:
DISPATCH(WrapGetter);
break;
#undef DISPATCH
case BORDER_CONSTANT:
if (C == 1) {
hipLaunchKernelGGL(( kern_const_border_quint8_nhw_nchw4<src_dtype, src_ctype,
SrcVisitor>)
, dim3(blocks), dim3(threads), 0, stream, src, mat, dst, IH,
IW, OH, OW, bval,
type_cvt);
} else if (is_nhwc) {
hipLaunchKernelGGL(( kern_const_border_quint8_nhw3_nchw4<src_dtype, src_ctype,
SrcVisitor>)
, dim3(blocks), dim3(threads), 0, stream, src, mat, dst, IH,
IW, OH, OW, bval,
type_cvt);
} else {
hipLaunchKernelGGL(( kern_const_border_quint8_n3hw_nchw4<src_dtype, src_ctype,
SrcVisitor>)
, dim3(blocks), dim3(threads), 0, stream, src, mat, dst, IH,
IW, OH, OW, bval,
type_cvt);
}
break;
default:
break;
}
N -= curr_batch_size;
src.move_batch(curr_batch_size, C * IH * IW);
mat += curr_batch_size * 3 * 3;
dst += curr_batch_size * 4 * OH * OW;
}
}
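// The following kernels keep an NCHW output layout (no NCHW4 repacking): the
// warped quint8 source is converted element-wise to the destination ctype,
// instantiated here for float output.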
#define INST(dst_ctype) \
template <typename src_dtype, typename src_ctype, typename Getter, \
typename SrcVisitor> \
__global__ void kern_general_quint8_nchw( \
SrcVisitor src, const float* __restrict mat, \
dst_ctype* __restrict dst, int C, int IH, int IW, int OH, int OW, \
CudaTypeCvt<src_dtype, dst_ctype> type_cvt) { \
Getter getter; \
rounding::RoundingConverter<src_ctype> warp_out_converter; \
int ow = blockIdx.x * blockDim.x + threadIdx.x; \
int oh = blockIdx.y * blockDim.y + threadIdx.y; \
const src_ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW); \
dst += blockIdx.z * C * OH * OW; \
mat += blockIdx.z * 3 * 3; \
if (ow < OW && oh < OH) { \
float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \
int iw0 = getter(floor(iw) + 0, IW); \
int iw1 = getter(floor(iw) + 1, IW); \
int ih0 = getter(floor(ih) + 0, IH); \
int ih1 = getter(floor(ih) + 1, IH); \
float palpha = ih - floor(ih); \
float pbeta = iw - floor(iw); \
float nalpha = 1.0f - palpha; \
float nbeta = 1.0f - pbeta; \
for (int c = 0; c < C; ++c) { \
src_ctype val = warp_out_converter( \
sptr[ih0 * IW + iw0] * nalpha * nbeta + \
sptr[ih0 * IW + iw1] * nalpha * pbeta + \
sptr[ih1 * IW + iw0] * palpha * nbeta + \
sptr[ih1 * IW + iw1] * palpha * pbeta); \
dst_ctype result; \
result = type_cvt(val); \
dst[oh * OW + ow] = result; \
sptr += IH * IW; \
dst += OH * OW; \
} \
} \
}
INST(float)
#undef INST
#define INST(dst_ctype) \
template <typename src_dtype, typename src_ctype, typename SrcVisitor> \
__global__ void kern_const_border_quint8_nchw( \
SrcVisitor src, const float* __restrict mat, \
dst_ctype* __restrict dst, int C, int IH, int IW, int OH, int OW, \
src_ctype bval, CudaTypeCvt<src_dtype, dst_ctype> type_cvt) { \
rounding::RoundingConverter<src_ctype> warp_out_converter; \
int ow = blockIdx.x * blockDim.x + threadIdx.x; \
int oh = blockIdx.y * blockDim.y + threadIdx.y; \
const src_ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW); \
dst += blockIdx.z * C * OH * OW; \
mat += blockIdx.z * 3 * 3; \
if (ow < OW && oh < OH) { \
float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \
int iw0 = floor(iw) + 0; \
int iw1 = floor(iw) + 1; \
int ih0 = floor(ih) + 0; \
int ih1 = floor(ih) + 1; \
bool okw0 = (iw0 >= 0 && iw0 < IW); \
bool okw1 = (iw1 >= 0 && iw1 < IW); \
bool okh0 = (ih0 >= 0 && ih0 < IH); \
bool okh1 = (ih1 >= 0 && ih1 < IH); \
float palpha = ih - floor(ih); \
float pbeta = iw - floor(iw); \
float nalpha = 1.0f - palpha; \
float nbeta = 1.0f - pbeta; \
for (int c = 0; c < C; ++c) { \
src_ctype v00 = (okh0 && okw0 ? sptr[ih0 * IW + iw0] : bval); \
src_ctype v01 = (okh0 && okw1 ? sptr[ih0 * IW + iw1] : bval); \
src_ctype v10 = (okh1 && okw0 ? sptr[ih1 * IW + iw0] : bval); \
src_ctype v11 = (okh1 && okw1 ? sptr[ih1 * IW + iw1] : bval); \
src_ctype val = warp_out_converter( \
v00 * nalpha * nbeta + v01 * nalpha * pbeta + \
v10 * palpha * nbeta + v11 * palpha * pbeta); \
dst_ctype result; \
result = type_cvt(val); \
dst[oh * OW + ow] = result; \
sptr += IH * IW; \
dst += OH * OW; \
} \
} \
}
INST(float)
#undef INST
#define INST(dst_ctype) \
template <typename src_dtype, typename src_ctype, typename Getter, \
typename SrcVisitor> \
__global__ void kern_general_quint8_nhwc_nchw( \
SrcVisitor src, const float* __restrict mat, \
dst_ctype* __restrict dst, int C, int IH, int IW, int OH, int OW, \
CudaTypeCvt<src_dtype, dst_ctype> type_cvt) { \
Getter getter; \
rounding::RoundingConverter<src_ctype> warp_out_converter; \
int ow = blockIdx.x * blockDim.x + threadIdx.x; \
int oh = blockIdx.y * blockDim.y + threadIdx.y; \
const src_ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW); \
dst += blockIdx.z * C * OH * OW; \
mat += blockIdx.z * 3 * 3; \
if (ow < OW && oh < OH) { \
float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \
int iw0 = getter(floor(iw) + 0, IW); \
int iw1 = getter(floor(iw) + 1, IW); \
int ih0 = getter(floor(ih) + 0, IH); \
int ih1 = getter(floor(ih) + 1, IH); \
float palpha = ih - floor(ih); \
float pbeta = iw - floor(iw); \
float nalpha = 1.0f - palpha; \
float nbeta = 1.0f - pbeta; \
for (int c = 0; c < C; ++c) { \
src_ctype val = warp_out_converter( \
sptr[(ih0 * IW + iw0) * C + c] * nalpha * nbeta + \
sptr[(ih0 * IW + iw1) * C + c] * nalpha * pbeta + \
sptr[(ih1 * IW + iw0) * C + c] * palpha * nbeta + \
sptr[(ih1 * IW + iw1) * C + c] * palpha * pbeta); \
dst_ctype result; \
result = type_cvt(val); \
dst[oh * OW + ow] = result; \
dst += OH * OW; \
} \
} \
}
INST(float)
#undef INST
#define INST(dst_ctype) \
template <typename src_dtype, typename src_ctype, typename SrcVisitor> \
__global__ void kern_const_border_quint8_nhwc_nchw( \
SrcVisitor src, const float* __restrict mat, \
dst_ctype* __restrict dst, int C, int IH, int IW, int OH, int OW, \
src_ctype bval, CudaTypeCvt<src_dtype, dst_ctype> type_cvt) { \
rounding::RoundingConverter<src_ctype> warp_out_converter; \
int ow = blockIdx.x * blockDim.x + threadIdx.x; \
int oh = blockIdx.y * blockDim.y + threadIdx.y; \
const src_ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW); \
dst += blockIdx.z * C * OH * OW; \
mat += blockIdx.z * 3 * 3; \
if (ow < OW && oh < OH) { \
float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \
int iw0 = floor(iw) + 0; \
int iw1 = floor(iw) + 1; \
int ih0 = floor(ih) + 0; \
int ih1 = floor(ih) + 1; \
bool okw0 = (iw0 >= 0 && iw0 < IW); \
bool okw1 = (iw1 >= 0 && iw1 < IW); \
bool okh0 = (ih0 >= 0 && ih0 < IH); \
bool okh1 = (ih1 >= 0 && ih1 < IH); \
float palpha = ih - floor(ih); \
float pbeta = iw - floor(iw); \
float nalpha = 1.0f - palpha; \
float nbeta = 1.0f - pbeta; \
for (int c = 0; c < C; ++c) { \
src_ctype v00 = (okh0 && okw0 ? sptr[(ih0 * IW + iw0) * C + c] \
: bval); \
src_ctype v01 = (okh0 && okw1 ? sptr[(ih0 * IW + iw1) * C + c] \
: bval); \
src_ctype v10 = (okh1 && okw0 ? sptr[(ih1 * IW + iw0) * C + c] \
: bval); \
src_ctype v11 = (okh1 && okw1 ? sptr[(ih1 * IW + iw1) * C + c] \
: bval); \
float val = warp_out_converter( \
v00 * nalpha * nbeta + v01 * nalpha * pbeta + \
v10 * palpha * nbeta + v11 * palpha * pbeta); \
dst_ctype result; \
result = type_cvt(val); \
dst[oh * OW + ow] = result; \
dst += OH * OW; \
} \
} \
}
INST(float)
#undef INST
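// Host-side dispatcher for the NCHW-output quint8 kernels above; the layout
// switch is only between NHWC and NCHW inputs, otherwise the structure mirrors
// the NCHW4 dispatcher.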
template <typename src_dtype, typename src_ctype, typename dst_ctype,
typename SrcVisitor>
void dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw(
bool is_nhwc, SrcVisitor src, const float* mat, dst_ctype* dst, int N,
int C, int IH, int IW, int OH, int OW, src_ctype bval,
CudaDTypeParamImpl<src_dtype> param, BorderMode bmode,
hipStream_t stream) {
const int BY = 16, BX = 32;
CudaTypeCvt<src_dtype, dst_ctype> type_cvt(param);
#define DISPATCH(Getter) \
do { \
if (is_nhwc) { \
hipLaunchKernelGGL(( kern_general_quint8_nhwc_nchw<src_dtype, src_ctype, Getter, \
SrcVisitor>) \
, dim3(blocks), dim3(threads), 0, stream, src, mat, dst, C, IH, IW, \
OH, OW, type_cvt); \
} else { \
hipLaunchKernelGGL(( kern_general_quint8_nchw<src_dtype, src_ctype, Getter, SrcVisitor>) \
, dim3(blocks), dim3(threads), 0, stream, src, mat, dst, C, IH, IW, \
OH, OW, type_cvt); \
} \
} while (0)
const int max_batch_size = 65535;
while (N) {
size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
dim3 threads(BX, BY);
dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
switch (bmode) {
case BORDER_REPLICATE:
DISPATCH(ReplicateGetter);
break;
case BORDER_REFLECT:
DISPATCH(ReflectGetter);
break;
case BORDER_REFLECT_101:
DISPATCH(Reflect101Getter);
break;
case BORDER_WRAP:
DISPATCH(WrapGetter);
break;
#undef DISPATCH
case BORDER_CONSTANT:
if (is_nhwc) {
hipLaunchKernelGGL(( kern_const_border_quint8_nhwc_nchw<src_dtype, src_ctype,
SrcVisitor>)
, dim3(blocks), dim3(threads), 0, stream, src, mat, dst, C,
IH, IW, OH, OW,
bval, type_cvt);
} else {
hipLaunchKernelGGL(( kern_const_border_quint8_nchw<src_dtype, src_ctype,
SrcVisitor>)
, dim3(blocks), dim3(threads), 0, stream, src, mat, dst, C,
IH, IW, OH, OW,
bval, type_cvt);
}
break;
default:
break;
}
N -= curr_batch_size;
src.move_batch(curr_batch_size, C * IH * IW);
mat += curr_batch_size * 3 * 3;
dst += curr_batch_size * C * OH * OW;
}
}
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace warp_perspective {
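// Entry points called by the operator implementation. When mat_idx is given,
// an IndexedSrcVisitor resolves each matrix to its source batch (reporting
// out-of-range indices through the async error info); otherwise a
// DirectSrcVisitor walks the source tensor batch by batch.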
template <typename ctype>
void forward_proxy(bool is_nhwc, const ctype* src, const float* mat,
const int* mat_idx, ctype* dst, int N_SRC, int N_MAT, int C,
int IH, int IW, int OH, int OW, ctype bval, BorderMode bmode,
megcore::AsyncErrorInfo* error_info, void* error_tracker,
hipStream_t stream) {
if (mat_idx) {
IndexedSrcVisitor<ctype> visitor;
visitor.ptr = src;
visitor.idx = mat_idx;
visitor.N_SRC = N_SRC;
visitor.error_info = error_info;
visitor.error_tracker = error_tracker;
dispatch_with_visitor(is_nhwc, visitor, mat, dst, N_MAT, C, IH, IW, OH,
OW, bval, bmode, stream);
} else {
DirectSrcVisitor<ctype> visitor;
visitor.ptr = src;
dispatch_with_visitor(is_nhwc, visitor, mat, dst, N_MAT, C, IH, IW, OH,
OW, bval, bmode, stream);
}
after_kernel_launch();
}
template <typename ctype, int pack_c>
void forward_proxy_nhwc_bit4(const ctype* src, const float* mat,
const int* mat_idx, ctype* dst, int N_SRC,
int N_MAT, int C, int IH, int IW, int OH, int OW,
ctype bval, BorderMode bmode,
megcore::AsyncErrorInfo* error_info,
void* error_tracker, hipStream_t stream) {
if (mat_idx) {
IndexedSrcVisitor<ctype> visitor;
visitor.ptr = src;
visitor.idx = mat_idx;
visitor.N_SRC = N_SRC;
visitor.error_info = error_info;
visitor.error_tracker = error_tracker;
dispatch_with_visitor_nhwc_bit4<ctype, IndexedSrcVisitor<ctype>,
pack_c>(visitor, mat, dst, N_MAT, C, IH,
IW, OH, OW, bval, bmode,
stream);
} else {
DirectSrcVisitor<ctype> visitor;
visitor.ptr = src;
dispatch_with_visitor_nhwc_bit4<ctype, DirectSrcVisitor<ctype>, pack_c>(
visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode,
stream);
}
after_kernel_launch();
}
template <typename ctype>
void forward_proxy_nchw4(const ctype* src, const float* mat, const int* mat_idx,
ctype* dst, int N_SRC, int N_MAT, int C, int IH,
int IW, int OH, int OW, ctype bval, BorderMode bmode,
megcore::AsyncErrorInfo* error_info,
void* error_tracker, hipStream_t stream) {
if (mat_idx) {
IndexedSrcVisitor<ctype> visitor;
visitor.ptr = src;
visitor.idx = mat_idx;
visitor.N_SRC = N_SRC;
visitor.error_info = error_info;
visitor.error_tracker = error_tracker;
dispatch_with_visitor_nchw4(visitor, mat, dst, N_MAT, C, IH, IW, OH, OW,
bval, bmode, stream);
} else {
DirectSrcVisitor<ctype> visitor;
visitor.ptr = src;
dispatch_with_visitor_nchw4(visitor, mat, dst, N_MAT, C, IH, IW, OH, OW,
bval, bmode, stream);
}
after_kernel_launch();
}
template <typename ctype>
void forward_proxy_nchw64(const ctype* src, const float* mat, const int* mat_idx,
ctype* dst, int N_SRC, int N_MAT, int C, int IH,
int IW, int OH, int OW, ctype bval, BorderMode bmode,
megcore::AsyncErrorInfo* error_info,
void* error_tracker, hipStream_t stream) {
if (mat_idx) {
IndexedSrcVisitor<ctype> visitor;
visitor.ptr = src;
visitor.idx = mat_idx;
visitor.N_SRC = N_SRC;
visitor.error_info = error_info;
visitor.error_tracker = error_tracker;
dispatch_with_visitor_nchw64(visitor, mat, dst, N_MAT, C, IH, IW, OH, OW,
bval, bmode, stream);
} else {
DirectSrcVisitor<ctype> visitor;
visitor.ptr = src;
dispatch_with_visitor_nchw64(visitor, mat, dst, N_MAT, C, IH, IW, OH, OW,
bval, bmode, stream);
}
after_kernel_launch();
}
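// Explicit instantiations of the forward proxies for the dtypes supported by
// the operator.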
#define INST(ctype) \
template void forward_proxy(bool, const ctype*, const float*, const int*, \
ctype*, int, int, int, int, int, int, int, \
ctype, BorderMode, megcore::AsyncErrorInfo*, \
void*, hipStream_t);
INST(float)
INST(uint8_t)
#ifndef MEGDNN_DISABLE_FLOAT16
INST(dt_float16)
#endif
INST(int8_t)
#undef INST
#define INST(ctype) \
template void forward_proxy_nchw4( \
const ctype*, const float*, const int*, ctype*, int, int, int, \
int, int, int, int, ctype, BorderMode, megcore::AsyncErrorInfo*, \
void*, hipStream_t);
INST(int8_t)
#undef INST
#define INST(ctype) \
template void forward_proxy_nchw64( \
const ctype*, const float*, const int*, ctype*, int, int, int, \
int, int, int, int, ctype, BorderMode, megcore::AsyncErrorInfo*, \
void*, hipStream_t);
INST(dt_qint4)
INST(dt_quint4)
#undef INST
#define INST(ctype, pack_c) \
template void forward_proxy_nhwc_bit4<ctype, pack_c>( \
const ctype*, const float*, const int*, ctype*, int, int, int, \
int, int, int, int, ctype, BorderMode, megcore::AsyncErrorInfo*, \
void*, hipStream_t);
INST(dt_qint4, 8)
INST(dt_quint4, 8)
INST(dt_qint4, 16)
INST(dt_quint4, 16)
#undef INST
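// Proxy for the fused quint8 warp + dimshuffle + typecvt path with NCHW4
// output; it wraps the dtype parameter into its CUDA counterpart before
// dispatching.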
template <typename src_dtype, typename src_ctype, typename dst_ctype>
void forward_proxy_quint8_dimshuffle_typecvt_nchw4(
bool is_nhwc, const src_ctype* src, const float* mat,
const int* mat_idx, dst_ctype* dst, int N_SRC, int N_MAT, int C, int IH,
int IW, int OH, int OW, src_ctype bval, DTypeParamImpl<src_dtype> param,
BorderMode bmode, megcore::AsyncErrorInfo* error_info,
void* error_tracker, hipStream_t stream) {
CudaDTypeParamImpl<src_dtype> dtype_param(param);
if (mat_idx) {
IndexedSrcVisitor<src_ctype> visitor;
visitor.ptr = src;
visitor.idx = mat_idx;
visitor.N_SRC = N_SRC;
visitor.error_info = error_info;
visitor.error_tracker = error_tracker;
dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw4(
is_nhwc, visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval,
dtype_param, bmode, stream);
} else {
DirectSrcVisitor<src_ctype> visitor;
visitor.ptr = src;
dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw4(
is_nhwc, visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval,
dtype_param, bmode, stream);
}
after_kernel_launch();
}
#define INST(src_dtype, src_ctype, dst_ctype) \
template void forward_proxy_quint8_dimshuffle_typecvt_nchw4( \
bool is_nhwc, const src_ctype*, const float*, const int*, \
dst_ctype*, int, int, int, int, int, int, int, src_ctype, \
DTypeParamImpl<src_dtype> param, BorderMode, \
megcore::AsyncErrorInfo*, void*, hipStream_t);
INST(dt_quint8, uint8_t, int8_t)
#undef INST
template <typename src_dtype, typename src_ctype, typename dst_ctype>
void forward_proxy_quint8_dimshuffle_typecvt_nchw(
bool is_nhwc, const src_ctype* src, const float* mat,
const int* mat_idx, dst_ctype* dst, int N_SRC, int N_MAT, int C, int IH,
int IW, int OH, int OW, src_ctype bval, DTypeParamImpl<src_dtype> param,
BorderMode bmode, megcore::AsyncErrorInfo* error_info,
void* error_tracker, hipStream_t stream) {
CudaDTypeParamImpl<src_dtype> dtype_param(param);
if (mat_idx) {
IndexedSrcVisitor<src_ctype> visitor;
visitor.ptr = src;
visitor.idx = mat_idx;
visitor.N_SRC = N_SRC;
visitor.error_info = error_info;
visitor.error_tracker = error_tracker;
dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw(
is_nhwc, visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval,
dtype_param, bmode, stream);
} else {
DirectSrcVisitor<src_ctype> visitor;
visitor.ptr = src;
dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw(
is_nhwc, visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval,
dtype_param, bmode, stream);
}
after_kernel_launch();
}
#define INST(src_dtype, src_ctype, dst_ctype) \
template void forward_proxy_quint8_dimshuffle_typecvt_nchw( \
bool is_nhwc, const src_ctype*, const float*, const int*, \
dst_ctype*, int, int, int, int, int, int, int, src_ctype, \
DTypeParamImpl<src_dtype> param, BorderMode, \
megcore::AsyncErrorInfo*, void*, hipStream_t);
INST(dt_quint8, uint8_t, float)
#undef INST
} // namespace warp_perspective
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
| 1768b55772567a471069c82d953f25954effa63c.cu | /**
* \file dnn/src/cuda/warp_perspective/forward.cu
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#include "src/cuda/warp_perspective/common.h"
#include "src/cuda/utils.cuh"
#include "src/cuda/warp_perspective/common.cuh"
#include "src/cuda/error_info.cuh"
#include "src/common/rounding_converter.cuh"
#include "megdnn/dtype.h"
#include <cstdio>
#include "src/cuda/integer_subbyte_utils.cuh"
using namespace megdnn;
using namespace cuda;
using namespace warp_perspective;
using namespace integer_subbyte;
namespace {
template <typename ctype>
struct CtypeHelper;
template <>
struct CtypeHelper<float> {
static constexpr int bit_width = 32;
};
template <>
struct CtypeHelper<dt_float16> {
static constexpr int bit_width = 16;
};
template <>
struct CtypeHelper<dt_uint8> {
static constexpr int bit_width = 8;
};
template <>
struct CtypeHelper<dt_int8> {
static constexpr int bit_width = 8;
};
template <>
struct CtypeHelper<dt_qint4> {
static constexpr int bit_width = 4;
};
template <>
struct CtypeHelper<dt_quint4> {
static constexpr int bit_width = 4;
};
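// Source visitors abstract how the per-batch source pointer is obtained.
// DirectSrcVisitor indexes the batch directly; IndexedSrcVisitor additionally
// translates the batch index through mat_idx and reports out-of-bound indices
// via the async error info, falling back to batch 0.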
template <typename ctype>
struct DirectSrcVisitor {
const void* ptr;
__device__ __forceinline__ const ctype* get(int batch, int im_size) {
return (ctype*)((char*)ptr + static_cast<int64_t>(batch) *
static_cast<int64_t>(im_size) *
CtypeHelper<ctype>::bit_width / 8);
}
void move_batch(size_t batch, size_t im_size) {
ptr = (char*)ptr + batch * im_size * CtypeHelper<ctype>::bit_width / 8;
}
};
template <typename ctype>
struct IndexedSrcVisitor {
const void* ptr;
const int* idx;
int N_SRC;
AsyncErrorInfo* error_info;
void* error_tracker;
__device__ __forceinline__ const ctype* get(int batch, int im_size) {
int orig_batch = batch;
batch = idx[batch];
if (batch < 0 || batch >= N_SRC) {
set_async_error_info(
error_info, error_tracker,
"mat_idx out of bound: mat_idx[%d]=%d src_batch=%d",
orig_batch, batch, N_SRC);
batch = 0;
}
return (ctype*)((char*)ptr + static_cast<int64_t>(batch) *
static_cast<int64_t>(im_size) *
CtypeHelper<ctype>::bit_width / 8);
}
void move_batch(size_t batch, size_t) { idx += batch; }
};
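// Generic NCHW kernel: each thread computes one output pixel. The output
// coordinate (ow, oh) is mapped through the 3x3 perspective matrix,
//   iw = (m0*ow + m1*oh + m2) / (m6*ow + m7*oh + m8)
//   ih = (m3*ow + m4*oh + m5) / (m6*ow + m7*oh + m8)
// and the result is bilinearly interpolated from the four neighbouring source
// pixels, with the Getter remapping coordinates that fall outside the image.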
template <typename ctype, typename Getter, typename SrcVisitor,
typename OutputConverter>
__global__ void kern_general(SrcVisitor src, const float* __restrict mat,
ctype* __restrict dst, int C, int IH, int IW,
int OH, int OW) {
Getter getter;
OutputConverter output_converter;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
dst += blockIdx.z * C * OH * OW;
mat += blockIdx.z * 3 * 3;
if (ow < OW && oh < OH) {
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = getter(floor(iw) + 0, IW);
int iw1 = getter(floor(iw) + 1, IW);
int ih0 = getter(floor(ih) + 0, IH);
int ih1 = getter(floor(ih) + 1, IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
for (int c = 0; c < C; ++c) {
dst[oh * OW + ow] =
output_converter(sptr[ih0 * IW + iw0] * nalpha * nbeta +
sptr[ih0 * IW + iw1] * nalpha * pbeta +
sptr[ih1 * IW + iw0] * palpha * nbeta +
sptr[ih1 * IW + iw1] * palpha * pbeta);
sptr += IH * IW;
dst += OH * OW;
}
}
}
template <typename ctype, typename Getter, typename SrcVisitor,
typename OutputConverter>
__global__ void kern_general_nchw4(SrcVisitor src, const float* __restrict mat,
ctype* __restrict dst, int C, int IH, int IW,
int OH, int OW) {
Getter getter;
OutputConverter output_converter;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
dst += blockIdx.z * C * OH * OW;
mat += blockIdx.z * 3 * 3;
if (ow < OW && oh < OH) {
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = getter(floor(iw) + 0, IW);
int iw1 = getter(floor(iw) + 1, IW);
int ih0 = getter(floor(ih) + 0, IH);
int ih1 = getter(floor(ih) + 1, IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
int o_coor = (oh * OW + ow) << 2;
int i_coor_00 = (ih0 * IW + iw0) << 2;
int i_coor_01 = (ih0 * IW + iw1) << 2;
int i_coor_10 = (ih1 * IW + iw0) << 2;
int i_coor_11 = (ih1 * IW + iw1) << 2;
for (int c0 = 0, nr_chan = C / 4; c0 < nr_chan; ++c0) {
#pragma unroll
for (int c1 = 0; c1 < 4; ++c1) {
dst[o_coor + c1] =
output_converter(sptr[i_coor_00 + c1] * nalpha * nbeta +
sptr[i_coor_01 + c1] * nalpha * pbeta +
sptr[i_coor_10 + c1] * palpha * nbeta +
sptr[i_coor_11 + c1] * palpha * pbeta);
}
sptr += IH * IW * 4;
dst += OH * OW * 4;
}
}
}
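// pack_output_func interpolates eight 4-bit lanes at once: the unpacked int8
// lanes of the four source pixels are blended with the bilinear weights,
// rounded by the output converter, and repacked into a single 32-bit word.
// It is shared by the NCHW64 kernels and the packed-NHWC cores below.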
template <bool signedness, typename OutputConverter>
MEGDNN_DEVICE __forceinline__ int pack_output_func(
OutputConverter& output_converter, int (&s00)[8], int (&s01)[8],
int (&s10)[8], int (&s11)[8], float w00, float w01, float w10,
float w11) {
#define warp_perspective_transform(idx) \
static_cast<int>(output_converter(s00[idx] * w00 + s01[idx] * w01 + \
s10[idx] * w10 + s11[idx] * w11) \
.as_storage())
return transform_int8_to_b4x8<signedness>(
warp_perspective_transform(0), warp_perspective_transform(1),
warp_perspective_transform(2), warp_perspective_transform(3),
warp_perspective_transform(4), warp_perspective_transform(5),
warp_perspective_transform(6), warp_perspective_transform(7));
#undef warp_perspective_transform
}
template <typename ctype, typename Getter, typename SrcVisitor,
typename OutputConverter>
__global__ void kern_general_nchw64(SrcVisitor src, const float* __restrict mat,
ctype* __restrict dst, int C, int IH,
int IW, int OH, int OW) {
constexpr bool signedness = std::is_same<ctype, dt_qint4>::value;
Getter getter;
OutputConverter output_converter;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int c1 = ow % 2;
ow = ow / 2;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
dst += blockIdx.z * C * OH * OW / 2;
mat += blockIdx.z * 3 * 3;
const int4* sptr_int4 = reinterpret_cast<const int4*>(sptr);
int4* dst_int4 = reinterpret_cast<int4*>(dst);
if (ow < OW && oh < OH) {
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = getter(floor(iw) + 0, IW);
int iw1 = getter(floor(iw) + 1, IW);
int ih0 = getter(floor(ih) + 0, IH);
int ih1 = getter(floor(ih) + 1, IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
float w00 = nalpha * nbeta;
float w01 = nalpha * pbeta;
float w10 = palpha * nbeta;
float w11 = palpha * pbeta;
int o_coor = (oh * OW + ow) << 1;
int i_coor_00 = (ih0 * IW + iw0) << 1;
int i_coor_01 = (ih0 * IW + iw1) << 1;
int i_coor_10 = (ih1 * IW + iw0) << 1;
int i_coor_11 = (ih1 * IW + iw1) << 1;
int s00[8], s01[8], s10[8], s11[8];
int4 s[4], d;
for (int c0 = 0, nr_chan = C / 64; c0 < nr_chan; ++c0) {
s[0] = __ldg(sptr_int4 + i_coor_00 + c1);
s[1] = __ldg(sptr_int4 + i_coor_01 + c1);
s[2] = __ldg(sptr_int4 + i_coor_10 + c1);
s[3] = __ldg(sptr_int4 + i_coor_11 + c1);
transform_b4x8_to_int8<signedness>(s00, s[0].x);
transform_b4x8_to_int8<signedness>(s01, s[1].x);
transform_b4x8_to_int8<signedness>(s10, s[2].x);
transform_b4x8_to_int8<signedness>(s11, s[3].x);
d.x = pack_output_func<signedness>(output_converter, s00, s01, s10,
s11, w00, w01, w10, w11);
transform_b4x8_to_int8<signedness>(s00, s[0].y);
transform_b4x8_to_int8<signedness>(s01, s[1].y);
transform_b4x8_to_int8<signedness>(s10, s[2].y);
transform_b4x8_to_int8<signedness>(s11, s[3].y);
d.y = pack_output_func<signedness>(output_converter, s00, s01, s10,
s11, w00, w01, w10, w11);
transform_b4x8_to_int8<signedness>(s00, s[0].z);
transform_b4x8_to_int8<signedness>(s01, s[1].z);
transform_b4x8_to_int8<signedness>(s10, s[2].z);
transform_b4x8_to_int8<signedness>(s11, s[3].z);
d.z = pack_output_func<signedness>(output_converter, s00, s01, s10,
s11, w00, w01, w10, w11);
transform_b4x8_to_int8<signedness>(s00, s[0].w);
transform_b4x8_to_int8<signedness>(s01, s[1].w);
transform_b4x8_to_int8<signedness>(s10, s[2].w);
transform_b4x8_to_int8<signedness>(s11, s[3].w);
d.w = pack_output_func<signedness>(output_converter, s00, s01, s10,
s11, w00, w01, w10, w11);
dst_int4[o_coor + c1] = d;
sptr_int4 += IH * IW * 2;
dst_int4 += OH * OW * 2;
}
}
}
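// BORDER_CONSTANT counterparts of the kernels above: instead of remapping
// out-of-range coordinates, samples that fall outside the source image are
// replaced by the constant border value bval.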
template <typename ctype, typename SrcVisitor, typename OutputConverter>
__global__ void kern_const_border(SrcVisitor src, const float* __restrict mat,
ctype* __restrict dst, int C, int IH, int IW,
int OH, int OW, ctype bval) {
OutputConverter output_converter;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
dst += blockIdx.z * C * OH * OW;
mat += blockIdx.z * 3 * 3;
if (ow < OW && oh < OH) {
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = floor(iw) + 0;
int iw1 = floor(iw) + 1;
int ih0 = floor(ih) + 0;
int ih1 = floor(ih) + 1;
bool okw0 = (iw0 >= 0 && iw0 < IW);
bool okw1 = (iw1 >= 0 && iw1 < IW);
bool okh0 = (ih0 >= 0 && ih0 < IH);
bool okh1 = (ih1 >= 0 && ih1 < IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
for (int c = 0; c < C; ++c) {
ctype v00 = (okh0 && okw0 ? sptr[ih0 * IW + iw0] : bval);
ctype v01 = (okh0 && okw1 ? sptr[ih0 * IW + iw1] : bval);
ctype v10 = (okh1 && okw0 ? sptr[ih1 * IW + iw0] : bval);
ctype v11 = (okh1 && okw1 ? sptr[ih1 * IW + iw1] : bval);
ctype val = output_converter(
v00 * nalpha * nbeta + v01 * nalpha * pbeta +
v10 * palpha * nbeta + v11 * palpha * pbeta);
dst[oh * OW + ow] = val;
sptr += IH * IW;
dst += OH * OW;
}
}
}
template <typename ctype, typename SrcVisitor, typename OutputConverter>
__global__ void kern_const_border_nchw4(SrcVisitor src,
const float* __restrict mat,
ctype* __restrict dst, int C, int IH,
int IW, int OH, int OW, ctype bval) {
OutputConverter output_converter;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
dst += blockIdx.z * C * OH * OW;
mat += blockIdx.z * 3 * 3;
if (ow < OW && oh < OH) {
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = floor(iw) + 0;
int iw1 = floor(iw) + 1;
int ih0 = floor(ih) + 0;
int ih1 = floor(ih) + 1;
bool okw0 = (iw0 >= 0 && iw0 < IW);
bool okw1 = (iw1 >= 0 && iw1 < IW);
bool okh0 = (ih0 >= 0 && ih0 < IH);
bool okh1 = (ih1 >= 0 && ih1 < IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
int i_coor_00 = (ih0 * IW + iw0) << 2;
int i_coor_01 = (ih0 * IW + iw1) << 2;
int i_coor_10 = (ih1 * IW + iw0) << 2;
int i_coor_11 = (ih1 * IW + iw1) << 2;
int o_coor = (oh * OW + ow) << 2;
for (int c0 = 0, nr_chan = C / 4; c0 < nr_chan; ++c0) {
#pragma unroll
for (int c1 = 0; c1 < 4; ++c1) {
ctype v00 = (okh0 && okw0 ? sptr[i_coor_00 + c1] : bval);
ctype v01 = (okh0 && okw1 ? sptr[i_coor_01 + c1] : bval);
ctype v10 = (okh1 && okw0 ? sptr[i_coor_10 + c1] : bval);
ctype v11 = (okh1 && okw1 ? sptr[i_coor_11 + c1] : bval);
ctype val = output_converter(
v00 * nalpha * nbeta + v01 * nalpha * pbeta +
v10 * palpha * nbeta + v11 * palpha * pbeta);
dst[o_coor + c1] = val;
}
sptr += IH * IW * 4;
dst += OH * OW * 4;
}
}
}
template <typename ctype, typename SrcVisitor, typename OutputConverter>
__global__ void kern_const_border_nchw64(SrcVisitor src,
const float* __restrict mat,
ctype* __restrict dst, int C, int IH,
int IW, int OH, int OW, ctype bval) {
constexpr bool signedness = std::is_same<ctype, dt_qint4>::value;
OutputConverter output_converter;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int c1 = ow % 2;
ow = ow / 2;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
dst += blockIdx.z * C * OH * OW / 2;
mat += blockIdx.z * 3 * 3;
const int4* sptr_int4 = reinterpret_cast<const int4*>(sptr);
int4* dst_int4 = reinterpret_cast<int4*>(dst);
if (ow < OW && oh < OH) {
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = floor(iw) + 0;
int iw1 = floor(iw) + 1;
int ih0 = floor(ih) + 0;
int ih1 = floor(ih) + 1;
bool okw0 = (iw0 >= 0 && iw0 < IW);
bool okw1 = (iw1 >= 0 && iw1 < IW);
bool okh0 = (ih0 >= 0 && ih0 < IH);
bool okh1 = (ih1 >= 0 && ih1 < IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
float w00 = nalpha * nbeta;
float w01 = nalpha * pbeta;
float w10 = palpha * nbeta;
float w11 = palpha * pbeta;
int o_coor = (oh * OW + ow) << 1;
int i_coor_00 = (ih0 * IW + iw0) << 1;
int i_coor_01 = (ih0 * IW + iw1) << 1;
int i_coor_10 = (ih1 * IW + iw0) << 1;
int i_coor_11 = (ih1 * IW + iw1) << 1;
bool flag00 = okh0 && okw0, flag01 = okh0 && okw1,
flag10 = okh1 && okw0, flag11 = okh1 && okw1;
int8_t bval_4 = bval.as_storage() & 0xF;
int bval_8 = transform_int8_to_b4x8<signedness>(
bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4);
int4 bval_int4;
bval_int4.x = bval_8;
bval_int4.y = bval_8;
bval_int4.z = bval_8;
bval_int4.w = bval_8;
int s00[8], s01[8], s10[8], s11[8];
int4 s[4], d;
for (int c0 = 0, nr_chan = C / 64; c0 < nr_chan; ++c0) {
if (flag00) {
s[0] = __ldg(sptr_int4 + i_coor_00 + c1);
} else {
s[0] = bval_int4;
}
if (flag01) {
s[1] = __ldg(sptr_int4 + i_coor_01 + c1);
} else {
s[1] = bval_int4;
}
if (flag10) {
s[2] = __ldg(sptr_int4 + i_coor_10 + c1);
} else {
s[2] = bval_int4;
}
if (flag11) {
s[3] = __ldg(sptr_int4 + i_coor_11 + c1);
} else {
s[3] = bval_int4;
}
transform_b4x8_to_int8<signedness>(s00, s[0].x);
transform_b4x8_to_int8<signedness>(s01, s[1].x);
transform_b4x8_to_int8<signedness>(s10, s[2].x);
transform_b4x8_to_int8<signedness>(s11, s[3].x);
d.x = pack_output_func<signedness>(output_converter, s00, s01, s10,
s11, w00, w01, w10, w11);
transform_b4x8_to_int8<signedness>(s00, s[0].y);
transform_b4x8_to_int8<signedness>(s01, s[1].y);
transform_b4x8_to_int8<signedness>(s10, s[2].y);
transform_b4x8_to_int8<signedness>(s11, s[3].y);
d.y = pack_output_func<signedness>(output_converter, s00, s01, s10,
s11, w00, w01, w10, w11);
transform_b4x8_to_int8<signedness>(s00, s[0].z);
transform_b4x8_to_int8<signedness>(s01, s[1].z);
transform_b4x8_to_int8<signedness>(s10, s[2].z);
transform_b4x8_to_int8<signedness>(s11, s[3].z);
d.z = pack_output_func<signedness>(output_converter, s00, s01, s10,
s11, w00, w01, w10, w11);
transform_b4x8_to_int8<signedness>(s00, s[0].w);
transform_b4x8_to_int8<signedness>(s01, s[1].w);
transform_b4x8_to_int8<signedness>(s10, s[2].w);
transform_b4x8_to_int8<signedness>(s11, s[3].w);
d.w = pack_output_func<signedness>(output_converter, s00, s01, s10,
s11, w00, w01, w10, w11);
dst_int4[o_coor + c1] = d;
sptr_int4 += IH * IW * 2;
dst_int4 += OH * OW * 2;
}
}
}
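// KernCoreNHWC computes one packed group of channels for the NHWC kernels.
// pack_c == 1 handles plain element types; the pack_c == 8 and pack_c == 16
// specializations operate on 4-bit types, unpacking 8 (resp. 16) lanes,
// blending them, and repacking the result.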
template <typename ctype, typename OutputConverter, int pack_c>
struct KernCoreNHWC {
    MEGDNN_DEVICE __forceinline__ static void func(
            char* dst_ptr, const char* src_ptr0, const char* src_ptr1,
            const char* src_ptr2, const char* src_ptr3, const int offset,
            float w00, float w01, float w10, float w11,
            OutputConverter& output_converter, const bool src0_ok,
            const bool src1_ok, const bool src2_ok, const bool src3_ok,
            const ctype bval) {
        static_assert(pack_c == 1, "static_assert pack_c == 1");
        ctype v00 = src0_ok ? *(ctype*)(src_ptr0 + offset) : bval;
        ctype v01 = src1_ok ? *(ctype*)(src_ptr1 + offset) : bval;
        ctype v10 = src2_ok ? *(ctype*)(src_ptr2 + offset) : bval;
        ctype v11 = src3_ok ? *(ctype*)(src_ptr3 + offset) : bval;
        ctype res = output_converter(v00 * w00 + v01 * w01 +
                                     v10 * w10 + v11 * w11);
        *(ctype*)(dst_ptr + offset) = res;
    }
};
template <typename ctype, typename OutputConverter>
struct KernCoreNHWC<ctype, OutputConverter, 8> {
    MEGDNN_DEVICE __forceinline__ static void func(
            char* dst_ptr, const char* src_ptr0, const char* src_ptr1,
            const char* src_ptr2, const char* src_ptr3, const int offset,
            float w00, float w01, float w10, float w11,
            OutputConverter& output_converter, const bool src0_ok,
            const bool src1_ok, const bool src2_ok, const bool src3_ok,
            const ctype bval) {
static_assert(std::is_same<ctype, dt_quint4>::value ||
std::is_same<ctype, dt_qint4>::value,
"assert qu4 or q4");
constexpr bool signedness = std::is_same<ctype, dt_qint4>::value;
int8_t bval_4 = bval.as_storage() & 0xF;
const int bval_int = transform_int8_to_b4x8<signedness>(
bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4);
int src_ori[4];
src_ori[0] = src0_ok ? *(int*)(src_ptr0 + offset) : bval_int;
src_ori[1] = src1_ok ? *(int*)(src_ptr1 + offset) : bval_int;
src_ori[2] = src2_ok ? *(int*)(src_ptr2 + offset) : bval_int;
src_ori[3] = src3_ok ? *(int*)(src_ptr3 + offset) : bval_int;
int src[4][8];
transform_b4x8_to_int8<signedness>(src[0], src_ori[0]);
transform_b4x8_to_int8<signedness>(src[1], src_ori[1]);
transform_b4x8_to_int8<signedness>(src[2], src_ori[2]);
transform_b4x8_to_int8<signedness>(src[3], src_ori[3]);
int res = pack_output_func<signedness>(output_converter, src[0], src[1],
src[2], src[3], w00, w01, w10,
w11);
*(int*)(dst_ptr + offset) = res;
}
};
template <typename ctype, typename OutputConverter>
struct KernCoreNHWC<ctype, OutputConverter, 16> {
MEGDNN_DEVICE __forceinline__ static void func(
char* dst_ptr, const char* src_ptr0, const char* src_ptr1,
const char* src_ptr2, const char* src_ptr3, const int offset,
float w00, float w01, float w10, float w11,
OutputConverter& output_converter, const bool src0_ok,
const bool src1_ok, const bool src2_ok, const bool src3_ok,
const ctype bval) {
static_assert(std::is_same<ctype, dt_quint4>::value ||
std::is_same<ctype, dt_qint4>::value,
"assert qu4 or q4");
constexpr bool signedness = std::is_same<ctype, dt_qint4>::value;
int8_t bval_4 = bval.as_storage() & 0xF;
const int bval_int_temp = transform_int8_to_b4x8<signedness>(
bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4, bval_4);
const int2 bval_int{bval_int_temp, bval_int_temp};
int2 src_ori[4];
src_ori[0] = src0_ok ? *(int2*)(src_ptr0 + offset) : bval_int;
src_ori[1] = src1_ok ? *(int2*)(src_ptr1 + offset) : bval_int;
src_ori[2] = src2_ok ? *(int2*)(src_ptr2 + offset) : bval_int;
src_ori[3] = src3_ok ? *(int2*)(src_ptr3 + offset) : bval_int;
int src[8][8];
transform_b4x8_to_int8<signedness>(src[0], src_ori[0].x);
transform_b4x8_to_int8<signedness>(src[1], src_ori[1].x);
transform_b4x8_to_int8<signedness>(src[2], src_ori[2].x);
transform_b4x8_to_int8<signedness>(src[3], src_ori[3].x);
transform_b4x8_to_int8<signedness>(src[4], src_ori[0].y);
transform_b4x8_to_int8<signedness>(src[5], src_ori[1].y);
transform_b4x8_to_int8<signedness>(src[6], src_ori[2].y);
transform_b4x8_to_int8<signedness>(src[7], src_ori[3].y);
int2 res;
res.x = pack_output_func<signedness>(output_converter, src[0], src[1],
src[2], src[3], w00, w01, w10,
w11);
res.y = pack_output_func<signedness>(output_converter, src[4], src[5],
src[6], src[7], w00, w01, w10,
w11);
*(int2*)(dst_ptr + offset) = res;
}
};
template <typename ctype, typename Getter, typename SrcVisitor,
typename OutputConverter, int pack_c>
__global__ void kern_general_nhwc(SrcVisitor src, const float* __restrict mat,
ctype* __restrict dst, int C, int IH, int IW,
int OH, int OW) {
Getter getter;
OutputConverter output_converter;
constexpr int bit_width = CtypeHelper<ctype>::bit_width;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
dst = (ctype*)((char*)dst + blockIdx.z * C * OH * OW * bit_width / 8);
mat += blockIdx.z * 3 * 3;
if (ow < OW && oh < OH) {
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = getter(floor(iw) + 0, IW);
int iw1 = getter(floor(iw) + 1, IW);
int ih0 = getter(floor(ih) + 0, IH);
int ih1 = getter(floor(ih) + 1, IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
float w00 = nalpha * nbeta;
float w01 = nalpha * pbeta;
float w10 = palpha * nbeta;
float w11 = palpha * pbeta;
const char* src_ptr0 =
(char*)sptr + (ih0 * IW + iw0) * C * bit_width / 8;
const char* src_ptr1 =
(char*)sptr + (ih0 * IW + iw1) * C * bit_width / 8;
const char* src_ptr2 =
(char*)sptr + (ih1 * IW + iw0) * C * bit_width / 8;
const char* src_ptr3 =
(char*)sptr + (ih1 * IW + iw1) * C * bit_width / 8;
char* dst_ptr = (char*)dst + (oh * OW + ow) * C * bit_width / 8;
for (int c = 0; c < C; c += pack_c) {
KernCoreNHWC<ctype, OutputConverter, pack_c>::func(
dst_ptr, src_ptr0, src_ptr1, src_ptr2, src_ptr3,
c * bit_width / 8, w00, w01, w10, w11, output_converter,
true, true, true, true, (ctype)0);
}
}
}
template <typename ctype, typename Getter, typename SrcVisitor,
typename OutputConverter, int pack_c>
__global__ void kern_general_nhwc_const(SrcVisitor src, const float* __restrict mat,
ctype* __restrict dst, int C, int IH, int IW,
int OH, int OW, ctype bval) {
Getter getter;
OutputConverter output_converter;
constexpr int bit_width = CtypeHelper<ctype>::bit_width;
int ow = blockIdx.x * blockDim.x + threadIdx.x;
int oh = blockIdx.y * blockDim.y + threadIdx.y;
const ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW);
dst = (ctype*)((char*)dst + blockIdx.z * C * OH * OW * bit_width / 8);
mat += blockIdx.z * 3 * 3;
if (ow < OW && oh < OH) {
float denominator = mat[6] * ow + mat[7] * oh + mat[8];
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator;
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator;
int iw0 = getter(floor(iw) + 0, IW);
int iw1 = getter(floor(iw) + 1, IW);
int ih0 = getter(floor(ih) + 0, IH);
int ih1 = getter(floor(ih) + 1, IH);
float palpha = ih - floor(ih);
float pbeta = iw - floor(iw);
float nalpha = 1.0f - palpha;
float nbeta = 1.0f - pbeta;
float w00 = nalpha * nbeta;
float w01 = nalpha * pbeta;
float w10 = palpha * nbeta;
float w11 = palpha * pbeta;
const char* src_ptr0 = (char*)sptr + (ih0 * IW + iw0) * C * bit_width / 8;
const char* src_ptr1 = (char*)sptr + (ih0 * IW + iw1) * C * bit_width / 8;
const char* src_ptr2 = (char*)sptr + (ih1 * IW + iw0) * C * bit_width / 8;
const char* src_ptr3 = (char*)sptr + (ih1 * IW + iw1) * C * bit_width / 8;
char* dst_ptr = (char*)dst + (oh * OW + ow) * C * bit_width / 8;
bool okw0 = (iw0 >= 0 && iw0 < IW);
bool okw1 = (iw1 >= 0 && iw1 < IW);
bool okh0 = (ih0 >= 0 && ih0 < IH);
bool okh1 = (ih1 >= 0 && ih1 < IH);
bool src0_ok = okh0 && okw0;
bool src1_ok = okh0 && okw1;
bool src2_ok = okh1 && okw0;
bool src3_ok = okh1 && okw1;
for (int c = 0; c < C; c += pack_c) {
            KernCoreNHWC<ctype, OutputConverter, pack_c>::func(
                    dst_ptr, src_ptr0, src_ptr1, src_ptr2, src_ptr3,
                    c * bit_width / 8, w00, w01, w10, w11, output_converter,
                    src0_ok, src1_ok, src2_ok, src3_ok, bval);
}
}
}
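// Host-side dispatchers: pick the kernel by layout and border mode, then
// launch it over (OW, OH, batch) blocks, splitting the batch into chunks of at
// most 65535 images, presumably to stay within the CUDA grid.z limit.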
template <typename ctype, typename SrcVisitor>
void dispatch_with_visitor(bool is_nhwc, SrcVisitor src, const float* mat,
ctype* dst, int N, int C, int IH, int IW, int OH,
int OW, ctype bval, BorderMode bmode,
cudaStream_t stream) {
constexpr int pack_c = 1;
const int BY = 16, BX = 32;
#define DISPATCH(Getter) \
do { \
if (is_nhwc) { \
kern_general_nhwc<ctype, Getter, SrcVisitor, \
rounding::RoundingConverter<ctype>, pack_c> \
<<<blocks, threads, 0, stream>>>(src, mat, dst, C, IH, IW, \
OH, OW); \
} else { \
kern_general<ctype, Getter, SrcVisitor, \
rounding::RoundingConverter<ctype>> \
<<<blocks, threads, 0, stream>>>(src, mat, dst, C, IH, IW, \
OH, OW); \
} \
} while (0)
const int max_batch_size = 65535;
while (N) {
size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
dim3 threads(BX, BY);
dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
switch (bmode) {
case BORDER_REPLICATE:
DISPATCH(ReplicateGetter);
break;
case BORDER_REFLECT:
DISPATCH(ReflectGetter);
break;
case BORDER_REFLECT_101:
DISPATCH(Reflect101Getter);
break;
case BORDER_WRAP:
DISPATCH(WrapGetter);
break;
#undef DISPATCH
case BORDER_CONSTANT:
if (is_nhwc) {
kern_general_nhwc_const<ctype, ConstGetter, SrcVisitor,
rounding::RoundingConverter<ctype>,
pack_c><<<blocks, threads, 0, stream>>>(
src, mat, dst, C, IH, IW, OH, OW, bval);
} else {
kern_const_border<ctype, SrcVisitor,
rounding::RoundingConverter<ctype>>
<<<blocks, threads, 0, stream>>>(
src, mat, dst, C, IH, IW, OH, OW, bval);
}
break;
default:
break;
}
N -= curr_batch_size;
src.move_batch(curr_batch_size, C * IH * IW);
mat += curr_batch_size * 3 * 3;
dst += curr_batch_size * C * OH * OW;
}
}
template <typename ctype, typename SrcVisitor, int pack_c>
void dispatch_with_visitor_nhwc_bit4(SrcVisitor src, const float* mat,
ctype* dst, int N, int C, int IH, int IW,
int OH, int OW, ctype bval,
BorderMode bmode, cudaStream_t stream) {
const int BY = 16, BX = 32;
#define DISPATCH(Getter) \
do { \
kern_general_nhwc<ctype, Getter, SrcVisitor, \
rounding::RoundingConverter<ctype>, pack_c> \
<<<blocks, threads, 0, stream>>>(src, mat, dst, C, IH, IW, OH, \
OW); \
} while (0)
const int max_batch_size = 65535;
while (N) {
size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
dim3 threads(BX, BY);
dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
switch (bmode) {
case BORDER_REPLICATE:
DISPATCH(ReplicateGetter);
break;
case BORDER_REFLECT:
DISPATCH(ReflectGetter);
break;
case BORDER_REFLECT_101:
DISPATCH(Reflect101Getter);
break;
case BORDER_WRAP:
DISPATCH(WrapGetter);
break;
case BORDER_CONSTANT:
{
kern_general_nhwc_const<ctype, ConstGetter, SrcVisitor,
rounding::RoundingConverter<ctype>, pack_c>
<<<blocks, threads, 0, stream>>>(src, mat, dst, C, IH, IW, OH,
OW, bval);
}
break;
default:
break;
}
#undef DISPATCH
N -= curr_batch_size;
src.move_batch(curr_batch_size, C * IH * IW / 2);
mat += curr_batch_size * 3 * 3;
dst += curr_batch_size * C * OH * OW / 2;
}
}
template <typename ctype, typename SrcVisitor>
void dispatch_with_visitor_nchw4(SrcVisitor src, const float* mat, ctype* dst,
int N, int C, int IH, int IW, int OH, int OW,
ctype bval, BorderMode bmode,
cudaStream_t stream) {
const int BY = 16, BX = 32;
#define DISPATCH(Getter) \
do { \
kern_general_nchw4<ctype, Getter, SrcVisitor, \
rounding::RoundingConverter<ctype>> \
<<<blocks, threads, 0, stream>>>(src, mat, dst, C, IH, IW, OH, \
OW); \
} while (0)
const int max_batch_size = 65535;
while (N) {
size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
dim3 threads(BX, BY);
dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
switch (bmode) {
case BORDER_REPLICATE:
DISPATCH(ReplicateGetter);
break;
case BORDER_REFLECT:
DISPATCH(ReflectGetter);
break;
case BORDER_REFLECT_101:
DISPATCH(Reflect101Getter);
break;
case BORDER_WRAP:
DISPATCH(WrapGetter);
break;
#undef DISPATCH
case BORDER_CONSTANT:
kern_const_border_nchw4<ctype, SrcVisitor,
rounding::RoundingConverter<ctype>>
<<<blocks, threads, 0, stream>>>(src, mat, dst, C, IH,
IW, OH, OW, bval);
break;
default:
break;
}
N -= curr_batch_size;
src.move_batch(curr_batch_size, C * IH * IW);
mat += curr_batch_size * 3 * 3;
dst += curr_batch_size * C * OH * OW;
}
}
template <typename ctype, typename SrcVisitor>
void dispatch_with_visitor_nchw64(SrcVisitor src, const float* mat, ctype* dst,
int N, int C, int IH, int IW, int OH, int OW,
ctype bval, BorderMode bmode,
cudaStream_t stream) {
const int BY = 16, BX = 32;
#define DISPATCH(Getter) \
do { \
kern_general_nchw64<ctype, Getter, SrcVisitor, \
rounding::RoundingConverter<ctype>> \
<<<blocks, threads, 0, stream>>>(src, mat, dst, C, IH, IW, OH, \
OW); \
} while (0)
const int max_batch_size = 65535;
while (N) {
size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
dim3 threads(BX, BY);
dim3 blocks((OW * 2 + BX - 1) / BX, (OH + BY - 1) / BY,
curr_batch_size);
switch (bmode) {
case BORDER_REPLICATE:
DISPATCH(ReplicateGetter);
break;
case BORDER_REFLECT:
DISPATCH(ReflectGetter);
break;
case BORDER_REFLECT_101:
DISPATCH(Reflect101Getter);
break;
case BORDER_WRAP:
DISPATCH(WrapGetter);
break;
#undef DISPATCH
case BORDER_CONSTANT:
kern_const_border_nchw64<ctype, SrcVisitor,
rounding::RoundingConverter<ctype>>
<<<blocks, threads, 0, stream>>>(src, mat, dst, C, IH,
IW, OH, OW, bval);
break;
default:
break;
}
N -= curr_batch_size;
src.move_batch(curr_batch_size, C * IH * IW / 2);
mat += curr_batch_size * 3 * 3;
dst += curr_batch_size * C * OH * OW / 2;
}
}
template <typename SrcType, typename DstType>
struct CudaTypeCvt;
template <>
struct CudaTypeCvt<dt_quint8, int8_t> {
CudaDTypeParamImpl<dt_quint8> m_src_param;
CudaTypeCvt(CudaDTypeParamImpl<dt_quint8> src_param) {
m_src_param = src_param;
};
inline __device__ int8_t operator()(uint8_t val) {
return val - m_src_param.zero_point;
}
};
template <>
struct CudaTypeCvt<dt_quint8, float> {
CudaDTypeParamImpl<dt_quint8> m_src_param;
CudaTypeCvt(CudaDTypeParamImpl<dt_quint8> src_param) {
m_src_param = src_param;
};
__device__ __forceinline__ float operator()(uint8_t val) {
return m_src_param.dequantize(dt_quint8(val));
}
};
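// Worked example (illustrative; the parameter values are assumptions): with
// zero_point = 128 and scale = 0.1f in m_src_param, an input byte 138 maps to
// 138 - 128 = 10 through CudaTypeCvt<dt_quint8, int8_t>. Assuming dequantize()
// applies the usual asymmetric rule scale * (val - zero_point), the float
// specialization maps the same byte to (138 - 128) * 0.1f = 1.0f.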
#define INST(dst_ctype, vec_dst_type) \
template <typename src_dtype, typename src_ctype, typename Getter, \
typename SrcVisitor> \
__global__ void kern_general_quint8_nhw_nchw4( \
SrcVisitor src, const float* __restrict mat, \
dst_ctype* __restrict dst, int IH, int IW, int OH, int OW, \
CudaTypeCvt<src_dtype, dst_ctype> type_cvt) { \
Getter getter; \
rounding::RoundingConverter<src_ctype> warp_out_converter; \
int ow = blockIdx.x * blockDim.x + threadIdx.x; \
int oh = blockIdx.y * blockDim.y + threadIdx.y; \
const src_ctype* __restrict sptr = src.get(blockIdx.z, IH * IW); \
dst += blockIdx.z * OH * OW * 4; \
mat += blockIdx.z * 3 * 3; \
if (ow < OW && oh < OH) { \
float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \
int iw0 = getter(floor(iw) + 0, IW); \
int iw1 = getter(floor(iw) + 1, IW); \
int ih0 = getter(floor(ih) + 0, IH); \
int ih1 = getter(floor(ih) + 1, IH); \
float palpha = ih - floor(ih); \
float pbeta = iw - floor(iw); \
float nalpha = 1.0f - palpha; \
float nbeta = 1.0f - pbeta; \
vec_dst_type result; \
src_ctype val_x = \
warp_out_converter(sptr[ih0 * IW + iw0] * nalpha * nbeta + \
sptr[ih0 * IW + iw1] * nalpha * pbeta + \
sptr[ih1 * IW + iw0] * palpha * nbeta + \
sptr[ih1 * IW + iw1] * palpha * pbeta); \
result.x = type_cvt(val_x); \
result.y = result.z = result.w = 0; \
*((vec_dst_type*)dst + oh * OW + ow) = result; \
} \
}
INST(int8_t, char4)
#undef INST
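// Worked example of the bilinear blend used above (illustrative): with
// palpha = ih - floor(ih) = 0.25 and pbeta = iw - floor(iw) = 0.5, the taps at
// (ih0,iw0), (ih0,iw1), (ih1,iw0), (ih1,iw1) are weighted 0.375, 0.375, 0.125
// and 0.125 respectively; the four weights always sum to 1.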
#define INST(dst_ctype, vec_dst_type) \
template <typename src_dtype, typename src_ctype, typename SrcVisitor> \
__global__ void kern_const_border_quint8_nhw_nchw4( \
SrcVisitor src, const float* __restrict mat, \
dst_ctype* __restrict dst, int IH, int IW, int OH, int OW, \
src_ctype bval, CudaTypeCvt<src_dtype, dst_ctype> type_cvt) { \
rounding::RoundingConverter<src_ctype> warp_out_converter; \
int ow = blockIdx.x * blockDim.x + threadIdx.x; \
int oh = blockIdx.y * blockDim.y + threadIdx.y; \
const src_ctype* __restrict sptr = src.get(blockIdx.z, IH * IW); \
dst += blockIdx.z * OH * OW * 4; \
mat += blockIdx.z * 3 * 3; \
if (ow < OW && oh < OH) { \
float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \
int iw0 = floor(iw) + 0; \
int iw1 = floor(iw) + 1; \
int ih0 = floor(ih) + 0; \
int ih1 = floor(ih) + 1; \
bool okw0 = (iw0 >= 0 && iw0 < IW); \
bool okw1 = (iw1 >= 0 && iw1 < IW); \
bool okh0 = (ih0 >= 0 && ih0 < IH); \
bool okh1 = (ih1 >= 0 && ih1 < IH); \
float palpha = ih - floor(ih); \
float pbeta = iw - floor(iw); \
float nalpha = 1.0f - palpha; \
float nbeta = 1.0f - pbeta; \
vec_dst_type result; \
src_ctype v00 = (okh0 && okw0 ? sptr[ih0 * IW + iw0] : bval); \
src_ctype v01 = (okh0 && okw1 ? sptr[ih0 * IW + iw1] : bval); \
src_ctype v10 = (okh1 && okw0 ? sptr[ih1 * IW + iw0] : bval); \
src_ctype v11 = (okh1 && okw1 ? sptr[ih1 * IW + iw1] : bval); \
src_ctype val_x = warp_out_converter( \
v00 * nalpha * nbeta + v01 * nalpha * pbeta + \
v10 * palpha * nbeta + v11 * palpha * pbeta); \
result.x = type_cvt(val_x); \
result.y = result.z = result.w = 0; \
*((vec_dst_type*)dst + oh * OW + ow) = result; \
} \
}
INST(int8_t, char4)
#undef INST
#define INST(dst_ctype, vec_dst_type) \
template <typename src_dtype, typename src_ctype, typename Getter, \
typename SrcVisitor> \
__global__ void kern_general_quint8_n3hw_nchw4( \
SrcVisitor src, const float* __restrict mat, \
dst_ctype* __restrict dst, int IH, int IW, int OH, int OW, \
CudaTypeCvt<src_dtype, dst_ctype> type_cvt) { \
Getter getter; \
rounding::RoundingConverter<src_ctype> warp_out_converter; \
int ow = blockIdx.x * blockDim.x + threadIdx.x; \
int oh = blockIdx.y * blockDim.y + threadIdx.y; \
const src_ctype* __restrict sptr = src.get(blockIdx.z, 3 * IH * IW); \
dst += blockIdx.z * OH * OW * 4; \
mat += blockIdx.z * 3 * 3; \
if (ow < OW && oh < OH) { \
float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \
int iw0 = getter(floor(iw) + 0, IW); \
int iw1 = getter(floor(iw) + 1, IW); \
int ih0 = getter(floor(ih) + 0, IH); \
int ih1 = getter(floor(ih) + 1, IH); \
float palpha = ih - floor(ih); \
float pbeta = iw - floor(iw); \
float nalpha = 1.0f - palpha; \
float nbeta = 1.0f - pbeta; \
vec_dst_type result; \
src_ctype val_x = \
warp_out_converter(sptr[ih0 * IW + iw0] * nalpha * nbeta + \
sptr[ih0 * IW + iw1] * nalpha * pbeta + \
sptr[ih1 * IW + iw0] * palpha * nbeta + \
sptr[ih1 * IW + iw1] * palpha * pbeta); \
src_ctype val_y = warp_out_converter( \
sptr[IW * IH + ih0 * IW + iw0] * nalpha * nbeta + \
sptr[IW * IH + ih0 * IW + iw1] * nalpha * pbeta + \
sptr[IW * IH + ih1 * IW + iw0] * palpha * nbeta + \
sptr[IW * IH + ih1 * IW + iw1] * palpha * pbeta); \
src_ctype val_z = warp_out_converter( \
sptr[2 * IW * IH + ih0 * IW + iw0] * nalpha * nbeta + \
sptr[2 * IW * IH + ih0 * IW + iw1] * nalpha * pbeta + \
sptr[2 * IW * IH + ih1 * IW + iw0] * palpha * nbeta + \
sptr[2 * IW * IH + ih1 * IW + iw1] * palpha * pbeta); \
result.x = type_cvt(val_x); \
result.y = type_cvt(val_y); \
result.z = type_cvt(val_z); \
result.w = 0; \
*((vec_dst_type*)dst + oh * OW + ow) = result; \
} \
}
INST(int8_t, char4)
#undef INST
#define INST(dst_ctype, vec_dst_type) \
template <typename src_dtype, typename src_ctype, typename SrcVisitor> \
__global__ void kern_const_border_quint8_n3hw_nchw4( \
SrcVisitor src, const float* __restrict mat, \
dst_ctype* __restrict dst, int IH, int IW, int OH, int OW, \
src_ctype bval, CudaTypeCvt<src_dtype, dst_ctype> type_cvt) { \
rounding::RoundingConverter<src_ctype> warp_out_converter; \
int ow = blockIdx.x * blockDim.x + threadIdx.x; \
int oh = blockIdx.y * blockDim.y + threadIdx.y; \
const src_ctype* __restrict sptr = src.get(blockIdx.z, 3 * IH * IW); \
dst += blockIdx.z * OH * OW * 4; \
mat += blockIdx.z * 3 * 3; \
if (ow < OW && oh < OH) { \
float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \
int iw0 = floor(iw) + 0; \
int iw1 = floor(iw) + 1; \
int ih0 = floor(ih) + 0; \
int ih1 = floor(ih) + 1; \
bool okw0 = (iw0 >= 0 && iw0 < IW); \
bool okw1 = (iw1 >= 0 && iw1 < IW); \
bool okh0 = (ih0 >= 0 && ih0 < IH); \
bool okh1 = (ih1 >= 0 && ih1 < IH); \
float palpha = ih - floor(ih); \
float pbeta = iw - floor(iw); \
float nalpha = 1.0f - palpha; \
float nbeta = 1.0f - pbeta; \
vec_dst_type result; \
src_ctype v00, v01, v10, v11; \
v00 = (okh0 && okw0 ? sptr[ih0 * IW + iw0] : bval); \
v01 = (okh0 && okw1 ? sptr[ih0 * IW + iw1] : bval); \
v10 = (okh1 && okw0 ? sptr[ih1 * IW + iw0] : bval); \
v11 = (okh1 && okw1 ? sptr[ih1 * IW + iw1] : bval); \
src_ctype val_x = warp_out_converter( \
v00 * nalpha * nbeta + v01 * nalpha * pbeta + \
v10 * palpha * nbeta + v11 * palpha * pbeta); \
v00 = (okh0 && okw0 ? sptr[IH * IW + ih0 * IW + iw0] : bval); \
v01 = (okh0 && okw1 ? sptr[IH * IW + ih0 * IW + iw1] : bval); \
v10 = (okh1 && okw0 ? sptr[IH * IW + ih1 * IW + iw0] : bval); \
v11 = (okh1 && okw1 ? sptr[IH * IW + ih1 * IW + iw1] : bval); \
src_ctype val_y = warp_out_converter( \
v00 * nalpha * nbeta + v01 * nalpha * pbeta + \
v10 * palpha * nbeta + v11 * palpha * pbeta); \
v00 = (okh0 && okw0 ? sptr[2 * IH * IW + ih0 * IW + iw0] : bval); \
v01 = (okh0 && okw1 ? sptr[2 * IH * IW + ih0 * IW + iw1] : bval); \
v10 = (okh1 && okw0 ? sptr[2 * IH * IW + ih1 * IW + iw0] : bval); \
v11 = (okh1 && okw1 ? sptr[2 * IH * IW + ih1 * IW + iw1] : bval); \
src_ctype val_z = warp_out_converter( \
v00 * nalpha * nbeta + v01 * nalpha * pbeta + \
v10 * palpha * nbeta + v11 * palpha * pbeta); \
result.x = type_cvt(val_x); \
result.y = type_cvt(val_y); \
result.z = type_cvt(val_z); \
result.w = 0; \
*((vec_dst_type*)dst + oh * OW + ow) = result; \
} \
}
INST(int8_t, char4)
#undef INST
#define INST(dst_ctype, vec_dst_type) \
template <typename src_dtype, typename src_ctype, typename Getter, \
typename SrcVisitor> \
__global__ void kern_general_quint8_nhw3_nchw4( \
SrcVisitor src, const float* __restrict mat, \
dst_ctype* __restrict dst, int IH, int IW, int OH, int OW, \
CudaTypeCvt<src_dtype, dst_ctype> type_cvt) { \
Getter getter; \
rounding::RoundingConverter<src_ctype> warp_out_converter; \
int ow = blockIdx.x * blockDim.x + threadIdx.x; \
int oh = blockIdx.y * blockDim.y + threadIdx.y; \
const src_ctype* __restrict sptr = src.get(blockIdx.z, 3 * IH * IW); \
dst += blockIdx.z * OH * OW * 4; \
mat += blockIdx.z * 3 * 3; \
if (ow < OW && oh < OH) { \
float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \
int iw0 = getter(floor(iw) + 0, IW); \
int iw1 = getter(floor(iw) + 1, IW); \
int ih0 = getter(floor(ih) + 0, IH); \
int ih1 = getter(floor(ih) + 1, IH); \
float palpha = ih - floor(ih); \
float pbeta = iw - floor(iw); \
float nalpha = 1.0f - palpha; \
float nbeta = 1.0f - pbeta; \
vec_dst_type result; \
src_ctype val_x = warp_out_converter( \
sptr[(ih0 * IW + iw0) * 3] * nalpha * nbeta + \
sptr[(ih0 * IW + iw1) * 3] * nalpha * pbeta + \
sptr[(ih1 * IW + iw0) * 3] * palpha * nbeta + \
sptr[(ih1 * IW + iw1) * 3] * palpha * pbeta); \
src_ctype val_y = warp_out_converter( \
sptr[(ih0 * IW + iw0) * 3 + 1] * nalpha * nbeta + \
sptr[(ih0 * IW + iw1) * 3 + 1] * nalpha * pbeta + \
sptr[(ih1 * IW + iw0) * 3 + 1] * palpha * nbeta + \
sptr[(ih1 * IW + iw1) * 3 + 1] * palpha * pbeta); \
src_ctype val_z = warp_out_converter( \
sptr[(ih0 * IW + iw0) * 3 + 2] * nalpha * nbeta + \
sptr[(ih0 * IW + iw1) * 3 + 2] * nalpha * pbeta + \
sptr[(ih1 * IW + iw0) * 3 + 2] * palpha * nbeta + \
sptr[(ih1 * IW + iw1) * 3 + 2] * palpha * pbeta); \
result.x = type_cvt(val_x); \
result.y = type_cvt(val_y); \
result.z = type_cvt(val_z); \
result.w = 0; \
*((vec_dst_type*)dst + oh * OW + ow) = result; \
} \
}
INST(int8_t, char4)
#undef INST
#define INST(dst_ctype, vec_dst_type) \
template <typename src_dtype, typename src_ctype, typename SrcVisitor> \
__global__ void kern_const_border_quint8_nhw3_nchw4( \
SrcVisitor src, const float* __restrict mat, \
dst_ctype* __restrict dst, int IH, int IW, int OH, int OW, \
src_ctype bval, CudaTypeCvt<src_dtype, dst_ctype> type_cvt) { \
rounding::RoundingConverter<src_ctype> warp_out_converter; \
int ow = blockIdx.x * blockDim.x + threadIdx.x; \
int oh = blockIdx.y * blockDim.y + threadIdx.y; \
const src_ctype* __restrict sptr = src.get(blockIdx.z, 3 * IH * IW); \
dst += blockIdx.z * OH * OW * 4; \
mat += blockIdx.z * 3 * 3; \
if (ow < OW && oh < OH) { \
float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \
int iw0 = floor(iw) + 0; \
int iw1 = floor(iw) + 1; \
int ih0 = floor(ih) + 0; \
int ih1 = floor(ih) + 1; \
bool okw0 = (iw0 >= 0 && iw0 < IW); \
bool okw1 = (iw1 >= 0 && iw1 < IW); \
bool okh0 = (ih0 >= 0 && ih0 < IH); \
bool okh1 = (ih1 >= 0 && ih1 < IH); \
float palpha = ih - floor(ih); \
float pbeta = iw - floor(iw); \
float nalpha = 1.0f - palpha; \
float nbeta = 1.0f - pbeta; \
vec_dst_type result; \
src_ctype v00, v01, v10, v11; \
v00 = (okh0 && okw0 ? sptr[(ih0 * IW + iw0) * 3] : bval); \
v01 = (okh0 && okw1 ? sptr[(ih0 * IW + iw1) * 3] : bval); \
v10 = (okh1 && okw0 ? sptr[(ih1 * IW + iw0) * 3] : bval); \
v11 = (okh1 && okw1 ? sptr[(ih1 * IW + iw1) * 3] : bval); \
src_ctype val_x = warp_out_converter( \
v00 * nalpha * nbeta + v01 * nalpha * pbeta + \
v10 * palpha * nbeta + v11 * palpha * pbeta); \
v00 = (okh0 && okw0 ? sptr[(ih0 * IW + iw0) * 3 + 1] : bval); \
v01 = (okh0 && okw1 ? sptr[(ih0 * IW + iw1) * 3 + 1] : bval); \
v10 = (okh1 && okw0 ? sptr[(ih1 * IW + iw0) * 3 + 1] : bval); \
v11 = (okh1 && okw1 ? sptr[(ih1 * IW + iw1) * 3 + 1] : bval); \
src_ctype val_y = warp_out_converter( \
v00 * nalpha * nbeta + v01 * nalpha * pbeta + \
v10 * palpha * nbeta + v11 * palpha * pbeta); \
v00 = (okh0 && okw0 ? sptr[(ih0 * IW + iw0) * 3 + 2] : bval); \
v01 = (okh0 && okw1 ? sptr[(ih0 * IW + iw1) * 3 + 2] : bval); \
v10 = (okh1 && okw0 ? sptr[(ih1 * IW + iw0) * 3 + 2] : bval); \
v11 = (okh1 && okw1 ? sptr[(ih1 * IW + iw1) * 3 + 2] : bval); \
src_ctype val_z = warp_out_converter( \
v00 * nalpha * nbeta + v01 * nalpha * pbeta + \
v10 * palpha * nbeta + v11 * palpha * pbeta); \
result.x = type_cvt(val_x); \
result.y = type_cvt(val_y); \
result.z = type_cvt(val_z); \
result.w = 0; \
*((vec_dst_type*)dst + oh * OW + ow) = result; \
} \
}
INST(int8_t, char4)
#undef INST
template <typename src_dtype, typename src_ctype, typename dst_ctype,
typename SrcVisitor>
void dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw4(
bool is_nhwc, SrcVisitor src, const float* mat, dst_ctype* dst, int N,
int C, int IH, int IW, int OH, int OW, src_ctype bval,
CudaDTypeParamImpl<src_dtype> param, BorderMode bmode,
cudaStream_t stream) {
const int BY = 16, BX = 32;
CudaTypeCvt<src_dtype, dst_ctype> type_cvt(param);
#define DISPATCH(Getter) \
do { \
if (C == 1) { \
kern_general_quint8_nhw_nchw4<src_dtype, src_ctype, Getter, \
SrcVisitor> \
<<<blocks, threads, 0, stream>>>(src, mat, dst, IH, IW, \
OH, OW, type_cvt); \
} else if (is_nhwc) { \
kern_general_quint8_nhw3_nchw4<src_dtype, src_ctype, Getter, \
SrcVisitor> \
<<<blocks, threads, 0, stream>>>(src, mat, dst, IH, IW, \
OH, OW, type_cvt); \
} else { \
kern_general_quint8_n3hw_nchw4<src_dtype, src_ctype, Getter, \
SrcVisitor> \
<<<blocks, threads, 0, stream>>>(src, mat, dst, IH, IW, \
OH, OW, type_cvt); \
} \
} while (0)
const int max_batch_size = 65535;
while (N) {
size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
dim3 threads(BX, BY);
dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
switch (bmode) {
case BORDER_REPLICATE:
DISPATCH(ReplicateGetter);
break;
case BORDER_REFLECT:
DISPATCH(ReflectGetter);
break;
case BORDER_REFLECT_101:
DISPATCH(Reflect101Getter);
break;
case BORDER_WRAP:
DISPATCH(WrapGetter);
break;
#undef DISPATCH
case BORDER_CONSTANT:
if (C == 1) {
kern_const_border_quint8_nhw_nchw4<src_dtype, src_ctype,
SrcVisitor>
<<<blocks, threads, 0, stream>>>(src, mat, dst, IH,
IW, OH, OW, bval,
type_cvt);
} else if (is_nhwc) {
kern_const_border_quint8_nhw3_nchw4<src_dtype, src_ctype,
SrcVisitor>
<<<blocks, threads, 0, stream>>>(src, mat, dst, IH,
IW, OH, OW, bval,
type_cvt);
} else {
kern_const_border_quint8_n3hw_nchw4<src_dtype, src_ctype,
SrcVisitor>
<<<blocks, threads, 0, stream>>>(src, mat, dst, IH,
IW, OH, OW, bval,
type_cvt);
}
break;
default:
break;
}
N -= curr_batch_size;
src.move_batch(curr_batch_size, C * IH * IW);
mat += curr_batch_size * 3 * 3;
dst += curr_batch_size * 4 * OH * OW;
}
}
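// Layout note (editor's sketch): every nchw4 kernel above writes one char4 per
// output pixel and zero-fills the unused lanes (result.y/z/w for C == 1,
// result.w for C == 3), which is why dst advances by
// curr_batch_size * 4 * OH * OW even though the logical channel count is C.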
#define INST(dst_ctype) \
template <typename src_dtype, typename src_ctype, typename Getter, \
typename SrcVisitor> \
__global__ void kern_general_quint8_nchw( \
SrcVisitor src, const float* __restrict mat, \
dst_ctype* __restrict dst, int C, int IH, int IW, int OH, int OW, \
CudaTypeCvt<src_dtype, dst_ctype> type_cvt) { \
Getter getter; \
rounding::RoundingConverter<src_ctype> warp_out_converter; \
int ow = blockIdx.x * blockDim.x + threadIdx.x; \
int oh = blockIdx.y * blockDim.y + threadIdx.y; \
const src_ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW); \
dst += blockIdx.z * C * OH * OW; \
mat += blockIdx.z * 3 * 3; \
if (ow < OW && oh < OH) { \
float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \
int iw0 = getter(floor(iw) + 0, IW); \
int iw1 = getter(floor(iw) + 1, IW); \
int ih0 = getter(floor(ih) + 0, IH); \
int ih1 = getter(floor(ih) + 1, IH); \
float palpha = ih - floor(ih); \
float pbeta = iw - floor(iw); \
float nalpha = 1.0f - palpha; \
float nbeta = 1.0f - pbeta; \
for (int c = 0; c < C; ++c) { \
src_ctype val = warp_out_converter( \
sptr[ih0 * IW + iw0] * nalpha * nbeta + \
sptr[ih0 * IW + iw1] * nalpha * pbeta + \
sptr[ih1 * IW + iw0] * palpha * nbeta + \
sptr[ih1 * IW + iw1] * palpha * pbeta); \
dst_ctype result; \
result = type_cvt(val); \
dst[oh * OW + ow] = result; \
sptr += IH * IW; \
dst += OH * OW; \
} \
} \
}
INST(float)
#undef INST
#define INST(dst_ctype) \
template <typename src_dtype, typename src_ctype, typename SrcVisitor> \
__global__ void kern_const_border_quint8_nchw( \
SrcVisitor src, const float* __restrict mat, \
dst_ctype* __restrict dst, int C, int IH, int IW, int OH, int OW, \
src_ctype bval, CudaTypeCvt<src_dtype, dst_ctype> type_cvt) { \
rounding::RoundingConverter<src_ctype> warp_out_converter; \
int ow = blockIdx.x * blockDim.x + threadIdx.x; \
int oh = blockIdx.y * blockDim.y + threadIdx.y; \
const src_ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW); \
dst += blockIdx.z * C * OH * OW; \
mat += blockIdx.z * 3 * 3; \
if (ow < OW && oh < OH) { \
float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \
int iw0 = floor(iw) + 0; \
int iw1 = floor(iw) + 1; \
int ih0 = floor(ih) + 0; \
int ih1 = floor(ih) + 1; \
bool okw0 = (iw0 >= 0 && iw0 < IW); \
bool okw1 = (iw1 >= 0 && iw1 < IW); \
bool okh0 = (ih0 >= 0 && ih0 < IH); \
bool okh1 = (ih1 >= 0 && ih1 < IH); \
float palpha = ih - floor(ih); \
float pbeta = iw - floor(iw); \
float nalpha = 1.0f - palpha; \
float nbeta = 1.0f - pbeta; \
for (int c = 0; c < C; ++c) { \
src_ctype v00 = (okh0 && okw0 ? sptr[ih0 * IW + iw0] : bval); \
src_ctype v01 = (okh0 && okw1 ? sptr[ih0 * IW + iw1] : bval); \
src_ctype v10 = (okh1 && okw0 ? sptr[ih1 * IW + iw0] : bval); \
src_ctype v11 = (okh1 && okw1 ? sptr[ih1 * IW + iw1] : bval); \
src_ctype val = warp_out_converter( \
v00 * nalpha * nbeta + v01 * nalpha * pbeta + \
v10 * palpha * nbeta + v11 * palpha * pbeta); \
dst_ctype result; \
result = type_cvt(val); \
dst[oh * OW + ow] = result; \
sptr += IH * IW; \
dst += OH * OW; \
} \
} \
}
INST(float)
#undef INST
#define INST(dst_ctype) \
template <typename src_dtype, typename src_ctype, typename Getter, \
typename SrcVisitor> \
__global__ void kern_general_quint8_nhwc_nchw( \
SrcVisitor src, const float* __restrict mat, \
dst_ctype* __restrict dst, int C, int IH, int IW, int OH, int OW, \
CudaTypeCvt<src_dtype, dst_ctype> type_cvt) { \
Getter getter; \
rounding::RoundingConverter<src_ctype> warp_out_converter; \
int ow = blockIdx.x * blockDim.x + threadIdx.x; \
int oh = blockIdx.y * blockDim.y + threadIdx.y; \
const src_ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW); \
dst += blockIdx.z * C * OH * OW; \
mat += blockIdx.z * 3 * 3; \
if (ow < OW && oh < OH) { \
float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \
int iw0 = getter(floor(iw) + 0, IW); \
int iw1 = getter(floor(iw) + 1, IW); \
int ih0 = getter(floor(ih) + 0, IH); \
int ih1 = getter(floor(ih) + 1, IH); \
float palpha = ih - floor(ih); \
float pbeta = iw - floor(iw); \
float nalpha = 1.0f - palpha; \
float nbeta = 1.0f - pbeta; \
for (int c = 0; c < C; ++c) { \
src_ctype val = warp_out_converter( \
sptr[(ih0 * IW + iw0) * C + c] * nalpha * nbeta + \
sptr[(ih0 * IW + iw1) * C + c] * nalpha * pbeta + \
sptr[(ih1 * IW + iw0) * C + c] * palpha * nbeta + \
sptr[(ih1 * IW + iw1) * C + c] * palpha * pbeta); \
dst_ctype result; \
result = type_cvt(val); \
dst[oh * OW + ow] = result; \
dst += OH * OW; \
} \
} \
}
INST(float)
#undef INST
#define INST(dst_ctype) \
template <typename src_dtype, typename src_ctype, typename SrcVisitor> \
__global__ void kern_const_border_quint8_nhwc_nchw( \
SrcVisitor src, const float* __restrict mat, \
dst_ctype* __restrict dst, int C, int IH, int IW, int OH, int OW, \
src_ctype bval, CudaTypeCvt<src_dtype, dst_ctype> type_cvt) { \
rounding::RoundingConverter<src_ctype> warp_out_converter; \
int ow = blockIdx.x * blockDim.x + threadIdx.x; \
int oh = blockIdx.y * blockDim.y + threadIdx.y; \
const src_ctype* __restrict sptr = src.get(blockIdx.z, C * IH * IW); \
dst += blockIdx.z * C * OH * OW; \
mat += blockIdx.z * 3 * 3; \
if (ow < OW && oh < OH) { \
float denominator = mat[6] * ow + mat[7] * oh + mat[8]; \
float iw = (mat[0] * ow + mat[1] * oh + mat[2]) / denominator; \
float ih = (mat[3] * ow + mat[4] * oh + mat[5]) / denominator; \
int iw0 = floor(iw) + 0; \
int iw1 = floor(iw) + 1; \
int ih0 = floor(ih) + 0; \
int ih1 = floor(ih) + 1; \
bool okw0 = (iw0 >= 0 && iw0 < IW); \
bool okw1 = (iw1 >= 0 && iw1 < IW); \
bool okh0 = (ih0 >= 0 && ih0 < IH); \
bool okh1 = (ih1 >= 0 && ih1 < IH); \
float palpha = ih - floor(ih); \
float pbeta = iw - floor(iw); \
float nalpha = 1.0f - palpha; \
float nbeta = 1.0f - pbeta; \
for (int c = 0; c < C; ++c) { \
src_ctype v00 = (okh0 && okw0 ? sptr[(ih0 * IW + iw0) * C + c] \
: bval); \
src_ctype v01 = (okh0 && okw1 ? sptr[(ih0 * IW + iw1) * C + c] \
: bval); \
src_ctype v10 = (okh1 && okw0 ? sptr[(ih1 * IW + iw0) * C + c] \
: bval); \
src_ctype v11 = (okh1 && okw1 ? sptr[(ih1 * IW + iw1) * C + c] \
: bval); \
float val = warp_out_converter( \
v00 * nalpha * nbeta + v01 * nalpha * pbeta + \
v10 * palpha * nbeta + v11 * palpha * pbeta); \
dst_ctype result; \
result = type_cvt(val); \
dst[oh * OW + ow] = result; \
dst += OH * OW; \
} \
} \
}
INST(float)
#undef INST
template <typename src_dtype, typename src_ctype, typename dst_ctype,
typename SrcVisitor>
void dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw(
bool is_nhwc, SrcVisitor src, const float* mat, dst_ctype* dst, int N,
int C, int IH, int IW, int OH, int OW, src_ctype bval,
CudaDTypeParamImpl<src_dtype> param, BorderMode bmode,
cudaStream_t stream) {
const int BY = 16, BX = 32;
CudaTypeCvt<src_dtype, dst_ctype> type_cvt(param);
#define DISPATCH(Getter) \
do { \
if (is_nhwc) { \
kern_general_quint8_nhwc_nchw<src_dtype, src_ctype, Getter, \
SrcVisitor> \
<<<blocks, threads, 0, stream>>>(src, mat, dst, C, IH, IW, \
OH, OW, type_cvt); \
} else { \
kern_general_quint8_nchw<src_dtype, src_ctype, Getter, SrcVisitor> \
<<<blocks, threads, 0, stream>>>(src, mat, dst, C, IH, IW, \
OH, OW, type_cvt); \
} \
} while (0)
const int max_batch_size = 65535;
while (N) {
size_t curr_batch_size = N < max_batch_size ? N : max_batch_size;
dim3 threads(BX, BY);
dim3 blocks((OW + BX - 1) / BX, (OH + BY - 1) / BY, curr_batch_size);
switch (bmode) {
case BORDER_REPLICATE:
DISPATCH(ReplicateGetter);
break;
case BORDER_REFLECT:
DISPATCH(ReflectGetter);
break;
case BORDER_REFLECT_101:
DISPATCH(Reflect101Getter);
break;
case BORDER_WRAP:
DISPATCH(WrapGetter);
break;
#undef DISPATCH
case BORDER_CONSTANT:
if (is_nhwc) {
kern_const_border_quint8_nhwc_nchw<src_dtype, src_ctype,
SrcVisitor>
<<<blocks, threads, 0, stream>>>(src, mat, dst, C,
IH, IW, OH, OW,
bval, type_cvt);
} else {
kern_const_border_quint8_nchw<src_dtype, src_ctype,
SrcVisitor>
<<<blocks, threads, 0, stream>>>(src, mat, dst, C,
IH, IW, OH, OW,
bval, type_cvt);
}
break;
default:
break;
}
N -= curr_batch_size;
src.move_batch(curr_batch_size, C * IH * IW);
mat += curr_batch_size * 3 * 3;
dst += curr_batch_size * C * OH * OW;
}
}
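// Expansion sketch (illustrative): inside the loop above, with is_nhwc == false
// DISPATCH(ReplicateGetter) expands to roughly
//     kern_general_quint8_nchw<src_dtype, src_ctype, ReplicateGetter,
//                              SrcVisitor>
//         <<<blocks, threads, 0, stream>>>(src, mat, dst, C, IH, IW, OH, OW,
//                                          type_cvt);
// so each non-constant border mode only selects the Getter used to clamp the
// sampled coordinates.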
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace warp_perspective {
template <typename ctype>
void forward_proxy(bool is_nhwc, const ctype* src, const float* mat,
const int* mat_idx, ctype* dst, int N_SRC, int N_MAT, int C,
int IH, int IW, int OH, int OW, ctype bval, BorderMode bmode,
megcore::AsyncErrorInfo* error_info, void* error_tracker,
cudaStream_t stream) {
if (mat_idx) {
IndexedSrcVisitor<ctype> visitor;
visitor.ptr = src;
visitor.idx = mat_idx;
visitor.N_SRC = N_SRC;
visitor.error_info = error_info;
visitor.error_tracker = error_tracker;
dispatch_with_visitor(is_nhwc, visitor, mat, dst, N_MAT, C, IH, IW, OH,
OW, bval, bmode, stream);
} else {
DirectSrcVisitor<ctype> visitor;
visitor.ptr = src;
dispatch_with_visitor(is_nhwc, visitor, mat, dst, N_MAT, C, IH, IW, OH,
OW, bval, bmode, stream);
}
after_kernel_launch();
}
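// Usage sketch (illustrative; the buffer and stream names are assumptions):
//     forward_proxy<float>(/*is_nhwc=*/false, d_src, d_mat, /*mat_idx=*/nullptr,
//                          d_dst, /*N_SRC=*/N, /*N_MAT=*/N, C, IH, IW, OH, OW,
//                          /*bval=*/0.f, BORDER_CONSTANT, error_info,
//                          error_tracker, stream);
// A null mat_idx takes the DirectSrcVisitor path; a non-null mat_idx routes
// through IndexedSrcVisitor so each matrix may reference any source image.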
template <typename ctype, int pack_c>
void forward_proxy_nhwc_bit4(const ctype* src, const float* mat,
const int* mat_idx, ctype* dst, int N_SRC,
int N_MAT, int C, int IH, int IW, int OH, int OW,
ctype bval, BorderMode bmode,
megcore::AsyncErrorInfo* error_info,
void* error_tracker, cudaStream_t stream) {
if (mat_idx) {
IndexedSrcVisitor<ctype> visitor;
visitor.ptr = src;
visitor.idx = mat_idx;
visitor.N_SRC = N_SRC;
visitor.error_info = error_info;
visitor.error_tracker = error_tracker;
dispatch_with_visitor_nhwc_bit4<ctype, IndexedSrcVisitor<ctype>,
pack_c>(visitor, mat, dst, N_MAT, C, IH,
IW, OH, OW, bval, bmode,
stream);
} else {
DirectSrcVisitor<ctype> visitor;
visitor.ptr = src;
dispatch_with_visitor_nhwc_bit4<ctype, DirectSrcVisitor<ctype>, pack_c>(
visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval, bmode,
stream);
}
after_kernel_launch();
}
template <typename ctype>
void forward_proxy_nchw4(const ctype* src, const float* mat, const int* mat_idx,
ctype* dst, int N_SRC, int N_MAT, int C, int IH,
int IW, int OH, int OW, ctype bval, BorderMode bmode,
megcore::AsyncErrorInfo* error_info,
void* error_tracker, cudaStream_t stream) {
if (mat_idx) {
IndexedSrcVisitor<ctype> visitor;
visitor.ptr = src;
visitor.idx = mat_idx;
visitor.N_SRC = N_SRC;
visitor.error_info = error_info;
visitor.error_tracker = error_tracker;
dispatch_with_visitor_nchw4(visitor, mat, dst, N_MAT, C, IH, IW, OH, OW,
bval, bmode, stream);
} else {
DirectSrcVisitor<ctype> visitor;
visitor.ptr = src;
dispatch_with_visitor_nchw4(visitor, mat, dst, N_MAT, C, IH, IW, OH, OW,
bval, bmode, stream);
}
after_kernel_launch();
}
template <typename ctype>
void forward_proxy_nchw64(const ctype* src, const float* mat, const int* mat_idx,
ctype* dst, int N_SRC, int N_MAT, int C, int IH,
int IW, int OH, int OW, ctype bval, BorderMode bmode,
megcore::AsyncErrorInfo* error_info,
void* error_tracker, cudaStream_t stream) {
if (mat_idx) {
IndexedSrcVisitor<ctype> visitor;
visitor.ptr = src;
visitor.idx = mat_idx;
visitor.N_SRC = N_SRC;
visitor.error_info = error_info;
visitor.error_tracker = error_tracker;
dispatch_with_visitor_nchw64(visitor, mat, dst, N_MAT, C, IH, IW, OH, OW,
bval, bmode, stream);
} else {
DirectSrcVisitor<ctype> visitor;
visitor.ptr = src;
dispatch_with_visitor_nchw64(visitor, mat, dst, N_MAT, C, IH, IW, OH, OW,
bval, bmode, stream);
}
after_kernel_launch();
}
#define INST(ctype) \
template void forward_proxy(bool, const ctype*, const float*, const int*, \
ctype*, int, int, int, int, int, int, int, \
ctype, BorderMode, megcore::AsyncErrorInfo*, \
void*, cudaStream_t);
INST(float)
INST(uint8_t)
#ifndef MEGDNN_DISABLE_FLOAT16
INST(dt_float16)
#endif
INST(int8_t)
#undef INST
#define INST(ctype) \
template void forward_proxy_nchw4( \
const ctype*, const float*, const int*, ctype*, int, int, int, \
int, int, int, int, ctype, BorderMode, megcore::AsyncErrorInfo*, \
void*, cudaStream_t);
INST(int8_t)
#undef INST
#define INST(ctype) \
template void forward_proxy_nchw64( \
const ctype*, const float*, const int*, ctype*, int, int, int, \
int, int, int, int, ctype, BorderMode, megcore::AsyncErrorInfo*, \
void*, cudaStream_t);
INST(dt_qint4)
INST(dt_quint4)
#undef INST
#define INST(ctype, pack_c) \
template void forward_proxy_nhwc_bit4<ctype, pack_c>( \
const ctype*, const float*, const int*, ctype*, int, int, int, \
int, int, int, int, ctype, BorderMode, megcore::AsyncErrorInfo*, \
void*, cudaStream_t);
INST(dt_qint4, 8)
INST(dt_quint4, 8)
INST(dt_qint4, 16)
INST(dt_quint4, 16)
#undef INST
template <typename src_dtype, typename src_ctype, typename dst_ctype>
void forward_proxy_quint8_dimshuffle_typecvt_nchw4(
bool is_nhwc, const src_ctype* src, const float* mat,
const int* mat_idx, dst_ctype* dst, int N_SRC, int N_MAT, int C, int IH,
int IW, int OH, int OW, src_ctype bval, DTypeParamImpl<src_dtype> param,
BorderMode bmode, megcore::AsyncErrorInfo* error_info,
void* error_tracker, cudaStream_t stream) {
CudaDTypeParamImpl<src_dtype> dtype_param(param);
if (mat_idx) {
IndexedSrcVisitor<src_ctype> visitor;
visitor.ptr = src;
visitor.idx = mat_idx;
visitor.N_SRC = N_SRC;
visitor.error_info = error_info;
visitor.error_tracker = error_tracker;
dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw4(
is_nhwc, visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval,
dtype_param, bmode, stream);
} else {
DirectSrcVisitor<src_ctype> visitor;
visitor.ptr = src;
dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw4(
is_nhwc, visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval,
dtype_param, bmode, stream);
}
after_kernel_launch();
}
#define INST(src_dtype, src_ctype, dst_ctype) \
template void forward_proxy_quint8_dimshuffle_typecvt_nchw4( \
bool is_nhwc, const src_ctype*, const float*, const int*, \
dst_ctype*, int, int, int, int, int, int, int, src_ctype, \
DTypeParamImpl<src_dtype> param, BorderMode, \
megcore::AsyncErrorInfo*, void*, cudaStream_t);
INST(dt_quint8, uint8_t, int8_t)
#undef INST
template <typename src_dtype, typename src_ctype, typename dst_ctype>
void forward_proxy_quint8_dimshuffle_typecvt_nchw(
bool is_nhwc, const src_ctype* src, const float* mat,
const int* mat_idx, dst_ctype* dst, int N_SRC, int N_MAT, int C, int IH,
int IW, int OH, int OW, src_ctype bval, DTypeParamImpl<src_dtype> param,
BorderMode bmode, megcore::AsyncErrorInfo* error_info,
void* error_tracker, cudaStream_t stream) {
CudaDTypeParamImpl<src_dtype> dtype_param(param);
if (mat_idx) {
IndexedSrcVisitor<src_ctype> visitor;
visitor.ptr = src;
visitor.idx = mat_idx;
visitor.N_SRC = N_SRC;
visitor.error_info = error_info;
visitor.error_tracker = error_tracker;
dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw(
is_nhwc, visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval,
dtype_param, bmode, stream);
} else {
DirectSrcVisitor<src_ctype> visitor;
visitor.ptr = src;
dispatch_with_visitor_quint8_dimshuffle_typecvt_nchw(
is_nhwc, visitor, mat, dst, N_MAT, C, IH, IW, OH, OW, bval,
dtype_param, bmode, stream);
}
after_kernel_launch();
}
#define INST(src_dtype, src_ctype, dst_ctype) \
template void forward_proxy_quint8_dimshuffle_typecvt_nchw( \
bool is_nhwc, const src_ctype*, const float*, const int*, \
dst_ctype*, int, int, int, int, int, int, int, src_ctype, \
DTypeParamImpl<src_dtype> param, BorderMode, \
megcore::AsyncErrorInfo*, void*, cudaStream_t);
INST(dt_quint8, uint8_t, float)
#undef INST
} // namespace warp_perspective
} // namespace cuda
} // namespace megdnn
// vim: syntax=cpp.doxygen
|
964027dcec4a8cb473433b1bee5662f9a0fcc624.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*
* nullKernelSync.cu
*
* Microbenchmark for throughput of synchronous kernel launch.
*
* Build with: nvcc -I ../chLib <options> nullKernelSync.cu
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2014, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include "chError.h"
#include "chTimer.h"
__global__
void
NullKernel()
{
}
double
usPerLaunch( int cIterations )
{
hipError_t status;
double microseconds, ret;
chTimerTimestamp start, stop;
cuda(Free(0) );
chTimerGetTime( &start );
for ( int i = 0; i < cIterations; i++ ) {
hipLaunchKernelGGL(( NullKernel), dim3(1),dim3(1), 0, 0, );
cuda(ThreadSynchronize() );
}
chTimerGetTime( &stop );
microseconds = 1e6*chTimerElapsedTime( &start, &stop );
ret = microseconds / (float) cIterations;
Error:
return (status) ? 0.0 : ret;
}
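// Worked example (illustrative timing): if the 100000 synchronous launches
// issued from main() take 0.5 s in total, then microseconds = 1e6 * 0.5 and
// usPerLaunch() returns 5.0e5 / 100000 = 5 us per launch.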
int
main( int argc, char *argv[] )
{
const int cIterations = 100000;
printf( "Measuring synchronous launch time... " ); fflush( stdout );
printf( "%.2f us\n", usPerLaunch(cIterations) );
return 0;
}
| 964027dcec4a8cb473433b1bee5662f9a0fcc624.cu | /*
*
* nullKernelSync.cu
*
* Microbenchmark for throughput of synchronous kernel launch.
*
* Build with: nvcc -I ../chLib <options> nullKernelSync.cu
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2014, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include "chError.h"
#include "chTimer.h"
__global__
void
NullKernel()
{
}
double
usPerLaunch( int cIterations )
{
cudaError_t status;
double microseconds, ret;
chTimerTimestamp start, stop;
cuda(Free(0) );
chTimerGetTime( &start );
for ( int i = 0; i < cIterations; i++ ) {
NullKernel<<<1,1>>>();
cuda(ThreadSynchronize() );
}
chTimerGetTime( &stop );
microseconds = 1e6*chTimerElapsedTime( &start, &stop );
ret = microseconds / (float) cIterations;
Error:
return (status) ? 0.0 : ret;
}
int
main( int argc, char *argv[] )
{
const int cIterations = 100000;
printf( "Measuring synchronous launch time... " ); fflush( stdout );
printf( "%.2f us\n", usPerLaunch(cIterations) );
return 0;
}
|
1cd35cf6706b8f9df3af9b639f29c46327555a9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cu_inc/common.cuh"
#include "orttraining/training_ops/cuda/loss/softmax_cross_entropy_loss_impl.h"
#include "core/providers/cuda/cuda_common.h"
namespace onnxruntime {
namespace cuda {
template <typename T, typename Tin>
__global__ void _ComputeWeightsSoftmaxCrossEntropy(
T* weight_data_nd,
const Tin* label_data,
const T* weight_data,
CUDA_LONG N_D,
CUDA_LONG C,
CUDA_LONG ignore_index) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, N_D);
if (label_data[i] != ignore_index) {
CUDA_KERNEL_ASSERT(label_data[i] >= 0 && label_data[i] < C);
weight_data_nd[i] = weight_data != nullptr ? weight_data[label_data[i]] : 1;
}
}
template <typename T, typename Tin>
void ComputeWeightsSoftmaxCrossEntropyImpl(
const Tin* label,
const T* weight,
size_t count,
size_t label_depth,
int64_t ignore_index,
T* weight_data_nd) {
int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock));
CUDA_LONG N_D = static_cast<CUDA_LONG>(count);
CUDA_LONG C = static_cast<CUDA_LONG>(label_depth);
CUDA_LONG II = static_cast<CUDA_LONG>(ignore_index);
hipLaunchKernelGGL(( _ComputeWeightsSoftmaxCrossEntropy<T, Tin>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
weight_data_nd,
label,
weight,
N_D,
C,
II);
}
template <typename T, typename Tin>
__global__ void _WeightedSoftmaxCrossEntropyLoss(
const T* log_prob_data,
const Tin* label_data,
const T* weight_data,
const T* normalize_factor_data,
T* output_data,
CUDA_LONG N_D,
CUDA_LONG C,
CUDA_LONG II) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, N_D);
if (II == label_data[i]) {
output_data[i] = 0;
} else {
CUDA_KERNEL_ASSERT(label_data[i] >= 0 && label_data[i] < C);
output_data[i] = -log_prob_data[i * C + label_data[i]] * weight_data[i] / (*normalize_factor_data);
}
}
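// Worked example (illustrative values): with C = 3, label_data[i] = 2, a
// log-probability row of {-2.3f, -1.2f, -0.4f}, weight_data[i] = 1 and
// *normalize_factor_data = 8, the kernel writes
// output_data[i] = -(-0.4f) * 1 / 8 = 0.05f; a row whose label equals the
// ignore index contributes 0 instead.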
template <typename T, typename Tin>
void SoftmaxCrossEntropyLossImpl(
const T* log_prob,
const Tin* label,
const T* weight,
const T* normalize_factor,
size_t count,
size_t label_depth,
int64_t ignore_index,
T* output_data) {
int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock));
CUDA_LONG N_D = static_cast<CUDA_LONG>(count);
CUDA_LONG C = static_cast<CUDA_LONG>(label_depth);
CUDA_LONG II = static_cast<CUDA_LONG>(ignore_index);
hipLaunchKernelGGL(( _WeightedSoftmaxCrossEntropyLoss<T, Tin>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
log_prob,
label,
weight,
normalize_factor,
output_data,
N_D,
C,
II);
}
#define SPECIALIZED_IMPL_SoftMaxEntropyLossImpl(T, Tin) \
template void SoftmaxCrossEntropyLossImpl( \
const T* log_prob, \
const Tin* label, \
const T* weight, \
const T* normalize_factor, \
size_t count, \
size_t label_depth, \
int64_t ignore_index, \
T* output_data);
SPECIALIZED_IMPL_SoftMaxEntropyLossImpl(float, int32_t)
SPECIALIZED_IMPL_SoftMaxEntropyLossImpl(float, int64_t)
template <typename T, typename Tin>
__global__ void _WeightedSoftmaxCrossEntropyLossGrad(
const T* dY,
const T* log_prob,
const Tin* label,
const T* weight,
const T* normalize_factor,
T* output_data,
CUDA_LONG N_D,
CUDA_LONG C) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, N_D * C);
int row = i / C;
int d = i % C;
CUDA_KERNEL_ASSERT(weight[row] == 0 || (label[row] >= 0 && label[row] < C));
if(0 == *normalize_factor){
output_data[i] = 0;
} else {
output_data[i] = (*dY) * weight[row] * (_Exp(log_prob[i]) - 1.0 * (d == label[row])) / (*normalize_factor);
}
}
template <typename T, typename Tin>
__global__ void _WeightedReductionNoneSoftmaxCrossEntropyLossGrad(
const T* dY,
const T* log_prob,
const Tin* label,
const T* weight,
const T* normalize_factor,
T* output_data,
CUDA_LONG N_D,
CUDA_LONG C) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, N_D * C);
int row = i / C;
int d = i % C;
CUDA_KERNEL_ASSERT(weight[row] == 0 || (label[row] >= 0 && label[row] < C));
if(0 == *normalize_factor){
output_data[i] = 0;
} else {
output_data[i] = dY[row] * weight[row] * (_Exp(log_prob[i]) - 1.0 * (d == label[row])) / (*normalize_factor);
}
}
template <typename T, typename Tin>
void SoftmaxCrossEntropyLossGradImpl(
const T* dY,
const T* log_prob,
const Tin* label,
const T* weight,
const T* normalize_factor,
size_t count,
size_t label_depth,
bool reduction_none,
T* output_data) {
CUDA_LONG N_D = static_cast<CUDA_LONG>(count);
CUDA_LONG C = static_cast<CUDA_LONG>(label_depth);
int blocksPerGrid = (int)(ceil(static_cast<float>(N_D * C) / GridDim::maxThreadsPerBlock));
if (reduction_none) {
hipLaunchKernelGGL(( _WeightedReductionNoneSoftmaxCrossEntropyLossGrad<T, Tin>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
dY,
log_prob,
label,
weight,
normalize_factor,
output_data,
N_D,
C);
} else {
hipLaunchKernelGGL(( _WeightedSoftmaxCrossEntropyLossGrad<T, Tin>), dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, 0,
dY,
log_prob,
label,
weight,
normalize_factor,
output_data,
N_D,
C);
}
}
#define SPECIALIZED_IMPL_SoftMaxEntropyLossGradImpl(T, Tin) \
template void SoftmaxCrossEntropyLossGradImpl( \
const T* dY, \
const T* log_prob, \
const Tin* label, \
const T* weight, \
const T* normalize_factor, \
size_t count, \
size_t label_depth, \
            bool reduction_none,                                         \
T* output_data);
SPECIALIZED_IMPL_SoftMaxEntropyLossGradImpl(float, int32_t)
SPECIALIZED_IMPL_SoftMaxEntropyLossGradImpl(float, int64_t)
#define SPECIALIZED_IMPL_ComputeWeightsSoftmaxCrossEntropyImpl(T, Tin) \
template void ComputeWeightsSoftmaxCrossEntropyImpl( \
const Tin* label, \
const T* weight, \
size_t count, \
size_t label_depth, \
int64_t ignore_index, \
T* weight_data_nd);
SPECIALIZED_IMPL_ComputeWeightsSoftmaxCrossEntropyImpl(float, int32_t)
SPECIALIZED_IMPL_ComputeWeightsSoftmaxCrossEntropyImpl(float, int64_t)
} // namespace cuda
} // namespace onnxruntime | 1cd35cf6706b8f9df3af9b639f29c46327555a9d.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include "core/providers/cuda/cu_inc/common.cuh"
#include "orttraining/training_ops/cuda/loss/softmax_cross_entropy_loss_impl.h"
#include "core/providers/cuda/cuda_common.h"
namespace onnxruntime {
namespace cuda {
template <typename T, typename Tin>
__global__ void _ComputeWeightsSoftmaxCrossEntropy(
T* weight_data_nd,
const Tin* label_data,
const T* weight_data,
CUDA_LONG N_D,
CUDA_LONG C,
CUDA_LONG ignore_index) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, N_D);
if (label_data[i] != ignore_index) {
CUDA_KERNEL_ASSERT(label_data[i] >= 0 && label_data[i] < C);
weight_data_nd[i] = weight_data != nullptr ? weight_data[label_data[i]] : 1;
}
}
template <typename T, typename Tin>
void ComputeWeightsSoftmaxCrossEntropyImpl(
const Tin* label,
const T* weight,
size_t count,
size_t label_depth,
int64_t ignore_index,
T* weight_data_nd) {
int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock));
CUDA_LONG N_D = static_cast<CUDA_LONG>(count);
CUDA_LONG C = static_cast<CUDA_LONG>(label_depth);
CUDA_LONG II = static_cast<CUDA_LONG>(ignore_index);
_ComputeWeightsSoftmaxCrossEntropy<T, Tin><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
weight_data_nd,
label,
weight,
N_D,
C,
II);
}
template <typename T, typename Tin>
__global__ void _WeightedSoftmaxCrossEntropyLoss(
const T* log_prob_data,
const Tin* label_data,
const T* weight_data,
const T* normalize_factor_data,
T* output_data,
CUDA_LONG N_D,
CUDA_LONG C,
CUDA_LONG II) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, N_D);
if (II == label_data[i]) {
output_data[i] = 0;
} else {
CUDA_KERNEL_ASSERT(label_data[i] >= 0 && label_data[i] < C);
output_data[i] = -log_prob_data[i * C + label_data[i]] * weight_data[i] / (*normalize_factor_data);
}
}
template <typename T, typename Tin>
void SoftmaxCrossEntropyLossImpl(
const T* log_prob,
const Tin* label,
const T* weight,
const T* normalize_factor,
size_t count,
size_t label_depth,
int64_t ignore_index,
T* output_data) {
int blocksPerGrid = (int)(ceil(static_cast<float>(count) / GridDim::maxThreadsPerBlock));
CUDA_LONG N_D = static_cast<CUDA_LONG>(count);
CUDA_LONG C = static_cast<CUDA_LONG>(label_depth);
CUDA_LONG II = static_cast<CUDA_LONG>(ignore_index);
_WeightedSoftmaxCrossEntropyLoss<T, Tin><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
log_prob,
label,
weight,
normalize_factor,
output_data,
N_D,
C,
II);
}
#define SPECIALIZED_IMPL_SoftMaxEntropyLossImpl(T, Tin) \
template void SoftmaxCrossEntropyLossImpl( \
const T* log_prob, \
const Tin* label, \
const T* weight, \
const T* normalize_factor, \
size_t count, \
size_t label_depth, \
int64_t ignore_index, \
T* output_data);
SPECIALIZED_IMPL_SoftMaxEntropyLossImpl(float, int32_t)
SPECIALIZED_IMPL_SoftMaxEntropyLossImpl(float, int64_t)
template <typename T, typename Tin>
__global__ void _WeightedSoftmaxCrossEntropyLossGrad(
const T* dY,
const T* log_prob,
const Tin* label,
const T* weight,
const T* normalize_factor,
T* output_data,
CUDA_LONG N_D,
CUDA_LONG C) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, N_D * C);
int row = i / C;
int d = i % C;
CUDA_KERNEL_ASSERT(weight[row] == 0 || (label[row] >= 0 && label[row] < C));
if(0 == *normalize_factor){
output_data[i] = 0;
} else {
output_data[i] = (*dY) * weight[row] * (_Exp(log_prob[i]) - 1.0 * (d == label[row])) / (*normalize_factor);
}
}
template <typename T, typename Tin>
__global__ void _WeightedReductionNoneSoftmaxCrossEntropyLossGrad(
const T* dY,
const T* log_prob,
const Tin* label,
const T* weight,
const T* normalize_factor,
T* output_data,
CUDA_LONG N_D,
CUDA_LONG C) {
CALCULATE_ELEMENTWISE_INDEX_OR_EXIT(i, N_D * C);
int row = i / C;
int d = i % C;
CUDA_KERNEL_ASSERT(weight[row] == 0 || (label[row] >= 0 && label[row] < C));
if(0 == *normalize_factor){
output_data[i] = 0;
} else {
output_data[i] = dY[row] * weight[row] * (_Exp(log_prob[i]) - 1.0 * (d == label[row])) / (*normalize_factor);
}
}
template <typename T, typename Tin>
void SoftmaxCrossEntropyLossGradImpl(
const T* dY,
const T* log_prob,
const Tin* label,
const T* weight,
const T* normalize_factor,
size_t count,
size_t label_depth,
bool reduction_none,
T* output_data) {
CUDA_LONG N_D = static_cast<CUDA_LONG>(count);
CUDA_LONG C = static_cast<CUDA_LONG>(label_depth);
int blocksPerGrid = (int)(ceil(static_cast<float>(N_D * C) / GridDim::maxThreadsPerBlock));
if (reduction_none) {
_WeightedReductionNoneSoftmaxCrossEntropyLossGrad<T, Tin><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
dY,
log_prob,
label,
weight,
normalize_factor,
output_data,
N_D,
C);
} else {
_WeightedSoftmaxCrossEntropyLossGrad<T, Tin><<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0>>>(
dY,
log_prob,
label,
weight,
normalize_factor,
output_data,
N_D,
C);
}
}
#define SPECIALIZED_IMPL_SoftMaxEntropyLossGradImpl(T, Tin) \
template void SoftmaxCrossEntropyLossGradImpl( \
const T* dY, \
const T* log_prob, \
const Tin* label, \
const T* weight, \
const T* normalize_factor, \
size_t count, \
size_t label_depth, \
bool reducation_none, \
T* output_data);
SPECIALIZED_IMPL_SoftMaxEntropyLossGradImpl(float, int32_t)
SPECIALIZED_IMPL_SoftMaxEntropyLossGradImpl(float, int64_t)
#define SPECIALIZED_IMPL_ComputeWeightsSoftmaxCrossEntropyImpl(T, Tin) \
template void ComputeWeightsSoftmaxCrossEntropyImpl( \
const Tin* label, \
const T* weight, \
size_t count, \
size_t label_depth, \
int64_t ignore_index, \
T* weight_data_nd);
SPECIALIZED_IMPL_ComputeWeightsSoftmaxCrossEntropyImpl(float, int32_t)
SPECIALIZED_IMPL_ComputeWeightsSoftmaxCrossEntropyImpl(float, int64_t)
} // namespace cuda
} // namespace onnxruntime |
ead6988e19f2062e69d29c0f8ce59063550e0c0e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14) {
if (comp < var_1 + -1.9098E-13f - (-1.6590E-35f + var_2 / var_3)) {
comp += var_4 * var_5 + (-0.0f * (-1.9967E-41f / sinhf(-1.1047E-35f - (var_6 * var_7))));
float tmp_1 = -1.9860E-17f;
comp += tmp_1 + var_8 / sqrtf(acosf((-1.9410E-36f - (var_9 / var_10))));
if (comp < log10f(powf(+1.9282E36f * var_11 * (var_12 - (var_13 / +1.0302E-42f)), var_14 - (+0.0f * powf(-0.0f, -1.6385E0f))))) {
float tmp_2 = atanf(+1.4938E36f / atanf(+1.0958E-35f));
comp = tmp_2 - (+1.6552E36f * +0.0f);
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15);
hipDeviceSynchronize();
return 0;
}
| ead6988e19f2062e69d29c0f8ce59063550e0c0e.cu |
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14) {
if (comp < var_1 + -1.9098E-13f - (-1.6590E-35f + var_2 / var_3)) {
comp += var_4 * var_5 + (-0.0f * (-1.9967E-41f / sinhf(-1.1047E-35f - (var_6 * var_7))));
float tmp_1 = -1.9860E-17f;
comp += tmp_1 + var_8 / sqrtf(acosf((-1.9410E-36f - (var_9 / var_10))));
if (comp < log10f(powf(+1.9282E36f * var_11 * (var_12 - (var_13 / +1.0302E-42f)), var_14 - (+0.0f * powf(-0.0f, -1.6385E0f))))) {
float tmp_2 = atanf(+1.4938E36f / atanf(+1.0958E-35f));
comp = tmp_2 - (+1.6552E36f * +0.0f);
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15);
cudaDeviceSynchronize();
return 0;
}
|
0475d84613955476b2d0d25b2cd31fce9213ae6d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef ENABLE_CURD
#include<curd_lib_host.h>
#endif
/*
* Copyright (c) 2016 University of Cordoba and University of Illinois
* All rights reserved.
*
* Developed by: IMPACT Research Group
* University of Cordoba and University of Illinois
* http://impact.crhc.illinois.edu/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* with the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* > Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
* > Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimers in the
* documentation and/or other materials provided with the distribution.
* > Neither the names of IMPACT Research Group, University of Cordoba,
* University of Illinois nor the names of its contributors may be used
* to endorse or promote products derived from this Software without
* specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
* THE SOFTWARE.
*
*/
#define _CUDA_COMPILER_
#include "support/common.h"
__constant__ float gaus[3][3] = {{0.0625f, 0.125f, 0.0625f}, {0.1250f, 0.250f, 0.1250f}, {0.0625f, 0.125f, 0.0625f}};
__constant__ int sobx[3][3] = {{-1, 0, 1}, {-2, 0, 2}, {-1, 0, 1}};
__constant__ int soby[3][3] = {{-1, -2, -1}, {0, 0, 0}, {1, 2, 1}};
// https://github.com/smskelley/canny-opencl
// Gaussian Kernel
// data: image input data with each pixel taking up 1 byte (8Bit 1Channel)
// out: image output data (8B1C)
__global__ void gaussian_kernel(unsigned char *data, unsigned char *out, int rows, int cols) {
extern __shared__ int l_mem[];
int* l_data = l_mem;
const int L_SIZE = blockDim.x;
int sum = 0;
const int g_row = blockIdx.y * blockDim.y + threadIdx.y + 1;
const int g_col = blockIdx.x * blockDim.x + threadIdx.x + 1;
const int l_row = threadIdx.y + 1;
const int l_col = threadIdx.x + 1;
const int pos = g_row * cols + g_col;
// copy to local
l_data[l_row * (L_SIZE + 2) + l_col] = data[pos];
// top most row
if(l_row == 1) {
l_data[0 * (L_SIZE + 2) + l_col] = data[pos - cols];
// top left
if(l_col == 1)
l_data[0 * (L_SIZE + 2) + 0] = data[pos - cols - 1];
// top right
else if(l_col == L_SIZE)
l_data[0 * (L_SIZE + 2) + L_SIZE + 1] = data[pos - cols + 1];
}
// bottom most row
else if(l_row == L_SIZE) {
l_data[(L_SIZE + 1) * (L_SIZE + 2) + l_col] = data[pos + cols];
// bottom left
if(l_col == 1)
l_data[(L_SIZE + 1) * (L_SIZE + 2) + 0] = data[pos + cols - 1];
// bottom right
else if(l_col == L_SIZE)
l_data[(L_SIZE + 1) * (L_SIZE + 2) + L_SIZE + 1] = data[pos + cols + 1];
}
if(l_col == 1)
l_data[l_row * (L_SIZE + 2) + 0] = data[pos - 1];
else if(l_col == L_SIZE)
l_data[l_row * (L_SIZE + 2) + L_SIZE + 1] = data[pos + 1];
__syncthreads();
for(int i = 0; i < 3; i++) {
for(int j = 0; j < 3; j++) {
sum += gaus[i][j] * l_data[(i + l_row - 1) * (L_SIZE + 2) + j + l_col - 1];
}
}
out[pos] = min(255, max(0, sum));
}
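// Launch sketch (illustrative; d_in/d_out and the block size are assumptions,
// and the kernel itself has no bounds check, so (rows - 2) and (cols - 2) are
// assumed to be multiples of L_SIZE):
//     dim3 threads(16, 16);                              // L_SIZE = 16
//     dim3 grid((cols - 2) / 16, (rows - 2) / 16);
//     size_t smem = (16 + 2) * (16 + 2) * sizeof(int);   // 1296 bytes of l_mem
//     hipLaunchKernelGGL(gaussian_kernel, grid, threads, smem, 0,
//                        d_in, d_out, rows, cols);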
// Sobel kernel. Apply sobx and soby separately, then find the sqrt of their
// squares.
// data: image input data with each pixel taking up 1 byte (8Bit 1Channel)
// out: image output data (8B1C)
// theta: angle output data
__global__ void sobel_kernel(unsigned char *data, unsigned char *out, unsigned char *theta,
int rows, int cols) {
extern __shared__ int l_mem[];
int* l_data = l_mem;
// collect sums separately. we're storing them into floats because that
// is what hypot and atan2 will expect.
const int L_SIZE = blockDim.x;
const float PI = 3.14159265f;
const int g_row = blockIdx.y * blockDim.y + threadIdx.y + 1;
const int g_col = blockIdx.x * blockDim.x + threadIdx.x + 1;
const int l_row = threadIdx.y + 1;
const int l_col = threadIdx.x + 1;
const int pos = g_row * cols + g_col;
// copy to local
l_data[l_row * (L_SIZE + 2) + l_col] = data[pos];
// top most row
if(l_row == 1) {
l_data[0 * (L_SIZE + 2) + l_col] = data[pos - cols];
// top left
if(l_col == 1)
l_data[0 * (L_SIZE + 2) + 0] = data[pos - cols - 1];
// top right
else if(l_col == L_SIZE)
l_data[0 * (L_SIZE + 2) + (L_SIZE + 1)] = data[pos - cols + 1];
}
// bottom most row
else if(l_row == L_SIZE) {
l_data[(L_SIZE + 1) * (L_SIZE + 2) + l_col] = data[pos + cols];
// bottom left
if(l_col == 1)
l_data[(L_SIZE + 1) * (L_SIZE + 2) + 0] = data[pos + cols - 1];
// bottom right
else if(l_col == L_SIZE)
l_data[(L_SIZE + 1) * (L_SIZE + 2) + (L_SIZE + 1)] = data[pos + cols + 1];
}
// left
if(l_col == 1)
l_data[l_row * (L_SIZE + 2) + 0] = data[pos - 1];
// right
else if(l_col == L_SIZE)
l_data[l_row * (L_SIZE + 2) + (L_SIZE + 1)] = data[pos + 1];
__syncthreads();
float sumx = 0, sumy = 0, angle = 0;
// find x and y derivatives
for(int i = 0; i < 3; i++) {
for(int j = 0; j < 3; j++) {
sumx += sobx[i][j] * l_data[(i + l_row - 1) * (L_SIZE + 2) + j + l_col - 1];
sumy += soby[i][j] * l_data[(i + l_row - 1) * (L_SIZE + 2) + j + l_col - 1];
}
}
    // The output is now the square root of their squares, but the result is
    // constrained to 0 <= value <= 255. Note that hypot is a built-in function
    // defined as: hypot(x,y) = sqrt(x*x + y*y).
out[pos] = min(255, max(0, (int)hypot(sumx, sumy)));
// Compute the direction angle theta in radians
    // atan2 has a range of (-PI, PI] radians
angle = atan2(sumy, sumx);
// If the angle is negative,
// shift the range to (0, 2PI) by adding 2PI to the angle,
// then perform modulo operation of 2PI
if(angle < 0) {
angle = fmod((angle + 2 * PI), (2 * PI));
}
// Round the angle to one of four possibilities: 0, 45, 90, 135 degrees
// then store it in the theta buffer at the proper position
//theta[pos] = ((int)(degrees(angle * (PI/8) + PI/8-0.0001) / 45) * 45) % 180;
if(angle <= PI / 8)
theta[pos] = 0;
else if(angle <= 3 * PI / 8)
theta[pos] = 45;
else if(angle <= 5 * PI / 8)
theta[pos] = 90;
else if(angle <= 7 * PI / 8)
theta[pos] = 135;
else if(angle <= 9 * PI / 8)
theta[pos] = 0;
else if(angle <= 11 * PI / 8)
theta[pos] = 45;
else if(angle <= 13 * PI / 8)
theta[pos] = 90;
else if(angle <= 15 * PI / 8)
theta[pos] = 135;
else
theta[pos] = 0; // (angle <= 16*PI/8)
}
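// Worked example of the quantization above: a purely vertical gradient (sumx = 0,
// sumy > 0) gives angle = atan2(sumy, 0) = PI/2, which lies in (3*PI/8, 5*PI/8], so
// theta is stored as 90 -- an edge running East/West for the non-max suppression step.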
// Non-maximum Suppression Kernel
// data: image input data with each pixel taking up 1 byte (8Bit 1Channel)
// out: image output data (8B1C)
// theta: angle input data
__global__ void non_max_supp_kernel(unsigned char *data, unsigned char *out,
unsigned char *theta, int rows, int cols) {
extern __shared__ int l_mem[];
int* l_data = l_mem;
// These variables are offset by one to avoid seg. fault errors
// As such, this kernel ignores the outside ring of pixels
const int L_SIZE = blockDim.x;
const int g_row = blockIdx.y * blockDim.y + threadIdx.y + 1;
const int g_col = blockIdx.x * blockDim.x + threadIdx.x + 1;
const int l_row = threadIdx.y + 1;
const int l_col = threadIdx.x + 1;
const int pos = g_row * cols + g_col;
// copy to l_data
l_data[l_row * (L_SIZE + 2) + l_col] = data[pos];
// top most row
if(l_row == 1) {
l_data[0 * (L_SIZE + 2) + l_col] = data[pos - cols];
// top left
if(l_col == 1)
l_data[0 * (L_SIZE + 2) + 0] = data[pos - cols - 1];
// top right
else if(l_col == L_SIZE)
l_data[0 * (L_SIZE + 2) + (L_SIZE + 1)] = data[pos - cols + 1];
}
// bottom most row
else if(l_row == L_SIZE) {
l_data[(L_SIZE + 1) * (L_SIZE + 2) + l_col] = data[pos + cols];
// bottom left
if(l_col == 1)
l_data[(L_SIZE + 1) * (L_SIZE + 2) + 0] = data[pos + cols - 1];
// bottom right
else if(l_col == L_SIZE)
l_data[(L_SIZE + 1) * (L_SIZE + 2) + (L_SIZE + 1)] = data[pos + cols + 1];
}
if(l_col == 1)
l_data[l_row * (L_SIZE + 2) + 0] = data[pos - 1];
else if(l_col == L_SIZE)
l_data[l_row * (L_SIZE + 2) + (L_SIZE + 1)] = data[pos + 1];
__syncthreads();
unsigned char my_magnitude = l_data[l_row * (L_SIZE + 2) + l_col];
// The following variables are used to address the matrices more easily
switch(theta[pos]) {
// A gradient angle of 0 degrees = an edge that is North/South
// Check neighbors to the East and West
case 0:
        // suppress me if my neighbor has larger magnitude
if(my_magnitude <= l_data[l_row * (L_SIZE + 2) + l_col + 1] || // east
my_magnitude <= l_data[l_row * (L_SIZE + 2) + l_col - 1]) // west
{
out[pos] = 0;
}
// otherwise, copy my value to the output buffer
else {
out[pos] = my_magnitude;
}
break;
// A gradient angle of 45 degrees = an edge that is NW/SE
// Check neighbors to the NE and SW
case 45:
        // suppress me if my neighbor has larger magnitude
if(my_magnitude <= l_data[(l_row - 1) * (L_SIZE + 2) + l_col + 1] || // north east
my_magnitude <= l_data[(l_row + 1) * (L_SIZE + 2) + l_col - 1]) // south west
{
out[pos] = 0;
}
// otherwise, copy my value to the output buffer
else {
out[pos] = my_magnitude;
}
break;
// A gradient angle of 90 degrees = an edge that is E/W
// Check neighbors to the North and South
case 90:
        // suppress me if my neighbor has larger magnitude
if(my_magnitude <= l_data[(l_row - 1) * (L_SIZE + 2) + l_col] || // north
my_magnitude <= l_data[(l_row + 1) * (L_SIZE + 2) + l_col]) // south
{
out[pos] = 0;
}
// otherwise, copy my value to the output buffer
else {
out[pos] = my_magnitude;
}
break;
// A gradient angle of 135 degrees = an edge that is NE/SW
// Check neighbors to the NW and SE
case 135:
        // suppress me if my neighbor has larger magnitude
if(my_magnitude <= l_data[(l_row - 1) * (L_SIZE + 2) + l_col - 1] || // north west
my_magnitude <= l_data[(l_row + 1) * (L_SIZE + 2) + l_col + 1]) // south east
{
out[pos] = 0;
}
// otherwise, copy my value to the output buffer
else {
out[pos] = my_magnitude;
}
break;
default: out[pos] = my_magnitude; break;
}
}
// Hysteresis Threshold Kernel
// data: image input data with each pixel taking up 1 byte (8Bit 1Channel)
// out: image output data (8B1C)
__global__ void hyst_kernel(unsigned char *data, unsigned char *out, int rows, int cols) {
// Establish our high and low thresholds as floats
float lowThresh = 10;
float highThresh = 70;
// These variables are offset by one to avoid seg. fault errors
// As such, this kernel ignores the outside ring of pixels
const int row = blockIdx.y * blockDim.y + threadIdx.y + 1;
const int col = blockIdx.x * blockDim.x + threadIdx.x + 1;
const int pos = row * cols + col;
const unsigned char EDGE = 255;
unsigned char magnitude = data[pos];
if(magnitude >= highThresh)
out[pos] = EDGE;
else if(magnitude <= lowThresh)
out[pos] = 0;
else {
float med = (highThresh + lowThresh) / 2;
if(magnitude >= med)
out[pos] = EDGE;
else
out[pos] = 0;
}
}
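// With the fixed thresholds above (low = 10, high = 70, med = 40): a magnitude of 80
// is kept as a strong edge (255), 5 is discarded, and in-between values are split at
// med, so 55 is kept while 25 is dropped.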
hipError_t call_gaussian_kernel(int threads, unsigned char *data, unsigned char *out,
int rows, int cols, int l_mem_size){
dim3 dimGrid((cols-2)/threads, (rows-2)/threads);
dim3 dimBlock(threads, threads);
#ifdef ENABLE_CURD
allocateReadWriteSets(dimGrid, dimBlock);
#endif
hipLaunchKernelGGL(( gaussian_kernel), dim3(dimGrid), dim3(dimBlock), l_mem_size, 0, data, out, rows, cols);
#ifdef ENABLE_CURD
freeReadWriteSets(dimGrid, dimBlock);
#endif
hipError_t err = hipGetLastError();
return err;
}
hipError_t call_sobel_kernel(int threads, unsigned char *data, unsigned char *out,
unsigned char *theta, int rows, int cols, int l_mem_size){
dim3 dimGrid((cols-2)/threads, (rows-2)/threads);
dim3 dimBlock(threads, threads);
#ifdef ENABLE_CURD
allocateReadWriteSets(dimGrid, dimBlock);
#endif
hipLaunchKernelGGL(( sobel_kernel), dim3(dimGrid), dim3(dimBlock), l_mem_size, 0, data, out, theta, rows, cols);
#ifdef ENABLE_CURD
freeReadWriteSets(dimGrid, dimBlock);
#endif
hipError_t err = hipGetLastError();
return err;
}
hipError_t call_non_max_supp_kernel(int threads, unsigned char *data, unsigned char *out,
unsigned char *theta, int rows, int cols, int l_mem_size){
dim3 dimGrid((cols-2)/threads, (rows-2)/threads);
dim3 dimBlock(threads, threads);
#ifdef ENABLE_CURD
allocateReadWriteSets(dimGrid, dimBlock);
#endif
hipLaunchKernelGGL(( non_max_supp_kernel), dim3(dimGrid), dim3(dimBlock), l_mem_size, 0, data, out, theta, rows, cols);
#ifdef ENABLE_CURD
freeReadWriteSets(dimGrid, dimBlock);
#endif
hipError_t err = hipGetLastError();
return err;
}
hipError_t call_hyst_kernel(int threads, unsigned char *data, unsigned char *out,
int rows, int cols){
dim3 dimGrid((cols-2)/threads, (rows-2)/threads);
dim3 dimBlock(threads, threads);
#ifdef ENABLE_CURD
allocateReadWriteSets(dimGrid, dimBlock);
#endif
hipLaunchKernelGGL(( hyst_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, data, out, rows, cols);
#ifdef ENABLE_CURD
freeReadWriteSets(dimGrid, dimBlock);
#endif
hipError_t err = hipGetLastError();
return err;
}
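// Illustrative host-side sketch (not part of the original benchmark): one way the four
// wrappers above could be chained into a full Canny pass. The helper name and the
// ping-pong buffer scheme are hypothetical; the shared-memory size follows the
// (threads + 2) x (threads + 2) int tile assumed by the kernels.
hipError_t run_canny_pipeline(unsigned char *d_in, unsigned char *d_tmp,
                              unsigned char *d_theta, int rows, int cols, int threads) {
    const int l_mem_size = (threads + 2) * (threads + 2) * sizeof(int);
    // in -> tmp (blur), tmp -> in (gradient + theta), in -> tmp (NMS), tmp -> in (hysteresis)
    hipError_t err = call_gaussian_kernel(threads, d_in, d_tmp, rows, cols, l_mem_size);
    if (err != hipSuccess) return err;
    err = call_sobel_kernel(threads, d_tmp, d_in, d_theta, rows, cols, l_mem_size);
    if (err != hipSuccess) return err;
    err = call_non_max_supp_kernel(threads, d_in, d_tmp, d_theta, rows, cols, l_mem_size);
    if (err != hipSuccess) return err;
    return call_hyst_kernel(threads, d_tmp, d_in, rows, cols);
}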
| 0475d84613955476b2d0d25b2cd31fce9213ae6d.cu | #ifdef ENABLE_CURD
#include<curd_lib_host.h>
#endif
/*
* Copyright (c) 2016 University of Cordoba and University of Illinois
* All rights reserved.
*
* Developed by: IMPACT Research Group
* University of Cordoba and University of Illinois
* http://impact.crhc.illinois.edu/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* with the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* > Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
* > Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimers in the
* documentation and/or other materials provided with the distribution.
* > Neither the names of IMPACT Research Group, University of Cordoba,
* University of Illinois nor the names of its contributors may be used
* to endorse or promote products derived from this Software without
* specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
* THE SOFTWARE.
*
*/
#define _CUDA_COMPILER_
#include "support/common.h"
__constant__ float gaus[3][3] = {{0.0625f, 0.125f, 0.0625f}, {0.1250f, 0.250f, 0.1250f}, {0.0625f, 0.125f, 0.0625f}};
__constant__ int sobx[3][3] = {{-1, 0, 1}, {-2, 0, 2}, {-1, 0, 1}};
__constant__ int soby[3][3] = {{-1, -2, -1}, {0, 0, 0}, {1, 2, 1}};
// https://github.com/smskelley/canny-opencl
// Gaussian Kernel
// data: image input data with each pixel taking up 1 byte (8Bit 1Channel)
// out: image output data (8B1C)
__global__ void gaussian_kernel(unsigned char *data, unsigned char *out, int rows, int cols) {
extern __shared__ int l_mem[];
int* l_data = l_mem;
const int L_SIZE = blockDim.x;
int sum = 0;
const int g_row = blockIdx.y * blockDim.y + threadIdx.y + 1;
const int g_col = blockIdx.x * blockDim.x + threadIdx.x + 1;
const int l_row = threadIdx.y + 1;
const int l_col = threadIdx.x + 1;
const int pos = g_row * cols + g_col;
// copy to local
l_data[l_row * (L_SIZE + 2) + l_col] = data[pos];
// top most row
if(l_row == 1) {
l_data[0 * (L_SIZE + 2) + l_col] = data[pos - cols];
// top left
if(l_col == 1)
l_data[0 * (L_SIZE + 2) + 0] = data[pos - cols - 1];
// top right
else if(l_col == L_SIZE)
l_data[0 * (L_SIZE + 2) + L_SIZE + 1] = data[pos - cols + 1];
}
// bottom most row
else if(l_row == L_SIZE) {
l_data[(L_SIZE + 1) * (L_SIZE + 2) + l_col] = data[pos + cols];
// bottom left
if(l_col == 1)
l_data[(L_SIZE + 1) * (L_SIZE + 2) + 0] = data[pos + cols - 1];
// bottom right
else if(l_col == L_SIZE)
l_data[(L_SIZE + 1) * (L_SIZE + 2) + L_SIZE + 1] = data[pos + cols + 1];
}
if(l_col == 1)
l_data[l_row * (L_SIZE + 2) + 0] = data[pos - 1];
else if(l_col == L_SIZE)
l_data[l_row * (L_SIZE + 2) + L_SIZE + 1] = data[pos + 1];
__syncthreads();
for(int i = 0; i < 3; i++) {
for(int j = 0; j < 3; j++) {
sum += gaus[i][j] * l_data[(i + l_row - 1) * (L_SIZE + 2) + j + l_col - 1];
}
}
out[pos] = min(255, max(0, sum));
}
// Sobel kernel. Apply sobx and soby separately, then find the sqrt of their
// squares.
// data: image input data with each pixel taking up 1 byte (8Bit 1Channel)
// out: image output data (8B1C)
// theta: angle output data
__global__ void sobel_kernel(unsigned char *data, unsigned char *out, unsigned char *theta,
int rows, int cols) {
extern __shared__ int l_mem[];
int* l_data = l_mem;
// collect sums separately. we're storing them into floats because that
// is what hypot and atan2 will expect.
const int L_SIZE = blockDim.x;
const float PI = 3.14159265f;
const int g_row = blockIdx.y * blockDim.y + threadIdx.y + 1;
const int g_col = blockIdx.x * blockDim.x + threadIdx.x + 1;
const int l_row = threadIdx.y + 1;
const int l_col = threadIdx.x + 1;
const int pos = g_row * cols + g_col;
// copy to local
l_data[l_row * (L_SIZE + 2) + l_col] = data[pos];
// top most row
if(l_row == 1) {
l_data[0 * (L_SIZE + 2) + l_col] = data[pos - cols];
// top left
if(l_col == 1)
l_data[0 * (L_SIZE + 2) + 0] = data[pos - cols - 1];
// top right
else if(l_col == L_SIZE)
l_data[0 * (L_SIZE + 2) + (L_SIZE + 1)] = data[pos - cols + 1];
}
// bottom most row
else if(l_row == L_SIZE) {
l_data[(L_SIZE + 1) * (L_SIZE + 2) + l_col] = data[pos + cols];
// bottom left
if(l_col == 1)
l_data[(L_SIZE + 1) * (L_SIZE + 2) + 0] = data[pos + cols - 1];
// bottom right
else if(l_col == L_SIZE)
l_data[(L_SIZE + 1) * (L_SIZE + 2) + (L_SIZE + 1)] = data[pos + cols + 1];
}
// left
if(l_col == 1)
l_data[l_row * (L_SIZE + 2) + 0] = data[pos - 1];
// right
else if(l_col == L_SIZE)
l_data[l_row * (L_SIZE + 2) + (L_SIZE + 1)] = data[pos + 1];
__syncthreads();
float sumx = 0, sumy = 0, angle = 0;
// find x and y derivatives
for(int i = 0; i < 3; i++) {
for(int j = 0; j < 3; j++) {
sumx += sobx[i][j] * l_data[(i + l_row - 1) * (L_SIZE + 2) + j + l_col - 1];
sumy += soby[i][j] * l_data[(i + l_row - 1) * (L_SIZE + 2) + j + l_col - 1];
}
}
    // The output is now the square root of their squares, but the result is
    // constrained to 0 <= value <= 255. Note that hypot is a built-in function
    // defined as: hypot(x,y) = sqrt(x*x + y*y).
out[pos] = min(255, max(0, (int)hypot(sumx, sumy)));
// Compute the direction angle theta in radians
    // atan2 has a range of (-PI, PI] radians
angle = atan2(sumy, sumx);
// If the angle is negative,
// shift the range to (0, 2PI) by adding 2PI to the angle,
// then perform modulo operation of 2PI
if(angle < 0) {
angle = fmod((angle + 2 * PI), (2 * PI));
}
// Round the angle to one of four possibilities: 0, 45, 90, 135 degrees
// then store it in the theta buffer at the proper position
//theta[pos] = ((int)(degrees(angle * (PI/8) + PI/8-0.0001) / 45) * 45) % 180;
if(angle <= PI / 8)
theta[pos] = 0;
else if(angle <= 3 * PI / 8)
theta[pos] = 45;
else if(angle <= 5 * PI / 8)
theta[pos] = 90;
else if(angle <= 7 * PI / 8)
theta[pos] = 135;
else if(angle <= 9 * PI / 8)
theta[pos] = 0;
else if(angle <= 11 * PI / 8)
theta[pos] = 45;
else if(angle <= 13 * PI / 8)
theta[pos] = 90;
else if(angle <= 15 * PI / 8)
theta[pos] = 135;
else
theta[pos] = 0; // (angle <= 16*PI/8)
}
// Non-maximum Suppression Kernel
// data: image input data with each pixel taking up 1 byte (8Bit 1Channel)
// out: image output data (8B1C)
// theta: angle input data
__global__ void non_max_supp_kernel(unsigned char *data, unsigned char *out,
unsigned char *theta, int rows, int cols) {
extern __shared__ int l_mem[];
int* l_data = l_mem;
// These variables are offset by one to avoid seg. fault errors
// As such, this kernel ignores the outside ring of pixels
const int L_SIZE = blockDim.x;
const int g_row = blockIdx.y * blockDim.y + threadIdx.y + 1;
const int g_col = blockIdx.x * blockDim.x + threadIdx.x + 1;
const int l_row = threadIdx.y + 1;
const int l_col = threadIdx.x + 1;
const int pos = g_row * cols + g_col;
// copy to l_data
l_data[l_row * (L_SIZE + 2) + l_col] = data[pos];
// top most row
if(l_row == 1) {
l_data[0 * (L_SIZE + 2) + l_col] = data[pos - cols];
// top left
if(l_col == 1)
l_data[0 * (L_SIZE + 2) + 0] = data[pos - cols - 1];
// top right
else if(l_col == L_SIZE)
l_data[0 * (L_SIZE + 2) + (L_SIZE + 1)] = data[pos - cols + 1];
}
// bottom most row
else if(l_row == L_SIZE) {
l_data[(L_SIZE + 1) * (L_SIZE + 2) + l_col] = data[pos + cols];
// bottom left
if(l_col == 1)
l_data[(L_SIZE + 1) * (L_SIZE + 2) + 0] = data[pos + cols - 1];
// bottom right
else if(l_col == L_SIZE)
l_data[(L_SIZE + 1) * (L_SIZE + 2) + (L_SIZE + 1)] = data[pos + cols + 1];
}
if(l_col == 1)
l_data[l_row * (L_SIZE + 2) + 0] = data[pos - 1];
else if(l_col == L_SIZE)
l_data[l_row * (L_SIZE + 2) + (L_SIZE + 1)] = data[pos + 1];
__syncthreads();
unsigned char my_magnitude = l_data[l_row * (L_SIZE + 2) + l_col];
// The following variables are used to address the matrices more easily
switch(theta[pos]) {
// A gradient angle of 0 degrees = an edge that is North/South
// Check neighbors to the East and West
case 0:
        // suppress me if my neighbor has larger magnitude
if(my_magnitude <= l_data[l_row * (L_SIZE + 2) + l_col + 1] || // east
my_magnitude <= l_data[l_row * (L_SIZE + 2) + l_col - 1]) // west
{
out[pos] = 0;
}
// otherwise, copy my value to the output buffer
else {
out[pos] = my_magnitude;
}
break;
// A gradient angle of 45 degrees = an edge that is NW/SE
// Check neighbors to the NE and SW
case 45:
        // suppress me if my neighbor has larger magnitude
if(my_magnitude <= l_data[(l_row - 1) * (L_SIZE + 2) + l_col + 1] || // north east
my_magnitude <= l_data[(l_row + 1) * (L_SIZE + 2) + l_col - 1]) // south west
{
out[pos] = 0;
}
// otherwise, copy my value to the output buffer
else {
out[pos] = my_magnitude;
}
break;
// A gradient angle of 90 degrees = an edge that is E/W
// Check neighbors to the North and South
case 90:
        // suppress me if my neighbor has larger magnitude
if(my_magnitude <= l_data[(l_row - 1) * (L_SIZE + 2) + l_col] || // north
my_magnitude <= l_data[(l_row + 1) * (L_SIZE + 2) + l_col]) // south
{
out[pos] = 0;
}
// otherwise, copy my value to the output buffer
else {
out[pos] = my_magnitude;
}
break;
// A gradient angle of 135 degrees = an edge that is NE/SW
// Check neighbors to the NW and SE
case 135:
        // suppress me if my neighbor has larger magnitude
if(my_magnitude <= l_data[(l_row - 1) * (L_SIZE + 2) + l_col - 1] || // north west
my_magnitude <= l_data[(l_row + 1) * (L_SIZE + 2) + l_col + 1]) // south east
{
out[pos] = 0;
}
// otherwise, copy my value to the output buffer
else {
out[pos] = my_magnitude;
}
break;
default: out[pos] = my_magnitude; break;
}
}
// Hysteresis Threshold Kernel
// data: image input data with each pixel taking up 1 byte (8Bit 1Channel)
// out: image output data (8B1C)
__global__ void hyst_kernel(unsigned char *data, unsigned char *out, int rows, int cols) {
// Establish our high and low thresholds as floats
float lowThresh = 10;
float highThresh = 70;
// These variables are offset by one to avoid seg. fault errors
// As such, this kernel ignores the outside ring of pixels
const int row = blockIdx.y * blockDim.y + threadIdx.y + 1;
const int col = blockIdx.x * blockDim.x + threadIdx.x + 1;
const int pos = row * cols + col;
const unsigned char EDGE = 255;
unsigned char magnitude = data[pos];
if(magnitude >= highThresh)
out[pos] = EDGE;
else if(magnitude <= lowThresh)
out[pos] = 0;
else {
float med = (highThresh + lowThresh) / 2;
if(magnitude >= med)
out[pos] = EDGE;
else
out[pos] = 0;
}
}
cudaError_t call_gaussian_kernel(int threads, unsigned char *data, unsigned char *out,
int rows, int cols, int l_mem_size){
dim3 dimGrid((cols-2)/threads, (rows-2)/threads);
dim3 dimBlock(threads, threads);
#ifdef ENABLE_CURD
allocateReadWriteSets(dimGrid, dimBlock);
#endif
gaussian_kernel<<<dimGrid, dimBlock, l_mem_size>>>(data, out, rows, cols);
#ifdef ENABLE_CURD
freeReadWriteSets(dimGrid, dimBlock);
#endif
cudaError_t err = cudaGetLastError();
return err;
}
cudaError_t call_sobel_kernel(int threads, unsigned char *data, unsigned char *out,
unsigned char *theta, int rows, int cols, int l_mem_size){
dim3 dimGrid((cols-2)/threads, (rows-2)/threads);
dim3 dimBlock(threads, threads);
#ifdef ENABLE_CURD
allocateReadWriteSets(dimGrid, dimBlock);
#endif
sobel_kernel<<<dimGrid, dimBlock, l_mem_size>>>(data, out, theta, rows, cols);
#ifdef ENABLE_CURD
freeReadWriteSets(dimGrid, dimBlock);
#endif
cudaError_t err = cudaGetLastError();
return err;
}
cudaError_t call_non_max_supp_kernel(int threads, unsigned char *data, unsigned char *out,
unsigned char *theta, int rows, int cols, int l_mem_size){
dim3 dimGrid((cols-2)/threads, (rows-2)/threads);
dim3 dimBlock(threads, threads);
#ifdef ENABLE_CURD
allocateReadWriteSets(dimGrid, dimBlock);
#endif
non_max_supp_kernel<<<dimGrid, dimBlock, l_mem_size>>>(data, out, theta, rows, cols);
#ifdef ENABLE_CURD
freeReadWriteSets(dimGrid, dimBlock);
#endif
cudaError_t err = cudaGetLastError();
return err;
}
cudaError_t call_hyst_kernel(int threads, unsigned char *data, unsigned char *out,
int rows, int cols){
dim3 dimGrid((cols-2)/threads, (rows-2)/threads);
dim3 dimBlock(threads, threads);
#ifdef ENABLE_CURD
allocateReadWriteSets(dimGrid, dimBlock);
#endif
hyst_kernel<<<dimGrid, dimBlock>>>(data, out, rows, cols);
#ifdef ENABLE_CURD
freeReadWriteSets(dimGrid, dimBlock);
#endif
cudaError_t err = cudaGetLastError();
return err;
}
|
bd62272ec92ca200a4bbc516d00b2f0eae583210.hip | // !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
//
// This sample illustrates the usage of CUDA events for both GPU timing and
// overlapping CPU and GPU execution. Events are inserted into a stream
// of CUDA calls. Since CUDA stream calls are asynchronous, the CPU can
// perform computations while GPU is executing (including DMA memcopies
// between the host and device). CPU can query CUDA events to determine
// whether GPU has completed tasks.
//
// includes, system
#include <stdio.h>
// includes CUDA Runtime
#include <hip/hip_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper utility functions
__global__ void increment_kernel(int *g_data, int inc_value)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
g_data[idx] = g_data[idx] + inc_value;
}
bool correct_output(int *data, const int n, const int x)
{
for (int i = 0; i < n; i++)
if (data[i] != x)
{
printf("Error! data[%d] = %d, ref = %d\n", i, data[i], x);
return false;
}
return true;
}
int main(int argc, char *argv[])
{
int devID;
hipDeviceProp_t deviceProps;
printf("[%s] - Starting...\n", argv[0]);
// This will pick the best possible CUDA capable device
devID = findCudaDevice(argc, (const char **)argv);
// get device name
checkCudaErrors(hipGetDeviceProperties(&deviceProps, devID));
printf("CUDA device [%s]\n", deviceProps.name);
    int n = 512; // * 1024;
int nbytes = n * sizeof(int);
int value = 26;
// allocate host memory
int *a = 0;
checkCudaErrors(hipHostMalloc((void **)&a, nbytes));
memset(a, 0, nbytes);
// allocate device memory
int *d_a=0;
checkCudaErrors(hipMalloc((void **)&d_a, nbytes));
checkCudaErrors(hipMemset(d_a, 255, nbytes));
// set kernel launch configuration
dim3 threads = dim3(512, 1);
dim3 blocks = dim3(n / threads.x, 1);
// create cuda event handles
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
checkCudaErrors(hipDeviceSynchronize());
float gpu_time = 0.0f;
// asynchronously issue work to the GPU (all to stream 0)
sdkStartTimer(&timer);
hipEventRecord(start, 0);
hipMemcpyAsync(d_a, a, nbytes, hipMemcpyHostToDevice, 0);
hipLaunchKernelGGL(( increment_kernel), dim3(blocks), dim3(threads), 0, 0, d_a, value);
hipMemcpyAsync(a, d_a, nbytes, hipMemcpyDeviceToHost, 0);
hipEventRecord(stop, 0);
sdkStopTimer(&timer);
// have CPU do some work while waiting for stage 1 to finish
unsigned long int counter=0;
while (hipEventQuery(stop) == hipErrorNotReady)
{
counter++;
}
checkCudaErrors(hipEventElapsedTime(&gpu_time, start, stop));
// print the cpu and gpu times
printf("time spent executing by the GPU: %.2f\n", gpu_time);
printf("time spent by CPU in CUDA calls: %.2f\n", sdkGetTimerValue(&timer));
printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);
// check the output for correctness
bool bFinalResults = correct_output(a, n, value);
// release resources
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
checkCudaErrors(hipHostFree(a));
checkCudaErrors(hipFree(d_a));
exit(bFinalResults ? EXIT_SUCCESS : EXIT_FAILURE);
}
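// Note on the busy-wait above: hipEventQuery(stop) keeps returning hipErrorNotReady
// while the asynchronous copies and the kernel in stream 0 are still in flight, so the
// counter measures how much CPU work overlaps with the GPU. A blocking variant would
// simply call hipEventSynchronize(stop) instead of polling.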
| bd62272ec92ca200a4bbc516d00b2f0eae583210.cu | ////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
//
// This sample illustrates the usage of CUDA events for both GPU timing and
// overlapping CPU and GPU execution. Events are inserted into a stream
// of CUDA calls. Since CUDA stream calls are asynchronous, the CPU can
// perform computations while GPU is executing (including DMA memcopies
// between the host and device). CPU can query CUDA events to determine
// whether GPU has completed tasks.
//
// includes, system
#include <stdio.h>
// includes CUDA Runtime
#include <cuda_runtime.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper utility functions
__global__ void increment_kernel(int *g_data, int inc_value)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
g_data[idx] = g_data[idx] + inc_value;
}
bool correct_output(int *data, const int n, const int x)
{
for (int i = 0; i < n; i++)
if (data[i] != x)
{
printf("Error! data[%d] = %d, ref = %d\n", i, data[i], x);
return false;
}
return true;
}
int main(int argc, char *argv[])
{
int devID;
cudaDeviceProp deviceProps;
printf("[%s] - Starting...\n", argv[0]);
// This will pick the best possible CUDA capable device
devID = findCudaDevice(argc, (const char **)argv);
// get device name
checkCudaErrors(cudaGetDeviceProperties(&deviceProps, devID));
printf("CUDA device [%s]\n", deviceProps.name);
    int n = 512; // * 1024;
int nbytes = n * sizeof(int);
int value = 26;
// allocate host memory
int *a = 0;
checkCudaErrors(cudaMallocHost((void **)&a, nbytes));
memset(a, 0, nbytes);
// allocate device memory
int *d_a=0;
checkCudaErrors(cudaMalloc((void **)&d_a, nbytes));
checkCudaErrors(cudaMemset(d_a, 255, nbytes));
// set kernel launch configuration
dim3 threads = dim3(512, 1);
dim3 blocks = dim3(n / threads.x, 1);
// create cuda event handles
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
checkCudaErrors(cudaDeviceSynchronize());
float gpu_time = 0.0f;
// asynchronously issue work to the GPU (all to stream 0)
sdkStartTimer(&timer);
cudaEventRecord(start, 0);
cudaMemcpyAsync(d_a, a, nbytes, cudaMemcpyHostToDevice, 0);
increment_kernel<<<blocks, threads, 0, 0>>>(d_a, value);
cudaMemcpyAsync(a, d_a, nbytes, cudaMemcpyDeviceToHost, 0);
cudaEventRecord(stop, 0);
sdkStopTimer(&timer);
// have CPU do some work while waiting for stage 1 to finish
unsigned long int counter=0;
while (cudaEventQuery(stop) == cudaErrorNotReady)
{
counter++;
}
checkCudaErrors(cudaEventElapsedTime(&gpu_time, start, stop));
// print the cpu and gpu times
printf("time spent executing by the GPU: %.2f\n", gpu_time);
printf("time spent by CPU in CUDA calls: %.2f\n", sdkGetTimerValue(&timer));
printf("CPU executed %lu iterations while waiting for GPU to finish\n", counter);
// check the output for correctness
bool bFinalResults = correct_output(a, n, value);
// release resources
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
checkCudaErrors(cudaFreeHost(a));
checkCudaErrors(cudaFree(d_a));
exit(bFinalResults ? EXIT_SUCCESS : EXIT_FAILURE);
}
|
52788a24936c5d69d83b01022b6984a84225b817.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) Chris Choy ([email protected]).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to do
* so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#ifndef GPU_POOLING_AVG
#define GPU_POOLING_AVG
#include <limits>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/host_vector.h>
#include <thrust/functional.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include "gpu.cuh"
#include "pooling_avg.cuh"
#include "utils.hpp"
template <typename Dtype>
__global__ void fill(const int n, Dtype *in_feat, Dtype val) {
CUDA_KERNEL_LOOP(index, n) { in_feat[index] = val; }
}
template <typename Dtype>
__global__ void col2row_major(const int n, const int nrows, const int ncols,
const Dtype *colA, Dtype *rowA) {
int i, j;
CUDA_KERNEL_LOOP(index, n) {
i = index % nrows;
j = index / nrows;
rowA[i * ncols + j] = colA[index];
}
}
template <typename Dtype>
__global__ void col2row_major_with_div(const int n, const int nrows,
const int ncols,
const Dtype *num_nonzero,
const Dtype *colA, Dtype *rowA) {
int i, j;
CUDA_KERNEL_LOOP(index, n) {
i = index % nrows;
j = index / nrows;
if (num_nonzero[i]) {
rowA[i * ncols + j] = colA[index] / num_nonzero[i];
} else {
rowA[i * ncols + j] = colA[index];
}
}
}
template <typename Dtype, typename Itype>
__global__ void set_gradient(const int n, const Dtype *d_grad_out,
Dtype *d_grad_in, const Itype *out_index,
int nchannel) {
CUDA_KERNEL_LOOP(index, n) {
atomicAdd(&d_grad_in[out_index[index]], d_grad_out[index]);
}
}
template <typename Dtype, typename Itype>
__global__ void
set_gradient_nonzero(const int n, const Dtype *d_grad_out, Dtype *d_grad_in,
int nchannel, const Itype *in_map, const Itype *out_map) {
CUDA_KERNEL_LOOP(index, n) {
int nrow = index / nchannel;
int ch = index % nchannel;
atomicAdd(&d_grad_in[in_map[nrow] * nchannel + ch],
d_grad_out[out_map[nrow] * nchannel + ch]);
}
}
template <typename Dtype, typename Itype>
__global__ void
set_gradient_nonzero_avg(const int n, const Dtype *d_grad_out, Dtype *d_grad_in,
int nchannel, const Dtype *d_num_nonzero,
const Itype *in_map, const Itype *out_map) {
CUDA_KERNEL_LOOP(index, n) {
int nrow = index / nchannel;
int ch = index % nchannel;
int curr_num_nonzero = d_num_nonzero[out_map[nrow]];
if (curr_num_nonzero > 0)
atomicAdd(&d_grad_in[in_map[nrow] * nchannel + ch],
d_grad_out[out_map[nrow] * nchannel + ch] / curr_num_nonzero);
}
}
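// The forward pass below evaluates out = A * in_feat, where A is the sparse
// (out_nrows x in_nrows) pooling matrix assembled from the in/out maps with every
// nonzero equal to 1. With use_avg, each output row r is additionally divided by its
// nonzero count: out[r] = (1 / num_nonzero[r]) * sum of the input rows mapped to r.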
template <typename Dtype, typename Itype>
void NonzeroAvgPoolingForwardKernelGPU(
const Dtype *d_in_feat, int in_nrows, Dtype *d_out_feat, int out_nrows,
Dtype *d_num_nonzero, int nchannel,
const std::vector<std::vector<Itype>> &in_maps,
const std::vector<std::vector<Itype>> &out_maps, bool use_avg,
hipsparseHandle_t cushandle, hipStream_t stream) {
int nnz = 0;
const Dtype alpha = 1;
const Dtype beta = 0;
hipsparseMatDescr_t descr = 0;
Itype *d_in_map, *d_out_map, *d_csr_row;
Dtype *d_ones, *d_csr_val, *d_tmp_out_feat;
// Copy all maps to one vector
for (auto map : in_maps)
nnz += map.size();
CUDA_CHECK(hipMalloc((void **)&d_in_map,
(2 * nnz + out_nrows + 1) * sizeof(Itype)));
d_out_map = d_in_map + nnz;
d_csr_row = d_out_map + nnz;
Itype *d_in_map_iter = d_in_map, *d_out_map_iter = d_out_map;
for (int k = 0; k < in_maps.size(); k++) {
int curr_n = in_maps[k].size();
if (curr_n > 0) {
CUDA_CHECK(hipMemcpy(d_in_map_iter, in_maps[k].data(),
sizeof(Itype) * curr_n, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(d_out_map_iter, out_maps[k].data(),
sizeof(Itype) * curr_n, hipMemcpyHostToDevice));
d_in_map_iter += curr_n;
d_out_map_iter += curr_n;
}
}
if (use_avg) {
CUDA_CHECK(
hipMalloc((void **)&d_ones,
(in_nrows + nnz + nchannel * out_nrows) * sizeof(Dtype)));
d_csr_val = d_ones + in_nrows;
d_tmp_out_feat = d_csr_val + nnz;
hipLaunchKernelGGL(( fill<Dtype>), dim3(GET_BLOCKS(in_nrows)), dim3(CUDA_NUM_THREADS), 0, stream,
in_nrows, d_ones, (Dtype)1.);
} else {
CUDA_CHECK(hipMalloc((void **)&d_ones,
(nnz + nchannel * out_nrows) * sizeof(Dtype)));
d_csr_val = d_ones;
d_tmp_out_feat = d_csr_val + nnz;
}
// CUDA_CHECK(hipMalloc((void **)&d_ones, in_nrows * sizeof(Dtype)));
// CUDA_CHECK(hipMalloc((void **)&d_csr_val, nnz * sizeof(Dtype)));
// CUDA_CHECK(hipMalloc((void **)&d_tmp_out_feat,
// nchannel * out_nrows * sizeof(Dtype)));
hipLaunchKernelGGL(( fill<Dtype>), dim3(GET_BLOCKS(nnz)), dim3(CUDA_NUM_THREADS), 0, stream, nnz, d_csr_val,
(Dtype)1.);
CUSPARSE_CHECK(hipsparseCreateMatDescr(&descr));
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
// Sort COO first
sort_coo_gpu(cushandle, out_nrows, in_nrows, nnz, d_out_map, d_in_map);
  // For CSR, sort row and col inds by row major.
CUSPARSE_CHECK(hipsparseXcoo2csr(cushandle, d_out_map, nnz, out_nrows,
d_csr_row, HIPSPARSE_INDEX_BASE_ZERO));
CUSPARSE_CHECK(
cusparse_csrmm<Dtype>(cushandle,
HIPSPARSE_OPERATION_NON_TRANSPOSE, // op(A)
HIPSPARSE_OPERATION_TRANSPOSE, // op(B)
out_nrows, // M
nchannel, // N
in_nrows, // K
nnz, &alpha, descr,
d_csr_val, // val
d_csr_row, // row
d_in_map, // col
d_in_feat, // B
nchannel, // ldb
&beta,
d_tmp_out_feat, // C
out_nrows // ldc
));
if (use_avg) {
CUSPARSE_CHECK(
cusparse_csrmv<Dtype>(cushandle,
HIPSPARSE_OPERATION_NON_TRANSPOSE, // op(A)
out_nrows, // M
in_nrows, // K
nnz, &alpha, descr,
d_csr_val, // val
d_csr_row, // row
d_in_map, // col
d_ones, // B (in_nrows > out_nrows)
&beta,
d_num_nonzero)); // C
hipLaunchKernelGGL(( col2row_major_with_div<Dtype>)
, dim3(GET_BLOCKS(out_nrows * nchannel)), dim3(CUDA_NUM_THREADS), 0, stream,
out_nrows * nchannel, out_nrows, nchannel, d_num_nonzero,
d_tmp_out_feat, d_out_feat);
} else {
hipLaunchKernelGGL(( col2row_major<Dtype>)
, dim3(GET_BLOCKS(out_nrows * nchannel)), dim3(CUDA_NUM_THREADS), 0, stream,
out_nrows * nchannel, out_nrows, nchannel, d_tmp_out_feat,
d_out_feat);
}
CUSPARSE_CHECK(hipsparseDestroyMatDescr(descr));
hipFree(d_in_map);
hipFree(d_ones);
}
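// Note: the csrmm call above writes d_tmp_out_feat in column-major order
// (ldc = out_nrows), which is why col2row_major / col2row_major_with_div are needed to
// produce the row-major (out_nrows x nchannel) output; the averaging variant folds the
// division by num_nonzero into that same transposition pass.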
template void NonzeroAvgPoolingForwardKernelGPU<float, int32_t>(
const float *d_in_feat, int in_nrows, float *d_out_feat, int out_nrows,
float *d_num_nonzero, int nchannel,
const std::vector<std::vector<int32_t>> &in_map,
const std::vector<std::vector<int32_t>> &out_map, bool use_avg,
hipsparseHandle_t cushandle, hipStream_t stream);
template void NonzeroAvgPoolingForwardKernelGPU<double, int32_t>(
const double *d_in_feat, int in_nrows, double *d_out_feat, int out_nrows,
double *d_num_nonzero, int nchannel,
const std::vector<std::vector<int32_t>> &in_map,
const std::vector<std::vector<int32_t>> &out_map, bool use_avg,
hipsparseHandle_t cushandle, hipStream_t stream);
template <typename Dtype, typename Itype>
void NonzeroAvgPoolingBackwardKernelGPU(
Dtype *d_grad_in_feat, int in_nrows, const Dtype *d_grad_out_feat,
int out_nrows, const Dtype *d_num_nonzero, int nchannel,
const std::vector<std::vector<Itype>> &in_maps,
const std::vector<std::vector<Itype>> &out_maps, bool use_avg,
hipStream_t stream) {
int nnz = 0;
Itype *d_in_map, *d_out_map;
// Copy all maps to one vector
for (auto map : in_maps)
nnz += map.size();
CUDA_CHECK(hipMalloc((void **)&d_in_map, 2 * nnz * sizeof(Itype)));
d_out_map = d_in_map + nnz;
// Cleanup gradients
CUDA_CHECK(
hipMemset(d_grad_in_feat, 0, in_nrows * nchannel * sizeof(Dtype)));
Itype *d_in_map_iter = d_in_map, *d_out_map_iter = d_out_map;
for (int k = 0; k < in_maps.size(); k++) {
int curr_n = in_maps[k].size();
if (curr_n > 0) {
CUDA_CHECK(hipMemcpy(d_in_map_iter, in_maps[k].data(),
sizeof(Itype) * curr_n, hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(d_out_map_iter, out_maps[k].data(),
sizeof(Itype) * curr_n, hipMemcpyHostToDevice));
d_in_map_iter += curr_n;
d_out_map_iter += curr_n;
}
}
if (use_avg) {
hipLaunchKernelGGL(( set_gradient_nonzero_avg<Dtype>)
, dim3(GET_BLOCKS(nnz * nchannel)), dim3(CUDA_NUM_THREADS), 0, stream,
nnz * nchannel, d_grad_out_feat, d_grad_in_feat, nchannel,
d_num_nonzero, d_in_map, d_out_map);
} else {
hipLaunchKernelGGL(( set_gradient_nonzero<Dtype>)
, dim3(GET_BLOCKS(nnz * nchannel)), dim3(CUDA_NUM_THREADS), 0, stream,
nnz * nchannel, d_grad_out_feat, d_grad_in_feat, nchannel, d_in_map,
d_out_map);
}
hipFree(d_in_map);
}
template void NonzeroAvgPoolingBackwardKernelGPU<float, int32_t>(
float *d_grad_in_feat, int in_nrows, const float *d_grad_out_feat,
int out_nrows, const float *d_num_nonzero, int nchannel,
const std::vector<std::vector<int32_t>> &in_map,
const std::vector<std::vector<int32_t>> &out_map, bool use_avg,
hipStream_t stream);
template void NonzeroAvgPoolingBackwardKernelGPU<double, int32_t>(
double *d_grad_in_feat, int in_nrows, const double *d_grad_out_feat,
int out_nrows, const double *d_num_nonzero, int nchannel,
const std::vector<std::vector<int32_t>> &in_map,
const std::vector<std::vector<int32_t>> &out_map, bool use_avg,
hipStream_t stream);
#endif
| 52788a24936c5d69d83b01022b6984a84225b817.cu | /* Copyright (c) Chris Choy ([email protected]).
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to do
* so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#ifndef GPU_POOLING_AVG
#define GPU_POOLING_AVG
#include <limits>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <thrust/host_vector.h>
#include <thrust/functional.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include "gpu.cuh"
#include "pooling_avg.cuh"
#include "utils.hpp"
template <typename Dtype>
__global__ void fill(const int n, Dtype *in_feat, Dtype val) {
CUDA_KERNEL_LOOP(index, n) { in_feat[index] = val; }
}
template <typename Dtype>
__global__ void col2row_major(const int n, const int nrows, const int ncols,
const Dtype *colA, Dtype *rowA) {
int i, j;
CUDA_KERNEL_LOOP(index, n) {
i = index % nrows;
j = index / nrows;
rowA[i * ncols + j] = colA[index];
}
}
template <typename Dtype>
__global__ void col2row_major_with_div(const int n, const int nrows,
const int ncols,
const Dtype *num_nonzero,
const Dtype *colA, Dtype *rowA) {
int i, j;
CUDA_KERNEL_LOOP(index, n) {
i = index % nrows;
j = index / nrows;
if (num_nonzero[i]) {
rowA[i * ncols + j] = colA[index] / num_nonzero[i];
} else {
rowA[i * ncols + j] = colA[index];
}
}
}
template <typename Dtype, typename Itype>
__global__ void set_gradient(const int n, const Dtype *d_grad_out,
Dtype *d_grad_in, const Itype *out_index,
int nchannel) {
CUDA_KERNEL_LOOP(index, n) {
atomicAdd(&d_grad_in[out_index[index]], d_grad_out[index]);
}
}
template <typename Dtype, typename Itype>
__global__ void
set_gradient_nonzero(const int n, const Dtype *d_grad_out, Dtype *d_grad_in,
int nchannel, const Itype *in_map, const Itype *out_map) {
CUDA_KERNEL_LOOP(index, n) {
int nrow = index / nchannel;
int ch = index % nchannel;
atomicAdd(&d_grad_in[in_map[nrow] * nchannel + ch],
d_grad_out[out_map[nrow] * nchannel + ch]);
}
}
template <typename Dtype, typename Itype>
__global__ void
set_gradient_nonzero_avg(const int n, const Dtype *d_grad_out, Dtype *d_grad_in,
int nchannel, const Dtype *d_num_nonzero,
const Itype *in_map, const Itype *out_map) {
CUDA_KERNEL_LOOP(index, n) {
int nrow = index / nchannel;
int ch = index % nchannel;
int curr_num_nonzero = d_num_nonzero[out_map[nrow]];
if (curr_num_nonzero > 0)
atomicAdd(&d_grad_in[in_map[nrow] * nchannel + ch],
d_grad_out[out_map[nrow] * nchannel + ch] / curr_num_nonzero);
}
}
template <typename Dtype, typename Itype>
void NonzeroAvgPoolingForwardKernelGPU(
const Dtype *d_in_feat, int in_nrows, Dtype *d_out_feat, int out_nrows,
Dtype *d_num_nonzero, int nchannel,
const std::vector<std::vector<Itype>> &in_maps,
const std::vector<std::vector<Itype>> &out_maps, bool use_avg,
cusparseHandle_t cushandle, cudaStream_t stream) {
int nnz = 0;
const Dtype alpha = 1;
const Dtype beta = 0;
cusparseMatDescr_t descr = 0;
Itype *d_in_map, *d_out_map, *d_csr_row;
Dtype *d_ones, *d_csr_val, *d_tmp_out_feat;
// Copy all maps to one vector
for (auto map : in_maps)
nnz += map.size();
CUDA_CHECK(cudaMalloc((void **)&d_in_map,
(2 * nnz + out_nrows + 1) * sizeof(Itype)));
d_out_map = d_in_map + nnz;
d_csr_row = d_out_map + nnz;
Itype *d_in_map_iter = d_in_map, *d_out_map_iter = d_out_map;
for (int k = 0; k < in_maps.size(); k++) {
int curr_n = in_maps[k].size();
if (curr_n > 0) {
CUDA_CHECK(cudaMemcpy(d_in_map_iter, in_maps[k].data(),
sizeof(Itype) * curr_n, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(d_out_map_iter, out_maps[k].data(),
sizeof(Itype) * curr_n, cudaMemcpyHostToDevice));
d_in_map_iter += curr_n;
d_out_map_iter += curr_n;
}
}
if (use_avg) {
CUDA_CHECK(
cudaMalloc((void **)&d_ones,
(in_nrows + nnz + nchannel * out_nrows) * sizeof(Dtype)));
d_csr_val = d_ones + in_nrows;
d_tmp_out_feat = d_csr_val + nnz;
fill<Dtype><<<GET_BLOCKS(in_nrows), CUDA_NUM_THREADS, 0, stream>>>(
in_nrows, d_ones, (Dtype)1.);
} else {
CUDA_CHECK(cudaMalloc((void **)&d_ones,
(nnz + nchannel * out_nrows) * sizeof(Dtype)));
d_csr_val = d_ones;
d_tmp_out_feat = d_csr_val + nnz;
}
// CUDA_CHECK(cudaMalloc((void **)&d_ones, in_nrows * sizeof(Dtype)));
// CUDA_CHECK(cudaMalloc((void **)&d_csr_val, nnz * sizeof(Dtype)));
// CUDA_CHECK(cudaMalloc((void **)&d_tmp_out_feat,
// nchannel * out_nrows * sizeof(Dtype)));
fill<Dtype><<<GET_BLOCKS(nnz), CUDA_NUM_THREADS, 0, stream>>>(nnz, d_csr_val,
(Dtype)1.);
CUSPARSE_CHECK(cusparseCreateMatDescr(&descr));
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
// Sort COO first
sort_coo_gpu(cushandle, out_nrows, in_nrows, nnz, d_out_map, d_in_map);
  // For CSR, sort row and col inds by row major.
CUSPARSE_CHECK(cusparseXcoo2csr(cushandle, d_out_map, nnz, out_nrows,
d_csr_row, CUSPARSE_INDEX_BASE_ZERO));
CUSPARSE_CHECK(
cusparse_csrmm<Dtype>(cushandle,
CUSPARSE_OPERATION_NON_TRANSPOSE, // op(A)
CUSPARSE_OPERATION_TRANSPOSE, // op(B)
out_nrows, // M
nchannel, // N
in_nrows, // K
nnz, &alpha, descr,
d_csr_val, // val
d_csr_row, // row
d_in_map, // col
d_in_feat, // B
nchannel, // ldb
&beta,
d_tmp_out_feat, // C
out_nrows // ldc
));
if (use_avg) {
CUSPARSE_CHECK(
cusparse_csrmv<Dtype>(cushandle,
CUSPARSE_OPERATION_NON_TRANSPOSE, // op(A)
out_nrows, // M
in_nrows, // K
nnz, &alpha, descr,
d_csr_val, // val
d_csr_row, // row
d_in_map, // col
d_ones, // B (in_nrows > out_nrows)
&beta,
d_num_nonzero)); // C
col2row_major_with_div<Dtype>
<<<GET_BLOCKS(out_nrows * nchannel), CUDA_NUM_THREADS, 0, stream>>>(
out_nrows * nchannel, out_nrows, nchannel, d_num_nonzero,
d_tmp_out_feat, d_out_feat);
} else {
col2row_major<Dtype>
<<<GET_BLOCKS(out_nrows * nchannel), CUDA_NUM_THREADS, 0, stream>>>(
out_nrows * nchannel, out_nrows, nchannel, d_tmp_out_feat,
d_out_feat);
}
CUSPARSE_CHECK(cusparseDestroyMatDescr(descr));
cudaFree(d_in_map);
cudaFree(d_ones);
}
template void NonzeroAvgPoolingForwardKernelGPU<float, int32_t>(
const float *d_in_feat, int in_nrows, float *d_out_feat, int out_nrows,
float *d_num_nonzero, int nchannel,
const std::vector<std::vector<int32_t>> &in_map,
const std::vector<std::vector<int32_t>> &out_map, bool use_avg,
cusparseHandle_t cushandle, cudaStream_t stream);
template void NonzeroAvgPoolingForwardKernelGPU<double, int32_t>(
const double *d_in_feat, int in_nrows, double *d_out_feat, int out_nrows,
double *d_num_nonzero, int nchannel,
const std::vector<std::vector<int32_t>> &in_map,
const std::vector<std::vector<int32_t>> &out_map, bool use_avg,
cusparseHandle_t cushandle, cudaStream_t stream);
template <typename Dtype, typename Itype>
void NonzeroAvgPoolingBackwardKernelGPU(
Dtype *d_grad_in_feat, int in_nrows, const Dtype *d_grad_out_feat,
int out_nrows, const Dtype *d_num_nonzero, int nchannel,
const std::vector<std::vector<Itype>> &in_maps,
const std::vector<std::vector<Itype>> &out_maps, bool use_avg,
cudaStream_t stream) {
int nnz = 0;
Itype *d_in_map, *d_out_map;
// Copy all maps to one vector
for (auto map : in_maps)
nnz += map.size();
CUDA_CHECK(cudaMalloc((void **)&d_in_map, 2 * nnz * sizeof(Itype)));
d_out_map = d_in_map + nnz;
// Cleanup gradients
CUDA_CHECK(
cudaMemset(d_grad_in_feat, 0, in_nrows * nchannel * sizeof(Dtype)));
Itype *d_in_map_iter = d_in_map, *d_out_map_iter = d_out_map;
for (int k = 0; k < in_maps.size(); k++) {
int curr_n = in_maps[k].size();
if (curr_n > 0) {
CUDA_CHECK(cudaMemcpy(d_in_map_iter, in_maps[k].data(),
sizeof(Itype) * curr_n, cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(d_out_map_iter, out_maps[k].data(),
sizeof(Itype) * curr_n, cudaMemcpyHostToDevice));
d_in_map_iter += curr_n;
d_out_map_iter += curr_n;
}
}
if (use_avg) {
set_gradient_nonzero_avg<Dtype>
<<<GET_BLOCKS(nnz * nchannel), CUDA_NUM_THREADS, 0, stream>>>(
nnz * nchannel, d_grad_out_feat, d_grad_in_feat, nchannel,
d_num_nonzero, d_in_map, d_out_map);
} else {
set_gradient_nonzero<Dtype>
<<<GET_BLOCKS(nnz * nchannel), CUDA_NUM_THREADS, 0, stream>>>(
nnz * nchannel, d_grad_out_feat, d_grad_in_feat, nchannel, d_in_map,
d_out_map);
}
cudaFree(d_in_map);
}
template void NonzeroAvgPoolingBackwardKernelGPU<float, int32_t>(
float *d_grad_in_feat, int in_nrows, const float *d_grad_out_feat,
int out_nrows, const float *d_num_nonzero, int nchannel,
const std::vector<std::vector<int32_t>> &in_map,
const std::vector<std::vector<int32_t>> &out_map, bool use_avg,
cudaStream_t stream);
template void NonzeroAvgPoolingBackwardKernelGPU<double, int32_t>(
double *d_grad_in_feat, int in_nrows, const double *d_grad_out_feat,
int out_nrows, const double *d_num_nonzero, int nchannel,
const std::vector<std::vector<int32_t>> &in_map,
const std::vector<std::vector<int32_t>> &out_map, bool use_avg,
cudaStream_t stream);
#endif
|
a986f580183f05cbdf7e8410056cbaf0756ee9ff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef __NVCC__
#include <hipcub/hipcub.hpp>
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#if defined(PADDLE_WITH_CUDA)
#include <hip/hip_fp16.h>
#endif
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
namespace paddle {
namespace operators {
using float16 = phi::dtype::float16;
template <typename T>
static __device__ __forceinline__ T Relu(T x) {
return static_cast<T>(fmaxf(0.f, x));
}
static __device__ __forceinline__ float RealSqrt(float x) { return sqrtf(x); }
static __device__ __forceinline__ double RealSqrt(double x) { return sqrt(x); }
template <typename T>
struct PairForLayerNorm {
__device__ __forceinline__ PairForLayerNorm() {}
__device__ __forceinline__ PairForLayerNorm(const T& first, const T& second)
: first_(first), second_(second) {}
T first_;
T second_;
};
template <typename T>
struct PairForLayerNormAddFunctor {
__device__ __forceinline__ PairForLayerNorm<T> operator()(
const PairForLayerNorm<T>& p1, const PairForLayerNorm<T>& p2) {
return PairForLayerNorm<T>(p1.first_ + p2.first_, p1.second_ + p2.second_);
}
};
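// Reducing (sum, square_sum) pairs with the functor above yields the layer-norm
// statistics in a single pass: mean = sum / N and variance = square_sum / N - mean^2,
// i.e. Var[x] = E[x^2] - (E[x])^2, which is exactly how the kernels below unpack the
// reduced pair.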
template <typename T, bool DoRelu, int BlockDim>
__global__ void InplaceAddReluAddLayerNormKernel(const T* y,
const T* bias_0,
const T* bias_1,
const T* scale,
T* out,
T* mean,
T* variance,
int M,
int N,
float epsilon) {
using BlockReduce = hipcub::BlockReduce<PairForLayerNorm<T>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ T shared_mem[BlockDim + 2];
for (int i = blockIdx.x; i < M; i += gridDim.x) {
int index = i * N + threadIdx.x;
    // The first BlockDim elements will be saved to shared memory.
int save_index = threadIdx.x;
T* save_ptr = shared_mem;
T sum_i = 0;
T square_sum_i = 0;
for (int j = threadIdx.x; j < N; j += blockDim.x) {
T tmp_0 = out[index];
// Add bias
T tmp_1 = bias_0 ? tmp_0 + bias_0[j] : tmp_0;
// Relu
T tmp_2 = DoRelu ? Relu(tmp_1) : tmp_1;
// elementwise_add
T tmp_3 = tmp_2 + y[index];
// Save
save_ptr[save_index] = tmp_3;
save_ptr = out;
index += blockDim.x;
save_index = index;
// For layer_norm, reduce to calculate mean and std
sum_i += tmp_3;
square_sum_i += (tmp_3 * tmp_3);
}
auto pair = BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<T>(sum_i, square_sum_i),
PairForLayerNormAddFunctor<T>());
if (threadIdx.x == 0) {
T mean_i = static_cast<T>(pair.first_ / N);
T variance_i = static_cast<T>(pair.second_ / N - mean_i * mean_i);
shared_mem[BlockDim] = mean_i;
shared_mem[BlockDim + 1] = variance_i;
if (mean) {
mean[blockIdx.x] = mean_i;
}
if (variance) {
variance[blockIdx.x] = variance_i;
}
}
__syncthreads();
T mean_i = shared_mem[BlockDim];
T std_i = static_cast<T>(RealSqrt(shared_mem[BlockDim + 1] + epsilon));
index = i * N + threadIdx.x;
// First BlockDim elements loading from shared memory.
save_index = threadIdx.x;
save_ptr = shared_mem;
// For layer_norm, calculate out
for (int j = threadIdx.x; j < N; j += blockDim.x) {
T tmp_0 = (save_ptr[save_index] - mean_i) / std_i;
T tmp_1 = scale ? scale[j] * tmp_0 : tmp_0;
out[index] = bias_1 ? tmp_1 + bias_1[j] : tmp_1;
save_ptr = out;
index += blockDim.x;
save_index = index;
}
}
}
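// Taken together, the kernel above fuses the tail of a fully connected layer: for each
// row it forms t = relu(out + bias_0) + y (the relu being optional), then writes
// out = scale * (t - mean(t)) / sqrt(var(t) + epsilon) + bias_1, and stores the per-row
// mean and variance when those outputs are requested.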
template <bool DoRelu, int BlockDim>
__global__ void InplaceAddReluAddLayerNormKernel(const float16* y_data,
const float16* bias_0_data,
const float16* bias_1_data,
const float16* scale_data,
float16* out_data,
float16* mean_data,
float16* variance_data,
int M,
int N,
float epsilon) {
#if defined(PADDLE_WITH_CUDA)
const half* y = reinterpret_cast<const half*>(y_data);
const half* bias_0 = reinterpret_cast<const half*>(bias_0_data);
const half* bias_1 = reinterpret_cast<const half*>(bias_1_data);
const half* scale = reinterpret_cast<const half*>(scale_data);
half* out = reinterpret_cast<half*>(out_data);
half* mean = reinterpret_cast<half*>(mean_data);
half* variance = reinterpret_cast<half*>(variance_data);
#else
const float16* y = y_data;
const float16* bias_0 = bias_0_data;
const float16* bias_1 = bias_1_data;
const float16* scale = scale_data;
float16* out = out_data;
float16* mean = mean_data;
float16* variance = variance_data;
#endif
using BlockReduce = hipcub::BlockReduce<PairForLayerNorm<float>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
#if defined(PADDLE_WITH_CUDA)
__shared__ half shared_mem[BlockDim + 2];
#else
__shared__ float16 shared_mem[BlockDim + 2];
#endif
for (int i = blockIdx.x; i < M; i += gridDim.x) {
int index = i * N + threadIdx.x;
    // The first BlockDim elements will be saved to shared memory.
int save_index = threadIdx.x;
#if defined(PADDLE_WITH_CUDA)
half* save_ptr = shared_mem;
#else
float16* save_ptr = shared_mem;
#endif
float sum_i = 0;
float square_sum_i = 0;
for (int j = threadIdx.x; j < N; j += blockDim.x) {
#if defined(PADDLE_WITH_CUDA)
half tmp_0 = out[index];
// Add bias
half tmp_1;
if (bias_0 != nullptr) {
tmp_1 = __hadd(tmp_0, bias_0[j]);
} else {
tmp_1 = tmp_0;
}
// Relu
half tmp_2 = DoRelu ? Relu(tmp_1) : tmp_1;
// elementwise_add
half tmp_3 = __hadd(tmp_2, y[index]);
#else
float16 tmp_0 = out[index];
// Add bias
float16 tmp_1 = bias_0 ? tmp_0 + bias_0[j] : tmp_0;
// Relu
float16 tmp_2 = DoRelu ? Relu(tmp_1) : tmp_1;
// elementwise_add
float16 tmp_3 = tmp_2 + y[index];
#endif
// Save
save_ptr[save_index] = tmp_3;
save_ptr = out;
index += blockDim.x;
save_index = index;
// For layer_norm, reduce to calculate mean and std
sum_i += static_cast<float>(tmp_3);
#if defined(PADDLE_WITH_CUDA) && __CUDA_ARCH__ >= 530
square_sum_i += static_cast<float>(__hmul(tmp_3, tmp_3));
#elif defined(PADDLE_WITH_CUDA)
square_sum_i += static_cast<float>(tmp_3) * static_cast<float>(tmp_3);
#else
square_sum_i += static_cast<float>(tmp_3 * tmp_3);
#endif
}
auto pair = BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<float>(sum_i, square_sum_i),
PairForLayerNormAddFunctor<float>());
if (threadIdx.x == 0) {
#if defined(PADDLE_WITH_CUDA)
half mean_i = static_cast<half>(pair.first_ / N);
#if __CUDA_ARCH__ >= 530
half variance_i = static_cast<half>(
pair.second_ / N - static_cast<float>(__hmul(mean_i, mean_i)));
#else
half variance_i =
static_cast<half>(pair.second_ / N - static_cast<float>(mean_i) *
static_cast<float>(mean_i));
#endif
#else
float16 mean_i = static_cast<float16>(pair.first_ / N);
float16 variance_i = static_cast<float16>(
pair.second_ / N - static_cast<float>(mean_i * mean_i));
#endif
shared_mem[BlockDim] = mean_i;
shared_mem[BlockDim + 1] = variance_i;
if (mean) {
mean[blockIdx.x] = mean_i;
}
if (variance) {
variance[blockIdx.x] = variance_i;
}
}
__syncthreads();
#if defined(PADDLE_WITH_CUDA)
half mean_i = shared_mem[BlockDim];
half std_i = static_cast<half>(
RealSqrt(static_cast<float>(shared_mem[BlockDim + 1]) + epsilon));
#else
float16 mean_i = shared_mem[BlockDim];
float16 std_i = static_cast<float16>(
RealSqrt(static_cast<float>(shared_mem[BlockDim + 1]) + epsilon));
#endif
index = i * N + threadIdx.x;
// First BlockDim elements loading from shared memory.
save_index = threadIdx.x;
save_ptr = shared_mem;
// For layer_norm, calculate out
for (int j = threadIdx.x; j < N; j += blockDim.x) {
#if defined(PADDLE_WITH_CUDA)
#if __CUDA_ARCH__ >= 530
half tmp_0 = __hdiv(__hsub(save_ptr[save_index], mean_i), std_i);
half tmp_1 = scale ? __hmul(scale[j], tmp_0) : tmp_0;
#else
      half tmp_0 = static_cast<half>(
          (static_cast<float>(save_ptr[save_index]) - static_cast<float>(mean_i)) /
          static_cast<float>(std_i));
half tmp_1 = scale ? static_cast<half>(static_cast<float>(scale[j]) *
static_cast<float>(tmp_0))
: tmp_0;
#endif
if (bias_1 != nullptr) {
out[index] = __hadd(tmp_1, bias_1[j]);
} else {
out[index] = tmp_1;
}
#else
float16 tmp_0 = (save_ptr[save_index] - mean_i) / std_i;
float16 tmp_1 = scale ? scale[j] * tmp_0 : tmp_0;
out[index] = bias_1 ? tmp_1 + bias_1[j] : tmp_1;
#endif
save_ptr = out;
index += blockDim.x;
save_index = index;
}
}
}
template <typename T>
void AddReluAddLayerNorm(gpuStream_t stream,
bool with_relu,
int max_threads,
const T* y,
const T* bias_0,
const T* bias_1,
const T* scale,
T* out,
T* mean,
T* variance,
int M,
int N,
float epsilon) {
if (with_relu) {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
hipLaunchKernelGGL(( InplaceAddReluAddLayerNormKernel<T, true, kPowerOfTwoDim>)
, dim3(::max(max_threads / kPowerOfTwoDim, 1)),
dim3(kPowerOfTwoDim),
0,
stream,
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
} else {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
hipLaunchKernelGGL(( InplaceAddReluAddLayerNormKernel<T, false, kPowerOfTwoDim>)
, dim3(::max(max_threads / kPowerOfTwoDim, 1)),
dim3(kPowerOfTwoDim),
0,
stream,
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
}
}
template <>
void AddReluAddLayerNorm(gpuStream_t stream,
bool with_relu,
int max_threads,
const float16* y,
const float16* bias_0,
const float16* bias_1,
const float16* scale,
float16* out,
float16* mean,
float16* variance,
int M,
int N,
float epsilon) {
if (with_relu) {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
hipLaunchKernelGGL(( InplaceAddReluAddLayerNormKernel<true, kPowerOfTwoDim>)
, dim3(::max(max_threads / kPowerOfTwoDim, 1)),
dim3(kPowerOfTwoDim),
0,
stream,
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
} else {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
hipLaunchKernelGGL(( InplaceAddReluAddLayerNormKernel<false, kPowerOfTwoDim>)
, dim3(::max(max_threads / kPowerOfTwoDim, 1)),
dim3(kPowerOfTwoDim),
0,
stream,
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
}
}
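// ---------------------------------------------------------------------------
// Illustrative sketch (hypothetical, never called in this file): the
// CUDA_LAUNCH_KERNEL_HELPER / RoundToPowerOfTwo pair above is presumably
// expanding into one launch per power-of-two block size. The helper below
// shows the same dispatch idea for a single, fixed block size; the helper
// name and the fixed 256-thread block are assumptions, not operator code.
template <typename T, bool DoRelu>
static void LaunchFixedBlockSketch(gpuStream_t stream,
                                   int max_threads,
                                   const T* y,
                                   const T* bias_0,
                                   const T* bias_1,
                                   const T* scale,
                                   T* out,
                                   T* mean,
                                   T* variance,
                                   int M,
                                   int N,
                                   float epsilon) {
  constexpr int kBlockDim = 256;  // assumed fixed power-of-two block size
  int grid = max_threads / kBlockDim;
  if (grid < 1) grid = 1;  // always launch at least one block
  hipLaunchKernelGGL((InplaceAddReluAddLayerNormKernel<T, DoRelu, kBlockDim>),
                     dim3(grid),
                     dim3(kBlockDim),
                     0,
                     stream,
                     y, bias_0, bias_1, scale, out, mean, variance, M, N,
                     epsilon);
}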
template <typename T>
class FusedFCElementwiseLayerNormOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<phi::DenseTensor>("X");
auto* w = ctx.Input<phi::DenseTensor>("W");
auto* out = ctx.Output<phi::DenseTensor>("Out");
auto w_dims = w->dims();
int N = w_dims[1];
int K = w_dims[0];
int M = phi::product(x->dims()) / K;
const T* x_data = x->data<T>();
const T* w_data = w->data<T>();
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
auto* out_data = dev_ctx.template Alloc<T>(out, out->numel() * sizeof(T));
auto blas = phi::funcs::GetBlas<phi::GPUContext, T>(dev_ctx);
blas.GEMM(false,
false,
M,
N,
K,
static_cast<T>(1.0),
x_data,
K,
w_data,
N,
static_cast<T>(0.0),
out_data,
N);
auto* y = ctx.Input<phi::DenseTensor>("Y");
auto* bias_0 = ctx.Input<phi::DenseTensor>("Bias0");
auto* bias_1 = ctx.Input<phi::DenseTensor>("Bias1");
auto* scale = ctx.Input<phi::DenseTensor>("Scale");
const T* y_data = y->data<T>();
const T* bias_0_data = bias_0 ? bias_0->data<T>() : nullptr;
const T* bias_1_data = bias_1 ? bias_1->data<T>() : nullptr;
const T* scale_data = scale ? scale->data<T>() : nullptr;
auto* mean = ctx.Output<phi::DenseTensor>("Mean");
auto* variance = ctx.Output<phi::DenseTensor>("Variance");
T* mean_data =
mean ? dev_ctx.template Alloc<T>(mean, mean->numel() * sizeof(T))
: nullptr;
T* variance_data = variance ? dev_ctx.template Alloc<T>(
variance, variance->numel() * sizeof(T))
: nullptr;
bool with_relu =
(ctx.Attr<std::string>("activation_type") == "relu") ? true : false;
float epsilon = ctx.Attr<float>("epsilon");
int max_threads = dev_ctx.GetMaxPhysicalThreadCount();
AddReluAddLayerNorm(dev_ctx.stream(),
with_relu,
max_threads,
y_data,
bias_0_data,
bias_1_data,
scale_data,
out_data,
mean_data,
variance_data,
M,
N,
epsilon);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
fused_fc_elementwise_layernorm,
ops::FusedFCElementwiseLayerNormOpKernel<phi::dtype::float16>,
ops::FusedFCElementwiseLayerNormOpKernel<float>,
ops::FusedFCElementwiseLayerNormOpKernel<double>);
| a986f580183f05cbdf7e8410056cbaf0756ee9ff.cu | /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef __NVCC__
#include <cub/cub.cuh>
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#if defined(PADDLE_WITH_CUDA)
#include <cuda_fp16.h>
#endif
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
namespace paddle {
namespace operators {
using float16 = phi::dtype::float16;
template <typename T>
static __device__ __forceinline__ T Relu(T x) {
return static_cast<T>(fmaxf(0.f, x));
}
static __device__ __forceinline__ float RealSqrt(float x) { return sqrtf(x); }
static __device__ __forceinline__ double RealSqrt(double x) { return sqrt(x); }
template <typename T>
struct PairForLayerNorm {
__device__ __forceinline__ PairForLayerNorm() {}
__device__ __forceinline__ PairForLayerNorm(const T& first, const T& second)
: first_(first), second_(second) {}
T first_;
T second_;
};
template <typename T>
struct PairForLayerNormAddFunctor {
__device__ __forceinline__ PairForLayerNorm<T> operator()(
const PairForLayerNorm<T>& p1, const PairForLayerNorm<T>& p2) {
return PairForLayerNorm<T>(p1.first_ + p2.first_, p1.second_ + p2.second_);
}
};
template <typename T, bool DoRelu, int BlockDim>
__global__ void InplaceAddReluAddLayerNormKernel(const T* y,
const T* bias_0,
const T* bias_1,
const T* scale,
T* out,
T* mean,
T* variance,
int M,
int N,
float epsilon) {
using BlockReduce = cub::BlockReduce<PairForLayerNorm<T>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ T shared_mem[BlockDim + 2];
for (int i = blockIdx.x; i < M; i += gridDim.x) {
int index = i * N + threadIdx.x;
    // The first BlockDim elements will be saved to shared memory.
int save_index = threadIdx.x;
T* save_ptr = shared_mem;
T sum_i = 0;
T square_sum_i = 0;
for (int j = threadIdx.x; j < N; j += blockDim.x) {
T tmp_0 = out[index];
// Add bias
T tmp_1 = bias_0 ? tmp_0 + bias_0[j] : tmp_0;
// Relu
T tmp_2 = DoRelu ? Relu(tmp_1) : tmp_1;
// elementwise_add
T tmp_3 = tmp_2 + y[index];
// Save
save_ptr[save_index] = tmp_3;
save_ptr = out;
index += blockDim.x;
save_index = index;
// For layer_norm, reduce to calculate mean and std
sum_i += tmp_3;
square_sum_i += (tmp_3 * tmp_3);
}
auto pair = BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<T>(sum_i, square_sum_i),
PairForLayerNormAddFunctor<T>());
if (threadIdx.x == 0) {
T mean_i = static_cast<T>(pair.first_ / N);
T variance_i = static_cast<T>(pair.second_ / N - mean_i * mean_i);
shared_mem[BlockDim] = mean_i;
shared_mem[BlockDim + 1] = variance_i;
if (mean) {
mean[blockIdx.x] = mean_i;
}
if (variance) {
variance[blockIdx.x] = variance_i;
}
}
__syncthreads();
T mean_i = shared_mem[BlockDim];
T std_i = static_cast<T>(RealSqrt(shared_mem[BlockDim + 1] + epsilon));
index = i * N + threadIdx.x;
    // The first BlockDim elements are loaded from shared memory.
save_index = threadIdx.x;
save_ptr = shared_mem;
// For layer_norm, calculate out
for (int j = threadIdx.x; j < N; j += blockDim.x) {
T tmp_0 = (save_ptr[save_index] - mean_i) / std_i;
T tmp_1 = scale ? scale[j] * tmp_0 : tmp_0;
out[index] = bias_1 ? tmp_1 + bias_1[j] : tmp_1;
save_ptr = out;
index += blockDim.x;
save_index = index;
}
}
}
template <bool DoRelu, int BlockDim>
__global__ void InplaceAddReluAddLayerNormKernel(const float16* y_data,
const float16* bias_0_data,
const float16* bias_1_data,
const float16* scale_data,
float16* out_data,
float16* mean_data,
float16* variance_data,
int M,
int N,
float epsilon) {
#if defined(PADDLE_WITH_CUDA)
const half* y = reinterpret_cast<const half*>(y_data);
const half* bias_0 = reinterpret_cast<const half*>(bias_0_data);
const half* bias_1 = reinterpret_cast<const half*>(bias_1_data);
const half* scale = reinterpret_cast<const half*>(scale_data);
half* out = reinterpret_cast<half*>(out_data);
half* mean = reinterpret_cast<half*>(mean_data);
half* variance = reinterpret_cast<half*>(variance_data);
#else
const float16* y = y_data;
const float16* bias_0 = bias_0_data;
const float16* bias_1 = bias_1_data;
const float16* scale = scale_data;
float16* out = out_data;
float16* mean = mean_data;
float16* variance = variance_data;
#endif
using BlockReduce = cub::BlockReduce<PairForLayerNorm<float>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
#if defined(PADDLE_WITH_CUDA)
__shared__ half shared_mem[BlockDim + 2];
#else
__shared__ float16 shared_mem[BlockDim + 2];
#endif
for (int i = blockIdx.x; i < M; i += gridDim.x) {
int index = i * N + threadIdx.x;
    // The first BlockDim elements will be saved to shared memory.
int save_index = threadIdx.x;
#if defined(PADDLE_WITH_CUDA)
half* save_ptr = shared_mem;
#else
float16* save_ptr = shared_mem;
#endif
float sum_i = 0;
float square_sum_i = 0;
for (int j = threadIdx.x; j < N; j += blockDim.x) {
#if defined(PADDLE_WITH_CUDA)
half tmp_0 = out[index];
// Add bias
half tmp_1;
if (bias_0 != nullptr) {
tmp_1 = __hadd(tmp_0, bias_0[j]);
} else {
tmp_1 = tmp_0;
}
// Relu
half tmp_2 = DoRelu ? Relu(tmp_1) : tmp_1;
// elementwise_add
half tmp_3 = __hadd(tmp_2, y[index]);
#else
float16 tmp_0 = out[index];
// Add bias
float16 tmp_1 = bias_0 ? tmp_0 + bias_0[j] : tmp_0;
// Relu
float16 tmp_2 = DoRelu ? Relu(tmp_1) : tmp_1;
// elementwise_add
float16 tmp_3 = tmp_2 + y[index];
#endif
// Save
save_ptr[save_index] = tmp_3;
save_ptr = out;
index += blockDim.x;
save_index = index;
// For layer_norm, reduce to calculate mean and std
sum_i += static_cast<float>(tmp_3);
#if defined(PADDLE_WITH_CUDA) && __CUDA_ARCH__ >= 530
square_sum_i += static_cast<float>(__hmul(tmp_3, tmp_3));
#elif defined(PADDLE_WITH_CUDA)
square_sum_i += static_cast<float>(tmp_3) * static_cast<float>(tmp_3);
#else
square_sum_i += static_cast<float>(tmp_3 * tmp_3);
#endif
}
auto pair = BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<float>(sum_i, square_sum_i),
PairForLayerNormAddFunctor<float>());
if (threadIdx.x == 0) {
#if defined(PADDLE_WITH_CUDA)
half mean_i = static_cast<half>(pair.first_ / N);
#if __CUDA_ARCH__ >= 530
half variance_i = static_cast<half>(
pair.second_ / N - static_cast<float>(__hmul(mean_i, mean_i)));
#else
half variance_i =
static_cast<half>(pair.second_ / N - static_cast<float>(mean_i) *
static_cast<float>(mean_i));
#endif
#else
float16 mean_i = static_cast<float16>(pair.first_ / N);
float16 variance_i = static_cast<float16>(
pair.second_ / N - static_cast<float>(mean_i * mean_i));
#endif
shared_mem[BlockDim] = mean_i;
shared_mem[BlockDim + 1] = variance_i;
if (mean) {
mean[blockIdx.x] = mean_i;
}
if (variance) {
variance[blockIdx.x] = variance_i;
}
}
__syncthreads();
#if defined(PADDLE_WITH_CUDA)
half mean_i = shared_mem[BlockDim];
half std_i = static_cast<half>(
RealSqrt(static_cast<float>(shared_mem[BlockDim + 1]) + epsilon));
#else
float16 mean_i = shared_mem[BlockDim];
float16 std_i = static_cast<float16>(
RealSqrt(static_cast<float>(shared_mem[BlockDim + 1]) + epsilon));
#endif
index = i * N + threadIdx.x;
    // The first BlockDim elements are loaded from shared memory.
save_index = threadIdx.x;
save_ptr = shared_mem;
// For layer_norm, calculate out
for (int j = threadIdx.x; j < N; j += blockDim.x) {
#if defined(PADDLE_WITH_CUDA)
#if __CUDA_ARCH__ >= 530
half tmp_0 = __hdiv(__hsub(save_ptr[save_index], mean_i), std_i);
half tmp_1 = scale ? __hmul(scale[j], tmp_0) : tmp_0;
#else
      half tmp_0 = static_cast<half>(
          (static_cast<float>(save_ptr[save_index]) - static_cast<float>(mean_i)) /
          static_cast<float>(std_i));
half tmp_1 = scale ? static_cast<half>(static_cast<float>(scale[j]) *
static_cast<float>(tmp_0))
: tmp_0;
#endif
if (bias_1 != nullptr) {
out[index] = __hadd(tmp_1, bias_1[j]);
} else {
out[index] = tmp_1;
}
#else
float16 tmp_0 = (save_ptr[save_index] - mean_i) / std_i;
float16 tmp_1 = scale ? scale[j] * tmp_0 : tmp_0;
out[index] = bias_1 ? tmp_1 + bias_1[j] : tmp_1;
#endif
save_ptr = out;
index += blockDim.x;
save_index = index;
}
}
}
template <typename T>
void AddReluAddLayerNorm(gpuStream_t stream,
bool with_relu,
int max_threads,
const T* y,
const T* bias_0,
const T* bias_1,
const T* scale,
T* out,
T* mean,
T* variance,
int M,
int N,
float epsilon) {
if (with_relu) {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
InplaceAddReluAddLayerNormKernel<T, true, kPowerOfTwoDim>
<<<std::max(max_threads / kPowerOfTwoDim, 1),
kPowerOfTwoDim,
0,
stream>>>(
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
} else {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
InplaceAddReluAddLayerNormKernel<T, false, kPowerOfTwoDim>
<<<std::max(max_threads / kPowerOfTwoDim, 1),
kPowerOfTwoDim,
0,
stream>>>(
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
}
}
template <>
void AddReluAddLayerNorm(gpuStream_t stream,
bool with_relu,
int max_threads,
const float16* y,
const float16* bias_0,
const float16* bias_1,
const float16* scale,
float16* out,
float16* mean,
float16* variance,
int M,
int N,
float epsilon) {
if (with_relu) {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
InplaceAddReluAddLayerNormKernel<true, kPowerOfTwoDim>
<<<std::max(max_threads / kPowerOfTwoDim, 1),
kPowerOfTwoDim,
0,
stream>>>(
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
} else {
switch (platform::RoundToPowerOfTwo(N)) {
CUDA_LAUNCH_KERNEL_HELPER(
InplaceAddReluAddLayerNormKernel<false, kPowerOfTwoDim>
<<<std::max(max_threads / kPowerOfTwoDim, 1),
kPowerOfTwoDim,
0,
stream>>>(
y, bias_0, bias_1, scale, out, mean, variance, M, N, epsilon));
}
}
}
template <typename T>
class FusedFCElementwiseLayerNormOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<phi::DenseTensor>("X");
auto* w = ctx.Input<phi::DenseTensor>("W");
auto* out = ctx.Output<phi::DenseTensor>("Out");
auto w_dims = w->dims();
int N = w_dims[1];
int K = w_dims[0];
int M = phi::product(x->dims()) / K;
const T* x_data = x->data<T>();
const T* w_data = w->data<T>();
auto& dev_ctx = ctx.template device_context<phi::GPUContext>();
auto* out_data = dev_ctx.template Alloc<T>(out, out->numel() * sizeof(T));
auto blas = phi::funcs::GetBlas<phi::GPUContext, T>(dev_ctx);
blas.GEMM(false,
false,
M,
N,
K,
static_cast<T>(1.0),
x_data,
K,
w_data,
N,
static_cast<T>(0.0),
out_data,
N);
auto* y = ctx.Input<phi::DenseTensor>("Y");
auto* bias_0 = ctx.Input<phi::DenseTensor>("Bias0");
auto* bias_1 = ctx.Input<phi::DenseTensor>("Bias1");
auto* scale = ctx.Input<phi::DenseTensor>("Scale");
const T* y_data = y->data<T>();
const T* bias_0_data = bias_0 ? bias_0->data<T>() : nullptr;
const T* bias_1_data = bias_1 ? bias_1->data<T>() : nullptr;
const T* scale_data = scale ? scale->data<T>() : nullptr;
auto* mean = ctx.Output<phi::DenseTensor>("Mean");
auto* variance = ctx.Output<phi::DenseTensor>("Variance");
T* mean_data =
mean ? dev_ctx.template Alloc<T>(mean, mean->numel() * sizeof(T))
: nullptr;
T* variance_data = variance ? dev_ctx.template Alloc<T>(
variance, variance->numel() * sizeof(T))
: nullptr;
bool with_relu =
(ctx.Attr<std::string>("activation_type") == "relu") ? true : false;
float epsilon = ctx.Attr<float>("epsilon");
int max_threads = dev_ctx.GetMaxPhysicalThreadCount();
AddReluAddLayerNorm(dev_ctx.stream(),
with_relu,
max_threads,
y_data,
bias_0_data,
bias_1_data,
scale_data,
out_data,
mean_data,
variance_data,
M,
N,
epsilon);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
fused_fc_elementwise_layernorm,
ops::FusedFCElementwiseLayerNormOpKernel<phi::dtype::float16>,
ops::FusedFCElementwiseLayerNormOpKernel<float>,
ops::FusedFCElementwiseLayerNormOpKernel<double>);
|
13c8117c382be57ebd86ee65c0d69937ce8dae3b.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************
*
* (C) Copyright 2010 The Board of Trustees of the
* University of Illinois
* All Rights Reserved
*
***************************************************************************/
#include <parboil.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "util.h"
__global__ void histo_prescan_kernel (
unsigned int* input,
int size,
unsigned int* minmax);
__global__ void histo_main_kernel (
uchar4 *sm_mappings,
unsigned int num_elements,
unsigned int sm_range_min,
unsigned int sm_range_max,
unsigned int histo_height,
unsigned int histo_width,
unsigned int *global_subhisto,
unsigned int *global_histo,
unsigned int *global_overflow);
__global__ void histo_intermediates_kernel (
uint2 *input,
unsigned int height,
unsigned int width,
unsigned int input_pitch,
uchar4 *sm_mappings);
__global__ void histo_final_kernel (
unsigned int sm_range_min,
unsigned int sm_range_max,
unsigned int histo_height,
unsigned int histo_width,
unsigned int *global_subhisto,
unsigned int *global_histo,
unsigned int *global_overflow,
unsigned int *final_histo);
/******************************************************************************
* Implementation: GPU
* Details:
 * In the GPU implementation of the histogram, we begin by computing the span of
 * histogram bins actually touched by the input values. The histogramming is then
 * carried out by a (BLOCK_X, BLOCK_Y)-sized grid, where each group of blocks
 * sharing the same X computes its own partial histogram for a part of the input,
 * and each Y within the group exclusively writes to a portion of the span
 * computed at the beginning.
* Finally, a reduction is performed to combine all the partial histograms into
* the final result.
******************************************************************************/
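/******************************************************************************
 * Illustrative sketch (hypothetical, nothing below is launched by main): a
 * minimal pair of kernels showing the "per-block partial histograms, then
 * combine" structure described above. The kernel names, the trivial modulo
 * binning, and the caller-zeroed `partial` buffer are assumptions, not part
 * of the benchmark kernels declared above.
 ******************************************************************************/
__global__ void partial_histo_sketch (
        const unsigned int* in,
        int n,
        unsigned int* partial,
        int num_bins)
{
    // One partial histogram per block: partial[blockIdx.x * num_bins + bin].
    // The caller is assumed to have zero-initialized `partial`.
    unsigned int* my_histo = partial + blockIdx.x * num_bins;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += gridDim.x * blockDim.x) {
        atomicAdd(&my_histo[in[i] % num_bins], 1u);
    }
}

__global__ void combine_histo_sketch (
        const unsigned int* partial,
        int num_parts,
        unsigned int* final_histo,
        int num_bins)
{
    // Each thread owns a bin and sums it across all partial histograms.
    for (int bin = blockIdx.x * blockDim.x + threadIdx.x; bin < num_bins;
         bin += gridDim.x * blockDim.x) {
        unsigned int sum = 0;
        for (int p = 0; p < num_parts; ++p) {
            sum += partial[p * num_bins + bin];
        }
        final_histo[bin] = sum;
    }
}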
int main(int argc, char* argv[]) {
struct pb_TimerSet timers;
struct pb_Parameters *parameters;
parameters = pb_ReadParameters(&argc, argv);
if (!parameters)
return -1;
if(!parameters->inpFiles[0]){
fputs("Input file expected\n", stderr);
return -1;
}
char *prescans = "PreScanKernel";
char *postpremems = "PostPreMems";
char *intermediates = "IntermediatesKernel";
char *mains = "MainKernel";
char *finals = "FinalKernel";
pb_InitializeTimerSet(&timers);
pb_AddSubTimer(&timers, prescans, pb_TimerID_KERNEL);
pb_AddSubTimer(&timers, postpremems, pb_TimerID_KERNEL);
pb_AddSubTimer(&timers, intermediates, pb_TimerID_KERNEL);
pb_AddSubTimer(&timers, mains, pb_TimerID_KERNEL);
pb_AddSubTimer(&timers, finals, pb_TimerID_KERNEL);
pb_SwitchToTimer(&timers, pb_TimerID_IO);
int numIterations = 0;
if (argc >= 2){
numIterations = atoi(argv[1]);
} else {
fputs("Expected at least one command line argument\n", stderr);
return -1;
}
unsigned int img_width, img_height;
unsigned int histo_width, histo_height;
FILE* f = fopen(parameters->inpFiles[0],"rb");
int result = 0;
result += fread(&img_width, sizeof(unsigned int), 1, f);
result += fread(&img_height, sizeof(unsigned int), 1, f);
result += fread(&histo_width, sizeof(unsigned int), 1, f);
result += fread(&histo_height, sizeof(unsigned int), 1, f);
if (result != 4){
fputs("Error reading input and output dimensions from file\n", stderr);
return -1;
}
unsigned int* img = (unsigned int*) malloc (img_width*img_height*sizeof(unsigned int));
unsigned char* histo = (unsigned char*) calloc (histo_width*histo_height, sizeof(unsigned char));
result = fread(img, sizeof(unsigned int), img_width*img_height, f);
fclose(f);
if (result != img_width*img_height){
fputs("Error reading input array from file\n", stderr);
return -1;
}
int even_width = ((img_width+1)/2)*2;
unsigned int* input;
unsigned int* ranges;
uchar4* sm_mappings;
unsigned int* global_subhisto;
unsigned short* global_histo;
unsigned int* global_overflow;
unsigned char* final_histo;
hipMalloc((void**)&input , even_width*(((img_height+UNROLL-1)/UNROLL)*UNROLL)*sizeof(unsigned int));
hipMalloc((void**)&ranges , 2*sizeof(unsigned int));
hipMalloc((void**)&sm_mappings , img_width*img_height*sizeof(uchar4));
hipMalloc((void**)&global_subhisto , BLOCK_X*img_width*histo_height*sizeof(unsigned int));
hipMalloc((void**)&global_histo , img_width*histo_height*sizeof(unsigned short));
hipMalloc((void**)&global_overflow , img_width*histo_height*sizeof(unsigned int));
hipMalloc((void**)&final_histo , img_width*histo_height*sizeof(unsigned char));
hipMemset(final_histo , 0 , img_width*histo_height*sizeof(unsigned char));
for (int y=0; y < img_height; y++){
hipMemcpy(&(((unsigned int*)input)[y*even_width]),&img[y*img_width],img_width*sizeof(unsigned int), hipMemcpyHostToDevice);
}
pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
printf("iteration:(%d)\n", numIterations);
for (int iter = 0; iter < numIterations; iter++) {
unsigned int ranges_h[2] = {UINT32_MAX, 0};
hipMemcpy(ranges,ranges_h, 2*sizeof(unsigned int), hipMemcpyHostToDevice);
pb_SwitchToSubTimer(&timers, prescans , pb_TimerID_KERNEL);
hipLaunchKernelGGL(( histo_prescan_kernel), dim3(dim3(PRESCAN_BLOCKS_X)),dim3(dim3(PRESCAN_THREADS)), 0, 0, (unsigned int*)input, img_height*img_width, ranges);
pb_SwitchToSubTimer(&timers, postpremems , pb_TimerID_KERNEL);
hipMemcpy(ranges_h,ranges, 2*sizeof(unsigned int), hipMemcpyDeviceToHost);
hipMemset(global_subhisto,0,img_width*histo_height*sizeof(unsigned int));
pb_SwitchToSubTimer(&timers, intermediates, pb_TimerID_KERNEL);
hipLaunchKernelGGL(( histo_intermediates_kernel), dim3(dim3((img_height + UNROLL-1)/UNROLL)), dim3(dim3((img_width+1)/2)), 0, 0,
(uint2*)(input),
(unsigned int)img_height,
(unsigned int)img_width,
(img_width+1)/2,
(uchar4*)(sm_mappings)
);
pb_SwitchToSubTimer(&timers, mains, pb_TimerID_KERNEL);
hipLaunchKernelGGL(( histo_main_kernel), dim3(dim3(BLOCK_X, ranges_h[1]-ranges_h[0]+1)), dim3(dim3(THREADS)), 0, 0,
(uchar4*)(sm_mappings),
img_height*img_width,
ranges_h[0], ranges_h[1],
histo_height, histo_width,
(unsigned int*)(global_subhisto),
(unsigned int*)(global_histo),
(unsigned int*)(global_overflow)
);
pb_SwitchToSubTimer(&timers, finals, pb_TimerID_KERNEL);
hipLaunchKernelGGL(( histo_final_kernel), dim3(dim3(BLOCK_X*3)), dim3(dim3(512)), 0, 0,
ranges_h[0], ranges_h[1],
histo_height, histo_width,
(unsigned int*)(global_subhisto),
(unsigned int*)(global_histo),
(unsigned int*)(global_overflow),
(unsigned int*)(final_histo)
);
}
pb_SwitchToTimer(&timers, pb_TimerID_IO);
hipMemcpy(histo,final_histo, histo_height*histo_width*sizeof(unsigned char), hipMemcpyDeviceToHost);
hipFree(input);
hipFree(ranges);
hipFree(sm_mappings);
hipFree(global_subhisto);
hipFree(global_histo);
hipFree(global_overflow);
hipFree(final_histo);
if (parameters->outFile) {
dump_histo_img(histo, histo_height, histo_width, parameters->outFile);
}
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
free(img);
free(histo);
pb_SwitchToTimer(&timers, pb_TimerID_NONE);
printf("\n");
pb_PrintTimerSet(&timers);
pb_FreeParameters(parameters);
pb_DestroyTimerSet(&timers);
return 0;
}
| 13c8117c382be57ebd86ee65c0d69937ce8dae3b.cu | /***************************************************************************
*
* (C) Copyright 2010 The Board of Trustees of the
* University of Illinois
* All Rights Reserved
*
***************************************************************************/
#include <parboil.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include "util.h"
__global__ void histo_prescan_kernel (
unsigned int* input,
int size,
unsigned int* minmax);
__global__ void histo_main_kernel (
uchar4 *sm_mappings,
unsigned int num_elements,
unsigned int sm_range_min,
unsigned int sm_range_max,
unsigned int histo_height,
unsigned int histo_width,
unsigned int *global_subhisto,
unsigned int *global_histo,
unsigned int *global_overflow);
__global__ void histo_intermediates_kernel (
uint2 *input,
unsigned int height,
unsigned int width,
unsigned int input_pitch,
uchar4 *sm_mappings);
__global__ void histo_final_kernel (
unsigned int sm_range_min,
unsigned int sm_range_max,
unsigned int histo_height,
unsigned int histo_width,
unsigned int *global_subhisto,
unsigned int *global_histo,
unsigned int *global_overflow,
unsigned int *final_histo);
/******************************************************************************
* Implementation: GPU
* Details:
 * In the GPU implementation of the histogram, we begin by computing the span of
 * histogram bins actually touched by the input values. The histogramming is then
 * carried out by a (BLOCK_X, BLOCK_Y)-sized grid, where each group of blocks
 * sharing the same X computes its own partial histogram for a part of the input,
 * and each Y within the group exclusively writes to a portion of the span
 * computed at the beginning.
* Finally, a reduction is performed to combine all the partial histograms into
* the final result.
******************************************************************************/
int main(int argc, char* argv[]) {
struct pb_TimerSet timers;
struct pb_Parameters *parameters;
parameters = pb_ReadParameters(&argc, argv);
if (!parameters)
return -1;
if(!parameters->inpFiles[0]){
fputs("Input file expected\n", stderr);
return -1;
}
char *prescans = "PreScanKernel";
char *postpremems = "PostPreMems";
char *intermediates = "IntermediatesKernel";
char *mains = "MainKernel";
char *finals = "FinalKernel";
pb_InitializeTimerSet(&timers);
pb_AddSubTimer(&timers, prescans, pb_TimerID_KERNEL);
pb_AddSubTimer(&timers, postpremems, pb_TimerID_KERNEL);
pb_AddSubTimer(&timers, intermediates, pb_TimerID_KERNEL);
pb_AddSubTimer(&timers, mains, pb_TimerID_KERNEL);
pb_AddSubTimer(&timers, finals, pb_TimerID_KERNEL);
pb_SwitchToTimer(&timers, pb_TimerID_IO);
int numIterations = 0;
if (argc >= 2){
numIterations = atoi(argv[1]);
} else {
fputs("Expected at least one command line argument\n", stderr);
return -1;
}
unsigned int img_width, img_height;
unsigned int histo_width, histo_height;
FILE* f = fopen(parameters->inpFiles[0],"rb");
int result = 0;
result += fread(&img_width, sizeof(unsigned int), 1, f);
result += fread(&img_height, sizeof(unsigned int), 1, f);
result += fread(&histo_width, sizeof(unsigned int), 1, f);
result += fread(&histo_height, sizeof(unsigned int), 1, f);
if (result != 4){
fputs("Error reading input and output dimensions from file\n", stderr);
return -1;
}
unsigned int* img = (unsigned int*) malloc (img_width*img_height*sizeof(unsigned int));
unsigned char* histo = (unsigned char*) calloc (histo_width*histo_height, sizeof(unsigned char));
result = fread(img, sizeof(unsigned int), img_width*img_height, f);
fclose(f);
if (result != img_width*img_height){
fputs("Error reading input array from file\n", stderr);
return -1;
}
int even_width = ((img_width+1)/2)*2;
unsigned int* input;
unsigned int* ranges;
uchar4* sm_mappings;
unsigned int* global_subhisto;
unsigned short* global_histo;
unsigned int* global_overflow;
unsigned char* final_histo;
cudaMalloc((void**)&input , even_width*(((img_height+UNROLL-1)/UNROLL)*UNROLL)*sizeof(unsigned int));
cudaMalloc((void**)&ranges , 2*sizeof(unsigned int));
cudaMalloc((void**)&sm_mappings , img_width*img_height*sizeof(uchar4));
cudaMalloc((void**)&global_subhisto , BLOCK_X*img_width*histo_height*sizeof(unsigned int));
cudaMalloc((void**)&global_histo , img_width*histo_height*sizeof(unsigned short));
cudaMalloc((void**)&global_overflow , img_width*histo_height*sizeof(unsigned int));
cudaMalloc((void**)&final_histo , img_width*histo_height*sizeof(unsigned char));
cudaMemset(final_histo , 0 , img_width*histo_height*sizeof(unsigned char));
for (int y=0; y < img_height; y++){
cudaMemcpy(&(((unsigned int*)input)[y*even_width]),&img[y*img_width],img_width*sizeof(unsigned int), cudaMemcpyHostToDevice);
}
pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
printf("iteration:(%d)\n", numIterations);
for (int iter = 0; iter < numIterations; iter++) {
unsigned int ranges_h[2] = {UINT32_MAX, 0};
cudaMemcpy(ranges,ranges_h, 2*sizeof(unsigned int), cudaMemcpyHostToDevice);
pb_SwitchToSubTimer(&timers, prescans , pb_TimerID_KERNEL);
histo_prescan_kernel<<<dim3(PRESCAN_BLOCKS_X),dim3(PRESCAN_THREADS)>>>((unsigned int*)input, img_height*img_width, ranges);
pb_SwitchToSubTimer(&timers, postpremems , pb_TimerID_KERNEL);
cudaMemcpy(ranges_h,ranges, 2*sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaMemset(global_subhisto,0,img_width*histo_height*sizeof(unsigned int));
pb_SwitchToSubTimer(&timers, intermediates, pb_TimerID_KERNEL);
histo_intermediates_kernel<<<dim3((img_height + UNROLL-1)/UNROLL), dim3((img_width+1)/2)>>>(
(uint2*)(input),
(unsigned int)img_height,
(unsigned int)img_width,
(img_width+1)/2,
(uchar4*)(sm_mappings)
);
pb_SwitchToSubTimer(&timers, mains, pb_TimerID_KERNEL);
histo_main_kernel<<<dim3(BLOCK_X, ranges_h[1]-ranges_h[0]+1), dim3(THREADS)>>>(
(uchar4*)(sm_mappings),
img_height*img_width,
ranges_h[0], ranges_h[1],
histo_height, histo_width,
(unsigned int*)(global_subhisto),
(unsigned int*)(global_histo),
(unsigned int*)(global_overflow)
);
pb_SwitchToSubTimer(&timers, finals, pb_TimerID_KERNEL);
histo_final_kernel<<<dim3(BLOCK_X*3), dim3(512)>>>(
ranges_h[0], ranges_h[1],
histo_height, histo_width,
(unsigned int*)(global_subhisto),
(unsigned int*)(global_histo),
(unsigned int*)(global_overflow),
(unsigned int*)(final_histo)
);
}
pb_SwitchToTimer(&timers, pb_TimerID_IO);
cudaMemcpy(histo,final_histo, histo_height*histo_width*sizeof(unsigned char), cudaMemcpyDeviceToHost);
cudaFree(input);
cudaFree(ranges);
cudaFree(sm_mappings);
cudaFree(global_subhisto);
cudaFree(global_histo);
cudaFree(global_overflow);
cudaFree(final_histo);
if (parameters->outFile) {
dump_histo_img(histo, histo_height, histo_width, parameters->outFile);
}
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
free(img);
free(histo);
pb_SwitchToTimer(&timers, pb_TimerID_NONE);
printf("\n");
pb_PrintTimerSet(&timers);
pb_FreeParameters(parameters);
pb_DestroyTimerSet(&timers);
return 0;
}
|